diff --git a/.github/ISSUE_TEMPLATE/capability.md b/.github/ISSUE_TEMPLATE/capability.md new file mode 100644 index 0000000..41378f6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/capability.md @@ -0,0 +1,38 @@ +--- +name: JIRA Story/Capability +about: The structure for outlining work being done on a JIRA story +labels: JIRA Story +--- + +# Capability + + + +## Task + + + +## Plan/Outline + + + + +### TODOS + + +- [ ] + +### Additional components / Context + + diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml new file mode 100644 index 0000000..3e81b60 --- /dev/null +++ b/.github/workflows/cicd.yml @@ -0,0 +1,217 @@ +name: CI/CD Pipeline + +on: + pull_request: + branches: [main, development] + push: + #TODO: remove dev branch + branches: [main, development] + # Consider how you want to handle version tags + tags: ['v*.*.*'] + +permissions: + contents: read + packages: write + security-events: write + +env: + REGISTRY: ghcr.io + PYTHON_VERSION: '3.13' + +jobs: + setup: + runs-on: ubuntu-latest + outputs: + image_base: ${{ steps.vars.outputs.image_base }} + pr_tag: ${{ steps.vars.outputs.pr_tag }} + commit_sha: ${{ steps.vars.outputs.commit_sha }} + commit_sha_short: ${{ steps.vars.outputs.commit_sha_short }} + test_image_tag: ${{ steps.vars.outputs.test_image_tag }} + steps: + - name: Compute image vars + id: vars + shell: bash + run: | + set -euo pipefail + ORG="$(echo "${GITHUB_REPOSITORY_OWNER}" | tr '[:upper:]' '[:lower:]')" + REPO="$(basename "${GITHUB_REPOSITORY}")" + IMAGE_BASE="${REGISTRY}/${ORG}/${REPO}" + echo "image_base=${IMAGE_BASE}" >> "$GITHUB_OUTPUT" + + if [ "${GITHUB_EVENT_NAME}" = "pull_request" ]; then + PR_NUM="${{ github.event.pull_request.number }}" + PR_TAG="pr-${PR_NUM}-build" + echo "pr_tag=${PR_TAG}" >> "$GITHUB_OUTPUT" + echo "test_image_tag=${PR_TAG}" >> "$GITHUB_OUTPUT" + fi + + if [ "${GITHUB_EVENT_NAME}" = "push" ]; then + COMMIT_SHA="${GITHUB_SHA}" + SHORT_SHA="${COMMIT_SHA:0:12}" + echo "commit_sha=${COMMIT_SHA}" >> "$GITHUB_OUTPUT" + echo "commit_sha_short=${SHORT_SHA}" >> "$GITHUB_OUTPUT" + echo "test_image_tag=${SHORT_SHA}" >> "$GITHUB_OUTPUT" + fi + + # TODO: Re-enable this job after linting rules are fixed or the code passes. + # ruff-linting: + # name: Ruff Linting + # runs-on: ubuntu-latest + # steps: + # - name: Checkout code + # uses: actions/checkout@v4 + # + # - name: Install uv + # uses: astral-sh/setup-uv@v5 + # with: + # enable-cache: true + # cache-dependency-glob: uv.lock pyproject.toml + # + # - name: Set up Python + # uses: astral-sh/setup-uv@v5 + # with: + # python-version: "3.13" + # cache-dependency-glob: uv.lock pyproject.toml + # + # - name: Install dependencies + # run: uv sync + # + # - name: Run ruff check + # run: | + # uv run ruff check . --config pyproject.toml --output-format=github + # + # - name: Run ruff formating check + # run: | + # uv run ruff format . --config pyproject.toml --check + + # TODO: Re-enable this job after mocking AWS services, or the more integration focused tests are removed. + # The current unit tests require live AWS credentials, which is not practical for a standard CI environment. 
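+  # One illustrative option for re-enabling (an assumption, not wired up here): mock the AWS
+  # calls in the tests (e.g. with moto) and export dummy credentials so nothing reaches a
+  # live account, e.g.:
+  #   env:
+  #     AWS_ACCESS_KEY_ID: testing
+  #     AWS_SECRET_ACCESS_KEY: testing
+  #     AWS_DEFAULT_REGION: us-east-1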
+ # unit-test: + # runs-on: ubuntu-latest + # strategy: + # matrix: + # python-version: ['3.11', '3.12', '3.13'] + # steps: + # - name: Checkout code + # uses: actions/checkout@v4 + # + # - name: Install uv + # uses: astral-sh/setup-uv@v5 + # with: + # enable-cache: true + # python-version: ${{ matrix.python-version }} + # cache-dependency-glob: uv.lock pyproject.toml + # + # - name: Install dependencies + # run: uv sync + # + # - name: Run tests + # run: | + # uv run pytest + + + build-and-scan: + name: Build and Scan Container + runs-on: ubuntu-latest + needs: setup + steps: + - uses: actions/checkout@v4 + - name: Build image for scanning + id: build + uses: docker/build-push-action@v6 + with: + context: . + file: ./docker/Dockerfile + # Load the image to the local Docker daemon, but do not push it + load: true + tags: ${{ needs.setup.outputs.image_base }}:${{ needs.setup.outputs.test_image_tag }} + - name: Scan container with Trivy + uses: aquasecurity/trivy-action@0.20.0 + with: + # Scan the locally available image + image-ref: ${{ needs.setup.outputs.image_base }}:${{ needs.setup.outputs.test_image_tag }} + format: 'template' + template: '@/contrib/sarif.tpl' + output: 'trivy-results.sarif' + severity: 'CRITICAL,HIGH' + - name: Upload Trivy SARIF + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: 'trivy-results.sarif' + + codeql-scan: + name: CodeQL Scan + if: github.event_name == 'pull_request' || github.event_name == 'push' + runs-on: ubuntu-latest + # The 'unit-test' job is disabled for now. When you re-enable it, uncomment the line below. + # needs: unit-test + permissions: + actions: read + contents: read + security-events: write + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + enable-cache: true + python-version: ${{ env.PYTHON_VERSION }} + cache-dependency-glob: uv.lock pyproject.toml + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: python + - name: Install dependencies + run: uv sync + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + + publish: + name: Publish to Registry + if: > + github.event_name == 'push' && ( + github.ref == 'refs/heads/main' || + github.ref == 'refs/heads/development' || + startsWith(github.ref, 'refs/tags/v') + ) + runs-on: ubuntu-latest + # When you re-enable your other jobs: ruff-linting, unit-test. Add them to this list. 
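+    # e.g. needs: [setup, ruff-linting, unit-test, build-and-scan, codeql-scan]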
+ needs: [setup, build-and-scan, codeql-scan] + steps: + - uses: actions/checkout@v4 + - name: Prepare image tags + id: prep_tags + run: | + # Always start with the unique commit SHA tag for traceability + TAGS="${{ needs.setup.outputs.image_base }}:${{ needs.setup.outputs.commit_sha_short }}" + + # If it's a push to the main branch, also add the 'latest' tag + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + TAGS="$TAGS,${{ needs.setup.outputs.image_base }}:latest" + fi + + # If the trigger was a version tag, add that version as a tag + if [[ "${{ github.ref }}" == refs/tags/v* ]]; then + # github.ref_name holds the tag name (e.g., "v1.0.0") + VERSION_TAG=${{ github.ref_name }} + TAGS="$TAGS,${{ needs.setup.outputs.image_base }}:${VERSION_TAG}" + fi + + echo "tags=${TAGS}" >> "$GITHUB_OUTPUT" + - name: Log in to registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build & push final image + uses: docker/build-push-action@v6 + with: + context: . + file: ./docker/Dockerfile + push: true + tags: ${{ steps.prep_tags.outputs.tags }} diff --git a/.gitignore b/.gitignore index f5e8694..ce85dae 100644 --- a/.gitignore +++ b/.gitignore @@ -1,77 +1,217 @@ -# Compiled source # -################### -*.com -*.class -*.dll -*.exe -*.o -*.so -_site/ - -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.zip - -# Logs and databases # -###################### -*.log -*.sql -*.sqlite - -# OS generated files # -###################### -.DS_Store -.DS_Store? -.Spotlight-V100 -.Trashes -Icon? -ehthumbs.db -Thumbs.db - -# Vim swap files # -################## -*.swp - -# Python # -################# -*.pyc -*.egg-info/ +# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] -.env -.python-version +*$py.class -# pyenv # -######### -.python-version +# C extensions +*.so -# Django # -################# -*.egg-info +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ .installed.cfg +*.egg +MANIFEST +.DS_Store + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt # Unit test / coverage reports -################# htmlcov/ .tox/ +.nox/ .coverage +.coverage.* .cache nosetests.xml coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ -# Front-End # -############# -node_modules/ -bower_components/ -.grunt/ -src/vendor/ -dist/ +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ +.vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +Pipfile + +.qodo +src/icefabric/_version.py +*.db +*.zarr +.zarr +*.tiff +*.tif +examples/iceberg_catalog/metadata/ + +# Local Terraform state +*.tfstate +*.tfstate.backup + +# Crash log files +crash.log + +# CLI configuration +.terraform/ + +# Terraform plan output (optional unless you want to inspect later) +*.tfplan + +# tiles +*.pmtiles +examples/icechunk_data_viewer/martin/tiles/quantiles/* +examples/icechunk_data_viewer/martin/tiles/legends/* +examples/icechunk_data_viewer/martin/tiles/legend.png +tests/data/topo_tifs + +# all data files +data +*.gpkg + +# R code +.Rproj.user +.Rdata +.Rhistory +icefabric.Rproj diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..4dbe867 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + exclude: LICENSE|\.csv$ + - id: end-of-file-fixer + exclude: LICENSE|\.csv$ + - id: check-yaml + exclude: mkdocs.yml$ + - id: debug-statements + + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.12.3 + hooks: + # Run the linter. + - id: ruff + args: [ --fix ] + # Run the formatter. 
+ - id: ruff-format + + - repo: https://github.com/astral-sh/uv-pre-commit + rev: 0.7.20 + hooks: + - id: uv-lock + + - repo: https://github.com/kynan/nbstripout + rev: 0.8.1 + hooks: + - id: nbstripout diff --git a/.pyiceberg.yaml b/.pyiceberg.yaml new file mode 100644 index 0000000..4c42331 --- /dev/null +++ b/.pyiceberg.yaml @@ -0,0 +1,11 @@ +catalog: + glue: + type: glue + s3.endpoint: s3.us-east-1.amazonaws.com + warehouse: s3://52fcde3e7-5582-477d-7686ou4ij1ptxj8equ83a5xc51fsuse1b--table-s3 + region: us-east-1 + glue_region: us-east-1 + sql: + type: sql + uri: sqlite:////tmp/warehouse/pyiceberg_catalog.db + warehouse: file:///tmp/warehouse diff --git a/LICENSE b/LICENSE index e8ab96d..5b06463 100644 --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,14 @@ +Copyright 2025 Raytheon Company + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +Licensed under: https://opensource.org/license/bsd-2-clause + +- - - - - - - - - - - - - - + Apache License Version 2.0, January 2004 diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 0000000..696ce8d --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,14 @@ +-- + +The Hydrofabric data used in this repo is credited to Lynker and its contributors + +Johnson, J. M. (2022). National Hydrologic Geospatial Fabric (hydrofabric) for the Next Generation (NextGen) Hydrologic Modeling Framework, +HydroShare http://www.hydroshare.org/resource/129787b468aa4d55ace7b124ed27dbde + +-- + +Inspiration and assistance in the creation, and storage of network graphs comes from the following repos. Credit to the authors: +- https://github.com/DeepGroundwater/ddr/blob/master/engine/adjacency.py +- https://github.com/CIROH-UA/NGIAB_data_preprocess/blob/main/modules/data_processing/graph_utils.py + +-- diff --git a/README.md b/README.md index 2159d62..fb3d4e7 100644 --- a/README.md +++ b/README.md @@ -1,109 +1,58 @@ -#### OWP Open Source Project Template Instructions +# icefabric -1. Create a new project. -2. [Copy these files into the new project](#installation) -3. Update the README, replacing the contents below as prescribed. -4. Add any libraries, assets, or hard dependencies whose source code will be included - in the project's repository to the _Exceptions_ section in the [TERMS](TERMS.md). - - If no exceptions are needed, remove that section from TERMS. -5. If working with an existing code base, answer the questions on the [open source checklist](opensource-checklist.md) -6. Delete these instructions and everything up to the _Project Title_ from the README. -7. Write some great software and tell people about it. +icefabric -> Keep the README fresh! It's the first thing people see and will make the initial impression. 
+[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) -## Installation -To install all of the template files, run the following script from the root of your project's directory: +An [Apache Iceberg](https://py.iceberg.apache.org/) implementation of the Hydrofabric to disseminate continental hydrologic data -``` -bash -c "$(curl -s https://raw.githubusercontent.com/NOAA-OWP/owp-open-source-project-template/open_source_template.sh)" -``` - ----- - -# Project Title - -**Description**: Put a meaningful, short, plain-language description of what -this project is trying to accomplish and why it matters. -Describe the problem(s) this project solves. -Describe how this software can improve the lives of its audience. - -Other things to include: - - - **Technology stack**: Indicate the technological nature of the software, including primary programming language(s) and whether the software is intended as standalone or as a module in a framework or other ecosystem. - - **Status**: Alpha, Beta, 1.1, etc. It's OK to write a sentence, too. The goal is to let interested people know where this project is at. This is also a good place to link to the [CHANGELOG](CHANGELOG.md). - - **Links to production or demo instances** - - Describe what sets this apart from related-projects. Linking to another doc or page is OK if this can't be expressed in a sentence or two. - - -**Screenshot**: If the software has visual components, place a screenshot after the description; e.g., - -![](https://raw.githubusercontent.com/NOAA-OWP/owp-open-source-project-template/master/doc/Screenshot.png) - - -## Dependencies - -Describe any dependencies that must be installed for this software to work. -This includes programming languages, databases or other storage mechanisms, build tools, frameworks, and so forth. -If specific versions of other software are required, or known not to work, call that out. - -## Installation - -Detailed instructions on how to install, configure, and get the project running. -This should be frequently tested to ensure reliability. Alternatively, link to -a separate [INSTALL](INSTALL.md) document. - -## Configuration - -If the software is configurable, describe it in detail, either here or in other documentation to which you link. +> [!NOTE] +> To run any of the functions in this repo your AWS test account credentials need to be in your `.env` file and your `.pyiceberg.yaml` settings need to up to date with `AWS_DEFAULT_REGION="us-east-1"` set -## Usage - -Show users how to use the software. -Be specific. -Use appropriate formatting when showing code snippets. - -## How to test the software - -If the software includes automated tests, detail how to run those tests. - -## Known issues - -Document any known significant shortcomings with the software. - -## Getting help - -Instruct users how to get help with this software; this might include links to an issue tracker, wiki, mailing list, etc. - -**Example** - -If you have questions, concerns, bug reports, etc, please file an issue in this repository's Issue Tracker. - -## Getting involved - -This section should detail why people should get involved and describe key areas you are -currently focusing on; e.g., trying to get feedback on features, fixing certain bugs, building -important pieces, etc. 
+### Getting Started
+This repo is managed through [UV](https://docs.astral.sh/uv/getting-started/installation/) and can be installed through:
+```sh
+uv sync
+source .venv/bin/activate
+```
-General instructions on _how_ to contribute should be stated with a link to [CONTRIBUTING](CONTRIBUTING.md).
+### Running the API locally
+To run the API locally, ensure your `.env` file in your project root has the right credentials, then run
+```sh
+python -m app.main
+```
+This should spin up the API services at `localhost:8000/`.
+To use a local SQL-backed Iceberg catalog instead of the default Glue catalog, run
+```sh
+python -m app.main --catalog sql
+```
----
+### Building the API through Docker
+To run the API locally with Docker, ensure your `.env` file in your project root has the right credentials, then run
+```sh
+docker compose -f docker/compose.yaml build --no-cache
+docker compose -f docker/compose.yaml up
+```
+This should spin up the API services.
-## Open source licensing info
-These links must be included in the final version of your project README (keep this section,
-as is, but remove this sentence):
+### Development
+To ensure that icefabric follows the specified structure, be sure to install the local dev dependencies and run `pre-commit install`.
-1. [TERMS](TERMS.md)
-2. [LICENSE](LICENSE)
+### Documentation
+To build the user guide documentation for Icefabric locally, run the following commands:
+```sh
+uv pip install ".[docs]"
+mkdocs serve -a localhost:8080
+```
+Docs will be spun up at `localhost:8080/`
+### Pytests
-----
+The `tests` folder is for all testing data so the global conftest can pick it up. This allows all tests in the namespace packages to share the same scope without having to reference one another in tests.
-## Credits and references
+To run tests, run `pytest -s` from project root.
-1. Projects that inspired you
-2. Related projects
-3. Books, papers, talks, or other sources that have meaningful impact or influence on this project
+To run the subsetter tests, run `pytest --run-slow` as these tests take some time. Otherwise, they will be skipped.
diff --git a/app/__init__.py b/app/__init__.py
new file mode 100644
index 0000000..e35508e
--- /dev/null
+++ b/app/__init__.py
@@ -0,0 +1,51 @@
+from fastapi import HTTPException, Request
+from pyiceberg.catalog import Catalog
+from rustworkx import PyDiGraph
+
+
+def get_catalog(request: Request) -> Catalog:
+    """Gets the pyiceberg catalog reference from the app state
+
+    Parameters
+    ----------
+    request : Request
+        The FastAPI request object containing the application state
+
+    Returns
+    -------
+    pyiceberg.catalog.Catalog
+        The loaded pyiceberg catalog instance used for querying versioned EDFS data
+
+    Raises
+    ------
+    HTTPException
+        If the catalog is not loaded or not available in the application state.
+        Returns HTTP 500 status code with "Catalog not loaded" detail message.
+    """
+    if not hasattr(request.app.state, "catalog") or request.app.state.catalog is None:
+        raise HTTPException(status_code=500, detail="Catalog not loaded")
+    return request.app.state.catalog
+
+
+def get_graphs(request: Request) -> PyDiGraph:
+    """Gets the rustworkx graph objects from the app state
+
+    Parameters
+    ----------
+    request : Request
+        The FastAPI request object containing the application state
+
+    Returns
+    -------
+    dict[str, rustworkx.PyDiGraph]
+        A dictionary with all pydigraph objects
+
+    Raises
+    ------
+    HTTPException
+        If the catalog is not loaded or not available in the application state.
+ Returns HTTP 500 status code with "Catalog not loaded" detail message. + """ + if not hasattr(request.app.state, "network_graphs") or request.app.state.network_graphs is None: + raise HTTPException(status_code=500, detail="network_graphs not loaded") + return request.app.state.network_graphs diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..6cbd517 --- /dev/null +++ b/app/main.py @@ -0,0 +1,155 @@ +import argparse +from contextlib import asynccontextmanager +from pathlib import Path + +import uvicorn +from fastapi import FastAPI, status +from fastapi.staticfiles import StaticFiles +from pydantic import BaseModel +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NoSuchTableError +from pyprojroot import here + +from app.routers.hydrofabric.router import api_router as hydrofabric_api_router +from app.routers.nwm_modules.router import ( + lasam_router, + lstm_router, + noahowp_router, + sacsma_router, + sft_router, + smp_router, + snow17_router, + topmodel_router, + topoflow_router, + troute_router, +) +from app.routers.ras_xs.router import api_router as ras_api_router +from app.routers.rise_wrappers.router import api_router as rise_api_wrap_router +from app.routers.streamflow_observations.router import api_router as streamflow_api_router +from icefabric.builds import load_upstream_json +from icefabric.helpers import load_creds + +tags_metadata = [ + { + "name": "Hydrofabric Services", + "description": "Data Querying functions for the Hydrofabric", + }, + { + "name": "RISE", + "description": "An interface to the RISE API for querying reservoir outflow data", + "externalDocs": {"description": "Link to the RISE API", "url": "https://data.usbr.gov/rise-api"}, + }, + { + "name": "NWM Modules", + "description": "Functions that interact with NWM modules. Mainly supports IPE generation.", + }, + { + "name": "HEC-RAS XS", + "description": "Data querying functions for HEC-RAS cross-sectional data (i.e. per flowpath ID or geospatial queries)", + }, +] + +parser = argparse.ArgumentParser(description="The FastAPI App instance for querying versioned EDFS data") + +# Glue = S3 Tables; Sql is a local iceberg catalog +parser.add_argument( + "--catalog", + choices=["glue", "sql"], + help="The catalog information for querying versioned EDFS data", + default="glue", +) # Setting the default to read from S3 +args, _ = parser.parse_known_args() + + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Loads the iceberg catalog location from an environment variable + + Parameters + ---------- + app: FastAPI + The FastAPI app instance + """ + load_creds() + catalog = load_catalog(args.catalog) + hydrofabric_namespaces = ["conus_hf", "ak_hf", "hi_hf", "prvi_hf"] + app.state.catalog = catalog + try: + app.state.network_graphs = load_upstream_json( + catalog=catalog, + namespaces=hydrofabric_namespaces, + output_path=here() / "data", + ) + except NoSuchTableError: + raise NotImplementedError( + "Cannot load API as the Hydrofabric Database/Namespace cannot be connected to. 
Please ensure you have access to the correct hydrofabric namespaces"
+        ) from None
+    yield
+
+
+app = FastAPI(
+    title="Icefabric API",
+    description="API for accessing iceberg or icechunk data from EDFS services",
+    version="1.0.0",
+    docs_url="/docs",
+    redoc_url="/redoc",
+    lifespan=lifespan,
+    openapi_tags=tags_metadata,
+)
+
+
+class HealthCheck(BaseModel):
+    """Response model to validate and return when performing a health check."""
+
+    status: str = "OK"
+
+
+# Include routers
+app.include_router(hydrofabric_api_router, prefix="/v1")
+app.include_router(streamflow_api_router, prefix="/v1")
+app.include_router(sft_router, prefix="/v1")
+app.include_router(snow17_router, prefix="/v1")
+app.include_router(smp_router, prefix="/v1")
+app.include_router(lstm_router, prefix="/v1")
+app.include_router(lasam_router, prefix="/v1")
+app.include_router(noahowp_router, prefix="/v1")
+app.include_router(sacsma_router, prefix="/v1")
+app.include_router(troute_router, prefix="/v1")
+app.include_router(topmodel_router, prefix="/v1")
+app.include_router(topoflow_router, prefix="/v1")
+app.include_router(ras_api_router, prefix="/v1")
+app.include_router(rise_api_wrap_router, prefix="/v1")
+
+
+@app.get(
+    "/health",
+    tags=["Health"],
+    summary="Perform a Health Check",
+    response_description="Return HTTP Status Code 200 (OK)",
+    status_code=status.HTTP_200_OK,
+    response_model=HealthCheck,
+)
+@app.head(
+    "/health",
+    tags=["Health"],
+    summary="Perform a Health Check",
+    response_description="Return HTTP Status Code 200 (OK)",
+    status_code=status.HTTP_200_OK,
+)
+def get_health() -> HealthCheck:
+    """Returns a HealthCheck for the server"""
+    return HealthCheck(status="OK")
+
+
+# Mount static files for mkdocs at the root
+# This tells FastAPI to serve the static documentation files at the '/' URL
+# We only mount the directory if it exists (only after 'mkdocs build' has run)
+# This prevents the app from crashing during tests or local development.
+docs_dir = Path("static/docs")
+if docs_dir.is_dir():
+    app.mount("/", StaticFiles(directory=docs_dir, html=True), name="static")
+else:
+    print("INFO: Documentation directory 'static/docs' not found.
Docs will not be served.") + +if __name__ == "__main__": + uvicorn.run("app.main:app", host="0.0.0.0", port=8000, reload=True, log_level="info") diff --git a/app/routers/__init__.py b/app/routers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/routers/hydrofabric/__init__.py b/app/routers/hydrofabric/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/routers/hydrofabric/router.py b/app/routers/hydrofabric/router.py new file mode 100644 index 0000000..0fc706f --- /dev/null +++ b/app/routers/hydrofabric/router.py @@ -0,0 +1,225 @@ +import pathlib +import tempfile +import uuid + +import geopandas as gpd +from fastapi import APIRouter, Depends, HTTPException, Query +from fastapi import Path as FastAPIPath +from fastapi.responses import FileResponse +from pyiceberg.expressions import EqualTo +from starlette.background import BackgroundTask + +from app import get_catalog, get_graphs +from icefabric.hydrofabric.subset import subset_hydrofabric +from icefabric.schemas import ( + DivideAttributes, + Divides, + FlowpathAttributes, + FlowpathAttributesML, + Flowpaths, + Hydrolocations, + Lakes, + Network, + Nexus, + POIs, +) +from icefabric.schemas.hydrofabric import HydrofabricDomains, IdType + +api_router = APIRouter(prefix="/hydrofabric") + + +@api_router.get("/{identifier}/gpkg", tags=["Hydrofabric Services"]) +async def get_hydrofabric_subset_gpkg( + identifier: str = FastAPIPath( + ..., + description="Identifier to start tracing from (e.g., catchment ID, POI ID, HL_URI)", + openapi_examples={ + "hl_uri": {"summary": "USGS Gauge", "value": "gages-01010000"}, + "wb-id": {"summary": "Watershed ID", "value": "wb-4581"}, + }, + ), + id_type: IdType = Query( + IdType.HL_URI, + description="The type of identifier being used", + openapi_examples={ + "hl_uri": {"summary": "USGS Gauge", "value": IdType.HL_URI}, + "wb-id": {"summary": "Watershed ID", "value": IdType.ID}, + }, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, description="The iceberg namespace used to query the hydrofabric" + ), + layers: list[str] | None = Query( + default=["divides", "flowpaths", "network", "nexus"], + description="Layers to include in the geopackage. Core layers (divides, flowpaths, network, nexus) are always included.", + examples=["divides", "flowpaths", "network", "nexus", "lakes", "pois", "hydrolocations"], + ), + catalog=Depends(get_catalog), + network_graphs=Depends(get_graphs), +): + """ + Get hydrofabric subset as a geopackage file (.gpkg) + + This endpoint creates a subset of the hydrofabric data by tracing upstream + from a given identifier and returns all related geospatial layers as a + downloadable geopackage file. 
+ + **Parameters:** + - **identifier**: The unique identifier to start tracing from + - **id_type**: Type of identifier (hl_uri, id, poi_id) + - **domain**: Hydrofabric domain/namespace to query + - **layers**: Additional layers to include (core layers always included) + + **Returns:** Geopackage file (.gpkg) containing the subset data + """ + unique_id = str(uuid.uuid4())[:8] + temp_dir = pathlib.Path(tempfile.gettempdir()) + tmp_path = temp_dir / f"subset_{identifier}_{unique_id}.gpkg" + + try: + # Create the subset (same as CLI logic) + output_layers = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=id_type, + layers=layers or ["divides", "flowpaths", "network", "nexus"], + namespace=domain.value, + graph=network_graphs[domain], + ) + + # Check if we got any data + if not output_layers: + raise HTTPException( + status_code=404, + detail=f"No data found for identifier '{identifier}' with type '{id_type.value}'", + ) + + # Write to geopackage (same as CLI logic) + tmp_path.parent.mkdir(parents=True, exist_ok=True) + + layers_written = 0 + for table_name, layer_data in output_layers.items(): + if len(layer_data) > 0: # Only save non-empty layers + # Ensure we have a GeoDataFrame for spatial layers + if not isinstance(layer_data, gpd.GeoDataFrame): + if hasattr(layer_data, "geometry") or "geometry" in layer_data.columns: + layer_data = gpd.GeoDataFrame(layer_data) + else: + # For non-spatial layers (like network), convert to GeoDataFrame with empty geometry + layer_data = gpd.GeoDataFrame(layer_data, geometry=[None] * len(layer_data)) + + layer_data.to_file(tmp_path, layer=table_name, driver="GPKG") + layers_written += 1 + print(f"Written layer '{table_name}' with {len(layer_data)} records") + else: + print(f"Warning: {table_name} layer is empty") + + if layers_written == 0: + raise HTTPException( + status_code=404, detail=f"No non-empty layers found for identifier '{identifier}'" + ) + + # Verify the file was created successfully + if not tmp_path.exists(): + raise HTTPException(status_code=500, detail="Failed to create geopackage file") + + if tmp_path.stat().st_size == 0: + tmp_path.unlink(missing_ok=True) + raise HTTPException(status_code=500, detail="Created geopackage file is empty") + + # Verify it's actually a file, not a directory + if not tmp_path.is_file(): + raise HTTPException(status_code=500, detail="Expected file but got directory") + + print(f"Successfully created geopackage: {tmp_path} (size: {tmp_path.stat().st_size} bytes)") + + # Create download filename + safe_identifier = identifier.replace("/", "_").replace("\\", "_") + download_filename = f"hydrofabric_subset_{safe_identifier}_{id_type.value}.gpkg" + + return FileResponse( + path=str(tmp_path), + filename=download_filename, + media_type="application/geopackage+sqlite3", + headers={ + "Content-Description": "Hydrofabric Subset Geopackage", + "X-Identifier": identifier, + "X-ID-Type": id_type.value, + "X-Domain": domain.value, + "X-Layers-Count": str(layers_written), + }, + background=BackgroundTask(lambda: tmp_path.unlink(missing_ok=True)), + ) + + except HTTPException: + # Clean up temp file if it exists and re-raise HTTP exceptions + if tmp_path.exists(): + tmp_path.unlink(missing_ok=True) + raise + except FileNotFoundError as e: + # Clean up temp file if it exists + if tmp_path.exists(): + tmp_path.unlink(missing_ok=True) + raise HTTPException(status_code=404, detail=f"Required file not found: {str(e)}") from None + except ValueError as e: + # Clean up temp file if it exists + if 
tmp_path.exists(): + tmp_path.unlink(missing_ok=True) + if "No origin found" in str(e): + raise HTTPException( + status_code=404, + detail=f"No origin found for {id_type.value}='{identifier}' in domain '{domain.value}'", + ) from None + else: + raise HTTPException(status_code=400, detail=f"Invalid request: {str(e)}") from None + + +@api_router.get("/history", tags=["Hydrofabric Services"]) +async def get_hydrofabric_history( + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, description="The iceberg namespace used to query the hydrofabric" + ), + catalog=Depends(get_catalog), +): + """ + Get Hydrofabric domain snapshot history from Iceberg + + This endpoint takes a domain of hydrofabric data and querys for the + hydrofabric snapshot history from Iceberg. Returns each layer's + history for the chosen domain. Each snapshot is summarized. + + **Parameters:** + - **domain**: Hydrofabric domain/namespace to query + + **Returns:** A JSON representation of the domain's snapshot history + """ + return_dict = {"history": []} + layers = [ + ("divide-attributes", DivideAttributes), + ("divides", Divides), + ("flowpath-attributes-ml", FlowpathAttributesML), + ("flowpath-attributes", FlowpathAttributes), + ("flowpaths", Flowpaths), + ("hydrolocations", Hydrolocations), + ("lakes", Lakes), + ("network", Network), + ("nexus", Nexus), + ("pois", POIs), + ] + snapshots_table = catalog.load_table("hydrofabric_snapshots.id") + domain_table = snapshots_table.scan(row_filter=EqualTo("domain", domain.replace("_hf", ""))).to_polars() + if domain_table.is_empty(): + raise HTTPException( + status_code=404, + detail=f"No snapshot history found for domain '{domain.value}'", + ) + for e_in, entry in enumerate(domain_table.iter_rows()): + return_dict["history"].append({"domain": entry[0], "layer_updates": []}) + for l_in, layer_id in enumerate(entry[1:]): + layer_name = layers[l_in][0] + tab = catalog.load_table(f"{domain.value}.{layer_name}") + snap_obj = tab.snapshot_by_id(layer_id) + layer_update = {"layer_name": layer_name, "snapshot_id": layer_id, "snapshot_summary": snap_obj} + return_dict["history"][e_in]["layer_updates"].append(layer_update) + + return return_dict diff --git a/app/routers/nwm_modules/__init__.py b/app/routers/nwm_modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/routers/nwm_modules/router.py b/app/routers/nwm_modules/router.py new file mode 100644 index 0000000..e9c1fb3 --- /dev/null +++ b/app/routers/nwm_modules/router.py @@ -0,0 +1,466 @@ +from fastapi import APIRouter, Depends, Query +from pyiceberg.catalog import Catalog + +from app import get_catalog, get_graphs +from icefabric.modules import SmpModules, config_mapper +from icefabric.schemas import HydrofabricDomains +from icefabric.schemas.modules import ( + Albedo, + LASAM, + LSTM, + NoahOwpModular, + SacSma, + SFT, + SMP, + Snow17, + Topmodel, + TRoute, +) + +sft_router = APIRouter(prefix="/modules/sft") +snow17_router = APIRouter(prefix="/modules/snow17") +smp_router = APIRouter(prefix="/modules/smp") +lstm_router = APIRouter(prefix="/modules/lstm") +lasam_router = APIRouter(prefix="/modules/lasam") +noahowp_router = APIRouter(prefix="/modules/noahowp") +sacsma_router = APIRouter(prefix="/modules/sacsma") +troute_router = APIRouter(prefix="/modules/troute") +topmodel_router = APIRouter(prefix="/modules/topmodel") +topoflow_router = APIRouter(prefix="/modules/topoflow") + + +@sft_router.get("/", tags=["HF Modules"]) +async def get_sft_ipes( + identifier: str = Query( + ..., + 
description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"sft_example": {"summary": "SFT Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"sft_example": {"summary": "SFT Example", "value": "conus_hf"}}, + ), + use_schaake: bool = Query( + False, + description="Whether to use Schaake for the Ice Fraction Scheme. Defaults to False to use Xinanjiang", + openapi_examples={"sft_example": {"summary": "SFT Example", "value": False}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[SFT]: + """ + An endpoint to return configurations for SFT. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns SFT (Soil Freeze-Thaw) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID to trace upstream from to get all catchments + - **domain**: The geographic domain to search for catchments from + - **use_schaake**: Determines if we're using Schaake or Xinanjiang to calculate ice fraction + + **Returns:** + A list of SFT pydantic objects for each catchment + """ + return config_mapper["sft"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + use_schaake=use_schaake, + ) + + +@snow17_router.get("/", tags=["NWM Modules"]) +async def get_snow17_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"snow17_example": {"summary": "SNOW-17 Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"snow17_example": {"summary": "SNOW-17 Example", "value": "conus_hf"}}, + ), + envca: bool = Query( + False, + description="If source is ENVCA, then set to True. Defaults to False.", + openapi_examples={"sft_example": {"summary": "SNOW-17 Example", "value": False}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[Snow17]: + """ + An endpoint to return configurations for SNOW-17. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns SNOW-17 (Snow Accumulation and Ablation Model) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + - **envca**: Designates that the source is ENVCA. + + **Returns:** + A list of SNOW-17 pydantic objects for each catchment. 
+ """ + return config_mapper["snow17"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + envca=envca, + ) + + +@smp_router.get("/", tags=["NWM Modules"]) +async def get_smp_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"smp_example": {"summary": "SMP Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"smp_example": {"summary": "SMP Example", "value": "conus_hf"}}, + ), + module: SmpModules = Query( + None, + description="A setting to determine if a module should be specified to obtain additional SMP parameters.", + openapi_examples={"smp_example": {"summary": "SMP Example", "value": None}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[SMP]: + """ + An endpoint to return configurations for SMP. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns SMP (Soil Moisture Profile) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + - **module**: Denotes if another module should be used to obtain additional SMP parameters. Confined to certain modules. + + **Returns:** + A list of SMP pydantic objects for each catchment. + """ + return config_mapper["smp"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + module=module, + ) + + +@lstm_router.get("/", tags=["NWM Modules"]) +async def get_lstm_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"lstm_example": {"summary": "LSTM Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"lstm_example": {"summary": "LSTM Example", "value": "conus_hf"}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[LSTM]: + """ + An endpoint to return configurations for LSTM. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns LSTM (Long Short-Term Memory) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + + **Returns:** + A list of LSTM pydantic objects for each catchment. 
+ """ + return config_mapper["lstm"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + ) + + +@lasam_router.get("/", tags=["NWM Modules"]) +async def get_lasam_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"lasam_example": {"summary": "LASAM Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"lasam_example": {"summary": "LASAM Example", "value": "conus_hf"}}, + ), + sft_included: bool = Query( + False, + description='True if SFT is in the "dep_modules_included" definition as declared in HF API repo.', + openapi_examples={"lasam_example": {"summary": "LASAM Example", "value": False}}, + ), + soil_params_file: str = Query( + "vG_default_params_HYDRUS.dat", + description="Name of the Van Genuchton soil parameters file. Note: This is the filename that gets returned by HF API's utility script get_hydrus_data().", + openapi_examples={ + "lasam_example": {"summary": "LASAM Example", "value": "vG_default_params_HYDRUS.dat"} + }, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[LASAM]: + r""" + An endpoint to return configurations for LASAM. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns LASAM (Lumped Arid/Semi-arid Model) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + - **sft_included**: Denotes that SFT is in the \"dep_modules_included\" definition as declared in the HF API repo. + - **soil_params_file**: Name of the Van Genuchton soil parameters file. + + **Returns:** + A list of LASAM pydantic objects for each catchment. + """ + return config_mapper["lasam"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + sft_included=sft_included, + soil_params_file=soil_params_file, + ) + + +@noahowp_router.get("/", tags=["NWM Modules"]) +async def get_noahowp_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"noahowp_example": {"summary": "Noah-OWP-Modular Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"noahowp_example": {"summary": "Noah-OWP-Modular Example", "value": "conus_hf"}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[NoahOwpModular]: + """ + An endpoint to return configurations for Noah-OWP-Modular. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns Noah-OWP-Modular parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + + **Returns:** + A list of Noah-OWP-Modular pydantic objects for each catchment. 
+ """ + return config_mapper["noah_owp"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + ) + + +@sacsma_router.get("/", tags=["NWM Modules"]) +async def get_sacsma_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"sacsma_example": {"summary": "SAC-SMA Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"sacsma_example": {"summary": "SAC-SMA Example", "value": "conus_hf"}}, + ), + envca: bool = Query( + False, + description="If source is ENVCA, then set to True. Defaults to False.", + openapi_examples={"sacsma_example": {"summary": "SAC-SMA Example", "value": False}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[SacSma]: + """ + An endpoint to return configurations for SAC-SMA. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns SAC-SMA (Sacramento Soil Moisture Accounting) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + - **envca**: Designates that the source is ENVCA. + + **Returns:** + A list of SAC-SMA pydantic objects for each catchment. + """ + return config_mapper["sacsma"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + envca=envca, + ) + + +@troute_router.get("/", tags=["NWM Modules"]) +async def get_troute_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"troute_example": {"summary": "T-Route Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"troute_example": {"summary": "T-Route Example", "value": "conus_hf"}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[TRoute]: + """ + An endpoint to return configurations for T-Route. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns T-Route (Tree-Based Channel Routing) parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + + **Returns:** + A list of T-Route pydantic objects for each catchment. 
+ """ + return config_mapper["troute"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + ) + + +@topmodel_router.get("/", tags=["NWM Modules"]) +async def get_topmodel_ipes( + identifier: str = Query( + ..., + description="Gage ID from which to trace upstream catchments.", + examples=["01010000"], + openapi_examples={"topmodel_example": {"summary": "TOPMODEL Example", "value": "01010000"}}, + ), + domain: HydrofabricDomains = Query( + HydrofabricDomains.CONUS, + description="The iceberg namespace used to query the hydrofabric.", + openapi_examples={"topmodel_example": {"summary": "TOPMODEL Example", "value": "conus_hf"}}, + ), + catalog: Catalog = Depends(get_catalog), + network_graphs=Depends(get_graphs), +) -> list[Topmodel]: + """ + An endpoint to return configurations for TOPMODEL. + + This endpoint traces upstream from a given gage ID to get all catchments + and returns TOPMODEL parameter configurations for each catchment. + + **Parameters:** + - **identifier**: The Gage ID from which upstream catchments are traced. + - **domain**: The geographic domain used to filter catchments. + + **Returns:** + A list of TOPMODEL pydantic objects for each catchment. + """ + return config_mapper["topmodel"]( + catalog=catalog, + namespace=domain.value, + identifier=f"gages-{identifier}", + graph=network_graphs[domain], + ) + + +# TODO - Restore endpoint once the generation of IPEs for TopoFlow is possible/implemented +# @topoflow_router.get("/", tags=["NWM Modules"]) +# async def get_topoflow_ipes( +# identifier: str = Query( +# ..., +# description="Gage ID from which to trace upstream catchments.", +# examples=["01010000"], +# openapi_examples={"topoflow_example": {"summary": "TopoFlow Example", "value": "01010000"}}, +# ), +# domain: HydrofabricDomains = Query( +# HydrofabricDomains.CONUS, +# description="The iceberg namespace used to query the hydrofabric.", +# openapi_examples={"topoflow_example": {"summary": "TopoFlow Example", "value": "conus_hf"}}, +# ), +# catalog: Catalog = Depends(get_catalog), +# ) -> list[Topoflow]: +# """ +# An endpoint to return configurations for TopoFlow. +# +# This endpoint traces upstream from a given gage ID to get all catchments +# and returns TopoFlow parameter configurations for each catchment. +# +# **Parameters:** +# - **identifier**: The Gage ID from which upstream catchments are traced. +# - **domain**: The geographic domain used to filter catchments. +# +# **Returns:** +# A list of TopoFlow pydantic objects for each catchment. +# """ +# return config_mapper["topoflow"]( +# catalog=catalog, +# namespace=domain.value, +# identifier=f"gages-{identifier}", +# graph=network_graphs[domain], +# ) + + +@topoflow_router.get("/albedo", tags=["NWM Modules"]) +async def get_albedo( + landcover_state: Albedo = Query( + ..., + description="The landcover state of a catchment for albedo classification", + examples=["snow"], + openapi_examples={"albedo_example": {"summary": "Albedo Example", "value": "snow"}}, + ), +) -> float: + """ + An endpoint to return albedo values for TopoFlow Glacier module. 
+
+    This endpoint matches a catchment's land cover class ("snow", "ice", "other") with an albedo value [0, 1]
+
+    **Parameters:**
+    - **landcover_state**: Land cover state: "snow", "ice", or "other"
+
+    **Returns:**
+    A float albedo value [0, 1]
+    """
+    return Albedo.get_landcover_albedo(landcover_state.landcover).value
diff --git a/app/routers/ras_xs/__init__.py b/app/routers/ras_xs/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/routers/ras_xs/router.py b/app/routers/ras_xs/router.py
new file mode 100644
index 0000000..95be271
--- /dev/null
+++ b/app/routers/ras_xs/router.py
@@ -0,0 +1,219 @@
+import os
+import pathlib
+import tempfile
+import uuid
+
+from fastapi import APIRouter, Depends, HTTPException, Path, Query
+from fastapi.responses import FileResponse
+from pydantic import BaseModel, field_validator
+from shapely.geometry import box
+from starlette.background import BackgroundTask
+
+from app import get_catalog
+from icefabric.ras_xs import subset_xs
+from icefabric.schemas import XsType
+
+api_router = APIRouter(prefix="/ras_xs")
+
+NEW_MEXICO_BOUNDS = {
+    "min_lat": {"summary": "New Mexico Bounds", "value": 31.3323},
+    "min_lon": {"summary": "New Mexico Bounds", "value": -109.0502},
+    "max_lat": {"summary": "New Mexico Bounds", "value": 37.0002},
+    "max_lon": {"summary": "New Mexico Bounds", "value": -103.0020},
+}
+
+
+class BoundingBox(BaseModel):
+    """Pydantic representation of a lat/lon geospatial bounding box."""
+
+    min_lat: float
+    min_lon: float
+    max_lat: float
+    max_lon: float
+
+    @field_validator("max_lat", "max_lon", mode="after")
+    @classmethod
+    def max_must_be_greater(cls, v, values):
+        """Validation function to make sure that the max bounds values are greater than the min bounds values"""
+        max_name, max_val = values.field_name, v
+        min_name = f"min_{max_name[4:]}"
+        min_val = values.data[min_name]
+        if max_val <= min_val:
+            raise ValueError(f"{max_name} must be greater than {min_name}")
+        return max_val
+
+
+def get_bbox_query_params(
+    min_lat: float = Query(
+        ...,
+        description="The minimum latitudinal coordinate that defines the bounding box.",
+        openapi_examples={"New Mexico Bounds Example": NEW_MEXICO_BOUNDS["min_lat"]},
+    ),
+    min_lon: float = Query(
+        ...,
+        description="The minimum longitudinal coordinate that defines the bounding box.",
+        openapi_examples={"New Mexico Bounds Example": NEW_MEXICO_BOUNDS["min_lon"]},
+    ),
+    max_lat: float = Query(
+        ...,
+        description="The maximum latitudinal coordinate that defines the bounding box.",
+        openapi_examples={"New Mexico Bounds Example": NEW_MEXICO_BOUNDS["max_lat"]},
+    ),
+    max_lon: float = Query(
+        ...,
+        description="The maximum longitudinal coordinate that defines the bounding box.",
+        openapi_examples={"New Mexico Bounds Example": NEW_MEXICO_BOUNDS["max_lon"]},
+    ),
+) -> BoundingBox:
+    """Dependency function that parses BoundingBox query parameters"""
+    try:
+        return BoundingBox(min_lat=min_lat, min_lon=min_lon, max_lat=max_lat, max_lon=max_lon)
+    except ValueError as e:
+        raise HTTPException(status_code=422, detail=f"Bounding box validation failed - {e}") from e
+
+
+def filesystem_check(tmp_path: pathlib.PosixPath, temp_dir: pathlib.PosixPath):
+    """Wraps temp file validations in a helper function.
+    Ensures tmp_path is inside temp_dir."""
+    try:
+        normalized_tmp = tmp_path.resolve()
+        normalized_temp_dir = temp_dir.resolve()
+    except OSError as err:
+        raise HTTPException(status_code=400, detail="Invalid file path.") from err
+    if not str(normalized_tmp).startswith(str(normalized_temp_dir) + "/"):
+        raise HTTPException(status_code=400, detail="File path is outside of temp directory.")
+
+    if not tmp_path.exists():
+        raise HTTPException(status_code=500, detail=f"Failed to create geopackage file at {tmp_path}.")
+    if tmp_path.stat().st_size == 0:
+        tmp_path.unlink(missing_ok=True)
+        raise HTTPException(status_code=404, detail="No data found for subset attempt.")
+    # Verify it's actually a file, not a directory
+    if not tmp_path.is_file():
+        raise HTTPException(status_code=500, detail=f"Expected file, but got directory at {tmp_path}.")
+
+
+@api_router.get("/{identifier}/", tags=["HEC-RAS XS"])
+async def get_xs_subset_gpkg(
+    identifier: str = Path(
+        ...,
+        description="The flowpath ID from the reference hydrofabric that the current RAS XS is conflated to. Must be numeric.",
+        pattern=r"^\d+$",
+        max_length=10,
+        examples=["20059822"],
+        openapi_examples={"flowpath_id": {"summary": "XS Example", "value": "20059822"}},
+    ),
+    schema_type: XsType = Query(
+        XsType.CONFLATED, description="The schema type used to query the cross-sections"
+    ),
+    catalog=Depends(get_catalog),
+):
+    """
+    Get geopackage subset from the HEC-RAS XS iceberg catalog by table identifier (aka flowpath ID).
+
+    This endpoint will query cross-sections from the HEC-RAS XS iceberg catalog by flowpath ID and return
+    the data subset as a downloadable geopackage file.
+
+    """
+    unique_id = str(uuid.uuid4())[:8]
+    temp_dir = pathlib.Path(tempfile.gettempdir())
+    tmp_path = temp_dir / f"ras_xs_{unique_id}.gpkg"
+
+    # Normalize and ensure the path is contained within the temp_dir
+    normalized_path = os.path.normpath(str(tmp_path))
+    if not normalized_path.startswith(str(temp_dir)):
+        raise HTTPException(status_code=400, detail="Invalid path detected.")
+
+    try:
+        # Create data subset
+        data_gdf = subset_xs(
+            catalog=catalog, identifier=f"{identifier}", output_file=tmp_path, xstype=schema_type
+        )
+        if len(data_gdf) == 0:
+            raise HTTPException(
+                status_code=422, detail="Query returned no cross-sectional data. Try a different flowpath ID."
+ ) + + filesystem_check(tmp_path=tmp_path, temp_dir=temp_dir) + + print(f"Returning file: {tmp_path} (size: {tmp_path.stat().st_size} bytes)") + download_filename = f"ras_xs_{unique_id}.gpkg" + return FileResponse( + path=str(tmp_path), + filename=download_filename, + media_type="application/geopackage+sqlite3", + headers={ + "data-source": f"ras_xs.{schema_type.value}", + "flowpath-id": identifier, + "description": f"RAS XS ({schema_type.value} schema) Geopackage", + "total-records": f"{len(data_gdf)}", + }, + background=BackgroundTask(lambda: tmp_path.unlink(missing_ok=True)), + ) + except HTTPException: + raise + except Exception: + # Clean up temp file if it exists + if "tmp_path" in locals() and tmp_path.exists(): + tmp_path.unlink(missing_ok=True) + # Double-check containment before deletion as a precaution + try: + normalized_tmp = tmp_path.resolve() + normalized_temp_dir = temp_dir.resolve() + if str(normalized_tmp).startswith(str(normalized_temp_dir) + "/"): + tmp_path.unlink(missing_ok=True) + except OSError: + pass + raise + + +@api_router.get("/within", tags=["HEC-RAS XS"]) +async def get_by_geospatial_query( + bbox: BoundingBox = Depends(get_bbox_query_params), + schema_type: XsType = Query( + XsType.CONFLATED, description="The schema type used to query the cross-sections" + ), + catalog=Depends(get_catalog), +): + """ + Get geopackage subset from a lat/lon bounding box geospatial query. + + This endpoint will query cross-sections from the HEC-RAS XS iceberg catalog by bounding box. All + data selected will be within the bounding box. Returns the data subset as a downloadable + geopackage file. + """ + unique_id = str(uuid.uuid4())[:8] + temp_dir = pathlib.Path(tempfile.gettempdir()) + tmp_path = temp_dir / f"ras_xs_bbox_{unique_id}.gpkg" + try: + # Create data subset + bbox = box(bbox.min_lat, bbox.min_lon, bbox.max_lat, bbox.max_lon) + data_gdf = subset_xs(catalog=catalog, bbox=bbox, output_file=tmp_path, xstype=schema_type) + if len(data_gdf) == 0: + raise HTTPException( + status_code=422, + detail="Query returned no cross-sectional data. 
Try a different bounding box definition.", + ) + + filesystem_check(tmp_path=tmp_path, temp_dir=temp_dir) + + print(f"Returning file: {tmp_path} (size: {tmp_path.stat().st_size} bytes)") + download_filename = f"ras_xs_bbox_{unique_id}.gpkg" + return FileResponse( + path=str(tmp_path), + filename=download_filename, + media_type="application/geopackage+sqlite3", + headers={ + "data-source": f"ras_xs.{schema_type.value}", + "bounding-box": str(bbox), + "description": f"RAS XS ({schema_type.value} schema) Geopackage", + "total-records": f"{len(data_gdf)}", + }, + background=BackgroundTask(lambda: tmp_path.unlink(missing_ok=True)), + ) + except HTTPException: + raise + except Exception: + # Clean up temp file if it exists + if "tmp_path" in locals() and tmp_path.exists(): + tmp_path.unlink(missing_ok=True) + raise diff --git a/app/routers/rise_wrappers/__init__.py b/app/routers/rise_wrappers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/routers/rise_wrappers/router.py b/app/routers/rise_wrappers/router.py new file mode 100644 index 0000000..e77a76d --- /dev/null +++ b/app/routers/rise_wrappers/router.py @@ -0,0 +1,99 @@ +from fastapi import APIRouter, Depends, HTTPException + +from icefabric.helpers import ( + EXT_RISE_BASE_URL, + basemodel_to_query_string, + make_get_req_to_rise, +) +from icefabric.schemas import ( + CatItemParams, + CatRecParams, + LocItemParams, +) + +api_router = APIRouter(prefix="/rise") + + +@api_router.get("/catalog-item", tags=["RISE"]) +async def get_catalog_item(query: CatItemParams = Depends()): + """Retrieves the collection of CatalogItem resources.""" + query_url_portion = basemodel_to_query_string(query) + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/catalog-item{query_url_portion}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] + + +@api_router.get("/catalog-item/{id}", tags=["RISE"]) +async def get_catalog_item_by_id(id: str): + """Retrieves a CatalogItem resource, per a given ID.""" + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/catalog-item/{id}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] + + +@api_router.get("/catalog-record", tags=["RISE"]) +async def get_catalog_record(query: CatRecParams = Depends()): + """Retrieves the collection of CatalogRecord resources.""" + query_url_portion = basemodel_to_query_string(query) + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/catalog-record{query_url_portion}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] + + +@api_router.get("/catalog-record/{id}", tags=["RISE"]) +async def get_catalog_record_by_id(id: str): + """Retrieves a CatalogRecord resource, per a given ID.""" + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/catalog-record/{id}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] + + +@api_router.get("/location", tags=["RISE"]) +async def get_location(query: LocItemParams = Depends()): + """Retrieves the collection of Location resources.""" + query_url_portion = basemodel_to_query_string(query) + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/location{query_url_portion}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] + 
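+# Shared proxy pattern for the RISE wrappers in this module: serialize the
+# validated query model into a query string, forward the GET request to the
+# external RISE API, then either surface the upstream failure as an
+# HTTPException or return the response body. The sketch below assumes
+# make_get_req_to_rise returns a dict shaped like
+# {"status_code": int, "detail": <parsed JSON body or error message>}; the
+# real helpers live in icefabric.helpers and their exact return shape is an
+# assumption here, not something defined in this file.
+#
+#     query_url_portion = basemodel_to_query_string(query)  # assumed to yield "?<field>=<value>&..."
+#     rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/location{query_url_portion}")
+#     if rise_response["status_code"] != 200:
+#         raise HTTPException(**rise_response)  # dict keys map onto status_code / detail
+#     return rise_response["detail"]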
+ +@api_router.get("/location/{id}", tags=["RISE"]) +async def get_location_by_id(id: str): + """Retrieves a Location resource, per a given ID.""" + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/location/{id}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] + + +# TODO - Restore endpoint once the RISE api/result endpoint is no longer timing out +# @api_router.get("/result", tags=["RISE"]) +# async def get_result(query: ResParams = Depends()): +# """Retrieves the collection of Result resources.""" +# query_url_portion = basemodel_to_query_string(query) +# rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/result{query_url_portion}") +# if rise_response["status_code"] != 200: +# raise HTTPException(**rise_response) +# else: +# return rise_response["detail"] + + +@api_router.get("/result/{id}", tags=["RISE"]) +async def get_result_by_id(id: str): + """Retrieves a Result resource, per a given ID.""" + rise_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/result/{id}") + if rise_response["status_code"] != 200: + raise HTTPException(**rise_response) + else: + return rise_response["detail"] diff --git a/app/routers/streamflow_observations/__init__.py b/app/routers/streamflow_observations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/routers/streamflow_observations/router.py b/app/routers/streamflow_observations/router.py new file mode 100644 index 0000000..b8a0e6e --- /dev/null +++ b/app/routers/streamflow_observations/router.py @@ -0,0 +1,343 @@ +import io +from datetime import datetime +from enum import Enum + +from botocore.exceptions import ClientError +from fastapi import APIRouter, HTTPException, Path, Query +from fastapi.responses import Response +from pyiceberg.catalog import load_catalog + +api_router = APIRouter(prefix="/streamflow_observations") + + +# TODO add other gauges used by NWM +class DataSource(str, Enum): + """All observational streamflow sources""" + + USGS = "usgs" + + +# Configuration for each data source +DATA_SOURCE_CONFIG = { + DataSource.USGS: { + "namespace": "streamflow_observations", + "table": "usgs_hourly", + "time_column": "time", + "units": "cms", + "description": "USGS stream gauge hourly data", + }, +} + + +def get_catalog_and_table(data_source: DataSource): + """Get catalog and table for a given data source""" + config = DATA_SOURCE_CONFIG[data_source] + try: + catalog = load_catalog("glue") + table = catalog.load_table(f"{config['namespace']}.{config['table']}") + except ClientError as e: + msg = "AWS Test account credentials expired. Can't access remote S3 Table" + print(msg) + raise e + return catalog, table, config + + +def validate_identifier(data_source: DataSource, identifier: str): + """Check if identifier exists in the dataset""" + catalog, table, config = get_catalog_and_table(data_source) + schema = table.schema() + available_columns = [field.name for field in schema.fields] + + if identifier not in available_columns: + available_ids = [col for col in available_columns if col != config["time_column"]] + raise HTTPException( + status_code=404, + detail=f"ID '{identifier}' not found in {data_source} dataset. 
Available IDs: {available_ids[:10]}...", + ) + + return catalog, table, config + + +@api_router.get("/{data_source}/available") +async def get_available_identifiers( + data_source: DataSource = Path(..., description="Data source type"), + limit: int = Query(100, description="Maximum number of IDs to return"), +): + """ + Get list of available identifiers for a data source + + Examples + -------- + GET /data/usgs/available + GET /data/usgs/available?limit=50 + """ + try: + _, table, config = get_catalog_and_table(data_source) + + schema = table.schema() + # Get all columns except time column + identifier_columns = [field.name for field in schema.fields if field.name != config["time_column"]] + + return { + "data_source": data_source.value, + "description": config["description"], + "total_identifiers": len(identifier_columns), + "identifiers": sorted(identifier_columns)[:limit], + "showing": min(limit, len(identifier_columns)), + "units": config["units"], + } + + except HTTPException as e: + raise HTTPException(status_code=500, detail=str(e)) from e + + +@api_router.get("/{data_source}/csv") +async def get_data_csv( + data_source: DataSource = Path(..., description="Data source type"), + identifier: str = Query( + ..., + description="Station/gauge ID", + examples=["01010000"], + openapi_examples={"station_example": {"summary": "USGS Gauge", "value": "01010000"}}, + ), + start_date: datetime | None = Query( + None, + description="Start Date", + openapi_examples={"sample_date": {"summary": "Sample Date", "value": "2021-12-31T14:00:00"}}, + ), + end_date: datetime | None = Query( + None, + description="End Date", + openapi_examples={"sample_date": {"summary": "Sample Date", "value": "2022-01-01T14:00:00"}}, + ), + include_headers: bool = Query(True, description="Include CSV headers"), +): + """ + Get data as CSV file for any data source + + Examples + -------- + GET /data/usgs_hourly/csv?identifier=01031500 + """ + try: + _, table, config = validate_identifier(data_source, identifier) + scan_builder = table.scan(selected_fields=[config["time_column"], identifier]) + if start_date: + scan_builder = scan_builder.filter(f"{config['time_column']} >= '{start_date.isoformat()}'") + if end_date: + scan_builder = scan_builder.filter(f"{config['time_column']} <= '{end_date.isoformat()}'") + + df = scan_builder.to_pandas() + + if df.empty: + return Response( + content="Error: No data available for the specified parameters", + status_code=404, + media_type="text/plain", + ) + + df = df.rename(columns={config["time_column"]: "time", identifier: "q_cms"}) + + csv_buffer = io.StringIO() + df.to_csv(csv_buffer, index=False, header=include_headers) + csv_data = csv_buffer.getvalue() + + filename_parts = [data_source.value, identifier, "data"] + if start_date: + filename_parts.append(f"from_{start_date.strftime('%Y%m%d_%H%M')}") + if end_date: + filename_parts.append(f"to_{end_date.strftime('%Y%m%d_%H%M')}") + filename = "_".join(filename_parts) + ".csv" + + return Response( + content=csv_data, + media_type="text/csv", + headers={ + "Content-Disposition": f"attachment; filename={filename}", + "X-Total-Records": str(len(df)), + "X-Data-Source": data_source.value, + "X-Units": config["units"], + }, + ) + + except HTTPException: + raise + + +@api_router.get("/{data_source}/parquet") +async def get_data_parquet( + data_source: DataSource = Path(..., description="Data source type"), + identifier: str = Query( + ..., + description="Station/gauge ID", + examples=["01010000"], + openapi_examples={"station_example": 
{"summary": "USGS Gauge", "value": "01010000"}}, + ), + start_date: datetime | None = Query( + None, + description="Start Date", + openapi_examples={"sample_date": {"summary": "Sample Date", "value": "2021-12-31T14:00:00"}}, + ), + end_date: datetime | None = Query( + None, + description="End Date", + openapi_examples={"sample_date": {"summary": "Sample Date", "value": "2022-01-01T14:00:00"}}, + ), +): + """ + Get data as Parquet file for any data source + + Examples + -------- + GET /data/usgs/parquet?identifier=01031500 + GET /data/usgs/parquet?identifier=01031500&start_date=2023-01-01T00:00:00&compression=gzip + """ + try: + _, table, config = validate_identifier(data_source, identifier) + + scan_builder = table.scan(selected_fields=[config["time_column"], identifier]) + + if start_date: + scan_builder = scan_builder.filter(f"{config['time_column']} >= '{start_date.isoformat()}'") + if end_date: + scan_builder = scan_builder.filter(f"{config['time_column']} <= '{end_date.isoformat()}'") + + df = scan_builder.to_pandas() + if df.empty: + raise HTTPException(status_code=404, detail="No data available for the specified parameters") + + # Prepare output with metadata + df = df.rename(columns={config["time_column"]: "time", identifier: "q_cms"}).copy() + df["data_source"] = data_source.value + df["identifier"] = identifier + df["units"] = config["units"] + + parquet_buffer = io.BytesIO() + df.to_parquet(parquet_buffer, index=False, compression="lz4", engine="pyarrow") + parquet_data = parquet_buffer.getvalue() + + # Fix filename generation with proper datetime formatting + filename_parts = [data_source.value, identifier, "data"] + if start_date: + filename_parts.append(f"from_{start_date.strftime('%Y%m%d_%H%M')}") + if end_date: + filename_parts.append(f"to_{end_date.strftime('%Y%m%d_%H%M')}") + filename = "_".join(filename_parts) + ".parquet" + + return Response( + content=parquet_data, + media_type="application/octet-stream", + headers={ + "Content-Disposition": f"attachment; filename={filename}", + "X-Total-Records": str(len(df)), + "X-Data-Source": data_source.value, + "X-Compression": "lz4", + "X-Units": config["units"], + }, + ) + + except HTTPException: + raise + except ValueError as e: + Response(content=f"Error: {str(e)}", status_code=500, media_type="text/plain") + + +@api_router.get("/{data_source}/info") +async def get_data_source_info( + data_source: DataSource = Path(..., description="Data source type"), +): + """ + Get information about dataset size and recommendations + + Examples + -------- + GET /data/usgs/info + """ + try: + _, table, config = get_catalog_and_table(data_source) + + df = table.inspect.snapshots().to_pandas() + + # Converting to an int rather than a numpy.int64 + latest_snapshot_id = int(df.loc[df["committed_at"].idxmax(), "snapshot_id"]) + snapshots = table.inspect.snapshots().to_pydict() + + snapshots = dict(snapshots) + # Converting to an int rather than a numpy.int64 + if "snapshot_id" in snapshots and snapshots["snapshot_id"]: + snapshots["snapshot_id"] = [int(sid) for sid in snapshots["snapshot_id"]] + + return { + "data_source": data_source.value, + "latest_snapshot": latest_snapshot_id, + "description": config["description"], + "units": config["units"], + "snapshots": snapshots, + } + except HTTPException as e: + raise HTTPException(status_code=500, detail=str(e)) from e + + +@api_router.get("/{data_source}/{identifier}/info") +async def get_data_info( + data_source: DataSource = Path(..., description="Data source type"), + identifier: str = 
Path(..., description="Station/gauge ID", examples=["01031500"]), +): + """ + Get information about dataset size and recommendations + + Examples + -------- + GET /data/usgs/01031500/info + """ + try: + _, table, config = validate_identifier(data_source, identifier) + + # Get data info + df = table.scan(selected_fields=[config["time_column"], identifier]).to_pandas() + df_clean = df.dropna(subset=[identifier]) # Droping NA to determine full date range + + return { + "data_source": data_source.value, + "identifier": identifier, + "description": config["description"], + "total_records": len(df_clean), + "units": config["units"], + "date_range": { + "start": df_clean[config["time_column"]].min().isoformat() if not df_clean.empty else None, + "end": df_clean[config["time_column"]].max().isoformat() if not df_clean.empty else None, + }, + "estimated_sizes": { + "csv_mb": round(len(df_clean) * 25 / 1024 / 1024, 2), + "parquet_mb": round(len(df_clean) * 8 / 1024 / 1024, 2), + }, + } + + except HTTPException: + raise + except ValueError as e: + Response(content=f"Error: {str(e)}", status_code=500, media_type="text/plain") + + +@api_router.get("/sources") +async def get_available_sources(): + """ + Get list of all available data sources + + Examples + -------- + GET /data/sources + """ + sources = [] + for source, config in DATA_SOURCE_CONFIG.items(): + sources.append( + { + "name": source.value, + "description": config["description"], + "namespace": config["namespace"], + "table": config["table"], + "units": config["units"], + } + ) + + return {"available_sources": sources, "total_sources": len(sources)} diff --git a/data/LICENSE b/data/LICENSE new file mode 100644 index 0000000..b03120f --- /dev/null +++ b/data/LICENSE @@ -0,0 +1,540 @@ +## ODC Open Database License (ODbL) + +### Preamble + +The Open Database License (ODbL) is a license agreement intended to +allow users to freely share, modify, and use this Database while +maintaining this same freedom for others. Many databases are covered by +copyright, and therefore this document licenses these rights. Some +jurisdictions, mainly in the European Union, have specific rights that +cover databases, and so the ODbL addresses these rights, too. Finally, +the ODbL is also an agreement in contract for users of this Database to +act in certain ways in return for accessing this Database. + +Databases can contain a wide variety of types of content (images, +audiovisual material, and sounds all in the same database, for example), +and so the ODbL only governs the rights over the Database, and not the +contents of the Database individually. Licensors should use the ODbL +together with another license for the contents, if the contents have a +single set of rights that uniformly covers all of the contents. If the +contents have multiple sets of different rights, Licensors should +describe what rights govern what contents together in the individual +record or in some other way that clarifies what rights apply. + +Sometimes the contents of a database, or the database itself, can be +covered by other rights not addressed here (such as private contracts, +trade mark over the name, or privacy rights / data protection rights +over information in the contents), and so you are advised that you may +have to consult other documents or clear other rights before doing +activities not covered by this License. 
+ +------ + +The Licensor (as defined below) + +and + +You (as defined below) + +agree as follows: + +### 1.0 Definitions of Capitalised Words + +"Collective Database" – Means this Database in unmodified form as part +of a collection of independent databases in themselves that together are +assembled into a collective whole. A work that constitutes a Collective +Database will not be considered a Derivative Database. + +"Convey" – As a verb, means Using the Database, a Derivative Database, +or the Database as part of a Collective Database in any way that enables +a Person to make or receive copies of the Database or a Derivative +Database. Conveying does not include interaction with a user through a +computer network, or creating and Using a Produced Work, where no +transfer of a copy of the Database or a Derivative Database occurs. +"Contents" – The contents of this Database, which includes the +information, independent works, or other material collected into the +Database. For example, the contents of the Database could be factual +data or works such as images, audiovisual material, text, or sounds. + +"Database" – A collection of material (the Contents) arranged in a +systematic or methodical way and individually accessible by electronic +or other means offered under the terms of this License. + +"Database Directive" – Means Directive 96/9/EC of the European +Parliament and of the Council of 11 March 1996 on the legal protection +of databases, as amended or succeeded. + +"Database Right" – Means rights resulting from the Chapter III ("sui +generis") rights in the Database Directive (as amended and as transposed +by member states), which includes the Extraction and Re-utilisation of +the whole or a Substantial part of the Contents, as well as any similar +rights available in the relevant jurisdiction under Section 10.4. + +"Derivative Database" – Means a database based upon the Database, and +includes any translation, adaptation, arrangement, modification, or any +other alteration of the Database or of a Substantial part of the +Contents. This includes, but is not limited to, Extracting or +Re-utilising the whole or a Substantial part of the Contents in a new +Database. + +"Extraction" – Means the permanent or temporary transfer of all or a +Substantial part of the Contents to another medium by any means or in +any form. + +"License" – Means this license agreement and is both a license of rights +such as copyright and Database Rights and an agreement in contract. + +"Licensor" – Means the Person that offers the Database under the terms +of this License. + +"Person" – Means a natural or legal person or a body of persons +corporate or incorporate. + +"Produced Work" – a work (such as an image, audiovisual material, text, +or sounds) resulting from using the whole or a Substantial part of the +Contents (via a search or other query) from this Database, a Derivative +Database, or this Database as part of a Collective Database. + +"Publicly" – means to Persons other than You or under Your control by +either more than 50% ownership or by the power to direct their +activities (such as contracting with an independent consultant). + +"Re-utilisation" – means any form of making available to the public all +or a Substantial part of the Contents by the distribution of copies, by +renting, by online or other forms of transmission. + +"Substantial" – Means substantial in terms of quantity or quality or a +combination of both. 
The repeated and systematic Extraction or +Re-utilisation of insubstantial parts of the Contents may amount to the +Extraction or Re-utilisation of a Substantial part of the Contents. + +"Use" – As a verb, means doing any act that is restricted by copyright +or Database Rights whether in the original medium or any other; and +includes without limitation distributing, copying, publicly performing, +publicly displaying, and preparing derivative works of the Database, as +well as modifying the Database as may be technically necessary to use it +in a different mode or format. + +"You" – Means a Person exercising rights under this License who has not +previously violated the terms of this License with respect to the +Database, or who has received express permission from the Licensor to +exercise rights under this License despite a previous violation. + +Words in the singular include the plural and vice versa. + +### 2.0 What this License covers + +2.1. Legal effect of this document. This License is: + + a. A license of applicable copyright and neighbouring rights; + + b. A license of the Database Right; and + + c. An agreement in contract between You and the Licensor. + +2.2 Legal rights covered. This License covers the legal rights in the +Database, including: + + a. Copyright. Any copyright or neighbouring rights in the Database. + The copyright licensed includes any individual elements of the + Database, but does not cover the copyright over the Contents + independent of this Database. See Section 2.4 for details. Copyright + law varies between jurisdictions, but is likely to cover: the Database + model or schema, which is the structure, arrangement, and organisation + of the Database, and can also include the Database tables and table + indexes; the data entry and output sheets; and the Field names of + Contents stored in the Database; + + b. Database Rights. Database Rights only extend to the Extraction and + Re-utilisation of the whole or a Substantial part of the Contents. + Database Rights can apply even when there is no copyright over the + Database. Database Rights can also apply when the Contents are removed + from the Database and are selected and arranged in a way that would + not infringe any applicable copyright; and + + c. Contract. This is an agreement between You and the Licensor for + access to the Database. In return you agree to certain conditions of + use on this access as outlined in this License. + +2.3 Rights not covered. + + a. This License does not apply to computer programs used in the making + or operation of the Database; + + b. This License does not cover any patents over the Contents or the + Database; and + + c. This License does not cover any trademarks associated with the + Database. + +2.4 Relationship to Contents in the Database. The individual items of +the Contents contained in this Database may be covered by other rights, +including copyright, patent, data protection, privacy, or personality +rights, and this License does not cover any rights (other than Database +Rights or in contract) in individual Contents contained in the Database. +For example, if used on a Database of images (the Contents), this +License would not apply to copyright over individual images, which could +have their own separate licenses, or one single license covering all of +the rights over the images. 
+ +### 3.0 Rights granted + +3.1 Subject to the terms and conditions of this License, the Licensor +grants to You a worldwide, royalty-free, non-exclusive, terminable (but +only under Section 9) license to Use the Database for the duration of +any applicable copyright and Database Rights. These rights explicitly +include commercial use, and do not exclude any field of endeavour. To +the extent possible in the relevant jurisdiction, these rights may be +exercised in all media and formats whether now known or created in the +future. + +The rights granted cover, for example: + + a. Extraction and Re-utilisation of the whole or a Substantial part of + the Contents; + + b. Creation of Derivative Databases; + + c. Creation of Collective Databases; + + d. Creation of temporary or permanent reproductions by any means and + in any form, in whole or in part, including of any Derivative + Databases or as a part of Collective Databases; and + + e. Distribution, communication, display, lending, making available, or + performance to the public by any means and in any form, in whole or in + part, including of any Derivative Database or as a part of Collective + Databases. + +3.2 Compulsory license schemes. For the avoidance of doubt: + + a. Non-waivable compulsory license schemes. In those jurisdictions in + which the right to collect royalties through any statutory or + compulsory licensing scheme cannot be waived, the Licensor reserves + the exclusive right to collect such royalties for any exercise by You + of the rights granted under this License; + + b. Waivable compulsory license schemes. In those jurisdictions in + which the right to collect royalties through any statutory or + compulsory licensing scheme can be waived, the Licensor waives the + exclusive right to collect such royalties for any exercise by You of + the rights granted under this License; and, + + c. Voluntary license schemes. The Licensor waives the right to collect + royalties, whether individually or, in the event that the Licensor is + a member of a collecting society that administers voluntary licensing + schemes, via that society, from any exercise by You of the rights + granted under this License. + +3.3 The right to release the Database under different terms, or to stop +distributing or making available the Database, is reserved. Note that +this Database may be multiple-licensed, and so You may have the choice +of using alternative licenses for this Database. Subject to Section +10.4, all other rights not expressly granted by Licensor are reserved. + +### 4.0 Conditions of Use + +4.1 The rights granted in Section 3 above are expressly made subject to +Your complying with the following conditions of use. These are important +conditions of this License, and if You fail to follow them, You will be +in material breach of its terms. + +4.2 Notices. If You Publicly Convey this Database, any Derivative +Database, or the Database as part of a Collective Database, then You +must: + + a. Do so only under the terms of this License or another license + permitted under Section 4.4; + + b. Include a copy of this License (or, as applicable, a license + permitted under Section 4.4) or its Uniform Resource Identifier (URI) + with the Database or Derivative Database, including both in the + Database or Derivative Database and in any relevant documentation; and + + c. Keep intact any copyright or Database Right notices and notices + that refer to this License. + + d. 
If it is not possible to put the required notices in a particular + file due to its structure, then You must include the notices in a + location (such as a relevant directory) where users would be likely to + look for it. + +4.3 Notice for using output (Contents). Creating and Using a Produced +Work does not require the notice in Section 4.2. However, if you +Publicly Use a Produced Work, You must include a notice associated with +the Produced Work reasonably calculated to make any Person that uses, +views, accesses, interacts with, or is otherwise exposed to the Produced +Work aware that Content was obtained from the Database, Derivative +Database, or the Database as part of a Collective Database, and that it +is available under this License. + + a. Example notice. The following text will satisfy notice under + Section 4.3: + + Contains information from DATABASE NAME, which is made available + here under the Open Database License (ODbL). + +DATABASE NAME should be replaced with the name of the Database and a +hyperlink to the URI of the Database. "Open Database License" should +contain a hyperlink to the URI of the text of this License. If +hyperlinks are not possible, You should include the plain text of the +required URI's with the above notice. + +4.4 Share alike. + + a. Any Derivative Database that You Publicly Use must be only under + the terms of: + + i. This License; + + ii. A later version of this License similar in spirit to this + License; or + + iii. A compatible license. + + If You license the Derivative Database under one of the licenses + mentioned in (iii), You must comply with the terms of that license. + + b. For the avoidance of doubt, Extraction or Re-utilisation of the + whole or a Substantial part of the Contents into a new database is a + Derivative Database and must comply with Section 4.4. + + c. Derivative Databases and Produced Works. A Derivative Database is + Publicly Used and so must comply with Section 4.4. if a Produced Work + created from the Derivative Database is Publicly Used. + + d. Share Alike and additional Contents. For the avoidance of doubt, + You must not add Contents to Derivative Databases under Section 4.4 a + that are incompatible with the rights granted under this License. + + e. Compatible licenses. Licensors may authorise a proxy to determine + compatible licenses under Section 4.4 a iii. If they do so, the + authorised proxy's public statement of acceptance of a compatible + license grants You permission to use the compatible license. + + +4.5 Limits of Share Alike. The requirements of Section 4.4 do not apply +in the following: + + a. For the avoidance of doubt, You are not required to license + Collective Databases under this License if You incorporate this + Database or a Derivative Database in the collection, but this License + still applies to this Database or a Derivative Database as a part of + the Collective Database; + + b. Using this Database, a Derivative Database, or this Database as + part of a Collective Database to create a Produced Work does not + create a Derivative Database for purposes of Section 4.4; and + + c. Use of a Derivative Database internally within an organisation is + not to the public and therefore does not fall under the requirements + of Section 4.4. + +4.6 Access to Derivative Databases. If You Publicly Use a Derivative +Database or a Produced Work from a Derivative Database, You must also +offer to recipients of the Derivative Database or Produced Work a copy +in a machine readable form of: + + a. 
The entire Derivative Database; or + + b. A file containing all of the alterations made to the Database or + the method of making the alterations to the Database (such as an + algorithm), including any additional Contents, that make up all the + differences between the Database and the Derivative Database. + +The Derivative Database (under a.) or alteration file (under b.) must be +available at no more than a reasonable production cost for physical +distributions and free of charge if distributed over the internet. + +4.7 Technological measures and additional terms + + a. This License does not allow You to impose (except subject to + Section 4.7 b.) any terms or any technological measures on the + Database, a Derivative Database, or the whole or a Substantial part of + the Contents that alter or restrict the terms of this License, or any + rights granted under it, or have the effect or intent of restricting + the ability of any person to exercise those rights. + + b. Parallel distribution. You may impose terms or technological + measures on the Database, a Derivative Database, or the whole or a + Substantial part of the Contents (a "Restricted Database") in + contravention of Section 4.74 a. only if You also make a copy of the + Database or a Derivative Database available to the recipient of the + Restricted Database: + + i. That is available without additional fee; + + ii. That is available in a medium that does not alter or restrict + the terms of this License, or any rights granted under it, or have + the effect or intent of restricting the ability of any person to + exercise those rights (an "Unrestricted Database"); and + + iii. The Unrestricted Database is at least as accessible to the + recipient as a practical matter as the Restricted Database. + + c. For the avoidance of doubt, You may place this Database or a + Derivative Database in an authenticated environment, behind a + password, or within a similar access control scheme provided that You + do not alter or restrict the terms of this License or any rights + granted under it or have the effect or intent of restricting the + ability of any person to exercise those rights. + +4.8 Licensing of others. You may not sublicense the Database. Each time +You communicate the Database, the whole or Substantial part of the +Contents, or any Derivative Database to anyone else in any way, the +Licensor offers to the recipient a license to the Database on the same +terms and conditions as this License. You are not responsible for +enforcing compliance by third parties with this License, but You may +enforce any rights that You have over a Derivative Database. You are +solely responsible for any modifications of a Derivative Database made +by You or another Person at Your direction. You may not impose any +further restrictions on the exercise of the rights granted or affirmed +under this License. + +### 5.0 Moral rights + +5.1 Moral rights. This section covers moral rights, including any rights +to be identified as the author of the Database or to object to treatment +that would otherwise prejudice the author's honour and reputation, or +any other derogatory treatment: + + a. For jurisdictions allowing waiver of moral rights, Licensor waives + all moral rights that Licensor may have in the Database to the fullest + extent possible by the law of the relevant jurisdiction under Section + 10.4; + + b. 
If waiver of moral rights under Section 5.1 a in the relevant + jurisdiction is not possible, Licensor agrees not to assert any moral + rights over the Database and waives all claims in moral rights to the + fullest extent possible by the law of the relevant jurisdiction under + Section 10.4; and + + c. For jurisdictions not allowing waiver or an agreement not to assert + moral rights under Section 5.1 a and b, the author may retain their + moral rights over certain aspects of the Database. + +Please note that some jurisdictions do not allow for the waiver of moral +rights, and so moral rights may still subsist over the Database in some +jurisdictions. + +### 6.0 Fair dealing, Database exceptions, and other rights not affected + +6.1 This License does not affect any rights that You or anyone else may +independently have under any applicable law to make any use of this +Database, including without limitation: + + a. Exceptions to the Database Right including: Extraction of Contents + from non-electronic Databases for private purposes, Extraction for + purposes of illustration for teaching or scientific research, and + Extraction or Re-utilisation for public security or an administrative + or judicial procedure. + + b. Fair dealing, fair use, or any other legally recognised limitation + or exception to infringement of copyright or other applicable laws. + +6.2 This License does not affect any rights of lawful users to Extract +and Re-utilise insubstantial parts of the Contents, evaluated +quantitatively or qualitatively, for any purposes whatsoever, including +creating a Derivative Database (subject to other rights over the +Contents, see Section 2.4). The repeated and systematic Extraction or +Re-utilisation of insubstantial parts of the Contents may however amount +to the Extraction or Re-utilisation of a Substantial part of the +Contents. + +### 7.0 Warranties and Disclaimer + +7.1 The Database is licensed by the Licensor "as is" and without any +warranty of any kind, either express, implied, or arising by statute, +custom, course of dealing, or trade usage. Licensor specifically +disclaims any and all implied warranties or conditions of title, +non-infringement, accuracy or completeness, the presence or absence of +errors, fitness for a particular purpose, merchantability, or otherwise. +Some jurisdictions do not allow the exclusion of implied warranties, so +this exclusion may not apply to You. + +### 8.0 Limitation of liability + +8.1 Subject to any liability that may not be excluded or limited by law, +the Licensor is not liable for, and expressly excludes, all liability +for loss or damage however and whenever caused to anyone by any use +under this License, whether by You or by anyone else, and whether caused +by any fault on the part of the Licensor or not. This exclusion of +liability includes, but is not limited to, any special, incidental, +consequential, punitive, or exemplary damages such as loss of revenue, +data, anticipated profits, and lost business. This exclusion applies +even if the Licensor has been advised of the possibility of such +damages. + +8.2 If liability may not be excluded by law, it is limited to actual and +direct financial loss to the extent it is caused by proved negligence on +the part of the Licensor. + +### 9.0 Termination of Your rights under this License + +9.1 Any breach by You of the terms and conditions of this License +automatically terminates this License with immediate effect and without +notice to You. 
For the avoidance of doubt, Persons who have received the +Database, the whole or a Substantial part of the Contents, Derivative +Databases, or the Database as part of a Collective Database from You +under this License will not have their licenses terminated provided +their use is in full compliance with this License or a license granted +under Section 4.8 of this License. Sections 1, 2, 7, 8, 9 and 10 will +survive any termination of this License. + +9.2 If You are not in breach of the terms of this License, the Licensor +will not terminate Your rights under it. + +9.3 Unless terminated under Section 9.1, this License is granted to You +for the duration of applicable rights in the Database. + +9.4 Reinstatement of rights. If you cease any breach of the terms and +conditions of this License, then your full rights under this License +will be reinstated: + + a. Provisionally and subject to permanent termination until the 60th + day after cessation of breach; + + b. Permanently on the 60th day after cessation of breach unless + otherwise reasonably notified by the Licensor; or + + c. Permanently if reasonably notified by the Licensor of the + violation, this is the first time You have received notice of + violation of this License from the Licensor, and You cure the + violation prior to 30 days after your receipt of the notice. + +Persons subject to permanent termination of rights are not eligible to +be a recipient and receive a license under Section 4.8. + +9.5 Notwithstanding the above, Licensor reserves the right to release +the Database under different license terms or to stop distributing or +making available the Database. Releasing the Database under different +license terms or stopping the distribution of the Database will not +withdraw this License (or any other license that has been, or is +required to be, granted under the terms of this License), and this +License will continue in full force and effect unless terminated as +stated above. + +### 10.0 General + +10.1 If any provision of this License is held to be invalid or +unenforceable, that must not affect the validity or enforceability of +the remainder of the terms and conditions of this License and each +remaining provision of this License shall be valid and enforced to the +fullest extent permitted by law. + +10.2 This License is the entire agreement between the parties with +respect to the rights granted here over the Database. It replaces any +earlier understandings, agreements or representations with respect to +the Database. + +10.3 If You are in breach of the terms of this License, You will not be +entitled to rely on the terms of this License or to complain of any +breach by the Licensor. + +10.4 Choice of law. This License takes effect in and will be governed by +the laws of the relevant jurisdiction in which the License terms are +sought to be enforced. If the standard suite of rights granted under +applicable copyright law and Database Rights in the relevant +jurisdiction includes additional rights not granted under this License, +these additional rights are granted in this License in order to meet the +terms of this License. 
diff --git a/data/README.md b/data/README.md new file mode 100644 index 0000000..7bcccc3 --- /dev/null +++ b/data/README.md @@ -0,0 +1,26 @@ +# Icefabric Data Directory + +This directory is meant to both store smaller csv files (<50MB) to allow for data access through Github, as well as scripts to create larger data objects that are used by API/CLIs + +### Hydrofabric Upstream Connections +To speed up processing of hydrofabric subsetting, upstream connections have been preprocessed into JSON files in order to determine upstream flow connectivitity. In this case, the `key` of the JSON object is the downstream flowpath and the `values` are the upstream flowpaths. Also, in this file, there is metadata to provide the data, and HF snapshot used to create these files. + +### Module iniial parameters +CSV files for initial parameterizations are kept in the `modules_ipes` folder for versioning of default parameters. + +### Scripts to create data files: +*Hydrofabric Upstream Connections* +```sh +icefabric build-upstream-connections --help +Usage: icefabric build-upstream-connections [OPTIONS] + + Creates a JSON file which documents the upstream connections from a + particular basin + +Options: + --catalog [glue|sql] The pyiceberg catalog type + --domain [ak_hf|conus_hf|gl_hf|hi_hf|prvi_hf] + The domain you are querying [required] + -o, --output-path PATH Output path of the upstream connections json + --help Show this message and exit. +``` diff --git a/data/all_gages.csv b/data/all_gages.csv new file mode 100644 index 0000000..2a24eb8 --- /dev/null +++ b/data/all_gages.csv @@ -0,0 +1,1722 @@ +gageid,agency,domain +15056210,USGS,Alaska +15200280,USGS,Alaska +15209700,USGS,Alaska +15226620,USGS,Alaska +15236900,USGS,Alaska +15238600,USGS,Alaska +15238978,USGS,Alaska +15238986,USGS,Alaska +15239050,USGS,Alaska +15243900,USGS,Alaska +15258000,USGS,Alaska +15266110,USGS,Alaska +15271000,USGS,Alaska +15272380,USGS,Alaska +15274600,USGS,Alaska +15275100,USGS,Alaska +15276000,USGS,Alaska +15281000,USGS,Alaska +15283700,USGS,Alaska +15284000,USGS,Alaska +15290000,USGS,Alaska +15292000,USGS,Alaska +15292700,USGS,Alaska +15292800,USGS,Alaska +15293200,USGS,Alaska +15293700,USGS,Alaska +15294005,USGS,Alaska +15493000,USGS,Alaska +01010000,USGS,CONUS +01011000,USGS,CONUS +01013500,USGS,CONUS +01015800,USGS,CONUS +01021480,USGS,CONUS +01027200,USGS,CONUS +01029200,USGS,CONUS +01029500,USGS,CONUS +01030500,USGS,CONUS +01031300,USGS,CONUS +01031500,USGS,CONUS +01031510,USGS,CONUS +01037380,USGS,CONUS +01038000,USGS,CONUS +01044550,USGS,CONUS +01046000,USGS,CONUS +01047000,USGS,CONUS +01047200,USGS,CONUS +01048000,USGS,CONUS +01048220,USGS,CONUS +01052500,USGS,CONUS +01054200,USGS,CONUS +01054300,USGS,CONUS +01055000,USGS,CONUS +01057000,USGS,CONUS +01064500,USGS,CONUS +01064801,USGS,CONUS +01067950,USGS,CONUS +01069700,USGS,CONUS +01073000,USGS,CONUS +01073500,USGS,CONUS +01074520,USGS,CONUS +01075000,USGS,CONUS +01077400,USGS,CONUS +01078000,USGS,CONUS +01082000,USGS,CONUS +01086000,USGS,CONUS +01089100,USGS,CONUS +01091000,USGS,CONUS +01093852,USGS,CONUS +01095220,USGS,CONUS +01096000,USGS,CONUS +010965852,USGS,CONUS +01097000,USGS,CONUS +01100600,USGS,CONUS +01101000,USGS,CONUS +01105000,USGS,CONUS +01105933,USGS,CONUS +01108000,USGS,CONUS +01115187,USGS,CONUS +01115630,USGS,CONUS +01116905,USGS,CONUS +01117370,USGS,CONUS +01117468,USGS,CONUS +01117800,USGS,CONUS +01118000,USGS,CONUS +01121000,USGS,CONUS +01123000,USGS,CONUS +01127500,USGS,CONUS +01130000,USGS,CONUS +01134500,USGS,CONUS +01135300,USGS,CONUS 
+01135500,USGS,CONUS +01137500,USGS,CONUS +01139000,USGS,CONUS +01142500,USGS,CONUS +01144000,USGS,CONUS +01150900,USGS,CONUS +01152500,USGS,CONUS +01154000,USGS,CONUS +01162500,USGS,CONUS +01169000,USGS,CONUS +01170100,USGS,CONUS +01171500,USGS,CONUS +01174565,USGS,CONUS +01181000,USGS,CONUS +01187300,USGS,CONUS +01193500,USGS,CONUS +01194000,USGS,CONUS +01194500,USGS,CONUS +01197500,USGS,CONUS +01198000,USGS,CONUS +01200000,USGS,CONUS +01201487,USGS,CONUS +012035055,USGS,CONUS +01203805,USGS,CONUS +01208990,USGS,CONUS +01321000,USGS,CONUS +01330000,USGS,CONUS +01332500,USGS,CONUS +01333000,USGS,CONUS +01334000,USGS,CONUS +01343060,USGS,CONUS +01350000,USGS,CONUS +01350035,USGS,CONUS +01350080,USGS,CONUS +01350140,USGS,CONUS +01362200,USGS,CONUS +0136230002,USGS,CONUS +01362370,USGS,CONUS +01362497,USGS,CONUS +01363382,USGS,CONUS +01365000,USGS,CONUS +01367500,USGS,CONUS +01372500,USGS,CONUS +0137449480,USGS,CONUS +01374559,USGS,CONUS +0137462010,USGS,CONUS +01374781,USGS,CONUS +01374890,USGS,CONUS +01379000,USGS,CONUS +01381400,USGS,CONUS +01384500,USGS,CONUS +01387450,USGS,CONUS +01387500,USGS,CONUS +01390450,USGS,CONUS +01391500,USGS,CONUS +01396660,USGS,CONUS +01397000,USGS,CONUS +01400000,USGS,CONUS +01403900,USGS,CONUS +01409810,USGS,CONUS +01411300,USGS,CONUS +01413408,USGS,CONUS +01413500,USGS,CONUS +01414000,USGS,CONUS +01414500,USGS,CONUS +01415000,USGS,CONUS +01421618,USGS,CONUS +01421900,USGS,CONUS +01422500,USGS,CONUS +01423000,USGS,CONUS +0142400103,USGS,CONUS +01434017,USGS,CONUS +01434498,USGS,CONUS +01439500,USGS,CONUS +01440000,USGS,CONUS +01440400,USGS,CONUS +01447720,USGS,CONUS +01451800,USGS,CONUS +01464000,USGS,CONUS +01465500,USGS,CONUS +01467042,USGS,CONUS +01470500,USGS,CONUS +01471875,USGS,CONUS +01473000,USGS,CONUS +01478245,USGS,CONUS +01481000,USGS,CONUS +01485000,USGS,CONUS +01485500,USGS,CONUS +01487000,USGS,CONUS +01490000,USGS,CONUS +01491000,USGS,CONUS +01492500,USGS,CONUS +01493500,USGS,CONUS +01502500,USGS,CONUS +01505000,USGS,CONUS +01509000,USGS,CONUS +01510000,USGS,CONUS +01516500,USGS,CONUS +01518862,USGS,CONUS +01525981,USGS,CONUS +01527500,USGS,CONUS +01532000,USGS,CONUS +01534000,USGS,CONUS +01539000,USGS,CONUS +01543000,USGS,CONUS +01544500,USGS,CONUS +01545600,USGS,CONUS +01547700,USGS,CONUS +01547950,USGS,CONUS +01548500,USGS,CONUS +01549500,USGS,CONUS +01550000,USGS,CONUS +01552000,USGS,CONUS +01552500,USGS,CONUS +01555000,USGS,CONUS +01556000,USGS,CONUS +01557500,USGS,CONUS +01558000,USGS,CONUS +01564500,USGS,CONUS +01566000,USGS,CONUS +01567500,USGS,CONUS +01568000,USGS,CONUS +01571184,USGS,CONUS +01571500,USGS,CONUS +01573000,USGS,CONUS +01580000,USGS,CONUS +01581810,USGS,CONUS +01581870,USGS,CONUS +01583500,USGS,CONUS +01586610,USGS,CONUS +01591400,USGS,CONUS +01594526,USGS,CONUS +01595000,USGS,CONUS +01596500,USGS,CONUS +01599000,USGS,CONUS +01601000,USGS,CONUS +01605500,USGS,CONUS +01606000,USGS,CONUS +01609000,USGS,CONUS +01610155,USGS,CONUS +01611500,USGS,CONUS +01613095,USGS,CONUS +01613525,USGS,CONUS +01613900,USGS,CONUS +01615000,USGS,CONUS +01616500,USGS,CONUS +01620500,USGS,CONUS +01632000,USGS,CONUS +01632900,USGS,CONUS +01634500,USGS,CONUS +01636690,USGS,CONUS +01638480,USGS,CONUS +01639500,USGS,CONUS +01643000,USGS,CONUS +01643700,USGS,CONUS +01644000,USGS,CONUS +01645000,USGS,CONUS +01649500,USGS,CONUS +01651000,USGS,CONUS +01653600,USGS,CONUS +01661050,USGS,CONUS +01662800,USGS,CONUS +01664000,USGS,CONUS +01665500,USGS,CONUS +01666500,USGS,CONUS +01669000,USGS,CONUS +01669520,USGS,CONUS +01673800,USGS,CONUS 
+02011400,USGS,CONUS +02011460,USGS,CONUS +02013000,USGS,CONUS +02014000,USGS,CONUS +02015700,USGS,CONUS +02017500,USGS,CONUS +02018500,USGS,CONUS +02024915,USGS,CONUS +02027000,USGS,CONUS +02027500,USGS,CONUS +02028500,USGS,CONUS +02030500,USGS,CONUS +02032640,USGS,CONUS +02046000,USGS,CONUS +02051000,USGS,CONUS +02051500,USGS,CONUS +02053200,USGS,CONUS +02053800,USGS,CONUS +02055100,USGS,CONUS +02056900,USGS,CONUS +02059500,USGS,CONUS +02065500,USGS,CONUS +02069700,USGS,CONUS +02070000,USGS,CONUS +02070500,USGS,CONUS +02074500,USGS,CONUS +02077200,USGS,CONUS +02079640,USGS,CONUS +0208111310,USGS,CONUS +02081500,USGS,CONUS +02082770,USGS,CONUS +02082950,USGS,CONUS +02084160,USGS,CONUS +02087359,USGS,CONUS +02091000,USGS,CONUS +02092500,USGS,CONUS +0209399200,USGS,CONUS +02095000,USGS,CONUS +02108000,USGS,CONUS +02110500,USGS,CONUS +02111180,USGS,CONUS +02111500,USGS,CONUS +02118500,USGS,CONUS +0212467595,USGS,CONUS +02128000,USGS,CONUS +02132320,USGS,CONUS +02137727,USGS,CONUS +02140991,USGS,CONUS +02142000,USGS,CONUS +0214269560,USGS,CONUS +02143000,USGS,CONUS +02143040,USGS,CONUS +021459367,USGS,CONUS +02147500,USGS,CONUS +02149000,USGS,CONUS +02152100,USGS,CONUS +02157470,USGS,CONUS +02160381,USGS,CONUS +021652801,USGS,CONUS +02167450,USGS,CONUS +02177000,USGS,CONUS +02178400,USGS,CONUS +02192500,USGS,CONUS +02193340,USGS,CONUS +02196000,USGS,CONUS +02198100,USGS,CONUS +02198690,USGS,CONUS +02201000,USGS,CONUS +02202600,USGS,CONUS +02204130,USGS,CONUS +02207385,USGS,CONUS +02208150,USGS,CONUS +02212600,USGS,CONUS +02215100,USGS,CONUS +02216180,USGS,CONUS +02217475,USGS,CONUS +02219000,USGS,CONUS +02221525,USGS,CONUS +02228500,USGS,CONUS +02231342,USGS,CONUS +02235200,USGS,CONUS +02236500,USGS,CONUS +02245500,USGS,CONUS +02247510,USGS,CONUS +02266200,USGS,CONUS +02266480,USGS,CONUS +02296500,USGS,CONUS +02297155,USGS,CONUS +02298123,USGS,CONUS +02298488,USGS,CONUS +02299472,USGS,CONUS +02299950,USGS,CONUS +02300033,USGS,CONUS +02300700,USGS,CONUS +02301990,USGS,CONUS +02303350,USGS,CONUS +02303420,USGS,CONUS +02307359,USGS,CONUS +02310525,USGS,CONUS +02310947,USGS,CONUS +02312200,USGS,CONUS +02314500,USGS,CONUS +023177483,USGS,CONUS +02320700,USGS,CONUS +02321000,USGS,CONUS +02324000,USGS,CONUS +02324400,USGS,CONUS +02326000,USGS,CONUS +02326900,USGS,CONUS +02327100,USGS,CONUS +02330400,USGS,CONUS +02330450,USGS,CONUS +02331000,USGS,CONUS +02335580,USGS,CONUS +02335870,USGS,CONUS +02336120,USGS,CONUS +02336152,USGS,CONUS +02336526,USGS,CONUS +02336910,USGS,CONUS +02336968,USGS,CONUS +02338523,USGS,CONUS +02339495,USGS,CONUS +02342850,USGS,CONUS +02343225,USGS,CONUS +02343940,USGS,CONUS +02344620,USGS,CONUS +02349605,USGS,CONUS +02349900,USGS,CONUS +02350600,USGS,CONUS +02359500,USGS,CONUS +02361000,USGS,CONUS +02362240,USGS,CONUS +02363000,USGS,CONUS +02365470,USGS,CONUS +02365769,USGS,CONUS +02366000,USGS,CONUS +02366996,USGS,CONUS +02369800,USGS,CONUS +02370500,USGS,CONUS +02371500,USGS,CONUS +02372250,USGS,CONUS +02373000,USGS,CONUS +02374500,USGS,CONUS +02374745,USGS,CONUS +02374950,USGS,CONUS +02378170,USGS,CONUS +02380500,USGS,CONUS +02384500,USGS,CONUS +02387600,USGS,CONUS +02388975,USGS,CONUS +02390000,USGS,CONUS +02390140,USGS,CONUS +02392975,USGS,CONUS +02395120,USGS,CONUS +02400680,USGS,CONUS +02408540,USGS,CONUS +02415000,USGS,CONUS +02418760,USGS,CONUS +02421000,USGS,CONUS +02422500,USGS,CONUS +02423130,USGS,CONUS +02423400,USGS,CONUS +02427250,USGS,CONUS +02430085,USGS,CONUS +02430880,USGS,CONUS +02438000,USGS,CONUS +02448900,USGS,CONUS +02450250,USGS,CONUS 
+02450825,USGS,CONUS +02453000,USGS,CONUS +02455980,USGS,CONUS +02457595,USGS,CONUS +02464000,USGS,CONUS +02465493,USGS,CONUS +02467500,USGS,CONUS +02469800,USGS,CONUS +02470072,USGS,CONUS +02472000,USGS,CONUS +02472500,USGS,CONUS +02472850,USGS,CONUS +02479155,USGS,CONUS +02479300,USGS,CONUS +02479560,USGS,CONUS +02479945,USGS,CONUS +02481000,USGS,CONUS +02481510,USGS,CONUS +02481880,USGS,CONUS +02AB021,ENVCA,CONUS +02AC001,ENVCA,CONUS +02AD010,ENVCA,CONUS +02BA003,ENVCA,CONUS +02BB003,ENVCA,CONUS +02BC006,ENVCA,CONUS +02BF002,ENVCA,CONUS +02CF011,ENVCA,CONUS +02DC012,ENVCA,CONUS +02EA018,ENVCA,CONUS +02EB014,ENVCA,CONUS +02ED003,ENVCA,CONUS +02FA004,ENVCA,CONUS +02FB007,ENVCA,CONUS +02FD001,ENVCA,CONUS +02FE008,ENVCA,CONUS +02FF007,ENVCA,CONUS +02GB007,ENVCA,CONUS +02GC002,ENVCA,CONUS +02GG003,ENVCA,CONUS +02GH002,ENVCA,CONUS +02HA006,ENVCA,CONUS +02HB029,ENVCA,CONUS +02HC025,ENVCA,CONUS +02HD012,ENVCA,CONUS +02HL005,ENVCA,CONUS +02HM010,ENVCA,CONUS +03010655,USGS,CONUS +03010820,USGS,CONUS +03011800,USGS,CONUS +03015500,USGS,CONUS +03021350,USGS,CONUS +03028000,USGS,CONUS +03029000,USGS,CONUS +03032500,USGS,CONUS +03042000,USGS,CONUS +03045000,USGS,CONUS +03050000,USGS,CONUS +03052000,USGS,CONUS +03052500,USGS,CONUS +03065000,USGS,CONUS +03068800,USGS,CONUS +03069500,USGS,CONUS +03070500,USGS,CONUS +03072000,USGS,CONUS +03075905,USGS,CONUS +03076600,USGS,CONUS +03078000,USGS,CONUS +03093000,USGS,CONUS +03110830,USGS,CONUS +03114500,USGS,CONUS +03115400,USGS,CONUS +03140000,USGS,CONUS +03141870,USGS,CONUS +03144000,USGS,CONUS +03145483,USGS,CONUS +03149500,USGS,CONUS +03151400,USGS,CONUS +03154000,USGS,CONUS +03159540,USGS,CONUS +03161000,USGS,CONUS +03164000,USGS,CONUS +03165000,USGS,CONUS +03170000,USGS,CONUS +03173000,USGS,CONUS +03175500,USGS,CONUS +03177710,USGS,CONUS +03180500,USGS,CONUS +03183500,USGS,CONUS +03186500,USGS,CONUS +03187500,USGS,CONUS +03198350,USGS,CONUS +03201405,USGS,CONUS +03201902,USGS,CONUS +03202400,USGS,CONUS +03206600,USGS,CONUS +03207800,USGS,CONUS +03208950,USGS,CONUS +03210000,USGS,CONUS +03212750,USGS,CONUS +03212980,USGS,CONUS +03213500,USGS,CONUS +03217500,USGS,CONUS +03228750,USGS,CONUS +03230310,USGS,CONUS +03230500,USGS,CONUS +03237280,USGS,CONUS +03237500,USGS,CONUS +03241500,USGS,CONUS +03248300,USGS,CONUS +03251200,USGS,CONUS +03252300,USGS,CONUS +03254550,USGS,CONUS +03271300,USGS,CONUS +03272700,USGS,CONUS +03275000,USGS,CONUS +03277075,USGS,CONUS +03277130,USGS,CONUS +03277300,USGS,CONUS +03280700,USGS,CONUS +03281100,USGS,CONUS +03281500,USGS,CONUS +03282040,USGS,CONUS +03282500,USGS,CONUS +03285000,USGS,CONUS +03291500,USGS,CONUS +03291780,USGS,CONUS +03292474,USGS,CONUS +03300400,USGS,CONUS +03302680,USGS,CONUS +03307000,USGS,CONUS +03314000,USGS,CONUS +03318800,USGS,CONUS +03328000,USGS,CONUS +03339500,USGS,CONUS +03340800,USGS,CONUS +03346000,USGS,CONUS +03351072,USGS,CONUS +03357330,USGS,CONUS +03361650,USGS,CONUS +03364500,USGS,CONUS +03366500,USGS,CONUS +03368000,USGS,CONUS +03373508,USGS,CONUS +03378635,USGS,CONUS +03379500,USGS,CONUS +03384450,USGS,CONUS +03403910,USGS,CONUS +03408500,USGS,CONUS +03409500,USGS,CONUS +03413200,USGS,CONUS +03415000,USGS,CONUS +03424730,USGS,CONUS +03431599,USGS,CONUS +03431800,USGS,CONUS +03432350,USGS,CONUS +03433500,USGS,CONUS +03436690,USGS,CONUS +03439000,USGS,CONUS +03441000,USGS,CONUS +0344894205,USGS,CONUS +03455500,USGS,CONUS +03456500,USGS,CONUS +03460000,USGS,CONUS +03463300,USGS,CONUS +03471500,USGS,CONUS +03479000,USGS,CONUS +03488000,USGS,CONUS +03491000,USGS,CONUS +03497300,USGS,CONUS 
+03500000,USGS,CONUS +03500240,USGS,CONUS +03504000,USGS,CONUS +03518500,USGS,CONUS +03527220,USGS,CONUS +03535000,USGS,CONUS +03535400,USGS,CONUS +03539778,USGS,CONUS +03544970,USGS,CONUS +03567340,USGS,CONUS +03574500,USGS,CONUS +03578000,USGS,CONUS +03578500,USGS,CONUS +03588500,USGS,CONUS +03597590,USGS,CONUS +03599450,USGS,CONUS +03604000,USGS,CONUS +03605078,USGS,CONUS +04015330,USGS,CONUS +04024430,USGS,CONUS +04027000,USGS,CONUS +04031000,USGS,CONUS +04033000,USGS,CONUS +04040500,USGS,CONUS +04043050,USGS,CONUS +04043150,USGS,CONUS +04043244,USGS,CONUS +04045500,USGS,CONUS +04046000,USGS,CONUS +04056500,USGS,CONUS +04057510,USGS,CONUS +04057800,USGS,CONUS +04059500,USGS,CONUS +04060993,USGS,CONUS +04063700,USGS,CONUS +04066500,USGS,CONUS +04067958,USGS,CONUS +04073365,USGS,CONUS +04074950,USGS,CONUS +04080000,USGS,CONUS +04085200,USGS,CONUS +04085427,USGS,CONUS +04086500,USGS,CONUS +04100500,USGS,CONUS +04101370,USGS,CONUS +04104945,USGS,CONUS +04105700,USGS,CONUS +04112850,USGS,CONUS +04114498,USGS,CONUS +04115265,USGS,CONUS +04117500,USGS,CONUS +04118500,USGS,CONUS +04122200,USGS,CONUS +04122500,USGS,CONUS +04124000,USGS,CONUS +04124500,USGS,CONUS +04126970,USGS,CONUS +04127800,USGS,CONUS +04127997,USGS,CONUS +04136000,USGS,CONUS +04142000,USGS,CONUS +04144500,USGS,CONUS +04146063,USGS,CONUS +04148140,USGS,CONUS +04152238,USGS,CONUS +04159492,USGS,CONUS +04161540,USGS,CONUS +04163400,USGS,CONUS +04164500,USGS,CONUS +04166000,USGS,CONUS +04180988,USGS,CONUS +04185000,USGS,CONUS +04189000,USGS,CONUS +04196800,USGS,CONUS +04197100,USGS,CONUS +04197170,USGS,CONUS +04199155,USGS,CONUS +04199500,USGS,CONUS +04207200,USGS,CONUS +04208000,USGS,CONUS +04209000,USGS,CONUS +04213000,USGS,CONUS +04213500,USGS,CONUS +04214500,USGS,CONUS +04215000,USGS,CONUS +04215500,USGS,CONUS +04216418,USGS,CONUS +04217000,USGS,CONUS +04218000,USGS,CONUS +04221000,USGS,CONUS +04224775,USGS,CONUS +04229500,USGS,CONUS +04230500,USGS,CONUS +04231000,USGS,CONUS +0423205010,USGS,CONUS +04233286,USGS,CONUS +04234000,USGS,CONUS +04242500,USGS,CONUS +04243500,USGS,CONUS +04252500,USGS,CONUS +04256000,USGS,CONUS +04262500,USGS,CONUS +04265432,USGS,CONUS +04268800,USGS,CONUS +04273700,USGS,CONUS +04273800,USGS,CONUS +04275000,USGS,CONUS +04280450,USGS,CONUS +04282000,USGS,CONUS +04282500,USGS,CONUS +04282525,USGS,CONUS +04282650,USGS,CONUS +04282780,USGS,CONUS +04286000,USGS,CONUS +04288000,USGS,CONUS +04292000,USGS,CONUS +04293000,USGS,CONUS +04293500,USGS,CONUS +04296000,USGS,CONUS +05014300,USGS,CONUS +05056000,USGS,CONUS +05056100,USGS,CONUS +05056200,USGS,CONUS +05057000,USGS,CONUS +05057200,USGS,CONUS +05059600,USGS,CONUS +05061000,USGS,CONUS +05061500,USGS,CONUS +05062500,USGS,CONUS +05078500,USGS,CONUS +05087500,USGS,CONUS +05120500,USGS,CONUS +05123400,USGS,CONUS +05127500,USGS,CONUS +05129115,USGS,CONUS +05131500,USGS,CONUS +05132000,USGS,CONUS +05212700,USGS,CONUS +05245100,USGS,CONUS +05275000,USGS,CONUS +05290000,USGS,CONUS +05291000,USGS,CONUS +05293000,USGS,CONUS +05315000,USGS,CONUS +05316970,USGS,CONUS +05316992,USGS,CONUS +05317000,USGS,CONUS +05317200,USGS,CONUS +05319500,USGS,CONUS +05327000,USGS,CONUS +05336700,USGS,CONUS +05353800,USGS,CONUS +05357335,USGS,CONUS +05362000,USGS,CONUS +05368000,USGS,CONUS +05383950,USGS,CONUS +05387440,USGS,CONUS +05389000,USGS,CONUS +05389400,USGS,CONUS +05393500,USGS,CONUS +05399500,USGS,CONUS +05407470,USGS,CONUS +05411850,USGS,CONUS +05412400,USGS,CONUS +05413500,USGS,CONUS +05414000,USGS,CONUS +05420680,USGS,CONUS +05426000,USGS,CONUS +05426067,USGS,CONUS 
+05431486,USGS,CONUS +05444000,USGS,CONUS +05447500,USGS,CONUS +05451210,USGS,CONUS +05454000,USGS,CONUS +05455500,USGS,CONUS +05458000,USGS,CONUS +05459500,USGS,CONUS +05464220,USGS,CONUS +05466500,USGS,CONUS +05467000,USGS,CONUS +05473450,USGS,CONUS +05478265,USGS,CONUS +05480820,USGS,CONUS +05481950,USGS,CONUS +05482300,USGS,CONUS +05483450,USGS,CONUS +05487980,USGS,CONUS +05488200,USGS,CONUS +05489000,USGS,CONUS +05494300,USGS,CONUS +05495500,USGS,CONUS +05498150,USGS,CONUS +05498700,USGS,CONUS +05501000,USGS,CONUS +05503800,USGS,CONUS +05506100,USGS,CONUS +05507600,USGS,CONUS +05508805,USGS,CONUS +05514500,USGS,CONUS +05514840,USGS,CONUS +05516500,USGS,CONUS +05525500,USGS,CONUS +05551200,USGS,CONUS +05551675,USGS,CONUS +05556500,USGS,CONUS +05567500,USGS,CONUS +05569500,USGS,CONUS +05570000,USGS,CONUS +05577500,USGS,CONUS +05584500,USGS,CONUS +05587000,USGS,CONUS +05591550,USGS,CONUS +05592050,USGS,CONUS +05592575,USGS,CONUS +05593575,USGS,CONUS +05593900,USGS,CONUS +05595730,USGS,CONUS +06024450,USGS,CONUS +06024540,USGS,CONUS +06025250,USGS,CONUS +06025500,USGS,CONUS +06036805,USGS,CONUS +06036905,USGS,CONUS +06037100,USGS,CONUS +06037500,USGS,CONUS +06043500,USGS,CONUS +06061500,USGS,CONUS +06073500,USGS,CONUS +06091700,USGS,CONUS +06093200,USGS,CONUS +06099500,USGS,CONUS +06102500,USGS,CONUS +06108000,USGS,CONUS +06119600,USGS,CONUS +06120500,USGS,CONUS +06139500,USGS,CONUS +06151500,USGS,CONUS +06186500,USGS,CONUS +06187915,USGS,CONUS +06195600,USGS,CONUS +06209500,USGS,CONUS +06218500,USGS,CONUS +06220800,USGS,CONUS +06221400,USGS,CONUS +06224000,USGS,CONUS +06278300,USGS,CONUS +06280300,USGS,CONUS +06289000,USGS,CONUS +06309200,USGS,CONUS +06311000,USGS,CONUS +06331000,USGS,CONUS +06332515,USGS,CONUS +06334500,USGS,CONUS +06336600,USGS,CONUS +06339100,USGS,CONUS +06342450,USGS,CONUS +06344600,USGS,CONUS +06347000,USGS,CONUS +06347500,USGS,CONUS +06350000,USGS,CONUS +06351200,USGS,CONUS +06352000,USGS,CONUS +06353000,USGS,CONUS +06360500,USGS,CONUS +06392900,USGS,CONUS +06400000,USGS,CONUS +06402430,USGS,CONUS +06404000,USGS,CONUS +06409000,USGS,CONUS +06422500,USGS,CONUS +06424000,USGS,CONUS +06429500,USGS,CONUS +06430770,USGS,CONUS +06430850,USGS,CONUS +06431500,USGS,CONUS +06437020,USGS,CONUS +06440200,USGS,CONUS +06441500,USGS,CONUS +06446500,USGS,CONUS +06447230,USGS,CONUS +06447500,USGS,CONUS +06453600,USGS,CONUS +06464500,USGS,CONUS +06466500,USGS,CONUS +06468170,USGS,CONUS +06469400,USGS,CONUS +06470800,USGS,CONUS +06471200,USGS,CONUS +06477500,USGS,CONUS +06478690,USGS,CONUS +06479215,USGS,CONUS +06479438,USGS,CONUS +06482610,USGS,CONUS +06601000,USGS,CONUS +06605850,USGS,CONUS +06607200,USGS,CONUS +06608500,USGS,CONUS +06609500,USGS,CONUS +06620000,USGS,CONUS +06622700,USGS,CONUS +06623800,USGS,CONUS +06625000,USGS,CONUS +06632400,USGS,CONUS +06708800,USGS,CONUS +06709000,USGS,CONUS +06710150,USGS,CONUS +06710385,USGS,CONUS +06775500,USGS,CONUS +06784000,USGS,CONUS +06795500,USGS,CONUS +06799100,USGS,CONUS +06800000,USGS,CONUS +06803000,USGS,CONUS +06803510,USGS,CONUS +06803530,USGS,CONUS +06806500,USGS,CONUS +06807410,USGS,CONUS +06809210,USGS,CONUS +06811500,USGS,CONUS +06813000,USGS,CONUS +06814000,USGS,CONUS +06815000,USGS,CONUS +06817000,USGS,CONUS +06819185,USGS,CONUS +06821080,USGS,CONUS +06821500,USGS,CONUS +06823500,USGS,CONUS +06846500,USGS,CONUS +06847900,USGS,CONUS +06853800,USGS,CONUS +06863420,USGS,CONUS +06866900,USGS,CONUS +06868850,USGS,CONUS +06869950,USGS,CONUS +06876700,USGS,CONUS +06878000,USGS,CONUS +06883000,USGS,CONUS +06884200,USGS,CONUS 
+06885500,USGS,CONUS +06886500,USGS,CONUS +06888000,USGS,CONUS +06888500,USGS,CONUS +06889200,USGS,CONUS +06890100,USGS,CONUS +06893500,USGS,CONUS +06895000,USGS,CONUS +06898000,USGS,CONUS +06899700,USGS,CONUS +06903400,USGS,CONUS +06903700,USGS,CONUS +06906150,USGS,CONUS +06906800,USGS,CONUS +06907700,USGS,CONUS +06909500,USGS,CONUS +06909950,USGS,CONUS +06910230,USGS,CONUS +06910750,USGS,CONUS +06910800,USGS,CONUS +06911490,USGS,CONUS +06911900,USGS,CONUS +06914000,USGS,CONUS +06917000,USGS,CONUS +06917500,USGS,CONUS +06918460,USGS,CONUS +06919500,USGS,CONUS +06921070,USGS,CONUS +06921200,USGS,CONUS +06921590,USGS,CONUS +06921600,USGS,CONUS +06921720,USGS,CONUS +06923940,USGS,CONUS +06927000,USGS,CONUS +06928000,USGS,CONUS +06928300,USGS,CONUS +06930000,USGS,CONUS +06932000,USGS,CONUS +07001985,USGS,CONUS +07010350,USGS,CONUS +07014000,USGS,CONUS +07017200,USGS,CONUS +07021000,USGS,CONUS +07025400,USGS,CONUS +07030392,USGS,CONUS +07030500,USGS,CONUS +07037500,USGS,CONUS +07048550,USGS,CONUS +07049000,USGS,CONUS +07050152,USGS,CONUS +07050500,USGS,CONUS +07050690,USGS,CONUS +07050700,USGS,CONUS +07053250,USGS,CONUS +07053810,USGS,CONUS +07054080,USGS,CONUS +07055646,USGS,CONUS +07055875,USGS,CONUS +07056515,USGS,CONUS +07057500,USGS,CONUS +07058000,USGS,CONUS +07060710,USGS,CONUS +07061270,USGS,CONUS +07064440,USGS,CONUS +07065200,USGS,CONUS +07071500,USGS,CONUS +07072000,USGS,CONUS +07075000,USGS,CONUS +07083000,USGS,CONUS +07096250,USGS,CONUS +07103700,USGS,CONUS +07103780,USGS,CONUS +07105490,USGS,CONUS +07126200,USGS,CONUS +07126300,USGS,CONUS +07142300,USGS,CONUS +07144780,USGS,CONUS +07145700,USGS,CONUS +07148400,USGS,CONUS +07149000,USGS,CONUS +07151500,USGS,CONUS +07157500,USGS,CONUS +07160500,USGS,CONUS +07164600,USGS,CONUS +07165562,USGS,CONUS +07167500,USGS,CONUS +07180500,USGS,CONUS +07184000,USGS,CONUS +07185090,USGS,CONUS +07185910,USGS,CONUS +07187000,USGS,CONUS +07188653,USGS,CONUS +07189100,USGS,CONUS +07191222,USGS,CONUS +07195800,USGS,CONUS +07196900,USGS,CONUS +07197360,USGS,CONUS +07208500,USGS,CONUS +07226500,USGS,CONUS +07233500,USGS,CONUS +07241780,USGS,CONUS +07247250,USGS,CONUS +07249400,USGS,CONUS +07249800,USGS,CONUS +07249920,USGS,CONUS +07250935,USGS,CONUS +07250974,USGS,CONUS +07252000,USGS,CONUS +07257006,USGS,CONUS +07257500,USGS,CONUS +07260000,USGS,CONUS +07261000,USGS,CONUS +07263295,USGS,CONUS +072632962,USGS,CONUS +072632982,USGS,CONUS +07274000,USGS,CONUS +07283000,USGS,CONUS +07288280,USGS,CONUS +07291000,USGS,CONUS +07299670,USGS,CONUS +07301410,USGS,CONUS +07303400,USGS,CONUS +07311500,USGS,CONUS +07311800,USGS,CONUS +07312200,USGS,CONUS +07315200,USGS,CONUS +07315700,USGS,CONUS +07325860,USGS,CONUS +07329780,USGS,CONUS +07331300,USGS,CONUS +07332390,USGS,CONUS +07335700,USGS,CONUS +07337900,USGS,CONUS +07340300,USGS,CONUS +07342480,USGS,CONUS +07342500,USGS,CONUS +07346045,USGS,CONUS +07348700,USGS,CONUS +07351500,USGS,CONUS +07359610,USGS,CONUS +07360200,USGS,CONUS +07362100,USGS,CONUS +07362587,USGS,CONUS +07366200,USGS,CONUS +07372200,USGS,CONUS +07373000,USGS,CONUS +07375000,USGS,CONUS +07375800,USGS,CONUS +07376500,USGS,CONUS +07377000,USGS,CONUS +07378000,USGS,CONUS +08010000,USGS,CONUS +08013000,USGS,CONUS +08014500,USGS,CONUS +08017300,USGS,CONUS +08020700,USGS,CONUS +08023080,USGS,CONUS +08023400,USGS,CONUS +08025500,USGS,CONUS +08029500,USGS,CONUS +08031000,USGS,CONUS +08041500,USGS,CONUS +08041700,USGS,CONUS +08047500,USGS,CONUS +08049700,USGS,CONUS +08050800,USGS,CONUS +08050840,USGS,CONUS +08053500,USGS,CONUS +08057200,USGS,CONUS 
+08064100,USGS,CONUS +08065200,USGS,CONUS +08066200,USGS,CONUS +08066300,USGS,CONUS +08068325,USGS,CONUS +08068780,USGS,CONUS +08070000,USGS,CONUS +08070500,USGS,CONUS +08075000,USGS,CONUS +08079600,USGS,CONUS +08082700,USGS,CONUS +08086212,USGS,CONUS +08095300,USGS,CONUS +08099300,USGS,CONUS +08101000,USGS,CONUS +08103900,USGS,CONUS +0810464660,USGS,CONUS +08104900,USGS,CONUS +08109700,USGS,CONUS +08128400,USGS,CONUS +08130700,USGS,CONUS +08131400,USGS,CONUS +08133250,USGS,CONUS +08144500,USGS,CONUS +08150800,USGS,CONUS +08152900,USGS,CONUS +08154700,USGS,CONUS +08155200,USGS,CONUS +08158700,USGS,CONUS +08158810,USGS,CONUS +08158860,USGS,CONUS +08159000,USGS,CONUS +08164000,USGS,CONUS +08164300,USGS,CONUS +08164600,USGS,CONUS +08165300,USGS,CONUS +08165500,USGS,CONUS +08166000,USGS,CONUS +08171290,USGS,CONUS +08175000,USGS,CONUS +08176900,USGS,CONUS +08177300,USGS,CONUS +08178880,USGS,CONUS +08181480,USGS,CONUS +08186500,USGS,CONUS +08189200,USGS,CONUS +08189300,USGS,CONUS +08190000,USGS,CONUS +08190500,USGS,CONUS +08194200,USGS,CONUS +08195000,USGS,CONUS +08196000,USGS,CONUS +08198000,USGS,CONUS +08200000,USGS,CONUS +08200977,USGS,CONUS +08201500,USGS,CONUS +08202700,USGS,CONUS +08210400,USGS,CONUS +08212400,USGS,CONUS +08267500,USGS,CONUS +08269000,USGS,CONUS +08271000,USGS,CONUS +08277470,USGS,CONUS +08302500,USGS,CONUS +08315480,USGS,CONUS +08324000,USGS,CONUS +08340500,USGS,CONUS +08377900,USGS,CONUS +08380500,USGS,CONUS +08386505,USGS,CONUS +08400000,USGS,CONUS +08401200,USGS,CONUS +08405105,USGS,CONUS +08408500,USGS,CONUS +08447020,USGS,CONUS +09035900,USGS,CONUS +09050100,USGS,CONUS +09051050,USGS,CONUS +09059500,USGS,CONUS +09064600,USGS,CONUS +09065500,USGS,CONUS +09066000,USGS,CONUS +09081600,USGS,CONUS +09107000,USGS,CONUS +09112500,USGS,CONUS +09115500,USGS,CONUS +09124500,USGS,CONUS +09146020,USGS,CONUS +09183600,USGS,CONUS +09188500,USGS,CONUS +09196500,USGS,CONUS +09210500,USGS,CONUS +09217900,USGS,CONUS +09223000,USGS,CONUS +09238900,USGS,CONUS +09253000,USGS,CONUS +09258980,USGS,CONUS +09266500,USGS,CONUS +09292000,USGS,CONUS +09306242,USGS,CONUS +09310700,USGS,CONUS +09312600,USGS,CONUS +09329050,USGS,CONUS +09352900,USGS,CONUS +09359500,USGS,CONUS +09386900,USGS,CONUS +09399400,USGS,CONUS +09401110,USGS,CONUS +09404110,USGS,CONUS +09404208,USGS,CONUS +09404222,USGS,CONUS +09404343,USGS,CONUS +09404450,USGS,CONUS +09405500,USGS,CONUS +09408000,USGS,CONUS +09413900,USGS,CONUS +09424447,USGS,CONUS +09430500,USGS,CONUS +09430600,USGS,CONUS +09444200,USGS,CONUS +09447800,USGS,CONUS +09470750,USGS,CONUS +09470800,USGS,CONUS +09484000,USGS,CONUS +09484550,USGS,CONUS +09484580,USGS,CONUS +09484600,USGS,CONUS +09485000,USGS,CONUS +09487000,USGS,CONUS +09489500,USGS,CONUS +09492400,USGS,CONUS +09497800,USGS,CONUS +09497980,USGS,CONUS +094985005,USGS,CONUS +09498502,USGS,CONUS +09499000,USGS,CONUS +09504420,USGS,CONUS +09505200,USGS,CONUS +09505350,USGS,CONUS +09505800,USGS,CONUS +09507980,USGS,CONUS +09508300,USGS,CONUS +09510200,USGS,CONUS +09512280,USGS,CONUS +09513780,USGS,CONUS +09537200,USGS,CONUS +10011500,USGS,CONUS +10023000,USGS,CONUS +10109000,USGS,CONUS +10128500,USGS,CONUS +10146400,USGS,CONUS +10149400,USGS,CONUS +10166430,USGS,CONUS +10172700,USGS,CONUS +10172860,USGS,CONUS +10172870,USGS,CONUS +10173450,USGS,CONUS +10205030,USGS,CONUS +10234500,USGS,CONUS +10242000,USGS,CONUS +10243260,USGS,CONUS +10243700,USGS,CONUS +10249300,USGS,CONUS +10257600,USGS,CONUS +10258000,USGS,CONUS +10258500,USGS,CONUS +10259000,USGS,CONUS +10259200,USGS,CONUS +10263500,USGS,CONUS 
+10308200,USGS,CONUS +10308794,USGS,CONUS +10310000,USGS,CONUS +10310500,USGS,CONUS +10316500,USGS,CONUS +10321590,USGS,CONUS +10321940,USGS,CONUS +10329500,USGS,CONUS +103366092,USGS,CONUS +10336645,USGS,CONUS +10336660,USGS,CONUS +10347310,USGS,CONUS +10352500,USGS,CONUS +10396000,USGS,CONUS +11014000,USGS,CONUS +11015000,USGS,CONUS +11023000,USGS,CONUS +11042400,USGS,CONUS +11046300,USGS,CONUS +11046360,USGS,CONUS +11055800,USGS,CONUS +11063510,USGS,CONUS +11098000,USGS,CONUS +11111500,USGS,CONUS +11114495,USGS,CONUS +11118500,USGS,CONUS +11120500,USGS,CONUS +11124500,USGS,CONUS +11129800,USGS,CONUS +11138500,USGS,CONUS +11141280,USGS,CONUS +11143000,USGS,CONUS +11147500,USGS,CONUS +11148900,USGS,CONUS +11151300,USGS,CONUS +11152000,USGS,CONUS +11162500,USGS,CONUS +11162570,USGS,CONUS +11169800,USGS,CONUS +11172945,USGS,CONUS +11173200,USGS,CONUS +11176400,USGS,CONUS +11180500,USGS,CONUS +11180900,USGS,CONUS +11200800,USGS,CONUS +11203580,USGS,CONUS +11224500,USGS,CONUS +11253310,USGS,CONUS +11264500,USGS,CONUS +11274500,USGS,CONUS +11274630,USGS,CONUS +11274790,USGS,CONUS +11284400,USGS,CONUS +11299600,USGS,CONUS +11335000,USGS,CONUS +11374000,USGS,CONUS +11381500,USGS,CONUS +11383500,USGS,CONUS +11402000,USGS,CONUS +11413000,USGS,CONUS +11449500,USGS,CONUS +11451100,USGS,CONUS +11456000,USGS,CONUS +11458000,USGS,CONUS +11461000,USGS,CONUS +11467000,USGS,CONUS +11467200,USGS,CONUS +11467510,USGS,CONUS +11468000,USGS,CONUS +11468500,USGS,CONUS +11468900,USGS,CONUS +11473900,USGS,CONUS +11475800,USGS,CONUS +11476600,USGS,CONUS +11478500,USGS,CONUS +11480390,USGS,CONUS +11481000,USGS,CONUS +11481200,USGS,CONUS +11481500,USGS,CONUS +11521500,USGS,CONUS +11522500,USGS,CONUS +11523200,USGS,CONUS +11525530,USGS,CONUS +11525670,USGS,CONUS +11528700,USGS,CONUS +11532500,USGS,CONUS +12010000,USGS,CONUS +12013500,USGS,CONUS +12020000,USGS,CONUS +12024000,USGS,CONUS +12025700,USGS,CONUS +12035000,USGS,CONUS +12039005,USGS,CONUS +12039500,USGS,CONUS +12040500,USGS,CONUS +12041200,USGS,CONUS +12043000,USGS,CONUS +12043300,USGS,CONUS +12048000,USGS,CONUS +12054000,USGS,CONUS +12056500,USGS,CONUS +12060500,USGS,CONUS +12079000,USGS,CONUS +12082500,USGS,CONUS +12092000,USGS,CONUS +12094000,USGS,CONUS +12095000,USGS,CONUS +12097500,USGS,CONUS +12108500,USGS,CONUS +12114500,USGS,CONUS +12115500,USGS,CONUS +12117000,USGS,CONUS +12120600,USGS,CONUS +12134500,USGS,CONUS +12137290,USGS,CONUS +12141300,USGS,CONUS +12142000,USGS,CONUS +12143400,USGS,CONUS +12145500,USGS,CONUS +12155300,USGS,CONUS +12158040,USGS,CONUS +12167000,USGS,CONUS +12175500,USGS,CONUS +12178100,USGS,CONUS +12179900,USGS,CONUS +12182500,USGS,CONUS +12186000,USGS,CONUS +12201500,USGS,CONUS +12205000,USGS,CONUS +12208000,USGS,CONUS +12210000,USGS,CONUS +12210900,USGS,CONUS +12302055,USGS,CONUS +12304500,USGS,CONUS +12323670,USGS,CONUS +12323710,USGS,CONUS +12324590,USGS,CONUS +12330000,USGS,CONUS +12332000,USGS,CONUS +12354000,USGS,CONUS +12358500,USGS,CONUS +12359800,USGS,CONUS +12363000,USGS,CONUS +12370000,USGS,CONUS +12374250,USGS,CONUS +12377150,USGS,CONUS +12381400,USGS,CONUS +12390700,USGS,CONUS +12392155,USGS,CONUS +12395000,USGS,CONUS +12411000,USGS,CONUS +12413125,USGS,CONUS +12413130,USGS,CONUS +12413370,USGS,CONUS +12413875,USGS,CONUS +12424000,USGS,CONUS +12447383,USGS,CONUS +12447390,USGS,CONUS +12451000,USGS,CONUS +12452800,USGS,CONUS +12452890,USGS,CONUS +12456500,USGS,CONUS +12458000,USGS,CONUS +12488500,USGS,CONUS +13010065,USGS,CONUS +13011500,USGS,CONUS +13011900,USGS,CONUS +13016305,USGS,CONUS +13023000,USGS,CONUS 
+13032500,USGS,CONUS +13037500,USGS,CONUS +13046995,USGS,CONUS +13083000,USGS,CONUS +13139510,USGS,CONUS +13148500,USGS,CONUS +13161500,USGS,CONUS +13162225,USGS,CONUS +13185000,USGS,CONUS +13186000,USGS,CONUS +13190500,USGS,CONUS +13215000,USGS,CONUS +13217500,USGS,CONUS +13233300,USGS,CONUS +13235000,USGS,CONUS +13237920,USGS,CONUS +13239000,USGS,CONUS +13240000,USGS,CONUS +13247500,USGS,CONUS +13266000,USGS,CONUS +13296500,USGS,CONUS +13297330,USGS,CONUS +13306385,USGS,CONUS +13309220,USGS,CONUS +13310700,USGS,CONUS +13313000,USGS,CONUS +13331500,USGS,CONUS +13334450,USGS,CONUS +13336500,USGS,CONUS +13337000,USGS,CONUS +13338500,USGS,CONUS +13339500,USGS,CONUS +13340600,USGS,CONUS +13346800,USGS,CONUS +13348000,USGS,CONUS +14013000,USGS,CONUS +14020000,USGS,CONUS +14020300,USGS,CONUS +14033500,USGS,CONUS +14036860,USGS,CONUS +14046890,USGS,CONUS +14092750,USGS,CONUS +14096850,USGS,CONUS +14107000,USGS,CONUS +14113200,USGS,CONUS +14137000,USGS,CONUS +14138800,USGS,CONUS +14138870,USGS,CONUS +14138900,USGS,CONUS +14139800,USGS,CONUS +14141500,USGS,CONUS +14150800,USGS,CONUS +14154500,USGS,CONUS +14158500,USGS,CONUS +14158790,USGS,CONUS +14159200,USGS,CONUS +14161500,USGS,CONUS +14166500,USGS,CONUS +14178000,USGS,CONUS +14179000,USGS,CONUS +14180300,USGS,CONUS +14182500,USGS,CONUS +14185000,USGS,CONUS +14185900,USGS,CONUS +14187000,USGS,CONUS +14211400,USGS,CONUS +14211500,USGS,CONUS +14216000,USGS,CONUS +14216500,USGS,CONUS +14219000,USGS,CONUS +14222500,USGS,CONUS +14226500,USGS,CONUS +14236200,USGS,CONUS +14242580,USGS,CONUS +14299800,USGS,CONUS +14301000,USGS,CONUS +14301500,USGS,CONUS +14302480,USGS,CONUS +14305500,USGS,CONUS +14306500,USGS,CONUS +14307620,USGS,CONUS +14308000,USGS,CONUS +14308500,USGS,CONUS +14308990,USGS,CONUS +14309500,USGS,CONUS +14315950,USGS,CONUS +14316455,USGS,CONUS +14316495,USGS,CONUS +14316700,USGS,CONUS +14318000,USGS,CONUS +14325000,USGS,CONUS +14357500,USGS,CONUS +14362250,USGS,CONUS +14375100,USGS,CONUS +14400000,USGS,CONUS +16103000,USGS,Hawaii +16294100,USGS,Hawaii +16704000,USGS,Hawaii +402114105350101,USGS,CONUS +50043800,USGS,Puerto Rico +50075500,USGS,Puerto Rico +50147800,USGS,Puerto Rico +CDR,CADWR,CONUS +CHC,CADWR,CONUS +CLV,CADWR,CONUS +CMF,CADWR,CONUS +CNF,CADWR,CONUS +DCM,CADWR,CONUS +EVA,CADWR,CONUS +ICR,CADWR,CONUS +JBR,CADWR,CONUS +LCB,CADWR,CONUS +LCV,CADWR,CONUS +MCD,CADWR,CONUS +MCK,CADWR,CONUS +MFP,CADWR,CONUS +MPD,CADWR,CONUS +MSS,CADWR,CONUS +MTK,CADWR,CONUS +NFW,CADWR,CONUS +PDR,CADWR,CONUS +RBW,CADWR,CONUS +RCS,CADWR,CONUS +SFH,CADWR,CONUS +SMW,CADWR,CONUS +STO,CADWR,CONUS +TRR,CADWR,CONUS +16010000,USGS,Hawaii +16019000,USGS,Hawaii +16060000,USGS,Hawaii +16068000,USGS,Hawaii +16071500,USGS,Hawaii +16108000,USGS,Hawaii +16200000,USGS,Hawaii +16211600,USGS,Hawaii +16213000,USGS,Hawaii +16226200,USGS,Hawaii +16226400,USGS,Hawaii +16301050,USGS,Hawaii +16304200,USGS,Hawaii +16345000,USGS,Hawaii +16400000,USGS,Hawaii +16414200,USGS,Hawaii +16501200,USGS,Hawaii +16508000,USGS,Hawaii +16518000,USGS,Hawaii +16587000,USGS,Hawaii +16604500,USGS,Hawaii +16614000,USGS,Hawaii +16618000,USGS,Hawaii +16620000,USGS,Hawaii +16717000,USGS,Hawaii +16720000,USGS,Hawaii +16725000,USGS,Hawaii +16770500,USGS,Hawaii +50014800,USGS,Puerto Rico +50025155,USGS,Puerto Rico +50027000,USGS,Puerto Rico +50028000,USGS,Puerto Rico +50028400,USGS,Puerto Rico +50034000,USGS,Puerto Rico +50035000,USGS,Puerto Rico +50038100,USGS,Puerto Rico +50039500,USGS,Puerto Rico +50039995,USGS,Puerto Rico +50043197,USGS,Puerto Rico +50049100,USGS,Puerto Rico 
+50049620,USGS,Puerto Rico +50050900,USGS,Puerto Rico +50051310,USGS,Puerto Rico +50051800,USGS,Puerto Rico +50053025,USGS,Puerto Rico +50055000,USGS,Puerto Rico +50055225,USGS,Puerto Rico +50055750,USGS,Puerto Rico +50057000,USGS,Puerto Rico +50058350,USGS,Puerto Rico +50059210,USGS,Puerto Rico +50063800,USGS,Puerto Rico +50064200,USGS,Puerto Rico +50065500,USGS,Puerto Rico +50070900,USGS,Puerto Rico +50075000,USGS,Puerto Rico +50076000,USGS,Puerto Rico +50081000,USGS,Puerto Rico +50083500,USGS,Puerto Rico +50085100,USGS,Puerto Rico +50090500,USGS,Puerto Rico +50092000,USGS,Puerto Rico +50093000,USGS,Puerto Rico +50100200,USGS,Puerto Rico +50100450,USGS,Puerto Rico +50106100,USGS,Puerto Rico +50110650,USGS,Puerto Rico +50110900,USGS,Puerto Rico +50113800,USGS,Puerto Rico +50114900,USGS,Puerto Rico +50124200,USGS,Puerto Rico +50129254,USGS,Puerto Rico +50136400,USGS,Puerto Rico +50138000,USGS,Puerto Rico +50144000,USGS,Puerto Rico +08030530,USGS,CONUS +08031005,USGS,CONUS +08031020,USGS,CONUS +08041788,USGS,CONUS +08041790,USGS,CONUS +08041940,USGS,CONUS +08041945,USGS,CONUS +08041970,USGS,CONUS +08042455,USGS,CONUS +08042468,USGS,CONUS +08042470,USGS,CONUS +08042515,USGS,CONUS +08042539,USGS,CONUS +08064990,USGS,CONUS +08065080,USGS,CONUS +08065310,USGS,CONUS +08065340,USGS,CONUS +08065420,USGS,CONUS +08065700,USGS,CONUS +08065820,USGS,CONUS +08065925,USGS,CONUS +08066087,USGS,CONUS +08066138,USGS,CONUS +08066380,USGS,CONUS +08067280,USGS,CONUS +08067505,USGS,CONUS +08067520,USGS,CONUS +08067653,USGS,CONUS +08068020,USGS,CONUS +08068025,USGS,CONUS +08070220,USGS,CONUS +08070550,USGS,CONUS +08070900,USGS,CONUS +08076990,USGS,CONUS +08077110,USGS,CONUS +08077640,USGS,CONUS +08077670,USGS,CONUS +08077888,USGS,CONUS +08078400,USGS,CONUS +08078890,USGS,CONUS +08078910,USGS,CONUS +08078935,USGS,CONUS +08097000,USGS,CONUS +08098295,USGS,CONUS +08100950,USGS,CONUS +08102730,USGS,CONUS +08108705,USGS,CONUS +08108710,USGS,CONUS +08109310,USGS,CONUS +08110520,USGS,CONUS +08111006,USGS,CONUS +08111051,USGS,CONUS +08111056,USGS,CONUS +08111070,USGS,CONUS +08111080,USGS,CONUS +08111085,USGS,CONUS +08111090,USGS,CONUS +08111110,USGS,CONUS +08117375,USGS,CONUS +08117403,USGS,CONUS +08117857,USGS,CONUS +08117858,USGS,CONUS +08162580,USGS,CONUS +08163720,USGS,CONUS +08163880,USGS,CONUS +08163900,USGS,CONUS +08164150,USGS,CONUS +08164200,USGS,CONUS +08164410,USGS,CONUS +08167000,USGS,CONUS +08169778,USGS,CONUS +08173210,USGS,CONUS +08174545,USGS,CONUS +08180990,USGS,CONUS +08189298,USGS,CONUS +08189320,USGS,CONUS +08189520,USGS,CONUS +08189585,USGS,CONUS +08189590,USGS,CONUS diff --git a/data/module_ipes/cfe_params.csv b/data/module_ipes/cfe_params.csv new file mode 100644 index 0000000..b829027 --- /dev/null +++ b/data/module_ipes/cfe_params.csv @@ -0,0 +1,25 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +soil_params.b,beta exponent on Clapp-Hornberger (1978) soil water relations,NULL,double,True,attr,2,15,4.05,mode.bexp_soil_layers_stag=1, +soil_params.satdk,saturated hydraulic conductivity,m/s,double,True,attr,0.000000195,0.00141,0.00000338,geom_mean.dksat_soil_layers_stag=1, +soil_params.satpsi,saturated capillary head,m,double,True,attr,0.036,0.955,0.355,geom_mean.psisat_soil_layers_stag=1, +soil_params.slop,this factor (0-1) modifies the gradient of the hydraulic head at the soil bottom. 
0=no-flow.,m/m,double,True,attr,0.0000598,1,0.05,mean.slope_1km, +soil_params.smcmax,saturated soil moisture content (Maximum soil moisture content),m/m,double,True,attr,0.16,0.58,0.439,mean.smcmax_soil_layers_stag=1, +soil_params.wltsmc,wilting point soil moisture content (< soil_params.smcmax),m/m,double,True,attr,0.05,0.3,0.439,mean.smcwlt_soil_layers_stag=1, +soil_params.expon,"optional; defaults to 1, This parameter defines the soil reservoirs to be linear, Use linear reservoirs",NULL,double,False,const,NULL,NULL,1,, +soil_params.expon_secondary,"optional; defaults to 1, This parameter defines the soil reservoirs to be linear, Use linear reservoirs ",NULL,double,False,const,NULL,NULL,1,, +max_gw_storage,maximum storage in the conceptual reservoir,m,double,True,attr,0.01,0.25,0.05,mean.Zmax, +Cgw,the primary outlet coefficient,m/hr,double,True,attr,0.0000018,0.0018,0.000018,mean.Coeff, +expon,exponent parameter for nonlinear ground water reservoir (1.0 for linear reservoir),NULL,double,True,attr,1,8,3,mode.Expon, +gw_storage,initial condition for groundwater reservoir - it is the ground water as a decimal fraction of the maximum groundwater storage (max_gw_storage) for the initial timestep,m/m,double,False,const,NULL,NULL,0.05,, +alpha_fc,alpha at fc for clapp hornberger (field capacity),NULL,double,False,const,NULL,NULL,0.33,, +soil_storage,initial condition for soil reservoir - it is the water in the soil as a decimal fraction of maximum soil water storage (smcmax x depth) for the initial timestep. Default = 0.5,m/m,double,False,const,NULL,NULL,0.5,, +K_nash,Nash Config param for lateral subsurface runoff (Nash discharge to storage ratio),1/m,double,True,const,0,1,0.003,, +K_lf,Nash Config param - primary reservoir,NULL,double,True,const,0,1,0.01,, +nash_storage,Nash Config param - secondary reservoir ,NULL,double,False,const,NULL,NULL,"0.0,0.0",, +giuh_ordinates,Giuh (geomorphological instantaneous unit hydrograph) ordinates in dt time steps,NULL,double,False,const,NULL,NULL,"0.55, 0.25, 0.2",, +a_Xinanjiang_inflection_point_parameter,when surface_water_partitioning_scheme=Xinanjiang ,NULL,double,True,iceberg,-0.5,0.5,-0.2,AXAJ,CFE-X_params +b_Xinanjiang_shape_parameter,when surface_water_partitioning_scheme=Xinanjiang ,NULL,double,True,iceberg,0.01,10,0.66,BXAJ,CFE-X_params +x_Xinanjiang_shape_parameter,when surface_water_partitioning_scheme=Xinanjiang ,NULL,double,True,iceberg,0.01,10,0.02,XXAJ,CFE-X_params +urban_decimal_fraction,when surface_water_partitioning_scheme=Xinanjiang,NULL,double ,False,const,0,1,0.01,, +refkdt,Reference Soil Infiltration Parameter (used in runoff formulation),NULL,double,True,attr,0.1,4,1,mean.refkdt, +soil_params.depth,soil depth,m,double,False,const,NULL,NULL,2,, \ No newline at end of file diff --git a/data/module_ipes/lasam_out.csv b/data/module_ipes/lasam_out.csv new file mode 100644 index 0000000..285b009 --- /dev/null +++ b/data/module_ipes/lasam_out.csv @@ -0,0 +1,16 @@ +variable,description +actual_evapotranspiration,volume of AET +giuh_runoff,volume of giuh runoff +groundwater_to_stream_recharge,outgoing water from ground reservoir to stream channel +infiltration,volume of infiltrated water +mass_balance,mass balance error +percolation,volume of water leaving soil through the bottom of the domain (ground water recharge) +potential_evapotranspiration,volume of PET +precipitation,total precipitation +soil_depth_layers,Soil depth layers +soil_depth_wetting_fronts,Soil depth wetting fronts +soil_moisture_wetting_fronts,Soil moisture wetting 
front +soil_num_wetting_fronts,Number of soil wetting fronts +soil_storage,volume of water left +surface_runoff,volume of water surface runoff +total_discharge,total outgoing water \ No newline at end of file diff --git a/data/module_ipes/lasam_params.csv b/data/module_ipes/lasam_params.csv new file mode 100644 index 0000000..c7eb030 --- /dev/null +++ b/data/module_ipes/lasam_params.csv @@ -0,0 +1,26 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +forcing_file,provides precip. and PET inputs,NULL,string,FALSE,NULL,NULL,NULL,NULL,, +soil_params_file,provides soil types with van Genuchton parameters,NULL,string,FALSE,NULL,NULL,NULL,NULL,, +theta_r,residual water content - the minimum volumetric water content that a soil layer can naturally attain,NULL,double,TRUE,NULL,0.01,0.15,0.095,, +theta_e,the maximum volumetric water content that a soil layer can naturally attain,NULL,double,TRUE,NULL,0.3,0.8,0.41,, +alpha,the van Genuchton parameter related to the inverse of air entry pressure,1/cm,double,TRUE,NULL,0.001,0.3,0.019,, +n,the van Genuchton parameter related to pore size distribution,NULL,double,TRUE,NULL,1.01,3,1.31,, +Ks,the saturated hydraulic conductivity of a soil,cm/h,double,TRUE,NULL,0.001,100,0.26,, +layer_thickness,individual layer thickness (not absolute),cm,double (1D array),FALSE,NULL,NULL,NULL,200,, +initial_psi,used to initialize layers with a constant head,cm,double,FALSE,NULL,0,NULL,2000,, +ponded_depth_max,max amount of water unavailable for surface drainage,cm,double,TRUE,NULL,0,5,1.1,, +timestep,timestep of the model,sec/min/hr,double,FALSE,NULL,0,NULL,300,, +forcing_resolution,timestep of the forcing data,sec/min/hr,double,FALSE,NULL,NULL,NULL,3600,, +endtime,time at which model simulation ends,"sec, min, hr, d",double,FALSE,NULL,0,NULL,NULL,, +layer_soil_type,layer soil type (read from soil_params_file),NULL,int (1D array),FALSE,NULL,NULL,NULL,9,, +max_valid_soil_types,max number of valid soil types read from soil_params_file,NULL,int,FALSE,NULL,1,NULL,15,, +wilting_point_psi,the amount of water not available for plants - used in computing AET,cm,double,FALSE,NULL,NULL,NULL,15495,, +field_capacity_psi,capillary head corresponding to volumetric water content at which gravity drainage becomes slower - used in computing AET,cm,double,TRUE,NULL,10.3,516.6,340.9,, +use_closed_form_G,determines whether the numeric integral or closed form for G is used; a value of true will use the closed form,NULL,boolean,FALSE,NULL,NULL,NULL,FALSE,, +giuh_ordinates,GIUH ordinates (for giuh based surface runoff),NULL,double (1D array),FALSE,NULL,NULL,NULL,"0.06,0.51,0.28,0.12,0.03",, +verbosity,controls IO (screen outputs and writing to disk),NULL,string,FALSE,NULL,NULL,NULL,NULL,, +sft_coupled,couples LASAM to SFT,NULL,boolean,FALSE,NULL,NULL,NULL,NULL,, +soil_z,vertical resolution of the soil column,cm,double (1D array),FALSE,NULL,NULL,NULL,NULL,, +calib_params,"when set to true, calibratable params are calibrated",NULL,boolean,FALSE,NULL,NULL,NULL,FALSE,, +adaptive_timestep,"when set to true, will use an internal adaptive timestep, and the above timestep is used as a minimum timestep",NULL,boolean,FALSE,NULL,NULL,NULL,NULL,, + diff --git a/data/module_ipes/modules.csv b/data/module_ipes/modules.csv new file mode 100644 index 0000000..b12d663 --- /dev/null +++ b/data/module_ipes/modules.csv @@ -0,0 +1,10 @@ +module,file,outputs +CFE-X,cfe_params.csv, +Noah-OWP-Modular,noah_owp_modular_params.csv, +Snow-17,snow17_params.csv, 
+Sac-SMA,sac_sma_params.csv, +TopModel,topmodel_params.csv, +UEB,ueb_params.csv, +SFT,sft_params.csv, +SMP,smp_params.csv, +LASAM,lasam_params.csv,lasam_out.csv diff --git a/data/module_ipes/noah_owp_modular_params.csv b/data/module_ipes/noah_owp_modular_params.csv new file mode 100644 index 0000000..dea7f31 --- /dev/null +++ b/data/module_ipes/noah_owp_modular_params.csv @@ -0,0 +1,14 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +RSURF_EXP,Exponent in the resistance equation for soil evaporation,NA,double,True,const,1,6,4.84,, +CWPVT,Canopy wind parameter for canopy wind profile formulation,m-1,double,True,const,0.09,0.36,0.18,, +VCMX25,Maximum carboxylation at 25oC,umol/m2/s,double,True,const,24,112,52.2,, +MP,Slope of Ball-Berry conductance relationship,NA,double,True,const,3.6,12.6,9.7,, +MFSNO,Melt factor for snow depletion curve,NA,double,True,const,0.5,4,2,, +RSURF_SNOW,Soil surface resistance for snow,s/m,double,True,const,0.136,100,49.2,, +SCAMAX,Maximum fractional snow cover area,NA,double,True,const,0.7,1,0.89,, +ISLTYP,Soil type,NA,integer,False,attr,,,,mode.ISLTYP, +IVGTYP,Vegetation type,NA,integer,False,attr,,,,mode.IVGTYP, +longitude,longitude,degrees,double,False,attr,-180,180,,centroid_x, +latitude,latitude,degrees,double,False,attr,-90,90,,centroid_y, +slope,slope of terrain,degrees,double,False,attr,0,90,,mean.slope, +azimuth,azimuth,degrees,double,False,attr,0,360,,circ_mean.aspect, diff --git a/data/module_ipes/sac_sma_params.csv b/data/module_ipes/sac_sma_params.csv new file mode 100644 index 0000000..c8ce61e --- /dev/null +++ b/data/module_ipes/sac_sma_params.csv @@ -0,0 +1,20 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +hru_id,Identification string for each hrus,NULL,char,False,const,25,125,NA,divide_id, +hru_area,Area of each HRU,NULL,double,False,attr,10,100,NA,areasqkm, +uztwm,upper zone tension water maximum storage,mm,double,True,iceberg,25,125,75,,sac_sma_params +uzfwm,Maximum upper zone free water,mm,double,True,iceberg,10,100,30,,sac_sma_params +lztwm,Maximum lower zone tension water,mm,double,True,iceberg,75,300,150,,sac_sma_params +lzfsm,"Maximum lower zone free water, secondary (aka supplemental)",mm,double,True,iceberg,15,300,150,,sac_sma_params +lzfpm,"Maximum lower zone free water, primary",mm,double,True,iceberg,40,600,300,,sac_sma_params +adimp,Additional impervious area due to saturation,decimal percent,double,True,const,0,0.2,0,, +uzk,Upper zone recession coefficient,per day ,double,True,iceberg,0.2,0.5,0.3,,sac_sma_params +lzpk,"Lower zone recession coefficient, primary",decimal percent,double,True,iceberg,0.001,0.015,0.01,,sac_sma_params +lzsk,"Lower zone recession coefficient, secondary (aka supplemental)",decimal percent,double,True,iceberg,0.03,0.2,0.1,,sac_sma_params +zperc,Minimum percolation rate coefficient,NULL,double,True,iceberg,20,300,10,,sac_sma_params +rexp,Percolation equation exponent,NULL,double,True,iceberg,1.4,3.5,2,,sac_sma_params +pctim,impervious fraction of the watershed area ,decimal percent,double,True,const,0,0.05,0,, +pfree,fraction of water percolating from upper zone directly to lower zone free water storage. 
,decimal percent,double,True,iceberg,0,0.5,0.1,,sac_sma_params +riva,Percent of the basin that is riparian area,decimal percent,double,True,const,0,0.2,0,, +side,Portion of the baseflow which does not go to the stream,decimal percent,double,False,const,0,0.2,0,, +rserv,Percent of lower zone free water not transferable to the lower zone tension water,decimal percent,double,False,const,0.2,0.4,0.3,, + diff --git a/data/module_ipes/sft_params.csv b/data/module_ipes/sft_params.csv new file mode 100644 index 0000000..1af8bd7 --- /dev/null +++ b/data/module_ipes/sft_params.csv @@ -0,0 +1,14 @@ +name,description,units,data_type,calibratable,source_file,min,max,nwm_name,default_value +end_time,Simulation duration. If no unit is specified defaults to hour.,"s, sec, h, hr, d, day",double,FALSE,const,NULL,NULL,NULL,NULL +soil_params.quartz,"soil quartz content, used in soil thermal conductivity function of Peters-Lidard",m,double,FALSE,const,NULL,NULL,NULL,NULL +ice_fraction_scheme,"runoff scheme used in the soil reservoir models (e.g. CFE), options: Schaake and Xinanjiang",NULL,int,FALSE,const,NULL,NULL,NULL,NULL +soil_z,vertical resolution of the soil column (computational domain of the SFT model),m,double,FALSE,const,NULL,NULL,NULL,NULL +soil_temperature,initial soil temperature for the discretized column,K,double,FALSE,const,NULL,NULL,NULL,NULL +soil_moisture_content,initial soil total (liquid + ice) moisture content for the discretized column,NULL,double,FALSE,const,NULL,NULL,NULL,NULL +soil_liquid_content,initial soil liquid moisture content for the discretized column,NULL,double,FALSE,const,NULL,NULL,NULL,NULL +bottom_boundary_temp,"temperature at the bottom boundary (BC) of the domain, if not specified, the default BC is zero-geothermal flux",K,double,FALSE,const,NULL,NULL,NULL,NULL +top_boundary_temp,"temperature at the top/surface boundary of the domain, if not specified, then other options include: 1) read from a file, or 2) provided through coupling",K,double,FALSE,const,NULL,NULL,NULL,NULL +sft_standalone,true for standalone model run; default is false,NULL,boolean,FALSE,const,NULL,NULL,NULL,NULL +soil_moisture_bmi,If true soil_moisture_profile is set by the SoilMoistureProfile module through the BMI; if false then config file must provide soil_moisture_content and soil_liquid_content,NULL,boolean,FALSE,const,NULL,NULL,NULL,NULL +dt,Size of a simulation timestep. If no unit is specified defaults to hour.,"s, sec, h, hr, d, day",double,FALSE,const,NULL,NULL,NULL,NULL +verbosity,"high, low, or none",NULL,NULL,FALSE,const,NULL,NULL,NULL,NULL diff --git a/data/module_ipes/smp_params.csv b/data/module_ipes/smp_params.csv new file mode 100644 index 0000000..b8b13ee --- /dev/null +++ b/data/module_ipes/smp_params.csv @@ -0,0 +1,8 @@ +name,description,units,data_type,calibratable,source,min,max,nwm_name,default_value,divide_attr_name,source_file +soil_z,vertical resolution of the soil moisture profile (depths from the surface),m,double,FALSE,NULL,NULL,NULL,NULL,NULL,, +soil_storage_depth,"depth of the soil reservoir model (e.g., CFE). Note: this depth can be different from the depth of the soil moisture profile which is based on soil_z",m,double,FALSE,NULL,NULL,NULL,NULL,NULL,, +soil_storage_model,"if conceptual, conceptual models are used for computing the soil moisture profile (e.g., CFE). If layered, layered-based soil moisture models are used (e.g., LGAR). 
If topmodel, topmodel's variables are used",NULL,string,FALSE,NULL,NULL,NULL,NULL,NULL,, +soil_moisture_profile_option,constant for layered-constant profile. linear for linearly interpolated values between two consecutive layers. Needed if soil_storage_model = layered.,NULL,string,FALSE,NULL,NULL,NULL,NULL,NULL,, +soil_depth_layers,Absolute depth of soil layers. Needed if soil_storage_model = layered.,NULL,double,FALSE,NULL,NULL,NULL,NULL,NULL,, +soil_moisture_fraction_depth,**user specified depth for the soil moisture fraction (default is 40 cm),m,double,FALSE,NULL,NULL,NULL,NULL,NULL,, +water_table_based_method,"Needed if soil_storage_model = topmodel. flux-based uses an iterative scheme, and deficit-based uses catchment deficit to compute soil moisture profile",NULL,string,FALSE,NULL,NULL,NULL,NULL,NULL,, diff --git a/data/module_ipes/snow17_params.csv b/data/module_ipes/snow17_params.csv new file mode 100644 index 0000000..671cbe1 --- /dev/null +++ b/data/module_ipes/snow17_params.csv @@ -0,0 +1,27 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +mfmax, Maximum melt factor during non-rain periods – assumed to occur on June 21,mm/˚C/hr,double,True,iceberg,0.1,2.2,1,,sac_sma_params +hru_area, needed for combination and routing conv, sq-km,double,False,attr,,,,areasqkm, +latitude, centroid latitude of hru, decimal degrees,double,False,attr,,,,centroid_y, +elev, mean elevation of hru, m,double,False,attr,,,,mean.elevation, +uadj, The average wind function during rain-on-snow periods,mm/mb/6 hr,double,True,iceberg,0.01,0.2,0.05,, +si, The mean areal water equivalent above which there is always 100 percent areal snow cover,mm,double,True,const,0,10000,500,, +adc1, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.05,0.05,0.05,, +adc2, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.1,0.1,0.1,, +adc3, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.2,0.2,0.2,, +adc4, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.3,0.3,0.3,, +adc5, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.4,0.4,0.4,, +adc6, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.5,0.5,0.5,, +adc7, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.6,0.6,0.6,, +adc8, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.7,0.7,0.7,, +mfmin, Minimum melt factor during non-rain periods – assumed to occur on December 21,mm/˚C/hr,double,True,iceberg,0.01,0.6,0.2,, +hru_id, local hru ids for multiple hrus,,string,False,const,,,,, +scf,The 
multiplying factor which adjusts precipitation that is determined to be in the form of snow,,double,True,const,0.9,1.8,1.1,, +adc9, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.8,0.8,0.8,, +adc10, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,0.9,0.9,0.9,, +adc11, Curve that defines the areal extent of the snow cover as a function of how much of the original snow cover remains after significant bare ground shows up,NULL,double,False,const,1,1,1,, +nmf, Maximum negative melt factor,mm/˚C/hr,double,True,const,0.01,0.3,0.15,, +tipm, Antecedent temperature index parameter,,double,True,const,0,1,0.1,, +pxtemp, Antecedent temperature index parameter,˚C,double,True,const,0.5,5,1,, +mbase, Base temperature for snowmelt computations during non-rain periods,˚C,double,False,const,0,0,0,, +plwhc, maximum amount of liquid water as a fraction of the ice portion of the snow that can be held against gravity drainage, %,double,True,const,0.01,0.3,0.03,, +daygm, Constant daily amount of melt which takes place at the snow-soil interface whenever there is a snow cover,mm/day,double,True,const,0,0.5,0,, diff --git a/data/module_ipes/topmodel_params.csv b/data/module_ipes/topmodel_params.csv new file mode 100644 index 0000000..28b8547 --- /dev/null +++ b/data/module_ipes/topmodel_params.csv @@ -0,0 +1,26 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +subcat,character title of subcatment; often same as model title,,char,False,const,,,,, +szm,exponential scaling parameter for the decline of transmissivity with increase in storage deficit; units of depth,meters,double,True,const,0.001,0.25,0.0125,, +t0,downslope transmissivity when the soil is just saturated to the surface,meters/hour,double,True,const,0,0.0001,0.000075,, +td,unsaturated zone time delay per unit storage deficit,hours,double,True,const,0.001,40,20,, +chv,average channel flow velocity,meters/hour,double,True,const,50,2000,1000,, +rv,internal overland flow routing velocity,meters/hour,double,True,const,50,2000,1000,, +srmax,maximum root zone storage deficit,meters,double,True,const,0.005,0.05,0.04,, +Q0,initial subsurface flow per unit area,meters/hour,double,False,const,0,,0.0000328,, +sr0,initial root zone storage deficit below field capacity,meters,double,True,const,0,0.1,0,, +infex,set to 1 to call subroutine to do infiltration excess calcs; not usually appropriate in catchments where Topmodel is applicable (shallow highly permeable soils); default to 0,,int,False,const,0,1,,, +xk0,surface soil hydraulic conductivity,meters/hour,double,True,const,0.0001,0.2,2,, +hf,wetting front suction for G&A soln.,meters,double,False,const,0.01,0.5,0.1,, +dth,water content change across the wetting front; dimensionless,,double,False,const,0.01,0.6,0.1,, +num_sub_catchments,number of subcatments; BMI adaption always sets to 1 as loop to be handled by framework,,int,False,const,1,1,,, +imap,ordinarily tells code to write map; NOT IMPLEMENTED,,int,False,const,,,,, +yes_print_output,set equal to 1 to print output files,,int,False,const,,,,, +subcat,the name of each sub-catchment,,string,False,const,,,,, +num_topodex_values,number of topodex histogram values,,int,False,const,1,30,,, +area,catchment area as % to whole catchment (set to 
1),,double,False,const,0,1,,, +dist_area_lnaotb,the distribution of area corresponding to ln(A/tanB) histo.,,double,False,const,0,1,,, +lnaotb,ln(a/tanB) values; TWI,,double,False,attr,,,,dist_4.twi, +num_channels,number of channels,,int,False,const,1,10,1,, +cum_dist_area_with_dist,channel cum. distr. of area with distance,,double,False,const,0,1,1,, +dist_from_outlet,distance from outlet to point on channel with area known,meters,double,False,attr,0,,,lengthkm, + diff --git a/data/module_ipes/ueb_params.csv b/data/module_ipes/ueb_params.csv new file mode 100644 index 0000000..05d0ab2 --- /dev/null +++ b/data/module_ipes/ueb_params.csv @@ -0,0 +1,66 @@ +name,description,units,data_type,calibratable,source,min,max,default_value,divide_attr_name,source_file +USic,Energy content initial condition,kg m-3,double,False,const,,,0,, +WSis,Snow water equivalent initial condition,m,double,False,const,,,0,, +Tic,Snow surface dimensionless age initial condition,NULL,double,False,const,,,0,, +WCic,Snow water equivalent dimensionless age initial condition,m,double,False,const,,,0,, +df,Drift factor multiplier,NULL,double,True,const,0.5,6,1,, +apr,Average atmospheric pressure,Pa,double,True,iceberg,30000,101325,,atm_pres, +Aep,Albedo extinction coefficient,m,double,False,const,NULL,NULL,0.1,, +cc,Canopy coverage fraction,NULL,double,True,const,0,0.8,0.4,, +hcan,Canopy height,m,integer,True,const,0,10,5,, +lai,Leaf area index,NULL,integer,True,const,0,4,2,, +sbar,Maximum snow load held per unit branch area,kg/m^2,double,False,const,,,6.6,, +ycage,Forest age flag for wind speed profile parameterization,NULL,double,False,const,2,3,2.5,, +Slope,A 2-D grid that contains the slope at each grid point,degrees,double,False,attr,,,,mean.slope, +aspect,A 2-D grid that contains the aspect at each grid point,degrees,double,False,attr,,,,circ_mean.aspect, +latitude,A 2-D grid that contains the latitude at each grid point,degrees,double,False,attr,,,,centroid_y, +subalb,The fraction of shortwave radiation (fraction 0-1) reflected by the substrate beneath the snow (ground or glacier),NULL,double,True,const,0.25,0.7,0.25,, +subtype,Type of beneath snow substrate ,NULL,integer,False,const,0,3,0,, +gsurf,The fraction of surface melt that runs off (e.g. 
from a glacier),NULL,double,False,const,,,0,, +b01,Monthly mean of daily temperature range for January used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b02,Monthly mean of daily temperature range for February used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b03,Monthly mean of daily temperature range for March used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b04,Monthly mean of daily temperature range for April used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b05,Monthly mean of daily temperature range for May used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b06,Monthly mean of daily temperature range for June used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b07,Monthly mean of daily temperature range for July used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b08,Monthly mean of daily temperature range for August used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b09,Monthly mean of daily temperature range for September used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b10,Monthly mean of daily temperature range for October used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b11,Monthly mean of daily temperature range for November used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +b12,Monthly mean of daily temperature range for December used in Bristow Campbell formulas for atmospheric transmissivity,C,double,False,iceberg,,,,,ueb_deltat +ts_last,Snow surface temperature one day prior to the model starting time,C,double,False,const,,,-9999,,ueb_deltat +longitude,A 2-D grid that contains the longitude at each grid,degrees,double,False,attr,,,,centroid_x, +ems,Emissivity of snow,NULL,double,True,const,0.98,0.99,0.99,, +cg,Ground heat capacity,KJ/kg/˚C,double,True,const,2.09,2.12,2.09,, +zo,Roughness length,m,double,True,const,0.0002,0.014,0.01,, +rho,Snow density,kg/m3,double,True,const,100,600,300,, +rhog,Soil density,kg/m3,double,True,const,1100,1700,1700,, +ks,Snow saturated hydraulic conductivity,m/hr,integer,True,const,0,20,20,, +de,Thermally active soil depth,m,double,True,const,0.1,0.4,0.1,, +avo,Visual new snow albedo,,double,True,const,0.85,0.95,0.95,, +irad,Radiation control flag,NULL,integer,False,const,0,2,2,, +ireadalb,Albedo reading control flag,NULL,integer,False,const,0,1,0,, +tr,Rain threshold temperature,˚C,double,False,const,,,3,, +ts,Snow threshold temperature,˚C,double,False,const,,,-1,, +z,Air measurement height,m,double,False,const,,,2,, +lc,Liquid holding capacity,NULL,double,False,const,,,0.05,, +anir0,NIR new snow albedo,NULL,double,False,const,,,0.65,, +lans,Thermal conductivity of surface snow,kJ/ m/C/ hr,double,False,const,,,1,, +lang,Thermal conductivity of soil,kJ/ m/C/ hr,double,False,const,,,4,, +wlf,Low frequency surface temperature parameter,rad/hr,double,False,const,,,0.0654,, +rd1,Damping depth adjustment parameter,NULL,double,False,const,,,1,, +dnews,New snow threshold depth,m,double,False,const,,,0.001,, +emc,Canopy emissivity,NULL,double,False,const,,,0.98,, 
+alpha,Shortwave leaf scattering coefficient,NULL,double,False,const,,,0.5,, +alphal,Scattering coefficient for long wave radiation,NULL,double,False,const,,,0,, +g,Leaf orientation geometry factor,degree,double,False,const,,,0.5,, +uc,Unloading rate coefficient,hr-1,double,False,const,,,0.00463,, +as,Cloudy atmospheric transmissivity,NULL,double,False,const,,,0.25,, +bs,Clear sky atmospheric transmissivity increment,NULL,double,False,const,,,0.5,, +lambda,Clear sky direct radiation fraction,NULL,double,False,const,,,0.857,, +rimax,Richardson number upper bound,NULL,double,False,const,,,0.16,, +wcoeff,Forest wind decay coefficient,NULL,double,False,const,,,0.5,, +a,Transmissivity parameter,NULL,double,False,const,,,0.8,, +c,Transmissivity exponent,NULL,double,False,const,,,2.4,, + diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..d5666f1 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,61 @@ +FROM python:3.12-slim AS base + +# Install system dependencies including GDAL +RUN apt-get update && apt-get install -y \ + curl \ + gdal-bin \ + libgdal-dev \ + gcc \ + g++ \ + && rm -rf /var/lib/apt/lists/* + +# Set GDAL environment variables +ENV GDAL_CONFIG=/usr/bin/gdal-config +ENV CPLUS_INCLUDE_PATH=/usr/include/gdal +ENV C_INCLUDE_PATH=/usr/include/gdal + +# Install UV +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +WORKDIR /app + +# Builder stage (install deps, build docs) +FROM base AS builder +COPY . . + +# Create virtual env and install deps from pyproject.toml +RUN uv venv && uv sync + +# Install the local icefabric package itself in editable mode +# This registers the package in the virtual environment +RUN uv pip install -e . + +# Build docs into static site +RUN uv pip install ".[docs]" && uv run mkdocs build -d /app/static/docs + +# Final stage / Final Image +FROM base AS final +WORKDIR /app + +# Copy virtualenv, the runnable 'app', the 'icefabric' source, and docs +COPY --from=builder /app/.venv ./.venv +COPY --from=builder /app/app ./app +COPY --from=builder /app/src ./src +COPY --from=builder /app/static/docs ./static/docs +COPY --from=builder /app/.pyiceberg.yaml . +COPY --from=builder /app/pyproject.toml . 
+ +# Set the PATH to use the virtualenv +ENV PATH="/app/.venv/bin:$PATH" + +# Set the PYTHONPATH so the interpreter can find the icefabric module +ENV PYTHONPATH="/app/src" + +# Create the data directory that the application needs to run +RUN mkdir /app/data + +# Expose the port the application will run on +EXPOSE 8000 + +# Command to run the FastAPI application +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/docker/Dockerfile.api b/docker/Dockerfile.api new file mode 100644 index 0000000..9f33a5f --- /dev/null +++ b/docker/Dockerfile.api @@ -0,0 +1,36 @@ +FROM python:3.12-slim + +# Install system dependencies including GDAL +RUN apt-get update && apt-get install -y \ + curl \ + gdal-bin \ + libgdal-dev \ + gcc \ + g++ \ + && rm -rf /var/lib/apt/lists/* + +# Set GDAL environment variables +ENV GDAL_CONFIG=/usr/bin/gdal-config +ENV CPLUS_INCLUDE_PATH=/usr/include/gdal +ENV C_INCLUDE_PATH=/usr/include/gdal + +# Install curl for UV installation +RUN apt-get update && apt-get install -y curl + +# Install UV properly by copying from the official image +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +# Set the working directory +WORKDIR /app + +# Copy the content of the build context to /app +COPY ../ /app + +RUN uv venv +ENV PATH="/app/.venv/bin:$PATH" + +# Install the package in development mode +RUN uv sync + +# Run the API +CMD ["python", "-m", "app.main"] diff --git a/docker/compose.yaml b/docker/compose.yaml new file mode 100644 index 0000000..19cd5b7 --- /dev/null +++ b/docker/compose.yaml @@ -0,0 +1,9 @@ +services: + myapp: + build: + context: ../ + dockerfile: docker/Dockerfile.api + ports: + - "8000:8000" + env_file: + - ../.env diff --git a/docs/datasets/hydrofabric.md b/docs/datasets/hydrofabric.md new file mode 100644 index 0000000..288e343 --- /dev/null +++ b/docs/datasets/hydrofabric.md @@ -0,0 +1,25 @@ +# Hydrofabric + +### Catalog information + +The hydrofabric is a unique case where there are many namespaces containing the same tables/layer layouts. This is to model the version 2.2 Hydrofabric dataset. + +
+<figure markdown>
+  ![Hydrofabric data model](../img/hydrofabric_data_model.png){ width="600" }
+  <figcaption>The Hydrofabric v2.2 Data Model. Credit to Lynker Spatial: https://lynker-spatial.s3-us-west-2.amazonaws.com/hydrofabric/v2.2/hfv2.2-data_model.html</figcaption>
+</figure>
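+
+Because every domain namespace shares the same table layout, the same PyIceberg calls work against any of the domains listed below. A minimal sketch, assuming the `glue` catalog configured elsewhere in these docs; the `conus_hf.divides` layer name is an assumption for illustration only:
+
+```py
+from pyiceberg.catalog import load_catalog
+
+# Assumes the same "glue" catalog configuration used in the other dataset docs
+catalog = load_catalog("glue")
+
+# Each hydrofabric domain is a namespace containing the same set of layers/tables
+print(catalog.list_namespaces())
+print(catalog.list_tables("conus_hf"))
+
+# Load one layer from a domain namespace (layer name here is illustrative)
+table = catalog.load_table("conus_hf.divides")
+df = table.scan(limit=10).to_pandas()
+```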
+ +#### Namespaces + +The following namespaces make up the Full Hydrofabric Dataset + +- `conus_hf` + - The CONUS Domain +- `ak_hf` + - The Alaskan Domain +- `hi_hf` + - The Hawaii Domain +- `prvi_hf` + - The Puerto Rico/Virgin Islands Domain +- `gl_hf` + - The Great Lakes Domain diff --git a/docs/datasets/index.md b/docs/datasets/index.md new file mode 100644 index 0000000..4123244 --- /dev/null +++ b/docs/datasets/index.md @@ -0,0 +1,3 @@ +# Versioned Datasets + +The following pages are to document the specific datasets versioned in the icefabric diff --git a/docs/datasets/ras_xs.md b/docs/datasets/ras_xs.md new file mode 100644 index 0000000..d4e3a54 --- /dev/null +++ b/docs/datasets/ras_xs.md @@ -0,0 +1,70 @@ +# RAS XS + +### Catalog information +#### Namespace: `ras_xs` + +This namespace contains HEC-RAS submodels used to create FIMs. + +### Tables + +#### `extracted` + +These are processed HEC-RAS cross-section attributes per flowpath extracted to the Hydrofabric v3 reference. There is one representative XS per flowpath with joined metadata from hydraulic units and NHDPlus. + +##### How is this file created: + +1. Download the reference fabric from PyIceberg and MIP data from S3. + + The MIP data is located in the following path on the NGWPC S3 bucket: `s3://fimc-data/ripple/fim_100_domain/collections/` and can be downloaded using the following command: `aws s3 sync s3://fimc-data/ripple/fim_100_domain/collections/ . --exclude "*" --include "*.gpkg"`. Within the OE, this data has already been downloaded for local use in the following location: `/efs/edfs-data/pi_5/mip_full_collection/`. + + The v3 reference fabric has been version-controlled within the NGWPC Data Lake and can be downloaded using the following script: `python tools/hydrofabric/download_reference_fabric.py --catalog glue` if it is not on your local system. + +2. Extracting the HEC-RAS `submodels` + + The `icefabric/tools/iceberg/R/fema_xs_processing.R` script processes elevation profile data from FEMA BLE submodels, extracting key cross-sectional (XS) geometry attributes for hydraulic analysis. The overall goal is to compute summary statistics and representative cross-sections per stream reach in the reference fabric, so that it can be used within the existing riverML framework for predicting channel size and shape. This work was completed by Lynker-Spatial and intergrated into the Icefabric by the Raytheon NGWPC EDFS Team. + + Inside of this function each submodel has its cross-sections (XS) read into memory and transformed into a common CRS (EPSG:5070). + + The following data operations are performed within the code: + + - Parse and clean the raw string of elevation points. + - Identify left/right bank extents. + - Subset the relevant portion and smooth the elevation. + - Filter out degenerate transects (e.g., no real depth). + - Compute metrics: + - Ym: channel depth + - TW: top width + - A: channel area + - r: Dingmans R coefficient + - Save it as a geometry object with selected metadata. + - Also extracts metadata like the coordinate system units and stores them for each reach. + + Once the data is extracted, it's merged together where one XS is made per flowpath. + +3. Exporting the geopackage to parquet + + Using the following command `python tools/ras_xs/gpkg_to_parquet.py --gpkg riverML_ripple_beta.gpkg` to write the geopackage to an arrow parquet file. 
This step is done in Python rather than in the R script because geopandas supports writing geometries to WKB, which can be ingested by Arrow/S3 Tables (see the sketch after the example output below). + +4. Writing the parquet file to the PyIceberg S3 Tables + +Use the following command to write the output parquet file into the PyIceberg S3 Tables: `python tools/iceberg/production/build_ras_xs.py --file data/hydrofabric/riverML_ripple_beta.gpkg.parquet`. + +Once the data is loaded, you can see the following output from PyIceberg: + +##### Example Output: +```py +>>> load_catalog("glue").load_table("ras_xs.extracted").scan().to_pandas() + flowpath_id r TW Y source_river_station river_station model ftype streamorde geometry +0 10023956 1.557875 67.663333 6.398817 560.22 2.0 /Users/taddbindas/projects/NGWPC/icefabric/dat... StreamRiver 2.0 b'\x01\x02\x00\x00\x00\x05\x00\x00\x00\xc5mkmd... +1 10023980 2.258627 31.300000 0.615000 3403.89 14.0 /Users/taddbindas/projects/NGWPC/icefabric/dat... StreamRiver 5.0 b'\x01\x02\x00\x00\x00\x04\x00\x00\x00\x19X\xc... +2 10023986 0.923102 168.696667 5.120922 335.04 2.0 /Users/taddbindas/projects/NGWPC/icefabric/dat... CanalDitch 2.0 b'\x01\x02\x00\x00\x00\x05\x00\x00\x00\x03VE\x... +3 10024318 1.884064 201.115000 9.884953 74820.00 1.0 /Users/taddbindas/projects/NGWPC/icefabric/dat... StreamRiver 5.0 b'\x01\x02\x00\x00\x00\x02\x00\x00\x00\xd1!.^i... +4 10024334 2.150152 362.250000 13.597386 81856.00 6.0 /Users/taddbindas/projects/NGWPC/icefabric/dat... StreamRiver 5.0 b'\x01\x02\x00\x00\x00\x04\x00\x00\x00<\x81\x8... +``` + +
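+The snippet below is a minimal sketch of the geopackage-to-parquet conversion described in step 3, assuming the input file name from above; the actual logic lives in `tools/ras_xs/gpkg_to_parquet.py`, and its column handling may differ.
+
+```python
+import geopandas as gpd
+import pandas as pd
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+# Read the geopackage produced by the R extraction step
+gdf = gpd.read_file("riverML_ripple_beta.gpkg")
+
+# Encode the geometries as WKB bytes so Arrow/S3 Tables can ingest them
+df = pd.DataFrame(gdf.drop(columns="geometry"))
+df["geometry"] = gdf.geometry.to_wkb()
+
+# Write an Arrow-compatible parquet file
+pq.write_table(pa.Table.from_pandas(df, preserve_index=False), "riverML_ripple_beta.gpkg.parquet")
+```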
+ ![Sample RAS XS](../img/ras_xs.png){ width="600" } +
Example XS (in red) mapped to the reference fabric (blue)
+
+ +If you would like to further understand the operations done to the HEC-RAS XS, please see the document: https://lynker-spatial.github.io/mip-riverml/#final-data-access diff --git a/docs/description.md b/docs/description.md new file mode 100644 index 0000000..47ff1de --- /dev/null +++ b/docs/description.md @@ -0,0 +1,98 @@ +# Icefabric: Lakehouse Architecture for Hydrologic Data Management + +
+ ![Icefabric version controlling system](img/icefabric_version.png){ width="600" } +
The icefabric lakehouse architecture. Data is moved from sources into an underlying specified format (iceberg/icechunk) and served to consumers via APIs and services.
+
+ + +## Overview + +Icefabric implements a modern **lakehouse architecture** to combine the flexibility of data lakes with the performance and governance of a data warehouse. This system provides versioned, centralized access to hydrologic datasets to support the National Water Model. + +## The Problem: Hydrologic Data Complexity + +### Traditional Challenges + +Hydrologic research and operations face unique data management challenges: + +- **Heterogeneous Data Sources**: Datasets are sourced from different agencies in various formats +- **Multiple Formats**: Tabular, vectorized, COGs, etc. +- **Version Control Needs**: Hydrofabric topology updates, data quality improvements, and research reproducibility + +### Why Traditional Solutions Fall Short + +**Traditional database systems** struggle with: + +- Large geospatial datasets and complex geometries +- Schema evolution for evolving datasets +- Version control for scientific workflows + +**File-based approaches** suffer from: + +- Data duplication and storage inefficiencies +- Lack of ACID transactions +- Manual version management +- Limited discovery and access controls + +## Lakehouse Architecture Solution + +### Technology Stack Rationale + +=== "Apache Iceberg - Structured Data" + + **Used For:** + - Hydrofabric geospatial products + - Streamflow observations time series (USGS, Local Agencies) + - Cross-section geometries (RAS XS [MIP/BLE]) + + **Why Iceberg:** + - **ACID Transactions**: Ensure data consistency during hydrofabric updates + - **Schema Evolution**: Handle network topology changes without breaking existing workflows + - **Time Travel**: Access historical network versions for model comparisons + - **Performance**: Optimized queries across continental-scale datasets + - **Partition Pruning**: Efficient spatial and temporal filtering + +=== "Icechunk - Array Data" + + **Used For:** + - Topobathy elevation surfaces + - Land cover classifications + + **Why Icechunk:** + - **Virtual References**: Avoid duplicating large raster datasets + - **Zarr Compatibility**: Seamless integration with scientific Python ecosystem + - **Git-like Versioning**: Branch/merge workflows for experimental processing + - **Chunked Storage**: Optimized for geospatial access patterns + - **Compression**: Efficient storage of repetitive classification data + +## Benefits Realized + +### For Hydrologic Research + +- **Reproducible Science**: Exact data versions enable repeatable research +- **Collaborative Workflows**: Branching enables parallel research without conflicts +- **Quality Evolution**: Track data quality improvements over time + +### For Operational Forecasting + +- **Consistent Baselines**: Stable data versions for operational model runs +- **Real-time Integration**: Fast access to latest observations and forecasts +- **Rollback Capabilities**: Quick recovery from data quality issues + +### For Data Management + +- **Access Unification**: Single API for diverse hydrologic data types +- **Version Management**: Automated tracking eliminates manual version confusion +- **Quality Assurance**: Built-in validation prevents bad data propagation + +## Conclusion + +The Icefabric lakehouse architecture addresses fundamental challenges in hydrologic data management through: + +1. **Unified Access**: Single interface for diverse water data sources +2. **Version Control**: Git-like workflows for scientific data management +3. **Quality Assurance**: Automated validation and lineage tracking +4. 
**Research Support**: Reproducible environments for collaborative science + +This architecture enables EDFS to provide reliable, versioned, high-performance access to critical water resources data supporting both operational forecasting and cutting-edge research. diff --git a/docs/img/hydrofabric_data_model.png b/docs/img/hydrofabric_data_model.png new file mode 100644 index 0000000..5a8d0d9 Binary files /dev/null and b/docs/img/hydrofabric_data_model.png differ diff --git a/docs/img/icefabric.png b/docs/img/icefabric.png new file mode 100644 index 0000000..f52c528 Binary files /dev/null and b/docs/img/icefabric.png differ diff --git a/docs/img/icefabric_api.png b/docs/img/icefabric_api.png new file mode 100644 index 0000000..4ef872d Binary files /dev/null and b/docs/img/icefabric_api.png differ diff --git a/docs/img/icefabric_mission.png b/docs/img/icefabric_mission.png new file mode 100644 index 0000000..3a3ffc5 Binary files /dev/null and b/docs/img/icefabric_mission.png differ diff --git a/docs/img/icefabric_version.png b/docs/img/icefabric_version.png new file mode 100644 index 0000000..2762cc4 Binary files /dev/null and b/docs/img/icefabric_version.png differ diff --git a/docs/img/production_promotion.png b/docs/img/production_promotion.png new file mode 100644 index 0000000..9f2e3e4 Binary files /dev/null and b/docs/img/production_promotion.png differ diff --git a/docs/img/ras_xs.png b/docs/img/ras_xs.png new file mode 100644 index 0000000..40bc953 Binary files /dev/null and b/docs/img/ras_xs.png differ diff --git a/docs/img/test_account_overview.png b/docs/img/test_account_overview.png new file mode 100644 index 0000000..a3a886a Binary files /dev/null and b/docs/img/test_account_overview.png differ diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..2ac7447 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,58 @@ +# Welcome to Icefabric + +!!! warning "In Progress" + These docs are a work in progress and will continuously be updated + +# Icefabric + +An [Apache Iceberg](https://py.iceberg.apache.org/)/[Icechunk](https://icechunk.io/en/latest/) implementation of the Hydrofabric to disseminate continental hydrologic data + +!!! note + To run any of the functions in this repo, your AWS test account credentials + `AWS_DEFAULT_REGION="us-east-1"` need to be in your `.env` file and your `.pyiceberg.yaml` settings need to be up to date + +### Getting Started +This repo is managed through [UV](https://docs.astral.sh/uv/getting-started/installation/) and can be installed through: +```sh +uv sync +source .venv/bin/activate +``` + +### Running the API locally +To run the API locally, ensure your `.env` file in your project root has the right credentials, then run +```sh +python -m app.main +``` +This should spin up the API services at `localhost:8000/`. 
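+Once the server is up, a quick request against the health endpoint (described later in the Icefabric API Guide) is an easy way to confirm the service is responding; below is a minimal sketch using `requests`.
+
+```python
+import requests
+
+# Assumes the API is running locally on the default port
+resp = requests.get("http://localhost:8000/health", timeout=5)
+print(resp.status_code, resp.text)
+```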
+ +If you want to run the API against a local SQL-backed catalog instead (see the Icefabric API Guide for how to build one), you can run +```sh +python -m app.main --catalog sql +``` + +### Building the API through Docker +To run the API locally with Docker, ensure your `.env` file in your project root has the right credentials, then run +```sh +docker compose -f docker/compose.yaml build --no-cache +docker compose -f docker/compose.yaml up +``` +This should spin up the API services. + + +### Development +To ensure that icefabric follows the specified structure, be sure to install the local dev dependencies and run `pre-commit install`. + +### Documentation +To build the user guide documentation for Icefabric locally, run the following commands: +```sh +uv sync --extra docs +mkdocs serve -a localhost:8080 +``` +Docs will be spun up at `localhost:8080/`. + +### Pytests + +The `tests` folder is for all testing data so the global conftest can pick it up. This allows all tests in the namespace packages to share the same scope without having to reference one another in tests. + +To run tests, run `pytest -s` from the project root. + +To run the subsetter tests, run `pytest --run-slow` as these tests take some time. Otherwise, they will be skipped. diff --git a/docs/modules/index.md b/docs/modules/index.md new file mode 100644 index 0000000..0651976 --- /dev/null +++ b/docs/modules/index.md @@ -0,0 +1,3 @@ +# Modules + +The following pages document the support from the EDFS team for creating configuration files for NWM modules. diff --git a/docs/modules/sft.md b/docs/modules/sft.md new file mode 100644 index 0000000..519c203 --- /dev/null +++ b/docs/modules/sft.md @@ -0,0 +1,191 @@ +# SFT (Soil Freeze-Thaw) Module Documentation + +## Overview + +The SFT (Soil Freeze-Thaw) module simulates the freeze-thaw processes in soil columns and is used in cold regions where freeze-thaw cycles significantly affect water movement and storage. + +## Parameter Reference + +### Core Parameters + +| Parameter | Description | Units | Data Type | Default | Calibratable | +|-----------|-------------|--------|-----------|---------|--------------| +| `end_time` | Simulation duration. If no unit is specified, defaults to hours | s, sec, h, hr, d, day | double | `1.[d]` | FALSE | +| `dt` | Size of a simulation timestep. If no unit is specified, defaults to hours | s, sec, h, hr, d, day | double | `1.0[h]` | FALSE | +| `verbosity` | Logging verbosity level | - | string | `none` | FALSE | + +**Options for verbosity:** `high`, `low`, `none` + +### Soil Physical Properties + +These properties are based on Hydrofabric divide attributes provided in the latest enterprise version. 
+ +| Parameter | Description | Units | Data Type | Default | Calibratable | +|-----------|-------------|--------|-----------|---------|--------------| +| `soil_params.smcmax` | Maximum soil moisture content | - | double | - | TRUE | +| `soil_params.b` | Soil moisture retention curve parameter (bexp) | - | double | - | TRUE | +| `soil_params.satpsi` | Saturated soil suction (psisat) | - | double | - | TRUE | +| `soil_params.quartz` | Soil quartz content, used in soil thermal conductivity function of Peters-Lidard | - | double | `1.0` | FALSE | + +### Domain Configuration + +| Parameter | Description | Units | Data Type | Default | Calibratable | +|-----------|-------------|--------|-----------|---------|--------------| +| `soil_z` | Vertical resolution of the soil column (computational domain of the SFT model) | m | array[double] | `[0.1, 0.3, 1.0, 2.0]` | FALSE | +| `soil_temperature` | Initial soil temperature for the discretized column | K | array[double] | - | FALSE | + +**Ice Fraction Scheme Options:** + +The following ice fraction schemes are dictated by what version of CFE is used + +- `Schaake`: Traditional Schaake ice fraction calculation +- `Xinanjiang`: Xinanjiang ice fraction calculation method (default) + +## Data Structures + +### SFT Configuration Model + +The SFT module uses a Pydantic model to validate and structure configuration parameters: + +```python +class SFT(BaseModel): + catchment: str # Catchment identifier + verbosity: str = "none" # Logging level + soil_moisture_bmi: int = 1 # BMI soil moisture flag + end_time: str = "1.[d]" # Simulation duration + dt: str = "1.0[h]" # Time step + soil_params_smcmax: float # Maximum soil moisture + soil_params_b: float # Soil retention parameter + soil_params_satpsi: float # Saturated soil suction + soil_params_quartz: float = 1.0 # Quartz content + ice_fraction_scheme: IceFractionScheme # Ice fraction method + soil_z: list[float] = [0.1, 0.3, 1.0, 2.0] # Soil layer depths + soil_temperature: list[float] # Initial temperatures +``` + +### Ice Fraction Schemes + +```python +class IceFractionScheme(str, enum.Enum): + SCHAAKE = "Schaake" + XINANJIANG = "Xinanjiang" +``` + +## Usage + +### Command Line Interface + +The SFT config text files can be created using the `icefabric` CLI tool: + +```bash +icefabric params \ + --gauge "01010000" \ + --module "sft" \ + --domain "conus" \ + --catalog "glue" \ + --ice-fraction "xinanjiang" \ + --output "./output" +``` + +**CLI Parameters:** +- `--gauge`: Gauge ID to trace upstream catchments from +- `--module`: Module type (use "sft" for Soil Freeze-Thaw) +- `--domain`: Hydrofabric domain (`conus`, `alaska`, etc.) +- `--catalog`: PyIceberg Catalog type (`glue` or `sql`) +- `--ice-fraction`: Ice fraction scheme (`schaake` or `xinanjiang`) +- `--output`: Output directory for configuration files + +### REST API + +The SFT module is also accessible via REST API: + +```http +GET /modules/sft/?identifier=01010000&domain=conus&use_schaake=false +``` + +**API Parameters:** +- `identifier` (required): Gauge ID to trace upstream from +- `domain` (optional): Geographic domain (default: `conus`) +- `use_schaake` (optional): Use Schaake ice fraction scheme (default: `false`) + +**Response:** Returns a list of SFT configuration objects, one for each upstream catchment. 
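+Below is a minimal sketch of calling this endpoint with `requests`; it assumes the API is running locally on port 8000, and note that the deployed routes may carry a `/v1` prefix as shown in the Icefabric API Guide.
+
+```python
+import requests
+
+# Query SFT configs for all catchments upstream of a gauge
+resp = requests.get(
+    "http://localhost:8000/modules/sft/",
+    params={"identifier": "01010000", "domain": "conus", "use_schaake": "false"},
+    timeout=60,
+)
+configs = resp.json()  # one SFT configuration object per upstream catchment
+print(f"{len(configs)} catchment configs returned")
+```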
+ +### Python API + +Direct Python usage: + +```python +from icefabric.modules import get_sft_parameters +from icefabric.schemas.hydrofabric import HydrofabricDomains +from pyiceberg.catalog import load_catalog + +# Load catalog +catalog = load_catalog("glue") + +# Get SFT parameters +configs = get_sft_parameters( + catalog=catalog, + domain=HydrofabricDomains.CONUS, + identifier="01010000", + use_schaake=False +) + +# Each config is an SFT pydantic model +for config in configs: + print(f"Catchment: {config.catchment}") + print(f"Soil layers: {config.soil_z}") + print(f"Initial temperatures: {config.soil_temperature}") +``` + +## Parameter Estimation + +The system automatically estimates initial parameters from hydrofabric data: + +### Soil Parameters +- **smcmax**: Calculated as mean across available soil moisture maximum values +- **b (bexp)**: Derived from mode of soil moisture retention curve parameters +- **satpsi**: Calculated as geometric mean of saturated soil suction values +- **quartz**: Default value of 1.0 (assuming high quartz content) + +### Temperature Initialization +- **soil_temperature**: Currently set to a uniform 45°F (280.37K) across all layers +- This represents a reasonable estimate for mean soil temperature + +### Spatial Resolution +- **soil_z**: Default 4-layer discretization [0.1, 0.3, 1.0, 2.0] meters +- Provides adequate resolution for freeze-thaw processes + +## Output Files + +The CLI and API generate BMI-compatible configuration files: + +``` +{catchment_id}_bmi_config_sft.txt +``` + +Example file content: +``` +verbosity=none +soil_moisture_bmi=1 +end_time=1.[d] +dt=1.0[h] +soil_params.smcmax=0.434 +soil_params.b=4.05 +soil_params.satpsi=0.0355 +soil_params.quartz=1.0 +ice_fraction_scheme=Xinanjiang +soil_z=0.1,0.3,1.0,2.0[m] +soil_temperature=280.37,280.37,280.37,280.37[K] +``` + +## Notes and Limitations + +1. **Temperature Initialization**: Current implementation uses uniform 45°F across all soil layers. Future versions should implement depth-dependent temperature profiles. + +2. **Parameter Weighting**: Soil parameters are currently averaged with equal weighting rather than weighted averaging based on layer thickness. + +3. **Quartz Support**: The `soil_params.quartz` was removed in v2.2 of the Hydrofabric and is defaulted to 1.0 + +4. **Spatial Coverage**: Parameter estimation depends on available hydrofabric data coverage for the specified domain. + +5. **Temporal Considerations**: Initial parameters represent steady-state estimates. Actual model runs may require spin-up periods for equilibration. diff --git a/docs/user_guide/icefabric_api.md b/docs/user_guide/icefabric_api.md new file mode 100644 index 0000000..1625713 --- /dev/null +++ b/docs/user_guide/icefabric_api.md @@ -0,0 +1,275 @@ +# Icefabric API Guide + +## Overview + +The Icefabric API is a FastAPI-based service that provides access to EDFS data stored in Apache Iceberg format. The API offers multiple data export formats and metadata endpoints for the hydrofabric and streamflow observations. + +## Architecture + +The API consists of several key components: + +1. **Main Application** (`app/main.py`) - FastAPI application with health checks and router configuration +2. **Data Routers** - Handles all data endpoints. Streamflow observations, Hydrofabric subsetting, National Water Model module configuration, and HEC-RAS cross-section retrieval are supported. +3. **Apache Iceberg Backend** - Defaults to hosted AWS Glue catalog. Local SQLite-backed catalog may be built using instructions below. 
+ +### Running the API locally +To run the API locally, ensure your `.env` file in your project root has the right credentials (`test`), then run +```sh +uv sync +source .venv/bin/activate +python -m app.main +``` +This should spin up the API services at `localhost:8000/`. + +### Building the API through Docker +To run the API locally with Docker, ensure your `.env` file in your project root has the right credentials, then run +```sh +docker compose -f docker/compose.yaml build --no-cache +docker compose -f docker/compose.yaml up +``` +This should spin up the API services. + +### Running the API with a local Iceberg catalog - Advanced Use +To run the API locally against a local catalog, the catalog must first be exported from Glue. In the following code block, run the export script for as many catalog namespaces as you need. Ensure your `.env` file in your project root has the right credentials (`test`), then run +```sh +uv sync +source .venv/bin/activate +python tools/pyiceberg/export_catalog.py --namespace conus_hf +# Run the tool additional times with other namespaces as necessary +``` + +To view the namespaces hosted on Glue, you can run the following commands in the terminal: +```python +>>> from pyiceberg.catalog import load_catalog +>>> catalog = load_catalog("glue") +>>> catalog.list_namespaces() +``` + + +To run the API locally with a local SQL backend, ensure your `.env` file in your project root has the right credentials (`test`), then run +```sh +uv sync +source .venv/bin/activate +python -m app.main --catalog sql +``` +This should spin up the API services. + +## How It Works + +### Data Flow + +1. **Request Processing** - Validates data source and identifier parameters +2. **Data Filtering** - Applies optional date range filters to Iceberg tables +3. **Format Conversion** - Exports data in requested format (CSV/Parquet) +4. **Response Generation** - Returns data with appropriate headers and metadata + +### Supported Data Sources + +#### Observations +Currently supports: + +- **USGS** - United States Geological Survey hourly streamflow data + +#### Hydrofabric +Provides geospatial watershed data: + +- **Subset Generation** - Creates upstream watershed subsets from identifiers + +!!! note "Data Storage" + All data is stored remotely as Apache Iceberg tables on AWS Glue unless you built the catalog locally, in which case it is stored in a SQLite-backed catalog at `/tmp/warehouse/pyiceberg_catalog.db` + +### National Water Model Modules +Retrieve National Water Model (NWM) module parameters. 
+ +Currently supports: + +- **Soil Freeze Thaw (SFT)** - Retrieve parameters for the Soil Freeze Thaw module +- **TopoFlow-Glacier** - Retrieve parameters for the TopoFlow Glacier module + +### RAS Cross-sections +Retrieves geopackage data of HEC-RAS cross-sections. + +Currently supports: + +- **HUC ID**: Download a geopackage for a given HUC ID +- **HUC ID** and **Reach ID**: Download a geopackage for a given HUC ID and Reach ID + +## Usage Examples + +### Streamflow Observations + +```python +import requests +import pandas as pd +from io import StringIO, BytesIO + +base_url = "http://localhost:8000/v1/streamflow_observations" + +# Get available data sources +sources = requests.get(f"{base_url}/sources").json() + +# Get available identifiers for USGS +identifiers = requests.get(f"{base_url}/usgs/available", params={"limit": 10}).json() + +# Get station information +station_info = requests.get(f"{base_url}/usgs/01031500/info").json() +print(f"Station has {station_info['total_records']} records") + +# Download CSV data with date filtering +csv_response = requests.get( + f"{base_url}/usgs/csv", + params={ + "identifier": "01031500", + "start_date": "2023-01-01T00:00:00", + "end_date": "2023-01-31T00:00:00", + "include_headers": True + } +) +df_csv = pd.read_csv(StringIO(csv_response.text)) + +# Download Parquet data (recommended for large datasets) +parquet_response = requests.get( + f"{base_url}/usgs/parquet", + params={ + "identifier": "01031500", + "start_date": "2023-01-01T00:00:00" + } +) +df_parquet = pd.read_parquet(BytesIO(parquet_response.content)) +``` + +### Hydrofabric Subset + +```python +import requests + +# Download hydrofabric subset as geopackage +response = requests.get("http://localhost:8000/v1/hydrofabric/01010000/gpkg") + +if response.status_code == 200: + with open("hydrofabric_subset_01010000.gpkg", "wb") as f: + f.write(response.content) + print(f"Downloaded {len(response.content)} bytes") +else: + print(f"Error: {response.status_code}") +``` + +## Performance Considerations + +### Data Format Recommendations + +| Dataset Size | Recommended Format | Reason | +|-------------|-------------------|---------| +| < 50,000 records | CSV | Simple, widely supported | +| > 50,000 records | Parquet | Better compression, faster processing | +| > 200,000 records | Parquet + date filters | Reduced data transfer | + +## Development + +### Running the API + +```bash +# Install dependencies +uv sync + +# Start development server +python -m app.main +``` + +### Adding New Data Observation Sources + +To add a new data source, update the configuration in your router: + +Below is an example for the observations router: + +```python +class DataSource(str, Enum): + USGS = "usgs" + NEW_SOURCE = "new_source" # Add new source + +# Add configuration +DATA_SOURCE_CONFIG = { + DataSource.NEW_SOURCE: { + "namespace": "observations", + "table": "new_source_table", + "time_column": "timestamp", + "units": "m³/s", + "description": "New data source description", + }, +} +``` + +## API Documentation + +### Interactive Documentation + +The API provides interactive documentation at: + +- **Swagger UI**: `http://localhost:8000/docs` +- **ReDoc**: `http://localhost:8000/redoc` + +### OpenAPI Schema + +Access the OpenAPI schema at: `http://localhost:8000/openapi.json` + +## Verification + +### Observations + +```bash +# List available data sources +curl http://localhost:8000/v1/streamflow_observations/sources + +# Get available identifiers (limit results) +curl 
"http://localhost:8000/v1/streamflow_observations/usgs/available?limit=5" + +# Get data source information +curl http://localhost:8000/v1/streamflow_observations/usgs/info + +# Get specific station information +curl http://localhost:8000/v1/streamflow_observations/usgs/01010000/info + +# Download CSV with headers +curl "http://localhost:8000/v1/streamflow_observations/usgs/csv?identifier=01010000&include_headers=true" + +# Download CSV with date filtering +curl "http://localhost:8000/v1/streamflow_observations/usgs/csv?identifier=01010000&start_date=2021-12-31T14%3A00%3A00&end_date=2022-01-01T14%3A00%3A00&include_headers=true" + +# Download Parquet file +curl "http://localhost:8000/v1/streamflow_observations/usgs/parquet?identifier=01010000&start_date=2021-12-31T14%3A00%3A00&end_date=2022-01-01T14%3A00%3A00" -o "output.parquet" +``` + +### Hydrofabric + +```bash +# Download hydrofabric subset +curl "http://localhost:8000/v1/hydrofabric/01010000/gpkg" -o "subset.gpkg" + +# Download with different identifier +curl "http://localhost:8000/v1/hydrofabric/01031500/gpkg" -o "subset.gpkg" +``` + +### NWM Modules +```bash +# Return parameters for Soil Freeze Thaw by catchment +curl "http://localhost:8000/v1/modules/sft/?identifier=01010000&domain=conus_hf&use_schaake=false" + +# Return albedo value for given catchment state (snow, ice, or other) +curl "http://localhost:8000/v1/modules/topoflow/albedo?landcover=snow" +``` + +### RAS Cross-sections +```bash +# Download RAS cross-sections for a HUC ID +curl "http://localhost:8000/v1/ras_xs/02040106/" -o "ras_02040106.gpkg" + +# Download RAS cross-sections for a HUC ID and Reach ID +curl "http://localhost:8000/v1/ras_xs/02040106/dsreachid=4188251" -o "ras_02040106_4188251.gpkg" +``` + +### Health Check + +```bash +# Check API health +curl http://localhost:8000/health +``` diff --git a/docs/user_guide/icefabric_tools.md b/docs/user_guide/icefabric_tools.md new file mode 100644 index 0000000..470e531 --- /dev/null +++ b/docs/user_guide/icefabric_tools.md @@ -0,0 +1,77 @@ +# Icefabric Tools + +A series of compute services built on top of version controlled EDFS data + +## Hydrofabric Geospatial Tools + +### Overview + +The Hydrofabric Geospatial Tools module provides Python functions for subsetting and analyzing hydrofabric data stored in Apache Iceberg format + +### Functionality + +- **Data Subsetting** - the `subset()` function provides all upstream catchments related to a given gauge + +### Usage Examples + +#### Basic Subsetting + +```python +from pathlib import Path +from pyiceberg.catalog import load_catalog +from icefabric_tools import subset, IdType + +# Load the catalog using default settings +catalog = load_catalog("glue") + +# Basic subset using a hydrofabric ID +result = subset_hydrofabric( + catalog=catalog, + identifier="wb-10026", + id_type=IdType.ID, + layers=["divides", "flowpaths", "network", "nexus"] +) + +# Access the filtered data +flowpaths = result["flowpaths"] +divides = result["divides"] +network = result["network"] +nexus = result["nexus"] +``` + +#### Export to GeoPackage + +```python +# Export subset directly to GeoPackage +output_path = Path("subset_output.gpkg") + +subset_hydrofabric( + catalog=catalog, + identifier="01031500", + id_type=IdType.POI_ID, + layers=["divides", "flowpaths", "network", "nexus", "pois"], + output_file=output_path +) +``` + +#### Getting all layers + +```python +# Include all available layers +all_layers = [ + "divides", "flowpaths", "network", "nexus", + "divide-attributes", "flowpath-attributes", 
+ "flowpath-attributes-ml", "pois", "hydrolocations" +] + +result = subset_hydrofabric( + catalog=catalog, + identifier="HUC12-010100100101", + id_type=IdType.HL_URI, + layers=all_layers +) + +# Process specific layers +pois_data = result["pois"] +attributes = result["flowpath-attributes"] +``` diff --git a/docs/user_guide/index.md b/docs/user_guide/index.md new file mode 100644 index 0000000..e38398a --- /dev/null +++ b/docs/user_guide/index.md @@ -0,0 +1,5 @@ +# Icefabric + +## The mission + +The idea for the icefabric came from the need to version control datasets for the National Water Model 4.0. There were many different file formats, and hydrofabric versions, but the need for an [Apache Iceberg](https://iceberg.apache.org/) style backend was realized. The name itself, icefabric, is a reference to this. diff --git a/docs/user_guide/terraform.md b/docs/user_guide/terraform.md new file mode 100644 index 0000000..8249e71 --- /dev/null +++ b/docs/user_guide/terraform.md @@ -0,0 +1,140 @@ +# AWS S3 Tables with Apache Iceberg - Terraform Implementation + +!!! note + These docs are taken from `src/icefabric_manage/terraform/README.md` + +This directory contains PoC Terraform IaC for deploying Apache Iceberg tables using the AWS S3 Tables service with AWS Glue catalog integration. It also contains a basic demo / test python script used to verify things. + +## Architecture Overview + +The infrastructure creates: +- **AWS S3 Tables bucket** - Managed storage for Iceberg table data +- **S3 Tables namespace and table** - Logical organization for tables +- **AWS Glue Catalog database** - Metadata storage for table schemas +- **Lake Formation permissions** - Access control and governance +- **IAM policies** - Secure access between services + +## Prerequisites + +### AWS Requirements +- AWS CLI configured with appropriate credentials. (Older versions may not support AWS S3 Tables) +- Terraform >= 1.0 +- AWS Account with permissions (basically Admin due to IAM requirements) for: + - S3 Tables + - AWS Glue + - Lake Formation + - IAM + +### ⚠️ Critical: Enable S3 Table Buckets Integration + +**This step must be completed before running Terraform**, otherwise the deployment will fail. + +1. Navigate to the [S3 Table Buckets Console](https://console.aws.amazon.com/s3tables/home) in your target region +2. Locate the section titled **"Integration with AWS analytics services"** +3. Click the **"Enable integration"** button +4. Confirm that the integration status shows **"Enabled"** for your deployment region + +This integration allows services like Athena, Glue, Redshift, and EMR to interact with S3 Table Buckets. Without this step, your Iceberg tables won't be accessible through these analytics services. + +> **Note**: This is a one-time setup per AWS region. Once enabled, all future S3 Table Buckets in that region will have access to AWS analytics services integration. + +### Python Requirements +- Python 3.8+ +- pyiceberg python module w/deps +- boto3 (for AWS SDK) + +## Quick Start + +### 1. 
High Level Deploy Infrastructure + +Create a `terraform.tfvars` file replacing the values below as appropriate for your environment or deploy: + +```hcl +env = "dev" +application = "myapp" +team = "NGWPC" +region = "us-east-1" +identity_center_role_arn = "arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_DataRole" + +# Optional: Specify Lake Formation admins +lakeformation_admin_arns = [ + "arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_DataRole", + "arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_Admin" +] +``` + +Deploy the infrastructure: + +```bash +terraform init +terraform plan +terraform apply +``` + +### 3. Set Environment Variables + +After deployment, set these environment variables for the Python demo: + +```bash +# From Terraform outputs +export ICEBERG_WAREHOUSE_PATH=$(terraform output -raw s3tables_table_warehouse_location) +export AWS_DEFAULT_REGION="us-east-1" +``` + +### 4. Install Python Dependencies in your preferred active virtual environment + +```bash +pip install pyiceberg boto3 mypy_boto3_glue pyarrow +``` + +### 5. Run the Demo + +```bash +python iceberg_demo.py +``` + +## Terraform Configuration + +### Variables + +| Variable | Description | Type | Default | Required | +|----------|-------------|------|---------|----------| +| `region` | AWS region | string | `us-east-1` | No | +| `env` | Environment name (test/oe/other) | string | - | Yes | +| `application` | Application name | string | - | Yes | +| `team` | Team name (for future tagging if supported) | string | - | Yes | +| `identity_center_role_arn` | IAM role ARN for accessing resources | string | - | Yes | +| `lakeformation_admin_arns` | Lake Formation administrator ARNs | list(string) | `[]` | No | + +### Outputs + +| Output | Description | +|--------|-------------| +| `s3tables_bucket_arn` | ARN of the S3 Tables bucket | +| `s3tables_table_warehouse_location` | Warehouse location for Iceberg tables (devs need this!!!) | +| `glue_database_name` | Name of the Glue catalog database | +| `lakeformation_admins` | List of Lake Formation administrators | + +## Python Integration + +### Basic Usage + +The provided `iceberg_demo.py` demonstrates: +- Connecting to AWS Glue catalog +- Creating/loading Iceberg tables +- Very Basic schema definition + +### Configuration + +The Python script uses these environment variables: +- `ICEBERG_WAREHOUSE_PATH` - S3 Tables warehouse location +- `AWS_REGION` - AWS region for services +- `AWS_DEFAULT_REGION` - Default AWS region + +## Permissions and Security + +### Lake Formation Integration + +The infrastructure automatically configures basic Lake Formation settings. This can get very granular in the future. +- Database-level permissions for the specified Identity Center role (SoftwareEngineersFull) +- Table-level permissions are supported, but have not been tested diff --git a/examples/iceberg/iceberg_version_control_examples.ipynb b/examples/iceberg/iceberg_version_control_examples.ipynb new file mode 100644 index 0000000..6fdd1c2 --- /dev/null +++ b/examples/iceberg/iceberg_version_control_examples.ipynb @@ -0,0 +1,489 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Apache Iceberg Version Control for Hydrofabric and Streamflow Data\n", + "\n", + "### Overview\n", + "\n", + "This notebook demonstrates **enterprise-grade version control capabilities** for hydrological datasets using Apache Iceberg. 
We'll showcase how the hydrofabric and streamflow observations can be managed with full version control.\n", + "\n", + "#### What is Apache Iceberg?\n", + "\n", + "**Apache Iceberg** is a high-performance table format designed for large-scale data lakes. Unlike traditional file formats, Iceberg provides:\n", + "\n", + "- **Automatic snapshots** of every data change\n", + "- **Time travel queries** to access historical versions\n", + "- **ACID transactions** for data consistency\n", + "- **Schema evolution** without breaking existing queries\n", + "- **Query performance** through advanced indexing and pruning\n", + "- **Complete audit trails** for regulatory compliance" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pyarrow as pa\n", + "from pyiceberg.catalog import load_catalog\n", + "\n", + "from icefabric.helpers import load_creds, load_pyiceberg_config\n", + "\n", + "# dir is where the .env file is located\n", + "load_creds()\n", + "\n", + "# Loading the local pyiceberg config settings\n", + "pyiceberg_config = load_pyiceberg_config()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Loading SQL Catalog\n", + "# This catalog can be downloaded by running the following commands with AWS creds:\n", + "# python tools/iceberg/export_catalog.py --namespace conus_hf\n", + "catalog = load_catalog(\n", + " name=\"sql\",\n", + " type=pyiceberg_config[\"catalog\"][\"sql\"][\"type\"],\n", + " uri=pyiceberg_config[\"catalog\"][\"sql\"][\"uri\"],\n", + " warehouse=pyiceberg_config[\"catalog\"][\"sql\"][\"warehouse\"],\n", + ")\n", + "\n", + "# # Loading Glue Catalog\n", + "# catalog = load_catalog(\"glue\", **{\n", + "# \"type\": \"glue\",\n", + "# \"glue.region\": \"us-east-1\"\n", + "# })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exploring the Data Catalog\n", + "\n", + "Apache Iceberg organizes data into **catalogs**, **namespaces**, and **tables** - similar to databases, schemas, and tables in traditional systems. However, each table maintains complete version history automatically.\n", + "\n", + "#### Hydrofabric Tables\n", + "\n", + "The `conus_hf` namespace contains hydrofabric layers associated with the CONUS-based geopackage\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "catalog.list_tables(\"conus_hf\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's examine the **hydrolocations** table and make some versioned additions. 
Below we'll see both the snapshots from the hydrolocations table, and actual geopackage layer exported to a pandas dataframe" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "table = catalog.load_table(\"conus_hf.hydrolocations\")\n", + "table.inspect.snapshots()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = table.scan().to_pandas()\n", + "df.tail()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Snapshot Analysis: Understanding Version History\n", + "\n", + "Each snapshot in Iceberg contains:\n", + "- **Unique identifier** (snapshot_id)\n", + "- **Summary metadata** describing the operation\n", + "- **Timestamp** of the change\n", + "- **File manifests** pointing to data files\n", + "- **Schema information** at that point in time\n", + "\n", + "This enables **complete traceability** of how data evolved over time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for snapshot in table.snapshots():\n", + " print(f\"Snapshot ID: {snapshot.snapshot_id}; Summary: {snapshot.summary}\")\n", + "snapshot_id = table.metadata.snapshots[0].snapshot_id" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Demonstrating Version Control: Adding New Monitoring Location\n", + "\n", + "Now we'll demonstrate Iceberg's version control by adding a **new hydrologic monitoring location**\n", + "\n", + "#### The Version Control Process:\n", + "\n", + "1. **Modify data** (add new monitoring location)\n", + "2. **Overwrite table** (creates new snapshot automatically)\n", + "3. **Preserve history** (all previous versions remain accessible)\n", + "4. **Track changes** (complete audit trail maintained)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_df = df.copy()\n", + "new_df.loc[len(new_df)] = {\n", + " \"poi_id\": 99999,\n", + " \"id\": \"wb-0\",\n", + " \"nex_id\": \"tnx-0\",\n", + " \"hf_id\": 999999,\n", + " \"hl_link\": \"Testing\",\n", + " \"hl_reference\": \"testing\",\n", + " \"hl_uri\": \"testing\",\n", + " \"hl_source\": \"testing\",\n", + " \"hl_x\": -1.952088e06,\n", + " \"hl_y\": 1.283884e06,\n", + " \"vpu_id\": 18,\n", + "}\n", + "new_df.tail()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Writing Changes: Automatic Snapshot Creation\n", + "\n", + "When we write changes to an Iceberg table:\n", + "\n", + "1. **Schema validation** ensures data compatibility\n", + "2. **New snapshot created** automatically with unique ID\n", + "3. **Previous snapshots preserved** for time travel\n", + "4. **Metadata updated** with operation summary\n", + "5. **ACID guarantees** ensure consistency\n", + "\n", + "This happens **atomically** - either the entire operation succeeds or fails, with no partial states.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "_df = pa.Table.from_pandas(new_df, preserve_index=False)\n", + "with table.update_schema() as update_schema:\n", + " update_schema.union_by_name(_df.schema)\n", + "table.overwrite(_df)\n", + "table.scan().to_pandas().tail()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Verifying New Snapshot Creation\n", + "\n", + "Let's examine the updated snapshot history. 
Notice how we now have **multiple snapshots**:\n", + "\n", + "1. **Original data** (initial snapshot)\n", + "2. **Data with new location** (our recent addition)\n", + "\n", + "Each snapshot is **completely independent** and can be accessed separately for different analyses or rollback scenarios.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for snapshot in table.snapshots():\n", + " print(f\"Snapshot ID: {snapshot.snapshot_id}; Summary: {snapshot.summary}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Iceberg's **time travel capability** allows querying any previous snapshot using its ID\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "snapshot_id = table.metadata.snapshots[0].snapshot_id\n", + "snapshot_id_latest = table.metadata.snapshots[-1].snapshot_id\n", + "table.scan(snapshot_id=snapshot_id).to_pandas().tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "table.scan(snapshot_id=snapshot_id_latest).to_pandas().tail()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Comparing Versions: Before and After\n", + "\n", + "Notice the difference between snapshots:\n", + "- **Original snapshot**: Contains original monitoring locations\n", + "- **Latest snapshot**: Includes our new test location (poi_id: 99999)\n", + "\n", + "This demonstrates **non-destructive updates** - both versions coexist and remain queryable.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Streamflow Observations: Time Series Version Control\n", + "\n", + "Now let's examine **streamflow observations** - time series data that requires different version control considerations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "table = catalog.load_table(\"streamflow_observations.usgs_hourly\")\n", + "table.inspect.snapshots()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = table.scan().to_pandas().set_index(\"time\")\n", + "df.tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for snapshot in table.snapshots():\n", + " print(f\"Snapshot ID: {snapshot.snapshot_id}; Summary: {snapshot.summary}\")\n", + "snapshot_id = table.metadata.snapshots[0].snapshot_id" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Adding Time Series Data: Simulating Real-Time Updates\n", + "\n", + "We'll now add a new streamflow observation to demonstrate version control for time series data\n", + "\n", + "The process maintains **historical context** while adding new information." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_streamflow_df = df.copy()\n", + "new_streamflow_df.loc[len(new_df)] = 0.1\n", + "new_streamflow_df.tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "_df = pa.Table.from_pandas(new_streamflow_df)\n", + "with table.update_schema() as update_schema:\n", + " update_schema.union_by_name(_df.schema)\n", + "table.overwrite(_df)\n", + "table.scan().to_pandas().tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for snapshot in table.snapshots():\n", + " print(f\"Snapshot ID: {snapshot.snapshot_id}; Summary: {snapshot.summary}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Time Travel with Time Series Data\n", + "\n", + "Comparing different snapshots of time series data reveals:\n", + "\n", + "#### Original Snapshot (Baseline Data):\n", + "- Contains original observational record\n", + "- Represents specific quality control state\n", + "- Suitable for historical analysis\n", + "\n", + "#### Latest Snapshot (Updated Data): \n", + "- Includes new observations\n", + "- Represents current operational state\n", + "- Suitable for real-time applications" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "snapshot_id = table.metadata.snapshots[0].snapshot_id\n", + "snapshot_id_latest = table.metadata.snapshots[-1].snapshot_id\n", + "table.scan(snapshot_id=snapshot_id).to_pandas().tail().set_index(\"time\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = table.scan(snapshot_id=snapshot_id).to_pandas()\n", + "_df = pa.Table.from_pandas(df)\n", + "with table.update_schema() as update_schema:\n", + " update_schema.union_by_name(_df.schema)\n", + "table.overwrite(_df)\n", + "table.scan().to_pandas().tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "table.scan(snapshot_id=snapshot_id_latest).to_pandas().tail().set_index(\"time\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Demonstration Cleanup: Reverting Changes\n", + "\n", + "To maintain data integrity, we'll now **revert our test changes** by removing the added records. This demonstrates:\n", + "\n", + "- **Controlled rollback** procedures\n", + "- **Data management** best practices \n", + "- **Cleanup workflows** for testing environments\n", + "\n", + "**Important**: Even these cleanup operations create new snapshots, maintaining complete audit trails of all activities." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cleaning up hydrofabric changes\n", + "table = catalog.load_table(\"conus_hf.hydrolocations\")\n", + "new_df = new_df.drop(new_df.index[-1])\n", + "_df = pa.Table.from_pandas(new_df, preserve_index=False)\n", + "with table.update_schema() as update_schema:\n", + " update_schema.union_by_name(_df.schema)\n", + "table.overwrite(_df)\n", + "catalog.load_table(\"conus_hf.hydrolocations\").scan().to_pandas().tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cleaning up Streamflow Observation changes\n", + "table = catalog.load_table(\"streamflow_observations.usgs_hourly\")\n", + "new_streamflow_df = new_streamflow_df.drop(new_streamflow_df.index[-1])\n", + "_df = pa.Table.from_pandas(new_streamflow_df)\n", + "with table.update_schema() as update_schema:\n", + " update_schema.union_by_name(_df.schema)\n", + "table.overwrite(_df)\n", + "catalog.load_table(\"streamflow_observations.usgs_hourly\").scan().to_pandas().tail()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**This demonstration showcases Apache Iceberg's capability to provide version control for water resources data, enabling both reliability and reproducibility for large-scale hydrological modeling systems.**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/iceberg/pyiceberg_time_travel.ipynb b/examples/iceberg/pyiceberg_time_travel.ipynb new file mode 100644 index 0000000..283b48b --- /dev/null +++ b/examples/iceberg/pyiceberg_time_travel.ipynb @@ -0,0 +1,469 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Demo: Time Travel with Iceberg Tables - CRUD Operations & Version Control\n", + "\n", + "## Overview\n", + "This notebook demonstrates **Create, Read, Update, and Delete (CRUD) operations** on version-controlled data using Apache Iceberg tables. The notebook showcases how Iceberg's snapshot-based architecture enables time travel capabilities and maintains a complete history of all data modifications.\n", + "\n", + "## Key Features Demonstrated:\n", + "- **CREATE**: Creating new tables and adding data\n", + "- **READ**: Querying current and historical data snapshots\n", + "- **UPDATE**: Modifying table schemas and data\n", + "- **DELETE**: Removing columns and dropping tables\n", + "- **VERSION CONTROL**: Time travel through snapshots to view historical states\n", + "\n", + "## Prerequisites:\n", + "- a local pyiceberg catalog spun up and referenced through .pyiceberg.yaml\n", + "\n", + "## Objectives:\n", + "By the end of this notebook, you will understand how to:\n", + "1. Perform all CRUD operations on Iceberg tables\n", + "2. Leverage version control to access historical data states\n", + "3. Create and manage table snapshots\n", + "4. 
Navigate between different versions of your data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from pyiceberg.catalog import load_catalog\n", + "\n", + "from icefabric.helpers import load_creds, load_pyiceberg_config\n", + "\n", + "# dir is where the .env file is located\n", + "load_creds()\n", + "\n", + "# Loading the local pyiceberg config settings\n", + "pyiceberg_config = load_pyiceberg_config()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# Loading SQL Catalog\n", + "# This catalog can be downloaded by running the following commands with AWS creds:\n", + "# python tools/iceberg/export_catalog.py --namespace streamflow_observations\n", + "# catalog = load_catalog(\n", + "# name=\"sql\",\n", + "# type=pyiceberg_config[\"catalog\"][\"sql\"][\"type\"],\n", + "# uri=pyiceberg_config[\"catalog\"][\"sql\"][\"uri\"],\n", + "# warehouse=pyiceberg_config[\"catalog\"][\"sql\"][\"warehouse\"],\n", + "# )\n", + "\n", + "# Loading Glue Catalog\n", + "catalog = load_catalog(\"glue\", **{\"type\": \"glue\", \"glue.region\": \"us-east-1\"})" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "### READ Operation: Loading and Inspecting Existing Data\n", + "\n", + "We begin by demonstrating the **READ** operation by loading an existing table and examining its version history. This shows how Iceberg maintains complete metadata about all snapshots (versions) of the data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "table = catalog.load_table(\"streamflow_observations.usgs_hourly\")\n", + "table.inspect.snapshots()" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "Let's examine the current data in the table. This represents the latest version of our dataset. Notice how we can easily convert Iceberg tables to pandas DataFrames for analysis.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "df = table.scan().to_pandas().set_index(\"time\")\n", + "df.tail()" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "### Version Control: Capturing Initial State\n", + "\n", + "**Version Control Feature**: Every operation in Iceberg creates a snapshot with a unique ID. We're capturing the initial snapshot ID here so we can demonstrate time travel capabilities later. This snapshot represents the baseline state of our data before any modifications.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "for snapshot in table.snapshots():\n", + " print(f\"Snapshot ID: {snapshot.snapshot_id}; Summary: {snapshot.summary}\")\n", + "snapshot_id = table.metadata.snapshots[0].snapshot_id" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "### UPDATE Operation: Schema Evolution and Data Modification\n", + " \n", + "Now we'll demonstrate the **UPDATE** operation by adding a new column to our existing table. This involves:\n", + "1. Creating synthetic data for the new column\n", + "2. Updating the table schema to accommodate the new column\n", + "3. 
Overwriting the table with the updated data\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "\n", + "n = len(df)\n", + "x = np.linspace(0, n, n)\n", + "y = np.sin(2 * np.pi * 1 * x / n).astype(np.float32)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "import pyarrow as pa\n", + "\n", + "df[\"12345678\"] = y\n", + "df.tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "_df = pa.Table.from_pandas(df)\n", + "with table.update_schema() as update_schema:\n", + " update_schema.union_by_name(_df.schema)\n", + "table.overwrite(_df)" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "After our UPDATE operation, we can verify that the schema has been modified. The new column \"12345678\" should now be part of the table structure.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "table.schema().fields[-1]" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "### Version Control: Tracking All Changes\n", + "\n", + "**Version Control Feature**: Notice how Iceberg has automatically created new snapshots for our UPDATE operation. The snapshot history now shows:\n", + "- Original data snapshot\n", + "- Delete operation snapshot (part of overwrite)\n", + "- New append operation snapshot (with the new column)\n", + "\n", + "This complete audit trail is essential for data governance and debugging." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "for snapshot in table.snapshots():\n", + " print(f\"Snapshot ID: {snapshot.snapshot_id}; Summary: {snapshot.summary}\")" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "**Time Travel Feature**: Using the snapshot ID we captured earlier, we can query the table as it existed before our UPDATE operation. This demonstrates Iceberg's powerful time travel capabilities - you can access any historical state of your data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "table.scan(snapshot_id=snapshot_id).to_pandas().tail()" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "Comparing the current state (with the new column) versus the historical state (without the column) demonstrates how version control preserves all data states while allowing easy access to current data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "table.scan().to_pandas().tail()" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "Now we'll demonstrate another **UPDATE** operation by removing the column we just added. 
This shows how Iceberg handles schema evolution in both directions (adding and removing columns).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "with table.update_schema() as update_schema:\n", + " update_schema.delete_column(\"12345678\")\n", + "\n", + "df = df.drop(\"12345678\", axis=1)\n", + "_df = pa.Table.from_pandas(df)\n", + "table.overwrite(_df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "table.schema().fields[-1]" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "### CREATE Operation: Building New Tables\n", + "\n", + "Now we'll demonstrate the **CREATE** operation by building an entirely new table from scratch. This shows how to:\n", + "1. Prepare data for a new table\n", + "2. Create the table structure in the catalog\n", + "3. Populate the table with initial data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "__df = df.copy()\n", + "__df[\"12345678\"] = y\n", + "subset_df = __df[[\"12345678\"]].copy()\n", + "subset_df.tail()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26", + "metadata": {}, + "outputs": [], + "source": [ + "namespace = \"streamflow_observations\"\n", + "table_name = \"testing_hourly\"\n", + "arrow_table = pa.Table.from_pandas(subset_df)\n", + "iceberg_table = catalog.create_table(\n", + " f\"{namespace}.{table_name}\",\n", + " schema=arrow_table.schema,\n", + ")\n", + "iceberg_table.append(arrow_table)" + ] + }, + { + "cell_type": "markdown", + "id": "27", + "metadata": {}, + "source": [ + "### READ Operation: Verifying New Table Creation \n", + "\n", + "After our **CREATE** operation, we can verify that the new table exists in our namespace and examine its initial snapshot. Every new table starts with its first snapshot upon creation.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "catalog.list_tables(namespace)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "table = catalog.load_table(f\"{namespace}.{table_name}\")\n", + "table.inspect.snapshots()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30", + "metadata": {}, + "outputs": [], + "source": [ + "table.scan().to_pandas().tail()" + ] + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "### DELETE Operation: Table Removal\n", + "\n", + "Finally, we demonstrate the **DELETE** operation by completely removing the table we just created. 
This shows how to clean up resources and manage table lifecycle.\n", + "\n", + "**Important**: Unlike column deletion (which is reversible through time travel), table deletion is permanent and removes all snapshots and data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32", + "metadata": {}, + "outputs": [], + "source": [ + "catalog.drop_table(f\"{namespace}.{table_name}\")\n", + "catalog.list_tables(namespace)" + ] + }, + { + "cell_type": "markdown", + "id": "33", + "metadata": {}, + "source": [ + "### Summary: CRUD Operations and Version Control Demonstrated\n", + " \n", + "This notebook has successfully demonstrated all required CRUD operations with version-controlled data:\n", + " \n", + "#### CREATE Operations:\n", + "- Created new tables with `catalog.create_table()`\n", + "- Added new columns to existing tables\n", + "- Populated tables with initial data using `append()`\n", + "\n", + "#### READ Operations:\n", + "- Loaded existing tables with `catalog.load_table()`\n", + "- Queried current data states with `table.scan()`\n", + "- Accessed historical data states using snapshot IDs\n", + "- Inspected table schemas and metadata\n", + " \n", + "#### UPDATE Operations:\n", + "- Modified table schemas by adding columns\n", + "- Updated data through `overwrite()` operations\n", + "- Removed columns from existing tables\n", + "\n", + "#### DELETE Operations:\n", + "- Deleted columns from table schemas\n", + "- Removed entire tables with `catalog.drop_table()`\n", + "\n", + "#### Version Control Features:\n", + "- **Snapshot Management**: Every operation creates tracked snapshots\n", + "- **Time Travel**: Access any historical state using snapshot IDs\n", + "- **Audit Trail**: Complete history of all table modifications\n", + "- **Schema Evolution**: Track changes to table structure over time\n" + ] + }, + { + "cell_type": "markdown", + "id": "34", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/iceberg_data_viewer/ras_xs.ipynb b/examples/iceberg_data_viewer/ras_xs.ipynb new file mode 100644 index 0000000..1d95274 --- /dev/null +++ b/examples/iceberg_data_viewer/ras_xs.ipynb @@ -0,0 +1,261 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "### RAS XS Geopackage Viewer\n", + "\n", + "An interactive viewer to show the versioned and cataloged HEC-RAS XS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from pyiceberg.catalog import load_catalog\n", + "\n", + "from icefabric.helpers import load_creds, load_pyiceberg_config, to_geopandas\n", + "\n", + "# dir is where the .env file is located\n", + "load_creds()\n", + "\n", + "# Loading the local pyiceberg config settings\n", + "pyiceberg_config = load_pyiceberg_config()" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "The below code will load a catalog. 
The SQL catalog is for a local build while the Glue Catalog will pull from S3 tables and requires AWS creds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Loading SQL Catalog\n", + "# This catalog can be downloaded by running the following commands with AWS creds:\n", + "# python tools/iceberg/export_catalog.py --namespace conus_reference\n", + "# python tools/iceberg/export_catalog.py --namespace ras_xs\n", + "# catalog = load_catalog(\n", + "# name=\"sql\",\n", + "# type=pyiceberg_config[\"catalog\"][\"sql\"][\"type\"],\n", + "# uri=pyiceberg_config[\"catalog\"][\"sql\"][\"uri\"],\n", + "# warehouse=pyiceberg_config[\"catalog\"][\"sql\"][\"warehouse\"],\n", + "# )\n", + "\n", + "# Loading Glue Catalog\n", + "catalog = load_catalog(\"glue\", **{\"type\": \"glue\", \"glue.region\": \"us-east-1\"})" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "Let's first load our tables that we'll be using and view the data schemas" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "catalog.load_table(\"ras_xs.conflated\")" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "Let's load a sample BLE dataset: \"ble_05119_Pulaski\". First we'll scan the pyiceberg catalog for all conflated XS within the BLE area, then pull the reference divides and flowpaths " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "from pyiceberg.expressions import EqualTo, In\n", + "\n", + "gdf = to_geopandas(\n", + " catalog.load_table(\"ras_xs.conflated\")\n", + " .scan(row_filter=EqualTo(\"domain\", \"/ble_05119_Pulaski\"))\n", + " .to_pandas()\n", + ")\n", + "gdf.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "reference_divides = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_divides\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "reference_flowpaths = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_flowpaths\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "reference_divides = reference_divides.to_crs(epsg=3857)\n", + "\n", + "reference_flowpaths = reference_flowpaths.to_crs(epsg=3857)\n", + "gdf = gdf.to_crs(epsg=3857)\n", + "\n", + "ref_div_ex = reference_divides.explore(color=\"grey\")\n", + "ref_flo_ex = reference_flowpaths.explore(m=ref_div_ex, color=\"blue\")\n", + "gdf.explore(m=ref_flo_ex, color=\"black\")" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "Now, we'll see how the representative XS look on this same data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "gdf_rep = to_geopandas(\n", + " catalog.load_table(\"ras_xs.representative\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "\n", + "ref_div_ex = reference_divides.explore(color=\"grey\")\n", + "ref_flo_ex = reference_flowpaths.explore(m=ref_div_ex, color=\"blue\")\n", + "gdf_rep.explore(m=ref_flo_ex, 
color=\"red\")" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "Let's now do the same work, but with a different area: \"mip_18010110\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "gdf_mip = to_geopandas(\n", + " catalog.load_table(\"ras_xs.conflated\").scan(row_filter=EqualTo(\"domain\", \"/mip_18010110\")).to_pandas()\n", + ")\n", + "gdf_mip.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "reference_divides_mip = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_divides\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf_mip[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "reference_flowpaths_mip = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_flowpaths\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf_mip[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "reference_divides_mip = reference_divides_mip.to_crs(epsg=3857)\n", + "reference_flowpaths_mip = reference_flowpaths_mip.to_crs(epsg=3857)\n", + "gdf_mip = gdf_mip.to_crs(epsg=3857)\n", + "\n", + "ref_div_ex_mip = reference_divides_mip.explore(color=\"grey\")\n", + "ref_flo_ex_mip = reference_flowpaths_mip.explore(m=ref_div_ex_mip, color=\"blue\")\n", + "gdf_mip.explore(m=ref_flo_ex_mip, color=\"black\")" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "Let's see how the representative XS look" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "gdf_rep_mip = to_geopandas(\n", + " catalog.load_table(\"ras_xs.representative\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf_mip[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "\n", + "ref_div_ex_mip = reference_divides_mip.explore(color=\"grey\")\n", + "ref_flo_ex_mip = reference_flowpaths_mip.explore(m=ref_div_ex_mip, color=\"blue\")\n", + "gdf_rep_mip.explore(m=ref_flo_ex_mip, color=\"red\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/iceberg_data_viewer/ras_xs_api.ipynb b/examples/iceberg_data_viewer/ras_xs_api.ipynb new file mode 100644 index 0000000..3062365 --- /dev/null +++ b/examples/iceberg_data_viewer/ras_xs_api.ipynb @@ -0,0 +1,295 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "### RAS XS API Viewer\n", + "\n", + "An interactive viewer to show the versioned and cataloged HEC-RAS XS" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "Make sure to load the prerequisite credentials, then load the iceberg catalog." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import threading\n", + "\n", + "from pyiceberg.catalog import load_catalog\n", + "from pyprojroot import here\n", + "\n", + "from icefabric.helpers import load_creds, load_pyiceberg_config, to_geopandas\n", + "\n", + "os.chdir(here())\n", + "print(f\"directory changed to {here()}\")\n", + "load_creds()\n", + "pyiceberg_config = load_pyiceberg_config()\n", + "\n", + "# Loading SQL Catalog\n", + "# This catalog can be downloaded by running the following commands with AWS creds:\n", + "# python tools/iceberg/export_catalog.py --namespace conus_reference\n", + "# python tools/iceberg/export_catalog.py --namespace ras_xs\n", + "catalog = load_catalog(\n", + " name=\"sql\",\n", + " type=pyiceberg_config[\"catalog\"][\"sql\"][\"type\"],\n", + " uri=pyiceberg_config[\"catalog\"][\"sql\"][\"uri\"],\n", + " warehouse=pyiceberg_config[\"catalog\"][\"sql\"][\"warehouse\"],\n", + ")\n", + "\n", + "# Loading Glue Catalog\n", + "# catalog = load_catalog(\"glue\", **{\"type\": \"glue\", \"glue.region\": \"us-east-1\"})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Starting the API locally\n", + "def run_api():\n", + " \"\"\"Starts the icefabric API locally\"\"\"\n", + " !python -m app.main --catalog sql\n", + "\n", + "\n", + "threading.Thread(target=run_api).start()" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "Let's call the API to return the conflated cross-sections for a specific flowpath ID to get a sample response. \n", + "\n", + "#### NOTE: This API returns a response via a geopackage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "import geopandas as gpd\n", + "import httpx\n", + "\n", + "from icefabric.schemas import XsType\n", + "\n", + "# Set up parameters for the API call\n", + "flowpath_id = \"20059822\"\n", + "url = f\"http://0.0.0.0:8000/v1/ras_xs/{flowpath_id}/\"\n", + "schema_type = XsType.CONFLATED\n", + "params = {\n", + " \"schema_type\": schema_type.value,\n", + "}\n", + "headers = {\"Content-Type\": \"application/json\"}\n", + "\n", + "# Use HTTPX to stream the resulting geopackage response\n", + "with (\n", + " httpx.stream(\n", + " method=\"GET\",\n", + " url=url,\n", + " params=params,\n", + " headers=headers,\n", + " timeout=60.0, # GLUE API requests can be slow depending on the network speed. 
Adding a 60s timeout to ensure requests go through\n", + " ) as response\n", + "):\n", + " response.raise_for_status() # Ensure the request was successful\n", + " with open(f\"ras_xs_{flowpath_id}.gpkg\", \"wb\") as file:\n", + " for chunk in response.iter_bytes():\n", + " file.write(chunk)\n", + "\n", + "# Load geopackage into geopandas\n", + "gdf = gpd.read_file(f\"ras_xs_{flowpath_id}.gpkg\")" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "Next, pull the reference divides and flowpaths for when we explore the data from the API" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "from pyiceberg.expressions import In\n", + "\n", + "# Pull and filter reference divides/flowpaths from the catalog\n", + "reference_divides = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_divides\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "reference_flowpaths = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_flowpaths\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "\n", + "# Convert all data to the EPSG:4326 coordinate reference system\n", + "reference_divides = reference_divides.to_crs(epsg=4326)\n", + "reference_flowpaths = reference_flowpaths.to_crs(epsg=4326)\n", + "gdf = gdf.to_crs(epsg=4326)" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "Finally, project the conflated dataset over the references" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "ref_div_ex = reference_divides.explore(color=\"grey\")\n", + "ref_flo_ex = reference_flowpaths.explore(m=ref_div_ex, color=\"blue\")\n", + "\n", + "# View the data\n", + "gdf.explore(m=ref_flo_ex, color=\"black\")" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "Now let's do the same thing, except filter the data so it lies inside a lat/lon bounding box - the following query gets all the data in the state of New Mexico" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "from app.routers.ras_xs.router import BoundingBox\n", + "\n", + "url = \"http://0.0.0.0:8000/v1/ras_xs/within\"\n", + "schema_type = XsType.REPRESENTATIVE\n", + "bbox = BoundingBox(min_lat=31.3323, min_lon=-109.0502, max_lat=37.0002, max_lon=-103.002)\n", + "params = {\n", + " \"schema_type\": schema_type.value,\n", + " \"min_lat\": bbox.min_lat,\n", + " \"min_lon\": bbox.min_lon,\n", + " \"max_lat\": bbox.max_lat,\n", + " \"max_lon\": bbox.max_lon,\n", + "}\n", + "\n", + "# Use HTTPX to stream the resulting geopackage response\n", + "with (\n", + " httpx.stream(\n", + " method=\"GET\",\n", + " url=url,\n", + " params=params,\n", + " headers=headers,\n", + " timeout=60.0, # GLUE API requests can be slow depending on the network speed. 
Adding a 60s timeout to ensure requests go through\n", + " ) as response\n", + "):\n", + " response.raise_for_status() # Ensure the request was successful\n", + " with open(\"ras_xs_bbox.gpkg\", \"wb\") as file:\n", + " for chunk in response.iter_bytes():\n", + " file.write(chunk)\n", + "\n", + "# Load geopackage into geopandas\n", + "gdf_bbox = gpd.read_file(\"ras_xs_bbox.gpkg\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Pull and filter reference divides/flowpaths from the catalog\n", + "reference_divides_bbox = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_divides\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf_bbox[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "reference_flowpaths_bbox = to_geopandas(\n", + " catalog.load_table(\"conus_reference.reference_flowpaths\")\n", + " .scan(row_filter=In(\"flowpath_id\", gdf_bbox[\"flowpath_id\"]))\n", + " .to_pandas()\n", + ")\n", + "\n", + "# Convert all data to the EPSG:4326 coordinate reference system\n", + "reference_divides_bbox = reference_divides_bbox.to_crs(epsg=4326)\n", + "reference_flowpaths_bbox = reference_flowpaths_bbox.to_crs(epsg=4326)\n", + "gdf_bbox = gdf_bbox.to_crs(epsg=4326)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "ref_div_ex_bbox = reference_divides_bbox.explore(color=\"grey\")\n", + "ref_flo_ex_bbox = reference_flowpaths_bbox.explore(m=ref_div_ex_bbox, color=\"blue\")\n", + "\n", + "# View the data\n", + "gdf_bbox.explore(m=ref_flo_ex_bbox, color=\"black\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/iceberg_data_viewer/viewer.ipynb b/examples/iceberg_data_viewer/viewer.ipynb new file mode 100644 index 0000000..0a5bc5e --- /dev/null +++ b/examples/iceberg_data_viewer/viewer.ipynb @@ -0,0 +1,310 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Hydrofabric Geopackage Viewer" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "Reads/extracts files from the NGWPC pyiceberg resources" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "\n", + "import pandas as pd\n", + "from ipywidgets import interact\n", + "from pyiceberg.catalog import load_catalog\n", + "\n", + "from icefabric.builds import load_upstream_json\n", + "from icefabric.helpers import load_creds, load_pyiceberg_config\n", + "from icefabric.hydrofabric import subset_hydrofabric\n", + "from icefabric.schemas import IdType\n", + "from icefabric.ui import create_time_series_widget, get_hydrofabric_gages, get_streamflow_data\n", + "\n", + "# dir is where the .env file is located\n", + "load_creds()\n", + "\n", + "# Loading the local pyiceberg config settings\n", + "pyiceberg_config = load_pyiceberg_config()" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Loading SQL Catalog\n", + "# This catalog can be downloaded by running the following commands with AWS creds:\n", + "# python tools/iceberg/export_catalog.py --namespace conus_reference\n", + "# python tools/iceberg/export_catalog.py --namespace ras_xs\n", + "# catalog = load_catalog(\n", + "# name=\"sql\",\n", + "# type=pyiceberg_config[\"catalog\"][\"sql\"][\"type\"],\n", + "# uri=pyiceberg_config[\"catalog\"][\"sql\"][\"uri\"],\n", + "# warehouse=pyiceberg_config[\"catalog\"][\"sql\"][\"warehouse\"],\n", + "# )\n", + "\n", + "# Loading Glue Catalog\n", + "catalog = load_catalog(\"glue\", **{\"type\": \"glue\", \"glue.region\": \"us-east-1\"})" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "## Getting streamflow observations for different gages\n" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "Step 1) getting the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "from pprint import pprint\n", + "\n", + "# Using a local warehouse for this example. This was created through the following command and NGWPC test AWS account\n", + "# python tools/pyiceberg/export_catalog.py --namespace streamflow_observations\n", + "streamflow_obs_df = get_streamflow_data(catalog_name=\"sql\", **pyiceberg_config[\"catalog\"])\n", + "\n", + "# List all gauge IDs\n", + "\n", + "pprint(streamflow_obs_df.columns.tolist())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# select the gauge ID you want to use:\n", + "gage_id = \"12145500\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "streamflow_obs_df[\"time\"] = pd.to_datetime(streamflow_obs_df[\"time\"])\n", + "\n", + "# Scatter Plot of observations\n", + "create_time_series_widget(streamflow_obs_df, point_size=5, time_col=\"time\", flow_col=gage_id)" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Geopackage Summary" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "Print list of layers, number of catchments, and list of hydrolocations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "pprint(get_hydrofabric_gages(catalog=catalog))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Using a local warehouse for this example. 
This was created through the following command and NGWPC test AWS account\n", + "# python tools/pyiceberg/export_catalog.py --namespace conus_hf\n", + "gage_id = \"11280000\"\n", + "layers = [\"flowpaths\", \"nexus\", \"divides\", \"network\", \"hydrolocations\", \"pois\"]\n", + "domain = \"conus_hf\"\n", + "\n", + "graph = load_upstream_json(\n", + " catalog=catalog,\n", + " namespaces=[domain],\n", + " output_path=Path.cwd() / \"data\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "layers_df = subset_hydrofabric(\n", + " catalog=catalog,\n", + " layers=layers,\n", + " identifier=f\"gages-{gage_id}\",\n", + " id_type=IdType.HL_URI,\n", + " namespace=\"conus_hf\",\n", + " graph=graph[\"conus_hf\"],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Layers:\")\n", + "print(list(layers_df.keys()))\n", + "row, col = layers_df[\"divides\"].shape\n", + "print(\"Number of catchments:\")\n", + "print(row)\n", + "print(\"Hydrolocations:\")\n", + "hl = layers_df[\"hydrolocations\"].hl_uri.tolist()\n", + "hl_str = \", \".join(hl)\n", + "print(hl_str)" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "## Map of divides, nexuses, and flowpaths" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "Plot divides, nexuses, and flowpaths on a map." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "divides = layers_df[\"divides\"].to_crs(epsg=3857)\n", + "\n", + "flowpaths = layers_df[\"flowpaths\"].to_crs(epsg=3857)\n", + "nexus = layers_df[\"nexus\"].to_crs(epsg=3857)\n", + "\n", + "div_ex = divides.explore()\n", + "fl_ex = flowpaths.explore(m=div_ex, color=\"yellow\")\n", + "nexus.explore(m=fl_ex, color=\"red\")" + ] + }, + { + "cell_type": "markdown", + "id": "18", + "metadata": {}, + "source": [ + "## View Layers" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "Select layer and print table" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "# Configure pandas display options for better formatting\n", + "pd.set_option(\"display.max_columns\", None)\n", + "pd.set_option(\"display.width\", None)\n", + "pd.set_option(\"display.max_colwidth\", 50)\n", + "pd.set_option(\"display.expand_frame_repr\", False)\n", + "\n", + "# Interactive display with limited rows\n", + "interact(lambda layer_name: layers_df[layer_name].head(20), layer_name=layers)" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/icechunk/icechunk_version_control_examples.ipynb b/examples/icechunk/icechunk_version_control_examples.ipynb new file mode 100644 index 
0000000..c127894 --- /dev/null +++ b/examples/icechunk/icechunk_version_control_examples.ipynb @@ -0,0 +1,364 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "## Icechunk Version Control for Land Cover Data\n", + "\n", + "### Overview\n", + "\n", + "This notebook demonstrates **version control capabilities for geospatial raster data** using Icechunk, a new cloud-native storage format. We'll showcase how slowly-changing time-varying raster data (specifically NLCD land cover data) can be managed with full version control, enabling reproducible research and data lineage tracking.\n", + "\n", + "#### What is Icechunk?\n", + "\n", + "**Icechunk** is a cloud-native storage format that brings **Git-like version control** to large scientific datasets. Icechunk is very similar to Iceberg, but for data-cube/tensor data. Unlike traditional file systems where data changes overwrite previous versions, Icechunk:\n", + "\n", + "- **Creates snapshots** of your data at each change\n", + "- **Enables time travel** to access any previous version\n", + "- **Supports branching and merging** for collaborative workflows\n", + "- **Tracks data lineage** with commit messages and metadata\n", + "- **Uses virtual references** to avoid data duplication. This means existing .nc or COGs can be referenced without rewriting the data\n", + "\n", + "### Dataset: National Land Cover Database (NLCD)\n", + "\n", + "#### Source: https://www.mrlc.gov/data\n", + "\n", + "The NLCD provides land cover classifications for the Continental United States (CONUS)\n", + "\n", + "#### Land Cover Classes\n", + "\n", + "The NLCD uses standardized codes for different land cover types:\n", + "- **11**: Open Water\n", + "- **12**: Perennial Ice/Snow\n", + "- **21**: Developed, Open Space\n", + "- **22**: Developed, Low Intensity\n", + "- **23**: Developed, Medium Intensity\n", + "- **24**: Developed, High Intensity\n", + "- **31**: Barren Land (Rock/Sand/Clay)\n", + "- **41**: Decidous Forest\n", + "- **42**: Evergreen Forest\n", + "- **43**: Mixed Forest\n", + "- **52**: Shrub/Scrub\n", + "- **71**: Grassland/Herbaceous\n", + "- **81**: Pasture/Hay\n", + "- **82**: Cultivated Crops\n", + "- **90**: Woody Wetlands\n", + "- **95**: Emergent Herbaceous Wetlands" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", + "from pathlib import Path\n", + "\n", + "import icechunk as ic\n", + "import matplotlib.pyplot as plt\n", + "import xarray as xr\n", + "\n", + "from icefabric.helpers import load_creds\n", + "\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "# dir is where the .env file is located\n", + "load_creds()" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "### Opening the Icechunk Repository\n", + "\n", + "Unlike traditional file formats (GeoTIFF, NetCDF), Icechunk stores data in a **repository structure** similar to Git. Each repository contains:\n", + "\n", + "- **Snapshots**: Immutable versions of your data\n", + "- **Branches**: Parallel development lines (like Git branches)\n", + "- **Virtual references**: Pointers to external data files (avoiding duplication)\n", + "- **Metadata**: Rich attribution and processing history\n", + "\n", + "#### Virtual Chunk Architecture\n", + "\n", + "Our NLCD data uses **virtual references** - instead of copying large GeoTIFF files into Icechunk, we store lightweight references pointing to the original files. 
This provides:\n", + "\n", + "- **Fast ingestion** (no data copying)\n", + "- **Storage efficiency** (references vs. full copies) \n", + "- **Source preservation** (original files remain unchanged)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# NOTE This demonstration/example assumes the data/land_cover icechunk has been made locally to the store path, and the TIFs are in the correct location in the data path\n", + "file_location = Path(\"data/land_cover_tifs\").resolve()\n", + "store_path = Path(\"data/land_cover\").resolve()\n", + "\n", + "storage = ic.local_filesystem_storage(str(store_path))\n", + "repo = ic.Repository.open(\n", + " storage=storage,\n", + " authorize_virtual_chunk_access=ic.containers_credentials({f\"file://{file_location}\": None}),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "### Repository History: Data Lineage Tracking\n", + "\n", + "One of Icechunk's key features is **automatic lineage tracking**. Every change to the dataset creates a new snapshot with:\n", + "\n", + "- **Unique identifier** (snapshot ID)\n", + "- **Timestamp** of the change\n", + "- **Commit message** describing what changed\n", + "- **Parent relationships** showing data evolution\n", + "\n", + "This provides complete **audit trails** for scientific reproducibility.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Print repo ancestry\n", + "for ancestor in repo.ancestry(branch=\"main\"):\n", + " print(f\"Snapshot ID:\\t{ancestor.id}\")\n", + " print(f\"Timestamp:\\t{ancestor.written_at}\")\n", + " print(f\"Message:\\t{ancestor.message}\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "### Accessing Current Data\n", + "\n", + "The data appears as a standard Xarray Dataset, but with version control underneath." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "session = repo.readonly_session(branch=\"main\")\n", + "ds = xr.open_zarr(session.store, consolidated=False)\n", + "ds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "# Set up plot for 1990 land cover\n", + "ds[\"5\"].sel(year=1990).plot(x=\"X5\", y=\"Y5\")\n", + "\n", + "# Invert the y-axis to show the CONUS region correctly\n", + "plt.gca().invert_yaxis()\n", + "\n", + "# Add labels and show the plot\n", + "plt.xlabel(\"LON\")\n", + "plt.ylabel(\"LAT\")\n", + "plt.title(\"1990 CONUS Land Cover\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "### Demonstrating Version Control: Adding Metadata\n", + "\n", + "Now we'll demonstrate Icechunk's version control by **adding metadata** to our dataset\n", + "\n", + "#### The Version Control Process\n", + "\n", + "1. **Create a writable session** (like checking out code for editing)\n", + "2. **Modify the dataset** (add/update attributes, data, etc.)\n", + "3. **Commit changes** with descriptive message\n", + "4. 
**New snapshot created** automatically\n", + "\n", + "**Important**: The original data remains **completely unchanged** and accessible.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "session = repo.writable_session(\"main\")\n", + "ds.attrs[\"sample_attr\"] = \"sample_attr\"\n", + "ds2 = ds.copy()\n", + "ds2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "session.store.sync_clear() # Clears the store, but preserves snapshots and references to the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# NOTE This may take 8-10 minutes\n", + "ds2.virtualize.to_icechunk(session.store)\n", + "print(session.commit(\"Added a sample attribute\"))" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "### Verifying Version History\n", + "\n", + "Let's examine the repository history again. Notice how we now have **two snapshots**:\n", + "\n", + "1. **Original dataset** (initial commit)\n", + "2. **Dataset with metadata** (our recent addition)\n", + "\n", + "This demonstrates **non-destructive updates** - both versions coexist and remain accessible." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Print repo ancestry\n", + "for ancestor in repo.ancestry(branch=\"main\"):\n", + " print(f\"Snapshot ID:\\t{ancestor.id}\")\n", + " print(f\"Timestamp:\\t{ancestor.written_at}\")\n", + " print(f\"Message:\\t{ancestor.message}\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "### Time Travel: Accessing Previous Versions\n", + "\n", + "One of Icechunk's most powerful features is **time travel** - the ability to access any previous version of your data using its snapshot ID.\n", + "\n", + "#### Use Cases for Time Travel:\n", + "\n", + "- **Reproducing analyses** from specific points in time\n", + "- **Debugging** when something goes wrong\n", + "- **Comparing versions** to understand changes\n", + "- **Rolling back** to previous states\n", + "- **Auditing** data processing workflows\n", + "\n", + "Below, we access the **original version** (before we added metadata):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "snapshot_id = list(repo.ancestry(branch=\"main\"))[1].id\n", + "print(f\"Snapshot ID:\\t{snapshot_id}\")\n", + "\n", + "session = repo.readonly_session(snapshot_id=snapshot_id)\n", + "_ds = xr.open_zarr(session.store, consolidated=False)\n", + "_ds" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "Notice how the **original version lacks the `sample_attr`** we added. 
This proves that the data is versioned and preserved" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "snapshot_id = list(repo.ancestry(branch=\"main\"))[0].id # Latest\n", + "print(f\"Snapshot ID:\\t{snapshot_id}\")\n", + "\n", + "session = repo.readonly_session(snapshot_id=snapshot_id)\n", + "latest_ds = xr.open_zarr(session.store, consolidated=False)\n", + "latest_ds" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "This demonstrates how Icechunk enables robust version control for geospatial data, meeting enterprise requirements for data governance, reproducibility, and collaborative research workflows (FAIR)" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/icechunk_data_viewer/README.md b/examples/icechunk_data_viewer/README.md new file mode 100644 index 0000000..8f0618c --- /dev/null +++ b/examples/icechunk_data_viewer/README.md @@ -0,0 +1,70 @@ +# Topobathy Data Viewer +The purpose of this data viewer is to visualize version-controlled tif files hosted by the NGWPC program. The data viewer can be run either on pre-processed tiles, or by creating said tiles on an end-to-end full workflow. Details on how to run each are below +## If tiles are pre-created and stored on s3: +1. Move to icechunk_data-viewer folder: + + `cd examples/icechunk_data_viewer` +2. Export AWS data account credentials to your environment and run in CLI: + + `aws s3 sync s3://hydrofabric-data/surface/nws-topobathy/tiles ./martin/tiles` + +3. Start martin tile server: + + `cd martin` + + `docker compose -f compose.martin.yaml up` + +4. Tile server is now running. Confirm by checking `localhost:3000/catalog`. You should see a list of tile sources. Debug logs should be populating in console. +5. Open a new terminal and move to `icechunk_data_viewer` root + + `cd examples/icechunk_data_viewer` +6. Start jupyter lab in activated icefabric virtual environment. This will start the jupyter server at `localhost:8888/lab` + + `jupyter lab` + +7. Open `viewer.ipynb` in Jupyter Lab +8. Execute cells in `viewer.ipynb`. The map should show the tiles served from Martin. + + +## Full Pipeline - from creating tiles to viewing +1. __In icefabric repo__: Export topobathy from icechunk to TIFF using `icefabric_tools/icechunk/topobathy_ic_to_tif.py`. These will be stored locally. + + __NOTE__: Some files may require more memory than average desktop. If 'killed', move to a cluster with more memory. + +2. Clone hydrofabric-ui-tools +3. __In hydrofabric-ui-tools repo__: Copy saved icechunk TIFs to `data` folder in `hydrofabric-ui-tools` +5. Create or modify `config` files to match TIF +6. Run `build_topobathy_tiles.py` using docker or local environment as described in `README.md`. Some regions may require more memory than average desktop. +7. Tiles will be uploaded to s3 if specified in config. +8. __Return to icefabric repo__ +9. 
Sync from s3 or paste `.pmtiles` files into + + `icefabric/examples/icechunk_data_viewer/martin/tiles` + +AWS option with data account credentials in env vars. + + cd examples/icechunk_data_viewer + aws s3 sync s3://hydrofabric-data/surface/nws-topobathy/tiles ./martin/tiles + +10. Open `martin_config.yaml` + + `icefabric/examples/icechunk_data_viewer/martin/martin_config.yaml` + +11. Match tile names in `tiles` folders to source name if not correct. Source name will be the URI for tile serving. + +12. Start martin tile server. This must be done in the `martin` working directory to copy the files correctly. + ``` + cd examples/icechunk_data_viewer/martin + docker compose -f compose.martin.yaml up + ``` + +13. Tile server is now running. Confirm by checking `localhost:3000/catalog`. You should see a list of tile sources. +14. Open a new terminal and move to `icechunk_data_viewer` root + + `cd examples/icechunk_data_viewer` +15. Start jupyter lab in activated icefabric virtual environment. This will start the jupyter server at `localhost:8888/lab` + + `jupyter lab` + +16. Open `viewer.ipynb` in Jupyter Lab +17. Execute cells in `viewer.ipynb`. The map should show the tiles served from Martin. diff --git a/examples/icechunk_data_viewer/martin/compose.martin.yaml b/examples/icechunk_data_viewer/martin/compose.martin.yaml new file mode 100644 index 0000000..c4788a9 --- /dev/null +++ b/examples/icechunk_data_viewer/martin/compose.martin.yaml @@ -0,0 +1,10 @@ +services: + martin: + image: ghcr.io/maplibre/martin:v0.17.0 + ports: + - 3000:3000 + environment: + - RUST_LOG=debug + volumes: + - "./tiles:/tiles" + command: -c /tiles/martin_config.yaml diff --git a/examples/icechunk_data_viewer/martin/tiles/.gitkeep b/examples/icechunk_data_viewer/martin/tiles/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/examples/icechunk_data_viewer/martin/tiles/martin_config.yaml b/examples/icechunk_data_viewer/martin/tiles/martin_config.yaml new file mode 100644 index 0000000..e2b11ee --- /dev/null +++ b/examples/icechunk_data_viewer/martin/tiles/martin_config.yaml @@ -0,0 +1,8 @@ +pmtiles: + sources: + alaska: /tiles/alaska.pmtiles + atlantic: /tiles/conus_atlantic_gulf.pmtiles + pacific: /tiles/conus_pacific.pmtiles + great_lakes: /tiles/great_lakes.pmtiles + hawaii: /tiles/hawaii.pmtiles + pr_usvi: /tiles/pr_usvi.pmtiles diff --git a/examples/icechunk_data_viewer/viewer.ipynb b/examples/icechunk_data_viewer/viewer.ipynb new file mode 100644 index 0000000..02812c5 --- /dev/null +++ b/examples/icechunk_data_viewer/viewer.ipynb @@ -0,0 +1,155 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "9f3c3eb3", + "metadata": {}, + "source": [ + "# Data Viewer: Topobathy\n", + "\n", + "## NOTE: This must viewed in Jupyter Lab\n", + "\n", + "A JupyterGIS project to interactively view topobathy and other spatial files. \n", + "\n", + "Maps are served as pmtiles to allow high performance loading and seamless transition. Maps are classified in quantiles - each color represents 10% of the dataset in its region. 
Legend can be viewed within notebook.\n", + "\n", + "The layer list can be viewed by clicking the bottom tab on left with \"globe\" symbol after layers are loaded.\n", + "\n", + "Opacity can be adjusted on the layers by clicking the right tab \"globe\" > layer properties > opacity > okay\n", + "\n", + "Requirements:\n", + "- Pre-computed map tiles\n", + "- Running local tile server \"Martin\" on port 3000" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a3120a17", + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.image as mpimg\n", + "import matplotlib.pyplot as plt\n", + "from jupytergis import GISDocument" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7439900c", + "metadata": {}, + "outputs": [], + "source": [ + "# create the map doc\n", + "doc = GISDocument()\n", + "\n", + "# open the map\n", + "doc.sidecar()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "a05cbcdc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'6e20a7fa-e8ce-40b9-92cb-f2e13f4ff566'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Add Open Street Map basemap\n", + "doc.add_raster_layer(\"https://tile.openstreetmap.org/{z}/{x}/{y}.png\", name=\"Basemap\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "09b6ae09", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'ae71a180-bc06-4582-91bb-c0265f59fbae'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Add topobathy tile layers\n", + "doc.add_raster_layer(\"http://localhost:3000/pr_usvi/{z}/{x}/{y}\", name=\"Puerto Rico/VI\")\n", + "doc.add_raster_layer(\"http://localhost:3000/alaska/{z}/{x}/{y}\", name=\"Alaska\")\n", + "doc.add_raster_layer(\"http://localhost:3000/atlantic/{z}/{x}/{y}\", name=\"Atlantic/Gulf\")\n", + "doc.add_raster_layer(\"http://localhost:3000/hawaii/{z}/{x}/{y}\", name=\"Hawaii\")\n", + "doc.add_raster_layer(\"http://localhost:3000/pacific/{z}/{x}/{y}\", name=\"Pacific\")\n", + "doc.add_raster_layer(\"http://localhost:3000/great_lakes/{z}/{x}/{y}\", name=\"Great Lakes\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "dd6e4c55", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA0kAAAJ8CAYAAAAruerqAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsvXeUnNd5mP9MrzszO3V7L9iC3isJgk1Uo0SZoiVZtCRbsmInOTmW45O45OSXI7fYlpPYTmwrcokcOpIpkRRJEawAiUa0BRZle+9tdnd6+Wa+3x/wdzWLQoIACEDQfXhwCOx+87W5973v+9636FRVVZFIJBKJRCKRSCQSCQD6O30DEolEIpFIJBKJRHI3IY0kiUQikUgkEolEIilAGkkSiUQikUgkEolEUoA0kiQSiUQikUgkEomkAGkkSSQSiUQikUgkEkkB0kiSSCQSiUQikUgkkgKkkSSRSCQSiUQikUgkBUgjSSKRSCQSiUQikUgKkEaSRCKRSCQSiUQikRQgjSSJRCKRSCQSiUQiKUAaSRKJRCKRSCQSiURSgDSSJBKJRCKRSCQSiaQAaSRJJBKJRCKRSCQSSQHSSJJIJBKJRCKRSCSSAqSRJJFIJBKJRCKRSCQFSCNJIpFIJBKJRCKRSAqQRpJEIpFIJBKJRCKRFCCNJIlEIpFIJBKJRCIpQBpJEolEIpFIJBKJRFKANJIkEolEIpFIJBKJpABpJEkkEolEIpFIJBJJAdJIkkgkEolEIpFIJJICpJEkkUgkEolEIpFIJAVII0kikUgkEolEIpFICpBGkkQikUgkEolEIpEUII0kiUQikUgkEolEIilAGkkSiUQikUgkEolEUoA0kiQSiUQikUgkEomkAGkkSSQSiUQikUgkEkkB0kiSSCQSiUQikUgkkgKkkSSRSCQSiUQikUgkBUgjSSKRSCQSiUQikUgKkEaSRCKRSCQSiUQikRQgjSSJRCKRSCQSiUQiKUAaSRKJRCKRSCQSiURSgDSSJBKJRCKRSCQSiaQAaSRJJBKJRCKRSCQSSQHSSJJIJBKJRCKRSCSSAox3+gbuRVRVRVVV0uk0er0eg8GAwWAAQKfT3fS5s9ksiqJgNBoxGo3o9XfG1s1kMmQyGSwWC0bjpaF0s893K9DefyaTQVVV8Z7gw78/VVWBS+8GQK/X3/S18/k8uVwORVEwmUwYDIa74j3/LKOqKtFoVIx9bX5/2GhjIZlM4nA40Ov1d91YUBQFRVEAMJvN4v7y+TypVIpcLoder8dkMmGxWD7w+bU5lkwmATCZTJhMplt09zePJn+SyeQVMvp2yB9t7dHWnZsdI4qikMvlgEvvWqfT3XVjTnJ1tLmSSqUAMBgMYq5o81RRFPL5PA6HQ3zX2pz6ML9nTZal02lsNpsYV3fL2Mrn8+J9aPP4dtyb9p1p34tOpxPf2Y1cX5MJmlwwm81Sh/gASCPpBtAGXDabRVVVdDodZrMZuDSINQV9cHAQp9OJy+XC7XbfkkGZz+eZn59ncXERr9eLz+cT175VZLNZcrkc+Xz+it9pz6rX61lYWGBkZITa2lp8Pt9tUxQL0d61TqdbYYxms1lmZ2fJZrN4vd5b9v6vh1wux8zMDKqqYrPZ8Pv9N3XtbDZLPB5nfn6eQCCA0+m8bQL7XkZVVfL5PNlsFkAYO9p7VVVVzAOdTrfinedyOTo6Oqirq8Pr9eJwOG7pfQHivjRHi06nI5vNEolEGBoaYvXq1cLIuNHFE36irOTzeTHn9Xq9+PNBHTyxWIzZ2Vl0Oh0VFRWYzWby+TzxeJyBgQGy2Sx2ux2fz0dZWdmK9609dy6XE/8upFDJGxgYQKfTEQgECIVCH/j5bxZt/CiKIpwh2rMoisLAwAA+nw+3231Lx8f73VM2m2VoaIji4mJcLpdQQG+URCLB8vIyiqIQDAax2+1i3ZN8cArlTuEaq61hNzLn3u96ExMT6HQ6ioqKCAQC5HI5FhYWWFxcJBKJoCgKbW1tRCIRlpeXCYVCN7VuFRpfl6PNFUVRhCxraWnBbrffMR3iag7tTCbD8vIy0WiU4uJiiouLb9uYz2azhMNhUqkUFouFUCh0U9fO5XKkUimGh4cpLy/H6XTecr3xXkUaSTdIKpVifHyceDyO1Wqlubl5xQI5Pz/PH/3RH7F69Wq2bt3K9u3bb4kAyGQyvPLKKxw4cICHH36YRx99FL/ff9PnLWR+fp6FhQWWl5dX/Fyv12OxWKitraWoqIjXXnuNP/mTP+G3f/u3eeyxx7Db7bf0Pt4PbbHp7+/HbrdTXFyMx+MBYGlpiVdeeYVwOMz999/P5s2bb5sXd3l5me9///vodDoaGxt55JFHbsrTvbS0RHd3Ny+88AIf+chHWLduHT6f7xbe+c8m2o7MyMgI+XyesrIyPB7PCkN7eXmZSCSCzWajpKREjKF4PM4nP/lJvvnNb/Lggw/S3Nx8S+8tm80yNTWFoii43W7xfUciEc6cOcOf/Mmf8O1vf5tAIHBDi51mgKiqSiQSYWFhgVgsRjqdRlVVrFYrDocDl8tFcXHxBxq/Fy9e5Pvf/z4Gg4F/82/+DcFgkGQyyenTp/nmN79JNpulqamJvXv38vnPf/6K+5qammJ2dlbsxmoYDAZcLhe1tbVYLBZ+7/d+D71ez+OPP87P/dzPfeB3cLOoqko8Hmd6ehqr1UpJSYkwCKPRKH/2Z3/G3r172bZtGw0NDbflfjTn0B/90R/x6KOPsmHDBurq6m4q2mBoaIjDhw8zPz/P448/Tltb2x2LXrhXiEQiTExMEI/HURRFOB89Hg8ejwev13vL1qt8Ps///b//F7PZzJo1a/jIRz5CPB7ntdde48iRI5w/f550Os1f/MVfcPr0aTo6OvjEJz7Bww8/fMPf8/z8PLOzs8RisRXODp1Oh91up7Kykng8zqlTp/jP//k/8+1vf5vm5macTucteeYPSnd3N263G6/Xi8vlAmBhYYGTJ09y/vx5du3axfbt2z90w0J7V4uLi7z66qtMTk5SU1PDk08+eVPnTSaTDA0N8V//63/lF3/xF2lvb79pw+tnBWkk3SAdHR380z/9E11dXdhsNp555hnsdvs9Meh+9KMf8cYbb9DV1UV7e7v4uclkIhAI8Au/8Au3ZdG/HtLpNL/7u79Lc3MzDz74IA888AAAPp+PJ598knw+f9Oe1A+Coih0dnYSDodpaGigqanppkOBPB4PVVVVlJSUcPLkSUKhEA6HA5vNdovu+meTxcVFLly4wN/8zd8wPj7Ov/pX/4p9+/YJg2R5eZmDBw9y9OhRmpqaePrpp7Farbfl3paWlviHf/gHwuEwe/bs4fHHHweguLiYHTt20NraSjAYvCnHSzqdZmJigueee47BwUGWl5fJ5XJYLBbsdjter5fKykp27NhBW1vbDc8hVVWZnZ3lhz/8IWVlZXzpS1+ipaXlqgqRoij8wz/8A/v37wegqqpK/M7hcNDc3MwXvvAFgsHgjT30LSSbzdLV1cV3vvMdqqur+fKXv0xJSQl6vR63283v//7vY7PZbt
uYAZidneXo0aPCcRcKhW7aOdfU1ERfXx+jo6McP36c2traO+b1vxfI5XKcPn2ab3/72ywsLGC1WikqKiKbzWKz2Whra+OJJ56grq7ullxPr9fzq7/6q+h0OrHz3N/fT2dnJ2azmX/37/4dGzZsoKSkhMbGRh5//HGcTudNfb+vvvoqzz77LDMzM1RXV4uQT4PBQEVFBZ/+9KfvmEF0NX7t136N++67j49+9KNs374dgGAwyL59+9i9ezdWq/W2hfTmcjl6e3sZGhrCbDazZcuWm9ZfNCfO6tWrOXHiBCaTCa/XK3eTrgNpJH1AtBCc48eP43K5qKysZGRkhO7ubtra2q6puGphYV1dXSwsLJBIJFBVlUAgQFVVFcXFxWInZnZ2lomJCWZmZkgkEpjNZkKhEKWlpRQXF19xbi0Er7+/Xwghj8fD8PAw8/PzRKNR8vk8Ho+HhoYGvF7vey7cmUwGk8lETU0NX/nKV8TP9Xo9NpuN8vLyq04ubdt6YWGBnp4e4vE4RqMRl8vFqlWrcLvdRKNRZmdnGRsbY8+ePSLuOZvNMjY2xvT0NNXV1fj9fhRF4ezZsyLUw2g0UlJSQn19PXa7nUQiwcWLFxkaGiKTyWAwGFheXiYYDNLU1MTExASZTIaKigrhNVleXmZmZoaBgQHS6TQulwu/309VVZUIyRscHGRkZIRQKEQkEmFpaQlVVSkuLmbVqlUUFRVddQHJ5/Mkk0n6+/vR6/V4vV4CgQAA4XCYCxcuoKoq1dXV9PT0oCgKPp+PYDCI2+2mt7eXpaUl9Ho9Pp+PdevWifCioqIiqqqqOHr0KDMzM4RCIWkk3QT5fJ65uTmGhobw+/3Mzs4yNTXF+Pg4Xq+XfD7P2NgYvb29XLhwgaWlJYqKiqiurqasrEx4GzVyuRyxWIzu7m6WlpbEeCwtLaWhoUE4UDKZDD09PSJfLpfLMTs7i9vtpqqqirKyMoxGI2fPnqWnp4fZ2Vn0ej2KorBq1So8Hg+KotDR0cFDDz0kHACKojA+Ps7w8DCJRAIAp9PJunXrrhqemU6nmZyc5JVXXuH8+fPU1dWxYcMGAoEAdruddDpNMpkU11ZVleHhYbG73NraitFoJJfLsby8TG9vL4FAgLKysivedXd3N++++y6dnZ04HA66u7sxm81iR7oQTU663W6am5v56Ec/Kn5nMpnweDxXvHvtc/l8nsXFRebm5hgbGyORSOBwOCgtLaWyspKioiKmp6eZmZnBYDDQ1NSExWIRcmF6eprh4WHuv/9+VFUlHA4zODhIOBxGr9fjcrkIBoPU1dVhNBqZnJyku7ubnp4ewuEwL7zwAnV1dYRCIaqqqjhz5oxwbrjdblRVZXp6esVOWTAYpLq6Gp/Ph8ViQVVVTp8+jaIomM1m0uk08/PzuFwuAoEA1dXV13TGZbNZ5ubm6O7uprq6muLiYiwWixibR48eJRQKoaoqc3Nz5PN5GhsbcblcKIpCX18fy8vLeDweysrKqK6uFnljpaWlBAIBzp49y8c+9jGsVqs0km4C7fttbW1l7dq1VFRUMD09zf79+zl16hQ+n49QKLRiDQfw+/3U1tbi9XqF0p7L5VhcXGR2dpbR0VHS6TR2u51QKER9fT02m42+vj4MBoOYP0eOHKG/vx9VVYWj1+PxMDExwfT0NHV1dVRXV6OqKrFYjLm5OQYGBkgkEmJNb2xsJBQKXXW3SZN/q1ev5oknnliRU+N0OqmpqRFyqhBVVVEUhenpafr7+4nH4+h0uhVrbzQaZWJignA4zNatW4Uuo0WVJBIJ3G43JSUlpFIpurq6iEQi4p4qKiqEPMjlcpw6dYrp6WnOnTuH1WplamqKUChEXV0dyWSShYUFysrKKC0tRa/XE4/HGRkZYXh4GEVRsNvt+P1+6uvrcTqdKIrC3Nwc58+fp7m5mcnJSRHS6Pf7WbVqFQ6H46pGl6ZDdXd3k8vl8Pv9IpR4YmKC2dlZ4vE4paWl9PX1kc/ncblcVFRUUFRUxMDAAMvLyyIyYtWqVZhMJoxGIw6Hg9raWo4fP87U1BRLS0t3hbPpbkcaSR8QLZRicHCQhoYGiouLyWaznDhxgrq6uvc0PtLpNL29vczOzooE5uHhYWKxGHV1dcJreubMGUZGRgiHw8ClXAlNOLnd7hXnzOVyRCIRTpw4wejoKD6fj+LiYoxGI8PDw0xNTYkJqgm8pqYm6uvrgWvHPGtxsDt27Fjxcy2J8FqCcW5ujrNnz9LX1yeSty0WC8lkkq1bt5JIJBgfH+eNN96gvb0dn8+H0WgknU7T2dnJ5OQkdrsdt9tNIpHgwoULxGIxca7h4WGMRiMVFRVks1nm5+dJJBIsLS0xMTGB2+1GURRKS0vp7u4mFouJZ9EMmLNnzzI1NSWeJRAIkMlkWL16NSaTiaGhId58800qKysxmUxEIhHS6TRWqxWz2UxTUxN2u/2Kd5DL5YjH40xMTGCz2VbkIoTDYd555x0SiQSbN29mdHSUaDQqlJ+SkhKGhoZYXFwklUphNpspKSkhFAoJRSUUChGNRllcXCQWiwkD7F7YvbzdaMrn7OwsVVVVIgZ8YGCA9vZ2MVeWl5dZXl7GbDbT39+P2WzG6XRe4QXV5EJPT48wklRVZWBgAIPBIBblTCZDR0cHi4uL6PV6ioqKmJubQ1EUwuEwqqpSWVnJ/Py8yA+Ympqir69PfN9aKOmOHTuwWCwivPfo0aNMT0+jKAoGgwGbzUZ9fT1Wq1UUD9HQxumpU6ew2+20trayatUqgsEgNptNzKlkMklRURE6nY7h4WHGxsYAaGxsxGAwiPs+duwYra2tV3XihMNhodQYDAbm5uZYWFigpKTkqt+NTqfD5XJRX1+/Qv7odLoVhVAuJxKJMDAwwNDQEBMTE+RyOXQ6HRMTEywuLrJ161bC4TDnz58HIBQKCSVhZmaGjo4O+vv72blzJ/l8noWFBS5cuCCcTGazWTiYysvLicViLC0tEYlEMBgMDA0NiQRvv9/PO++8w9atW7Hb7bhcLmKxGB0dHYyOjrK0tARcyquKxWI0NjZSWVkJwIkTJ4hEItjtdux2O/Pz8+h0OsrLyzGZTCJ87vJ5n0wmCYfDTE9Ps23bNhwOxwpD9kc/+pHYwdPG9dLSEsXFxej1+hVrjhbSWFlZiV6vx+/34/f7OXDgANFoFLfbLfMibxKr1UpFRQXr1q2jqamJ6elp3n33XXp6eujq6iKRSDA0NMTU1JQYg3q9nkQiQWNjIxUVFcCl+dXT0yN0Cy0sPplMUlpaisVioaOjQzg+6+vrmZqaYnl5GZ1OJ+RgNpsV66PZbKayspJYLMbY2Bjnz59ndHRUFELS8t3eS8k2m81UVVWxbdu2FQVcCnOSLkcLcT527BiTk5Mi5NZisZDP52lpaSEWizE6OsqhQ4doaWkR+kg2m+Xs2bNks1lqamrwer3CYasZSVp+Vjabpbq6GpvNxuTkJNlsVugQ2rW8Xi8zMzP09fWh1+sJhULCQDp06JCYw5pels/naWtrQ1EUJiYmeOWVV8RaHYvFSCaTmM1mz
GYzdXV1V82T1pxEQ0NDWCwWfD6fcIZOTk5y5swZxsfHWbt2LePj48KJPjo6SlVVlXDqpFIpenp6CIVCIlxac7ZnMhkWFxcJh8PSSLoOpJH0Aclms4yPjzM/P8+uXbtE6NP+/ft59NFHRU7M5aiqSiqVYmZmRlQY0Sb1wsIC8Xhc5Bb98Ic/JJVKCY9kOp0mHo+TSCRWJEJqydDDw8P8v//3/ygtLcXj8WCxWIjH4ywtLZHNZoUQOXHiBENDQ+zevZuampr39QRqXh0NTUm5FpFIhL6+Pl566SU8Hg9+v59EIsHIyAidnZ00NDSg0+kIh8O8/vrrfOxjH8PhcOBwOIjH4xw4cEB4nxRFIZFIMDMzIzzh0WiUU6dOYTabhVfZbrdjMplEyEJxcTFFRUWk02kuXLjA/Pw8lZWVtLW1MT09zaFDh3j11Vdpbm4mGAwyOjrK0NAQc3Nzwvs6MDDASy+9hNfrZd++fRiNRpaWlhgZGcFoNBIMBrFYLFe8Cy2pfmpqitWrV+N2u4V3OBwOc+DAAaampkgkEtTW1rK0tER/fz/pdJry8nJKSkrI5/NMTExw/vx56uvrefDBB0WloVAoRCqVYnFxkWg0KpOnb4JIJMLs7CzhcJht27ZRWVnJO++8w7lz53jssccwGo2YzWasVqvIz/F6vTidTqxW6xXvXZuL4XBYGN+xWIwDBw6g1+vZvXs3LS0tpFIpTpw4weDgIC6Xi/Xr12M0Gjly5AgzMzMoikJlZSV2ux2LxYLVasXpdOL1erHZbEL+PP/88/z7f//vcblcLC4ucvbsWX7wgx+IXViz2czy8jLJZPKqydOLi4sMDw8zMDDA1772NbZu3So8pVq1Ke05td3e4eFhYWB84hOfwGq1oigKCwsLHD58GLPZTFtb2xXXstlsFBUVYTKZsNlsuFwunE7ne4avaLtshfLnWrJHi+MfGRnh2LFjDA8Pix1ibTewo6OD9vZ2kskk3d3dJBIJYRTm83kGBwd58803RRELbfdlbm4Oj8dDMplkYGCA06dP43A4CAQCGI1GLBYLZrNZeOLdbjdWq5V0Os3rr7+O2+0Wu4/j4+O8+OKLpFIpgsEgXq+XEydOCPkfCoUwGo0cO3aMkZER3G43O3bsIJ/P093dzfDwsFBeC5XOwjG9sLBANBqlpKQEm82GwWAgk8mwtLTED37wAzZv3kxrayuhUIixsTH6+vrE+CopKcFgMIh1wmazCcPN6/USDAZFvmowGLytYYT3IpqxYLVasdvtYtdxeHiYubk5YfRq+kImk+HYsWMiEqW8vJx8Pk9PTw9vv/02Fy5coLq6WuQAas69fD7P8ePHsVqt6PV6Vq1aJeSLtkPqcrnQ6/X09PTw5ptvUltby5YtW0SI5cGDBwkGg5SXl4vKjQsLC1ctrlJILpcjm82uMOoNBsNVDWzNMTU4OMg///M/i13OXC7HwMAAs7OzFBUVYbVaicVifO973+Opp54SRQhisRiHDx/G5/Ph8XhEcQpt99tisRCNRnn77bdFUZympiYRWmi1WnG5XHi9XoqKikgmkwwODnLixAlCoRDt7e3COHv22WeFI2J2dpauri6Wl5epqKjAYDCIua4ZNDabjXQ6zfHjxzGZTDgcjqtGpGh64vDwMGvXriUYDAq5Nz4+ztGjR+no6CCRSFBdXU00GmVqaooDBw6wadMm7HY7mUyG+fl53n33XTZv3kxLSwsej0dEt2i61NzcHKtWrbolY/leRhpJH5B4PM6RI0coLS2lrKyM5uZmysvL+dM//VN6enpwOp1X7PbAJQPD7Xbzmc98RuwM5PN59u/fz8mTJ+nr66OxsRGr1cqpU6e4//77+fmf/3mamprIZDKiLGehojAxMcFrr73GM888w6OPPsqjjz5KfX09DoeDVCrFI488IgQSwPbt2/nd3/1dVFXl/vvvv2pojMbMzAydnZ088cQT4mcOh4OmpiZ+6Zd+SXixNLLZLN3d3XR0dKAoCr/yK7+Cx+MhnU7T39/P1772NTo6Oli3bh3V1dU0NTVx4MABsaU/NjbGhQsX+OpXv0pVVRUOhwOLxcKXv/xlHA4HOp2OpaUlvve973H+/HkqKyvx+/1s2rSJ0tJSWlpaePjhh3nwwQfR6/VMTU1doVT9+Mc/pqOjA6/Xy6//+q/jcDg4d+4cJ06c4NixY9x///1i58dgMOD1ennqqafweDyMjY3x2muv0dHRwbZt2ygqKrrqrl4ikSASieB0Oq8Ih9PpdPh8Pnbs2MFDDz3E4OAgL774IsePH6eqqopPfepTuN1uOjo6WF5e5sUXXxTXMhgMwnsbjUaJRCKiNKnkgzM6OsrMzAwA9fX1lJWVcfToUQYGBujq6hLjtLGxkdnZWZqbm/mFX/gFsYOohb9omEwmKisr+fmf/3ksFgsGg4F4PI6qqhw4cAC32y0WJM1L2d7ezuc//3lcLhcGg4HOzk4OHjzIpz71KTZt2sSxY8coLi5m7969fOpTn8JoNDI1NUVvb6+4bjqdpq+vjxdffJFgMMiXv/xlysvLMRgMpFIpXC7XVY2RaDTK/Pw8AJWVlTidThFK8td//de8/fbbpFIpvF4v9913H1/+8pdv+F2vWbMGi8XCkSNHaG1t5WMf+xi1tbXXNHry+TwXL16ks7OTF198Ufy8srKSDRs28OSTT64oXKIVUDh69ChLS0s0NTXx+OOPU1RUJGTGD3/4Q44ePcqqVavErm1nZycbN25kaWlJeO9/53d+R3x/7e3tVFdXi3C0d999lwMHDrB//352795NRUUFbW1tNDQ0UFNTw2c/+1nKysrI5/MsLS2tKH2eTCZ5/vnnWVxcZO/evXzyk5/EarXy8ssv8/LLL5NKpdi+fTterxe4ZFjW1NTw9NNPY7FYePbZZ7l48SKnTp1i37591/xOY7EYgPg+CzGZTCJ3c/369Zw9e5Y/+ZM/Qa/Xs3btWj796U8Lh0xXVxcHDhzg4x//OABFRUXinY+NjVFdXS12GCU3j6qqXLx4UYT2btu2jUAgwGOPPSZCrvP5PKFQiOeee450Os0jjzxCPB7n1VdfZWFhgdbWVn7pl34Jh8MhqrVpoZna96SFrG/dupX5+XkcDgef+MQnWL169RW7k9FolOPHj9Pd3U1JSQlf/epXCQaDwoGh5Rhdi/n5eZ555hkOHTokxqLJZKKtrY2vfOUrV4xhVVXp7+/nRz/6EcFgkC9+8YsEAgFSqRQXLlzgm9/8Jps3b2bDhg1s2rQJm83GsWPHsNvtYk7Pzs4K2e1yuWhubqa2tlaUvU4mk3z3u99laGhIFIF58MEHcblctLW18eijj7Jz506xs3o5b731FocOHaK1tZVf+7Vfw2KxMDw8zIkTJ3jllVfYunWr0I30ej319fV86lOfIhQKsbS0hNls5syZM7S1tVFRUXFF1Uut+MrMzAwWi+UKp7uWCvHEE0/Q2trK5OQkBw4c4O///u9RFIWPfexj+P1+hoeHeffddzly5AiBQACPxyOKZmhrw+WFuSRXR2pYHwAt36azs5PVq1cTCoVw
uVwYjUY2btzIqVOn8Hg8bNiw4YrPagLl6NGjnD17lqGhIZaWlkR8/KZNm4R3oLW1VYR+bN68mS1btgjvj+YVnpyc5OLFixQXF/Poo4/y5JNPEgqFhHdPURSOHTvG2bNnGRgYIBwOE41GRRWXmZmZ9zSSXC4XLS0t/PIv/7L4mclkori4GJ/Pd4WAUxSF0dFRTp48ybFjxxgbG8NgMKCqqthN6u/vp7GxEb/fz9atW3n77bd57LHHiMVi9PX14XA4qKqqwuVyiTyLF198kc7OThF+ND4+TigUYvPmzSLRVSvdqXn/tfd9OT09PQCsW7dOeGgqKyuZmJhAURSR+wSXQiHWrFkjcjSi0SiBQIDOzk6SyeRVQwUKvd9X61+l0+nw+/1s3LhR7Hpp77Oqqkp4Z7XSyJ2dneI6mkffaDSSzWbJZDJX3SGQvDdazLdWqKC4uFjEc7e0tNDZ2cnLL7/M2rVrxbjSSmFr7x+uHF/5fJ5YLMZLL73EhQsXRDjLyMgIqVSKnTt3imNNJhPBYFDkymlx8trOp6qqK0oBGwwGsXNwuSJTGCby8Y9/nNLSUmG8v1cfJW3H5PJzms1m9uzZQ3l5uQh/HR4efl+P8XtR6DnWvOfv1TtMp9NRWloqFBiNoqIigsHgVRWLeDwuvL5aaJleryeXyzE5OUk0GqW3t5dNmzZRU1MjKkZqu3BTU1N4vV7WrFmDyWQilUoxNjbGm2++ycmTJ4lEIszNzbG4uIjH4yGTyQiFQ/ueNAPjarJBURR6e3upqKigpqYGv98vjJMDBw6I8xcXF2MwGAgGgzQ0NIgyzH6/H6fTyfz8vAidvhxtB0xbay5/txaLhbq6Ourq6nC5XJSWlmKz2SgrK6OpqUkoZCUlJYyMjDA3Nyc+q8lWvV5PLBYTfZMkN87k5CRnz57l+eefx263s7S0hMvl4r777uOJJ54gk8mIXL7e3l4WFxeZnp5mdHQUk8lEOp0mlUoxMTFBcXExbW1tYrwU9h0q/K60OVgo17T5WDimtKqXc3NzqKpKc3OzCD/Xfv9+BrLT6WTz5s189KMfXTH//X4/gUDgipwkVVWZmZnh0KFDTE1N0dPTI4zDWCzGxYsXGRsbo7m5Ga/Xy969ezl+/DiNjY243W4GBwcxmUyUlJSI6oC5XI7vf//79Pb2MjMzQzQaZXx8nMrKSiorK0kkEuJY7V1cqxeYVugmkUiwc+dOvF4vBoOBUChEdXU1ZrOZ+fl5ERGk1+vZuHEjwWBQ5FFWVVXR2dlJPB4nnU5ftTVAPp8nk8mINacQLddMqwTodrsJBAKUl5dTVlYm0i203eSZmRnRIwsQeom2wyd5f6SRdJ1ozSMnJiZEeNbAwABut5tsNsvg4CDpdJrGxkbWrFlzxee10LrXXnuNXC5HSUkJ1dXVTExMiPwYrbLUpz/9aZG0PT8/z0svvUR/fz+rV68W3mij0SjCKBYWFvB6vWL7XNvVOXz4sBAC5eXlQmHIZrOiEeO1MJvNBAIBNm3aJH6m1+sxm81C+SpEaxSpqipVVVVs3rxZHJPP51m7di3t7e14PB6sViurVq3i5ZdfZmJiAr1ez9jYGKFQSDzH8vIyp06d4sCBAyJZWqtUF4vFyOfzQlBrwkz7+7UUusKEdk34aOEywArlQys3rDVv1ZQELUH8atfQBK1er7/mMSaTCbfbLQSgFqNss9lWXMtisYhyzPCTxPRcLnfNcAXJe6OVaI9EIqK6UzQaJZVKYTKZhLI8MDAgxrLGezU6zOVyRKNROjo6OHHihIjF10I4zpw5s2JB0nIGCismaaFtWgz+1cb1tb5vrVdPUVGR8Ji+H5oyoC2WmsFtMBioqalZkbeSTqfFfWjv8fL/v5/BfrXnea9jnU4n1dXVK+SPFlZ7edEYzfuq7XyVlJSsKMmuNVZtbW0VoXLFxcWMjo4yNzfHhQsXSCaT1NfX43K5yOfzDA0NcerUKU6ePEkgEKCyslIkxl8rzKjwe7paKFEymRTfu2YkaqE+6XR6RV8so9GI3W4Xx2kGyrUMJO1zmszVxvrlv7fb7cKxpJ1TCyfVFGXt54Ul2DXZU/h7yc1hsVgoKSmhrKwMv98v/l1fX09JSQm9vb0cPnyYaDQqigv5/X4WFxdRFEX0adScctp3WMi1xmnh36+1bmrfuTZuNEfN9a47JpOJ8vJyNm7cuMLw0KpnFirvGul0WuRcaX3gNPmyceNGWlpaRO+vHTt28Pd///cMDw/j8XhETrbmUEgmk7z88sucOnUKh8MhcvlsNtuKRrGXv5tCOXX5+9DCF91utzA0tcggbX5qslCn0+HxeIRM1kIrNUfqtWSmTqcTc/Hy+zMajdhsNhHRoDnQtLBwrdefpq9oUUiFz6Cq6opeXJL3RhpJ14mqqiwuLjI+Ps7y8jLj4+Oiillhsp32+8tRFIV33nmHzs5Otm3bxkMPPURJSQlnz57lrbfewmQyCQ/yww8/zOrVqxkcHKSzs5MDBw4wOjpKLBajtLQUuOQl1uJf+/r6GB8fF5WPMpkM58+fFxX3Nm/eTF1dHYlEgsOHD1918l2ONqErKyuv6e29HIPBIMLQnnrqKSEYNeGr5XTk83mqqqowm80MDQ2hKApTU1MihMNoNLKwsMDBgwcZGhrigQceENW0Dhw4wJEjR1act1B5ey+Pt8lkIpvNCkGnGabauyj0qBfmYhQKzssVxMvfiWbgZDKZqyo0hd6hQq+e5vnV/miGloa2A6JVRbpaTpTk/cnn84TDYUZHRxkbG2NpaYnZ2VngJ32TJicnmZ2dXfH9a2PrWh58zaifnJzkoYceYs2aNbjdbl5++WW6u7uvMLg0hfbyBfny4wp/dq2xrS2U6XR6RfPby729hX/Xcvi0HEGtQIMWt242m/H5fNjtdrEzoo1RbVxrckQzUG4lFosFr9e7ogT41Z7j8vdQVVVFfX09Dz30kFACNKWguLhY7NR6vV7S6TQjIyNcvHgRo9EoCrfkcjmRwD4zM8NnP/tZSktL6e/v5+jRoxw6dEjcx+UK5rW+K002FDbZ1Ixi7e+FSsvlBSrezwEEiJ0szWi82j1oil2hTCsMyS6k8POZTEbkt7nd7ttWDvlepqioiIqKCnbu3ElTU5OYd1pe4IULF+jq6qKxsZH77ruPmpoaOjo66O3tFbsBOt1PGtkXjqXL18YbQdsd1el0osFz4ZzSzn+ta2gFDSoqKkQVyUKutn5pRszq1at58skncTqdK+Sg3+8X4fcbNmzgf/2v/8Xw8DA2m42JiQnKyspE/mY4HObZZ59FVVXWr1/Ppk2bRP64lltZaNBo19Dk5rXmDyCcaIXOS62wRuFzXr4rdT26imaU5nI54aAq/F3hLrF27st1CO3Yyw0xzSGm6SmS90caSdeJVjJ4cnJS9OrQhJrmJfy93/s9pqamOHv27Ir+QnDJSDp//jxNTU3cd9997NmzR8QhJxIJrFarUDry+TylpaWUl5eze/dudu7cyXe+8x0mJiYYHR0FLhlJjz76KOXl5fzJn/wJ/+W//Bf+zb/5N6xZs0Y0FKy
qqmLLli3s3LkTh8PBkSNHrhoKci3ea8G/HKPRSCAQEPGwWqd5LeQuk8mIsrHZbBa3201DQwOnT58WHtZ9+/aJ8MVYLEZPTw9bt27lvvvuo7KyUnjQLg930yp8aZ6hawntqqoqsfAkk0mMRqNIRNa819djeFxLuGnePK3U+dVKnN4ImrcwHA6Lkp9aHovkg5HP55mamiKVSrF371727t27Iuz09OnT/I//8T949dVX2bNnjzBmNGP6ciUELs3teDzO1NQUa9euZdeuXbS0tJBIJAiHw1corNejuBQqtJqCcrWxaTQacTqd+Hw+Ll68yL59+0QegrYYXi3kzufziUIlWo6lViRC84he7mnVwsvm5+dFtcdEIsHCwgLj4+M3FZJ3Nd7P6aGhzd1AIMDi4qJQprQQF+170xTQ4uJigsEgRqORt99+m56eHrZs2SJ2v9PptCjRvXnzZvbu3YuqqqLK2OW7gprzRTN+rnbPmgGntTnQdre0Mscej4dAIHBNo/Z6KCoqwul0igT4a93LjaCFXuXzeVEVTO5k3xxms5ni4mIqKyuprq4WP1cUhXQ6zdjYGGVlZWzbtk2EnR4+fHiFMaQ5MxKJBIODg2JXvHBn+EbGgE6nE2vZ/Pw8IyMjIhywUP5dT5+dq83jazlei4uLqa+vZ2ZmhkAgQCAQEM+QzWbFroyqqpSXl1NXVycqAKZSKfbs2SPWxmw2y7lz5/i1X/s1HnjgAerr64nH48JRWjiPtSbQmqy+2juzWCyiKI7WgqGwCEIymcTtdt9U7yHtOw2FQqTTadF+5FagyYVsNovVar1mkTHJSqSRdJ3Mzc3R39/P6Ogo69evZ+PGjWLLU/Pyb926VVQgKQz3gEvKzLp163juued46623xCR9/vnnmZ6eFtVYBgcHeeONN8SEtNlsvPvuu6TTaaqrqwmFQgwMDACXknvXrFnDX//1X/Mbv/EbfOc732H79u089NBDlJeXc+LECY4cOcLi4iI2m40f/ehHTExMiJjZ9yKVSolqMRqad7OhoeGKogUWi4W1a9eSSqU4c+YM3/rWt1i7di0Oh0OU8v7c5z4nKne53W42bNjAX/zFX5BOp1m/fj1VVVXC81JUVERraytvvvkm+/fvx+v1Mjc3J96dJjj0ej0tLS10dXXx7rvvUlRUJATZ5cLl8ccfJ51Os3//fv7bf/tv1NfX09nZydzcHE1NTdTU1NyUd0Vr0FZTU0M4HGZhYYFkMnlL+hllMhnGxsZE1a6r9YqRvDdaWNzbb7+N0+lk9erV7Ny5Uyxq2nh5+OGH+cEPfiDGr8Ph4Pz58xw6dIjKykoCgcCKhVAzVCoqKkT/tIsXLxIOhzly5MgVXeevB20sTU9Pc+LECSoqKqiuriaVSq3wDlosFhobG3nsscf48z//c77zne+InJO5uTkeeOABkXtSiNfrZfXq1Xz2s5/lb/7mb5ifn6exsZGmpiaKi4tFefCZmRnWrVuHTqejoaGBwcFBjh8/zj/90z9RU1NDT08PZ86cuaX5cVpI5ODg4Ar5o8mFxsbGFc+jVejas2cPL730EsePHycajbJ+/XoymQwzMzPEYjEee+wxGhsb8Xg8VFZWUltbyz/+4z9SUlJCSUmJqO5nNBpFH7LTp0/zwgsvEIlEOHv2LOfPn1+RJ6jt6J85c4YjR47Q2NhIcXHxCvmjecefeOIJ/vAP/5DXXnuN6elpKioqeP755/F6vWzevPmKnlEfFC2sOhgM0t/fLwpm3ApmZmaYnJwU70/uJH14aCGQNTU1dHZ2cujQIdEv6OWXX2Z8fJyysjIxHx588EFef/113njjDXK5HI2NjYTDYdxuNxs3biQQCNyQou3xeNi4cSPJZJKDBw/y13/917S1tYlQtUAgwJ49e67prNN2ao8dO7YiekKLULl891mn07Fq1SqeeOIJvvnNb/I//+f/FDtsyWSS3t5ePvGJT4jiVCaTiR07dohm2OvWraO5uVn0mzSbzWzatIm33noLRVGE8fXKK6+IiBWNlpYWZmdnOXDggCi9fbVQvL1795LP53n22WdFee3h4WG6u7tpamqitbX1po0km81GS0sLy8vLjI2NrdjBu1E0B/zMzAy5XA6PxyOikiTvjTSSrhPNi6HFu9tsNpEXosVpa80/U6kUiqJQU1NDSUmJKFH58MMPMzs7SzQa5ejRozgcDtra2qiqqhLJ43a7HVW91Lixq6sLVVUxmUxs27aNtrY2QqGQaCjn8/lwOBz4fD5+6Zd+iWPHjhEOhxkaGmL9+vWiVHlHRwdOp5NHHnkERVEoLy9/z27XwWCQQCBAJBLhBz/4gfi5wWDAbrfzmc98BpvNhs/no62tTfTZ8Hq9rFu3js997nOcO3eOkydPirC1oqIiUZJW85Zo+VuKotDe3r6i3KVW2Wdubo7e3l4RS79t2zZisRglJSUikf7RRx8VvWJ+/OMfU1dXx+7du0UZXM2gq6ys5IEHHsBmszE+Ps7MzAwmk4mmpiY2bdokEjEDgYAoEa55vEwmEz6fj7q6uisE7OXvp6mpiaNHj7KwsMDc3JyoHtbU1CTykbRz+v1+0UxSu5aWTN3c3IzZbEZRFNGvIhgMioIh0pP7wSgMM21paRFGcWEeh9/vZ8eOHXR1daHT6URM/VtvvcWrr75Ke3s77e3tNDQ0sH79eoLBoCihvHv3bubn5xkbGxPlaj/60Y9y4sQJodSYTCYRflLoyfN4PFRXV4vxYbPZWL9+PXq9nvPnz/P888/zwAMPEAqF8Hg8rFu3TlRh83q9rF+/nk9+8pNMTEzQ0dEhdjWvhRYvv2fPHrLZLCMjIywuLnLo0CERvuf3+2ltbWXLli3o9XrR8ySVStHf38/k5CRut5s1a9aI3Rmtj1RlZaU4j/Y89fX112xEraHX6ykrK2NgYICxsbEV8kev1wtHkc1mo66uDp1OJxKvV61aRSqVoru7m9HRUV577TURNlw4Z/R6PR6Ph/b2dlatWkVLS8sK5UqrwKWFXx45cgS3243f7+e+++5jcnISs9ksjKndu3czNTUlers0NTWxZs0aUUlPk3u1tbV84hOfYGBgQIR7VlRUsGXLFtrb20XRHa09Q2EFP7fbLRoNX6uipSZPNKfR4uKiyKu02WyiL52msJrNZurr6yktLRXPDojdDe04rfHw3Nwca9euFXlUUv7cOFppeK3YUuG71HIWN2zYIBqInj59GrfbzX333ScqH2pjobW1lWw2i8fjYWhoiLGxMZH3q+1GaxXetOqJLpeLyspKbDabcDjodDpKSkpYtWoVPp8PvV4vcoGNRiMDAwMcPHhQ9Ba8Wiisht/vp7S0lLm5OV544YUVIWBer5eHH34Yj8eDx+Ohra1N7FIXFxezdu1avvSlLzE0NMS5c+cARAuMQnltNBpZu3YtfX19mEwmVq9eTWlpqcidcjqdPP300/z4xz8WhUhcLhf3338/iqJQVlYmnAif/OQn6ejoYHZ2lhdeeIGmpiY2bNgg1methHZTUxOAaAqu3VtjYyO7d++muLgYRVEoLi6mvb19RZ
lv7fnq6+tFL8vL0Qzk1tZWjh8/ztzcHHNzc5SUlFBcXLyi6b32DjweD3V1dfh8PnEtrVm3x+PBZrOJlgYDAwP4fD7Ky8tXyBfJtZFG0nVitVpFh/j6+vorFgmDwUBjYyMmk4loNIrD4WDLli0Eg0H8fj9Go5H6+nr27t3L+Pg48XhcKBPpdFrsEGjKtNPpFE1gA4EAra2tVFRUiG7VBoOB6upqrFYrFouFbdu2ibA27bw7duxgdHSU5eVlbDYbW7duBS6FzWiNKa9GQ0OD6Nhc6IHSlB2t30J1dTUPP/ywyI3SOrPv3LlT5BXl83msVitlZWUill1TVCorK9m3bx/5fJ7y8nLRqwEuJTS3tLSwb98+ZmdnRXyzFlqghXzo9XpaW1tZXFxkamqKTCYjcp/a2tpIp9OiK7jD4aClpQWbzUZHRwfJZBKfz0dFRQWtra0ibrquro69e/fS0NAghI7WM2Tr1q0EAoGremi1vIOamhouXLggKo9VVFTg9XrZs2fPimR9TdFzOByUl5eLZ9fKkbpcLux2O9lslkQiQSKRoKamBp/PJ3uU3ABa0rBWmbKiouKKEDatyt3evXvx+XyiL1I8HhcFUrQk5scff1zsqtpsNhobG9m5cyezs7OizHdlZaVQPDTlZ926dULB1igtLWXTpk0kk0kRwqXJGbPZTCQSEWGWdrudj3/846J/mMlkoqysjPvvv5/z588TiUSAS8qudszV3oX2uT179tDb28v09DSLi4vkcjlR/ayiooKGhgaxW9PQ0ICqqkJ5KS8vx+1209LSIpQun8/H2rVrhdNAK1+/Y8cOkVNwLfR6vQhVvrzMuk53qcqbNn+2bdsGIEruamXV3W63CBMym83CwNCMJE2BWrVqFY899hjV1dXU1dUJ402v11NaWipCfGZmZkT4sMViYXZ2VjyXNl527dpFOBzG6/WKxPJ9+/bR3NwsFCy3283WrVvx+/2Mjo6SSqWoqqqivb1dGED5fJ5NmzYJY1GjpKREFMXR5O/V3p3H46GpqYmzZ88Si8XIZDLCIP/IRz4i1gztHWzfvl0UstDeTXl5OXCpoSwgevRpxS+udX3J9aHXX2pMumHDBjFeCtHWkcrKSrZv387Y2BjLy8uipLVWLU0zBgKBAO3t7TidTi5evChK/2tRG3q9ns2bN2MwGMR3GwwGWbt2rZgfGs3NzZhMJqqrq4UjpaGhAZPJhN1uJxaLCSMpFApd01Cuq6tjx44dTE5OXqFDaPNDW/ceeugh0b9Hq/y5d+9e3G434XBYhOrX1dXh8XiEPNN0kF27dlFfX09DQ4Mw4OEnO0mxWEwUW9FSApLJ5IpKmRs3bhRNWZPJJMXFxaLlhsVioaysbMX80kL5MpkMbreb8vJy2trasFqtZDIZysvLeeSRR0Q/NU3elpeXs337dsrLy6+5y2s0GqmurmZgYIBcLsf8/DwlJSWUl5ezYcMGUqmUmH+azrV582ZqamqEDLPb7Wzbtk30b9McPouLi6Jv262IcPlZQKfe6kByieRnmHw+TzQaZf/+/aK088aNG2/K87q0tMTY2BiHDh1i8+bNouSpRCKRFJLNZgmHw3z7299m9+7dNDY2CifRjTI0NMTZs2dZXFzkvvvuo7q6+pql5SUSyc2hqpeaz584cYLFxUXKy8vZtWvXexbJeL/zJZNJpqen+d73vse+ffvEzpPk/ZFGkkRyC9Gm0/z8PNlsFqPRKEIXblSp0MqiLi8vEwwGRZiVRCKRFKJV2xofHxehj1dr2fBBWFpaEvkjWr4V3FzlNIlEcnW0whHLy8ti3mm7djdqJGkFryYnJ99zN1pyJdJIkkg+BAqr72kGzY0qFVqVqlwuJ7bupYIikUiuhqYUaWHNNysvtP5IcPOyTCKRvDfaXCusTnkz867wfIqiXLNZruTqSCNJIpFIJBKJRCKRSAqQ+20SiUQikUgkEolEUoA0kiQSiUQikUgkEomkAGkkSSQSiUQikUgkEkkB0kiSSCQSiUQikUgkkgLuyWayWgnFwuogt5NbVVVIIpF8OGjyIZ/P35Hra/JBlmGVSO5OCquC3Uk5IXtSSSR3jnvOSNKUn2w2SzQave2Gkk6nw2AwYLPZREfjWyXgCp9Nr9djMBhWCFDNOMzlcqJsq6aIaaWjgRW/v9Yx8JOSkdc67m4S3Jc/u8FgEO9HQ/udNiYKjyl8lsLxkslkgJ90Qb/bnlvywdC+22w2SyqVIpVK3XYFSK/XY7FYxB+4cRlR6BDSxrYmGy4vG1uo9BXOA60bfOFc0el04vhcLoeiKCtkzs3c881SeF+aAqvNZ6PRuOI5tN8XHqcpnlqDZzmfJZejjbFMJkMymSSTydwROWGz2bBYLKJs841yuYwoXM8vXydh5doPP9FrLu/PV3hODW0OFsodRVHEMdocvHztvZvkCSDur/CZL3+PwBXy9vJzFpbQv1x/UlX1mmPr8u9Hyqk7wz1pJC0vL9PX18ebb75JOp2+rUaSXq/H6XTS2trKfffdh9VqvWWDO5FIMDs7y+nTpwkEAtTU1FBaWorJZAIuTcilpSXGx8fp6+sjlUphs9kIhUKsX78eu91OOp1mYmKC7u5ulpeXyefzOJ1OAoEATU1N+Hw+dDodyWSS+fl5zp49SzQaRVVVLBYLwWCQ1atX43K5MBrvnuGTz+dZXFxkYmKC8+fP097eTnV1NR6PRxwzOjrK6OgoU1NTKIpCZWUldXV1+P1+YdBqaIL94MGDJJNJvF4vbW1tFBcX3+Ynk9xq8vk8AwMDXLhwgZ6eHmEI3y4MBgMNDQ00NzezevVqMX9vhHw+TyQSYWJigsHBQdFwuKqqioaGhivmaC6XY2xsjLGxMaanp0mlUhQVFbF27VpCoRB2u33F8alUiomJCU6ePElVVRWVlZWUlpbeFc2MBwcHmZ6eZmFhgWg0Sm1tLc3NzQSDQXFMLBZjenqa0dFR0eDZ7XYTDAapq6sjEAjcwSeQ3M3kcjm6u7vp6OhgfHycbDZ7W69vNBppbW2lra2N+vr6m5ITiqIwMTEh1r9MJoPNZsPv91NdXU1lZSUGgwFVVclms4TDYc6cOUMkEiGfz2Oz2Whubl7RiDSfzzM9PU1fXx/z8/Ok02nsdjtNTU2UlJTgdrvR6/UsLy9z4cIFZmdnSSaTqKqK3++nqqqKsrIy3G73LXxrN4a2JoyPj7O4uEg6naa+vp7q6mpKS0vFcdq76enpYXZ2FkVRCAaD1NbWUldXt+KciqLQ1dXF8PAwsVgMnU5HKBSivb0dt9uNxWIhl8tx8OBBZmdnr1iHNN2tqamJ8vLy2/IeJFdy92i5t4hsNsv09AwHD7zNX33r21hyNgwY0fHhW+EqKooui81jYfuDW1i7di1+v190KL8ZEokEg4ODvP3223z3u99l27ZtPPbYY3i9XoxGI7lcjr6+Ps6cOcPJkycxGo2YzWZMJhNLS0usWrUKu91OMplkdHSUY8eOCUEXj8fR6XTU19fz5JNP4na7icfjjI6OcuLECfR6PblcjkwmQzQaZXx8nF27dlFSUiI84
XcKzQCORqMcPXqUl19+mXPnzvHUU0/hcDjweDzk83mGhob427/9W+bn57FYLJjNZg4dOsTatWtZv349GzduXLGTlkwmOXPmDN/5zndYXFyktbUVn88njaR7gHQ6zenTHfz4hR9z+M2jWPMO9LcpPVNFJa1LsHpbGw8+uo/6+vqbUhJyuRyzs7McP36cjo4Oent7qampYdu2bVRWVgojSfNY9vb28vzzzxOJRNDpdJjNZvR6PSUlJXg8nhVGUiKRYGxsjJdeeolnnnmGRx55hL179+L3++8KI+nUqVN0d3czPT3NwMAAa9eu5amnnlphJA0NDdHR0cGFCxcwm83odDoSiQQmk4mamho+85nPUFxcfFc5fCR3Hm2+HDv2Ls9973l6zvZiUe23UU7kSemSbNu3CeXTCqWlpTclJxRFYXx8nI6ODhYWFtDpdESjUXK5HC6Xi6997WuEQiFUVWVycpLvfve7jI+P4/F40Ov1xONx3nzzTZ5++mnq6+txOp3E43H+6q/+isXFRYxGI06nk8XFRQ4fPsyePXvYtm0bfr+fhYUFOjs7mZqaErpEOBymvLyc1tZW9u7de8cNpWw2S0dHB8ePHyeZTNLT08PevXt54IEHVhhJyWSSkZERXnrpJRKJBL29vTQ2NvLII4+sMJKWlpa4cOEC//AP/4DL5RI64Lvvvsvg4CA7d+6ktrYWg8HA3Nwc4+PjpFIpsVM1MDBALBajsbGRQCAgjaQ7yD23MlwSbmkiy8skFlJY8i6MWG+LcMuTJ0ueVC7D4uLiiu3qG0WbNNPT0wwPDzM5OYnRaCSZTJLNZsX50+k0J0+epL+/H6fTyfr167HZbKiqitPpxGq1AmA2mykpKWHDhg04HA7g0g7LyMgIHR0dPPzwwzgcDiwWC4FAgI0bN2K328nn88K7dO7cOZqbm/F4PHfcSIJLXqDOzk56enqIRqOYzWZSqRSKogCXFoh33nmH0dFRysrKWL9+PVarlRMnTjA2NobZbKa+vh6/3w9c8p5PTk7yyiuvEAqFWFhYIBKJiPNJfrrJ5/PEYjGii3GS8xkc+P7FkfLhonJJ+YkSIxqOEYvFbnpM6XQ6nE4nVVVV6HQ6FhcXxU5woezJ5/MkEgkOHDhANBqloqKCqqoq7HY7mUyGUCi0wpmjKAozMzP09/czMTGByWQik8mQTqdv6n5vJZri4Pf7CYfDxOPxK7z9drud8vJyDAYDXq8Xg8HA2NgYExMTdHV1MTU1hcPhkEaS5ApyuRzRaIRYOE56IYcdK8bboDKpqOTJkyHG8mKERCJBLpe7qXMaDAYCgQCtra0oioLRaCQcDtPd3U1nZyejo6MUFxeTz+eZm5vjyJEj7Nmzh8bGRsxmMzMzM/zwhz9keHgYn8+H2WxmdnaWY8eOsXXrVpqbm/H7/YyOjvLmm28yMjJCfX09Pp8Pp9PJqlWrqKysxGKxkM/nOX36tNiR2bhxIy6X646Gk+n1esrLy1m9ejWZTIaBgQFSqdQVuztGoxGPx0NbWxv5fJ7R0VEymQypVGrFcQsLCxw4cIB0Ok1bWxuBQIB8Pk9vby/d3d3C6A2FQqxevZqqqioRkphOp5meniabzeJwOKRj9g5zz60MhbkpZtWCi2JsODDw4Xs+8+QwYCSnplbE/t4ohQbQ8PAwExMT6PV6ysrKhFdUOy4SiXDmzBkWFxdZt24dwWAQu92O1WrF6XRisVjQ6XRYLBbKy8vxeDwUFRWRz+dxOBxEIhFOnz5NMpkU2+ulpaUUFxcLI2liYoKxsTG6urqIRqO3PfzgcrTvenFxUXjIQqEQuVxOxFhrYXOHDx8GoKmpiT179mCxWIhGo5w8eZKRkRHm5+fx+/3k83lmZ2e5ePEig4OD7Nu3j+np6Tv5mJJbzCXHQw5dXocVGx58GDF96LvNmvKTJokur78ilv9G0Ov1uN1uVq1aRUVFBYODg1cs2NociEajvPPOO9TV1VFcXCxkhN1ux+v1rnB4aB7TgYEBrFYr5eXlwtFyt1BfX09paSnhcJjh4eErnhsQz1VXVyd23bu7u1EUhe7ububn56mpqbn9Ny+569H0CEPeiA0HHnyYuPmokPe9Lio5FFIkUPPckrxqo9FIWVkZxcXFmM1mjEYjU1NTLC0t8cYbbxAOh0X+TDQaZWhoiC9+8Yts2LABm83G8PAwf/d3f8fi4iKpVEocNzIywsc+9jHWrl1LRUUFLpeLl19+maWlJRKJBAAul4vVq1cL/SOXy7GwsMD8/LwIwbvTGAwG6uvrCQQCpFIpDh48eNXwRrPZTDAYZPv27aiqypEjR67qKF5eXub06dNUV1ezceNGKisryWazGI1GOjo6GBsbE6HLLS0t4jvO5/MsLy/z8ssvU1JSQlVVFT6f73a8Ask1uOeMpEL06DH+y38G3Yf/qDlVjwEj6i3ctVJVlenpaU6dOkU2m2XDhg1XeIlVVWVoaIje3l6y2Sx1dXX86Z/+KTabjc2bN7NlyxYRe280GnG5XLhcLnK5HMlkknQ6TSqVwm63iwRuLVSv8DhFUYjFYtjtdiwWy13hfdV2ic6dO0dTUxMbN27kmWeeWWFAZjIZjh8/zmc/+1lWr15NSUkJqqrS0tJCT08PiUSCqakpVq1a9S8hFsfYv38/e/bsYf369Rw/fvyOG4SSW48O3b/ICNMlI+lD9mSqqkqOHPpb6LAxGAw4nU4cDgeJRAKn03lVr3M6nWZ2dpYTJ05QXl7OkSNHeOmllzAYDHzyk59k69atIi9PVVURmjM6OsrHP/5xlpaWbion4laifU/l5eVkMhmsVitWq/Wqu1w+n2+FkqGqKsXFxbjdbgwGw4qkaonkaujQYfgXOWHAeFvkBIABwy1z22i50k6nUxR/SiaTItS+uLhYFAiwWq3Y7XbC4TDZbBaTycTy8jJ6vV44TQ0Ggwgji0ajxGIx4JJxYDAYcDgcOBwOdDqdKGKlGZ3xeJx4PI6qqtjt9luSjnAzaEUpysrKCAQChMPhaxbKMJlMFBcX4/F4WF5eFs7ny0mn08zPz7N7924CgQAej4d0Ok1ZWZnQN2ZnZ0XRHPiJrqKF++3YsYOmpqa7zjn1s8ad13Il10QLkfnBD35APB6ntbWVnTt3cuzYsSvCdAYHB5mcnGR5eZni4mKeeOIJBgcHOXr0KMePH+cb3/gG7e3tGAwGkYz9//1//x9nzpwRXpQvfvGL1NTUCM9IPp9nfn6er3zlK4yMjKDT6aiqquIb3/gGq1atoqio6E68FkEqlWJsbIzf//3f59d//ddpb2+/YntcURQikQjRaJRgMCgKOWjC22w2k0gkiMfjADz77LOcO3eOYDDIz//8zzM9PS2rykh+6onFYoyMjDA5Ocnx48fZtWsXGzduJBwO82d/9mc8/fTT7N69m6amJjKZDH/7t3+L3W5n586drF69mueee+6nfh6oqkoymaS7u5vu7m5MJhN1dXVSCZH8TJDJZJicnOTZZ5/l//yf/0MmkxF5yO3t7ULhb25u5nd+53f4r//1v/Lnf/7nKIqCw+HgC1/4Aps2
bSIYDGIymaioqOC3fuu3eOaZZ/jHf/xHstksFouFz3/+8+zatYvq6mpx7Wg0yhtvvMFf/dVfMTg4SCAQ4NFHH+XBBx+kvLz8p162XI7L5aK1tZUf/OAHrF27FoPBIBy6x48fx+v1ip02DVVVicfjHDx4kGw2S2trK+3t7XfoCSQa0ki6C5iZmaGzs5M33niDoaEhAPbu3cu2bdtIpVKcPHmST3ziE6xbt04UW9DKUGqeUE1A1dfX88gjj3D//fezceNGTCYT58+f5/jx4zQ2NmKz2TCZTPj9fr7whS+we/duent7mZub4+DBg2zYsIFAICDC+YqKivjqV7/K9PQ0U1NTnD9/nmPHjhEMBoXH6f2IxWK88MILvP766yQSCcxmM6WlpXzzm9+8rt2ooaEh3njjDV577TXgUsjcgw8+iNls5tlnn+W+++6jtbUVr9fL5OTkinejbWNrJU8LhXFhWU2tEs3hw4cpKytj7969mM3mFWVBC0ss32tCXXL3ksvlGB0d5dlnn6W/v1/kHf36r/86TU1NuFyu9z2H5sW12+1s376dBx54gNbWVtLpNBcuXGBwcJBgMEhZWRmvvfYa8/Pz7Nmzh82bNwOsmFOag0abU+/F4OAgL7zwAsePHyeXy+Hz+fjkJz/J6tWrKSsre8/PRqNRRkdHeeaZZxgaGkJRFBoaGvja175GZWXldc/BwhDs06dPc+zYMcLhMA8//DAlJSV3zQ6ZRHIzZDIZxsfH+e53v8vw8DDxeBybzca/+3f/jqamJkwmE16vlwceeAC/309/fz+Tk5McOnSIBx54gNraWgDm5+d54403RAVOo9HI9PQ0b7/9tsjn9Xq9xGIxXnvtNXw+nwj37e3tpbOzk1AoJKpsAlgsFlavXs3XvvY1US1zZmaG06dP09DQIHZ7329Ov/vuu+KPoigEAgE++clPsn37dpxO53t+NpFIMDw8zH/7b/+NaDSKzWajurqar3zlK5SUlNzSYjTl5eV87nOf47//9//OH//xH2MwGESRnPLycpxO5xXRQFql0gMHDtDY2EhFRQVFRUVS17jDSCPpLkDbcnU4HGKiW61Wstksg4ODDA4O0tnZKba0L168SD6fF+XF9+3bh81mw2q14vf7aWpqIhQK4ff7KS0tpa+vj4mJCWFQadvh69evF3H9nZ2dnDhxgunpaVwul9huNpvNbN26VVS1U1WV3t5eZmdnRU7D9TyfxWLB6XRiMBgwmUzX9TkNLQRAezd2ux29Xk84HObkyZPU1tZy8OBBzGYzc3NzIumypKQEnU6Hw+HAYDCQTqdXhM1d3utgZGSE0dFREokEHo+HgYEBZmZm6OvrA+Cdd97B7XZTUVFxx0MEJD9baL3XnE6n6JN2eV+z6/m8xWKhpqaG6upqysvLyeVylJeXs7S0xMLCAul0mjNnzjA2Nsa5c+fI5/OiKt709LTIYdy1a9d1zYHC+9baDWjKwvtR2HPO6XSiKIqY+x8ELYxlcnKSt99+m3g8TnV1tci3kEqI5F6hcL5o0RKanNDr9djtdlFQob6+nhMnTvDmm29y/vx5SkpKRP5zV1cXn/vc51i9ejVms5nR0VEuXrxIX18foVAIi8VCX18fQ0NDfPSjH2XDhg2UlpZSV1fH3//93zM+Ps7o6Cjl5eUYjUZMJhOlpaUi/9lgMNDV1cXQ0BALCwt4vd7rmocmkwmr1UpRUZEobHC9PaQ0eeJwOFBVVYQBfhgNvYuKimhvb+eRRx6hq6uLeDyOwWDA7XaTSqVwOp1XOGdSqRTz8/N0d3fz1FNPEQgE7oqUhp915DdwF+D1etm0aZPIiYFLhkA8Hufdd98lnU7zox/9CJ1ORy6XY25uDkD0/tizZw+BQACHwyEEJPzEOLFaraK8pPbHYDCIeP2ioiIMBgOvvfYaU1NTVFVVCUGi1+sJBAIEAgG8Xi+KovCtb32L+fn5K7aLr4XNZmPfvn1s3bqVfD4vGrNer+empKSExx57jD179gCXkifNZjMdHR0oisKJEyc4ceKEKFO+sLBAT0+P2DXbuXOnKE+q9X2AS54lTeG02WyEw2H0ej2dnZ2cPHlSKFcLCwuYzWby+byogieNJMntQivR/eSTT5LJZETOUeE41OZ14d8Ld1AtFgter1coGIXj1+l0ivwDrd9YOBzmlVde4ZVXXhHFTIxGI+Pj4ySTSbZs2XJdyklJSQlPPPEEjz32mJA7LpfrukLcbDYbNTU1fOUrXxGVPM1mM16v94rn1Lj833CpvO/i4iJHjhzh0KFD7N27l71799LU1CQNJMk9g2aIfPGLXxRzWafT4ff7hUKu1+spKiqiqKiIiooKADo6Ojh16hQ7duxgfn6evr4+Zmdn+djHPiZylaqrq3n++efp7++nsbERl8vFqVOnSKVSbNmyhe3bt6PT6aitrWX//v0sLS0xNDTE5s2bReSFZpQEg0EURWFqaoqRkREWFhZoaGi4rmdctWoVVVVVfOQjH0FVVZFjfT3yxGKxUF1dza//+q+Ld6PlGOn1+ivk5tXki9YA9mpyRzsnXDJWPR4PX/7yl8lms6TTadHn8k//9E9xuVyiurDG4uIiQ0NDzMzMsH79evx+v5RPdwHSSLoLKEx0LMTtdvPzP//zPProo2JCJhIJ/vIv/5JcLseuXbvYtWsXdrudlpYW/H4/iUSC7u5uamtrSSQSTE9Ps7y8THNzMyaTiUQiIcJltGTK5eVlFhcXhVGk0+lIpVLE43HRUyifzxONRpmfnyeXy32gTvU6nQ63231FL4Tr/bxmFBWWwlRVlW3btvF//s//Ec+j9XX5h3/4BxobG3nsscdEHtbWrVvp6uqirKyMhoYGTCYTAwMDLC8vU1RURGVlJVu3bmXHjh1igYFLhuj//t//m1wux1e+8hW2b9/+gXbBJJJbgRYiezXy+TyKopBKpchmsyiKQjabJZVKiQIrWins8vJyhoeHhTc5nU4zNDSE2+3G7/fjcrn47d/+bVHlUsuL/Na3voXP52Pt2rXs3bv3undgzGbzDVdn0uv1WCwWQqHQVX+vKR9aOwRFUUQ5Xs0Jo5XzPXz4MH/5l3/Jr/zKr/DAAw9QU1MjqmB+GJ5kieROYDKZVvQJ01AUhUQiQTqdFk7URCJBOBwmkUiIqBRtx0nrv6ZVwltaWiIejxMKhTAajWInW1EUFhcXWVpawmazEY1GSSQSuN1u4YhZWloSjlGTyUQ+n2dhYYFEIiF0jutFM7RuBM1ZdK0wX01mplIpUdxCky9ms1k4dfP5vDhOURR0Op34rMlkEk15tRLiFosFvV6Poih0dnaSTCYJhULCSNW4ePEir776Ko2NjaxZs0bkT0vuLPe0kZQnj/Iv/+XVmyu1e33Xu1S6M88Hu9bVlA3NS+LxeFbkHESjUVwuF4qi4Pf78fv9ogzwfffdx9mzZ3n55ZdZXFxkfn6e8fFxvF4vu3btwmQy0d3dzcDAABMTE2JnaGJigunpaerr62lsbMThcDA2NsaRI0dIJpM4HA5yuRyLi4t0d3ezZs0aqqqqrisX4lrPd7P
vBy4JzEJBE41GURQFp9NJcXExxcXFosz5U089xd/8zd/wzjvvMDs7i9Pp5NSpU5SVlbFu3ToqKyux2+1iBw1+UjpZ29oPBAI3FO4juTvRSnIrZLUffOjkyZPng/U8ea/5oygK4XCYgYEBTp48Kcr4T0xMYDAYWLt2La2trcJJ8eSTT3LixAkSiQQdHR1CUdq9ezerV6/GarVisVjEHNBK/RYVFeF2u8Xu8/XuAt/M3L/WZ7V7O3fuHIODgwwMDHD+/HmSySSvvPIK09PTNDc309raSk9PDz/84Q/553/+Z0pKSlAURTTeNZvNbNiw4a5oii25e7lUkvv2yQkVyKGQI/eBLnUtPQIgEolw9OhRzp8/TygUQq/XMzs7y9jYGAaDgQceeACXy4XFYqGtrY3a2lr+9//+39TX12M0GpmbmyMajYq10u12s23bNp5//nleeeUVurq6CAaDjI6OEolEaG9vp62tDaPRyJEjR5iamhKtRRRF4cKFC6TTaVatWkVZWdl1r6kfpjw5c+YMXV1dzM7OCgdqMplkbm6OrVu3incxNTXF/v37iUaj9PT0AAijavPmzaJn3eDgIO+88w4Oh4NMJkM4HObixYts27aNDRs2rGgQqzWfvXDhAr/wC78gw4DvIu45I0nzhBiMRlK6JEvqHHEit6mZrEqKOAYdwttyowO9cNu2UCGxWq00NzejKArBYFB4aywWC5s2bcLhcHDx4kUWFhbI5/Nie7qyslLEJJvNZrLZLFNTU8Al71NjYyNVVVWico3ZbKaoqIj5+XkikYi4n8bGRtatW0dFRcUNe3RuBYVeLw2r1SoavdXU1FBUVCR+39bWxoMPPsj09LTw8DQ0NNDW1kZzc7MQSoUxwKqqiio1iqLgdrtl0YZ7AC02XTXkSRBngRn0t7Dc7rXQmsnGWCav94uSuzeL1gNpbm4On8+HoihYLBYxdzVvp91uZ8+ePeRyORKJBJFIBFVV2bt3L6tXrxbKE/xE/mihqC0tLXg8HiEfCo+5U8RiMcLhMJFIRBhAiqIwNzdHeXn5CieHlhy+tLRELBYTeY7Nzc033atKcm+irQc5vUKcCAvM3JZ+iyqXei7GiaDTc0vWHK1wgMFgYHZ2Frik2FdXV7N+/XoaGxuxWCyi4uOnPvUpJiYmiEajYp198MEHaW9vx+fzYbVaqays5PHHH2dhYYFsNsvs7Cx6vZ5du3axbt06YfwUFRUxOTlJJBIRu0rBYJCSkhLq6urw+XwfKDLlw0KToUtLS9TV1Qln0czMzIq2K1qz11QqRWVlJXBpx3x6elpE6mjvMpPJEIvFhIzZsGEDO3fupLy8fEXIczqdpri4mDVr1rB169brzrOSfPjcc0aStvj5/X6CVX7MOettUYDgknAzYcPmsVJSUnLdCcofBKPRSHt7O/l8npKSEqHUa2W87XY7RUVFTE1NYbPZqKqqoqqqSij4Xq+XiooKMpkMkUhE7EIFg0GampqEYeFyuaitrUVRFJF0aLfbRYfooqKiuy6p0Gg04na72bhxI8XFxSKsQK/XEwwGue+++0RT3mw2S01NDQ0NDQQCgWt6xp1OJ+vXryefz18RLij56USv1+PxFBMo8eOvLsaaN37ojWQ1VPTodS6CZUG8Xu9NV1bT4uqLioooKytbEUqi0+lwuVzCYWMwGETeo6a0mM1m1qxZQ0VFhUj2vtr5161bh81mEzvXdwNOpxO/34+qqqKKFiDi/TUFra2t7YpqUlooX2FTbomkEIPBgNfrJVDuYzG8iFm9nXJCh0HnJlQWwu123/Raq4WZJRIJlpaWRBGVQCBAbW3tijUwGAzy4IMPcvbsWZHDa7fbhcNVK3bg8Xh46KGH6OvrE0VfHA4HTU1NomG9TqejoqKCVCrFwsICqVQKg8FASUkJ1dXVopjD3YDT6SQUCuF0OoXxA4iiWJqcMJvNhEIhFEURu0GaQW232zEajSIKqLq6muXlZVFAqr6+nlWrVmG1WlfIUYPBQENDAw6Hg4aGhrtGxkpAp95jnfTy+TxLS0v09/fz5ptvkslkbqunUGva1traxp49u6+YDBKJ5M6hJdj29vZy8eJFenp6ruit9WFjNBqpr6+nubmZ9vZ26TWUSO4yNDlx4cIFUe3xdjYU13a7W1vbaG1tpb6+Tpaql0juAPeckaRVHVEURSQf385H1LamLRaLiHOXCpBEcndQmGuTyWRuuxMFEKX1TSYzJpNR/EwikdwdaHJCURTS6TSKotwROaHtdhoMBikjJJI7wD1nJMFPBNztNpA0tPjauyHOViKRXMm1SrzeLrQ8AykfJJK7k0I9ovDftxMpJySSO8s9aSRJJBKJRCKRSCQSyY0ik2UkEolEIpFIJBKJpABpJEkkEolEIpFIJBJJAdJIkkgkEolEIpFIJJICpJEkkUgkEolEIpFIJAVII0kikUgkEolEIpFICri5Ns53KXdTwT5ZulMiufu4W2SElA8Syd3J3SIjQMoJieROcU8aSfl8nmw2y/LyMoqi3FZhp9frMRqNOBwO7Hb7bbuuRCK5frLZLMlkkmQyiaIot/XaBoMBm82G1WrFarXe1mtLJJLrJ5PJkEgkREPZ24nBYMDhcGC1WjGbzbf12hKJ5BL3nJGUy+WYm5uns7OTH/3oBbLZ7G01knQ6HUVFRaxbt45PfOIT2O12DAbDLTn38vIyIyMjvP7661RVVdHW1kZdXZ0QoEtLS/T399PR0cHk5CShUIj29naampoIBALodDrC4TB9fX2cPn2ahYUFVFXF5/NRXV3Npk2bCAaDd7x5XS6XY2pqisOHDzMxMUEqlaKiooL29nY2bNggjksmk0xMTPD3f//3VyxgmzdvZuPGjZSXl2M0Xhrm0WiU3t5ezp07x8jICDqdjrq6OhobG2lvb8dut0uP3T2OqqooisLZs52cOnWKc+c6b7vyo9frWbVqFWvXrmXbtm1YLJYbHneKorCwsCDm/cLCAlVVVbS0tLB+/XrMZjM6nY58Pk8qleLkyZN0dXURDocxGo1s3bqVbdu2YTKZ7vjY176b5557jp6eHuLxuPidz+dj8+bNtLS0EAwG7+BdSn4W0MbiiRMnOXr0CENDw+Ryt99IWrduHZs2baKtrQ2LxXLD51IUhYmJCQYHB7lw4QJzc3O0tLTQ0tLCqlWrhJzI5XJEIhE6Ozs5f/484XAYr9fL+vXr2bJlCwaDAZ1Oh6qq5PN5hoaGOHXqFBMTE8TjcUKhELt27aK8vJyioiL0+ksZHdlslunpabq6uujo6CCdTuPxeKipqWHnzp14vd47Jn80/fDVV1/l9OnTRCIR8Tu3201lZSUPPPAAwWAQg8FANBplYGCAgwcPEolEsNlslJSUsH37dqqrq4W+kcvlOH36tJDLuVyO8vJydu/eTWlpKQ6H4448r+SDcU8aSZHIMr29vRw68C5G7OgxoOPDn4AqkCODveiS0fLggw9itVpviZGUTqeZmZnh7NmzvPnmm2zYsIFgMEhVVRVms5lsNsvZs2fp7Oykv78fu91OT08PyWSSeDzOrl27sN
vtpFIplpeXmZ+fJ5fLkc/nGRsbE5P4gQceuKWG3Y2Qz+eJRCJcvHiRSCTC5OQky8vLeDyeFUZSNptlfn6e/fv3s2nTJoqLi4VQLrx/VVXJZrMcPnyYnp4eZmZm0Ol0GI1GEokEiUTitivKkjuHoiiMjo5ytuMcxw6dxKR3oL9N6ZkqkFXjxKNJioqKWL9+/U0pP/l8nlgsxujoKKOjowwODhKNRrHZbKxevVo4ULQ5MDg4yPDwMOPj46iqit/vZ/PmzRiNxjtuJMGl5zl79iwDAwPYbDbKy8uBS/NZm9sSyYeNZiQNDQ1y8vgZersGMelst1FOqGTUBDlFJRQK0djYeFNyIpfLsbi4yOjoKCMjI/T19QGXjIDGxsafXFdVSaVSQk6MjIzg9XrxeDxs3rxZHJPL5YjFYvz4xz9mZmYGRVEwmUz09/djNBrZsGEDjY2N2O12VFVleHiYCxcu0NHRQS6XQ6/Xk0qliMViZLPZm3tZt4ju7m7effdd3G43VVVVABiNRmEYwiVHdV9fHy+//DKJRAKj0UgqlSIajaKqKk6nk+LiYhRFYXJykhdffBFFUcTn+/r6MBqNbN68merqamw22x17Xsn1cc8ZSfl8nkQiwdTkFJNDYbw2Fxa9A73uwxduKioJZZHYUoSRkRFSqRT5fP7Gz1ewA7a0tMTIyAjnzp1jaGiIsrIyYrGYOH88HufgwYP09/dTXFzM1q1bOXbsGBcvXmR+fp62tjZsNhs6nQ6Hw0F1dTWlpaUoikJnZydDQ0PMz88L7/OdNJI0L5WiKIRCIUZGRlhaWmJpaWnFcblcjng8Tnd3N08//TStra1CKQyFQsJoUlWVhYUFnn/+eRKJBLW1tUIxNJlMFBcX3xUKouT2kMvlmJ+fY3JshunRCAF7AIPO9KG7UVRAJU84tcC4d5qZmRkymcxNnzefz5PL5fD5fAwODhKJRAiHw1fIHlVVyWQyuN1uZmZmhLPkZmTUrUZVVSYnJ1EUhdraWh544AEArFYroVAIp9N5h+9Q8rOCoijMzMwwNTrH/GicYpvvtsmJvKoQTi0wNjrBwsLCLZETuVxORI709PSwuLjI0tLSFfM/n8+TyWQoLi5mbGyMeDwuok40MpkMExMTPPvss6xbt46WlhbKy8s5ceIEhw8fxmw24/f7sdvtZLNZTp48yalTp5idneWxxx6juLiYfD6PzWYTOy93mtnZWRYWFmhra+ORRx5Bp9NhsVhwOp0UFRWh0+mYmprixIkTvPDCC/ziL/4i1dXVLC4uMjAwwIEDB2hqasJsNpNKpTh+/Dj79+/nU5/6FPX19ZhMJk6dOsU777yDy+XC5XKJcGupf9y93B2j8xaiqqpQsq3GIoqtFdhNbgx604d+7byqsJyykTUskM/nxb3cDJpH6+LFi3R3d5PNZtmyZcuK0LB8Ps/g4CCdnZ34fD6++tWv0tTURENDA//3//5fBgYGGB4epqSkhGAwiN/vZ9OmTcIQCoVCHDt2jDfeeINIJILP57spr9XNYjKZaG5u5jd/8zdZXl5maWnpPb1NOp2OlpYWtmzZIu5bCxnU6XRks1n++Z//mampKR566CGefvppEV6gcSeNQsntRZMPBp0Ju7EYv70Go/7GQ94+0HVVhUwuiQGzkBE3g8lkora2lvLycrEjGo1GrzhOr9fjcrn4whe+wNLSEq+99hodHR03de0PA23O+nw+Ghsb2bp164qfa6E+UqmQfNhoOyZGnRWH2Xcb5USenJolk0+iU/W3JF3AbDazevVqmpubSSQSLC8vX/U5DAYDJSUlfOELX2BxcZHvfe97TE1NXXFcNBrl7bffJp1O8/GPf5zt27djMBhoa2vjt37rtxgcHKS6uppQKMTS0hIvvvgiJpOJr371q2zevFk4LwFhJN3pea3T6bDb7dTV1bFlyxb0ev2K1AOdTifCEBsaGnj66aexWCwsL1+KXPpf/+t/ceHCBfx+P9FolP3797N+/Xo+9alPUV1djaIoVFdX8+d//ucMDAzg8/koLS29Y88ruT7uOSPpci4Ncv1tCbfToV3n1l5raGiIY8eOoaoqDz30EG+++eYKwamqKmfPnkVVVcrLy2lqasJkMtHY2EhFRQUjIyNcvHiRjRs3YrVa0ev1orhFLBZjaGiIsbExfD4fTqfzrjAYtOIXyWQSg8HwvuFwv/Ebv4HT6cThcLBlyxaeeuopysvLcTqdKIrC66+/jsfjoaOjg3fffZeBgQFWrVrFvn372Lhx44qQA8nPGrrbstN8ScG/9bLIYDBgtVrJ5/MYjcZrhqXpdDpRLMJkMt2V4WuaY+no0aMcOnSI//k//yfl5eU88cQTrF+/noqKCkwm0x1XqCQ/a1zSI26HnECnR/chyAltzufzeQwGwzWNL71ej81mI5VKXTMMV8txNJlMmM1mEcGh6ReTk5P09vayYcMGjh49ytTUFAaDgbfeeovf/d3fxeVysX37dnbs2MGmTZtuKi/zVjI2Nsaf/dmf8e1vf5u6ujo2bdrErl27WL9+PQaDgUwmg6Io2Gw2zGYzRqNR/EmlUoyPj7O8vEwmkyGVSuH3+8VxqqpisViIRqNMT08zPz8v5dhPAfe8kaRxWwbiDTp8crkc2Wx2RQUdi8WCyWQinU5z6NAhcrkcNTU1rFq1ioMHD64wGlRVFYnYDocDk+nSrpnZbMZms2EymVheXhbb6tlslrm5Ob773e8yOTnJ4uIiVquVHTt24PF4rnv7W1EUIQxUVUWv12MymcQu13u9c22HrLC6mFbNR4sB1jw518JkMuH3+/mFX/gFKioq0Ol0IqnypZdeYu/evTQ3NwMwOTnJ2NgYtbW1NDc3097eztjYGB0dHcTjcZxOJ2VlZdf13JJ7i0JP4YfJjXqENY+2Nle0eVxUVLTCKHqv+y98xsI/N3o/cCk+P5fLiXNpisP7GV+agyaTyYgwIovFIj67YcMGampqRKXQiYkJ9u/fTzqdZuvWrdTW1krFQnLH+NDH3k3KiUQiIfKNtUJSmrFz+Y7s5Vz+bNeSE1arlcbGRhKJBGfPnsVutxMIBDh79izj4+NiNyWXy4mcYlVVmZmZ4dFHH2Vubo6RkRHi8Th+v5/Gxsbrcs5eTefQZMf76S2F70ereqzX60WBifb2dhFmqNfrmZqaYmBggOXlZdxuN7W1tZSWluL3+3n77bfp6OigrKyMhYUFLl68yMDAAFVVVaRSKRwOB42NjRw/fpzz58+jKAp6vZ5z584xMDCA0+kU8lzT1yR3Jz8zRtLdTCqVYn5+nrGxMZaXlwGorKwkGAyyvLzM8ePHqaurIxgMYjKZSKVSIh8nHo9jsVhIp9MYDIYVSYbwk4TnTCYjvLRabsLk5CSjo6Nks1mCwaDwAsH1bX0nEgmmp6cZGBggn89jsVjw+/20tLRcV8nSSCRCT08PkUgEVVVxOBysXbsWh8NxXYaayWQiGAzymc98hlAohKqqTE9P89xzz3H27Fkh1IqKikgkEsRiMdauXcuOHTtobGzkzTff5MyZM/T29rJu3TppJEnuSlRVJ
R6P09/fz9LSEqlUCoCNGzdSXFx8x8oDnzlzhkQiAVxyyDQ0NFBSUvK+Zc0zmQwLCwtMTk4yOzsLQEVFBWVlZRQXF7N582ZMJpMInT19+jTf+9736O3tpaSkhMrKSqlYSCSXoRVx6e7uJhKJkM1mRRGF4uLiWxohohlJFRUVDAwMkMvlKC0tpb+/n2g0isvlEs7PeDyOoihYLBYCgQAf/ehHGRwc5PXXX6e/v5+BgQHq6ure1ykKlyraTk5OMjQ0RC6Xw2w2U1lZSUlJCR6P5z0/qxlIFy5cIBKJiHvatGkTRUVFtLe3U1paSjAYRK/Xc/z4cTo6Oujq6mJwcJDKykqqqqpobGzk3Xff5fDhw5SXlxOLxRgYGCCVSgknlsvlYuPGjRw7dowzZ86wsLCA2WwWxbSy2awwZCV3Nz+DRpLKrQ6Hu1mWl5fp7OzkpZdeoq+vD1VVeeyxx9i+fTsTExOcPXuWpqYmcrkc586dY3Z2llwux/DwMBUVFRQXF2Mymcjn8yvyHLTcC1VVV3iSzGYzFRUV/Mf/+B9ZWFjgxIkTdHZ28vLLL7Np0ybKy8uvq+rK3Nwchw8f5plnniGXy+H1eoUX+P0UN1VVmZiY4J/+6Z8YGBhAURQqKir49//+31NZWXldRpLFYqG0tHRFXG9NTQ1Go5G//Mu/ZHp6mrm5OeFJq6mpYdeuXXz0ox8Vxy8sLLCwsMD4+DibNm1632tKJLcbVVWZnZ3l+eefp7u7m/n5efR6Pf/5P//nFcVKbjff+c53mJycRK/X43a7+fznP8/OnTvf10hKJBL09PTwxhtvcPToUQAeffRRHnjgAbxeL1u2bFmRb1lTU8P58+eJx+MMDQ2xadMmaSRJ7kFuTjdRFIXZ2Vm+//3vMzAwQCQSweFw8J/+03/C4XDcskpqWthubW0tX/rSl9i/fz/79+9HURRaWlrweDy43W6sVis6nQ6DwYDFYmHVqlX83M/9HC0tLTQ0NDA/P8/Ro0fp7u5m9+7d17Xmh8NhDh06xD//8z+TTqfxer088sgj7Nmz532NJEVRmJ+f5/vf/z59fX1iF6uiogKHw0F7e/uK4z0eD3q9nqWlJQYHB9mxYwd1dXVYrVYymQzPPfcc6XSaoqIiUSXQZrNhMBgIBoM8/PDDjI+P8/bbb3PgwAEMBgOlpaU0NDTg9Xplxc6fEn4GjaT3F0KqCppDo/Dv//KT6zrHByEQCLB37162bt0qwk+0mGCt4MLbb7/NO++8g6Io9Pb2ioptkUiE1atXU1VVxdGjRwmHw8TjcRwOB7FYjKWlJdLpNOXl5cKTpNPpMJlMhEIhUTHKZrPxF3/xF8zOzhIMBq9LoFZWVvLpT3+ahx9+GFVVhTAsKip638/qdDqam5v57d/+bdHLymg0CoPvRtEq0hiNRhHGqNfrqampwWQyrXguLZZYr9fLEuCSuxa9Xk91dTX/9t/+WzKZDLlcDgCv13tHm0z+0R/9kZg3WmGI62mO63a72bZtG2vWrOHrX/86AHa7/ap9yjRZZbfbiUajYhddIrn3uDm9wmQyUVNTw3/4D/+BTCYjwu0+LDlhMpn4+Mc/zqOPPoqiKKTTaWKxGL/1W79FaWmp2PGtr6/H4XBgNptFdUqDwYDZbBYpBdcbilxWVsZnPvMZHn30UaFz2O3269JXNOew9n60z/t8vqsaaGazWZw7kUgIPaWiooIvf/nLfPGLXxQtVsbGxviLv/gLSktLcbvdWCwWDAYDX//61/nlX/5l0uk0yWSShYUF/viP/xifz3fN60ruLuQ3dBUK1+krd39v/S6UpqhbLBYhLPR6PXa7nT179lBfXy9+nkgk+Lu/+ztyuRzbtm1j586dmM1mWltbcblcTE9Pc/DgQTZt2sSZM2cYGxsToTBGo5GpqSmRkOl2u8lmswwNDTE8PCyMnOv1bhiNRmFgwU/il6/381r5be3ZNK+TTqcjk8kQi8VYXFwkmUyKf8/PzwuBm0wmGR8fx2q1UlRUJAzHEydOoCgKRUVFuFwuDAYD999/Py+99BLd3d10dXURCoXo6uoSXnnZoFKyEm3Rvjt2nbX5WlgxU1tgNQVlcXGRRCIhFu5wOCy8vlqidiKRYHFxkVgstuI4bWHXlKnrybvw+Xwr5q5er7+uua/X67FarZjNZhFuooXaZDIZLly4IBw1WsPKgYEBysrKCIVCd7TypkRyt6L1/vN4PFeVE9lsllQqJdZULTyvUE5ohR00ORGPx0kmkyQSCcLhME6nUxgAqqqK/BqDwUAsFhP50hUVFaLsdWtrK2VlZUSjUQ4dOsSDDz7IzMwMw8PDJBIJEXlyPTLnajrH9YTpacdqOocmdzSdIxwOixxLTa51dXXR29vL4uIi27dvF/IqGo0yOztLIBAgmUwyPDzM2bNnyWazNDY24vV6UVWVSCTC/Pw8LpeLXC7HwsIChw4dIpvNUlFRQVVVlcyt/CngnjaS8mqeXD6Lks+gqh9+7GdezZFTs6jqB/N0ahP18gaomvJeaEjEYjH8fj+5XE7EyGplO9va2hgdHeXo0aPMz88zNDSEqqo0NjZSVlaGXq9ndnaW0dFRUawhl8sxPT1NOBymvb2d4uLi6/ZuXK1x6wd55msZVFohikOHDjE9PU1/f79I9HY4HGzbto2ysjKSySR9fX3Mzc1htVqFEjg8PEx9fT2VlZV4PB4MBgObNm3iwoULhMNh3njjDbxeLyMjI5hMpitC9iQ/G1wqyZ1DyaX/xRT68Dug5PI58qqCyvV7dt9rruRyOZaXlxkaGhLx85rH0uVysWbNGpqbm3G5XGSzWc6fP8/58+c5ffo0Q0NDLCws8OMf/5iWlhaqq6spLy9/X0NHW9hvdMf3Ws+j7fwePnyYoqIizGazCDW0WCxUV1dTXV0tQ+0ktxWVPHlVQcllbk9TelUll8+Syyt8kGpQ7yUnFEVhbm6O4eFhzp8/L8L6k8mkKFrQ3t6OxWIhHo9z8eJFzpw5w/nz51lcXCQcDvPSSy+xevVqqqur8fl85HI5Tp48KcL60+k0fX191NfX09DQIHJ7AoEAGzZsYHBwkDNnzpDP51lcXGR5eZmSkhLq6+uvu5n11XSlD/J+gCv0G82gOX/+PDMzMzgcDgBR1KqsrIy6ujrMZjO5XI65uTkOHTqE3W4X+ZULCwusWrWKmpoanE4nuVxOGIUmk0nkkA8NDdHU1ERdXR1+v18aST8F3JNGkk6nQ6fXkc2nSGSXyOcVdLoPv6y1quZJZpfBkESv995UBSntsxaLZYXnVKfTEQgEyOVyeDwe0SDW4/EIb8eZM2cYHx8HYNWqVaxfv15MyFgsxtjYGBcvXiSTyYgKUj6fj61btxIIBK7bq/Nhsri4yDvvvMPU1BQLCwuiIlY8HqeyshKv10s2m2V+fp7jx4+LEBxt+3379u00NDTgdrsxGAysWbOGnTt30tXVxalTp0TZ5JaWFtrb22XRhp8xdDodKnmy+QTxTPhf+qjdDgUoR1qJoWK5Kfmgkc/niUQi9Pb28tZbbzE9PY2iKMRiMQ4cOIDJZKKs
rAyn00kmk6Grq4vDhw8zPj5OLBYjkUjw5ptvisaOpaWldzROPp/P09fXJ6pXaUpfU1MT7e3t1NbWyhAVyW1BG3s5VSGtJIhnFjDob0d4q0pOVcjkYkDRLVmL8/k88/PzdHV18dZbbzE/Pw8gdpONRiMNDQ2YTCYSiQRdXV288847TE5OkslkiMfjvP7665jNZtxuN8XFxeRyOfr7+5mZmSGdTgPgcrnYvXu3yE2CS1U4d+/ejc1m4+TJk7z11lvodDrKy8tZtWqVMJLuJJlMhtHRUS5cuCB6MhqNRkpKSmhqaqKxsVEUzYpEInR2dhKPxy/147Ra8fv97Nmzh8rKSmw2m6jAd/bsWWKxGLlcDr1ej8fjYceOHTQ1Nb1vDpXk7uCeW20MBoPIPYlkpoim59BhuE1K/yXPtFvnxGqtv65yuB8Um83G5z//eVRVFcmRcMmru3nzZpqamti3bx+JRAKbzYbP58Pj8WC32wFYvXo1lZWV7Ny5k0wmIxqoFRUVEQgERLLlnaasrIwvfvGLQvgWUldXJzpgP/LII6xdu1YINpPJhMfjIRQKiTAjuJQH8cQTT7CwsMDi4iLZbBan04nf78flct2ypFbJ3Y/RaLwUymVIs5AaYSk1he529D8BLilAGWr0m0TI2c2geWp37txJbW3tFb8PhUL4fD4MBgM2m4377ruP1tbWK/J6AoGASCa+U2ghxl//+tdFGWNNPgWDQYqKiu6afiqSex9tzmSJMZfsYyE5fJvkhMolOZFltblUtAO5GQwGA1VVVTidTlpbW1f8TotY0cLm3G439913Hy0tLVfkCpWWlopcGoPBwFNPPUUsFhMlrn0+H8FgUOQFaxEx69ato66ujvvvv59YLIbZbMbj8eDxeHC5XLfEYXQzVFZW8rGPfYxdu3aRTqeF3HG5XHg8HpFnbbFYaGho4Jd+6ZdIJBKi9YHL5aKkpEQ4mM1mM2VlZXzpS18imUyiqipms5ni4mJCoRBms/mu6EcpeX906q1o53wXocXTTk9P09nZSTqVIX8bQu00DHo9NrudmppqWlpa3rO5441QWLHuarG4hRXurpYnoMUqF1bBu9pxd1JgafeYy+WumtBZWBXm8op+mrAtLFKhUXhs4fu73phmyU8/2nc/NTXF2NgY4+PjpNOZ23Z9HWAwGikvL6O8vJzKysoryvZ/EArL+l+toIE2r7XzX6vs7OXH3Yn5oM1hbd5fSz7JuSr5sNHG3/j4OIMDg8zNz5HJZG/b9XVccuZU11RTWVlJKBS6KaW6cN2/1vwvPP/1yonCuVoY7ne1XkyF17/8OLhz8/r95M7lBtzlusnV5JP2O60fk/bza+kmkruXe85I0gZwOp0WiXi3E20SOBwOnE7nHfeQSCSSn6CJO63aUDKZvCMywm63Y7VaVyQgSySSuwNNTqRSKeLxOOl0+rb3tNF2Vm02m9xBlUjuEPeckQQ/EXB38tHutHdEIpFcm0KP4Z1AygeJ5O5G6hESieSeNJIkEolEIpFIJBKJ5EaR7X4lEolEIpFIJBKJpABpJEkkEolEIpFIJBJJAdJIkkgkEolEIpFIJJICpJEkkUgkEolEIpFIJAVII0kikUgkEolEIpFICpBGkkQikUgkEolEIpEUYLzTN/BhkM/nyWQyLC8vr+h4fDvQ6XSYTCYcDgd2u132N5BI7kIymQypVIpEIoGiKLf12gaDQTSTtVgst/XaEonk+lBVlWw2SzweJ5VK3fam0waDAafTicViwWw239ZrSySSS9xzRlI+nycWizE6Osrx4yfIZNK31UjSumTX19ezYcMGzGYzev2Nbdipqko+n2dycpLZ2Vmi0SjZbJbq6moqKyuxWCzo9XpUVUVRFHp6epibmyOZTIpzOJ1OfD4f9fX1WCwW8vk8Y2NjXLhwYYUBZ7VaKS0tpa6uDpPJdMP3fCvQFqdoNEpvby+JRELcY1lZGaWlpeIeVVUlGo0yMDBAJBIhm83icrmora3F7XZjNpuloSoRaHNqYmKCwcFBhoaGyWYzt/UejEYjlZWV1NTU0NDQgMFguOkxqsm4cDjM1NQUy8vLeDweGhoa7vh8zufzJBIJRkZGmJ+fJ5vNYjAYcDgcVFVV4fP5MBqNcp5K7ho0OTEyMkJ3dzczMzNks9nbeg9Go5H6+nrq6uqoqKjAaLxxdU1RFCYnJ5mYmGBxcVH83GQy4XK5WLt2rVgrs9ksi4uLDA4Okkgk0Ov1FBUV0dDQgNPpxGAwAJfe0cLCAhMTEywvL5PJZLBarVRWVuL1enE4HELu3Im5rcnE4eFhwuEwsViMdDpNWVkZtbW1OByOq35GVVUWFxfp6elBp9MRCASoq6tDp9Oh0+mEvtXf38/8/DzpdBqdTofX6xXnNZlMt/txJR8S95yRlMvlmJqa4pVXXuH3fu8PyeXyt/cGdCre4mIeeeRhGhoaKC4uvikjKZ1Oc+DAAV5//XW6urpYXFzkF3/xF3n66acJBoPCw5RMJvnOd77DW2+9xdTUlJikDQ0N7N69m1/91V8lGAySTqd59dVX+c3f/E1sNht6vR6dTkcoFOLxxx/nV37lV3C73XfcSIpEIpw6dYr/8T/+B+Pj4wAEAgEee+wxPvOZz+D3+7FYLORyOS5evMhf/dVfcfHiRRKJBA0NDfzyL/8y69evJxQKCaEukQBks1neffddfvjD59i//7V/WUxv3yKu06ns2LGDj370McrLy3E6nTd1Pk0ZyOfznD9/nueee46zZ8+yceNGfuM3fuOmZNCtIJfLMT4+zt/93d9x7NgxlpaWsNlsVFVV8dRTT7F3715cLpc0lCR3DZqj7uDBg/zjPz5DR8eZOyInHnnkYbHe3YycSCaTvPXWW/y///f/OHXqFCaTCZ1Oh8fjob29nW9961v4fD50Oh3Ly8scOXKEb3/720KXqK+v51d/9VdZs2YNdrsdvV6PoigcP36cH/7wh3R1dbG8vIzP5+Pnfu7n2LlzJ83Nzdhstlv4Rj44qqry4x//mOPHj9Pb28vk5CSf+tSn+Nf/+l9TV1d3xbFwyaA8e/Ysv//7v4/RaGTfvn3863/9rzGbzWJchMNhvvvd73LkyBHm5+cxGo1s2bKFr3/968LRLLk3uOeMJEVRSCQSLCyEMZlLKQ20Y7G40Os/fEU5n88Tj8+iY5H5+TkSiQQul+umJoyqquj1ejZt2kRJSQlDQ0Ok02ny+SuNP6PRyJo1a/jSl77Epz/9afEzq9UqBKx2vmAwyB/+4R+yevVqLBaLCAFyOp13VKECSKfTHD9+nK9//et85jOf4Rvf+AaqqnLx4kW++93v4vf72b17N6WlpczPz/PFL36R7du38+Uvf5nGxkZeeeUV/vZv/5ZIJMKuXbuorKy8o88jubvQPKXJpA67o55gaC1G4+0JZ8nncyws9JBMwdLSIslk8qaNJLhkiExMTPDOO+/Q09NDNBoln8/f1l30a5HNZolEIkxMTPCNb3yD0tJSZmZmOHnyJH/8x3+M0+lk3bp1hEKhO32rEokgk8mwsLCAotgocrXi86+6LXJ
CVSGfz7Kw0E0kkiQSWSadTt+UnFBVVThDv/rVr/KlL31JRLmYTCbhSJmZmeHdd9/lN3/zN/nyl7/M5s2bicfjHD16lD/4gz/gd37nd2hqasJmszEzM8M3vvEN9uzZw9e+9jXa29s5dOgQ//RP/8Ty8jIWi4WWlpZb+GZu/Nnb29upra2lp6fnmvoTXNI9Zmdn+cu//Evi8TiBQIB0Oi1+n8/nOXfuHH/wB39AIpHga1/7GvX19VitVqLRKH6/XxpI9xj3nJFUiMlkw2bzYrMVozd8+AM3n1PI57PkcrcmfEen02G1WvnIRz5CIpGgs7OTdPra4YPaVnE+nyeXy2EymbBarVit1isMH1VVyeVy5HI59Ho9ZrMZq9V6V3hyl5eXmZubI5PJ8Pjjj9PS0kI+n8dms3Ho0CEikQiJRILZ2VkOHDgAwOc+9zk2bNiA0WjEaDTyN3/zN8zMzDA5OUlFRQVwZ7b8JXcveoMRk9mB3eHHZLR9+E5iVSWfV4jFptDrb83FtHkci8U4fPgwqqpSW1tLMBi8a8a71Wqlra2N3/u936O4uBiTyURVVRXFxcWcPHmS2dlZYrEYfr//pkKKJJIPA4PBhNnivCQnTB/+zoiqquSUDNHoJDrdrXVYFuoHOp0Os9ksdoYAxsbG6O7uJhAI8NnPfhafz0cymcTn83H06FH6+/vx+XwEAgGmp6dRVZXNmzezY8cOgsEger2et99+m1wux/LyMvl8/o46XXU6HU8++SSpVIqRkRFisZgI37+cbDbL+Pg4L7/8Mj6fj9LS0iuiUAYGBjhx4gR9fX386Z/+KcFgEJvNhtlspqSk5Kad4pK7j3t4RdKh0+nRG4zoDSYMhg/fA6RDh15vJJ+/NUJBp9NhMBjwer3Y7XaRY3OtY00mE8lkkp6eHl588UXsdjtVVVVUV1dTXV0twlmMRiN6vZ5Tp04xPj6O2+3G7/fT2tpKZWXlHVdULBYLbrebkpISRkZGKCoqQlVVZmZmhNfLarUSj8fp6+ujrKyMiooK/H4/uVyOyspKnE4niUSCpaUlcrmcDLmTXIEOHXqdHoPBhN5g+tCNCi1cR6czoOPW7PCoqko8Hmd0dJSenh6Rezg5OXlLzn8r0Ov1OBwOkQOgqiqZTEZ4srWcrLvFqJNIVqDTo9cbLskJ/e2QE3lQ85eiX27RtTRdQlVVRkZGeOWVV7DZbCJfedWqVRgMBhRFIZPJiDmrhcvZ7XbS6TRjY2M0NzcTDAZxOp34/X6WlpYYGxtDURTGxsaw2Wy43W6cTucdndPatQOBAKlUiuXlZcxm8xVGkuZ0np6epru7m+7ubnbt2sXY2BiRSGTFsePj4wwPD4u/9/X1oaoqdrud2tpa1qxZg8FguOPROJJbxz1sJP30kMvlUBSFdDotKuiYzWYRBnc96HQ6/H4/U1NTRCIRTpw4IbzKS0tL+P1+kWvkcrkoLy9nYmKC8fFxjEYjfr+fTCZDIBDAbrdf13W1+9aEjmaAFeY6XQvNA55KpUQFQp1Oh9PpxG63U1paSmNjI4cOHSIejwsjSVVVSktLKSoqYnp6msnJSaqqqsQumE6no6ioCIfDISoTSSNJ8tNM4VzJ5XIiVMThcJDP55mfn6e7u5toNCqUnfn5+WuGlLzXdVRVZXl5GfiJYmU2m9+3AIrmoc5kMmSzWSHHbDbbFYUjcrkckUiEmZkZjEYjHo/nrtnFlkh+WtHkxP/P3ntH13GeB96/mdt7AS56BwgCJMAmNlGkCkVKYizLlh3b0mdnnd183k3bTbK7Z8/u55Oc7J6TTdmS/XY3J2unfLYTx44sW5ZldVGFolhFsBMgQfReL26vM/P9Qc0YECnpQiQuIer9nUNRvPfOzDsXeJ95+pNKpVAUxXimulwuJEnC6/USDAYZHx/n1KlTaJpGIBBgamqK6upqvF4vdrsdj8cDwPDwMNlslkwmw9TUFJlMhsnJSaLRKLIsEwqFqK+vp7+/n2w2S01NDT09PVitVkpLSwkGgwXt6cXr1juNyrKMw+EwHLofhqqq5PP5JR0I9cyYQp/7qVSKK1eucOnSJTRN49577+XnP/85sVhsyef07BTdyax3RzWbzQwPDxMKhaipqTG+c8EnH2EkrQISiQTj4+NcvnyZ2dlZAFpbW1m7di2hUKigc9hsNh566CHuu+8+3G43mqZx8OBBTp48ycDAAC0tLXR0dGA2m9m6dSvf/OY3aWhoIB6Pc/z4cc6ePctTTz1FZ2cnNTU1BSktiUSCsbExjh07hqZp2O12ysvL2bJlC36//yPXHA6HOX36NDMzM2QyGex2O3v37jUiWzt27OC5554zvOKaplFXV4ff78disZDP54nH4wQCAUMY6hE1q9V6neEpEHwS0Q2XM2fOMDMzQyKRQJIkHnzwQaNZw+uvv87+/ftZu3YtMzMzhnd08d+FPLRTqRTPPPMMmqZhtVrx+/20tbXR3Nz8kccnk0l6enoYGxtjbm4OgLvvvttQwPR1hMNhLl68yNtvv01dXZ3RiVJ4XwWCj4+iKCwsLHDy5Enm5ubIZDJYrVYeeOABgsEgGzdupLq6GofDgclk4uzZsxw/fpwf/vCHbNy4kQ0bNlBbW8uGDRt49dVX+c53vmPU8+od7BKJBLlczigF2Lx5M8ePH6e/vx+Hw0EsFmPDhg0EAgFsNlvBcmd+fp4TJ04wMzMDXItcbd++nbKysht2oVtMOp1mamqK8+fPMzs7i6ZpNDU1sXnz5oL0EICenh5ee+01otEo3/jGNwx59H45Go/HmZ6eZnBwkF/91V/lnnvuIZvN0tPTw9NPP204bD9qzYJPDsJIWgXk83mi0Sjj4+OGQVBSUrKkYPCjMJvNtLW1Ab8IM/t8PqLRKGfOnOHKlSu0tbVhsViorq6msrLSEAL6zJa/+Zu/YW5ujvLycux2e0HrjsViDA0NoSiK4T0ppFWqnnIzNTXF6OgoqVTKCOlfa99+gh//+Mf823/7b2lpaUHTNMbHx/nHf/xH3nrrLaO7jtPpJJlMLvGa5/N5crmcMV9CRJEEn3R0b+7Y2BjRaBRJkojH4/T19fH2229z6NAhPB4P58+fp6+vj4GBAQD+6q/+ii996Us0NzcXlCuvKAqDg4NomobNZiOZTBo1fR+F3vVpfHyciYkJANavX08oFDKUjHA4zPPPP8/ly5fJ5/N89atfpaGhoSB5IxAIPhi9G+7ExASTk5OkUilsNhvpdBqTyUR1dTVVVVWGflBZWYnNZqO7u5szZ87Q0tKCz+dj8+bN/Lt/9+84deoUqVQKh8NBe3s7XV1dhvGTTCa5cOECL730Eo8//jibNm2ipKSEsbEx/r//7//j5MmT2O129uzZU1D6fiaTMVqUA3i9XtavX1/QDLt8Pk8ikWBiYoKxsTE0TcPj8SyrZbve9Q/gpz/9Kaqqcu7cORYWFnC5XDQ0NLBv3z7gmgFXUlLCV7/6VTweD9lsFp/Px8mTJxkbG2NhYYHq6mpRm3SHIIykVYDT6aShoQGXy2WkroVCIQKBQEHH60Lv/cJID1XrqTC6om
IymZYYDnoOrZ72VmhHLH3djz76KJqmYbFYcLvdRrj+o9bs8/nYuXMniUTCCFkHg0EuXbrE6OgoiUSCbdu2Gd4gPQd6ZmaGWCyG2+2mpqaGt956i1QqZdxnLBYjHo8TDAbxeDzCSBJ8otH3yrZt21i/fj3ZbBZJkqisrCQej9Pe3k46ncbv9yPLMmaz2ag/0GVAoakfdrudxx57DE3TMJlMOBwOQqFQQcc7nU7WrVtHTU2NIcfq6upwu92oqkoqleK5557j0qVLeL1e7rnnHlpaWrDZbCI1RSC4SUwmE36/n127dhmpayaTiVAoZDzjF+8zi8WCyWQy0tX0zrcej4fOzk4qKirI5XJks1kjw6Wuro5gMEgul6Ovr49MJkNTUxPr1q3DZrNRUlKC2+02Ii6KohRkJAUCAfbs2UMikTDWVlVVVVALcX1+4p49e0gmk2iaRjAYXFY3wLa2NqxWK+l02nDW6t+ZyWQy6rlLS0spKysznEhms5l8Pr/kuxWy7M5CGEmrAKvVSjAYxOfzGRER3ZBZPPU7mUwaCpL+b7gmUJLJpJEr7HQ60TSN/v5+5ufnDSVLlmWi0SjZbBZN03C5XOTzecbGxpicnMRuty9r+K2+bt0okiTJUNIKwel0Ul9fbxhwercdXXCnUini8biRPphOp0kmk4ZS5Xa7Wbt2LU8//TR9fX34fD5sNpsxBM/lchmKo0DwScZuty/ZK/ALRUJ/eOukUikikYiRdrKc/Hiz2cz69esBjBq/Qp0MeoensrIyQ47p81ii0Sjnz5/n+PHjeL1e6uvraWtrw+FwLHHMCAVDIPh46ClwjY2Nxp7S64QVRSEcDhvOFMCIvGSzWYLBoPHcVhSFTCZDZWUluVyOyclJ5ubmcLlc1NbWLnmmJpNJEokE6XQas9lMMpkknU4ve5SI0+mkqanJkBt62nwh57BYLPh8PqNGEzAMGz26lkgkSCQSRs2k/m+LxYLFYqGzs5OqqirS6TRwTYZOTU1hMpkMZ6zNZqO6upq6ujrgWopeQ0OD0TY8Ho/j8Xiw2WxC57iDuKONJE271inmmsBY+aGy1x702rULLwNZlm9oXOiRkbm5OSYmJhgeHmZ+fh6r1UpfXx/5fJ5QKEQoFGJqaoqzZ88iSRK1tbXk83leeuklhoaGjHCxyWRicHCQyclJMpkM9fX1xGIxjh07xsWLF6mqqjJa9BairHzQugtBV8Bu1K2vvLzcENDPPfccO3fuRNM0BgYG6Ovr4zOf+Qx+v5+ysjK2bduGJEm8/vrrRKNRqqurefvtt43vpry8XAgswQ3RwJAN1+TDyne3M2TEMtANlcXGim5UVFZWUl5eTmdn55L34vE4mnZtaG1JSUlBe0Dfkx9nAKR+7I2uk81mjWGyV69e5cknn6SpqclIoQ0EAjidzo9sDiEQ3B60RTpE4ZkWH/tqhs5y6+RENBrl4sWLDA0NGU6QkydP0tXVhaZprF+/3nBYLCws8M4779DQ0GA0NDh8+DCNjY00NTURCARQVZWWlhbS6TQnTpwAoKGhgYsXLzIxMUFtbS1VVVUF6Qa67LDZbMu63w+7b/3eNU1jenqamZkZBgYGmJubIxaL0d/fj8ViIRgMEgqF6OzsXPKdx2IxZmZmcLlcuN1uNm7ciMPhoLW1lampKTweD0899RT79+8nn8/T09NDOBympqYGj8cjdI47iDvYSNLI5ZKkknNoqoIsr/ytqqpCKjkPWgK4+ZbjevTkhz/8IT/84Q+5cuUKmUwGSZJ45plnuPvuuzlw4AC/9mu/Rjab5dVXX+XEiRPMz88bNUp79+5l9+7dNDY2IkkSyWSS48eP89xzzzE7O4vdbqe5uZm77rqLL37xi6uiBXhVVRX79+9HlmX+9m//lr/9279FkiRKSkp48MEHOXDggBGKb2ho4H/+z//Jt7/9bf7Lf/kvJJNJGhsb+Rf/4l+wfft2qqqqbuu9CFYvqpIjm42TTMxgMq18ytc1p4dCJh1BVZ235Jy6crA4/91qtWI2m1FVdVXU5EWjUbq7u/n+97+PpmmcP39+iSPm937v93j44YeNlB2BYDWh5LNkMzGSiRnM5pWvnbvW7S1HNhNFU3235Jy5XI6zZ8/y9NNPMz4+jiRJ1NTUcNddd/HNb37TSDfL5XLMzs7y9NNP09XVhSRJVFVVsX37dv7pP/2nNDY2YrVajflI/+bf/BteeOEFXnnlFRKJBH6/n8997nM88MADdHZ2rgpj4Vvf+havvfaaoT+pqsprr73G9u3b+fznP8/Xvva166LtuVwOi8VizF3U05Z9Ph/33HMPf/7nf84f/dEf8eyzz2Kz2WhoaOCXf/mX2bNnDz7frfmZCVYHkrYaRrLfQrLZLFevXuWnP32WP/mT/wqSGQn5ls0b+Cg0NY/P52b//vv5oz/6o5uawKxHkmZnZwmHw0sGyeqtPb1eLyUlJeRyOaanp42WlHroXW+HrXuHk8kksViMSCRifM5ms+F0Oo2izNs9s0RPMYzFYszNzRnddMxmMx6Px4h26QI4Ho8zOztrtAC12WyUlpbicrkKjooJPh3ojoenn/4xzzzzU1599Q1k2Vo0+YAGiprh7p1befTRX+JXf/VX8Xq9t/R3NBKJGK1ry8vLb/vcDr3Biz5f5P21EWVlZUb74dWgVAkEqqqSTqf57ne/x/f/4YecPXMBWbYUUU5oKGqW/fvu48tf/hKPPvpoQbW+15/mmr6Qz+cJh8OEw2GjoYE+SNbn8xkDZfX0tMnJSaOLpsViweVyUVJSsiTaq6oqCwsLRCIRo4us2WzG5/Ph8XiMUSBwe9Jo9cjQxMQEsVjsOv3J6XTi9/uN7rj6GvWW5OFw2JgZpQ/KBYxxDBMTE0b5g81mIxAI4PV6l5xL8MnnjjOSFEVhbm6Oixcv8sILL5DN5lY8PL4YWZZwezxs3LCBAwcOGO02Pw6L209+0D0sNmhu9LkbGTyFfO52G0n63x+2TkmSjPffPxNG/5wQVoLF6A/A8+fPc/r0GS5evEAu99EdlG4lJrOJ1tZWOjs62LZt2y1PM1u8F1ZDMbG+jwuRYWK/ClYDmqaRz+c5daqL48ePMzQ0VFCntVuJyWxi44YNbN68mfb29g8cJP9hLN5zheoHN/rsBz134YOfvfr/3y6Wqz8t1qMW/734vQ/TORY7eIQcu3O444wk+IWlPzc3ZzQpKBayLGOxWPD7/Xg8HrFZBIJVSDqdJpFIEIvFltUq9lZgNptxu924XC6czluTdicQCG4tetQ5Go0aGRrFRI/K6CM6BAJB8bkjjSSBQCAQCAQCgUAg+LiIBHCBQCAQCAQCgUAgWIQwkgQCgUAgEAgEAoFgEcJIEggEAoFAIBAIBIJFCCNJIBAIBAKBQCAQCBYhjCSBQCAQCAQCgUAgWIQwkgQCgUAgEAgEAoFgEebbvYCVYDV1NRdzkgSC1cdqkRFCPggEq5PVIiNAyAmB4HZxxxlJ+nTlfD5PJpO5LYJOHyhrtVrRNO1jCzh97Yqi3PB9fVr04vNrmmZMgr7R+7dL2N5o8rf+2vvXK
R4IgpVi8Z7K5XLk8/miywhJkjCbzcYf/bWPw+Lp8B+072/3flq8xvfve31K/e1eo0CwGP13NJ/Pk8vlUBTltsgJi8WC2WzGZDLd1B5ZrpxYfK+qqhp6jL5fdW6kmyw+72rY14vlzuL7kmX5unu+kV7y/nsWfLq444wkgGw2SyQS4erVq2QyGUMoFAOTyYTdbqeqqoqampqb3mCqqhIOh68T0JqmYbVasdlsxjTufD5PNpslm82iKApmsxmbzYbFYrlpIXurUBSFfD5PKpUin88vMSitViuSJN2UYSkQfBSaphGLxZienmZmZoZ0Ol3U61ssFkKhEKFQiJKSkpv+XdcVuXQ6jaIoxn6y2+2rZh+pqmqsMZ/PGwqg0+nEZDLd7uUJBNehaRrRaJTx8XHC4TDZbLao17dYLFRVVVFaWorf77/p8+VyOXK5HJlMBkVRDP1Bf+6+H93ZnM1mSafT2O12HA6HodNomsb8/Px1x9ntdkPvuJ3yZ7FhmMvlDN1IVVVMJpMhI3X5k8lkjO9GVVXjPqxW6227B8Ht544zknK5HAMDA/zsZz/jT//Lf0dFBkmGIu1VTVXw+zw8/OAD/Kf/9J8oKSnBYrF8rHPl83lmZ2f5whe+YChymqaRyWSYnZ3l3nvv5bHHHuPJJ58kn89z+PBhjh49yvHjx1lYWKCsrIzdu3eze/duNm/efNs3u6qq9PT0cOLECZ5//nmmpqaw2+20tbVxzz338Mgjj+D1eoXnRrCiZDIZXnzxRZ756bO8evANkM2AVBwZoYGm5ti1Yxu/dOAR/sk/+Sd4vd6PfbpcLkdfXx9vvPEGL7zwAr29vWzcuJH9+/fzxBNP4Ha7b+HiPx6aprGwsMDx48f5u7/7O/r6+rDb7axfv55//+//PdXV1UZETSBYDejP2R//+Mf8ww/+kTPnLoBsgmIp/e/JiYf23s8v//IX+aVf+iU8Hs/HPl0mk+HMmTMcOXKEgwcP0tPTw969e3nooYc4cOAALpfrumPy+TxHjx7llVde4YUXXuDxxx/n61//OhUVFZjNZiKRCJs2bcJms+F0Og394rHHHmP//v1s2rRpVcifubk5Dh06xLFjxzh9+jTxeJza2lp2797Nww8/zNq1a8lms/zoRz/i9ddfZ3h4mLGxMb7whS9w4MAB7r333tt9C4LbyB33ZFJVlWw2SyweR7UHcJc3Y7G7keSVv1VNU8nE55DVJJFIxPBafFxMJhM+n4/f//3fJ5/PA9cE16VLl/jJT35CY2Mjzc3NSJJEX18fL774IrFYjP3791NTU0Nvby8TExO8+uqrVFZWUltbC9y+1JZwOMyLL75IV1cX7e3tPPHEE8zNzTE2NsaxY8eorq5m27Zt2Gy2VeMBF9x5KIpCPB4nnZeQ3RV4q1qRzVZW3kq6luqSmBkklYdEIkEul7vps9psNsrKyrj33nuxWCx4vd5bct5biSzLuFwutm/fTmVlJXNzc6TT6aJG+QWC5aAoCrFYjCxWzP5a3GWN78mJlUVDQ1PyxKcHSaSzJJPJm97PkiThdDqpqqri3nvvRdM07Ha7oVcsuf57KXmDg4NcuXKFiYkJTCYT2Wz2uowWWZZ56KGH2Lx5M1VVVQDU1dVRWVmJzWa7qTXfChRF4ZlnnqG3txdVVfnqV7+K1+tFkiTKy8uNCJ2maQSDQdrb21m7di2vvPIKkiR9YKmD4NPDHWck6Rs8n1ewOLy4Q/XY3EEk08eL5izr2mqe5LwTKTllhGxvJo9ZkiTsdjv33XefkSubSqUIh8PU1NRQX19PVVUVkiQxMDDAyMgIFRUV3HvvvdTV1REIBHj55Zfp7+9neHiYqqqq2+qxnZ2d5fLly8zOzvIrv/IrbN26ldnZWd555x3eeecdLl26xPr167FYLCKaJFgxNE279vAzWbC6g3gqmjFZHCvvJdY0VDWPkk0iybIhI24GWZbx+XysWbOGiooKpqenV53hIUkSNpuNqqoq7HY7ly9fpru7m8nJydu9NIHgA7mmR+SRLHZs3tJrcsLqWPkLaypKPks+k0BDumk9Aq45XEtLS2lra6O6upqhoSEcjuvvRdefUqkU3d3dxGIxnE4n5eXlH3jeQCBAdXU19fX1uFwuSktLl6Sx3S5UVWVhYYE333wTl8tFS0sLNTU1BINBLBYLPp/PiKCZTCYaGxuNyNfp06c/dgaQ4M7ijjOSdCRJQjZbMNtcmO1uTCvuAdJQlTxmmwOyt+ZaeuGgvpEVRSGTyXD16lUqKiqorq4mGAyiaRojIyNIkkRVVRUbNmzAbDazfv16jh8/Tl9fH1evXmXr1q23tTZpenqaSCSCw+Hgnnvuwev14vP5GBkZoauri0uXLhGPx3G73SL9RrDiSJKMbLZisXswWR0rvi80TUNVcpgsNiT51ngozWYzJSUlBINBkskk77zzDpFI5Jac+1ahe7HXrFkDXKsZHR8fF0aS4BOBLJswWWxY7O5rRpIk3TDmrHEzsehfHK1pKnIug8liu2XBbbPZTFVVFeXl5SSTSV555ZUPdKbkcjlmZmbo6uqioqKCpqamD3W8jI+Pc+HCBWZmZqisrMRqtWI2m2+7kaEoCiMjI5w6dYp9+/ZRU1PDyMgI4+PjNDQ04Ha7jTXq+lIul2N+fn5J7ZXg043QRG8ZK294JBIJhoeHuXDhAg8//DDNzc24XC5kWaaqqopsNsvo6Cg9PT2sWbOG3t5eRkZGmJqaoq+vj3w+f1vrkkKhEC6Xi5mZGY4dO8Z9993HxMQEQ0NDDA8PYzabSSaTIsQtWP3cnEYkEAg+oXzQti9IHHyg3FgdwkRRFGZnZ3n++edJp9Ns3ryZZDLJ0NDQdZ+1WCzcfffdBAIBotEo09PTvPjii9xzzz3cc889rFu37oa1TsUim81y+vRpEokE58+fJ5/PU1JSwsLCAm+//TYbNmxg3759tLe337Y1ClY/nxojaTXrNLOzs/T09HDkyBGGh4cB2LVrF7t27aK+vh645o0Nh8N0d3ejaRotLS2EQiEjpH3XXXfR3d1Nd3c3/+k//Sd8Ph8Wi4X+/n7gmneo0JB9LBbj2LFj/PznP0dRFHw+H1u2bGHjxo20tLR86LF6U4lnn32WgYEBUqkUXq+XX/3VX6WiooL9+/fz9ttv8xd/8Rc89dRTmEwmFhYWSKfT+Hy+69p0CgSrkiIKE0VRGB8f56WXXmJwcJBIJIIkSXzjG99YkiJys2iaRjwe57/9t/9GOBw2UnR27tzJfffd96HpM3rHwKNHj3LhwgUGBgYAeOSRR9iwYcMt6fQpEHxi+CCFYwXlRjabZXJykp/+9KeMjo6STCax2+382q/9Go2NjR9ZI6RpGmNjY5w+fZqjR4/yG7/xG7S2ttLb23vdZ2VZxm6386//9b/G4/FgNptJJBIcPHiQd999F5/PR2VlZUFGkqqqnD9/npdeeomxsTEASkpKeOKJJ6ipqfnIc8zNzdHb28vLL7/MzMwMTqeTLVu2sH37dmKxGIqiUFtby7333su2bdtYWFjgmWeeYWxsjHfffZeGhoYbph4K
BPApMpI+KDz+Ye8XC7PZjNvtpqKiwngtGAwuifroxsfAwADl5eWUl5fjcrmMFKGSkhKjFmlmZgZVVfH7/UbIefFnPwqTyYTX66W2thZFUfB4PAQCgYIKMfWW3qFQyGj563K5jPahW7Zswev10tfXh6IoOBwOJicn6evrQ5ZlzGazaNogWDXoMuJ2/kZKkoTVaqW0tJRcLofP50OSpBVJCZFlmcrKStxuN7Is4/f7C+6qpcuN8vJyoyDc5/Pd9q6aAsFKc52cuA0CQ5cTZWVlAKTT6WWNAlhsJJ09e5bnnnuOt99+m6mpKbq6urDZbJSUlHDffffR0dGB2WymtbXVaCGeTqdZt24dhw8fJhKJkEwml8wc+rB1OxwOysvLDXnm9/ux2+0FyTez2YzL5aKyshKHw4HdbjfqjvRz1NbWsm7dOmpqaigpKaG8vJyJiQmmpqbIZrOralyCYHXxqTGSVjN2u52KigpkWSaRSKBpGuXl5UuUk0QiwfT0NOPj49TX11NSUmJsbE3TcDgcbNq0iZaWFqLRKKlUClmWGR0dJRwOU15eXnA9ksVioaamhnvuuceYx1RSUlJQq2K9i9X69eupq6sz5rboka3GxkbKy8tZv3490WgUi8XC+fPnCYfDyLKM1WoVHmfBqqeYkWm9LrG9vZ3a2lqy2SySJBmKAPCh0ddCFBUdi8XCtm3byGQyRuOYsrKygo+tra3F7XbT1NQEQE1NDW6325BT71/Tx1mjQCC4HlmWcbvddHZ2kkgkyOfzmM1m/H5/wU0UstksmUwGh8PBpUuXkGWZcDjM2NgYZrOZCxcu0NbWRnt7u/Fc1zGbzZSVlaGqKplMZlkd+UpKSozUPrjWsdPn8xVUm2yz2SgvL2fr1q2k02mjTtPpdFJaWorJZMLv9xMKhXA4HFitVjweD1NTU6RSKSO9/4Nk6OLXhXz69PGpMZI+KoXrdiZ46S18Q6GQ8driadWapjE7O8vY2BgzMzPs3buXQCBwnYfW4XDgcDgIhUJomsa5c+dIJBLIssyGDRsKHu5mNpuprKxcEtkqdHq2LMs4HA7a2tqum24NGIJVT62bnZ1FURSi0Sjbt2/H6/WKpg2CoqL/ln6ooVHgayuFy+UyGh/oLJYPeleqxf/WXyt078K1vb9x48brrlOokVRVVUVlZeV1xy8e7Pj+bl2qqiLLslBABJ8IlisnioXuoGxra7vu9cUyYXFK+/v3Y2dnJxUVFXz2s581ju/u7ubll1/G5XLxy7/8y3R0dGCxWK5r5pDP5wmHw+RyOSOjpFCCweB1A3MLlTs2m43S0lJKSkqWvK4oitEtNxaLEQ6HqaysJJfLkUgkjEwWXd9Y/F3cjAwV3Fnc8droLwTCyouva9cC6WNc6sM2oaqqjI6OMjk5idVqpb29/brUt2w2y5tvvkk6naayspJwOMzf//3fYzKZ2LJlCy0tLctqyXkzQkE/7kbHnzx5kvHxcSoqKpAkiVdeeYX+/n4kSeLee+/F7Xbf9tahgk8ZmoamqUXpbqdp2jVR9DFkxI0irHq73rm5OQYHBxkZGSEej6OqKl1dXdTV1VFeXo7DUVj3vptVBj7o+Hw+TzQapa+vj56eHsPhc/bsWRKJBBUVFUa0WSBYbWiavn+LISfUWy4n9JrBubk5o8ObnoFy5swZY7aR3+/H7/cvqT02mUycPXsWr9fL2rVrCYVCxGIxjhw5wvz8PPX19VitVqanp/nOd76D3++ntraWQCBw0+sulBvJHVmWWbNmDXv37uXs2bPMzc3x1a9+lb6+Po4cOUJ1dTVbtmwxajrHxsaYnJxkdnaWubk5bDYbPT09+P1+ampqKC0tFYbSp5A7zkjSN4skQS4dJxWeQMkkkUxFGCarKqQj05izUWQ5aKzno/iwz2iaRjKZZHJyElVVjTkH70+dU1WVoaEhzp49y/z8PADl5eVs2bKFzZs3G3UMhXCzStIH3YemaUQiEc6ePcvTTz9t1Du1tbWxefNmGhoaRBRJUBQkSUJTcmSTERJzI9fa7RZlmKxCJj6P6nUWbJB82GcURWFiYoLDhw/zk5/8hKmpKfL5PDabje7ubh555BEOHDhAbW3tRyohK7Hv4dreTyQSdHd38+d//ueEw2FisRjpdJr/83/+D5s3b+aRRx5hy5YtwkgSrBr0/ankMmQTERJzo+/JiZVF0zQ0JUcmPg8h/y15bufzefr7+zl69CgvvvgiExMTAPT29nL69GkefvhhvvCFLxAIBK6TE3a7HZ/Ph8fjMbJRJElClmXefPNNUqkUmqZhNpupqanhwQcfpKOjwxjaejPrvpn71jTNaHDzxhtvcOnSJf7iL/4Ci8VCe3s7W7ZsYf369cb9vvnmmxw8eJDp6WmjK/DVq1c5duwYX//619m9e/eqGJArKC53nEaqFy/6vD68NglzehqUKBShzkXSNCzZOA6zaqTD3QrPg8lkMnL9XS6XUVT9/s80NjYaMw7MZjPt7e20t7cbnp7VQEVFBWvXrkV+b5hmTU0Na9asob293fDoCG+NYCUxmUy43W68bjtucx45PgayuRg2EpKm4iCJz116S1JL9S5TZWVlrFu3jnXr1hnvSZJERUUFNpvttu8ps9mMx+Nh7dq117X4r6+vx+v1igiyYFWhNyLxOi045QxyfBSK4GyVNEBVcEhp/D4PLpfrlsgJvbnB+vXrWb9+vfGexWKhvLzcaJr0fllRUlLC9u3bsdlsRqaH1WqltraWDRs2EIlEUFUVu91Oa2srW7ZsobS0dFXoHJIk0dHRQS6XIxgMMjY2hsfjYePGjTQ3Ny9J0QuFQqxZs4by8nI6OzuN191uNz6fT9RKf0qRtDus33I+n2diYoIjR47w3e9+l2w2W9QJ9LIs4/P7uWfXLv7pP/2nRntMgUBw+9E0jVwux+HDh3nrrbc4duzYsgqMbwUms5nNmzaxa9cuHnrooYJT4QQCQXHQ5cRrr73Gyy+/TE9PT1HlhCRJyCYT9+zaxQMPPMD27dtFm2qB4DZwxxlJeqFdJpNhYWGBXC5XdCPJYrHg9XqN/v5CARIIVg+appHNZkkmkyQSiaIbSXrLWr1drZAPAsHqQleLMpkM8XicdDpddCPJZDLh8XiMjmxCTggExeeOM5Jgaben23F7er6u6IgiEKxO3t/JqJj8om5SEikcAsEqRZcLt1NOCD1CILi93JFGkkAgEAgEAoFAIBB8XIQbUyAQCAQCgUAgEAgWIYwkgUAgEAgEAoFAIFiEMJIEAoFAIBAIBAKBYBHCSBIIBAKBQCAQCASCRQgjSSAQCAQCgUAgEAgWcUdOOdUb9t2uxn2L23WK1p0CwepjtcgIIR8EgtXJYtkg5IRA8OnkjjSSABRFIZfL3bY5SRaLBZPJVPRrCwSCj0ZVVRRFIZ/P35brm81mTCaTkBECwSpGVVXy+TyKotyW61ssFmRZFnJCILhN3HFGkqqqxONxRkZGOHnyJJlMpqiGkizLuFwumpqb2bxpE1ar9WMPjNQH2KVSKUZHRwmHwyQSCWRZpry8nLKyMvx+P2ZzcX6
MmUyG6elpent7yeVyqKpqvCfLMoFAgPLycurr65ccl8vlGBgYYHh4mFAoRGVlJWVlZUVZs0CwGH2I7MjICP39/QwODpLNZou6BrPZTG1tLfUNDTQ3NWE2mwv2FMdiMebn5xkeHiaVSuFyuSgtLaW6uhqXy1VUj7OmaSwsLDA7O8vY2BiKolBSUkJpaSmlpaXYbDbhARd8ItHlxMDAAD09PUxNTZHL5Yq6BrPZTHNLC42NjdRUV9/0c15VVXK5HHNzcwwODpJOp9E0DYfDQXt7Ox6PB5PJhKqqpNNpent7iUQiqKqK0+mkubkZn8+3LHl1M2iaRiKRIBKJMDExQSQSAcBut+P3+2lubhYyRrDi3HFGkqIozM3N8e6pU/zdUz9Gs9qQTCaQVr78StM0yOfw2K1s39DBmpYW/H7/TRlJuVyOrq4uzp07x8LCArIso6oqZrOZjRs30tHRQVVVVVGmcusC9uLFiySTScO7ls1muXjxIhs3buSee+5ZYiTphtVLL73E0aNH2bp1K/fcc48wkgS3jXw+T3dPD28dfocTp8+AzYEky8BKP2yvOT3IpOlobeGenTuWpfzEYjHOnz/PhQsXmJycRJZlFEUhGAyybt067r333qIoMLrTaWFhgZMnT3LhwgWi0SiyLGOz2aivr2fjxo20trYiy7JQYgSfOPRn77lz53j59TfpHRwCqw1JLkZER0NTNcim2L6hgwcfeIBQaelNGUmaphGPx5mYmODYsWNMTk6iqiqyLON0OqmsrMTpdCJJkiFn3nrrLcNRK0kS7e3t7Nmzh0AggNVqvYX3e2NUVWVmZobe3l7Onj1r6Bv6unfu3MnOnTux2+0rvhbBp5c7zkhSVZVYLEZfXz/Hz13AWl6Nye6Ej2moLPPi5BNRXFoepwypVAqv1/uxT6dpGtlsluPHjzM4OIjZbKayspJ8Ps/Zs2eRJAmv10tlZWVRFBFJkjCZTFitVhRFMVIR5ubmOHLkCH6/n23bthmfV1WV+fl5ent7uXLlCkeOHMHpdLJmzZoVX6tA8EHk83nGRke50HOZExcvY6+sRTJbinJtTVXJzkyg5rLUVJSTzWZxuVwFHTsxMcGFCxc4ffo0brebQCDA3Nwcs7OzzM/Ps3HjRgKBQNFSc4aGhjh16hTd3d1UV1djtVoZGhoiHo9jtVqpq6vD4XAII0nwiURRFIaGhjjb3cPFoVFsoSqkomRtaGiKQnZmArss07527U1HsRRFYWZmhp6eHs6cOYPP58NutyPLMpqmGQZIPp8nHA5z+PBh+vv7KS8vx2QyEYlEmJ2dpaWlBbvdjsViKYozJp1OE41GicViRqR8YWGB6elpYrEYHR0dN5WtIxB8FHekkaQoCtlcFktpOd72zVh8QeQiCDdNUUhPj2Gevxaa1w2Jj32+91LtDh8+TGtrKzt37mTPnj2kUin+43/8jwwNDREMBtm6dWtRhITT6aStrY2WlhbDm5xMJnn11Vd5+eWXaW9vp6Ojw3gvnU5z5coVDh06RENDQ9E8UALBh6GnnWgWG7byavwbdiIXIW1D0zQ0JU+s5xyaVSKXyxVUE6XvpzNnznDp0iVyuRxf+tKXaGxs5PLly7z11lscPHiQ/fv3s3nzZhwOx4reh86bb77JpUuXcDgc/Oqv/ioOh4PnnnuO/v5+jh07xo4dO7DZbEKBEXwiURSFbDaL5HBhr27A17YZUxGiFpqmoeWyRHvOokrSTdVE6bIjk8nQ3d3N0aNHURSFz33uc4RCIUwmE4qiGGn7iUSC0dFRfvazn/HEE09w33334XA4uHz5Mv/1v/5Xdu7cidfrxe12r7gzRpZlSkpK6OjooLm5merqagD6+vp48803+au/+iu+9rWv4fF4sNlsK7oWwaeXO85I+gUSsmxGtjkwOZzIRfAUa0oe2Wq/pd4mvcZJF5R2u51UKkUmk8Hv9xMMBovqqTWbzUvC/vF4nB/+8Ids2LCBjo4OKioqjPeOHDnChQsXUFWVL3zhC/zsZz8TCpNg1SDJMrLZimx3YLY7YMX3kYaazyNZrCAtr2GEqqqcP38eSZLYtGkT7e3tOJ1OOjs7WVhY4MyZM3R1ddHe3l5UIykUCnHfffexZs0aJEnirrvuIplMcu7cOebm5giFQkVZi0CwUkiyCdlsxeRwYrIXYW9pGorJ/J6cuDX11KlUikuXLnH8+HG2b9/Ot7/9bRRFobKyku3bt/PAAw8AGDXHkUiEz3zmM1RXV2OxWAgEAlRUVHD58mXq6uqoqalZcSNJkiRCoRClpaXGv+Fa2n9LSwvhcPimnNACQSHcwUbSUopSaPiLq92S88myjNvt5otf/CJvvPEGTz31FE8//TS5XI5AIMCmTZvYuHFj0dJr3v8dxmIxBgYGOHv2LP/hP/wHmpqajPB9f38/Z8+eBeDuu++moqKiaAWfAsFykCQJVrqmT1skHyTjPwWTyWSIxWJYLBaCwaDR9cpsNuN0OvH5fMzNzRW1C9fU1BSVlZWEQiHD+eF2u7FaraTT6aI3zREIVorFu7UoNX/GJW7NtWKxGBMTE/T29uJ2u9mzZ49R8/P9738fj8fD5s2biUQizM/PY7VaCQaDRiqbzWYjGAwSjUZJJpNFMU5u1P48n8/T29vLa6+9xvbt24vauErw6UT8dq0CotEo4+PjXL58mbm5OQBaW1tZs2YNPp+PfD5vKBwul4t0Ok08Hicej5PNZo3Cyg9DV1YOHTrE6OgomUwGq9VKe3s7DQ0NlJSULGvNmqYxMzPDpUuX0DSNjo4OSktLjTzi8+fPk0qlqKmpYc2aNdd1odHXI4wmwacCicVelGWjtyu32WxYrdYlCoQsy1gslmUZJTMzM4yMjHDx4kVyuRxer5fGxkY2bNhQsDMjnU4jSdKSVBe9tbmqqsLLKxAUkXw+bzRTmZ+fJ5PJYLFYeOCBB0gmk4auUFpaaqTod3d3Mzw8zIkTJ2htbSWbzZLJZAyZojdekWUZq9VacIowXJNZyWSSn/70p+RyOcxmM263m82bN1NTU4PFUlh2jy7TVFXl3LlzvPvuu0xMTPC5z30Oj8cjslMEK8qn00jS+GgHTSGfuUVEo1EuXrzIM888Q3d3NwCf//znjVzbU6dOIUkSW7ZsYefOnSwsLPCd73yHK1euUFlZSUtLS8GC4rXXXuPIkSNEIhFcLhdf+cpXcLlcH8tIGhsb49SpU9TX19PU1ITX60VRFGKxGKdOnTIEbT6fZ2RkhHQ6TSwWY3Z2lqmpKcrLy5f9XQkEq5Iiygu945T+//rfy3E4TE9Pc+LECb7//e+TTCapq6tj//79tLe3F+yZ1a93o6GbwvkhEBTILZId+XyemZkZfvrTn3LlyhWi0Sgul8totmA2m/H7/WzatIkdO3ZgtVrxeDwMDg7y7rvv8vjjjy+RI4tly+J9XejeVhSFaDTKX/3VX5FMJrHZbFRVVeH1eqmoqCjISFpsIM3Pz/Pqq69y/vx5fD4fX/7yl4s+9kDw6ePTaSQVsqeKuO/Ky8vZt28f27dvJ5PJAODxeLDb7UxPT/PKK6
/w27/92+zdu9dorz03N8epU6d48803eeihhwpWbH77t3+bf/bP/hmKomAymYwizOWSTqe5evUqx48f55/8k39iCCvdm9Xf38+RI0dYWFgwhJhuPL3++uu88MIL/OAHPyjYmyQQrGpWWF44nU6cTie5XM6YXaLPcslkMiQSCSPdtRCam5upqKhg//79aJqGxWLB7XYvq51uMBhEVVUWFhaWNGvJ5/NYrdYlES+BQPAB3KItYrVaaWxs5A/+4A/IZDKoqookSVRUVLCwsIDX68Xn8xmdKM1mM16vl6qqKk6dOkU+n8flcuH3+8lkMsTjcRwOB2azGUVRWFhYoLq6GrvdXtC+tlgslJWV8Z3vfMdYi17fVEijBV2m6BGp//W//hddXV1s3bqVf/7P/7lRj71cB5FAsBw+nUbSexTR+fuhmM1mPB7PklbAkiQZ4XG41kUukUiQzWZRVZVkMommactWRPSUOJ2PO8fk9OnTdHd3Y7FYePTRR41icavVSk1NDb//+79PJBJZEpr/3d/9XRoaGti1axcPP/ywmCIuWH2sFqHwPmRZpqmpie7ubnp7e42mCHNzc4yMjDA6OsqXv/zlgrs86Wl7Pp9vyTWWIwvuuusuJicn6erq4oEHHsBisTAwMMD8/DyBQACfzyeUF4HgfayUiNFTXysqKq57xmezWSorK6msrKS/v59EIoHZbGZmZoahoSFqa2ux2WwEAgGam5uRJIl3332XLVu24HA4GBoaYmRkhN27d1NWVlbws9tsNlNXV7fkteXImXQ6zcjICM8//zynT5/miSeeYOvWrYaDRsxhE6w0d7SRpLfc1fJ51A8QS7eyrFhT8miKgqaqQOEGgB7CXuwF1jQNk8mE2+2mqamJvr4+TCYT09PTpNNpLl68iMPhoLW1tSCBpQuSmzVMNE0jn89z8eJFIpEIbW1tlJSUGOeVZRm73U59fT35fH6JsHa5XASDQaqrq2loaBDCTXB70UDTVNR8DilvRlph60hDQ8vn0RQVzIVLHn2frF+/nunpaQYGBnjttdeor6/nypUrXL16lZqaGtrb2wtusX8jmbNcdu/ezWuvvUZfXx8vvvgibrebrq4uMpkMbW1tNzVIWyBYNWgamqqi5vPI+RzaLZATH7b7NU39hZxYxrU+7Blvt9tpbm5mfHycS5cu8eabb2Iymejv72dycpL777/fiCRXVlZy1113cejQIWKxmDH/zO/309zcTElJSUH7Wl/Px22soKoqw8PDHDt2jFdeeYWKigpUVWV8fJxIJIIsy7S0tOB2u0VGimDFuCONpGsPfwk1myIXDYOmIpmKMCdJVcnFFpDSSSSf11jLx0Ef3Op2u9mxYwdjY2MMDg6SyWSMUHpDQwPr1q0ruiKip9S5XC4aGhqMgZGLhbTT6VxyjKZpVFRUUFZWRjAYLHiApkCwEkiSBKqCkkqQC8+hFGnOhqYoKMkYWOzLyu+HaylyIyMjhMNhLl++zPz8PGNjY8C1qE5VVVVROz1t2LCB4eFho+W30+kkFotRUVHB+vXrcbvdwkgSfGLRnQiakiefipNbmEWxFkNOaGi5HEoyDm73LXEmWiwWGhsbiUQixlBqSZKIx+OUlJRw11134XQ6sVgslJaWsmfPHo4dO0ZPTw+yLLOwsEBHRwd1dXVFa5agqirhcJiJiQnm5+dpbW1lenqaeDxudPYMhULYbDZhJAlWjDvOSJJlGZPJhNVqRYsvkBm6Qs5mh2I8rFUNJRnDgoq9vvCQ9Aehz0h68sknOXHiBP39/UxNTWGxWNi3bx8bNmygqamp6GlruVyOqqoqqqur6ejoKPj6u3fvpqKiYsksJYGg2MiyjNVmw6TlURdmSfZduOZEWfkxSaCp5OemMXvrlj0pvqqqivvvv5+qqireeecdxsfHqaioYO3atezYscNwVqw0+jVqa2s5cOAADQ0NnDhxgng8bsxwWrduHRaLRUSLBZ9YdDkh59Ioc5MkAclchGetxjUHztw0lopWo8vczWA2m2loaMDj8WCxWOjq6gKu7WF9xqG+X0OhEJ///OcxmUwMDQ2hKApNTU08/PDD1NTUFG1wq6ZpOBwOqqqq2LFjB5qmMTw8bLxvtVrZsWMHwWCwKOsRfDqRtDtskIWiKMzNzXHp0iWef/6FJXU9xUCSJTweD5s2buSRRx7B4XB8bCPm/V2j9D+6B/r9CkhRZkEt6jajX/NG8wxuhKIoH7h2gaAYLE4XPX36NBcuXiSbza54qt1iTGYTa1tb6ezsZOvWrcuqK1wsB24kC4q5r97f/er96xF7XPBJRZcTXV1dHD9+nMGhIXK5XPHkhHQtI2Pjhg1s3rx5Wam0H8SNutXBB3ese38L/8WGWrF0jcV/boSuWwlZI1gp7jgjSdM0crkc8XicqakpFEUprpH0XgcXv99PaWmpKCwUCFYRuiyIx+NEIhGi0WhRB7DCNWXD4/Hg9Xrxer3CoBAIVhm6nIhGo4TDYZLJ5G2RE36/H6/Xi8vlEqmrAsFt4I4zkgCjNW4ul7stE9/1lD+TySSUH4FgFaKqKoqiFN2JAr+oN9TlhEAgWH3oeoSiKEbL/WKiywldVggEguJzRxpJAoFAIBAIBAKBQPBxEe4JgUAgEAgEAoFAIFiEMJIEAoFAIBAIBAKBYBHCSBIIBAKBQCAQCASCRQgjSSAQCAQCgUAgEAgWIYwkgUAgEAgEAoFAIFiEMJIEAoFAIBAIBAKBYBHm272AlUBVVfL5PMlkknw+X9Rr67MN7HY7NptNzEkSCFYh+XyebDZLJpO5LUMibTYbVqsVi8VS1GsLBILC0DSNfD5PJpMhl8vdFjlht9uxWq2YzXekqiYQrHruuJ2nqirxeJzR0VFOnz5d9IGykiThdDqpr69n48aNWK3WggbBLSwsMDw8zNjYGKqqLnnParVSU1NDWVkZPp+PoaEhhoeHSafTxmdlWaayspLKykrKy8tX5N5uRC6XY2BggKmpKdLpNJqmYbFYqKqqorKyEqfTuUTAa5qGoigkk0m6urpwOp2Ul5dTWVmJxWIhk8lw5coVJiYmlhi4TqeTYDDI2rVrhfEp+NjoAyLHxsYYHBxkeHi46I4Uk8lEVVUVdXV1NDU1FTx0OpFIMDc3x+zsLAsLC2QyGZqbm6moqMDr9RZh5dfQ5enMzAwzMzOEw2GSySQmk4nS0lJKS0spKSkR+1TwiUWXE0NDQ1y9epWZmZnbIicaGhpoaGigqqqqIENJ0zTm5+eZmJgw9iWAx+OhrKyMsrKyosqKxSiKQiaTob+/n3A4TFlZGRUVFfh8PgCy2Szz8/OMjY0xPz+PLMuUlJRQWlpKeXk5ZrNZyBNB0bkjjaRwOMyZM2f4ux/8gByAJEEx9pYGkqbhdbm4e+tW1qxZg9lsLshIikQiXLhwgePHj6MoCpqmoWka2WyWfD7P/v372bp1Ky6Xi3PnzvHGG2+QyWQMBctsNnPXXXchy3JRjaRsNsv58+e5cOEC8XgcRVGwWq20trayY8cOqqur8fl8hnDTNI1UKsXQ0BBPP/005eXlbN26l
UAggMViIZVKcejQIU6ePInNZjM87aWlpaxZs4b6+nqsVqsQloKPjaIo9Pb28tbbb3P83XdRZAkogox4z1cjqyob1q3jnrvvprq6GqfTWdDh0WiUK1eucOnSJUZGRhgcHOSLX/wiO3bsuC2KT19fH93d3QwPDxONRjGZTNTU1NDa2sqGDRuorKwEEHtV8Ikkn8/T3d3Ny6+9xuWrV1Fl6ZousdIskhM777qLvQ88QCgUKjiaNDk5SVdXF0NDQ4TDYTRNw+/3s2bNGtatW0dHR0fBjplbha7LzM7O8vrrrzM0NMTWrVvZvn27Ibump6fp7u6mq6uLmZkZJEmitraWNWvWYLPZKCkpAYQ8ERSXO85IyufzLCwscKm7m7evXsHa1ITsdiGZTCt/cVUlPx/GFZ7DAnzlK1/B5XIVJNy8Xi/r1q3D6/WiqiqappFMJrl8+TI/+MEP2LFjByaTCUVRuHr1KmNjY7S1tdHR0YHT6TS802VlZSt/n4vQNI1YLEZVVRUOhwOAqakpnnnmGWKxGHfffTebN2/GYrEYUaSxsTGefvppjh49SnV1NaWlpWzbtg2AdDrN6dOnuXTpEl/72tdoaGgwonPCOy24FWSzWfr7+zl1uYd3hgext7aCpQheSk1DU1WyQ8NkLmmESkq49957l2UkzczMsLCwQDAY5Nvf/jadnZ20tbWt7Lo/gBMnTjA/P4/NZmPjxo3EYjFOnjxJb28v8/PzPP7449jtdrFfBZ84dKX+ypUrnLxymYvTU9gaGpCsRUiP1TQ0RSE7OITp7FmaGhvZuHGj8Xz9KFKpFDabjZqaGtasWQPAsWPHeP755zl27Bjf/OY3CYVCRdmXetRZVVXm5+c5c+YM//AP/4DVaiUUCrF27Vrjc0899RSnT58mm83ymc98hkgkwtWrV7ly5Qr5fJ6HH35YpCcLis4dZyTpYfK8qmIOBnG0tWIOBpEsRbhVRSEzOoZpYhJFUQxjpxB8Ph/r1q1j7dq1xj2Mjo5y/Phxmpqa6OjooLa21gj5l5SU0NnZyX333WdEamRZxlQMY3ARDoeDxx57DFmWkWUZTdNIJBLMzs4yOTnJ8PAwa9euNbxFvb29HDt2jDNnzvCFL3yBq1ev3vC8Pp+PnTt30tHRgSRJxv0JISm4WXRjHasNS3k5zs71yHZ7UaLNWj4PqgKqZMiIQqmvrycUCvHAAw8gSRL/43/8j9tqgHzlK19B0zRMJhM2mw1VVfF6vVy+fJlz585x//33U15eXlAkXSBYbeg1SZLTibWq6pqccNiLcWG0bBYtn0eTTMvSIwDa29tpaWkBMCJGnZ2d/PCHP+TMmTNcvXqVkpKSou7LWCzGuXPn+PnPf86ePXsYHh7Gbv/FdzkxMcFbb72Fy+Xit37rt9i4cSOKonD48GFOnz7NoUOH2LJlC6FQCKvVWrR1CwR3nJG0BJMJyWJBtlmRiqBca/k8ksXysaJWkiRhsViMqFMulyOVStHV1cW9995LKBTCYrEYRtLAwABPPfUUb7/9NoFAgM2bN7N9+3YqKipu6T19FLIs4/F4jH+n02nDSKqursblchlCLRKJcOjQIbq7u/nsZz9r5HzfiLGxMf7P//k/+P1+amtraW9vp6Ojg5qamqLcl+BTgCxdkxHWazJixVNpNA1VlsFkBq1w40hHL+DWlabbbXwEg0EAw4Ghqip2ux2TyUQ2m72taxMIbhmyBGYTks2KbLOt/PU0DRWu6REfo5xabxqlG1aKojA+Pk4kEsFsNhMMBovqXMnn85w8eZLu7m68Xi/btm1jbm5uifxKp9Mkk0m8Xi/l5eW4XC40TcPtdiNJkpHSGwgEhJEkKCp3tpEE1wScJCEVQaHQJD1nefkCSBda+t/RaJTR0VFGR0fZvHkzgUDAUEZKSkooKysjnU4zOztLOBxmdnYWSZLYtGkTjY2NS851w7UuCoMv9lTpESE9elMIJpOJqakphoaGGB8fZ2pqilQqZTSRMJvNKIrC6dOnGRwcxGw2s2vXLt59993rrqHXNdTU1JBOpxkbG2Nubo5IJEI6ncbn8xmCUyC4aXTZ8N7v/Eqiadp79ZEf7zr63gRuutOWvucXn2dxJPrDvgv9PV1Z0esn4/E44XCYfD5PIBAQqXaCO4T3dAhp0d8riKaqv5ATyzSS9O666XSaqakpent7SSQSdHV1kc1maW1tpaSkpOB70PUDPeKt6yByAfJS1ymmp6c5ffo00WiUzs5OqqurrytBsNlsOBwOstksIyMj1NTUkMlkDN1mZmaGVCq1rMi7QHAruPONpE8gmqbR39/P6dOnMZvNbN++3TCSzGYz69evp66uDrfbjdlsZnh4mD/7sz8jHA4TiUSoq6srKO1Or3tKJpNkMhkAXC4XHo9n2WltZ8+e5fvf/z5HjhxBVVUefPBB2tvbqaurQ5Ikkskk3/ve96isrGTPnj3U1dVx8uRJQ8HSBardbueBBx5g06ZN1NfXk8lkeOmll+jt7aW3t5fm5mba29tvuxddIPgkk8/nSafTxGIxFEVBlmWcTic+n29Ze0vfu/l8nt7eXnp6epBlma1bt+L1esU+FQhuA9FolCNHjvD//r//L+Pj47hcLh555BEOHDiA3+8v2EjKZrMkk0kSiYTRudbj8eB0Ogs6h6IonDhxgosXL7JmzRoOHDhAb2/vdc/98vJyGhsb6e3t5Sc/+QkVFRXMz88b9cmRSKTonYoFAvi0G0kaxel6t0zy+TxdXV0cO3aMAwcOGKl2gGE0LWbTpk243W7+1//6X3R1dfHQQw9RVVX1kdfJ5XK8+OKLvPDCC5w7dw5ZlvnsZz/LV77yFZqampYV1t6zZw8bNmxgdnaWgYEBvv/97xsd+DZs2MAzzzzD0NAQW7Zsob6+np6eHsbHx1lYWGBmZobx8XHcbjdut5v77rtvybmrqqp49dVXOXToEKdOnWLNmjWiNkkguAmGhoY4duwY3//+95mZmcHr9fLwww/z27/927hcroLPs7hb5d/93d9htVrZsmUL9957r5jtIhDcJkpKSnj00UfZtWsXkUiE119/nTNnzvDXf/3XlJWVsXbt2oIcqWfPnuX111/n+eefJ5lM0tLSwpNPPskjjzzykY0kFEVhfn6eP/iDP+Cxxx6js7OT0dFRBgYGCIfDTE5OMjo6SnNzM16vl9/5nd/hrbfe4ic/+QlPPPEEwWAQv99PIBAwmlOJyLSg2Hy6n2KrdL8NDAxw5coVYrEYjz766JL5AO8XEnptQkVFhTFnKJFIFHQds9nM7t27aW1tJRqNIkmSMbNoOQ0gJEnC4XBgtVoJBALU1NQwMDDA4OAgQ0ND1NbWGh133njjDU6cOGHUI01PT7OwsIDZbOarX/0qNTU11ylXVqsVh8OBzWYjlUoJb5JAcJNUVlbywAMP0NraSjabxWw2EwqFlhRTfxSaphEOh7l69SrPP/88AHv37qWjo2NZ3mqBQHBr0SPDNpuNsrIy3G43iqLw7rvvcvr0aVpaWgp6xuvpeffddx/5fB63201VVVVBDlRFUYjH45SVlXHu
3DmuXr2K2Wxmbm6Ovr4+hoeHmZmZoaKigs2bN1NVVcVjjz3G7t27iUajyLJMT08P3d3dDAwM4HK5RGRaUHQ+3UbSKkNX/k+fPs38/DyhUGhJapmqquTzeaLRKGaz2ZjBlEql6OnpQVVVPB5PwYqOLMuUlpbi8/mM2gSLxVLwAFx9PdPT03i9XsxmM5qmkcvlSKfT5PN5VFXFarUaLUyz2ayRnqN3w6uqqqKxsRGn00k0GjUMP7vdjqIoTE5OMjc3RzabxePxCOVL8KlE32/6n8V7LZlMGo1fCtkfDocDi8WC3+9HVVWjcUyhabqA0dL33LlzTE9Ps2PHDpqbmwkEAktSacR+FQiKg+64gGtOUKvViqqqZDIZstnsslPWPB4PDoeD8vJyo5OlxWIpSD+QZRm3280Xv/hFo+GUoihGE4aqqiqam5sJBoPIskw6nUaWZcrKygiFQszNzZHL5chms9TU1Bg6hkBQTO7s3zhNA0VFU1W0myx0Luhy6rVroakgfTyPRzab5cSJE2SzWdra2qisrDSUDFVVSafTDAwMANeKHWVZJhqNcuLECZxOpzG89aNYXHz9cbvFaJpGOp3m3LlzVFRUGG2Aw+EwExMTuFwufD4fHo+Hbdu2sWHDBqO9eSaTweFwIMsynZ2d7Nq1i0AgQH9/P9Fo1DD48vk8ly5dYnJyErPZTFVVlfAmCW4NmgbaNdmgKcqKd7fT3pNHfMzi41wux/z8PAsLC6RSKSOdZXR0FJ/PRygUIhAIFGTomEwmo3X3x6Wvr4+3336bc+fO4fP5aGxsRFEU5ubmsNlslJaWirRYwSceTdNAvTa7CEW51qBpJa+nqmjv6S2w/Gfd6Ogo6XQas9mMw+FAURQuX75s7MuysrKCn6G6M3Y5EebFxwYCAb70pS8Zr2WzWc6dO8fc3BxtbW3s2rWL2tpaZFlmZmaGWCyGqqrYbDYjEyWdTrNp0ya8Xm/RR5wIBHeukfTerAElHkeyWFCKNCdJjSdQM1mwL/96qqoyPj7OqVOn6OzsZN++fUuEmaqqJJNJDh8+TE9PD7lczng9k8mwd+9eduzYUZCRdCvQB8n+7Gc/Q1EUw3usqioWi4V77rmHDRs24Pf7jYJPfb3pdNrwUjmdTjweD2azmcHBQY4fP87w8LDRySaXy1FWVsbdd9/Nxo0bheIluDWoKlo6gxKNoWazRWkBTl5FTaXRTMuXD+FwmEOHDvHOO+8wNTVFIpHg0KFDDA0NUV9fz2c/+1l27969rJqim+HnP/85L7zwgtGB83vf+x6SJBkG05e//GVKS0uFYiP4ZKPk0dLpa3LivWfuiqJpkM2hpTNo1uUbJxcuXOD8+fPMzs6iqiqyLJPJZCgpKWHPnj1s2bKlKI5GPTodCoWM17LZLH6/H4fDgcfjwev1GgZYb28vp0+fpre3F1VVyeVyhEIh2tra2Lt3r0i3E9wW7jgjSQ8HO+x2sgODZGdmwWJBkosxKVJDy2TwWW3Yd+wsOCytoysY3/zmNykvL6e+vn7J+2azGZ/Px0MPPURnZyeZTAZN07BarZSVlRkh6WIhyzKBQICvf/3rRKNRI6Rus9koLy+nqqrK6MD3/vu0Wq3s2bOH9vZ2/H4/brcbgA0bNlBaWsr8/LwRfvd4PJSWllJWVlb0GQ+COw+TyYTdbkdOJEl1XyYzPoFkKtLDV9NQYnHkjk7sdvuyDH6Px0NnZyeBQIBkMskXv/hFTCaT4ektdkOTRx99lO3bt5NKpYw0P0mSsNls+P1+XC6X2KuCTyxmsxmn0wnzCyQvXybV1188OaGqKLE4lh07sNlsy0oz27VrF42NjUSjUbLZLCaTCbfbTSgUIhQK3dZnqMlkYs2aNXzjG9/A6/VSWlpqvNfZ2UkoFGLTpk3k83lcLpeRerectuUCwa1E0u6wKng9BeVSdzevHDxIzhjaWIwNpiEBLoeTzrY29u3bZ3RlKejo92oMotGoMTdg8dwSPUqTTCZJp9NGpEWv37Hb7QXXJNwK9PVEo9Eluc66Emqz2TCZTNcZivrn0uk02WwWi8VizFTJZrPG64qiGN4oq9WKzWYTUSTBTaHXw126dIlzFy7QffkyOU0r4gNYQwZaGhpZ397O5s2bsdlsBV0/n8+TSqWMVDsdfXaJy+UyUlhXEn3/RiIRY58uRndUeTwe0ZFK8IlElxNnzpzhZFcXI2Nj5G+DnOhY28aGDRtY29paUGqs3m0yk8mQy+WW1BvabDasVuttfYbqOo7uWFm8nnQ6TSaTIZ1Oo2ma4fxZ/BkhSwTF5o4zkjRNI5vNEolEGB4eLnpvfVmWsdpshEpLjS5xYmMLBKsDPSU0Go0yNzfH/Py8kbZaLExmM8FAgEAgYHh1hYwQCFYPupxYWFhgenp6SaZEsTC913EyGAiImWMCwW3ijjOS4BcCbnGHpWKiKz1C+REIVie3U0YsbucvFB+BYHWiywVVVW+bHgHXHK9CjxAIbg93rJG0WhDCTSBYfawWGSHkg0CwehFyQiD4dHNHGkkCgUAgEAgEAoFA8HERuR4CgUAgEAgEAoFAsAhhJAkEAoFAIBAIBALBIoSRJBAIBAKBQCAQCASLEEaSQCAQCAQCgUAgECxCGEkCgUAgEAgEAoFAsAjz7V7ASqCqKqqqkk6nUVW16NfXJ85bLBbRulMgWIUoikIulyOfzxddRkiShMViwWw2YzbfkSJYIPjEo2naEjlxO+apWa1WzGYzJpOpqNcWCATXuOOe0Jqmkc/nicVijIyMkM1mi6oEybKMzWajrKyMsrKyggfBqapKPp8nnU6jKIrxutlsxm63YzKZrhs8qWkaqqqSTCbJ5/PY7XasVusSgaooCvl8nkwmQz6fR5Ikw4hzOBy37sYLQF9vLpcjk8mgqiqSJGE2m7FarVgsFgDS6bTxvo7+ObvdjtlsRpIkVFVFURRSqZTxncmyjNVqxWaziWG+guvQFZ1EIsH8/Dzz8/Nks9mirsFsNhMMBvEHAgT8fqCwOSi5XM5Q2Bb/vlssFmPf347fd30gbzqdJp/PYzKZluxngeCThi4n4vE4MzMzRKNRcrlc0a6vP6fLysrw+/14PJ6C9ramacbz9f2Gnf7cdzqdxmd1IzCTyaBpGiaTyXjOFvv5qcu2XC6HoiiYTCZsNhsWi2WJ7pPL5chms+RyOeP+9HvTn/u6PpVMJm9o3FqtVkNGCR1B8GHccUZSPp9naGiIF198kT/7sz8jm80W1QMkyzLBYJCHH3mEb/4//w+BQKAgb/HMzAxnzpzhO9/5Dj09PaRSKZxOJ21tbfzWb/0W69evx/+eQrWYwcFB/uiP/oijR4/yO7/zOzz44IOsWbNmyfvvvvsuTz31FOfOncPhcLB27VoefPBB/sW/+BdFExD6z2BgYIDjx4/zgx/8gMuXL+Pz+di5cyef/exn2bdvH7Is85d/+Zc8/fTTDA8PG8eFQiE6Ojr43d/9XTZu3IjFYiESiXD27Fn+x//4HwwMDABQVlbGL/3SL/Hkk08SDAaxWq1FuT/BJ4dsNsurr77Ks88+y2uvvVZ
U5QeuPdB33XMPBx55hCeeeAK3213QcWfPnuX111/nnXfe4cqVK2iaRmNjIzt37uSxxx5j06ZNK7vwDyCfz7OwsMBf/MVf0NXVRWtrK0888QR33XWXUEAEn1gymQw/+9nP+OEPf8jp06fJ5/NFu7YkSciyzEMPP8wXHn+c/fv343K5Cjr2jTfe4O///u/p6upiYWEBuCZz1qxZw/3338/v//7vA9ccqFevXuWVV17h7//+74lGo7S0tLB7927++T//5wQCgZW6vRty+vRpjhw5wptvvkl3dzft7e08+eST7Nu3j5KSkiWf+9GPfsTbb7/N/Pw8LpeLxsZG9u/fz5e//GV8Ph9jY2O88847/OEf/uENDaUvfOELfOYzn+GBBx7AZrMV9T4FnyzuOCNJURSSySQzc7NEK7zYO5ow+1xI5pUPV2uKSn5qnkQ4yeTkJMlkEq/XW5CRZDab8fl8PProo3z1q1/FZDIRiUQ4fPgwf/zHf8y/+lf/iu3btxuGkqZpnD9/nqNHjxIOh5meniaRSCzxHp05c4aXX36Znp4e7rrrLv7ZP/tnhtdX9yYVk4sXL/Kd73yHK1eucO+99/Lrv/7rZLNZPB4PVVVVxufi8Tgul4snn3ySz3zmM4a33O12U1dXZ3w3x44d44//+I/ZunUrX/3qVwEYGRnh1VdfxePxsHfvXhobG4WiJlhCLpdjfn6eMDkSjSEcm9Yg2ays9K+JpgGKQrpniHk1w9z8PJlMpmAjyW6309nZSV1dHT6fD4B3332X/v5+/vRP/5Q///M/JxQKFS2FT48MRyIRTp06xcGDBzGbzciyXHTDUyC4legRmdnZWcI2SK2rwd7WgGyzwIrLCQ0tp5DpHmQ2EWVhYYFMJlOwkZTNZjGZTOzcuZPPfe5z+Hw+ZFnG6XQSCAQM/eDUqVMcOnSIN998k1/5lV+hrKyMU6dOGTrFH/7hHxoRpWKQSqWw2+2sX7+euro6rl69Sjqdvs7AcbvdtLW1sX37drxeL7Ozs/T39/PCCy/g8/l48MEHCYVCPPDAA9TX1xtR91QqxejoKN/97nepqqrC7/eLaLfgI7njjCT9wZ3PK+B2YKorx1zqQ7Ks/K1qeRXVbAJ1lvx7oeBCo1hOp5O6ujpKSkpwu93Iskw8HkeSJE6cOMHk5CTRaNRQjqanpzl79ix9fX089NBDHDp0CFVVjetls1lOnz7NwsIClZWVbNu2jaamJkPgFbMWQv8eXnrpJRYWFmhububuu++mqqoKVVWxWq243W5jbaqqYjKZKC0tpbm52ajdsNlsOJ1OJEkiGo0yPj7O6Ogo/+7f/Ts6OjpQVZVAIGB8X7oHSRhJgsXoKbmKWQa/G3NTNbLDtuLKDxpoeQVpdgElI6Mssx6qvLwcj8eDoii4XC4kScJms5HP53n99deZmpoqOHJ9q0gmk4yPj3P06FGam5uJxWIieiu4I9DlhGq1IJV4MTdVITuLEHVQNdRsjuxMGCV3LSVuOdkw+jPP4/HQ0NBAaWmpkQJrt9uBa46iq1evMjU1RW1tLbt37yYYDOJ0OjGZTBw/fpzJyUkqKyuNY1aa+vp6PB4Pa9eu5cqVK/T399/wvsvLy9mxYwcejwen00k4HMblcnH58mXGxsZIJpMEAgGCwSAul8vQCRcWFkgkEtjtdqqqqggGg0I3EHwkd5yRpKMBss2CyevC7PcgWVfeY6DlFJT5KLI9BonlHWu326msrERVVbLZLIqiIMsyDQ0NWCwWox4Brgm4np4e+vv7UVWVRx55hD/8wz/8xTo0jUQiwaVLl1BVldraWhwOB1NTU9hsNvx+vxFKL5YRkcvlOHjwIFVVVbS0tGC325mensbtdmO325d4rEwmE5qmEYlEGBoawmaz4fF48Pv9Rh2VnpNssVhoaGjA6/WSy+UIBAKUlpYiSVLRC20FnywkkwnZYcPs91xTflZ4H+geYtnlQMov/1qlpaXGA1+vIaiqqqK8vByTyXRDr+tKoF9DURSmpqbo7e1leHiY7du3c/78eeGdFdwxaJqGZDYhm+3X5IRr5Q0GTdWQ01lkpx1iy5cTei2RHjlJJBI4nU6CwaCxN/X3MpkMGzduZO3atdhsNqxWK4qi8NOf/pTR0VECgUBRjaS6ujpSqRSJROIDnT3BYJBAILCkfqm0tJTS0tIl9Zp63RFcSwmORCLMzMzgdDqprq4WRpKgIO5YI2kpxSlA1G7iErpSn81mGRgYIJFIMDc3xzvvvENZWRn19fWUlpaiqiqzs7M8++yzVFRUsGfPHkKh0JL7UxSFyclJJicnjZSBt99+m3A4jN/vZ9OmTRw4cICtW7cWXAz6/v9ffNxHnUNRFObm5ujt7UWWZS5cuMCLL75IIpGgsbGRbdu2sWfPHtra2jCZTLhcLhRF4a233uLtt9/G5/PR2trK1q1bOXDgAD6fj1AoRFNTEzU1NTz11FPs2rULgOHhYWZnZ7n33nvx+/1CCAoKpAgyQrvmvPm4SJJEPp8nHo8zNDREKpXi1KlTXL58merqasOhUtBS3tvHH2c/68fFYjGOHj3KmTNn2Lx5Mxs3bjRqAwWCO5UVlxOSdlNyQu+Id/nyZf70T/8Uk8lEeXk5u3btYs+ePXR2djI/P8/c3ByyLNPS0oLNZkOWZfx+P1VVVbjdboaHh2lubr5hLfRiPko/uNG/b8RyHJuqqjI3N8f4+DiTk5P09fUxNTXFrl27jBIH/ZqappHJZJicnOTEiRO0tLRQX19f9JorwSeTT4mR9MlAVVXC4TDf+973GBgYIBKJkEwmefzxx6mpqcFkMjE3N8c//uM/4vf72bJlC1u2bLnO46JHkmKxGAsLC9TU1PCNb3yDyspKDh48yLlz5/jjP/5jvv3tbxMMBj+yvWg+n2dmZobJyUnGxsYAqK6uprKysqAaCFVVmZ6eJp/PMzg4SFlZGf/6X/9rbDYbL7zwAhcvXmR+fp7y8nJKSkrYu3cvHR0dlJSUYLFYOHfuHGfOnOHZZ5/FarXymc98BrvdTmNjI48++ig/+clPOHnypBEVq6mpoaWlBY/Hc3M/EIFglZFKpejv7+cv//IvmZqaIh6PU1VVxf3334/H47muA+YHEYlEmJqaor+/n3w+j8vloqqqykhv/SilRlVVnn32WS5duoTP5+NrX/saV65cuRW3KBAIboLW1lb+7//7/0ZVVdxuN9FolJ///OccPnyYs2fP8h//438klUqRy+WQJAmHw2HIDb2rnsvlIpFILOm0+0GoqkosFmNycpLe3l4A/H4/5eXlNDU1rUj78mw2y/Hjx/nJT37CzMwMkiRRXV1Na2urkZIPvzDaFhYWGB4e5sqVK/ybf/NvCAaDBctKwacbYSStInRPzhe/+EUWFhZYWFjg6tWrnD59mvr6eqNV58mTJwkEApw9e5aZmRmj/e7FixcpKysz2vBKkkRDQwNbtmxhw4YNeL1eo6j62WefZXh4GK/X+5FCTM/nHRoa4ty5cwBGy/HFXWc+DL3VZnNzMzt37mTTpk3Isszk5CTnz59nenqa+fl5AoEAzc
3N1NbWYrVakSQJp9OJqqokEgmjmDMWi3HhwgWOHz/O448/Tn19PZqmMTc3x+nTpzl27Bh2ux2n0ylqJAR3DHa7nfr6er7+9a8TjUYZGhpieHiYEydOsG3bNtra2gpKj0kkEkxMTHD27FkymYyxjxsbGz/yWN2Z87Of/QyApqYmXnnlFQYGBrhy5Qp2ux2Xy0VNTQ0VFRUFGV0CgeDWUFFRQTAYBK7VHuvG0M9+9jMuX77M1atXjfEk+ogQ3cH4i5rufMHtsTVNI5lMMjY2RldXFwA1NTUANDQ0rIiRZLFY2Lx5Mz6fj7m5OSYmJrh06RInTpzA5/MtSbVTFIX+/n56e3txu92sW7fOqOkUCD4KYSStMvTuLrlcjng8TigU4vDhw4yMjODz+XC5XDgcDkwmE+Fw2JirpKe0TU5OEg6HKSsrw+FwEAgEKC8vJxgMYrPZqKmpIRgMkslkiMfjBRWO696mYDBIbW0tgFHkWYigkWUZj8eD3W4nGAwa64FrdRYOh4PJyUmjzuL9EaCysjJKS0txOp1Eo1GjFmJwcJCpqSn27NlDTU0NmqYxOTnJ4OAg/f39dHZ2Ul9fL4wkwR2DxWLB7/ezefNmstms0RXy0qVLDA4O0tzcXJCRZLVa8Xq9VFdXk8vl8Hq9RhesQsjlckYxuCRJ9Pf3Mzo6yvz8PDabjbGxMWKxWFG77QkEn3b0Z7Veu6tHUhoaGvD5fORyORYWFqitrcVutxOPx41nqm5QpVIp0uk0Ho+noPRdfeitz+cz9INQKITX610xQ8RkMlFZWUkwGCQWizEwMMD4+Di9vb1s27aNUChkrD2XyzEwMMDw8DANDQ2Ul5eLuklBwYin1ypBN3QAbDabUUSph6sXFhZIpVJGJ5pwOGwYOPr8Bn0QnN5OPBgMYjabjSYHZrOZVCpFNps12moXIsQsFovhFd68ebPxmn6tj0KfHaWHuNPptNGEQm9SYTKZMJlMRuMK/ThJkshkMmSzWfL5vNG6PBwOMzs7iyRJtLS04Ha7je+jvLycixcvGkN2BYI7AX14oq4I2e12amtr6evrQ9M0pqamCkqPgWtODo/Hw5o1a9A0DVmWC97PcG3/79u3j3g8bihiZrPZ2Md6K3AoXnMYgeDTjt66HH4xa0kfOK+/bjab8Xq9+P1+4vE4o6OjRvvtSCTC9PQ0mUyG8vLygmYIybJMIBDA7XbT2toKLNVFlrN2fUC83tFv8b9lWTYG4OpGndPpxGazkUqlKC0t5dKlS8YoFP2c8Xicq1evMjExwWc/+1ncbveKRLcEdybCSFolzMzMcOnSJaM9JcDs7CxvvvkmY2NjHDhwgObmZpqamqiurl7SXjyTyfDf//t/Z9u2bezdu5d169YhSRIbNmygq6uLt99+m5qaGurq6njuuefo6uqiurqatra2gj0qusD7OJ1u9JS5Rx99lGPHjvHzn/+c+vp6HA4Hb731FuFw2OjSNTY2xsjICLlcjoaGBsxmMydPnuTo0aP09/fzxBNP4HA4qKqqorS0lJGREZ555hm2b98OQH9/P4cOHaK1tbWonXkEgpXm1KlTZLNZnE4nFRUVZDIZjh8/zpEjR5ifn6etra3gqKksy4Yz5v18lEGjOz2+9rWvGY4JTdM4deoUkiThdrt58MEHaW5uNtJ+BYI7Fo2VHx+wDI4cOYIkSXi9XkpLS1lYWOAf/uEf6O7uxuv10tnZid/vp62tjdnZWV5++WXWrFlDRUUFhw8f5vDhwzQ2NtLe3l7wDLcPkyeFkkwmicViTE9PMz09fW3e5cwMo6OjqKpKSUmJ0bRmcHCQ9evX43Q6jZT9t956i02bNlFSUrLkuX/ixAn6+/ux2Ww8/PDDohZJsCzuYCNJQ0mmyc+Gr3lKizInSSE/H8GUSAHLS/HS621efvllIpEIkiRht9vx+Xz8+q//Onv27DEGqeqhdLimnJhMJiMytDgX95FHHsHlcnHs2DH+5E/+BACfz8f69evZu3evUaP0UdwKJUeSJL761a8SCAQ4duwYf/AHf4AkSdTW1rJlyxZ27dqFz+djfn6eEydOcOLECcLhaz87r9dLfX09n//853nggQeMpg179+4lmUzyzDPP8IMf/MDwsDc0NPDYY4/R1NQkwuqCD0TN5VHiSbJTc8gO24or85qmQV5BicTR8o6PPuB9TE5OcuzYMXp6eohGo0bKXG1tLb/5m7/Jxo0bC1ZSbvZeZVleIodUVcVut2OxWLBYLNhsNpFmJ7gj0LI5lGSc7PQcssO+8uPUVA0tmyMfTaBpy28+NDg4yJEjRxgdHSWdThtp7nv27GHHjh3GyIAdO3bgcDhQVZU/+7M/I5fL4Xa7aWxs5Dd+4zfw+XwFRVxuldw8f/48r776Ks899xzRaJTp6Wn6+/t5+eWX2bZtG7/5m7+J1WplZGSE5557jm9961tks1nsdjuBQICNGzfyhS98gerq6iXOoueffx5N09i0aRMVFRW3ZK2CTw933FNMTxuxW20oIzMk3jqDZLdCMcKrqooaS2JSZWzt1/JeC/VaBAIBNm/ejMPhIJFIGHm+Xq+X9evXU11dbcwSWiyUNE3DYrHw7//9v2fnzp2Ul5cb75eVlbFt2zbKysoYHh5GVVV8Ph81NTWsXbu2aCFnvSC0vLyc3bt3U11dzeTkJHAtNa62tpb6+nrMZjMlJSXs3LmTqqoqYrGYUaNUXl5OTU0NJSUlyLKM3W6nrq6OAwcOGLMV4FpNV3V1tdHdTnixBe9H93qaU1lyV8eIpTNIJvOKz0lC00DTyE/NY6pfg9VmW5YRv3btWux2O21tbSSTSSwWC263m7KyMhobG40h1CuNvqfe3zZc349Wq5X6+vrrPiMQfJIwmUzXnrmRBNnhIZRwDMlsYsXDRpoGikp+eh7L2o1GS+9C2bhxI16vl3A4TDabxWKxUFJSYswh0g0Iv99Pe3s7ZrOZtWvXGkZSRUWF8Xox929lZSU7d+7E5/MZM5DMZjNut5vq6mqjRqq1tZXPfOYzzM/Pk8/nDT2pqqqKhoaG62qlH374YTRNo7KyUjhNBctG0u6wiZu5XI7R0VHeeOMN/vJb3yKv5G9q5sBykbgWrbl/z738y3/5L/H5fAUJOL2jzOLp2npOsZ7ffyOBpX82lUoZKXGLawH0nF49R1eWZaNu4P0G10qyeADl4vorPQqmr2dxDrKeyrN4zYtbler5yXqthv66ng9dzPsTfDLQZ5EdPHiQV159lbcPH0bRFIqZLyNLEndtuYsH7r+fzz32WMENUHT5oKoqqqoa8kHfHx8kI4rBYlkD3Pb1CAQ3gy4nnnvuOX723HNcvHTptsiJe/fs4cDDj7Bnzx6jHvfD0DRtiZzQ6wEXPz8/SD/QaxNv1/NTX8f764h1Oacbd7qutPj+Fq9bP2ZxOYL+GWEkCZbLHWck6UIilUoxOTlJOp0uqIPbrcJkMmGz2SgtLTWGmQpFQSBYHSx2KkQiESKRCOl0uqhrsFgsBAIBPB6PkfMvZIRAsHrQ5UQymWR+fp54PE4mkyna9SVJwmw2U1pai
sfjweFwCBkhENwG7kgj6f3/X8xb/KBUFIFAcPu53fIBhIwQCFY7q0VOCBkhENxe7jgjSSAQCAQCgUAgEAhuBtELUSAQCAQCgUAgEAgWIYwkgUAgEAgEAoFAIFiEMJIEAoFAIBAIBAKBYBHCSBIIBAKBQCAQCASCRQgjSSAQCAQCgUAgEAgWUfgY508Qq6lhn2jbKRCsPlaLjBDyQSBYnawWGQFCTggEt4s7zkhaPNPgdgo5fcaBPhG6EG7VevXrfdT5ii14P2w9i9dSyPewnHtczvkEdzbvn3lyu2TE+3/fCvn9W22/x4V+d2JvCT5prCY5sRw9YjnP/ELlySdR7hS67kIRMuzTyx1nJAHkcjni8Tijo6OkUqmiCjiTyYTdbqesrIzS0lJkefkZjblcDlVVUVXVOKfJZEKWZSRJQlEU8vm88f7ia5tMJszmaz9WTdNQFMU4l6ZpxmdMJtOyDLibRdM0VFVFURQURUHTNGRZxmw2G/e1GP2z+XweSZKQZdn4/AedP5/PoygKsixjsViWnFO/rv49SJK05HsVfLpIJBKEw2Hm5+dJp9NFvbbFYqGkpAS/34/P5yv4uPf/Duvo+8Nqta7Ecj+U9+9rXalbLIcEgk8qsViM2dlZIpEI2Wy2qNe2WCyUlZXh9/txu90FH3ej577FYsFkMl1nJC2WKcCSvfv+z+rPV1326M9jk8l0i+648Pu70Vr09bxfl8hms0ue+xaLZYnOoSjKB/5sZVnGZDJhsVhW9qYEq5Y77immKArj4+O8+eab/PV3/oa8poLEtT8rjXbtj8/t5Z6du/it3/xNfD5fQcqCLrDGx8d56aWXuHLlCtPT0yiKQmtrK4888gitra04nU4OHjzIT37yExYWFlBV1djIGzduZNeuXdx3331omsbY2BinT5/m3Llz9PX1oSgKTU1NbNmyhXvuuYdgMLjy38kiTp06xcmTJ7l8+TKTk5OUlpbya7/2a6xdu3bJQyCRSNDX18fzzz/PpUuXMJvN1NXVsWHDBvbt24fX671OgA8MDPDqq6/y1ltv0dLSwu/93u/h8/mQJIlMJsNbb73F4cOHmZycJJ1O43K52LZtGzt37qSxsRGXy1XU70Jw+8hmsxw+fJiDb7zOkWNHUSQVSS6Sp1ADSYXNGzZx7+49fPazn8XpdBZ06OnTp3n99dc5deoU+XzeeNCXlpbS0dHBb/7mb67w4q+x2Ls+MzPD1atXOXjwIH19fQDU1dWxZcsW9u3bh8vlEl5YwSeSbDbLwYMHef7FF+i+0o2CVnQ5cc/OXTy0bz+7d+8uWE6Mjo5y9OhRuru7GRwcZHR0lN/5nd9hy5YtVFdXG5+7cuUK58+f5+TJk4yPjyNJEn6/n/r6evbt20dHRwcmk4lUKsXU1BQ///nP6e7uJpFIIEkSoVCIxx57jPXr1+P3+4vmbOzr6+Odd97h+PHjxGIxzGYzgUCAtrY2Dhw4QFlZGTabDUVRmJqa4vvf/z4DAwPE43GcTicPPPAAu3btory8HFVVOXz4MD/60Y+IRCIoigJAPp8nk8mwdu1a7r//fj7/+c8X5d4Eq4870kiKxWIMDg1yfuQy9hofJocVybTyG1hTNXKRFM6ZWUp7gqTTaTweT0HH5vN5FhYWOHjwIFevXiWbzRIIBJZ4iHWv7eTkJH19fXg8HiorK3E4HMiyTGlpqSFIM5mMYSCNj4/j9XoBGB8fB65FnR555JEbRnFWimg0SjweJ5lMGpG+hYUF8vm8cX+apnHlyhWOHz/OmTNnKCkpQVVVpqeneffdd6mpqWHz5s1YrVZj3YlEgnfffZeuri56e3vJZDLkcjk0TSOXyzE5Ocnzzz9PLpfDbrfjdrvJZDKcOnUKl8uF1WqltbW1KN+B4PajKAoTExP0DvdxcfoqzroAstXEyntSNDRFIzUewTHgpLG+gWw2W7DyEw6HGRwcZGhoiI0bNxr73u/3EwgEVnjt1xOPxzl79ixdXV1MT08TCASQJAmXy4Usy6uqpkMgWA56tGJkZISekV665waxV3uRLUVQmbT35MRYhJKrJWzo3EAulyv48HQ6TTweJ51OI0kShw8f5stf/jKpVGrJ5/RsFKvVSllZGZqmkUqlDP2jvr4ej8fD/Pw8Z86c4cyZM9jtdvx+v/FMfvHFF/F6vTidTux2+63+Jm6IHvnRr6koiqHv1NTUsH37dkwmE8lkkldffZULFy5gNptxu93kcjmOHTtGMBjEZDIRDAZxuVxUVlbidruN6Nv09DRXr17F7XZf970JPl3ccUaSqqrXvADZLM7mEioeXIu9zPueErTC186rxPtnUftjZFIZI22uEFKpFMPDw/zkJz+ho6ODPXv2GBGWWCxmeEd0Kioq2L17N/fffz+BQACTyYTD4cBms6GqKtFolEOHDpFMJqmrq+Pzn/88JpOJH//4x1y+fJnR0VHuu+8+nE5n0Ywkr9fL+vXrqa6uZm5ujh/96EfXfSafz3P06FHefPNNTCYTv/7rv048HufEiRO8++67HDlyhNbWViNNT1EURkZGOHLkCOFwmObm5iUPlFQqxcDAAC+88AK//du/zc6dO6msrOTq1av87//9v7lw4QJ+v18YSZ8i9IcqbgvedRVUPbIOk9PGSm8DTQM1pzD7Th9SzEI2m12W8gPgcDhoaWnht37rtygpKcFsNmM2m4umoCxmbGyMt956i3PnzvHggw+yd+9eI83VYrFcl/IqEHySUBSFdDqNKejAH6qhbE8LZtfKp7RqqoaSyTNzuA81Abls1nAkFoLNZqOxsZHKykqAGz5nAYLBIJ2dnbS2tlJWVoaiKJw6dYq3336bp556iieeeAKbzcbU1BTHjh0jGo3yla98hfr6ejKZDC+++CLf/e532bp1K7W1tUWTQX6/n87OTnbu3EkwGCQSidDd3c2PfvQjTp48SUtLCw6Hg9nZWb773e/S1NTEgQMHaGlpYWRkhL/+67/m9OnTuFwuysrK2LJlC+vWrTOctPl8npMnT/L973+furo66uvri3JfgtXJHWckGUhgcVqxl3lwVHmRrSt/q2peIRdLkZ/JwjKdDxMTExw6dAiLxUJnZydWq5W+vj6cTidr167F7/djt9uN+olkMkl/fz8ul4tgMEhlZSVNTU04nU5UVWVqaorZ2Vk6Ojq47777DCPg8ccf58c//jH/+I//yODgIK2trUWpZZAkiW3btqFpGnNzc7z77rs3VKBmZ2c5ffo0kUiEP/zDP6Szs5N8Po/FYiGfz3Px4kVmZ2ex2+1YLBYWFhb41re+hcfjYdu2bUiSxNGjR43z5XI5IpEIdrud9vZ21qxZg8fjQdM0qqqqMJlMRc81F6wOZIuMxWPHUenD7LIVI5CEkstj8TuQ0h/PaaMoCgsLC5w6dYpAIEAwGKSiooK6urpbvNiP5vnnn2d4eJhQKMSGDRvo6+vD6/VSXl5ORUXFEqeOQPBJxWQzY3XZcVR6MbuL8DutaeRTOax+B1J6eUJJkiTq6uoMeTA/P/+B2SKVlZWGIQXXHMzZbJZIJMKLL75IPB4n
l8uRy+XIZDI0NjbS0NBAdXU1+XyenTt38r3vfY90Ol3UZ2hFRQXl5eXGv4PBoFFKkEwmyWQyzM3NcfnyZS5cuMB//s//mXXr1uFyuaivr+fMmTNMTU0xNDREZ2cnHo/HkFWqqpJIJOjp6cFisVBXV8fatWuLdm+C1cedayS9j2syQhcUGoVpRIvTRW70ee1D3lse0WiU3t5exsbGOHjwoFHLlEqleOONN3j00UdZt24dTqeTkpISGhsbyefzXLp0iXw+z8zMDHfffTfbt29n48aNuFwubDYbc3NzDA4O0t7ejslkoqenh/7+fiPC0tTUVLSC70I6zoyMjBCNRnE6nbS3txteaZ/PR2lpKclkkkgkQigUMsL9yWSSL33pS0bIfTEej4fOzk7uvvtufv7zn3PkyBEjhUBRFNrb21m3bt1K3rbgE8LHi3p8kAx4/+/5zXdb8ng81NXVkU6nuXLlCoqiEA6HCQaDbN26lccff/yGhcsrxZkzZxgaGsLhcPDcc89hsVhIJBIEg0HWr1/PgQMH8Hg8IpokuKNY/u9zoXrEtfdvVk4Uuj5Jkkgmk0xPT3Pw4EEikQgTExNEIhEef/xxampqcDqdVFVVsWvXLl544QW+973v4XK5yOfz9Pf389hjj9HR0VHUdF89jX5gYIC33nqLubk5IpEIMzMzPProo5SVlbGwsMDIyAher5fKykpcLhcmkwmXy0VNTQ1TU1MsLCwQi8WMUgQ9ihSJRDh06BAdHR2Gg1rw6eVTYyQtFU636qF96x7++XyeeDxOJBKhpaWF5uZmnE4nMzMzvP7661y4cAGXy0VnZyctLS1GtxVZlkmn0xw/fpyzZ88iSRINDQ0EAgHWr1/PyMgIx44dY3Z2FpPJxPj4OPPz84YBVkg6oKIoTE9PMzIywuXLlwGoqqoyQtG30shKpVLk83kjfRCWdtzR0ylHRkYYGRnh0qVL7Nu3j/r6esbGxq47nyzL2Gw2JEkiHo8bnW2i0SipVMoo1BQIbi233jCorKxkx44drFmzBrvdTiaTMRwrb7zxhpFKWkgnptnZWcbGxuju7iaXy+H1eqmvr2f9+vUFG1q6kuHxeLjrrrsIBAJcvXqVyclJurq62L59O06nU3SPFAg+lNvrRNAjSIlEgkgkQjQapby83IhAmUwmrFYr6XSaWCxmGBMLCwuUlZUt6zq5XI7+/n7Onz9PJpPBbrcvqTNejqzQ06bj8TjRaJRkMml0ucvn86RSKWw2m/HMh1/oA4s75C1Gz9CZnZ2lqamJ2traonfvE6wuPkVGEh8SQPqgN5YhvG6yRllvymAymVi/fr0RBp6cnOSNN95geHiY2tpaNm7cSE1NDWVlZUb76nQ6TS6X4wc/+AGXL19mfn6ehoYGNm7ciKIoDA4O0tPTs6Ru6YNab3/Q2mKxGGNjY5w7dw641hjC7XZTW1t7czf+IXzY2oaGhjh//jxXr15l3759DA0NMTg4yNjYGAsLC0anLZPJxOzsLOl0mqqqKiorK/H5fDgcDnp6elhYWGB+fh5VVY32xYJPOzeIDmmAdAvkxE0QCoXwer0oioLNZiOTyeD3+0mn05w5c4axsTFCoVBBRlIqlWJycpLz58+TzWYpLS3FZrPR1tZWcOtuveW33+9nx44dlJSU4HK5SCQSnD17lnA4THV1dVFHDQgEt5cbyQjpA17/iMOKgCzLOBwOI2okSRITExPMzMwQDocxmUzE43FmZ2fxeDzU1tbi9XqN7m/T09PMzs5SU1NTUHptPp9ndnaWixcvkkwmja62nZ2dy2qzLUmSoX/Y7XYcDgfpdJrp6WkSiUTBNVyLo3a6nnPhwgUsFgsNDQ2EQqGC1yS4M/l0GUkfKIRugXS6yVPo3VcaGhpobm6moqICq9Vq5MVGo1Hm5uaQZXnJbBVN0/B6vezevZuf/exnLCwsMDk5yZo1a9i6dStNTU3Mzc0xMzNjFHi/8cYbXLx4seC2nboi1NTUZLxWXl5OeXn5LfeyuFwuzGYzmUyGRCKB1Wo12qPncjkj/W5gYICuri66u7v5b//tvwHXutwtLCwQjUb5m7/5Gz73uc9RWlpKV1cX0WiUb3zjG7S1tWG32wmHw/zt3/6tkS60ceNGUUMheI8bbGbpA14vombjcrmMVvWapuHxeFi/fj0TExO88cYbjIyMsH79eiMC+2G43W7q6urYuXMn+Xwet9tNdXX1svaz1+tF0zTq6upobm5GkiQaGxuNDpOJRKLgxjUCwZ3BB8mDj5ATt8mHYLPZKCsrY//+/YYBc/bsWf7kT/6E8+fP09bWRn9/PydOnGDLli088sgjBAIBFEVhbGyM3/iN3+D8+fOUlJQUNFLEbDZTVVXF9u3byWaz2Gw2Kioqlt3kxWQyUV1dTSgUIpvNMjo6yssvv8ypU6dob2/HarXidDpJJBLGPCU93T+bzSJJEmaz2TDMdCf1zMwMb7zxBu3t7YZBKPh08+kyklYxHo+HhoYGY56R7uFQVZVYLIbFYvlQ5ScSiRieGf1zTqfTyCnWIyUHDx5kdnYWn8/Hpk2bCkqVk2XZEIJ6EaM+m+lWp9I0NzdTUlLClStXOHnyJPv37yeXyzE3N8fExAQej4dAIMAjjzzCzp07mZ+fBzBypM+fP093dzdf+tKXaG9vZ35+nomJCWw2G6WlpUZbdVmWCQQCxGIxQ5AKBJ80stms0VJfbwleCD6fD4/HQ1NT05LhysvZzx0dHZw5c4ZIJGK8lk6nyWQyxugCEUESCFY3egqazWZDlmXKy8vJZrPMz88Tj8eJx+NGal0gEDCcI3omij7W46PQHZwNDQ3U1NQYckfXJZYjK/SsD7vdjt1uJxgMGp3uEokEpaWlrFmzhng8Tm9vLy6XC6/XSyQS4erVqzgcDkKhED6fz9C15ufnuXr1KqdOneJ//+//TSgUEqnCgk+BkfTegNeizOzQFv1ZJqFQiK1bt/LMM8/w/PPPs337dioqKrh06RLnzp3j0UcfpaGhgUQiwfHjx7FarQQCAaPV5be+9S0SiQQbN26kubkZgJ6eHtLpNLIs43a76e7u5h/+4R/I5XI8/vjjeL3egoXA4pxeWH7xqqZpxONxwuEw4+PjjI6OkkqlGB0dpbS0lFQqRTAYxO12s2nTJsLhMN/+9rcpKSkhHo9z8uRJLly4wN13301JSQlWq5WKigry+bxRyOlwOJZE0kpLS5FlmdbWVp599lleffVVwuEw5eXlDA4O8s4771BbWys6cX3a0UC79p8VvszNXePYsWOk02kcDgdVVVWEw2Feeuklzpw5QyAQoLOzs+DfY90oMplMhmxc7p7ev38/o6Oj9PT08IMf/IDNmzdz6NAhrl69SmlpKSUlJSKfX3Dn8N7eXWldQrsJPUKPlITDYZLJJLOzs8ZQ1aGhIcxmM36/H6fTaaSbW61WqqurSSaTnDt3jqNHj6IoCm1tbVRWVhKJRAgEAjz77LN4PB5qamrIZDIcOnSISCRyXZe8D0M3bhan1i1H7mSzWXp7ezl//rxRkjA/P093dzcvv/wyFRUVhEI
hysvLsdls3HXXXXzve99jenqaNWvWMDw8zNmzZ7nvvvuua1t+4cIFYzbjli1bcLvdwskjuJONJIl8KkdmPoFkMRVnTlJOJRtOoSazxuYqdJO53W5aWlrYvXs3Q0NDRCIRXC4XkUiEtWvX0tHRQU1NDYqiMDw8zPDwsBEy1osXd+zYwY4dO4wBtjMzM1y5coWpqSkkSWJ+fh6LxUJ7ezv33XdfwTnAi+/hZoTGyMgIx48fp7u72+ii88YbbzA+Pk5rayv33XcfgUCALVu2kEqlOHjwID/96U8Nb3l5eTlbtmzB6XRisViWPKxyuRw+nw+v14vL5cLtdmOz2fD5fHR2drJhwwYuX77MzMwMLpeLaDSKy+Wira2NNWvWCGXuU4iW18gnMqRn4piT1hVPedE00HIKuWgGLW9bdg3c9PQ0fX19RCIRHA4HuVyOoaEhfD4fO3bsMOoUC+FW7Onm5ma2bt1KLpfj8OHD9Pf3MzMzg8PhYP369ZSUlBR1WLVAcCvR96eaU8jFMqRn45hTK9/qWp+TlItn0FT3so+PRqMcO3aMvr4+pqenMZvNnDp1ioWFBRobG9m2bRsbNmww2mRPTU3hcDhQVZXZ2Vni8TgPP/wwdXV1eL1eqqur2bp1K2+++SZvvPEGLpfLmE+4e/du1q5dW1Cq3a2QOZqmMT8/z7lz5+jr68NisZDJZIjFYjidTkMO6t3svvCFL/DGG29w5MgRLl26RDwep7q6mnXr1lFdXW04fTVNY3BwkPHxcTZs2EAwGCy4NlNwZ3PH/RZIkoRsMmExm9FiOVIDYZRwGsmywkqwBpqikp6IYlrIYnFZlhWqtdlsVFdX8yu/8iv83d/9HRcvXiSRSFBTU8PXvvY1tmzZQjAYNDq49PT0MDk5SS6Xw+/3c8899/Doo4/S2NhobG7doDp9+jTz8/NUVVXxyCOPsH37djo6OoCbM3qWy8TEBMeOHePdd98FrtU0nD9/ntnZWRKJBHfddRd+v59Nmzbh8XiIxWK8/vrrWK1W1q1bx549e9iyZYuRv7x47XoBaklJCbW1tUaHLr0F+P/1f/1fvPDCC3R1dRltih9++GHuuecempqahCL3KUKSJMwWC1IelPk0ySszyHbLyhpJ73mGVUUhNx1HtniX3a7bbDYzOztr7Bmfz0d7ezvbtm1j3759BdUi3Qr0NZeUlPDggw/i9/v50Y9+RE9PD62trWzatMkYci0QfFIx5ERGI59Ikrw8g+wovLnAx0IDNA0lq5CbTmB2lC1bTuiNU44dO8b09DQtLS0MDw8zPT3N8PAwZWVldHZ2YjKZiEajxtwgvT12R0cHn/vc54wOdzU1Nezbt49wOMyxY8eIRCKYTCYaGhr4+te/Tltbm+GYXWn01LxcLsehQ4dIJBJGdGvXrl3s3bsXv9+PxWLBbDbz5JNPMj8/z+nTp4067CeffJItW7YQCoWWjCVJJpPYbDb27NkjDCSBgaQVJQ+teORyOYZHRjh48CD/83/+z2sT7Yt4i5Ik4Q8EePDBB/m93/1d/H6/2HACwSpBT0d55dVXefHFF3nzzTdRljHN/lYgyTI7duxg34MP8vjjj+NyuYSRLhCsIjRNI5PJ8Oyzz/KTZ57h3LlzqEWuW5VNJvbu3ctnfumXuP/++3E6nUW9vkAguAONJL0+JRaLMTIyQi6XK2qHJb0IsrS0dMmsAYFAcPvRxV0sFmN+fp5wOFzUafFwLSIUCATw+/34/X7Rel4gWGXockIfUhqLxa45XIuI2Ww2mg253W7RREAguA3ccUYSYLSMzmQyRe9apndrsVgsy25rKRAIioOiKOTz+aI7UeAXXZ7MZrOIMgsEqxS9LXQ2m0VRlNsiJ6xWK2azWdTMCgS3iTvSSBIIBAKBQCAQCASCj4uI3woEAoFAIBAIBALBIoSRJBAIBAKBQCAQCASLEEaSQCAQCAQCgUAgECxCGEkCgUAgEAgEAoFAsAhhJAkEAoFAIBAIBALBIu7I/rOrqWGfaAEuEKw+VouMEPJBIFi9CDkhEHy6uSONJABVVVEUBU3Tiiro9MGQJpNJDH8TCFYp+gwU/U8xWSwfhPIjEKxOdN1B1yOEnBAIPn3ccUaSpmmk02nm5+e5evUquVyuqEaSLMvYbDYqKiqor6/HbDYXJOAURSGbzRIOh0kmk+TzecxmMxUVFbhcLuMc+r1Eo1Hi8bjxWZfLRSAQwOFwXDegUhf2IyMjZDIZnE4nod6tB/4AAQAASURBVFAIq9WKJEnkcjnS6TQLCwskEgkkScJut+PxeAgEAkUX0PF4nEQiQSKRIJPJYLfb8fv9BAKB6z6rfx+5XI6FhQUWFhZwOBxUVFQY372macTjccLhsDFg2Gw24/P5cLvd2Gw2w6BNJpPEYjFjwrp+bafTic1mK+r3ILj16Hthfn6eqakpZmZmyOVyRV2D2WympKSEUChEWVlZwUrQ7OwssViMVCq1RKbJsozJZKK0tBSfz7figyf1a+v7KhaLkUgk0DQNl8uF2+3G7XZjMpmEcif4RKLLiZmZGcbHx1lYWCCfzxd1DfrzPxQKEQgECtrXmqaRTCaJRqOkUimy2SwANpvN2JcOh8P4bCaTIRaLsbCwgKIoWK1WXC4XJSUlxv5VVZVcLsfc3ByJRAJVVQ0doaysDKvVWlSHsP6zSSaTzM3NkclkUFUVk8lEKBTC5XJhNptRVZWFhQVisRjZbBZN03A4HNTW1hpySVVVEokEk5OTN9QVHQ4HHo+HkpISwwH+/7P3nlFynudh9jW995kts71jG8pi0QsLSEgsoqhii5QVWYpj2YlzpJPYf3LiHPvYyZc4TrFk2Y7tmLEpS1SxSLE3ECR6r4tdbO+9zO7M7PTyfj/g99EuigSCxJKCnuscqOzOvm3e537ufkt+ubjnjKR8Pk80GqWnp4e33nkTRZsD7RqGzPMaLCYbTfXNFBYWYrVab0u4qUr+lStXCIVChEIhDAYD+/bto7KyctXiTCQS9Pb2MjMzQzgcJpvNYrFYqK+vp6SkBL/fD/w0RJ/L5QiHw5w4cYK5uTnKy8vZs2cPRqORXC5HKBRienqa4eFhotEoGo0Gi8WC3++nra0Nq9W6pkJwdnaW0dFRZmZmWFhYwOVy0dzcfFMjCa4JzZmZGbq7uxkYGCAQCPCJT3wCnU6Hoiik02m6urqYnJwkkUgIgVpQUEB1dTUFBQVYrVay2SxDQ0NMTEwwNzdHNpvFbDZTXl5OSUkJJSUl0qt3D5DL5RgdHeVSxyW6e7tAl4e1/EpzGqrLa2lqvPZO367xPTU1xdjYGAsLC8KrnclkyGQyaDQa9u7di81mu+tGksrS0pJYL8vLy+TzeVwuF8XFxVRUVODz+QCZKiT5xSSXyzE0NMSZc6cZnxoD7VrKCQ1kobG+mY0bNuFwOG57XYdCIYaGhpifnxcOFZPJRCAQIBgMUl1djU6nI5lMMjMzw8DAANPT08JI8ng8tLa2UlRUBFzTN+bm5rhy5QrLy8vkcjk0Gg1Go5Hm5mbKyspWOXLvNvl8nng8Tm9vL2NjYyQSCX
HtGzZsoLS0FL1eTzabpb+/n4mJCWEEer1eSkpKxLNUDanLly8Tj8eFXM1ms8RiMbxeL7W1tbdtpEruPe45IymbzTI7O8uRo0f4u+f+GleJGYNVh1Z79xdwXoFkOIMxa2FLw2527dqFyWS6rcWVTCaZnJzk4MGDTE9Pc/78eXK5HGVlZZSXl6PVakXof3p6mldeeYWJiQkSiQQ6nY5oNMq2bdvYsWMHO3fuFNEkRVGIxWJ0dXXxne98h4sXL3LffffR2NiIx+MhkUjQ3d3N+fPnuXz5MlqtVggKu92Ox+OhoaEBg8GwZkKwv7+fEydO0N/fT1dXF16vly984Qts3Lhx1edUr082m+XcuXO89tprnDhxgrq6Onbv3o3ZbCaXy7GwsMAPfvADQqEQcM1Dl8lksFgs7Nu3j7a2NqqqqlhaWuLNN9+kt7eXxcVFzGYziUSCuro6Nm/ejMfjwW63r8kzkNw9MpkMly5d4uU3X+TY+ffwlNvQGdZqg4fIVJymsk18IrJMS0vLbRtJo6OjXLp0iZGREZECND8/z8zMDNFoVMiKtYp4Xr16lXfffZfu7m5MJhP5fB69Xk9NTQ07d+5k27ZtGI3GNbkWieTDRHWunT59mh+/+kP6xrtwFlvWRE4oQD6nEJ1MsLl+Fzqtnqqqqtte1xMTE5w7d46RkRHi8TiKopBIJPD7/TQ2NlJUVITD4WB2dpZz587x1ltvEY1G0ev1pFIpHA4HsViMxx9/HJ1Ox/z8PKdOneLVV19Fr9ej1WrJ5XJEIhF27drFk08+eYMj926SSCQYHx/nn/7pn5ifnyeXy4kMHpfLhd/vx2KxkEqlOHbsGF1dXQwPD7O8vExxcTFPPPGE0MlUJ/Hp06eFIaUoCuFwmK6uLqqqqnjkkUfYvHnzmtyb5OPHPWckqaHhVDpJsNVFy6fKcBVb0Rnvvhcgn80z2xsmdDVNcj5JJpO57TxmNUXviSeewG6385d/+ZecOHHihs+lUilee+01+vr62LZtG7t27aKoqIg33niDV199lYWFBSoqKqioqAAQi/3v//7vuf/++xkfH18lzEZGRjh9+jT9/f3s3LmTRx55RGwOr732Gn/xF3/Bf/7P/1l4hdeC6upq7HY7DzzwAG+88QZ9fX23/Gwul+Ps2bO88cYbxGIxduzYwfz8vPh9NBrlwoULvPHGG/yH//AfhLEzOjrKt7/9bS5evIjJZKK8vJxDhw5x6NAhmpqa+NKXvkRNTQ3Hjx/njTfe4ODBgxQXF7N9+/a1eASSu0gulyOZTGJya6nc7mfDZysxWfVw1zd5hVwmT8+BScwzWlKp1PtK9du3bx/33Xcf+Xxe1Ei89tprHD9+nOHhYdrb2zGbzXfx+lfzzDPPkEqlWLduHV/84hfR6XS88cYbDA8P89Zbb1FbW0tBQcEN6b8SyS8CuVyORCKBI2iktqaQxv0lGG2Gu35eRVHIJnN0H5hAM5snnU6/r1S/yspKCgsLRUaIRqPh/PnzvPLKK7z66qts27aNxsZGjh8/zrlz58jn8/zu7/4uPp+Pixcvcvz4cf78z/+czZs343Q6GRgY4ODBgwSDQb7whS8QCAQIh8O8+eabfOc736GpqYmioiKsVutdfCo/pbu7mxdffJGenh5+53d+h4qKCmw2m0iPdzgcaDQaTCYTO3bsYPv27Vy6dInOzk4mJiZWHctoNNLU1MTv//7vC10tl8tx6dIl/u7v/o6ioiJaWlqkDPsl5p7+5vUmHWaXEavHhN50942kXCaP2WHAYM69779Vw+FqTcHNaoFyuRzLy8t0dXVRXV1NU1MTNTU1WK1W7r//fk6fPk0kEqG7u5uKigpSqRQXLlzgwoULVFZWsmPHDl555ZVVx4xGo6RSKZGuFwgEUBSFmpoaqqurefHFFwmFQjgcjjXzUJeWllJYWEgymeTkyZO3jMRlMhlmZ2d55plnaGpqwmg0srS0tMpIUtMCstksWq1W5BhbLBa0Wi1GoxGTyUQul+PMmTMUFhbS3NxMU1MTDoeDjRs30tXVJdL5pJF076DVazCYdVjcRsz2ux8pVRSFXDqP0apHo3v/6atGoxGj0SgiqKlUiq6uLubn59m6deuap4QMDw9TW1tLc3MzJSUlaDQaqqqqmJycpLe3l1AoJFJ/JZJfVHR6LUar/pqccNz9yKiSV0gnshitdyaTfD6fUPjVqM/KTBCDwUA4HGZkZIRMJsPWrVupqanBbDbT1tZGPp/n7bffZmBggIaGBtE8IpfLYbFYsNvtZDIZkeZvMBjWLB0/mUzS29vLu+++y4MPPsipU6c4deoUFouFmpoaduzYseo+W1tbyWazzM/PMzIyctNj6vV6YeDl83mRXaPT6SgtLaWurk6mDP8Sc08bSRqtBq1Og1avQWd4f4tYQU0/XlnP9LMXiqIoaHTaO0rtUxV2NT3lZp6LXC5HPB4nFApRV1eH1+vFarViNBrx+Xy4XC7C4TBzc3MADA0NMTQ0RCwWY8OGDauaNagYDNc8Y6lUiqWlJdEVMBKJMDExwfz8POFwmEwms2ZGktlsxmg0otPpbtn4Ip/PMzc3x6FDh9Dr9TQ1NZHNZunu7r7hWCUlJWzZsoUrV64wNzeHw+FgYWEBi8VCZWUlJSUl5PN5pqamKCwsxOfzieJPl8uF2+1mfn6e+fn5a9+xFJj3BBquyQidXotOr0Vzxym5qoz4OfIhr6DkFbQ67R0FrVYqIoqiMDExwdjYGBqNhs2bN4u1vFbYbDZRr5DJZNBqtUQiEWZmZkStwMelhbJEcsdo/lmX0GvR6j9I8f7tyYl8TkGn6hHv81QajQaDwUA8Hmd8fJzLly8TjUYZHh5Gq9WyadMmvF6vaO4AUFxcjN1uR6vV4vF4RPR3fn6euro6AoEAzc3NdHR08Prrr4t1PzAwwH333SeaJK0F0WiU2dlZxsbGGBsbo6ysDK1WSzKZ5MyZM7hcLlpbW4Wz2Wazkc1mhT5xs+cFiN+pzSw6OztxOp0Eg8Fb1kJLfjm4p42kO0IBNAqaNa3kvj1yuRypVIpEIoHdbsdsNovFrdfrsdlsRKNRIpEIqVSKy5cvMzc3h9PpZNOmTaLgciUulwuHw0Eul6Ojo4OysjLR6GBwcJB0Os3y8rLokvOzUFMdI5GIEMBq+Ntut3+o3WFisRiDg4O88847bNu2jXXr1jE9PX3D50wmE8XFxWzcuJFDhw5x+vRpDAYD6XSapqYmiouLKSwsFHnIlZWVIsoE156r+pyXl5c/lGuXfHz5qXNkrf/4fZzmn7s7Xbp0iVAoRElJCRs2bABuv0lCMpkkHo8TDofJ5/MYDAbsdjtut/u212lTUxMjIyNcvnyZ1tZW9Ho9PT09DA8PMz8/v+ZdAyUSyTWSySSDg4O8/PLLzM7OkkwmaWlpobm5GZfLxejoKMlkEo1Gg9PpFA2JDAYDZrMZi8VCJBJBURR8Ph8NDQ0cOXKEN998U6T7ZrNZvv71r+P3+28rkqTWVEciESF39Ho9DocDl8t1W02RlpeXhX7R19fH5s2bcblcLC0tcerUK
U6cOPG+GmZdf32ZTIZwOMzVq1fZt28fZWVlohug5JeTX04jSQHlVk4ajfiPlT+45XHW0pZS6xAURbnpHCZVyKTTafr6+jh16hQ1NTW0t7dTVFTE5OSkOI76r6Kigt27d5PP5/nHf/xH/uqv/gqj0Uh1dTV1dXVcuXLltr3ByWSSgYEBfvSjH/Hcc8+Ry+Vob2/nM5/5DE8++eSHWi9x9uxZTpw4QS6X41d/9Vcxm80igrbyHjOZDHNzc7z66qts2LCBlpYWSkpK6O/v54033uDixYs4nU5aWlpEAej1glpVGnO5959GKfnF4naW843LXnP7f/whkc/nefHFF7HZbOKdfj/09vZy8OBBnnnmGWKxGJWVlTz22GP81m/91m3XFnzta1/jueee47XXXuNHP/oRdrudsrIy0UUKPj7DOCWSu8pt6QJrJyDsdjubNm3C5/MRCoU4cOAAo6OjHDhwgF27dokGBcANrfrV+UzqDLn5+XkuXrxINpvlc5/7HH6/n6WlJV588UW+973vUVJSgtfr/bmGUjabZWFhge9973s8++yzRCIRiouL+dznPsdv/MZviLEBPws1zc9kMrF7924+//nP43a7mZ2dxWKx8N5779HW1obH43nf+oaiKESjUTo7O1lYWKC5uZnq6mqZOfJLzi+nkfRhvfNrvHbUaJHFYhHzUvL5vOhIp7bntNvtTExMcP78eV588UVSqRQGg4FcLsfs7Cxms5menh5+4zd+gy9+8Yu0trZSX1/P008/LdLQFhYWOH/+PC+//DKBQOC2FCeTyURVVRVf/epXeeyxxwBwOBz4/f4PvcvV0NAQJ06c4OTJkxw+fBiAdDpNMpkkmUzyta99jaeeeori4mLOnDmDz+fjy1/+MnV1dej1etrb24nH48zOznLhwgXq6upwu90kEgmSyaTYQNToXS6Xw+FwfKj3IJHcCZlMhqmpKY4cOcJXv/rVVXn4t0t1dTVer5fdu3eTy+Uwm814vd73lVJbXl7O17/+dX7jN35DyJ7h4WFOnz7NhQsXMJlMUsGQ/HJw/Wu+xg7U69Hr9fj9fjweD/l8nqamJp5//nlOnTolDCWTyUQikSAajQoHoZoNEo1GcTqdzM3NMTg4SEdHB//iX/wLdu/eLZok7Nq1i89//vOcO3cOt9tNfX39z7wmtdb6V37lV9i7d69o2+33+7HZbLcVjVIzX0wmE01NTWLOodVqFRkhyWTytjJfrkdRFKanp3nppZdobGykrq5O1lRKfkmNJG4uv/450+7nC7e7IABXKuXAqg5WapqcmmNbWFjI1NQUc3NzxONxbDYb8/PzzM3NodPpqKiooKamht/8zd9kaWlJCIylpSW+//3v43Q6+cxnPkN7e7sYpGoymTAYDNhsNhRFYXR0lO7ubpqamvB4PLdV76A2RigqKhKeZL1e/74LO1UPVi6XW/UcstksGo0GrVZLe3s7TqeThx56SITxJycn6e/vZ3h4mCeeeILm5mbS6bRog6rWOKn/VG+Z+nwrKyuZnJxkZmaGRCIhaizm5uZIpVIUFxdLpe+XhRVr/Prlfss34GZy4UOWFaq38/Dhw5hMJhoaGqiqqnrf76U6dNrpdKIoClqtFoPB8L5TVIxGoziOmsufTqfx+/0ijUciuVe55fK+hey426itywGxV2q1WlEzlM1myWQyuFwufD4fY2NjDAwMsGPHDsxmM/Pz82LofGlpKUajkUwmQzqdFkOr9Xo9iqIIx2s2m72tLr5qOp/aoOpO5I7L5SIQCOD1epmcnCSZTGI0Gkmn00QiEXQ6nZBJajaJqkesTBNUM0ZWys14PM709DQdHR382q/9Gh6PR8ovyb1tJCnKtULpfF4hn7u9VtwfJDlEyV07351kmKjCbX5+nmw2SyQSIZ1Oi4nfTqcTi8UiurhcuXKFoaEh4S06d+4ciUSC8vJyKisrCQaD7Nu3j2w2K4TF1NQU7777Ln6/n/vuu090rolEIiwvL5PJZNDpdIRCIbq6uhgbG2Pv3r23FQaHn4bp1eu8U+LxOMvLyywuLhIOh0WziomJCaxWK263m6qqKoqLi8Uk7UwmQ1dXF1arlUwmw86dOyktLWV2dlYUqg4NDWE0GrFarSwuLjI3N4fBYMDpdIpWoAMDA4yMjNDd3U1hYSFdXV2EQiFMJhOVlZV3fE+Sjx8KoCjXCqXzeQXNz1i4H0bSmKL8VEbcKfl8nnA4zPHjx6murqa8vPyOCot1Oh06ne4DpcAuLS0JuWEymVhYWGBgYIBUKkV1dTVut1sqGZJffJSf6hJKTkHRKNf/+uf9+fsi/886i6IodyR4JicnxV6udm5Vh8vqdDrRmKikpISFhQX6+/vp7+/H6XTS399Pd3c3BQUFBINBNBoNVqsVi8XC8PAwlZWVOJ1O0WXOZrPhcrluK9NENUrMZvMdyx2TyUQwGGTdunX09vYyODiI2+1mYWGB0dFRPB4PTqcTs9ksBsWqHW+j0ahoaOFwOLDZbFitViGjZmdnGR4eJhKJ0NbWJuqoJb/c3NNGUi6jkEnkSMey5DN3Pzc+l82TSebIZW7PIFuJahidOXOGaDTK2NgYyWSSjo4OdDoddXV1lJWVEQgE2L59O+fPn+fSpUssLi5SVFTEK6+8gtfrpbW1laqqKvR6/SpDRTWUbDYbdrudQCCAw+EgnU4Lb1IoFMJoNNLd3c3w8DD5fJ6nn34am832YT6mn8vMzAyDg4MMDQ0xODjIwsICfX19HD16lMrKStavXy9aeauk02kWFhbweDzYbDZ8Ph9WqxWfz8eGDRv4p3/6J959911GR0fx+/1MTU3R29vL9u3bqaqqwmq1snPnTt577z16e3tJJBKsW7eOkydPkk6naW5uprm5eU2fg+TuouQhl86TiWfRaG6/6cEdn0+5Nicpm86hv0NDKZPJsLCwwOnTp/nCF74g5qF8FIyOjtLX10coFMLlctHT08PAwADl5eXs2LHjtuoUJJKPO/mcQjaVJx3LXut0d5djQ4pyTW/JpvJ3ZCSdPXuWxcVFtFotTqeTTCbDuXPnmJ2dxefzsX79esxmM42NjczPz3Pw4EHefvttvF4vnZ2djI6OsnfvXkpKSsjlcpSUlFBSUsKRI0dQFAWPx8Py8jKHDx+murqampoaAoHAh/8gboJGo6Guro5PfvKT/MVf/AUHDhzA7XYTjUbp6Ohg7969BINB7HY72WyW3t5eent76ejoYGxsjKWlJQ4dOkRlZSUVFRWUlZWJFOPu7m6uXr1KQUEBGzZsWLO5T5KPN/eckaRGMwx6A6GhZa6+OYHZZUCnv/ubdT6nEJ1NklvU4/MbbiiI/FlkMhmWlpY4duwYMzMzmEwmtm7dyvj4OOFwmGw2i9Vqpbi4mC1btvDbv/3bnD17lp6eHrq7u9myZQv79++ntrb2lm2zLRYLu3btwul0CsNH/Vw0GhXRqKKiIh599FF27NhBdXX1h/eAbpOpqSkuXrzI+fPnURSF0tJSYrGY6GRXW1t7Q+2E2r60rq4ORVFEPYTb7Wbnzp380R/9EUePHqW3
t5dLly7hdDp5+umn2bx5M6WlpWg0GioqKvh3/+7fcfbsWS5cuMCBAweoqqpiz549NDU1iRRCyS82aopHOppjpj/MpZ+MYDCvzXyhfFZhuidMpT0gUj7fD7FYjFQqRVtbG5/5zGcoLi6+S1f687HZbEQiEbq6uohEIhQUFPDkk0+K+W3SQJL8IqOO5YgvZJgcDJFN5zCswbxFRbk2c3GmO8w6bwV6vf59raVAIMDIyAijo6OEw2FMJhMVFRU88MADtLa2ikHzra2teL1eSkpKOHjwID09PRQXF/Mrv/IrPProo6KOuLW1Fb/fz5tvvkl3dzeJRAKj0cjGjRv57Gc/S2lp6ZoOsa6srBR10sePH2d0dBSn08mjjz7K/v378Xg86PV6YrEY3d3dHD58mEQigdlspqqqigMHDtDW1oZOp6OoqEjoErlcjqKiIn7jN37jtmukJPc+GuUeaz+UzWaZmZnh9OnT/PgnP0LR5UG7RrNtlGveaavZxqbWzXzx6S9it9tva4ZAPp8X0ZBMJrOqK5Rai2S1WoUwisfjxONxkskk+XxetNo2mUw3VbzU3Fw15O52u0XebiqVIh6PE4vFUBRFRKFsNtuazUZayfLyMrFYjHg8vurnWq1WhPev7+6npismk0lSqRQ+n0/kHSuKQiwWE6lB+Xx+1TNdWTOltkaOx+Oii47abn2t59BIPnzU1MxTp05x8tQJLnZcAH3+A8xIer8XAPksrKttZHNbOw/c/wAWi+W25VM2myWZTBIKhSgsLBRDItcymrRymO3y8jKJREIMrLTZbGLOmVQyJL+oqHLi0KFDHDryHkOjA6BT1kxOKAooGdjY2sbOHTvZ3Lb5tlLYFUVheXmZeDxOOp0WDRmMRqNIg1eNH/Ue4/E40WhUjAIwm82rWnKvHO2hNjVSh7S73e41HSarXreiKKJMQN3PLRYLTqdTOKdzuZxICb6+ZspqtYp/6rWr92cwGPB4PDLVTgLcg0ZSPp8nFosxNTXFpUuXRE3OWqHRaLBYLJSVl9HS3LLmAkQikdwatQnI5OQko6OjTExMkM1m1/QadDodRcVFlJaUUlFR8b4izhKJ5O6jyonR0VEGBweZn59f8xEQOp2OsvIyysvK13Rgq0Qi+Sn3nJEEiI5laovstUTtKKN2i5PKj0Ty8UPt8qRGFtcS1QtrMBik4iORfExRsy/S6fQN2R1rgUajEXrE+03LlUgkHw73pJEkkUgkEolEIpFIJHeKzAOTSCQSiUQikUgkkhVII0kikUgkEolEIpFIViCNJIlEIpFIJBKJRCJZgTSSJBKJRCKRSCQSiWQF0kiSSCQSiUQikUgkkhVII0kikUgkEolEIpFIVnBPDulQB8GlUikxnXmtUOck6fV69Hq9nJMkkXwMyeVy5HI5stnsRzInSZUPcv6JRPLxRNUjstksuVzuI5ET6owkKSckko+Ge85IUhSFTCZDNBplamqKbDa75kaSwWDA6/VSUFCAVqu9LUNJFcaxWIx0Ok0+n0er1eJyuTCZTLc8RjabZXl5mXg8js1mw2w2YzKZgGuKYCqVEv/y+Tx6vR6TyYTFYhHDbjOZDOl0mkQiQSaTQavVYrFYcDgcH4mRt7y8TCKRIJ1Oi5+phqfD4cBkMpHP58lkMiQSCXFvWq0Wg8GAzWZb9cwURSGRSBCPx8X7oB7r+kF9+XxePItkMomiKBgMBoxGI1arVRq+v+CosiAWi7G0tEQ4HCabza7pNajrWv2n0Whu653KZrNiLasywm63i7Wsoip3yWSS5eVl8vm8GGDrdDrR6XQfyTusXlcsFhPDOR0OB0ajEY1GI76bdDotPpPL5dBqtdjtdsxms1irS0tL4hkoioJOp8NqtWIymdDr9Wi1MklCcueo72I0GiUUCrG8vEwul1vTa9BqtXi9XlwuF3a7/X290+r+GI/HxT6m1+sxGo3Y7XZ0Op0YlJtMJoVD2WazYbPZVg25VtdtPB4nkUiI56DX67Hb7RiNxjU14tR9P5lMksvl0Gg06HQ6zGYzVqtV6FzqMOBoNLpKp7JarVgsFiEHFUUhFAqJz6isPKbRaBTnTqVSQpfQ6XSYTCbMZjNms3nNnoFk7bjnjKRcLsfCwjznz5/nRz/6Eel0em2NJK0Gp9PJlvat/Mqv/Ao2m+22BEgqlWJ+fp6jR48yODhIJBLB6XTymc98hoaGhpseQ1EUwuEw77zzDmfOnGH37t20trZSXV0NQCKRoKenh6tXr9Lb20symcTv91NfX09bWxtlZWUoisLc3ByDg4NcvHiRsbExbDYbmzdv5pOf/ORtG3kfJufOnePcuXMMDw+Ln1mtVoqKiti/fz/19fXEYjEmJiY4f/48fX19JJNJbDYbRUVF7Ny5k3Xr1olnlsvl6Ojo4MyZM8zOzpJOpykqKmLfvn1UVFTgcDjEedLpNH19fVy+fJmrV6+STCYpLi6mrq6OLVu2UFRUtKbPQvLhk81m6ezs5NSpk5w/f55MJrOm59fpdDQ3t9De3s6ePXuEU+PnsbS0RG9vLz09PQwPDxMOh9m/fz/r16+ntLR01WdjsRgdHR0cPnyYcDiMxWKhoqKCxx9/HI/Hs+brWpXBy8vLHDt2jNHRUbLZLI8++ijl5eVCKcvn8wwPD3P8+HHGx8eJRCLY7XYefPBBGhoa8Pv95PN5XnnlFcbHx4lGo2QyGVwuF1u2bGHdunUUFRXd9jOVSG5FJpPh4sULHDp0iP7+/jV3puh0Otrbt7Bjxw42btx4W++0us6SySSjo6OcO3eOzs5OstksXq+Xqqoq9u3bh9frZXFxkZGRETo7O+nr6yMWi7Fnzx727NlDIBAQx8zlcsRiMU6cOMHly5dZWlpCURQCgQAPPfQQVVVV2O32NZMnMzMzdHR00NHRQSgUwmAw4Pf7aW5uZtu2bcJQyufzLC4u8sYbbzA2NkYsFsNkMrFz507Wr1+Px+MRcueHP/whY2NjLC8vi/Oox2xvb6eiogJFUZiZmaGvr0/oEh6Ph9raWlpbW2lsbASQTtR7jHvSSJqfX+DcubNc7nmRhg1GHG4dOv3df3HzOYX56SwDkzaW342xf/9+TCbTbRlJyWSSmZkZenp6ADh79iyLi4ts2bKF+vr6m/7N1NQU58+f5x/+4R/o7u7GarVSUFAgjKSDBw9y4sQJZmZmaGxspLS0lM7OTjo7O3nvvff44z/+Y2w2GwsLCwwPDzMyMkJ/fz/z8/PodDr279//kXhkr1y5wtGjR1EUhU996lNoNBpMJhNutxun04lGo2FqaoorV65w4cIFSkpKKCwsZGlpic7OTs6cOcPv/M7vUF1dTT6fp6Ojg//xP/4HDQ0NFBQUYDKZGB0d5a//+q/51Kc+RVtbG36/n1wux3PPPceVK1cIh8O0tLRgtVpFtGllZEvyi0s6nebq1atcuPwevWOHaN5sxmjSwN3e3BSFXA4GrqaIX5jGYDDQ1tZ22wp9KBRiZGSEkZERFEXhu9/9LoFAgNLS0lVG0uTkJJcuXeIf/uEfqKysxOfzsbS0xCuvvEJvby/f+MY
3CAQCa76ZX1M6L/Lcc88xPj5OYWEhmzdvpqSkBL1eTyKRYHh4mD/4gz+gpKREyDI1+qtGjRRFYWpqCqvVitvtFvLgH//xH9mxYwc7duxg06ZNa3pvknsLNSPl0qXLXLxygNByJ7XNJozmu78fKopCNqMw0JUifTKC1+ulvr7+tuVEKpXipZde4sKFCywuLtLU1CQiJ1qtllQqBVxzuoyPjzM4OEgikeDHP/4xRqOR9evXrzKSpqenOXnyJD/84Q9pbGyksLAQgJ6eHk6cOMFv//Zvs3nzZtxu94f+LG7G4OAgr776KoFAgLKyMhKJBFNTU5w8eZJ4PM6OHTtwu90sLCzw//1//x8LCwvU1NRQVFREJBLhe9/7HjMzM2zevJmGhgYA3njjDTQaDRUVFWzcuBEAp9NJaWkpVqsVRVEYGhriueee4+rVq5SVlVFXVyeezfnz5/nKV75CXV3dqiic5Befe+7bVEOsmUwaT4FCc7uBQLH+mhJ0l8mkFYb7YLhbITWfel/heYvFQmlpKZ/85CfR6/VMTExw6tSpW+ZBx+NxTp06xdmzZ3nggQcYGBgQCoQq4Ht7e8lkMlRXV/Pggw/idDopLy/nyJEjHD9+nJmZGSoqKiguLkav11NRUcHbb7/NqVOn1jT6dj35fB6dTkdhYSF79+4Vwt1oNOJyudBqtfj9ftavX09RURFerxeDwcDi4iL9/f288sor9Pf34/f7SafTnDp1CrPZzO7du6msrESn0zEyMsILL7xAf38/Ho8Hr9fL1atXOX36NF6vl71791JbWyvC7CaTCY/H85E9E8mHh5pSqTdlKSyFTbuMWG1aNHdZ/1HykMkopNNp4rPZG9I7fh5+v5+2tjaqq6vRarX81V/91Q21EoqiMDo6SmdnJyaTiX379lFQUEA0GqWsrIwf/OAHPPLIIyIleC1Q5dHi4iKvv/66SDFUDR+VmZkZfvjDH+J2u7n//vuprKzEYrGQy+Vwu90i/Ven07Fv3z7MZjMGgwFFUZienmZ2dpa5uTnGx8fZsGHDbacxSiQ3Q5UTZnuWkoCGtt1GLDbtXfel5PMKqYRCKpkhOZt533WTqiNUURQeeeQRGhoaxDrR6/Vi/fn9flpaWoSD8eWXXyaXy92w94fDYfr6+nA4HOzYsYNgMIhOp6O+vp4//MM/ZHh4mMrKyjUzkqqqqnjyySdxu93Y7XZSqRSjo6PE43GuXLlCQ0MD+XyewcFBDh06xDe+8Q0aGxtxu90sLy/zk5/8hP7+fpxOJyUlJdjtdjKZDEVFRTQ2NnL//fcD19IJrVYrdrsdgAsXLjA1NYXb7eaxxx6jsLCQmZkZLl26RHd3N2fPnqWiouIjS2eW3B3uSSPp2r88VruWgqCe4jLDmhlJy5Ec8xNaQnOKuJ7bwWAw4PF4sFqt6HQ6fD7fTReaerzu7m4GBwdJp9Ns376dZ5999obPqzm7JpOJ4uJiEWJXvbYqTqcTi8VCUVERHR0dmM3mj3yRZzIZFhYWuHLlCna7Ha/XS2FhoTBanE4nZrOZwsJCUavgdrtFbnU4HBa1RWNjYzidTqqrq6mqqhL1GapyNT09TS6X4+rVq4RCIZxOJ/l8nt7eXux2OwUFBdhsNiwWy0f6TCQfHoqioDeA3aWjuMyAzXH3lR9FgXRawe3VkV78qay6XdR6vOLiYmEsXL9OM5kMMzMzTE1NiU3f4/GQTCbR6XT87d/+LWNjY5SVla2pkRSNRrlw4QJLS0tUVVURiUSYnJwUn8lms8zPz3PkyBG2bt1KLBZjbGwMvV5PIBCgqKhoVT1gfX29qD1SFTu1vnKt0ycl9yaqEW80abB4dBSVGbA77n4kKZ+HRDyP06sjvfD+G0/19PSwsLCAwWAgm83S09ODzWbD7/dTUFAgamfsdjsmkwm/38/c3Nwta59zuRzJZJJ8Po/f76ekpASdTkc0GiUej695rZbf7xf1hwaDgXQ6jUZzrcwhFAqJmqGFhQUmJydpbm6mvr4ei8VCMpkkEAjQ29vL5OQksVhMGEGRSITx8XE6OjpEFMlut4t6z8nJSVKpFIWFhTQ2NuJwOLDb7UxNTdHV1UVfXx+pVOpjoT9JPjzuOSNJoAGtDnR6DXqDBoNxLdLG8uj1Gu4kQ03tYKOG1NWmCitRhWUqleLtt98mkUjQ1NRES0vLDWlxqnIxOzvL1NQU09PTwDUv0+zsLMXFxQQCgVVRGrPZLIqo74TbEea3c2y9Xk8ymaS7u5u+vj4KCwtpbm5m586dPPTQQyIFTjV01HOv7AJkMBhEXnI6nRYCVQ2FG41GUqmU8D6rdSparZapqSleeeUVOjo6KCgoYOfOnWzfvh2v1yu7DN1DaDSg04HecE1GaLV3d2PL568pPFrdnWX2GQwGsWGrBcvXk0gkCIVCRKNR2tvbcbvdorFDSUkJPp+PqakpwuHwbZ3z563pn7ee1SjSxMQEP/nJT6ivr6euro7h4WEhk9TrnpmZ4fLly+zevZuXX36Z+fl5ALZt28ZTTz1FeXm5kIt2u51kMkkikWB5eVnUJ5WWlooUPInkw0Cj/Wc58c+6xN1+t/I5haxBg+4O9AhFUeju7iaRSBCJRHjllVfo6uoiEAiwefNmdu3aRUFBATqdTsgTk8nE8vLyLesULRYLfr+fS5cuMTU1JWp4L126hM1mo6Cg4LajSB9UnqjXozosVUNW1X9WNpFQI3Bq4wW1cYXBYGBhYYHZ2VkikQiFhYWYTCaGhobo6+vjzTffpLy8nIcffpitW7fS1NSEyWQSxph6DJ1Oh9FoRKvVkk6nmZycFA20VGeu5Befe9dIukdJJpMcOHCAgYEB7r//fpF2cjPh8rnPfQ6j0cjLL7/M/v37yWazeDwe9u/fzx/+4R/ele51apc+tchVq9UKgXy7bNy4EZ/Ph9vtxu12c+bMGc6dO8df/uVfkslkePzxx2/oJJNOpxkaGuLkyZMsLS3R2tqK1+tFq9XS2trKt7/9bdrb28lms7hcLo4ePcrhw4dpbm6mubmZeDzOxMQEx44do7i4mL179/Ktb32LCxcucPDgQa5evUoqleKTn/zkh/q8JB8jFOAXXLdOJBLEYjGy2Sw+n09EX9TukG63m2g0SjKZvK3jKYpCNpsV0Rn1WLdbH6EoChcvXuTIkSOMjo7yJ3/yJwwNDTE6Orrqc0tLS8zOzpJIJPiHf/gHfuu3fov6+nrS6TTPPvss2WxW1A+qCtKzzz7La6+9xokTJ9BoNDz11FPs2bOHtra29/HEJJI1ZA1kzNzcHEePHsVsNvPYY4/xb/7Nv+HChQscOXKEM2fOYDKZ2LNnz23XzlRUVPC5z32O0dFR/t2/+3csLi4C1yLbf/7nf862bdveVyq62nVXRTU27kQXURSFyclJLl++zPnz5/nyl79MIBDA4XBQX1+Pz+fjr/7qr3jyySdpbGxkfHycl19+mUgkQmVlpZCDn//857HZbHg8HoxGI2+++SYvvPACXV1dPPnkk9x///2sX7+es2fPcuLECTZu3Mju3btF/fSRI0fYuHEjsVhszS
L0krVBGkm/QCSTSSYmJnj11Vepra3F7/cTi8VE2/BoNMri4iJLS0vY7XYOHjxIT08PhYWF/Omf/ik+n49z584xMjLC//pf/4s/+ZM/we/3f2iFhplMhqmpKQ4dOsT3v/99crkcdXV17Nq1iyeeeAKLxXJbgnDjxo00NzcLA6ukpISioiKOHDnChQsXhIBXDa90Os25c+d499136ejo4Nd//depra0VIfknnniCsbExfvSjH/EP//AP6PV6nE7nKkNK9aI5nU4efPBBfvM3f5OioiKCwSDJZJLBwUHOnz/PJz7xCUB2sLknuQe+0pV1ODerY1DT0m6XcDjM0aNH+eu//msymQwFBQWsX7+ef/2v/zVWq/XnHmt2dpZXXnmFy5cv8+u//ussLS0xNzfH0tISsViMubk5EomEaFOu1+v53Oc+x+OPP05FRQXxeJxoNMrp06cZHx+noqJCNKl49NFHaW9vZ3p6mt7eXk6dOsWJEycA2Llzp2wDLrnrvG+bZ41kjJp98S//5b+krKyM0tJSDAYDJ06c4NixY+zYseO29v18Ps/U1BTHjh1jZGSEf/Nv/g3BYBCAixcv8u1vf5tsNsuOHTtuq/NrOp3m2LFj/PVf/zVLS0s4HA6am5v5+te/jsvlum1dRE1VHh0d5a233qKjo4NNmzaxd+9e3G43er2e8vJy/ut//a9873vf40/+5E8AsNlsIoVuZVv1xx9/XOgbcC2lT1EUIpEIHR0d7Nmzh+3btxONRjl69Cj/+3//b/7sz/4Mh8OBXq+ntbVV/L3UDe4tpJH0C0Qmk2F5eZlYLEZPTw/z8/OcPn1atLq8fPmyqM/ZsmULnZ2d6HQ6NmzYwN69e3E4HPj9ft59910OHjxIX1+fECgfBlqtFpvNRnV1NQ888ACKolBYWEhlZeX7mi+0MpwO4PP5KCgowOPxMD09LeYiqal0nZ2dHDx4kKWlJTZu3Mj27dtFbZdWq6WgoIAnnniChoYGwuEwuVwOm83GyZMnsdvt2Gw2DAYDTqcTj8eD3+8nEAhgNptxu914vV4mJiYIh8OiqYRE8nFETZvV6/VEo1FRr6NGeJeXl4Xz4HaPV1payv33308+n8fpdFJWVnbb63lpaYnl5WVCoRBnzpyhv7+f6elpRkZGmJiY4PXXX6eyshKHwyHmkKn1UupMp0AgINb6ynojNc1HVQKnp6dZWlpicHCQTZs2YbPZ7vg5SiS3w8dRHbbb7fh8PgKBAH6/X+xjHo8Hm80mIkG3g1rjePXqVSorK0XjBo1GQ1FREadPn2ZoaIjy8vLbMpK0Wi1FRUXs2bOHeDyOyWSitLT0Z86CvB61Odfc3BwHDhxgZGQEj8fDgw8+iNfrFbWKFouFbdu2kc/nmZubE2n34+PjjI2NCTmo0WhwuVyrjh8IBAgEAsTjcSKRCJlMRoxG8Xg8NDQ0EI1GsVqtovYpn89jNpulc+Ye4542khTlWn5vLquQzd79bm25rEIup3AnjeHUXF21CFJtd5vL5UT9gVp3U1FRwcjIiEhZyefzJBIJ0dlpYmKCpqYmpqamCAaDVFRUiDa7jY2N9Pf3k8lkmJycXNVeV+2UtVKpUgvE4edHT3Q6HS6Xi40bN1JXVwcgcp5vJ91u5XnVTjyAGCq58nO5XI5sNsvMzAxHjhyht7eXuro60RVr5TPV6XS0tbXR1tYmijrD4TDnz58XRpDBYKCoqEikICaTScxmM5lMRni5pXF075FXlH9et9dqhu4migK53LUud9yhjFi5VgHxv7PZrEiDczgcmM1mZmZmiMVion394uIi0WhUNIi5HSwWCw0NDZSUlAj5YzQabzvnPp/PU1JSwszMDOPj44yPj4to0uLiIgMDA4TDYbxeL263G5fLJeqY0um0SN1Vo72AuF+dTofFYhEDtIuLixkfH2d+fp5UKiWNJMmHg3KtmUIud01O3NHifT+ny1/TI+5UHgUCAaanp0Vtrzo0duXgVfipPFHX00rZoq65TCZDOBxmamqKlpYWSktLxfgAs9ksjK5QKHRb16bX66murhaOD3UAvMViuW3jIpvNEolEuHTpEqdOncLr9dLU1MSWLVtWDaZWHSyf/vSnSafTopHTj370I6LRKE6nE5vNdlMZszK9GH6qjwWDQTGLMRqNEovFOH36NHNzc7jdbuGcldw73LtGkgKZlEIsmicayWNM3X0jKZtRSMQU0ndwLlVYqZO9VeG2vLzM0tISNpsNk8lEXV0dX//618XEelWh6Orq4qGHHuLBBx9k+/bt11qXms1EIhFGR0eZm5vDZrMxNDTExMQE+Xwej8eDTqcTCok6UTuTyZBMJllaWsJoNOJ0Om8r2qTRaEQxqNox5v2STCaZnZ0llUoJ42VsbIyenh5GRkaorKzEZrORy+UYGxvjrbfe4rnnnuOpp55i9+7dNDU1kUgk0Ov16PV6FEURBqTD4RCdts6cOcP8/DwbN26kuroavV7Ppk2bePfddxkfH6enp4empibGxsZEe9Hm5mYZTr+HyOWudZuLRnLklTUZk0Q2nSeZvDMFKJ/Pk0wmSaVSwnhPJpMsLy8TiUSwWCyYTCaKiorw+/10dHQwNDREMBhkaWmJc+fOEY/Hqa6uxufz3dY51Qn1t2tUXU9NTQ1f/epXefrpp4XTQm2139HRwdNPP82GDRuw2+2Ew2E2bNjA2bNn2bRpEzqdjkQiQW9vL2azGafTiclkIhwOMzc3h8vlWtWlcnx8nFwuJyJSEsmHQTarkEpe61y7FpMxlDwkEzlSSeWaQ+V9oNFoaGpq4urVq2LwqdlsZmJigpGRERYWFti1a5foCqnu+5FIROgdkUiEcDgs1rzaJGlsbIyZmRkRRR4eHiYajWKxWG7bIaEaV9fXFL8fFhYWuHjxIt/61reoqKhg9+7dbNmyRTiDVna9HBsbE5kiiUSCgYEBLl68SHV1tWhbHolEmJubw2q1ipKA3t5e+vr6yOfz1NTUYDKZyGazonuew+Egl8vR29tLd3c3oVCIBx98UMyjktw73HM7iVqgbDCY6L+SJpVcwmrXrtkw2fBCnlTcTm2ZSSzW2yGVSjE1NcVzzz3H7Owsx44dY3p6mu9+97ucPXuWbdu20dLSIqbOqyiKIgSD3W4XYfVMJsO+ffs4deoUx44do6urC7PZzPz8PBqNhgceeIC2tjaMRiPDw8NcuXKFY8eOce7cOYaGhgiFQiwtLeFyufjt3/5t0QHmbhONRnnnnXe4cuWK6Fa3vLxMPp+noKCARx55BJfLRV9fH2+//Tbf/OY3cTgcdHV1MTk5KZpE7N27l/Xr1+N0OpmdneXv//7vSaVSZLNZUqkU0WiUBx54gD179lBTU4NOp2Pr1q088sgjdHd383/+z/+hsLCQZDKJVqultraWffv23fX7l9x91GhIdFFDR2eKaDiEwXj3u1apke2JoQwlAY3ouHS7hEIhTp48ydmzZ5mbmyMajXLw4EHGxsaoqqrioYceYsuWLTQ2NoqC5v/7f/+vaH0biUT48pe/TGtr65rNNDEajXi93lVdrRYWF
nC5XFitVnw+n0gRLisr4xvf+Abf/OY3+c53viO8y9FolP3799PY2IjVamV8fJzvfve7qyLs8XicTCbDtm3b2Lx58x07aSQSFbXb7PyUwuSlONNjGQxGzV3PsVMUyGWuyYnqUr3opHa77NixQ3SKfOaZZwgEAiQSCRRFoa6ujv3796PX65mdnaWzs5MjR44wPDzMzMwMx48fJxqNEgwGRYq6mj7/wgsv8MwzzwgDZ3FxkdraWrZt23bLgfd3g7Nnz/Knf/qnXL58GafTydtvv83Ro0dFk6bt27dTVFRELpfj7bffZmRkhEzm2rypWCxGUVER999/P+vWrUOv1zM9Pc33vvc9EomE2AMikYgYrKvWQEciEQ4dOsTly5dF6u/y8jIej4f77ruPPXv2yK529yD3nJGk0+nw+/1s2bKN0OKX0RkyaHWsTfKwArliMBntrKvfhMPhuG3hpubQVlZW4vF4hGDzeDy4XC4xq+f6AYmKomAwGPjKV75CU1MTJSUlIqKzadMm3G43ExMTxGIx4Np8EY/HQ3l5OU6nU5w3EAhQX1+P1+slkUiIwa1Wq3VN82xtNhvr1q3DbDYTj8cBRL1QYWEhtbW1ovFCc3MzX/nKV9BqtaK2Si3+drvdouW30+lk/fr1RCIREVZ3uVy0trYSDAaF8We1WnnooYeora1lcnKSTCaDyWQiEAhQWloqCsYlv9jo9Xqam5tJZxIUFgTRG3PXokh3W0b8s51QW6Shonwd61s3va/ZWwaDAb/fT01NDX6/n2984xvY7XbsdjsulwuPxyPe7XXr1vG5z32OsbExMpmMiAi1t7eLdb8WqLJqpcwqKCigra2NwsJCSktLxbq12Wy0tLTwpS99iaWlJZFS5/F4WL9+PYWFhRgMBtHOOBqNii6aRqORgoICqqqqKC4ulnUBkg+Euodu3LiRTCbO+NR69Ib8mskJRYHaYg0N9RvFfni7OJ1Odu/eTXl5OWNjY6TTaeGsKCkpEXPW1J/V1NTgdrspKSkREVtVphiNRnw+H+3t7eh0OsLh8Kp0+IqKCpqamnA6nXfxgaymtLSUxx57jF27dhEIBEQbbo1GI2qJ1dS5xsZGLBYL6XQaALPZTE1NDXV1dbhcLjGTsq2tTcxW1Gg0IiKv1jvCNRlTWVkpGjooioLdbqewsJCKigqcTqfMMrkH0Sjvd1LZxxy1PkctNlTT0tYKrfZaOLmkpJT6+vqbzju6GdlslkQiweDgIMlkctU1q7m1qsG0EjXlrrOzU7TMVucYpFIpwuEw4XCYSCRCPp/HZDLhdDrx+XzY7XY0Gg2RSES04FWVDhWDwUBDQ8OahJHV1EG1+5Xa9cpsNosomToDJRqNEgqFmJ2dvaGLl1arpbi4GJ/PJ+YbDA8PE4/HyefzwogqKipaNVcBrrVQVnOs4/E4ZrNZPHf1uUpB+IuLul5mZmaYmJhgenp6zYeP6nQ6CgsLKSoqorS09JbzSa5HTUVdWFhY1UJXVeiCwSB+vx+dTkc2m2VxcZGpqSkymQw6nQ6Hw0FpaalQKj4q1PSeWCwmnD9arVZ8N5OTk4RCIWHceTweCgoKhDMjlUoxNjZGPB4X8spsNuPz+XC5XCIyJZHcKeq7ODExwejoKKFQ6Ia98W6j0+koLS0hGCwRMw1vF3WNzc/PiwYJ6v6p6hDxeFzs+ytHAqiOxrKyMlwulxjYOjs7y9LS0iqDo7i4GIfDsaYRlNnZWUZGRm46xNbn81FYWChSBaenp8UcRNUhXFxcjN1uF+n4K+WJuheoUW6HwyFS8HK5HLOzsywuLhKLxdBoNDgcDvFcP0gKoeTjyz1nJMFP63tW1u2sFWqkRw2RS4VaIvn4oTZEUZuVrCWqErJy+LFEIvl4sbKRkNpNda1Q9QZVj5CRUYnko+GeNZI+LkgjSSL5+PFxkRFSPkgkH1+knJBIfrm5J40kiUQikUgkEolEIrlTZAxXIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFYgjSSJRCKRSCQSiUQiWYE0kiQSiUQikUgkEolkBdJIkkgkEolEIpFIJJIVSCNJIpFIJBKJRCKRSFag/6gvQHL3URQFRVGIxWKYTCb0ej1a7cfPPlYUhVQqhaIo6HQ6DAYDABqN5iO+smuo1weg0+nQ6/Vrdm2KopDNZsnlcuj1enQ63R2fW30fstksiqKg1WrX9F4kHw/y+TyZTIZ8Po/BYBAyIR6PYzQa0el06HS6D3SOeDyOVqsV6wU+HutZXQPJZFKsJ61WuybXpp47k8kACHn8QdZzPp8nl8sJ2flB5INE8lGiro90Oo1Go1klOz4OKIoCQCwWw2AwCPmxVuTzeVKplNi3P8i5FUUhl8uRyWTQaDSYTCbg4yGjPy58fN68Dxn1Rc7lcuTzefL5vPiZRqMRG7eqGNztl+L66wEwGo239flcLrdKibmTc6fTaTo6OqisrMTr9WI2m+/oWHd6/nw+TzqdFv9fZeV3odPpGB8fJ5fLYbPZCAaDH8liXfnOGI1GcQ2KojAzMwOAw+HA6XSuifBWFapQKEQikcDn82G1Wj/QubPZLAsLC2QyGUwmEwUFBSiKIoXjHbLS8NRqteLfyt/l83my2ezHZiPKZrOEQiHi8TiBQACr1Uo+n6e/v5/CwkJcLhdWq/WGv1PvM5vNiv+voq5ndfMeGBjAZDLhdrsJBAJrdm8rUWUogMFgQKPRiHsYHx/HbrfjcDiw2WxrZiSl02lmZmbQ6XS43W4sFssHUnbS6TShUIhcLofdbsfj8cj1/DFAld2qAat+HysdUxqNZtX+qHK9nrLyu1TXnKrcqu+0oiirnIsrP6eeV33PbqYbqXJLp9OJ32k0mhscHOrnVUNGvb5cLndLh4N6fblcTjjo
rr9f1SBKpVKMjY1hNBpxOBz4fL4P8jXcEeq15vN58UzUe8rlcgwMDOD3+3G5XNjt9jW7plQqxdDQEG63G4/Hg8Vi+UDHTCQSTE5OotfrKS8v/1gZpB8H7smnsdLAWFxcJBKJEIlESCaT6HQ6TCYTVqsVj8eDx+O5qfFx/cb/YbG0tMTi4iL5fJ6Ghoaf+dl0Os3CwgKhUIiKigocDscdnTOTyTA9Pc3nPvc5/viP/5j9+/dTXl5+R8e6E/L5PIuLiwwNDZHJZG54tk6nk4KCAgKBAN/+9rdJJBK0trbyr//1v/5INvlYLEYoFCIajdLY2CiERiaT4cUXX0Sj0bBx40ba2trWRKDkcjkWFhZ4/fXXmZiY4JOf/CQNDQ04nc47PmY0GuXAgQPMzs4SCAR46qmnxOYqef/k83ni8TjT09NC6V65cSaTSSKRCBMTE7S0tPxMB8laEYlEeO+99xgYGODRRx+lrq6OTCbDf/7P/5nPfvazbNu2jZqamhv+Lp/PMzMzw+TkpFC+VPR6PS6Xi8LCQtxuN3/wB39AUVER9913H7/6q7+6VrcmUBSFSCTC/Pw8AFVVVej1evL5PNFolP/3//4fzc3NrF+/nsbGxrseYVeNs5mZGf7xH/8Rj8fD/fffT01NzQcykmZnZ3nttdeIxWK0tLTw0EMPral3W3JzMpkMo6OjzM3NCYNDp9PhcDgoLCzEZrOh1+uJx+P09PSI/VGr1WI0GvF4PLjdbhwOxw17TTabZXR0FIvFIgytVCpFWVnZqswARVEYGRlBq9XicDgIBAJkMplVulE6nUav12OxWMS1LS4uEo/HMZvNBAIBsT+s1K8GBgYwm81YrVaMRiNzc3MUFBRgs9lukHGqwTg9Pc309LRwsqhoNBoCgQBlZWXMzs7yZ3/2ZxQVFbFlyxYeeeSRu/UV3ZJ0Ok04HCYcDuNyufD5fGJNpVIp/ut//a888sgjbNu2jXXr1q3JNWWzWcbGxvhP/+k/8elPf5r777//A+tyw8PD/NVf/RV2u53f/d3fpaCg4EO62nuDe9ZIUiMn77zzDmNjY4TDYfL5PDabTXg2Kysr+dSnPoXP57upsns3DKVjx47x1ltvkUgkeOaZZ37mPSwuLvKTn/yEN998k9///d9ny5YtH8o1rDWpVIrjx4/zH//jf6SgoICioiLxO41GQ2NjI3v37v3IPM3Xc/XqVd58801OnDjBD37wA2GMGAwGvvSlL4mw9FpF4yKRCKOjo1y5coXt27cTDAY/sOfK7XZTV1dHPB7n6tWrjI2NUV5eLg2lOySTyTAwMMDf/M3fsHnzZtrb29mwYYP4/eTkJMeOHePP//zPeeWVV/D7/R+5x87j8fDYY4+RTqex2+0YDAYUReGb3/wmDofjlu93IpHghRde4JlnnsHj8VBcXCx+Z7FYaGlp4cEHH8Ttdq/Rnfxszp8/zwsvvADAf/kv/wWXyyUiOP/+3/97TCaTSC9cC+bn5+nt7WVycpKdO3dSUFDwgY3m4uJiysvL6enp4cKFC7S3t+N2uz/yd+yXnVAoxLe+9S2OHDlCMBjE7XYLvWL37t20t7dTU1PD0NAQX/7ylwkGgwQCAfG9uVwu2tvb2bFjxyqnai6XIxwO84d/+Ic8+OCDaLVaQqEQk5OT/N7v/R5er1e8U7lcjj/7sz/DbDazZcsWPv/5z9PR0cHhw4cZHBwkEomQz+fR6XRYrVYqKir4tV/7NY4fP87JkycpKiri85//PFVVVeL8ambD7/3e77Fz507WrVuH0+nkueee44tf/CKtra2r9nn4afbCd77zHd58801sNtsqJ7VWq2XXrl089dRTd/U7uV0WFxc5dOgQx44dY/fu3XzqU58SURuz2cz/+l//C5vNtqZZOaOjo5w6dYqZmRkefPBBCgsLP/AxGxsbqa+vZ2BggB/+8If823/7bz+EK713uCclaCqVYnJykpdffplQKERBQQFbtmzB6/ViMplIJBKi9kUN+05OTjI9PS1CvYuLi+j1ekpLS6msrGRiYoKZmRkWFxdRFIXCwkLKy8txuVwYDAby+TzDw8PMzs4SDofJZrO4XC7Ky8vxer1YrVYWFxe5cOEC3d3dJJNJnn/+eZxOJ9XV1QQCgRsiRapnaHl5+Qavi0omk2FhYYHe3l5isRiKomA0GikvL6e0tFSk9lxPLpdjeXmZ3t5e4bEKBoMsLi4yMjLC4uIiiUQCq9VKZWUlBQUFQjGfnp5mdHSUcDhMMpnEaDQSDAYpKyvD6/XecC7VaE0mk2zZsoUHHnhA/E6j0eD1elcpWtcTi8WYnZ1lcnKSxcVFjEYjxcXFFBYW4vP5yGQynDt3DpvNRmVlpVDOYrEY8/PzDA8Ps2nTJqxWKwsLC0xMTDA3N0cmk8Fms+Hz+SgtLcXj8TA/P09/fz8dHR2MjIzw0ksv4fP58Pv94j3QaDT4/X4KCgrQaDSEw2GGh4eZn58nHo9jt9uprKzE5/PhcDjQaDR0dXWJ+gyTycTIyIhIcysuLsbv94vncf07EIlEmJ6eJhqNUlZWJtKCUqkUb7/9NjU1NSSTScLhMMvLywSDQYqLi8nn88zNzTExMYHFYqG8vJxgMIjFYkGr1eL3+3E6ncTjcUZHRykuLpa1SXeImpoRj8dJpVIivUtFTadRFRI1kjE7O8vw8DCJRAKdTifenZKSEvL5POFwmKmpKQwGA5WVlcA1xevChQtCNlksFhKJBH19ffh8PjweDzqdjtHRUSYmJkT+usPhoK6uThho6XSaqakpwuGwWDf5fJ5Lly5RU1NDUVHRLZX3VCqFwWBgx44d7N27V/xcr9fj8/kIBoM3fUZwzeifnZ1lYmKCaDSK1WqlpKREpPiFw2GuXr2Kw+GgtrZWKCZLS0vMz88zMTHBtm3b0Ov1zM3NCXmVz+dxOp34/X6xTsbHx+nr66O7uxtFUXj55ZcpLCwkEAhQXl5OX18fXq8Xv98vUtTm5uYYHR1lYWGBdDqN2+2mqqoKj8cj0g/Pnj0rUnByuRxTU1PY7XYKCwspLi6+pYGoRoXHx8fR6/ViPeZyOWKxGIcOHaK+vp5EIiHOX1FRIeTc5OQks7OzYm8JBoOiLqKwsJDJyUlGR0eZmZm5afRBsrYoikIikSAQCPDwww9TX19PPB7nyJEjXLp0SWS0ZLNZIpEITzzxBDt37sRoNDI1NcWxY8fo7e3F5XJRVVUl1mMqlRJ7W0FBAalUiqmpKRKJhEjnX4n682QySTwe5+TJk0xPTxMIBNizZw8Oh4NUKkUsFsNsNmM2mykqKkJRFObn5xkZGRHyRz1eZ2cns7OzFBUVUVxcTDgcJh6Pk81mb3oN6vNIJpOUl5ezadMmmpqahHNCq9VSXFx8U51FlR1TU1PMzMwwOztLMpnE7XYTDAYpKCjA4XDQ09MjdK+ioiLh+IlEImLv37p1q1hf09PTLC4uCqdJQUEBlZWVKIrCxMQEAwMDdHV1odVqyefzlJaWUlxcTGlpKZcvX6aiooLCwkKMRqOI7E1NTRGJRMjlcgQCAerr63E4HOh0OuL
xOP39/SL9OpFIsLS0hNvtprS0lJKSkls6KvP5PAMDA5w/f5729nacTic6nY5kMkkoFGJ0dBSTyUQsFmN5eRlFUWhqasLpdBKLxRgcHCQajYrvy+/3YzQa0ev1tLS0sLS0xOHDh/mX//JfCh1Bcg8aSYqiEI/HGRkZ4dKlSzQ1NdHU1MSGDRsIBALodDqWl5dFmNlkMqHRaJicnOTMmTOk02lsNhuxWEyEjO12OxcvXmR6eloYIkNDQ8TjcaqrqykqKhJG0tjYGIuLi2QyGfR6PeFwmOrqaqqqqkgkEsKISqfT9PT0iM3Z6XTeUTqdquioSngul0Or1QqDr6Cg4IZFp6YGXb16lcuXL1NYWIhGo8HlcnHp0iWGh4eJRCJkMhkMBgNzc3M0NzeLSMPly5cZGhpieXmZfD6PXq8nFovhdDpvaiSp6HQ66uvr2bNnzypFfGWe9PXfpSp4+vv7hTKp0WiYmJigoqKCzZs3o9PpOHv2rBBEqoISCoXo7e3l3Llz1NfXYzKZmJubo7OzUygger0er9dLLBZj06ZNLC8vi5TIeDxOX18fS0tLZDIZAoEAnZ2daLVaGhoa8Hg8aDQaLl26RHd3N6FQSBROLy4u0tDQQGVlJTabjY6ODqEYFRUVMT4+TjabpaioiNraWux2+009Uvl8nqWlJebm5oRxrr6zqVSKl156ic2bN2MwGITAHR4eFmlFoVCI8fFx4vE4jY2NZDIZGhsbgWteSvWdU5Vpk8kk03TWgHw+z/LyMqOjo3R2dgqnjcFgYGJigkcffRS9Xs/S0hI9PT0oikIgECCXyzE0NMSrr75Ka2srTqdTpMacOHGCTZs2odfrMRgM9Pf3MzQ0RDqdFsdeWlpi165dOJ1OUqkUg4ODjI2NiZqcbDbLO++8A4DVasXlct3yHmw2G+vWrVtlJMFP6y1uRiqVYmBggMHBQcbHx0mn02i1WqampqiurmbTpk3E43HOnDkjUo3KysoAmJmZobu7m97eXtra2oBraWYdHR1Eo1Gy2SxGo5GCggLy+Tz19fWEw2EWFxdFJkFfX5+Qk16vl7Nnz1JTU4NWq8XpdJLNZjl79ixDQ0OEw2E0Gg0Gg4FYLEZtbS1lZWUYjUZOnTpFOp3GZDLhcDiYnp4GoKysjFQqRXNz800dDmrt0OzsrKgdMhqN5HI5IpEIL7zwAnv27EGr1RKJRISRXFJSAlxzUM3MzJDJZGhpaUGr1VJeXi6cTXa7neXlZebm5qioqFhVUyn56AgEAmzcuJHNmzezvLzM2NgYR44cYWpqiuXlZfG5+vp6du7ciclkore3l+PHj7OwsMDk5KR4vwGh4+h0OgoLC1laWrqt61BrWoaHh8lkMlRVVbFt2za8Xi+JREI4eC0WizD21T1lx44d4n1aXl7mzJkzOJ1OysrKCAQChMPh234excXFrF+/nu3bt6/ST1bWiV/P4uIiV65cETpWNpvFbDYzOztLVVUVjY2NDA8PMzc3R1lZGW63WxhJ09PT9PT00NfXx6ZNmwiHw4yNjTEwMMDi4iJwLVNENawKCwtZXl4W6XZTU1P09vaiKApms5nCwkIOHjzI7t27MZlMOJ1O5ufnOXv2rNAT4ZrTKJPJUFtbSyAQEM9NlXs6nY5QKEQ+n6epqUk45m9GOp1mbGyMoaEhPvvZz4rvIplMMj4+zttvvy1kXywWE/IjGAySTqcZGBhgfn4ej8cjUovVSFR1dTXd3d3iM8FgUBpJ/8w9ZySpnve+vj4ikQjt7e1s2bJFbLSJRAKXyyU2fzXNYmJigsOHDzMwMMC+ffvw+/2YzWZyuZxI21MXkU6n48SJE6uKnhVFEV5ZjUaD0Wiks7OTyclJQqGQCKFbrVbhKVGNow+imGYyGSFojUYj+XyehYUFLly4gN1uZ9OmTatCsqpna3Jykpdeeol4PC6iHTMzM7zwwgtYLBZcLhdOp5PFxUWhhGQyGXw+H6+99hparRa3243f7yedTjM3N0c8Hv+516t6UNSN++dt4LFYjGPHjtHf308ikaCyslJ4mru7u0VEr7u7W3yfLS0tAIyNjXHx4kXOnz/P008/jaIoLC0tMT4+Lr4HVehMTExQVVUlvju1C6Db7cblconozYULF9Dr9djtdqqrq8nn83z3u98lFovhdrspLy8Xx1S95JWVlVy6dIkTJ05gNBrZsWMHXq+XoaEhRkdHmZycpLKy8qaNKtRI4czMjIh6rfQm/vjHP2ZwcJCWlhaCwSCpVIoDBw7g8/lEBMxoNHLhwgWxuahGksPhwO12i+cQj8ex2WzSSPoAqHn3yWRSbJTqmltZmJ3L5VZFC8xmM5FIhLGxMd544w1aWlooKSkhHo8zNDTE9PS0MCCuXLnC66+/zvDwMBs3bsTtdjM7O8u7775LaWkpBQUFQi6om/ry8jLj4+OcPn2a0tJSamtrSSaT9Pb20tHRQUNDA8XFxaTTaV599VVKSkooLy8XcvNW96quZ5WV7+/N1nYoFOLQoUOrIimTk5NiPaspPaqXvbS0lLKyMhRFob+/n9OnT4t6Bo1Gw9LSEhMTE3i9XvL5PIODg/T397O8vExpaSl6vV6k0ymKIuSa2WwmnU5z7NgxMpkMXq+X0tJSwuEw//iP/4hOp8Pv91NUVERnZyczMzPCERQIBDh27Bjj4+N4PB62bduGyWTi6tWrTExMiMic0+m84RkkEgnm5+dZWFigqKgIu92OXq8XCuoPf/hDFhcXWbduHT6fj3A4zKVLl8R69nq9GAwG3n33Xaampsjn86Imwe12C0NvZmaGdDr9gRtCSD4c1AwCu92OyWQShm06nRYdU+Gn+6PaTEGt+bs+Oh0Khbh48SJ1dXX4fD4SicRtX4va1VKtz1b1D7fbjc/nE8pxQUEBZWVlwlkYjUaFY3BxcZEDBw6wceNGysvL37eDV5UdK+XHz9IFFEXhypUrHDhwgEQiQVFREQ6Hg4mJCYaGhhgaGqKgoICFhQVOnTrF0tISdXV1OBwO8vk8vb29nDhxgtnZWTQaDdFolKWlJWKxmIjEX7p0iYsXL5LNZvn85z+PwWAQuoDNZhO1Yape+Nprr4moeUFBgZDLDodDNFQ4fPgws7OzPPzww9x3331Eo1GOHz9OKBTC7XbT2tqKRqPhyJEjIkvlVkZSOBxmcnKSubk5Nm7cKNZ1IpFgdHSUH/3oR2zatIny8nKsVitDQ0OcOXOG+vp6kRWQSqU4evQo4XAYs9kssmHUe4jFYvT39+P3+2Xq/T9zzxlJmUyGcDjMxMQEgUBALCa4ttB+67d+i4WFBbRaLR6Phy996Uvs3LkTuObFcLlcfPazn6WiogKz2Uw4HOaP/uiPqKmpYfv27WzYsAGtVsurr77Km2++SSqVYufOnVitVvbv3y+UbK1Wy6lTp3j++ee5evUq27dvp6amhs2bNzM7O0sikeArX/nKDZ2w3i82m42tW7eyZcsWzGYz2WyWubk5/uZv/kZsrh6PR3w+Fotx4sQJTp8+TSaT4Wtf+xq1tbXE43F+8pOfoCgKjz76KOvWrcPhcBAKhX
jmmWcYGBhgeXmZHTt2MDIywpNPPkl7ezvV1dVks1lSqdTPrZOJx+N861vf4gc/+IEQiE6nk7a2Nvbt23dDzZWq9Bw7doyqqiq+9KUvUV5eTjKZ5Lvf/S7d3d288cYb/M7v/A47d+7k7NmzHDlyRBSI9/b2Mj09LRRJvV5Pa2srVVVV2O12tFotV69e5fz585w4cYLx8XHWrVtHQ0ODCD//2q/9Gi6XC61WKzr5rHyWFy9e5PDhw3zjG9/g4Ycfxu/3MzAwwP/9v/+X/v5+kcqnvptVVVV8+ctfJhAIcPbsWY4ePcr4+DgjIyMUFRXd8B6oqZ9LS0t4PB5MJtMNnwkGgzzwwANs376deDzON7/5TSYmJiguLmb//v3U1NTg8/l4+eWXefvtt/mt3/otALFpO51OoQSqnm3JnTE7O8t3v/tdvv/974taNrX9/kqPscFgoLi4WKSemkwmUqkUPT09/Mf/+B955513+NSnPoXb7aakpISuri6ROjk+Ps7+/ft57bXXmJ2dpbi4mOnpaRKJBCUlJQQCAUwmE08++aQw9hOJBCMjI/z+7/8+58+f/7lRotthZGSE//k//yff/e53xc/UQuv9+/dTV1cnfq6m3J49e5b+/n5qamr4/Oc/j8fjIRKJ8Ld/+7d0dXVx+PBhHnvsMdavX8/Q0BBHjx7lgQceIBKJ0N3dzfT0NA888AAWiwW9Xs/GjRupqanB6XSiKApnzpzhwoULnD17ln379lFeXk5jYyN1dXUoisKXvvQlXC6XqOlYyfLyMi+88AJDQ0N87WtfY//+/VgsFk6fPs1zzz1HZ2cnFRUVIjVWr9dTVVXFr//6r+NwOHj77be5dOkSPT09zM7O3lRxTCaTRKNR4vE4Xq/3BgNGo9FQW1vLww8/TEtLC1NTU/y3//bfSCaT+Hw+nnzySQKBANlslo6ODt5++20++9nPAtf2AtUAnJiYIJFIiOi65KNnZZdSVRm3WCwicwXg29/+Ns899xwACwsLJBIJdu3aRW1t7apOk5OTk7z77rv8q3/1r96XgaIaQ/fddx8vvvgizz77LN///vdZt24dra2tbNy4UaSH2e126uvrmZ+f5+LFi4yPj+NwOFhYWKCjo4OOjg7+4A/+gGAw+L6MNIC33nqLw4cP43a7xX7m9XrZv38/X/rSl254bqlUiu985zuUl5ezd+9etm3bhtVqZXBwkNdee42xsTHOnj3L5s2bOXLkCJOTk0xMTBAMBslkMly5coWBgQGeeOIJLBYLVVVVlJaWks/nMZlMZLNZDh8+zNGjR3n22Wf51V/9VcrLy6mtrWViYoK9e/fy5JNPYrVa0Wg0q+43lUoxMzPDgQMH8Pv97N+/n9bWVkwmE83NzfzJn/wJWq2W2tpa4JoeUFRUxMaNG/nMZz4j9Laenh7efvttPv3pT9/0mU1PT7OwsABARUXFDevaZDKxZcsWduzYQXl5OaOjo3z605/G6XSyadMmHnvsMZaXl/nbv/1bIpEIvb29IgtFjYZ5vV76+vpoa2vDZrO9r+/0XuWeM5JWzoxYOf9C5amnniIUCjE4OMjJkyeZm5sT9T4mk4nGxkaKi4uFgpPNZjl69Chnz57lzTffFD9Xc+p3795NMpnEYrEwODjIuXPnRDrX7Owso6OjtLW1EYvFRCtN9Zo+jFQItbbh+9//PiMjIywtLRGJRBgcHGTPnj1EIhHhqVIUhX/6p38SXsl/+2//LTU1NaJeqru7m3fffZerV69it9vR6XRkMhlGRkYoLS1l27ZtFBYW4vV6efHFFzlx4gRNTU20tbWxcePGn6tcG41G7rvvPnbt2iV+ZjKZKCwsvMF7oqbaDQwMcOXKFU6fPs3x48cxm83k83nGxsYAhHdt06ZNjI2NcerUKXp7ezGbzSKlbcOGDaID0NzcHOfPn+fw4cPMz88TCoVYWloikUiIPN6VLUyNRqNoG3x9J69UKkVXVxfFxcVUV1cLz3V1dbXYOKanp8XzLygooKKiQuQd+3w+YViq577Z96vOR7rV+9LY2EhFRYUwolQlqri4mOLiYhHNMhqNQplWU/vUeVQr29RK7hy73U5VVRUVFRWrcvinpqbo6ekRqWxwzcju7u7mtddeY3x8nFgsRjgcpre3V6TzFhYW4vf7RS3L0NCQKNo9e/YsU1NT6PV6hoaG8Pv9BAIBbDYby8vLnD9/XnQwXF5eFl7CsbExotHoBzaSvF4ve/fuZfv27avuf2WNnYqqIHZ3d3P27FmOHTvGwYMH0ev15HI5BgcH0ev19Pb28sQTT7BhwwbxLGZmZhgdHSUUCmGxWGhsbBRpNBMTExw/fpzTp0+zuLjI/Pw8y8vLmEwmkskk8NO2xmrKobqer19LqVSKy5cvU1tbS2VlJYFAAI1GQ319PS6Xi3Q6zfz8vJA5wWCQmpoaCgoKROTJarUKeXKztaS2I1dnU8GN0bempiaqqqrEOd1ut6hZKCwsFPWFquGsoqZZ6nQ6EonETdssS9YeRVE4fvw4o6OjuN1uUZ/70EMPiT11cHAQgL1799LW1kYul+PEiRPCCdvW1iaUYjWFempqiq1bt77vFtAGg4Fdu3ZRWlrK2NgYo6OjjI6O8tJLL/Haa69RV1fH17/+dVF7U1xczLlz5+jv76eyspLOzk6Rwt7Q0IDNZnvfRtK6detobm6moaFB6Geq8XI92WyWpaUlRkdHuXz5Mm+//bYwrtRovMvlorGxkYceeojS0lIWFxe5ePEiGzZsEI2JtFotO3bsQKvVEo/H6erq4uzZs3R2dhKJRJibmxO1jStbpqvpw6oucP2aUmtNQ6EQra2tlJSUiIjcjh07sFqthEIhxsbGKCkpEfVepaWlwlFSXl7O4OCgGDFyM5LJpNBrVRmmotVqReQvEAjgdruFvhAMBkXNqdFoxOfzEQqFRFdDVQ6p3Q1jsdgta8p+GbnnjCStVovBYMBsNpNMJkmn08II0mg0rF+/nnA4jKIonDx5ctVGotPphKKpbt6qF7ihoYHS0lJR75LJZEgkEjQ1NWEwGJifn+f1119nbm4OnU5HTU0NXq+XaDSKVqu9QcFWr/WDoCgKCwsLPP/883R3d+P1ekURqBqeV5VflWw2K2qJ1NCxeq+RSISioiLWrVuH2+1Gp9OhKArr1q2jsLCQhoYGvF4vjz76KFevXhVd19Rw944dO6itrb3lfel0OqqqqtiyZcuqeREWi+WmUShFUQiHwxgMBsrKymhraxPHbmhowGg0Ul1djU6nIxAIUFBQgMViobu7G4/HQyKRELnVOp2Oubk5Tp06xcGDB7FaraJhxuTkJENDQ6uek6pEqTMg1J+tRK0rMZvNWCwWIZQsFgtms1k0CFEFjtlsFt0V4aepnur3cjOun0FxM5xOJ1arVcyYUFME1H/qM9Zqtavm26jPWK1ju5niKHl/mEwmSktLaW5uprm5Wfx8aGhIKO1wLXWiv7+fw4cPs7i4SFFREXq9nmg0yszMjCiANhqNQuaoRcvpdJra2lpqa2tFmuvc3JxoAqAaF2+//bYwlrVaLclkk
rm5uVUy8YNgtVqpqalh69at4mdqSvHNvJCqQ0c1pNS0WLhWi2G1WmlpaUGv11NcXIzP56Orq0vIl0wmg8fjwe/3o9FohKPryJEjq5ovzMzMsLCwcIPRr66lWw1uVRSF5eVl7HY7FotFKETqTDK1+YZ6TFVuqc4hdT2rTrqbsXI9q3LheqXL6XSKNDm1KcNKGaPVaoWTY2Wq1so0rY/rwPBfRtR6X9WRpuoZ7e3tovGKSmVlpaizNZvNXLx4UTRwUR2JY2NjotW23+8XMwZV+X79e69GcU0mk1Cu1SZWhYWFVFZWiprA06dP09nZyZkzZ3jsscdEUyWv10t3dzebNm2ip6eHgYEBtm3bdseRykAgwLp169i8ebOoX1Qb11yP2uAgkUiIdGKv1yvWUD6fx+12U19fj8ViobKykmg0Snd3N+l0mkuXLpHJZAgGgxQWForOxxcvXqS/v59AIEAwGMTr9TIyMsLAwMBN25OvnEe1EnUPzWQyWCwW8ZwBoUdls1kh/9X1bDabxefMZrOoM74V6ppeuc5XyrGVMkKVwxqNBqvVKmSYamABqxpsrJzjd70B9svOPWckqS+Hx+MReadqZxetVkswGMThcDA6OnpDzuXKVLmVP7NarWzZsoUtW7as8g4riiKU3v7+ft555x2qq6vZv38/LS0tjI+PEwqFsFqtwhi7fvDb9ef/WVz/+Xw+z+zsLN/73vdYv349O3bsoKqqSmzSqjG08u8qKirQ6/XMzMzQ29u7KgdZq9VSX1/P448/Lop+VYxGIzabDYfDwSOPPEJDQwMDAwN0dnbyzjvv0NXVhd1up7y8/JYtMbVaLT6fj4qKihvu9Wb3rj4rr9fLhg0b+MIXvrDq2DqdTigTVqtVzFq6cuUKwWCQXC4n8qw1Gg1jY2OcPn2aM2fO8Lu/+7u0traKVAK1a+HK867Mm74VBoNBGB7qO6YOy1OvUb23m03Hvn443/WoeeNGo1F4km52DSuNnJVDCK+v/VqZA656zNTahQ8ysFhyDbWOrbi4mIqKCvHzdDq9qqlJKBSir6+Ps2fPsmnTJnbv3o3P52N2dpbu7m6h6KhGkl6vZ3BwkLm5OVEb2dzczPz8PKOjo2SzWbZs2SK6Yl25coUzZ87wxS9+UXQ4ikajXLly5YY6ojvFYDDg9/tX3afKrdaz2sGqtbWVJ554YtV6UOfH6HQ6YfRYLBa6uroYHx9Hp9OtSp++evUqZ86cob+/n9/7vd+jqqqKoaEhLl68yPHjx1edV1Wobmc9q0ak+ozU/63OuFGPpyqnN7vnW51DVVL0ej3JZPKmn7teTqw810rnErDqe8xkMqRSKTKZDA6HQ67njxGVlZU8+uijtLe3ixEkqq6x0nmi7o+qo0GtI+3t7aW+vh6DwcDg4CALCws0NjaKzAq1fkatcVLlhxrBjcfjwpkG194fu92O3W4nGAzS0tJCMpkkmUzy2muvce7cOR555BHRsbGkpITu7m6Ghobo7e1lbm6Or371q3esUKvHvdnYiZ/lYFi/fj1tbW3U1dWtcl4aDAYx3L2mpobBwUEuXLhAOBzmzJkz6PV66urqsNlsRCIRLl68SG9vLzqdjk984hMUFRXR1dUl6p+vH/77s3SBlXuuaqSq8kJ1kq5sZnMzZ83N1vPNnpna5EWtpbo+mnSzY66UU7fSP1VDOh6P33J26C8r95yRpLa6raqqIhKJ0NPTQ0FBwapai5VK7c87ltFopKqqilQqhdlspry8fJW3UE3j6O3tRavV0t7ezuc+9znRsSmTyayKIqnRBtXzsHLTvRWqp+J670Y6nWZxcZHh4WH++I//mPb2diwWC1NTU8Tj8ZsaYQ888ABFRUVcvHiR//7f/zv/6T/9J1pbWzEajVRWVtLT0yMEp6qMqEXSqqKh0+loaGigsbGRT3ziEzz66KP82q/9Gn19faKzzM/iZtd1s5/pdDpqa2tFQwqdTkdlZaV4fqrwgZ968GtqanjjjTeoqKjA6/VSUlIivE5qXVV1dTVf+MIX0Gq1XLp0CUDkhMPqFDRV6Knf80pMJhO1tbU8++yzzMzMiEYNU1NToiWwWh9yp6jt2T0ej4hGqJ6iD4o64DQSiVBWViZbBn+IrIxArnSOqKh1KQ6Hg4cfflh0NkqlUoyPjwuvsdFoFGl0p0+fxmQy0draitlsZvPmzXznO9+hv78fr9fLl770JYxGI0tLS4RCIcrLy9m/fz+FhYWi++bU1NQtlZA75Vbr+fo0MoPBIAwZtYmCOhtNdSKp75/VahWtat955x2RNqvWSwAMDg6SyWREvr3arjibza6SfzqdDqPRKNryqp7Y6zEajdTV1fH6668zOzsrWqerzRiuHyh5J8qhxWIRyqoa7fqwULtxqR53Nbos+eix2WyUlJSscrLCje+QqujqdDpKS0upr6+no6NDtH0uLi7mypUrzM3NiRponU6HzWbDbreLtDGPxyNGk8zNzYl9WZUFasRCPZf632ozHzViqtFo8Pl8tLS08Prrr/PKK6/Q19eH2+1m3759H/j9ul5Zv9maMhgMBAIBfD4fy8vL5HI5SkpKsFgsq6Iq6rU0NTXR2dnJgQMHOH/+PIcOHeLTn/60qHkOh8PMzs7i8XjYunUre/bsIZ/P09XVtapL4Mp6cTUjZ2XTqZXXpxqcU1NToqGXwWCgq6tLRM/VDpV3SjAYxO/3iyY2NzNmVjpKr//5zyKVShEOh5mfnxedgCXXuCc1Io/Hw5YtW3jyySc5dOgQXV1dtLS0UF9fj16vZ3x8XEy3VgcL3gy1kcNTTz3FSy+9JDw6JSUlLCwssLi4SFlZGQ8//DB1dXUYjUbOnTsn2tC+9dZb9PT0iII9uNYi1ul00t/fz8svv0xVVRXFxcW4XK5VhZkrWVxc5Pjx42JqPFxT0Kurq0Vx5Q9+8ANmZmZEV5NTp07R2tp6w7H0er1oiZ7L5fje977H3r172bRpE48//jhnzpzh+9//vsg51miutdt2OBz4/X4KCws5derUKq/UsWPHRD7syiYR15PL5ejr6+P48eOrFq06Bfz64XM6nY7GxkbWr1/PyMgIf/EXf8GDDz6IyWRiZmaGfD5PdXU1999/v+iUVV9fz9/93d8xMzPDk08+KdprA1RVVeF2u+no6BDT7i9evMjFixdFQSQgOl0tLCyIjmF+v/+G2SdqVO3v//7vefHFF+nu7mbDhg1cuHCBUChETU2NqJ+4U4xGI0VFRZSXl9PZ2cnExIRoS/9BWVxcZGFhgVQqRW1trexst4Y4nU5sNhvxeJy33npLzGG7dOnSqgiDmlpVVVXFa6+9RkVFhZh/1tbWxt/8zd8wOTmJ3W5n3bp1mEwmkZI2NTXF66+/LuTV6dOnV6WLfVDi8Tg9PT03RG1cLpdIl1v5c6PRyO7du+ns7KSrq4tvfetb7Nq1i3w+z+TkJADbt29n/fr1WCwWysrKaGho4Ac/+AGlpaXs3r2bkpISYUhVV1czMDBAR0cHP/7xj8nn86LGYGWNhNfrpaysjBdffJHDhw9TWlqKy+W6QRFwOBw8
rKwsIC8XicTCaDw+GguLgYt9uN3W4HIJFIMDExwdzcnJhybrFYyM3NJT8/n7y8PHHeWCzG9PQ04+PjpFIpTCYTTqeTyspKdUK5yg1RBkROTEwwOjrK2NgYmUxmRa9Bp9NRVFREWVkZVVVVtzR0WpIkotEo8/PzTExMEIvFcLlceL1eiouLl8iCcDjM2bNnb3ie0tJSCgoK8Hg8Qi7IskwymWRmZoaBgQHKysrIz8/H6XSK6w0EAszPzzM5OUkqlRJ7taCgAJfLtWIy5oOQZZloNIrf7+fKlSskEgksFgsul4vi4mJxjXDVcJ6ZmWF0dJR4PA6A2WymvLycvLw8jEYjcFVWDw8PEwqFSCaTaDQaGhsb8Xg8qtF7l6DIibGxMS5fvszs7OyKG0k6nY6KigoqKiooKiq6JUMpm80SCoXw+/1MT08TjUaFHpCfny9kkLLuZDJJf38/wWAQWZaxWq3U1NTgdDqFTJFlmXA4zNjYGIFAgFQqJZ6/DodD7BdZlpmenmZubo75+Xmy2Swej4eCggLcbjdms/kTuV8fF0XO9fX1MTU1tURftNvt5OXlUVVVhdFoJBgM4vf7mZ2dJRKJoNFosNls5ObmUlpaitlsRqPRIEkSqVSKwcFB/H4/6XQanU6H1+uloqICk8mkDhL/lHBXGkkLCwu0t7fz4396jowMskbDSj2mNbJMjt3OvZs3U1tbi8PhWJayHg6H6evr4/Tp08zOziJJEiaTCa/Xi16vp7y8HLPZTDwep6enhwsXLjA/P08ymcRkMlFZWcmOHTuoqKggJycHgIWFBbq7u+nq6mJ4eBiNRkN5eTmtra1s3boVh8Nxu28HcFUIBYNBTp06xfj4OMFgkFQqRW5uLuvWraOhoYGqqipMJhPRaJSuri66u7uZn58nnU7jcDioqKigqamJzZs3i/swMTHB6dOn6e7uJpFIYLVaKSoqQpZlGhsbl22gqny2yGaz9Pf3896xo5w+d44MK6vIa2WJtc1r2H7PPZSUlGCxWD72ubLZLIFAgK6uLk6fPs3k5CTV1dVs2LBBKD8AqVSK2dlZXn31VaEEyLIsjKd9+/axZcsW3G63eE8mkyEQCHD+/Hl+8Ytf8NBDD7Fp0yZycnLQaDQkk0kGBwe5ePEi/f39JJNJbDabcBCtW7cOk8l06zfsFkilUsJh1tHRQSwWw263U1payqZNm4QjS5IkYUS2tbURiUQAsNlsbN68mc2bN+PxeNBqtSwsLHDs2DEmJiaEcvjss89it9tVI+kuIpPJcOnSJd46eIC+y5fJajSsnCZxVU7cs2kze/fsIS8v75aNpOnpabq7u2lvb2d8fJz169ezceNGXC7XEsMnmUwyNjbGgQMHhHHodDq5//77aW1tFY7YbDbL5cuXOXHiBD6fj0QiQV5eHvfffz81NTV4PB50Oh2RSISOjg76+vrw+Xyk02nKy8tZu3YtDQ0NFBcX33FnSjQa5d133+XMmTPY7XZxr4uKimhubqakpASDwcDExAQ9PT309vYyNzeHVqvF7XZTVlbGrl27KCsrQ6/Xk0wmmZiY4NChQ0xNTZFIJNDr9dTU1GA0GiksLMRqtaqZRp8C7jojSXmwd1+6xNGBfozV1WhtNjT6FbDaJYmMP4AtMI9elvnSl76E1WpdlnDz+/20t7cTDodpbGzEaDQSCAQ4cOAAly9f5utf/zp1dXWEw2Ha29tJpVJUVFTgcDgIh8M8//zzjIyM8MADD/DQQw8hyzLvvvsux48fZ25ujm3btgHw9ttvi8954oknkGX5tm/UTCbDlStX6OjowOPxUF9fj9VqpaOjg5///OesXbuWRx55hKamJpLJJJIk4XQ6qaiowGAw0NfXR1tbG+fPn8dms9Ha2kpXVxdHjhzh9ddf5/Of/zwej4eZmRn6+vro6enhd37ndygoKLjjXiqV1UcqlWJoaIhzvb0cGx3BXF8PBv3tf2DJMrIkkRq9QqJbJi83l927d9+SkaREzsfHx0kmk/h8PrLZLC6Xi507d4rjDAYDHo+H/fv3CyMpHo8zMDDAc889x44dO5ZEtGRZZmFhgZ6eHl566SXee+89KioqqKqqQpZlZFnm8uXLvPDCC7S3t7N3714qKiro7u7mzJkzdHR0kJ+fT1lZ2R01HIaHh/n5z3/O66+/zv79+2loaGBsbIyenh76+/txu92UlpYSiUTo7Ozkd3/3d9m3bx/19fXodDqGhob48Y9/TDabZePGjRQWFhIIBBgbG0Oj0ZBIJDh16hQPPfQQTU1Nd2ydKp8ssiyTSqXo6+vjTF8f3TPTmCor0Rj0sBJyIpMlNTqKtr2dqspK1q1bd8tyIhAIMDk5SSKRYGxsDKvVitfrZcOGDe9/rEw2m2VycpIf//jHdHd3s3nzZgwGA0NDQ/zFX/wF//E//kdqa2uxWCzEYjH+5//8n8BVY6K2tpYTJ04wPz/PQw89xL333ktOTg7vvPMOzz//PDqdjnvvvReHw8GJEydERszjjz++Yg7bDyKRSNDW1kZ3dzff+MY3KC0tBcDhcOD1ekXpRG9vL+3t7ej1ejZs2CCiRW+88QZTU1N87Wtfw+VyMTo6yo9+9CM6OjrYt28fbrebcDjM4cOHCQaDPProozQ2NqrRpE8Bd52RpISLM5KE3u3G3FCPIdeNZiUe1NkMSd8EuokpstkskiQtO9WvoqKCL3/5y0iShNVqRavVEg6Hqamp4b//9//Onj17KCwsJC8vj3/5L/8lGo0Go9GITqcjlUoRj8c5efIkbrebvXv3Mjs7y/Hjx7HZbGzdupUHH3wQnU6HyWTi/Pnz/OQnP+Hxxx9fkU1qMBhoamrit3/7tzEajZhMJjQaDfX19fzkJz8hEAgwODhIU1MT+fn57N27l0wmIwRTU1MTx44d4/z58wwMDNDc3ExbWxs9PT1s376dr3zlK5jNZubn5+np6eH/+//+P86fP8+2bdsoKSm57etT+XShKAMYjRgKvFhamtFZzHDbvcQyciYD2SzIGiEjbgWDwUBlZSUej4dIJMI//MM/kEgkbnic2+3mgQceEL+bmZlhbGyMuro6ampqyM/PF0ZSLBajs7OTc+fOkZOTw7p1665TZNrb25mZmaG4uJivfe1rmM1mqqurOXr0KOfOnSMQCNzx/ffee+8xMjJCfX09X//61zGbzSKy9Oabb9Ld3U1ubi7T09N0dXURj8d59tlnKSwsJJlMMjw8zOjoKFNTUwQCAcrKyqiuruZb3/oWmUyGU6dO0dHRcUfXqHJ7UGqSNFYrxuIirC3NaG/BUFn+B0vIqTRyJo2s1d2UHvFBGAwGWlpaqKqqIhwO88Mf/lCkwy0mGo1y5coV3nzzTb773e+ydetWLBYLIyMj/N//9/9NZ2cnNpuNvLw8zp49S1dXF9/97nfZsWMHDoeDsrIyfvzjHzMwMEBtbS12u52f/vSn5Obmcs899/DYY4+h1+txuVzCmbJp0ybq6+vveFRFo9Hgcrm45557aGhoQKPRoNVq0el0wtG9Z88etm7dik6nw2q1IkkSAwMDnD59muPHj
zMxMUEymWRgYIB3332X/+P/+D+45557yMnJIRKJ4PV6ee2116isrMTlct1x+ajy0dx1RtIS9Hq0RgNas2lFjCQ5o0NjMHysqJXRaMTj8QAsCX07HA5RV6XVaoVHWAlPazQaMpkMZrNZGIiSJDE3N0ckEqGoqIjy8nLcbjcajYa1a9cyPj5OT08Ps7Oz5Ofn3/aiUI1Gg9VqxWg0otVqRe6zw+EQn60oi3q9HrvdLh4K2WyWWCxGJBIhlUrhcDjQaDREo1GSySRutxun04nBYCCdTpOTk0MsFsPn8xGLxW7rulQ+5Wi1oNOjNRnRmkwr4iGWdDrQ6yFza8aRgkajwWQyodfrMZlMWK3WG9ZYLW4oA5BMJolGo4yOjtLQ0IDX68Vqtb5/mTKjo6MipWT37t384he/uC51VZFDfr8fv99PWVkZ4XCYZDKJxWLBYrHcccUnFAqJ+ke3241Wq8XpdGK1WkkkEkJOmEwmXC4X4XCY6elpIYsVuWO1WkW9gclkIj8/X6QX3uk1qtxmtBrQ69GYTGjNK5A+KstIGg0avR4+oXJqrVYrnsFGoxGLxXJDwysQCODz+Ugmk6xdu5aioiIRCc7NzWV8fBy/34/ZbKanp0ekmin1j01NTZhMJoLBIDMzM5SXl+P3+/F6vbhcLlHP6HK5SCQSzM3NEQqFRHT6Ttcvjo2N8dd//de43W4qKytpbGykqamJgoIC4GpkSdnzOt1VA9Zms2E2m8lkMiKFOR6PE4vFKCwsxOl0YrPZ0Gq15Obm4vf7mZmZYWFhQTWSPgXc3UYSXBVwGg2aFahNkTWa9xWtm9/oOp1uSVQnm80Sj8cZGhrC5XKRk5MjmjcsLohUiizHx8ex2+243W7gqnKgRKVycnKEoCspKRGh30AgIJSBD13X+5+j/BuuCl3F4PkowaYIFJ1OJ4RhJpNhYWFBFHsrxo/ys7CwQG9vL4FAgIGBAWZmZvB4PBQXF6PVajGbzej1eubn5wmHw9hsNlGgPTMzg9/vJ5lM3vTfQeUzhiIb3v8u305kWb4qH27yc5Q9o+xBRbnR6/VibykOFO0N1rH4/xUHRTQaZWZmhqmpKR588EFyc3OFXEkkEqLmMTc3l+bmZl577bXrzlNeXk5paSnBYJCDBw9SXl5Od3c3oVCIiooKnE7nsmsCFeeOIl8UL+6N1vNB92dxJ1ONRoNerxdR+VAoRCAQwG63E4lEWFhYYH5+nkAgQDKZxG63i3TC48ePMzk5iVarZXh4GKfTSVFRkajFUmRZNpu9paYbKp8WFB3i/X/fbjkhSb+SEzdhJN1oH8BSOQEIOXFtFFtpxDA/P49Op6O4uFjsn5ycHNxuN4FAgEgkInQOj8eD0+kURpfX6xU1wwsLC8iyLByXynsNBgPBYJDZ2VnhzJUkaVmy4pOQE4udSIqcUNZbXFwsmtFMT08TCARIp9Ps2LEDq9Uq9Cjl/iaTSdHMQXG+mEwm8ePz+aipqUGn0wmZGwgEWFhYUJ24nxLufiPpU4iixIyMjPDCCy+wdu1aysvLl6S7KBs+Ho8zMjLCgQMHePTRR2lpaQEQXZf0ev2SmgC73S68zZFIZFlhfEmSSCQSxONx0fXJ4XBgNptvqs354uhQJBKhvb2dYDBIaWkplZWVS44dGhriD/7gD+jq6sJgMLB582YeffRRGhoa0Ov1VFZWcvnyZY4dO8aDDz5IYWEho6OjnDp1iv7+fnbv3r3i3YhUVG4XmUyGUChEKpUim82i0WjweDwiOnszyLLM3Nwco6OjRKNRGhoacLvdGAwGMpkMU1NT/PKXv2T9+vXcd9995OTkXGeoAVRXV7Nr1y6i0Sjf//73kSRJpKs89dRTonHEcjzEqVSKRCJBNBpFkiRh4Njt9mWlBGcyGdE5S5F7ioHX19fH8ePHOX78OE1NTVy6dIm2tjYGBgbYunUr6XQau91Oc3Mzzz77LH/0R39ENBpFp9PhcDj4jd/4DRoaGsjLy7vj3m4VlQ9isfMxnU4Lw0ORE8shHo8TDocxGAzCKasYIg6Hg0QiQSqVIp1OEwwGhQNW2RPK6JN0Ok00GkWWZbZu3cqhQ4c4ffo01dXVuN1uzpw5Q39/P5lMRhy3HJLJpNBDFDnhcDiwWCwfKSeUKM/8/DySJKHRaEQassViYd++fWzatImamhrC4TCvvvoqFy9eZHR0lOrqaiorK9Hr9eJaJUliamqK7u5uLl68yPr16ykoKBANpCoqKnjhhRdwu90UFRUxPz/PwYMHmZ+fJxaLkUqllrVmlTuLaiStQubm5vjlL3/Ju+++i81m47vf/S5er3fJMbIsMzMzw4ULF/jjP/5jtm/fztNPP01TUxPZbBatVisUm8UCSPHCKIJvOYTDYY4cOSJ+AJ555hnuu+++ZXewUq4hnU4zNTXFiRMn+PnPf85TTz3F9u3bqaqqWnJ8c3MzP/zhDwmFQpw/f57Ozk7efvttiouL2blzJ3v27KGoqAij0cjv/M7vIMsyTqcTt9vNjh07VkWqj4rKJ0E2m2VoaIi//du/pauri9nZWTQaDX/0R3/E2rVrRfR4uaTTacbHxxkdHaWwsJCysjKRhqcYPHq9XrS9HhgYIBgMMj09zeTkJDMzMxQWFtLW1sbrr7/O8PAwf/zHf0xNTQ1nz56lp6eH559/nrKyMurr65eloHV1dXH48GFefPFF0uk0Xq+XRx55hH/1r/7VR64vnU7T19fHf/2v/5XJyUnROvn3fu/32Lp1KzabDbfbzR/+4R+SzWYpKCjAYrHQ3NwsvOqzs7N0dnbyF3/xF3znO9+hqakJrVbLpUuXeP3118nNzWXbtm2iVkFFZbWRSqUYHh7mT//0T+nv7ycUCmGz2fj+97/P2rVrP7Lxw+IanMVZIwqKA2LxcYlEYol+sThtTonwfOMb38BqtXLs2DH+w3/4D+j1elGvtNzGVgonT57k4MGDHDp0iHQ6TXFxMc8++yz79u0TXX0/iFgsRn9/P//X//V/sbCwgM1mo66ujv/0n/4TZWVl7NmzR9wHWZbJy8vj0KFDnDlzht7eXkpLS5eUB/h8Pp577jnGx8cpLS3lX/7LfynS6pqbm/m93/s9vv/97/Nf/+t/FY6YyspKamtrbyrKrnJnUY2kVYISJp+amuL5559naGgIj8fDb/3Wb+H1eoW3RhFeQ0NDvPvuu7S3t9Pc3My3v/1tysrKRJ7s4rlD8XhcCDIlF9ZsNouWth+F1Wpl69at1NTU8PnPfx64mraXl5d3U52rUqkUPT09nDt3jkOHDvH5z3+evXv3UlJScp3iYTKZKCgoIC8vTwj3EydO0N3dzbZt27BYLDQ0NPC//W//G1/5yldIJBKk02nm5ub4X//rf5GXl6d2tlO5K9DpdJSUlPCv//W/FjU/AC0tLWKf3wzz8/OMjIwwMzPDmjVryM/PX2LIKN7WN954g8OHDxOPx+nu7mZycpK5uTkymQxPPvkk77zzDrFYjPXr1/Pggw9iNpvF9Rw8eBCfz0dVVdUST/MHUVNTg8vlYuvWrUvGH9hsto9cjzIi4T/+
x/8oIugWi4WioiJMJhPNzc0UFRXxhS98gXg8LhoyvPnmm8LzOzQ0xKlTp6isrGT//v3k5+eTyWTIy8ujvb2dqakpZmdnRQtfFZXVhsFgoLS0lG9961tEIhEymQw6nW7Zjgq4miHi8XhIJpP4/X5R76j8f1VVFTabDaPRSHFxMSdOnCAWi4msjUAgQCwWEzOQdDodNpuNp556igcffJBwOEw2m2VsbIzDhw8TCoVwuVzLNhhaW1spLCzk4YcfRpIkzGbzEifPh6E0llGMFp1Oh91ux+v1CoNOQaPRYDabxTyjxc1wEomE6AA4PT1Nc3Mze/fuFfqUIn9qa2v53d/9XYLBIJlMhnQ6zdjYGD/96U9xOBx3vKOfyvJQjaRVQjKZZG5ujkOHDgkDac2aNdTU1CwxjpRBmO+99x6Dg4OYzWZ27NhBaWkpBoNBRJHcbjc2m41QKMTY2Jg4z6VLl5iZmaGoqEgUMn8Uer1e5B6Xl5cDVxtNKLnOH4US5u7u7ubkyZMMDg5SVFTE1q1byc/PX3LdihBVCqThqtdGETKKMFaKJBWhHolEmJycxOfzodFoKCsrW5aCpaLyacBqtVJdXb2k3kBJMVH2RywWI51Ok06nSaVSxGIxsU+VfS7LsqgFyGQySwZVw9V9fd999+H3+5EkiWw2SzgcZnR0lOLiYsrKyigqKkKr1ZJIJEgmk2IvKudXrudmOnIpKTNKxFxJhVmOl1kZ6LhmzZolNZNKMxtZljGbzdTU1IhmFYoCqcypUlKKU6nUEg+6ksKk3HdlUKTifEomk2SzWZEumEwml3TDUlFZKZTmDHV1dUvSYpW6omw2K5oKKOl4Smdcg8GAwWDA6XRSUFCAyWTi0qVLoiPu+Pg4gUCAe++9V9Te1NbW8uabbzI+Pi6et/39/WK2YW5uLlqtVgx6z8/Pp6CggEgkQl9fn3CEKE2lloPL5RJzzuBXjWuWk5KrGEWtra3i3uh0Osxmsxiaq9VqMZlMYq7U4mG6Go1G6FPHjx9nbGyMNWvWsHbtWsrKygCWpOKlUiny8/PJz88XTSo6OztxOBzk5+fjcrlu9k+scge4uyW5LENWQpYk5BWoT5Glq5+FLN9074ZQKERPTw+/+MUvMJvNtLa2Ul9fTyAQEN4Wk8lEOp2ms7OTN998E6vVSnNzM42NjUxPT6PX67HZbDidTjweD0VFRQQCAfr6+kTDg7a2NkKhEGvWrFn2oFulWcTH9aAqgxrfffddTp48STab5XOf+xwulwu/308sFsPhcGC325mZmRFNJxQjZ2RkhKmpKTGpW6vVEo/HRYGlw+EgEokwODhIb28vOTk5VFdXq54alQ9Hlq+2281mr8qHlWjcoMiHm0Apula6zy1GadwSDAZF56lwOIzJZGJkZISSkhI8Ho9ou5/JZPD5fASDQYxGI1VVVcLZIcsyJpOJxx9/fEmnptnZWfr6+mhpaWHLli1s3LgRg8FAUVGRSL/r7e3F7XYzNDTE7OwsJpMJh8Ox7MYGer0evV7/saK/Sg3SjaJqiUSC2dlZfD4feXl5RKNRenp6GB0dxeFwUFlZidlsxuFwkJeXRyQSobe3V9Rrjo2NCfmkdLVS5r5Fo1GmpqZIJpNMT09z5coVMpkMTqfzI1N/VD5FyDJIMnJWAkn6pBrOffDHva9HyJLMzSgSipy4kXNQkiTRTEEZgqyk7CtyIi8vD4fDIRoYnDt3TnTNHBoaIpvNUlFRgcfjETV8er2e/v5+MXOpra1NGER5eXloNBrC4TATExNks1kxqqO3txetVktFRYUwppYjJxRj7uPMjFLkxLV6gSzLTE1NiQHSdrtdZL3Mzs5isVjEWiYmJjhz5gw///nPcbvdYmiu0ojB7XZjMpnEMNlEIoHJZBIddy9dukRpaalooKWy+rl7jSRZRk6lyEaiaIwGsivh2ctKSNEYUjIJ5usVmg9jfHycV199lXfeeYeNGzdy+PBhzp49i0ajITc3l3379lFbW4ssy7z00kucPXuWgoICEokEfX19oq3mli1beOyxx3A6nTz44IMcPnyYrq4uOjs7gaspb2vXruWJJ55YtmC6VZSagR/96EeEQiHy8vI4fvw4J06cAKC+vp4dO3awY8cORkdHOXPmDKOjo6RSKbRaLZlMBrvdTlNTE9u3b8doNDI7O0tXVxdvvvkm8XhcdKxxOp188YtfpL6+Xk23U/lwJAk5mSQbCiOl0isxJgmyWaR4HFn7ycgjZfjjyZMnefnllxkYGCCdTmOxWOjr62P//v08+OCDlJaWotVqicViXLx4kUQiQWlpKdXV1cJRoihZix/eSmTFbDZjs9mWGAsPPfQQyWSSs2fP8oMf/ACTyUQikcDhcLBx40Zqa2tvqrHL7SCVStHR0cEvf/lL4d3V6XQUFhayf/9+amtrMZvNVFVV8cADD3Dp0iWee+45cd1Kyt26desoKysTSt9f/MVfMDc3J6LXL774Ih0dHWzZsoXt27eLAZ0qn37kbAY5kSAbDiOlV6DYXpYhlUZOJJCNn8wzLJPJMDQ0xJkzZ3jrrbfo6ekBrtYCXrhwgYceeojHH39cDHH/tV/7NX72s59x6dIl4Kqc2b17Ny0tLaIRRENDA0888QQdHR20tbWJAbN79uxh7dq15OXlAVdrrF9++WVGRkbEkF6n08k999zD5s2byc3N/UTWeCsMDAxw9uxZxsbGRD13JpOhpKSE3bt3U19fj16v58SJE/z0pz/lyJEj7Nmzh5deekkYbmVlZTz55JOUlZURiUTo6Ojg0KFDZDIZoZ+43W4ef/xx6uvr1UyXTwl3nZGktMK1mM2khkdJzc1fnZGkXYEHtSwjJ5M4DSbM27aJouDlUF5ezjPPPMOOHTtEeszijjFVVVUid/ff/Jt/w2OPPYYsy0s8tWazmZKSEvGAb21txev1ct999zE3NwdcnW1SWFgowtUrgV6vp6Kigv/n//l/lnTdU/B4PCJc3dLSQm5urugAo8xYcrvdeL1eCgoK0Ol0eDweWltbsdlsRCIREUr3eDxUVVWJmSYqKteipFhoozHil/pITkyiWanJ57KMFI6gXdOCyWy+qZq+G6HM3li3bh02m21JWphOpxNDCxV5Yjabefjhh8lms8Lg+TB0Oh25ubl861vfwuVyLaljrKmp4ctf/jJ79uwR6Xsmkwm32y0GX9/p4mSz2cy6devEMEetVivWobQ4hqvOlbVr1/K9731PDISEq7WRpaWl4lil3uDxxx8nHo+TzWbJZDIiClZQUEBRUdGdXLLKJ4TSYRH/ArH+PuJDw2h0K/R9liSy4QiGrVsxvl8XdCsojoHNmzeTl5cnlHYlrbWyshKr1SqcJI888ggVFRWEw2EkSRI1Nkr9opLm9uu//uvs2LFDdNRzOBzU1tbicrmEk7K4uJiHH34Yv99PJpPBYDBQUlIiZietxED7j2LTpk0UFhbi9/uFI8XpdJKXl4fX6xXt/7dv3y7qQ5VaS6VJhXK80WjE6XSydetWcnNzSSaTIhWypKSEsrKyZTW7UlkdaORbHeW8yshms8z
Pz9Pd3c2bBw+SkWXkFdSVNTLYrBbWNjaxf/9+IXg+CmW44+ICQQUlhK4IJ6UV8LV/Oq1Wi8ViwW63C29IMpkkkUiI85rNZvEDrIghsTgl6NqOOXC1DsJqtWKxWESOtNLqGK4+rIxGo5g9AIg6gGg0SjqdFsax0WgUnm7VSFK5FsVD2NXVRcfFi1zq7SUDtz+KtAitDHVVVbQ0N7Np0yaRCvdxWDy8MBqNXve61WrFZrMt6cqkpJUIJZAPlgNKQxmlLbDRaFxi2CWTSSFjJElCp9NhNBpF0fOHnXslUNKMYrEYmUxGKIUmkwmz2bzEySRJErFYjFgsJuSUkua4uO5BGdZ97YgBpZ7BYrGoUexPOYqcaGtr48z581wZHycLKy4nWhsbWb9uHY2NjbekWC/WBW40n8dmsy3RGyRJIhQKiUH2SjrrtZHhdDot2llLkoTBYMButy+pg8xkMoTD4SVtyZU9tfi4O4VybxS9Y3F7cEVOKPIzHA4vyVxZjMFgwOFwCPmYSCTEDKjFc9sUB66qn3w6uOuMJOULHwqFGB0dFZt8pdC8/6D05udTVFSkDhtUUVlFKIX8oVCIubk5/H4/6XR6Ra9Bic4oP+oDU0VldaHIiYWFBaanpwmFQjdUjG8nOr0eb34+ubm55OTk3HFjQkXls8hdZyTBrwTctTOCVgpF6VGVHxWV1cmdlBGKTLiZWWUqKiory+JOZXdKjwBWrHZYRUXleu5aI2m1oAo3FZXVx2qREap8UFFZvahyQkXls81daSSpqKioqKioqKioqKh8XNRcDxUVFRUVFRUVFRUVlUWoRpKKioqKioqKioqKisoiVCNJRUVFRUVFRUVFRUVlEaqRpKKioqKioqKioqKisgjVSFJRUVFRUVFRUVFRUVmE/k5fwO1AkiQkSSKZTK74jANl9onRaESv16utO1VUViHZbJZMJiOmwK8kWq0WvV4vflRUVFYfsiwjSRLpdJpMJrOickKZsWgwGNDr9eh0uhX7bBUVlV9x1z2hZVkmk8kQDocZHx9fcSVIq9ViMpnIz88nPz9/2YPgJEkik8mQSqXIZrPAVUGp1+sxmUw3PI8ixBOJBJlMBpPJhMFgEAJVlmUh4NPpNIAQugaDYcUMOGVgZzKZJJvNivUtNiYXr2/xwymVSiHLMhqNBp1Oh8lkQqfTIUkS2WxWrE85n3KMOoBP5UYoDpNoNEogECAQCJBKpVb0GvR6PW63G5fLhcvlAj7+HBRlbylyIJvNCkeNyWS6zghbfJyyr4xGIwaDYclgW2X/KfJTOZ9OpxPH3an9pfwNY7EYmUxmiRNMp9Oh1+sxGo3iOmVZJpvNkkgkxFp0Oh1GoxGdTrdE7qTTaeFc+yj5q3L3onynIpEIs7OzhMNh8QxdKfR6Pfn5+bhcLux2+y19/2RZJpVKLXleAuK5arVa0Wg0Yq8oeogsy9c9VxejyIjFxy6WE3dyzyh/w3A4fJ0OqOhAitxTZKiig0mSJGTJYp1K5bPHXWckZTIZRkdHeeONN/j+979POp1e8UhSrsfD/v37+c/f+x5ut3tZ3uKpqSnOnj3LT37yE3p6eshkMjidTjZv3sxv/dZvUVRUhMViue59IyMj/MEf/AGnTp3it37rt3jggQeoq6sDrgqHI0eOcOjQIQ4ePIhGo+H+++/noYceYv/+/UsUhNtJNptldnaWv/u7v+P8+fOMjo4Si8VobGzkC1/4Alu2bKG+vn6JcTc2NsbLL7/MCy+8QCAQIDc3lzVr1vCbv/mbrF+/npGREdra2nj99ddpb28nm81SXFzMhg0bePbZZ6mvr1e99Co3JJVKceDAAV555RUOHDiwRGlYCXQ6Hdu3b+fhRx7hK888g81m+9jnkiSJhYUFBgYG+Iu/+At6enowGo3U1dXx7LPPsn37dkwmExqNhnQ6zcDAAH/1V39FV1cX8/Pz2O12vvzlL7N//34qKiowm80AjI+P8+qrr/L2228zMDBAfX09v/7rv87mzZupqKi4Tlm6E3z3u9/l+PHjBAIB8buGhgZ27tzJV7/6VWpqagCYn5+nu7ubP/qjP2JoaIji4mI2btzIF7/4RTZv3ixkYCwW4/Dhw/z5n/85U1NT2O12Nm3axLe//W0qKyvFvVH5bJBMJnn55Zd5/vnnaW9vvyNyYt/+/Tz5xBM89NBDtyQnIpEIr7zyCj/72c84ffq0+L3b7aalpYU//dM/xePxEAwG6e3t5R//8R85f/48qVSKvLw81q9fz7/9t/+WiooKjEYjcNVAeuutt/j5z39OT08P4XCYiooKnn32WTZt2kR5eTkGg+GW78OtIMsy+/btw+fzLTGUdu7cycMPP8wjjzxCQUEBAENDQ5w4cYIf//jH+Hw+Wlpa2LNnD/v27aOhoeFOLUHlDnPXaZHZbJZYLMbs/DzhAgemlmp0OTbQr4AnICuRmQkQDcSYnpoiHo+Tk5OzLGU9FAoxOjpKc3Mzn//85zEajczPz/Paa6/x//6//y/f/OY3aW1tFUJHlmW6uro4efIkgUCAmZkZotGo8KxKksQvf/lLTp06RTAY5Ld/+7eRZZlXXnmF559/nmg0ytNPPy28ybf1tmSzjI2NEQqF2LFjB1/60pewWq2cP3+egwcPMjU1xSOPPEJLSwsA77zzDocPH6atrY3f+I3fIC8vT3imXC4XGo0Gn8/HzMwMdXV1PPbYYxgMBoaGhujp6eFP/uRP+N73vkdRUZGq2KhcRzqdxu/345dTRKvysayrRWM0wu32F8hANkuib5R5KYl/fp5EIvGxlJ/F0ZSzZ8/yD//wD+Tm5vLd736XSCTCyMgIf/Znf0ZhYSGVlZUYjUbm5ub4/ve/j1ar5XOf+xzl5eVMTU3x0ksvYTAY2LlzJ83NzaRSKf7yL/+SyclJysrKeOaZZzhx4gRvvfUWMzMzfOELX6CoqGhFZMeHEYlEqKys5Etf+hK7du0CwGaz4Xa7KSoqAiAej/PWW2/xv/7X/6K8vJxnnnmGkZERJicn+fGPf4zH46G0tJRwOEx7ezv/+T//Zx599FEqKipIJBKcP3+eH/3oR/zrf/2vqaysvKGjSuXuQ4kqzs3NETBBrLEEc2MFGtMKKP2yjJy+KidmI0EWFhZIJpO3ZCQpOoHdbudf/It/wRNPPCEiKXa7HbfbjU6nw+/3Mzk5icVi4Tvf+Q4GgwGfz8eFCxf4wQ9+wLe+9S3KysrQaDSMjIzwP/7H/2Djxo38q3/1rygsLOT06dP89Kc/JRwOs3//fkpKSoA7F3UGWFhYYOfOnezZs4f6+nrgqnGYl5eH2+0GYHp6mh/96EecO3eOTZs28Y1vfIOzZ89y6dIl/H4///bf/lvy8/PViNJnkLvOSFocNpUdVnTlBejzXGgMt3+pciaLZNSDNCfSVJYbxfJ4PGzevBmA4uJiDAYDfr+fUCjEyy+/zMTEBNXV1TidTgBmZ2fp6OhgcHCQBx98kPfee098nizLBAIBLl26hE6no7W1lR07dqDRaLhy5QpDQ0McPXqUJ598ckWiLTqdjoKCAh588EFyc3
PxeDwiJD81NcXCwgJTU1O0tLTg9/s5d+4cg4OD7Nq1iw0bNpCTkyNC9y6XC61WS1lZGSaTCVmWKS0tRafTkZeXh1ar5eDBg0xOTuJyuVQjSeU6lJTcrE4LLjv66hK0FhPc7ge5LCNnsmj8QbJJ7SdS57CwsIDP58Pn8/HEE0+wefNm4f09e/YsnZ2duFwucnNzSSaT9Pb28sgjj7B+/Xpqa2vx+/28+uqrzM7OMj8/TzqdZnJykq6uLmpqarjvvvvYvHkzOp2O1157jbGxMSYnJykoKLjj0SRJkjCbzRQWFopItNFoxGQyiX1/+fJl+vr6WFhY4N/9u3/HmjVrKCgooKOjg7NnzzI2NkZ+fj7j4+O0tbVhMBjYu3cvZWVlBINBMpkM586dY3x8nNzcXNVI+gyhyAnJqEfjyUFf876cuP0fjJRMk5oPkE3LIpXt1k8ro9Pp8Hg81NXVYTabRTqpUj/tdrtpbGzE6XRSW1uLXq9ndHSUeDzOyZMnCQaDFBYWAjA2Nsbc3ByNjY3s3LkTl8tFOp3m4sWLLCwsEIvFbvmaPwmy2SxOp5Py8nIhJywWi0j1B7hw4QJjY2PYbDYee+wxqqqqkGWZixcvMjExgc/nw+PxqEbSZ5C7zkhSkAGtyYDOaUfvdqAx3n4PkJzOkg2E0VrCEL259+bl5ZGXl/erc8kyTqeTHTt28NxzzxEKhUgkEjidTtLpNH19fQwODpJOp3nkkUf4/d//ffFeSZKYm5sThtWmTZuoqalBq9Wya9cuotEoZ8+eJR6P33Ku83LQ6XSUl5dTXl6+5BobGxs5ceIEGo2GcDgMXE3z6e3tZWFhgXvvvZdMJsPCwgIWiwWn04nNZkOj0VBdXU11dfWS+6XT6YjFYrz11lsEg8EVrzVR+XSh0evQWkzo3TlorKbbvg/k9z3EWpsFTeaT+axwOEwwGCSbzbJt2zaKi4tFGq3dbqejo4PW1lZcLpdQ+goKCsjLy8PlcmE0GsnNzQUQ9QrDw8P4/X4eeOABdu3aJTyox44dIxKJMDExwdq1a++4kaTVakmn0ywsLDA+Po7FYhFOFMVI6uvrY3JyErfbzcMPP4xOp8NsNhOPx2lra8Pn87FmzRp8Ph/d3d2sWbOGrVu3kpOTQyAQIJFIcPz4caanpwkGgyI1R+WzgSzLaPR6tHozepcDrdV82yPOsiSjTaTQWi0Q/uQ+TKk5ikQi+Hw+bDYbNpsNj8cjjlH0ECWrQ6kzmpmZ4dChQ0tqpqPRKCaTiaKiIgoLCzEYDBQVFWG1WtHpdCta5vBh6PV64vE4c3NzjI+PY7PZyMvLW9JY69y5c6TTaerr67nvvvuAq1H6QCDAxMQEk5OTrFmzRm3G9RnkrjWSlqJZ3hdb5pYEoHwL771RU4ZEIsGVK1ewWq3k5ORgsViQJIn5+Xl+/vOfU1hYyK5du/B6vUverxhJkiSRk5OzxANSV1dHZ2cnwWAQn89HTU2NyDH+wHV9hLD7qHu7+HXlXJIkEQ6HSSaT2O127HY7AB0dHUQiEaLRKK+99hoXLlxAp9NRWlrK+vXreeqppygsLBSRJeV8siwTjUZZWFhAq9WSk5Nzx/OhVT49aJYrI24F+aqIuem3fcD+U9LdlOuORCJkMhkymQyxWIx0Ok1vby+hUAiNRoPVamX9+vVcvHgRnU7HzMwMk5OTIm2toqICWZa5dOkSVquVoqIiCgoKkGWZkpISvF4vExMTjI6Okk6nl1XT+GGy41beC2C32xkeHubll1/m1Vdfxev1snXrVrZs2cLWrVux2WyMjIwIh4ziNbbb7Xg8HnJycpibmyORSDA7O4vP5+Pxxx8XxxmNRtxuN1arlXA4vGo84yp3EM0yU8c+TJf4KD1DI3+icgJ+1QQqlUpx7Ngx2tracDgc1NXVcd9997Fv374lDUoWP6fj8TiBQACHwyEyQIxGI83Nzbjdbs6cOUMsFqOoqIjjx4/jdrspKCgQWS+3ct3Ktd/K+10uFxcvXmRoaAitVktpaSkPPvggGzdupKGhAZPJxOXLl8nJyaG2tnbJ+5RyCb/fv+JdUFVWB58RI2mZrCIHweTkpMiHf/TRR2lsbMRisTA/P89PfvITXC4XGzZsYOPGjdelzCmd5JTONYtfdzgcWCwW0QFwOd6edDrN1NQU09PTTE9PA1BZWUlBQQG5ubk3HYJOpVLMz8/z+uuvk0wmaWxspKmpCbhaZD01NSWKsf/wD/+Q+fl5Lly4wJkzZzAajfzar/3adRGwK1eucPToUY4fPy4aQeTk5NzUdamorEZkWSYcDjMwMMDCwgKJRAKAzZs34/F4qK2tpbi4mL/+678W6Xajo6NMTEyIFBiNRoPNZmPfvn08//zzXL58WURilPcrnfaCwSBWq3WJ80Sj0WA2m9FqtUSj0WV7iUOhEJcvX2ZqagpZlrFYLNTU1JCXlyccIx9EOp0mHA7T09NDMBhElmVcLhdr167Fbrfza7/2a6RSKdxuNxqNhiNHjtDd3Y3P58NisbB161ZCoZBwFiko3e9MJhPRaJR0Ok08HicSieB0OoVcUbpvKvdppQv3VVY5H2bsfKgRdBuuhV85Hnt6egiFQqTTafR6PZs2bcJut7N582aKi4sxm83odDo6Ojpob2/nr/7qr6ioqKChoWFJOqkkSYyPj3PhwgVOnTrFzp07KSwsFHIgPz+fvXv30t7eztmzZzGZTMTjce677z5KS0tvqoZqamqKgYEBkVFitVppbW0lJyfnI5248Xic+fl5BgYGiMVi6PV6iouLqampwWKx8Du/8zs4nU6cTifJZJI33niDQ4cOMTMzg16vp7m5mWAwSHFx8RKZpHS202q113UEVfnsoBpJq5CBgQGOHz9OZ2cnjY2NogNLPB7nypUrnD17FrfbzcWLF/H7/SLqdOnSJQoKCshkMks6xS32gCjtOrVa7bIjLZlMhvn5eQYHB+nv7weuChCr1SoKHz8KRalKJBL4fD6OHDnC2NgYW7duZf369SLVUDHo8vPzefLJJ2loaBCtfoPBIMPDw0QiEcxmMwaDQQjy119/nbGxMbxeL4888ggOh0PNH1a5a4jFYgwNDeHz+QiHw2g0GmpraykoKKCpqYkvfvGLdHV1MTIyIhT7kpISMZNJcUy88847NDc3U15eTm5uLuFwmHfffZfu7m6sVivNzc0YDAay2ewSuaG0BgZuKuUkFosxMjJCT0+PMFZycnKWRI8/CMWR09/fz+TkJNlslpKSEurr63E4HLS2tiLLshhnIMsyoVCImZkZxsfH2bRpk3htcftmZXxANpsV4wcUZ9LiFN3FslOr1d7x9EKVVcYq05WVqM/AwADT09PE43FMJhN1dXU4HA6Ki4tFVolSf6TVaunv76enp2dJd0tJkpiYmODYsWMMDg7i9Xq57777RLfeaDRKV1cXvb29bNq0ibKyMux2O6Ojo3R3d9Pb20t+fj51dXXL2jdKV72ZmRngahSnsrLyOmfNjVBkm9JkwWw2k81mKS0txWq1sn37dtGkIpPJEI/HefHFFwmHw8zMzNDc3
IzRaBS17Ivvp1Lnrdwz1UD67KEaSasE5YE8MzPDyZMn6e/vR5ZlduzYQV1dHRaLhWAwSDqdFi19/X4/8XhcPPDn5uZERyrFI7R41hBcFUbRaFR0tVmOANNoNFgsFtxutyjazMnJwWS6uSLWdDrNyMgIXV1ddHV1UVpaSktLCxUVFVgsFlGHpRRe19bWkpOTg9lsxuPx4HQ6GR8fFx38lHqEY8eOMTQ0hMPhoKmpicbGRoxGoyrQVO4aDAYDbrebdDot6vLMZjMmk4nCwkK2bt2KyWQiHA5jMBiIRCL09PTgdrsxm82iU1dXVxff/OY3Wb9+PV6vl0gkwqlTp/D5fJSUlNDU1CSaPMRiMZLJJEajkUQiQTweB65Go5c7A0Wv1+NyuSgqKkKSJGw2G1ardVkNY5RIjsfjEUaax+MR71VqqeCq/CwuLiYnJ0fMtZEkCYfDgUajIRAICC9wJpMhmUySSqXEtVitVux2u0hTBsRA8kwms6TIW0VlNaIMn1VqjBKJBEajUThNTCbTkuiO0WikoKAAnU7H/Py8iJQoEam2tjYGBweRZZmWlhYRmdFqtcTjcRHhbW5uZsOGDVitVkpKSjh79ixTU1PMzs4uSV/7MMxms2i8BFdlzOJZZx+GMucpPz8fk8mE0WjE6XQKw2ZxHaEkSZSWlmIymUQEWdE70uk0wWBQHKvoTkqDGNVJ8tlElfqrBEmSiMVinDp1ijfffJOioiLuuece9u/fLwa9KQJw586dBAIB8TC/dpiqopjodDqi0Sh+v1+k3IyMjDA3N4fT6VwilD4Mk8lEdXU15eXl3HvvvQBCaVhOtEbpuDc3N8fRo0dpa2sjm83y7LPPUltbi9VqFUZcaWkpTqeTVCpFLBYjm82KYbiZTEZ4fiVJIhKJ0NXVxU9/+lMaGxvZuHEj9957Lzk5OcKzrBpKKp92FK/vzp07lwxPVRQWgLKyMkpLS8UIhL6+Pl5++WURdVEaoMzMzFBeXk5JSQlOpxOXy4XX62V+fp6FhQU0Gg1VVVUkk0nm5+eZn5/H6/UyOzsr6v0KCwuXHaV1u93ce++9bNmyRaxFqWv4KIxGI16vlwcffFBEsZTGC0qkSxmcC1c9ysr9UfZ9SUkJvb29jI+PE4vFMJvNRCIRFhYWiEajuN1uTCaTcAANDAwID3wikSAQCJBOp7Hb7WpnO5VVjU6nw+12s3fv3iUd8RT9QemmqRj7iqNAiZTAr6JRQ0NDvPHGGzidTlpbW9mzZ4/oMqu8d3BwEKPRSHFxMaWlpQBiOHUsFiMYDIqhrB9FSUkJ+fn5SwbNL9cwsdlsVFVVUVxcvGQItNFoFHMyFafOYsfH4hqsiooKenp6GB8fJ5FIYDAYCAaDRCIRJEkSDWFUfeKzh2okrRKmpqY4deoUf/Znf8aePXvYvn07LS0twrthNpsxm81UV1dTUlIiDCSl/uhP/uRP2Lp1Kw888ADNzc1kMhlqa2sZHh4mEAgIw+H5558nEAiwd+/eJULvw1AMtI/bCEGSJILBIH/4h3/I6OgoRUVFfO1rX6OkpERMt1c8QBs3bmTNmjXiXnzzm99kZmaG06dPc+nSJSGsfT4fJ06c4O///u/ZsGEDn/vc56ioqECn0xEKhbBYLMs24lRUVjuLO7YtJpPJMDMzw/DwsIi4dHR0cObMGRKJBPfff79o1+31etHr9bz55ptEo1Gqq6uZnp7m9OnTrFu3TtQbKP/d0dGBVqvloYce4u2332Z6epq6urolg58/CqXd7sdBqam0Wq1Lfi/LMpcvX2Z4eBibzUZxcTGyLPP6669z8eJFjEYjVVVVGAwGtm/fzuDgICdPnuRv/uZveOCBB7h06RKdnZ2kUinq6+uxWq00Njaye/dufv/3f5/XX3+d+vp6QqEQBw4cwGQyUVlZuaT7qIrKakSr1V6335Q01J6eHqampkSa6unTpzlx4gTBYJDNmzdjs9kIh8NcunSJP/7jP8btdrNjxw6am5sxmUwEg0FsNht6vR6LxUJzczOvvvoq7733HplMBq/Xy6lTpxgfH6e6uvqmnCl6vf5jR2oVo+ja96dSKTo7O/H7/Xi9XpxOJ7FYjJdeekmMHFHmqT322GMMDAxw6tQpXnzxRTZt2sTBgwcZGBgQWS1qJPmzyV38V5fJxhJk5hauehJWYk5SViLjD6KLxoEPz6O9lsnJSQ4cOEB7ezuTk5O88cYbWK1Wodw8/fTTbNmyhfLy8iVCUJZlEV0xGAwYjUYMBgN6vZ7Pfe5zvPfee5w7d47vfe97ABQWFrJ3714ee+yxZYePb9V7kkqlaG9v591332V2dhaTyURXV5cwutavX8/+/ft5+OGHsdvtfOlLX6KyspI33niDf/fv/h0Wi4XCwkK2bNnCvn37sFqtXLx4kUOHDtHZ2cnY2BgnT57EZDJhMBjIz8/n2WefZcOGDWrLXpUPREpnyEZipGbm0Zpvfwtw5Kuz1LKhCHJm+YbDh11XNptlZmaGI0eOcOzYMbLZLHa7nbKyMv79v//31NXViUhtWVkZ3/nOd3jvvff4wQ9+IGoT6+rqeOCBB2htbcVoNOJyufj617/OkSNHOHToEL/85S+xWq3s2rWLbdu2UVpauux7dSv39IPeqziG3nrrLQYHB0kkEiLatmbNGtavX09ra6uQnY888ghWq5W33nqLX/7yl1gsFsrKynjyySepr6/HbDZTXFzMnj178Pl8vPDCCyLN0Ov18uUvf1nUdah89pBTabLxKOmZebSWFZi7J8vIyTSZcBRZWv537kb7RYmqptNpurq6ePPNN0VNo81mo6ysjG9+85vU19djMBjo7+/n+PHjHDlyBKvVSkdHBzabTegKf/iHf0hTUxM5OTncf//99Pf3c/78ed59911kWcZoNLJ792527dpFVVXVsvb/rcrdD5MT4XCY5557jrm5OVGrnZ+fz/3338/GjRvFGJG6ujq+/OUvc+LECZ577jl+9KMf4XK5WLNmDdu3b6esrEx1uH5GueuMJKVQ2Ww0kfXNEj3WgcZiQrMS+aSSTDYcRZfRYGrwon+/M8pyKCgo4KGHHqKoqEh0n1mcn1tdXS06ul3bUttgMPC//+//O9u3bxftwDUaDWVlZdx7770UFRXh8/kAKCoqorq6muLiYmBlJmHr9XpKS0v52te+RiwWE/nRCqWlpZSVlQFXPc8VFRWiTmBubk7kK5eVlYkc6traWh555BFqa2uFUaik3jgcjiV1WSoqi1G+f/p4iszQBJF0Bo1Of/sLsWVAlkhPzaMtr8P4vlF/K+h0OrxeL9u2bcPlcok6HK/XS2trq3C0wNXW17t37yY3N5eFhQXRxruyspLm5mY8Ho9IKVm7di0mk4n6+nrC4TAOh4M1a9ZQVlYmIlp3KvVEo9Hg9XrZs2cPTU1NJJNJ4OpA7oqKCkpLS4VBYzAYqKioYO/evdjtdqLRKFarFa/XS2Njo1AATSYTBQUF7N+/n6KiIpFyk5eXx7p169RGMJ9BlNROTShGamwUKRhBsxLRBBnIZklPzWOoX4vBaLzl757FYmHt
2rXo9XoikYiYo1ZYWEhNTQ02m00MZN+yZQvf+c53RMraYh2moKBAOGLz8/N59NFHuXLlCqFQiEwmI6KyFRUVKzKD8cNQdIn9+/eL61OMpPr6eoqKioSOYLFYRDe98vJy4vE4breb8vJyampq1KH0n2E08mqZ+PUJkU6nGRsb49133+Uvf/hDMtnMx5o58HHRAE6ni727d/Mf/sN/wOl0LitMm81mRee56875fjhZp9NdJyyVP188HhchZ0WoKcXOyg8gzqGcZyWEmHIdixtILEa5HkVhVI7PZDJks1mRdqMYwIB47YPul8FgWGJoqqjA1e9WKpXi4MGDvPX227x39ChZOctKtqrSaDRs2biJ+/fu5YknnhA1Ax8HpdBa2S9K/r0SWb7WoaJ0cFK6Ni2WLYv3ijJ4Vqn9Uc63OC//TilAyjrS6bRYB/xKjizuRKfUISw+XnldOV5Zh7JmpY5h8QgFtR7hs4UiJ1555RV+8fLLdF+6tOJyQqvRcN+u3TzyyCPct3v3dWmny2HxHEFFRizu2Lh4z8CvOj8u7ga5mMV1PMASGaHIHmW/3Ok9o6xZ2c/Knl58jYudzovlqFJLtfgeqXw2ueuMJOVBF4/HmZycFDUvK4VOp8NkMomJ9mrbSBWV1cNip0IwGCQYDIq5QyuF0qnO4XCINtiqjFBRWT0oSnU8Hsfv9xMOh5e0h18JlEimMttQlREqKivPXWkkLf7vO7G8aw0jVbipqKwOVoN8AFVGqKisZlajnFBlhIrKynPXGUkqKioqKioqKioqKiq3glqwoaKioqKioqKioqKisgjVSFJRUVFRUVFRUVFRUVmEaiSpqKioqKioqKioqKgsQjWSVFRUVFRUVFRUVFRUFqEaSSoqKioqKioqKioqKotYgfHRK89qatintu1UUVl9rBYZocoHFZXVyWqREaDKCRWVO8Vd1wJ88YTpxf9eSa6da7AcAbec61x8no86XqPRLOuYlWC5fwPlepZ7L1bL+lQ+PdxILtxNs9RWai13cm/djjWqskJlMatRTqwmGbHa9//N6koqKh/EXRlJSqfTRCIRxsfHSSQSSJK0Yp+t0+kwm814vV7y8vLQapeX0SjLMtlslmw2K65Xo9Gg1WrR6/XXnedGx2u1WnQ6HXq9/rrXZVkW59PpdOh0OvG7lSKdTotrkWVZXItWq12yvmuvXaPRiON0Ot2Sc0qSJM77YfdLRWUx0WiUQCCA3+8nkUis6GcbDAY8Hg8ulwun0/mJnluSJLF3FFmw2PkgyzKZTEbsw8X7ZbEsuFZ2KDJDOW6lZceNSKVSZLPZJb9TZIDBYBDXmclkyGQyH3qcisqNCIfDzM3NEQqFSCaTK/rZRqMRr9eL0+nEbrd/oueWJIlMJiNkgMFgEK8pcmLxc1Wj0Yi9shr2y2IdQTGCrpV3ynGwVC4qskuRadfqFCoqi7nrjKRsNsvExATvvfcef/fjH5GRs6Dh6s/tRgaNDDn2HHZsu5dvfetb5OTkoNd/9G2enZ3l4sWLHDlyhMnJSSRJwmw2U1ZWxhNPPEFZWRk2mw24usbBwUHa2tro7u7mypUraLVaqqur2bp1Kw888AB6vZ7jx4/T1tbG5cuXCYVCGI1GSkpKaG1tZfv27RQXF9/uOwJcFVThcJiXXnqJwcFBZmdnicfjeDweNm3axJo1a2hoaMBisYi1Xbx4kZMnTzI3N4fD4aCmpoYNGzZwzz33YDKZkGWZUCjE5cuXef7555mdncVgMOD1etm7dy/33nsvFotFNZZUriOdTnPixAkOvXuIE2dOkUVCo12hB78MGgnWt65j985dPPbYY1it1ls/rSwjSRLt7e289957XL58maamJn79138dh8OBRqMhFotx5swZ3n33Xebm5shms+Tl5fHkk09SW1tLTk4OWq2WVCrF8ePHaW9vZ2hoiFgsRklJCXv27KGhoYGioqJVoVj8j//xP+ju7iaVSmE0GgEoKyujpaWFhx56iOLiYoLBIK+++irPP/88VqtVXHdubi4bNmzgS1/6Eg6HY1WsR2V1kUqlOHToEK+/9Qa9/X13RE5s33YvDz34EDu2b//E5EQ2m+Xs2bO8+eabhMNhGhsb+cpXvoLVakWWZYLBIBcuXOCtt95iYWEBvV6P1+vl85//vHhO3+n9kkgkePvttzlz5gw+nw+tVsu6dev4xje+gdlsXpJpEo/HuXTpEhcvXqStrY1gMIjL5WL9+vXs2rWLurq6O7oWldXNXWkkhcNhhoaHaR/uxlSSg85qQLMCyrIsyWRCCSxhA+5LTuLx+LI9QPF4nKmpKcLhsNjkkiTR29vLoUOH2L9/PxUVFWi1WmZnZzlx4gQjIyMEg0HxkFfeowiHVCqFJEkYjUbcbjeSJDE6Oio851/84hdXxCMsyzKJRIKxsTEikQhGoxGDwUAsFuPChQtkMhnsdju1tbXMz89z6tQp2tvbiUaj5OTkkMlkGBsbI5vNUlxcTFVVFdlslrGxMd588018Pp8wIGdmZnjvvfcoKyujtLRU/F5FRSGTyeDz+egdGaBzvBdLuRutYWUe+rIkk5gIYRg0UVFeQSqVumXlRzGQFhYW6O3t5cKFCwwMDGAwGIQMSKfTTE5O8sYbbxCJRERUdm5ujqNHj2I2m6mqqsJsNjM+Ps6hQ4cIBALIsozdbmdqaoq2tjYkScLhcOB0Ou94NKmrq4uBgQEKCwspKSkBwO12Y7PZhGMqlUoxPDzMmTNn2LdvHy6XC61Wi8vlwuFwqE4UlRuiRCCvXLnCpeE+uqcHMBflrJycyEjEp0O4+ly0rGkhnU7f+jnfX9PCwgLd3d20tbURi8UwGo0iqhSJRBgZGeHNN98UjlVZlpmenua9997D7XZTVFQkdJQ7RTabFRG+mZkZEokEJpPpusiyLMv09fVx5swZLl++jCzL5OTkYDKZ0Gq1q6ruTGV1ctcZSUoYOZlKYq3xUPhgAyZvDjrj7RduUloiMjyHNBQmGU+KcPVy0Gq1WCwW9uzZQ319PSaTienpaV544QVefPFF6urqKCwsxGAw0NfXxzvvvENhYSHbtm1j06ZN6PV6YVQoD/7y8nLy8/PR6XTk5eURDAb5x3/8R3p6evD5fDz11FMrIugU4Zyfn8+GDRuoqKjA5XJx7tw5fv7zn9PR0UFeXh41NTUMDw9z6NAhpqen+c3f/E22bdvG5cuXOX36NBcvXqSsrIySkhLC4TDd3d288MIL/Pqv/zoPPPAAsViMzs5OXnnlFdatW4fdbsdqta6K9ACV1UM2m72aOmM3kNNSRPH+ZnQ2E7f7ayLLIKWzzB0fRBs2kEomb1n5WZwaMzIywtDQEPF4HJvNtsTbGwqF6Ovr47XXXuO3f/u3WbduHWazmVOnTvHKK69QXl6O2+3G4/HQ0dHB4cOH2b17Nw8//DClpaW89tprtLe3k8lkqKqqIicn547vK51OR01NDfv27eNzn/ucSBsymUxYLBZxnFarxe12841vfIPq6mqMRiN6vR6z2azKB5UPJJvNkkgk0HksuL1leHfVoLeabntWiiz
JZBMZZo8PIkUhnUpdly560+d8X04kEglGR0cZGhpCkiSsVuuSTJf5+Xm6urp44403+C//5b/Q3NxMLBbj3LlzvPbaa6xduxa73Y7JZLqj+0an0+H1etm9ezfFxcX4fL7rDCS4qg8eOHCA7u5urFYrTz31FBUVFcTjcSwWC7m5uXfg6lU+Tdx1RpJAAwabEbPXgaU4B63x9i9VSmdJR+JkZlMQv7n3lpWVUVZWtuR3hYWFGI1GDh8+LLwmFouFN954A7fbTVlZGXa7nUuXLuFyuaipqSEvL09Elerr65ecLzc3l5aWFvx+P6Ojo0iStCJhc51OR3FxMd/85jfF72RZRq/X09XVRTweZ35+HoDOzk6y2SzNzc08+eSTaLVacnJyiMVizMzM0NbWxp49e+ju7qarqwubzcY3v/lNbDYb8Xgct9vNxYsX6e7upra2lsLCwjueGqCyOtEatBgcJizFTvS226X8yIgTy5BNZzC4LGgSn9x3MpVKMT8/z8GDB7Hb7TQ2NhIMBpfUIinKj9fr5eGHH6asrIx0Oo3H4+HFF19kcHCQ4uJibDYbp06doqysjO3bt7N7924A9u/fT3d3NxMTE4yMjFBVVXXH95WSGnT58mVOnjxJbm4upaWl5OfnX1fjmEgkuHjxIrOzs3g8HvLz8ykvL1cNJJWPRGfSY7SZsRQ50dtNt+lTFssJmUw8jdFlQZP45L6fiUSCyclJ3nnnHbxeL62trcTj8SVywufzMTg4KNLrrFYriUSC3NxcfvrTn9Lb24vX6yU3N/eORmEtFguPP/448Xicw4cPk0qlmJqaWnKMJEn4/X7++Z//mXvvvZft27cTj8fp7e0VWSYOh+MOrUDl08LdayRdw8d7GC4Oxd7o/e+/Lmtuue7p2uuTJIlgMMixY8fwer14vV7MZjOhUIgrV64wPz/PwsICY2NjGAwG/H4/RUVFbNy4kQceeEB4SKemphgZGaGrq4v5+Xn6+vqw2Ww8/fTTK6bkXLs2JT1oenqaQCCA1WolPz8fjUZDXl6eeO3y5ctUV1cTCASYmJjgypUr2O12QqEQExMThEIhqqurRUGpwWDA4XBQVFREIBAgEomQyWTuuDKnspr5kA5zMsvY04qM+JUh9Kv3LP6fj+7G+HHIZDJMTU1x7tw5pqenefrppxkYGODixYvi8zQaDRaLBY/HQygUYnh4WNTnDA0NsbCwwMjICHV1dTQ2NlJYWEhnZyc+n4+pqSncbjejo6NMT0+j0+kYGxtbFfuqpqYGo9FIIpHgzJkzhMNhCgsLaWxsZO/evcJhVFpaytq1axkfH+fKlSsiDbq1tZXPfe5z2O32O74WlU8PN69LXKtHXCtYbr+cSKfTDA0N0dHRwczMDF/96ld57733GB0d/dWVaTTY7XacTiehUIjBwUEqKiqIxWIMDw+zsLDA0NAQLS0tK9oM61qWe//T6TT9/f3Mz8/T29uLVqvFbreTSCTQarU0NTWxY8cOGhsbV01DCpXVx2fGSLo9aJb865NkbGyMCxcucP78efbs2UNpaSl6vZ5kMkkikSCVSlFQUMCGDRvIzc1lcHCQjo4OdDodtbW1NDU1Ab/q/haPx1lYWBCFmEqh83JQ6hnGx8cZGRkBoLKykrKyMoqKipbVmEJBSQ1aWFjg2LFjABQXF1NZWQlAXV0dtbW1DA4O8sILL1BWVkYkEmFgYGBJ/nQikSCTyQhjcHFnO7PZTDqdJp1O3zAEr6KyLD7OvtZ84P98bJR6I8XRoXh/d+7ciSzLXL58mfb2dtatW0d5eTnT09PXtch3Op3U19dTWlrKgQMH6OzsxGQy4ff7SSaTJJNJUqkUer2e9evXc/78eS5dukQsFsPlcjE1NUUgEMDhcBCPx0X6zkcpFn6/n56eHsbGxkR6T3NzM4WFheTk5Hzoe5PJJAsLC7S3txMIBJAkCY/Hw7Zt28jJyWHnzp2EQiFRdzgwMMDY2Bjd3d1UV1fj8Xgwm820trai1WopKCgQaYlKjdWGDRuoqqpakp6nonJ7uXbPfDJyIpPJEAwGxX5JpVIYDAZ27NhBJpOhp6eHgYEB1q1bR2lp6Q1TTfPz86mrq6OkpIRf/OIX5OfnA1cbSynP3XQ6vSxDLpvNEo/HeeONN0in0+j1emw2G62trRQXF3+k3hAOhxkZGcHn8+H3+wHYuHEjxcXF2O32j5Q9itzMZDLo9XqR7h+Pxzl37pyQFZWVlXe8xkpl9aIaSR/Kndk0s7OzdHZ2cu7cObLZLLt27aKgoAC9Xo8kSUiShM1mo7q6mi1btohIU3t7Oz6fj9HRUWEkGY1GnE4nJSUl6HQ6gsGgaHqgFDt+lHDIZrMEAgFGRkY4f/68+L3dbqegoGBZa1KEajabJRgM0t3dTXd3N+Xl5dTW1opOe+Xl5axfv55sNsvo6Chzc3Oi3mqxIFuspF17/YsVRLUwU+VjsawoEss96JaQZZlYLMbg4CBjY2Mila6lpYVIJEJvby8dHR00Nzfj8/nw+XzMzs4iyzIDAwM0NzdjtVqpqalh69atTExMEAgEMJlMmEwm9Hq9SNHV6/U0NTWxdu1aJiYm6O/vx2w2YzabAa5rsftRxGIxhoaGuHjxItlsFpfLRX5+Pi6X6yONpEwmQzgcpre3V9QclJeXs3btWnJyctiwYQOyLItry8nJYWZmhpmZGcbHx1m/fj0mk4na2lqKioqw2+1ks1ncbjdtbW2cOHGCyclJSktLVSNJ5TZy7X5ZtnC5KSRJIhaL0d/fz8TEBLFYDJPJREtLC3Nzc3R3d9PT00NDQwNXrlxhcnKS2dlZTCYT/f39tLS04Ha7qaurY/PmzYyPjzMzMyN0BIPBcN2ogA9DlmWSySQXLlwQBpvb7aa0tFToMx+Gkh7Y09PD+Pg4ACUlJeTm5i67IVYmk0Gj0ZCbm0ttbS333nsvyWSSsbExpqamGBwcJB6PCxmionItqpG0Sljcz//tt98W3aW+/e1vs3nzZkwmE+l0GpPJhNFopKKigurqalFHUF9fT3FxMbOzs0KgwNVuT4oXOZFI0N3dzdtvv81rr73G008/TVlZ2UfmFiuNH+rr64UwKS8vx+Px3FResizLRKNRenp6+Pu//3tycnK4//77aWhowOl0otFocDqdPPDAA2zYsEGk41ksFkZHR+nu7iYYDGI2m7HZbBiNRqEMKil8mUxGPByUAm0VlZtmFTkVlTSYpqYmCgsLRSTJ7XbT09NDe3s7Z86cwe/3o9PpCAQCLCwsAFcdLt/73vfYvHkzhYWFfO1rX8Pn8xGNRpEkCbvdzoULF4TiodfrKSws5Ktf/Sqzs7MEg0ESiQRms5nZ2Vk0Gg0ul0sYVR+FzWajoaEBh8OBLMtYLBaKioqWZZTo9XqcTifr1q2joqICSZLIzc0VHvDCwkJxrCzLNDU1cfHiRWKxGNPT00Jeut1u3G63OLampoZAIMDp06eZm5u75aJ4FZWb4/YIF51Oh91up6WlhbKyMmGYuFwuTp48yYULFzh37hzj4+Po9XqmpqaIxWLodDomJib4kz/5E8
rLy6muruY3f/M3uXLlinhdlmUhJywWy7L2vlarxWw2c88994i5bVarVaTBfhQWi4Xy8nIMBgMVFRXAVSPpZj7f6XSi1+upqqqitbWVvLw8ACoqKgiFQszPz5NIJFRnqsoHomqQq4h4PM4//dM/8dxzz7F9+3a+/vWvs2HDBpEapwi8kpIS4vH4kkG5kiQRiURIp9NLvCzKoFYlxa6kpASHw0EwGCQcDi8rt1in01FQUEB+fj4tLS1LznszufzRaJQf//jHHD16FIvFwu/8zu9QUFBwXeqfw+HAbrdTVlaGJEnMzs7i8/kIBALU19fj8XiorKzk8uXLHD58mGQyiV6vJ5VKsbCwwJUrV6irqyMnJ+em0gpVVJZwexy+N41GoyEnJ4eNGzcu2a96vZ4dO3ZQU1MjmpwAnDt3js7OTmRZ5td+7deoqqoSA2OLioooKChAkiSi0Sj9/f3EYjFqamqoqakR9X2FhYUUFBSIuSqnTp0Sc5UaGhqW7XxwOp2sX7+etWvXirUoQ2k/CqPRSG5uLvfee++S+qrFgy8XE4/HiUQiIkL+QZ+RTCaJxWIkEgmMRqOaZqNyV6AYBdu2bVsiJwwGA/fffz9NTU34/X6xLw4ePMjo6ChGo5FnnnmGvLw8ESleLCcCgQDd3d1EIhFaWlooKSlZ1v5XaiH37du35HfLHfauRL+rqqrE/r+ZQfEGg4GWlhYsFguxWIxgMAhcdajE43EymYyIpKuofBB3/7dDvvqzIp4CedHPTTI5Ocnp06d5/vnn2b59O9u2bRNtu/V6PRaLRbS33bZtG//8z//MuXPnMJvNNDQ0cODAAQYHB6murmbt2rUEAgH6+/vRaDRYrVbcbjdzc3O8+uqrdHZ2im56yxV2ymTqxcrKcpEkiUQiwf/8n/+Tjo4O7HY7X/nKV0T6n9FoxGQyiXS6jo4O/H4/5eXlpFIpjh49yqVLl0ilUmKYbG1tLRMTE/ziF7/ghz/8IXv37iUWi9HV1cXExASPP/642t5TZXnIIF/9xw1f++Q+5gM+YxkoxsG1cszj8eBwOISnFa62+56dnUWSJFpaWsRMo2g0yrFjxygvLyebzTI+Ps4vf/lLEZVW6g/S6TRHjhwhJyeHnJwc5ufn+ed//mdMJhN1dXU3NYT6Rtd9M7JDaet9LZcvXxYNKAoLC0Wr356eHgwGA5WVleh0Ojo6OgiFQqKBQzQa5eTJk6J+s7y8XHWkqCwPpU/TbdYl5FvQIz5IThQUFOB2u5eMHBgaGiKVSmEymVi7di0Wi0XU8Zw/f57Kykri8Th9fX0cOXKEuro6qqurcblcy97DGo1myf66mb2vGFQ3kh1KCcD09DQ+n4/5+XmCwSCDg4Pk5+fjcDiwWq04nU7uu+8+JiYmeOWVV4SD+PTp0+j1ejZt2qTOS1P5UO5qIymbyJAMxNAYdSvSAlzOZEktxJHiKbGZlysUAoEAnZ2dDAwMCE/N0NAQGo0Gh8PBxo0bKS8vx+FwsHbtWs6fP8/CwgKHDx+mq6uLoaEhKioqWL9+PcXFxWQyGUZHR5mcnBQeU2VGQn5+Po2NjcsWDovX8HG8rpIkiSGxExMT5OXl0dnZSV9fn/Ba19XV0drail6v58qVK3R2dnL+/HnS6TRTU1MYjUY2bNhAbW0ter0el8tFfX09999/v0ixyWazhEIhGhoaqK+vXxWzXFRWL3JGJhNLkZyPoIsbWYlBSVJaIhNOImdMy+6odKP9pygOSl3RtU0a7HY7sizjdDqFkZFIJGhra6Ojo4NMJkMkEiESibBz504qKytFBDqbzdLW1kYymRRDnxOJBBs3bmTNmjU3ta9uVg4u571K7UVHRweRSASbzYYsy1y5cgWXy0V1dbUYvB0MBunt7WVychKHw0E6nWZ6ehqALVu2UFxcrBpJKjdEo9GARoOUlkhHkiTno2QStz7U9SORZLLJNOlIEllaXu0NfLicUOoKr5UTDodD1C3rdDqRkXL+/Hna29tFk6VkMsmePXuWnSq7+Bo+7jP4g96vNH/q7Ozk9OnTogYrGo3y0ksv0draSmNjI3V1dRiNRvbt28exY8eYnJzklVdeEQO2y8vLaWpqUve/yody1xlJGo0GrU6HXm8gG0oRH/aTDSTQ3O5J2TLI2SyJyRC6QAq9zXBT3olUKkUoFMLtdtPR0UFHR4d4TZlw7XQ6cbvdNDc38/jjj/Puu+9y/vx5/H4/TU1NPP7446xbt46cnBzC4TCpVIrLly/T399PKBTC5XKxfv167rnnHnbv3r1iYWYlZA9XQ+CKZ1qhtbWVffv20dzcjF6vJx6PMzw8TH9/v3h969at7Ny5U9QhGI1Gamtr+frXv84f//Efc/ToUYxGI1VVVTzzzDPU1taqwk/lhigeSk1GJuuPE+2bRWs23F4jSeaqkZSVSE1H0Opzll3XcyNu1KxEwW634/V6xSwyRQ5JksTc3BydnZ1kMhk8Hg/bt2/nc5/7HHl5eUv2y+zsLD09PaJ73COPPML9999PRUUFVqv1htewklgsFpEGND8/LxpZ3HPPPcKhpKT7RCIROjo6mJ6exmKxUFtby4YNG9i3bx8FBQWqI0Xlhmg0Ggx6PSQl0oEY0b4ZtBYDtzUH9305kU1nSM9E0Vm8t01OOJ1O8vPzMRqNSz4jnU4zOztLW1sbWq2W0tJStm3bJrIzbrZ5y+0glUrR3d3N66+/TiQSEb9/5ZVXxDDtyspKTCYTjz76KHq9niNHjnDo0CFsNhsPPvgg27Zto7m5WW3/r/KhaOS7rGItnU5zZWyMd955hz//8z8nnUqt6OdrNBpcbjcPPPAAv/2d7+ByuZZtjCznT3Fta99bZaWE3XKveTnrW+49uNOCXGX1IcsyqVSKtw8c4I033uDw4cP/f3t3HhxXdh32/9uv90Y3utELgMa+A1xAEgRBcsghOftoRqPFjl3jsWTZStnyUrHjpJI4iapSZVUqccpOKrFjKVU/SbFiySVLlsrWSBYpjobbkMRw3wGCBEDsALE2utH7e+/3B+Y9AUOOCHIIkOacT9WUhhTQ/bqn3+l77j33XNQ13rhvURR27NjBC88/zy/8wi9QUFDwUD+r77833j+r/HOv7Z/IvfUkvRbx+DE6s/3DP/wD3/v+97l8+TLaGh8nYVEUnn/+eT7+6qs888wz5uTEw3K3OPFP5X552MPWx+E1icfTE5ckGYOgeDzO4OAg2Wx2TTuXKIqC0+kkEolQWlp6Xy0zhRCry4gF8/PzzMzMMDMzQ3aNJ1JsNpvZca2oqEgOMhTiMWPEibm5OSYnJ5mfn1+2n2ctGGf7FBUVyb4ZIR6RJy5JAsyzhIzub2v5Eo1DTR0Oh9kpSgjxeFFV1TxweK1PjzeaEdhsNumsJMRjyujsmMvlyOfzjyROGMdYSEmYEI/GE5kkCSGEEEIIIcSDkvVbIYQQQgghhFhCkiQhhBBCCCGEWEKSJCGEEEIIIYRYQpIkIYQQQgghhFhCkiQhhBBCCCGEWOKJ7D/7uDTsk/bfQjyeJEYIIe5F4oQQH21PZJIEi8Etn8+j6
/qan5NksViwWq1ykKwQjyld183z1Nb6/BOJD0I8/oyxg6qqZrxYSxInhHj0nrgkSdd10uk0MzMz9PX1kcvl1jRJUhQFp9NJSUkJVVVV2Gy2FQW4XC5HMpkkFouRTqcBsFqtuN1uQqEQdrsdRVHMoD03N8fCwgLZbBZN0/B6vRQVFeFyucyTuRcWFlhYWCCVSpmPWVFRgcfjWfOgq+s6s7OzLCwskMlkUFUVRVHwer0UFBTg8XjMgzUnJyeJxWLLTji3Wq24XC7C4TButxuLxYKmaWSzWSYmJshkMlgsFpxOJ0VFRRQUFMgJ5eIOxsBnZmaG27dvMzU1texzthasVivhcJhwOEwkEvlQgyBVVUkmk+a9ZbBYLLhcLsrKyrDZbMvuBSNGJhIJZmdnCYVCeL1eM8asNV3XicVixONxFhYWKC8vx+PxYLVaUVWVWCzG5OTkHYPUcDiMz+fD6XSiaRrz8/MkEoll8cXn85kxRgaaYqWMODE1NcXo6CixWIx8Pr+m12C1WiktLSUSiRAIBD7UgbKappFOp0kmk8TjcTKZjHlveL1eMwZpmkYmk2F8fNwcM8BiPPF4PEQiEZxO55rFCU3TyOfzJBIJ5ubmyOfzKIqCy+UiEongcDiWjQXm5uZIJpPkcjkURaGqqgq73W5OXgtxv564JEnTNOLxOD03ejj49k/QLSpYdFiLG0Rf/MfjLGBd0waKi4vNL/t7SSaTDA4O0tfXRzKZBH6WGGzcuJHS0lLcbjeapjE9PU1XVxdzc3OkUil0Xcfn89HY2EhpaSl+vx+AmZkZhoaGGB8fZ2ZmhmQyyac+9SmqqqpW8134QENDQ4yNjTE/P08ul8NqteL3+4lGo1RUVBAKhQDo6enh+vXr5HI5MyFyuVwEg0FcLhcul8t8z4aHh7l06RLZbBZFUXC73VRXV9PS0oLT6ZSTysUdVFVlaGiIS1cu0d3Tha6oWBSANYoRqoW66nrWr9tAIBDA6XQ+8MOpqsrk5CTnzp1jcnISj8eDoijYbDYCgQChUOiOeyCfz3P79m36+/u5evUq27dvp76+3owbay2dTnPr1i1u3LjB0NAQr732GpWVlbjdbvL5PLdu3eLMmTNYLBYcDoc5QNu0aRMVFRU4nU7y+TwjIyOMjo4yNzdnxpfCwkKqqqqora2loKDgkbw+8U+Tqqr09/dz5txphseGQNHWZhwBZpxY17iezZu34PV6P3SSNDs7y/DwMAMDA0xPT1NVVUVdXR319fXmPaVpGrFYjOPHj7OwsGB+hyqKQmlpKQUFBXdMuqwWXdfJZDLMzc3R29vL2NiYmfwUFBSwbt06MwnK5/PEYjGuXbvG7du3icVi6LrO66+/TmFhoSRI4oE9cUmSMQA4dvQo/983v0JhmQuHx/reIGh16TqkYznseQ/bW57mqaeeWvFAfXp6mlOnTvHuu++iqqo5O7KwsMCLL77Iiy++SHl5OZlMhnPnzrF//34SiYQ545XNZtm1axfbt2+nvb0di8VCf38/p0+f5vLly9y8eZPu7m42bNhAZWXlIwkaFy9epLu7m+npaVRVNWeF1q1bx9NPP23Olh08eJA333wTl8tFc3MzFovFHOxUV1cTDAbRdZ3R0VHefPNNjh8/bs4UK4pCXV0dgUCAaDSK2+1e89cpHm+5XI4LFy7w5oF/4Pi5wwQqPVjta3M/aBrEx1Ksr2rj5blXWL9+/YdKkrLZLD09Pfz1X/81o6OjNDc343Q6cTgcVFRU0N7ejsvlwmq1mivq8/PzXLp0iYMHD/L973+ff/2v/zWBQOBDD8Tul3E9Y2NjHDlyhB//+MccO3aM2tpaQqEQbrebTCbDiRMn+PrXv04oFKKkpMR8v/x+P4FAgKKiIjKZDN3d3Vy+fJnx8XFUVSWXy5HL5di2bRuvvPIKLS0tgOzvEPdmfKe+++67fO9H36Fn6BqFUdeaxQlVXYwT25p2Y7Eo1NTUfKg4kc/nGRoa4uTJk5w+fZpLly6xY8cOXnjhBSorK7Hb7ebPjYyM8H/+z/9B13UqKirwer3YbDbWrVtHQ0MDBQUF5s+vJl3XmZ6e5urVq/z4xz8mFothsVjI5/NYLBa2bt3KZz/7WQKBANlslvHxcX76059y48YNenp6zLGTsVImxIN44pIkTdPI5XKks2nKWv1s/EQlhVEPNsfq3yRqXud2T4zZrizpqTS5XG7Fdcw+n4/169ezfv16s0RmZmaGt956i2984xvm4CCZTPK3f/u3lJeX88ILL9Da2ordbmf//v0cOHCA8fFxKisrzXK/wsJCNm3axNWrV/nv//2/P9JgsXPnTp566ikcDgcej4fp6Wm+853vMDIywrFjx9iwYQNerxeAkpISXnzxRX79138dRVFQFAWr1WomnYODgxw/fpy//uu/5otf/CJbtmwhmUzS1dXFt7/9bRoaGtizZw/V1dWP7PWKx5OqqqTTaZwBhZodYTb/Yg2OAtuqTxLrOmg5je6fjuKcUMhkMg+l1M9qtRIIBGhvb+fXfu3XKCoqMvczuN3uZfd8Npvl7NmzdHV1EY/HaWhoeGQxQdd15ufn+c53vgNAc3MznZ2dd/3ZtrY2XnzxRXbu3EkgEADA5XKZpTROp5MdO3bQ3t5uvu6pqSn+9E//lLNnz2KxWMwkSYiVUFWVVCqFL+qgsb6ElpfKcRas/pBJ1yCXVrn+0xEsE4tlZB+21M9qtVJRUcGePXvYsmULf/u3f4vP57vrzxorNS+99BIvv/wyFRUVWCwWbDbbmpba5XI5rl27RmdnJ/F4nN/+7d8mGo0Sj8e5dOkSX/rSl1i/fj1btmzB7/dTUlLCJz7xCSYnJ3n77bc5duzYmlyneLI9cUnSUjanFZffgafIid25+jOkak7F5bNjd6n3/buBQICNGzcCmHW2RqndN7/5TdLpNKlUikQiwfT0NHv37qWuro7S0lIsFgs7d+7kH//xH7l16xY3b96kpKSEaDRKJBIhFAoxOTn5yEvPjDI/oz44nU6bCdD7l/BnZ2c5duwY8/PzRCIRampqaGpqora2FoBbt24xMDBAJBLh6aefNmeTYXHf1a1bt2htbaWiouKRv27xeFJsFuxuK+4iBy6vfdVXGHRdR81qOD02FOvDG2gYK84nT54km81SWlpKNBqlo6ODsrIy87nz+TzDw8O8++67FBQUsHXrVtLp9CNZWTGu+e233yaZTLJlyxai0Sjf+ta37vrzvb29ZLNZrl69Sjgcpr29nY0bN+JwOACw2+0UFxebP68oirmnwtgnKsSDsNoVHB4bnoADl8+x6s+nazrZVB6Hx45FeTj3ps1mo7i4GL/fz8LCAoFA4AOTHWP/0rFjx5iYmKC8vJxoNMru3buJRCJr9n2az+eZn58nnU5TUVFBbW0thYWFhMNhFEUhm81y/vx5ysrKCIVCBINBCgoKGBwc5MKFC/K9Lx6KJzpJsigWFKsFq82C1f6gg5KlTR9+fsDSdR3FqqA8QGCz2+3LlrCNFbH+/n78fj8+nw+32006ncbhcDA9PU0ikUBVVWw2G8PDw2az
g8nJSQCcTidOp9P8HXi0pSZOp5OZmRmGh4e5evUqs7OzTE1NEQwGqa+vN2eFi4uLqa2tJRAIoGkaw8PDzM7OmnsuSkpKmJ2dZX5+nnA4TDAYNFeYjH0YxgbOfD4vwVLclYXFGGG1KVhtyocYkBgx4h7xQdPRtcUY8bBuQ2PWt7Kykng8DsDIyAjj4+PMz8/z6U9/Gp/Ph8ViIZVK0dnZiaqqRKNRCgsLOXv27MO5kPuUTCYZGRmhq6uL8vJyqquruX379h3xyWhyUVlZid/vR9M0xsfH+dGPfoSmaTQ3N1NSUmKuJo2MjDA8PEx/fz9TU1Pk83mqqqpobm5+JK9TPAEs740lbAqKbXkDAJ372cm4sjihqTpWYxzxkOKEsZ/P+C602+13rXKxWCy43W5qampwu91YrVYmJycZHx8nn8/z3HPPEQwGzfHEalIUxZwwnp2dJZ1O4/P5SKVSTExMsLCwwPDwMPF43PxZh8OxbIVZiA/riU6SVub+wtxa0HWdRCLB4OAgR48epaWlxWzIoGka5eXl3Lhxg2AwaC6Bv/POO8zOzmKz2Zifn3/o17O0Mw6A2+3G4/Hcdxe5qakpLly4wN/93d+RSCQoLi6murqauro6s7tdU1MTVqvVDMZdXV0MDAxw8+ZNqqqqCIVCZpe8QCBgBkMjUPp8PmZmZshkMuTzeZlFFg/mMQkNxv6IWCxmdm4DzJWTSCTCtm3bsFqt2Gw2+vv7uXHjBpcvX2bHjh3mgGFiYoKjR4/S3t5OQ0PDA08eGBuqjSTEKMUx7td7Pa6qqkxPT9PT08Pk5CT79u2jsrKSRCJxx/PYbDZqa2t55plnzFgzPDzMd7/7XbOTZ3FxsRkDRkZG6Ozs5NChQ0xPTxONRqmsrJQkSayKxyA8mIyJ1dnZWbPrraIoZke6lSQNiqLg9/vZtWuXWbY7Pj7OpUuX+MEPfkBTUxNut3tFSYixej02NoamaWacCAQCd5QB343dbicYDFJYWMj169e5dOkSlZWVzMzMcOrUKfO1Lu3CJ8TD9pFJkj54vHOvwLE2YXBpm3Jd13nnnXf44Q9/yM2bN/nqV79KbW2t2QL7137t1/hf/+t/8ed//udMTk5isVh4+umnKSwsJBQKPfTzHIx9DG+99Rb/7//9PwCeeeYZPvaxj/HKK698YG3z3ZSVlfHMM89QV1dHT08Phw4dore3l+LiYhobG3E6nezZs4fdu3ebg62tW7dy5MgRjhw5wqlTp2hvb0dVVTRNw2q1LmvvaZTvGedaPC6HAYp/glZ8669ujFBVlZs3b/IXf/EXXLx4kYmJCRRF4Stf+Qpbt26lvr6e6upq8x6IxWJcvHiR//Sf/hNnzpzB6/USj8f5yU9+Qjwe58UXXyQcDtPf33/Hc+m6fs/BTy6X4+rVq/zBH/wB4+PjOBwOysvL+eM//mM2btx4zy55sViM7u5u3n33XTZs2MD69evv6EBl3LcOh4OtW7eyadMmc1CVTqeJxWIcPXoUt9vN7t27zd9rbGykqKiItrY2rl27xre//W1Onz5NTU0N5eXlK3vDhVgVqxsnstksN27c4E/+5E+4du0asVgMr9fLX/7lX7J161Y8Hs89H8Nms1FaWsrnPvc5sy343Nwcra2t/Pt//+/NydmVNHnJ5XJMTEzw2muvsbCwgMvloqKign/1r/4Ve/fuvef1WK1W1q1bByyW1//+7/8+2WyWUChEZWUlW7ZsMSdWhVgtH5lP2B3h6R6zxI9iEtkY2H/ta1/j8OHDZDIZvvKVr9DY2GiW4tntdjZv3sz//J//k3g8TiqVMmdovvSlLwGL54c8THa7ndbWVsrLy3nppZcAKCoqIhgM3nf3OI/Hg9vtprS0lLa2NoqKirh8+TLnzp1j586dVFdX37GUX1RURDQapbS0lMnJSZLJpNkKfHx83OzwZ5zxMD8/j8vlwu12r0lZgHhyrOi+X+PgYLVaqa6u5g//8A9JJBJks1ksFgvNzc3mYGXpgCUUClFRUUE4HGZkZIRkMml2kDt69CidnZ0oimLOxP7oRz/i7bff5hOf+AS/+qu/arbd/yA2m42mpib+/M//3LwWp9O54jbbsViMgYEBTp48yde//nX+5E/+BEVRSKVSzMzM8Pu///s8//zzfPrTn+bTn/40Nptt2WDIarXS0dHB/v37zdUsIz4WFhbi9Xqpqqpi69at2O12Tp48yTe+8Q1efvnlD/FfQYjHm91up7a2lv/4H/8jCwsLZqm50fFypYxziAxFRUU0NDTg8/mYmJhgfn5+RZOPNpuNSCTCV7/6VfPcMqfTSWVl5YqvJxAIsG3bNtatW8e/+Tf/hkwmg8PhIJ1O89u//duUlJRIa3+xqj4ySdId7jHIudcYaOk46WGMmYyNzG+99RbHjh2jvLycjo4O6uvr75gtsVgs+Hw+CgoKzFbaZ8+eJZ1OU1paSn19vfmYxuGzS//dOEHcmJm918yxxWKhoKAAh8NBMBgEFgOg3W5fUcmOEVCNRg1LB3VWq9VMbjRNW3atxs+kUinm5+eZn5/H5/OZm1ADgQCnTp1ifn4ev99PKpVidnaWiYkJqqqqzNalQqzUiu7jFU6uPMxcyu12U1lZad67xt8pikI+nzf3JsLi/RKLxVhYWDDb9dbV1fHGG2+we/duLBaLeb7SkSNHKC8v57nnnjM7Zd6LsW+hsbHRvBZjYLWSeFBUVMS2bdtwu92Mj4+bK8F9fX184xvf4KWXXuLZZ5+lsbHRPEzSaO8PizPmQ0NDy7r3GaVGxs8Z12G8P0sP2hXiSWTcg9XV1cuqKIyz04yJxKXfscafjZ9dOkYw4kkymWR6eppMJmNOPK6kdM9isWC325eVuhrXuJISfeM5rFYrXq/XPDdtYWGBiYkJbt++TUtLC+Fw2JwofX8FiREbjQoT2ack7teTPYLUFzdKa5qOpj7cEjRY3tJBB3R18fkepMIrkUjQ29vLP/7jPzI/P09HRwfr168nlUqRyWTweDxmXfH09LSZcMDiuScnT540N1xGo1EAcx/R7du3mZmZQVVV8wRxu91OOBxe8aDImM190HOHVFWlt7fX3Fhps9lIJBKMjIyQTqcpLCzE6XSysLDA3NwcmUwGr9eLxWJhaGiIoaEhEokETU1NOBwOysrKKCsrI5vNcuHCBWpqakilUvT19RGLxSgvL8fn88n5COID6bzXlltdjBGWh1Sa+f64YP67/rPmDffDaOe9dMbUGARks1lmZ2cZHx83JzAmJye5fv06qqpSXl6Ox+PB7/fz3HPPmfX72WyWvr4+BgcH2bBhAzt27KCxsXFFSY4RDwoLC+/rdRi8Xi9NTU2Ul5eTzWbNvz916hTf/va32bVrFzt37qSkpMTcpK3ruhmrYrEYV69epbi4mGg0anbKHB0dNTtlWq1W4vE4w8PDaJpmxkQh7pu+eL8Z967O6pZwG2MWXde5n6cy7kvjGI2ljInIRCLB5OQk8XgcXdeZnZ1lbGwMVVXx+XzkcjkSiQRjY2MUFRUBmIfXW61WIpGI+b28kusxDox/UNlsloW
FBebn57Hb7eRyOUZGRrhw4QJFRUU0NTVRVFS0bN+mcZhsNptlYmICu92O3++noKBg2QqZECvxRCdJak4nl1LJJvNo+dXfm6LlNHJpFTV3/wnZ6Ogo+/fv5+DBg7z++ut4vV76+vro7+/H7XZTV1dHcXExTqeT7u5uBgcHsVgs6LrOyMgIBw4c4OMf/7jZDhsWmyT09/dz8+ZNrl69ah6imclkKCwsZM+ePeYBrqtJ13VyuRxvvvkmHo+HUCiE0+lkdHSUs2fPEggE6OjoIBQKMTAwwOnTp5mcnKSkpASbzUZ3dzejo6MAbN68GZfLRU1NDbOzs9TX1/N//+//ZdeuXWSzWYaHh1EUhdbWVvN9EOJudA3UnEYumceirH7nR13X0XI6+ayK7T4TpQ+ysLBAV1cXBw8eNOv3+/r66OvrIxwOs3nzZgKBAC6Xa9keAGMCxefzEQwGzX0GazGpYAzk3j+YGxgYMCdvjKY0k5OTHDt2jFQqRUFBAbquMzw8zJkzZ/jUpz5FW1sbmqYxOzvLkSNH0HXdPEzXaHcejUZ57rnnVv11iSeTpi227s8m8w+tJffPo2s6ubRKPqvdV5L082iaxvT0NLdu3eLKlSsMDg6aq0bGUSMbN24kkUhw8+ZN9u/fb64AjY6Ocu3aNcrLy6mvrycYDK7Z5GMsFuPWrVt0dXXhcrnM/YzXrl3jU5/6FBs2bMDv95sJ0tmzZ+np6aG/v59EIkFnZyeRSISWlhZqa2spLS1dk+sWT44nLkkyZi/sNjvT/Qm6fjKC2+9Asa1BcFN15ifSaHM2QiG72VRgJYy6/p07dzI4OMjQ0JD5/4XDYV599VVcLheRSASXy8XAwACjo6NkMhn8fj9/9Ed/REdHB5FIxPy9iYkJLly4wNWrV5mfn2ffvn309/czNjZGSUkJW7ZsobCwcNWTJOO/SXFxMb29vVy7do1MJkMwGOSZZ55h/fr1tLS04PF4CAQCZqvwrq4ucxZ4z549bN68mQ0bNpjXu2nTJv7zf/7PfOtb3+Ly5cvY7XbKysp44403Vlw6JD56FEXBbreTjatM3Ixx8R8GFs9RW+0Q8d6q1UR3jGpvxFzx+DDsdrvZ/v7w4cNYLBZCoRA7d+7k2Wefpaqq6q4lp4qiUFhYSHt7OzU1NY/FqfShUIjXXnuNsrIyHA6H2bnT4/Fw8eJFEokEFosFv9/PF7/4RXbs2EEoFAIw9yB2dXVx+/ZtcrkcwWCQX/mVX2HTpk1ykKy4b0a31OR0jpHeWfIZFZtrDY6T0EHNaoxfj9FSVH3HGYIPQtM0pqam6Orq4siRI+aeoJmZGY4ePYrNZjMbJxmTEUeOHDHvt46ODj72sY9RUVGxpvt8bTab2Znz+vXrZpOY3/md3+HVV181X0c+n2dubo5Tp04xMDAALB5Afe7cOex2O7quEwgEJEkS982iP2Htv/L5POPj45w6dYrv/cN30a0aFoW12Witg66Cx1VAW2s7v/rGr5p7aO5l6T6C97PZbPj9fjweDzabjWQyyfz8PNls1ty7U1hYSEFBgRlUYHGWOZFIkEqlzLbB8LNa4eLi4hXXF38YRq2wUdecz+fRNA2bzYbH4zEHOMZmcqMhRS6XQ9d1s0TPaPpgvAZjH4LxuMbg13ivpP5YvJ+xqtnZ2UnnqZNcuHQO7PqazBAvXgBoOWhpWMe2rR08++yz92yUcNeHeS9sa5pGKpVibm6OXC4HYCYXRgkr3LlKZtw7sVgMh8OBx+N55GeLpFIpxsfHKS4uxuVymXun4vG4eSYcLL4+n8+3rMOWqqrEYjFSqRT5fN6MiwUFBbjd7mVlwhIXxL0YceLw4cMcOXaY/sFesK19nNiycSu7n9pNe3v7A5e6w88OiDVK15ayWCwUFhYSCASAxRK3mZkZM55YrVacTieBQMCMEWt1D+VyOTKZjDkmML7jjRJi41qMcsKZmRmz/fnS12fEiw/zHoqPpicuSTIaIBh1q8ZAe60oioLb7aaquorWja04HI5HPkMrhFhkbOwdGRlhYGCA4eFhczCwVqxWK2VlZVRUVlBbU3tfK85CiNVnxImBgQF6e3uZnJwkn8+v2fMbDUiqq6uprq4mGo1KEyIhHoEnLkkCzI4tqVRqzc/KMUrLnE7nI5+ZFULcXT6fJ5fLkcvllq2yrgWjjMdut8vAR4jHlLFnZ2n1w1oxVkiMccRql8QLIe7uiUyShBBCCCGEEOJBSR2YEEIIIYQQQiwhSZIQQgghhBBCLCFJkhBCCCGEEEIsIUmSEEIIIYQQQiwhSZIQQgghhBBCLCFJkhBCCCGEEEIs8UQe0mEcBPf+k5fXinEqtBwSKcTjSdM08vk8+Xx+Tc9Rg8UzUGw2G1arVc4/EeIxZYwj8vk8qqo+kjhhjCPkQHohHo0nLknSdZ1cLkcikWB8fHzNB0FGYCsqKiISiaAoyooSJVVVyeVyLCwsmNdsPFZBQQF2ux1FUdA0jWQySTKZXHZQrsViweVymf8ApNNpstks2WyWfD6P1WrF7/c/kkNujetUVZVsNksymSSfz5sHa7pcLpxOJwCJRIJUKrXskE8j8fR6vdjtdnRdNx8nl8uh6zqKomCz2XC73TidTvNAPiEMxudwYWGBWGyOWGyefD6/ptegKAqFhYX4/X4KCwsf6HOq6zoLCwuk02kA3G43Ho8Hi8VCJpMhk8mQTCaXxT5FUfB4PBQUFNwx6NI0jUwmQyKRQFVVQqEQNpvtkcUJTdNIpVKkUilsNhs+n89MKHVdJx6Pk8vlzEkwt9t919eVTqfJZDJks1lUVcVut+NyuczDfA25XI5MJmPGHSOWuFwu3G43gMSSjxDjc5hIJJiZmWFhYeGRHDpdVFSE3++/62f7bozrNmJAJpNB0zTzc+90Os37KJfLkc1myWQy5HI5c/LG4XDg8XhWPHZ5WIzrSafT5njF+C5feuh2LpcjnU6TTqdRVRWLxYLVasXpdJrXbRwEnEwml02W22w2PB4PDofD/Ll0Ok08Hr/rhLoxNikoKFiz90E8Xp64JElVVaanpzh37hzf/e53yWaza5skKQr+wkK2bevgl3/5lykoKFjRbHEikWBwcJCjR48yNjZGLpfDbrcTjUZ54YUXqKiowOPxkMvl6Ozs5J133mF+/mcDPJvNRltbG1u2bKG1tRWAvr4+uru76evr4/bt2wQCAX7lV36FmpqaR/KFr+s6U1NT3Lx5k87OTsbHx/F4PNTX19PW1saGDRtQFIV33nmHU6dOMTU1ZV6nz+ejsrKSl19+mcrKStLpNIODgxw7dozBwUGy2Swej4fi4mJ27NhBa2srDodjzV+jePzl83muXLnCu+92cu7cOXK53Jo+v9VqZcOGjWzbto09e/aYkwMrpes6uq7z7rvvcvr0aex2O9u3b+fpp58GYHBwkMuXL9PZ2WkmURaLBbfbzb59+9i7d+8dX/rZbJYrV65w4MABYrEYf/iHf0hJScmywcla0XWdubk5zp8/T2dnJ2VlZbz22msUFRVhtVrJ5XIcPHiQoaEh5ubm0DSN7du38+KLL5
oTREbMv3nzJteuXeP69evMz89TVlbGli1baGpqoqyszHzO27dv093dTWdnJ3NzcxQUFBCNRtm8eTPt7e2P5H0Qj1Yul+P8+XMcOXKEmzdvrvlkitVqZdu2Dp566im2bNmy4jiRy+Xo7u7m+vXr5ue+srKSTZs2sXHjRsLhMLquMzQ0ZN4f4+Pj2Gw2SktLaWhoYPfu3fh8vlV+hcvdvn2bGzducPnyZUZHRwkGg3R0dJjXbBgfH+fChQtcvHiR+fl5bDYbwWCQdevWsWvXLnw+H6qqMjMzwzvvvMP169dZWFhAURQikQh79+6loaEBr9dLNpvlwoUL/PjHPzYniAwWi4Wamhra2tp45pln1vS9EI+PJy7yq6rK1NQ0586d5dL1H9C82YHPr2C1r35SoKk6U+MqvaMeEocXeOmll5bN3Pw8uVyOWCyGrutUVFRgsVhYWFjg4sWLnD9/nt/4jd9g8+bNKIrCpUuXuHz5MtXV1bS0tOB2u7FardTU1OD3+83HHBoaYmBggMHBQQYGBrh9+zbPPvssNTU1q/gufLBbt27x3e9+lytXrlBVVUVdXR2apuF2u5cFp7Nnz3LkyBFqamrYvXs3iqLgdrsJBoPmjFo2myWRSJBIJCgvL8dmszE3N0d/fz9nzpzhC1/4Ai0tLRQVFT2S1yoeX9lslq6uLs5fPsKN4SOsb3PicCmw2iFCB1XV6e3Kkjo/gd1uZ+vWrfedJGWzWUZGRvjyl7/M8PAwZWVleL1eM0kaHx/n0qVLXLlyhZdeeslciXY4HJSWlt41Hh06dIhjx45x4sQJrl69yuc+9znC4fAjSQ5yuRxvvfUWhw8f5uLFi9TV1bFnzx58Ph+KopDJZDhz5gy6rjM6OsrY2BiKorBv3z4zSdI0jbNnz/K1r32NfD5PXV0dDQ0N9PX18eabb1JbW8s/+2f/jGg0Sm9vL/v37+ftt99mw4YN1NfXMzs7S3d3NydOnOB3f/d3aW1tldnkjxCjIuXixUtcuPoWM/FrNGxwLMaJVX9uyGd1ersy5N6NEwqFaGpqWnGcOHToEAcPHiQej1NXV0djYyPXr1/n5s2bnD59mt/7vd/DbrfT3d1NV1cXc3NzVFdXk8/n6enp4fjx41y+fJl/+S//5X3Hpg9jYmKC/v5++vr6yGQynDp1CrfbTXV19bIkKZlMMjU1hdfrpbi42Pzzt771LeLxOPv27UNRFHp7ezl69Cjl5eWUlZWRz+cZHh7my1/+Mr/0S7/Epk2biEQihEIhNm7cSDqdNqtzxsbGOHToEPF4nKqqqjV7D8Tj54lLkoxl1mw2QyCisb7dSiRqw+5Y/eCWz+nc6skwcF0nPZ2+r+X5goICamtrl5XDpVIpqqqq+PKXv8zAwABVVVWEQiFUVcXr9dLU1MTOnTvx+XxYLBa8Xu+yL/KmpibC4TD19fW88847jI+Pr3ld9VI/+MEPGBsbo7a2lldeeYWioiI0TcPpdJplR7A402+326mrq2Pv3r0oimIupxs/53a7qaqq4rnnnsPlcmG1WpmdnaW3t5c333yTW7duUV5eLkmSuIOxX9HmyBEp09my24G7YPVLM3VdJ5/VyWSypCZzD7RnUlVVJiYm+OY3v8mGDRvI5XI4HI5lscZ4zEAgwI4dOygqKjLLSwoLC5eVmem6zvXr17lw4QLpdJrt27dz7ty5RxInjD0gXV1dXLx4kVwux6ZNm5idnTVfk8Viwel08sorrwBw7tw5jh8/fscsv6ZpHDt2jGw2S0tLC8899xx+v5+KigpOnTrF6Ogo165do7S0lP7+fkZGRrBYLLz88suEw2Gmp6fp6uri0KFD9PX10dTUJEnSR4wRJ1yePGUh2LLLgcdrYbVnU3RNJ5PWSaezZCazy8pKf+7vvTf2OX36NJqm0dTUxN69e/H7/VRXV3P8+HF6enq4cuUKbW1tNDc3E4lEUFWVoqIiVFWltLSU8+fPc/ToUT7/+c9TVFS0ZhMlZWVl2O12qqqqGB8f58aNG3fdCxaJRNi+fbs5dkin0wwNDZFIJLh+/TqbN2+mpKSEqqoqPvnJTxIKhXC5XGSzWW7dusX3vvc9BgYGKC0tpbS0lGg0itvtNrc55HI5Tp48ic/nIxKJUF1dvSavXzyensgkafEfjQKfQkmZndIqO07n6q8k5bI6C3GV6TGFmSndvJ6VcDgchMNhs6QEflYPHY/H7xhQpVIpxsbGuHnzJoFAgGg0SjAYXDbzU1paSjgcprCwkBs3bqx5jbHBCDwnTpygsLCQsrIyZmZmmJubIxgMmnX/S68tm80yOTnJ9evX8fl8hEIhs5YYFt+vYDCIz+czg7jH42Fubs6ss34UTTvEPw26rmOzgy+gEK2y4/UprPatoeuQzegEQmlycz+LVSv/fZ3JyUl6enq4ceMGv/zLv8z4+DipVOqOn9U0jYWFBW7evEkoFMLv9xMOh/F6vWYtvlGPf/r0aTKZDCUlJYRCoUcWI4x9SIcPH0bXdcrLy/F4PMzOzpo/Z+zT3LRpE5qmMTU1xYULF+54PE3T6O/vx+FwUF5eTktLixkbL1++zOTkJLdu3TL3Nhr7IEpLSykvL8fpdDI6Oko+nzf3PIqPFuMz6XBZcAetlFXZ8Rau/mSrpkEqqeEPWZmaub8Yoaoqw8PDBAIBKioqaGxsxOPx4HK5uHz5MlevXuXmzZu0tbURjUYpLi42Jx40TSOdTjMyMsLw8PCaf+6Liorwer1m0vJByZnP58Plcpl7kZLJJJlMBpvNRiqVIp/P43A4iEQiFBQUmJU26XTabIJh7HteOrm8NAYtLCyYY5Xy8vI1ew/E4+eJS5JMFlCsYLVbsNsta7KSBBo2m4UHaURj3LzGZsNcLsfk5CRXr17F5XKZCQEs7j8aHR3l+vXr/PCHPyQQCLBnzx4+9alP0djYaM54GhuOE4nEstnj+3WvQHmvQZWmacRiMS5fvkxrayu3b9/m0KFDzM/Ps3XrVnbt2sXTTz9tbjy32WzE43F++tOfcuzYMSoqKmhra2PPnj3s2bPHnBU3mjkYSeTk5CSjo6Nks1nC4bBZeiPE3VgsYLUuxgeb3YKirG5yoGmLAx6rlftKyIz7L5/Pc/XqVU6cOEFJSQlPPfUUJ06cYGRkZNnPK4pCPp+nv7+fP/uzP8Pn81FVVcXWrVv5/Oc/TyAQMEtWJyYm+OlPf8q+ffuorKwkHo/f9+tayUBqJYlXJpNhZGSEv/u7v+N3f/d3CQaD3Lp1647HsVgsFBUVkclkcDqdH7ihPZ1O43Q6cTqdZpcwp9OJqqrMz88zMjKCpmlEIhE8Hg/xeJy+vj4KCgqYmJhgYmKCbDZrznCLjyaLAlYr2N6LE6s9iaCpOnm7Bet9jiOWdvW12Wx3fO6NEv6+vj50XTfHCcb9azQ/Mv73fho8fdgxAmDeq0azhA/6HZvNhs1mI5vNsrCwwPT0NGNjY9y+fZvNmzdTUFBgvna3270sfhrjLON/lzbNMd6/dDrNuXPnzBW44uLiFb0H4sn05CZJ76Pz4RbJP+zvr4SmaQwODvJ7v
/d7XLt2jVQqRSgU4s/+7M/Yvn07RUVF5PN52tvbaWhowOfzYbfb6e/v57/9t/9GT08PH//4x3njjTceeiBXVdVshQqLSZ3RxnglvzsyMkIqleLYsWO0trbyz//5P8fr9fK9732PAwcOMDo6yhe+8AUKCgp4+umnqa6upqKiArfbzbFjx7h48SJnz55F0zT27t1rzgqnUineeOMNrly5Qjabpbi4mN/6rd8y9zAI8aAe1j3/sB7n0qVLdHZ2MjY2xh/90R9RWFh41/uvoqKCF154gR07dlBeXs7o6CgnTpzgG9/4BgMDA3zxi18kFAoxPj7O3/zN31BfX8/WrVvx+Xxcvnz5ga7NWC1eWh7jcDhWfAyCpmn09PTwX/7Lf+HTn/40u3btYmFh4Y4kaaUURWH79u38zd/8DRaLhcrKSpqbmzl58iTvvPMOw8PDRKNR0uk0bW1tZlfMz3zmM6iqisPhoLm5mddff529e/dKExjx2LNYLDgcDlpaWjhz5gyJRILKykpqa2vZv38/R44c4fr166xbt+6uSU08HufQoUPs37+fV199dVlHyXvRdd1cdTWuxWq1rtp9k0wmOXDgAF/84heZmpoiGAyyb98+fuEXfoFoNGquQi1NkGZmZjhz5gwTExPU1NRQUVFxx+PmcjmmphYbf/3mb/4mjY2N0n79I+4jkyRZPuRI5ef+6kMaBSmKQklJCV/60peYnZ1lYmKCM2fO8Fd/9VdYrVY6OjoIhUK0t7ejqqoZwNatW4eqqnz/+9/n0qVLvPjii0QikQ9/Qe9JpVKcO3eOzs5O3n77bQCeffZZdu3aRXt7+4pXbCwWC1u3buXjH/84H/vYx7BarWQyGS5dusTAwABjY2PU1dXR0dHBli1bsNlsZkeaSCTC8ePHuXjxIjt37sThcJhfCn/8x3/M9PQ0Q0ND9PX1cfjwYZqbm9m0aZNZTiDE/brXp+aut/1d/vLDfvp0XWd+fp4f/vCHZDIZNm3aRC6XY2xsjHg8TjKZJBaLMTMzg9/vp6ysjEgkYrb+bWpqMicdvva1rzEyMsLMzAw3btygt7eXl19+GYvFwtzcHDMzM2iaxuTkJCUlJRQWFpor0h9EVVVisRjf+c53OHnypLmp+rOf/Sz79u3D6/Xec7B14cIFjh49ytTUFHv27CGfz3P79m1isRjJZJKZmRmzLflKVnVsNhuf/OQnicfjnD9/nv/wH/4DdrvdTCyNwY+iKPT09HDkyBHOnj3Lv/t3/47GxkbGx8e5desWR44coampiY6OjmX7JoX4MFZr0lVRFH7xF38Ru93O+fPn+bf/9t9itVoJhUJYrVY2bdq0rLW/sXoyOzvL//7f/5ve3l6am5v5zd/8zftKcEZHR+ns7OTrX/86gLly/ZnPfAaPx/PQX6fT6WTXrl18+ctfZnx8nIGBAc6fP8+BAwd45ZVXlq3+5vN5ent7OXPmDH//93/PJz7xCdra2u5YITLKdw8fPozX62XDhg1UVlbKPf8R95FJklZ1GeghPrbL5WLdunVks1lisRjBYJC//Mu/pL+/n4qKCsLh8LINxLqum7+jKArz8/MkEomHmiRZrVbC4TBNTU1ks1kAmpubCYVCK5plsVgs5qpXOBymrKyMQCAAQDgcxuPxMDExQTqdXlYGYAiHw0QiEQoLC5mdnTXbulssFhRFobm5mWw2ay6N37p1i56eHqqrq4lEIhLkxKq466dqFT5quq6TTCZJJBJMTU0xNzdn7tO5fPkysVgMgJaWFp5//nl8Pt+yvYkul4toNEpjYyPz8/Pm0QGxWIzp6Wneffddenp6yGQyDA0Nkcvl+MlPfsLCwgLr16+noaHh516fMVlRV1dHPp8nHo+bz7nSkp25uTkmJyfJZDIcPHgQRVGYmpqiv7/fbPUPi3EnGAyu6H0Lh8M8++yzVFdXMzw8TDqdxuv1cuPGDRKJBF6vF5vNxrVr1xgbGyMUCvHCCy8QjUaZmpoiEAgwNjbGtWvXaG1tpbCwcEXPK8S9rMY3ksViQdd1ysrKzPLZoaEhMpkMwWCQnp4ec78SYO7Hm5mZ4cCBAwwODlJfX8/27dspKyu7r/Pb3G43FRUVZofNSCRCbW3tqh2WrSgKwWCQtrY24vE4FRUVxONxbty4QXt7O0VFRdjtdrPs+MSJE3R3d1NXV8fu3bsJh8N3TLYsHh8zzZkzZ2hqaqKkpGRVEjzxT8sTnSTp+mJ9r6rqqPnV34Coqvp7+w4e5HdVs5TNOBjS4/FgtVqx2+1MTU0xPT2NpmnmwW9GgmJ0dnr/n412lqqqLvt3oyTG+Pl7BUK73U5NTQ3RaJQdO3YAmAfTraTzjaIoBAIB/H4/DofDDM4Wi2VZ+Z7xZ2NTufF3RtcZo17aeA+M6zfeK7vdTjabxe/3Mz4+TjKZvK//BuKjR9MX44OmLnaVWk26DqoKusbiVPJ90DSN0tJS5ufnuX37Nrdv30bXdfNzbrPZ6Ovr4+mnnzbr8I0YsTS2GH9WFIWCggKCwSBDQ0MMDg6SyWSYnp5GVVWuX79ORUXFijYtG4fUPvXUU2zduhVN08wN0T9vb8FSDofD7D538eJFNE0jHo8zPT3NzMwMvb29tLS0UFNTY8YAI64Z/xiv0xjcWSwWNm7cyIYNG9A0zWyEk0qlGBgYoLi4GJvNxq1bt0gkEuYZMW63G5fLZU5SGfscxUeUDtp7YwlNfe8vVpGmLcakB22a4HA4aG1tZcOGDaiqysLCAvF4nEQiwfDwMBUVFeaexKmpKbq7u3nrrbcIh8Ps2LGDvXv3LitXW8n96/f7aW1tpba2FsA8lPZ+VqOM7/2lY5Sl9/XS8Y3RAdfn85lNF6qrqzl27Bjz8/NkMhmzxO7UqVOcP3+ebDbLa6+9xrp167BareZrM97nZDLJ+Pg4XV1dfPaznyUYDMr5aOIJTpL097rNJTTiMY2sc/WTpFxOJ7mgk83e/3Mlk0kmJyfNNr2w2HChu7vbnJl1uVyk02nGxsbMTYlWq5W5uTkOHDhALpcz+/7DYoe4bDZLPB43T6dOJBLEYjHzFPuVrAQpimI+/9JzmFbKODl88+bNxONxLly4wNatW7Hb7fT29jI7O4vP58Pv95NIJIjH46iqam4w7+vr48aNG4yNjdHR0YHL5SKRSJinoAcCASwWC9PT0wwPDzM9PU11dbVsthY/l6pCNquTiKno2v01U3gQug65nEY6vTiZslJGGe5v/MZvkM1mzYQnn8/zP/7H/2B0dJT29nZ+/dd/nYKCAmZnZ0kkEjgcDnw+nxlHDh8+THFxMZFIhJqaGpqbm3nqqafMLpCTk5NcuHCBK1eu8Ad/8Ads2LBhRasnxv6DD7PS0t7ezrp16/jMZz5jvrbe3l5OnTrFyZMn+fznP09DQ4MZf4yYtrCwQC6XI51OE4vFzE3qDoeDVCrFzMwMVqsVt9tNNpvl3LlzDA0NYbPZaG1tNZO5fD7P2NgYw8PDlJSUMD09zfj4ODMzMzQ3N6/ajLh4/OXzOpmUTnxeRdPXYm8ypFMamfR7Eyr3yUgSjK516XSa
kydP0tvbi81mY9u2bSiKwsjICJ2dnbz55ps4HA4+97nPUVlZCfys6clK9+MYzRQ+TJt8VVXJZDLMz88Tj8fJ5/OkUikzwTMee2ZmhomJCUpKSnA6naRSKYaHhxkYGDAnbwFisRj79+/nr/7qr2hra+OFF17gmWeeMRtbGPuqDQMDA1y7do3Z2VmeffbZBxrriCfPE5ckKYqyeMPandy8kiWbjuHxWbBa1+IwWZibVskueKmvdJp7alZiamqKo0ePcvXqVbPrSj6fZ25ujm3btrFr1y5qa2uZn5/nwIEDZvtai8VCJpNhbGyMZ555hp07d5pnA128eJFz585x5coVrly5wvDwMF/72tc4ePAgpaWlfOELXzDPWFptFouF3/qt3+KHP/wh3d3d/Omf/il2u51EIkF1dTUdHR0UFxczMDDAoUOH6OnpMVeS4vE4iqJQX1/Pc889h9vtZmhoiMuXL3PhwgXzfTBa+UajUfbt20c0GpVSO3EHRVFwOBwkZi1cuZYhMT+L3WFZkyRJU2GkP0dZeLFEbaUzlTab7Y4zv3K5HF6vF7fbTWFhIcFgkEQiwbVr1zh//jyDg4M4HA5yuRyJRIJkMsnv/M7vUFNTYyY0Xq/XfDyr1UpRUREWi8VsG75Wh0kaqzdLX1s8Hsfv9+NyuQgEAng8Hmw2G/l8nq9+9auMjo5y8+ZNbty4gdfrJZlM0tHRQVtbG21tbUxPT/P3f//3jI+Pk8/nzQFYQ0MDbW1tbNq0CYvFwt69e9E0jePHj/MXf/EXeL1eMpkMmUwGj8fDs88+K6V2H0FGwj01pjN2KcXESA67Y/W/T3Qd1JzOcH+OunKb2aFupYaHhzl58iTDw8NkMhnS6TQzMzM0NDTw6quv0tzcTC6X49ChQ/z4xz/mxIkT7Nmzh29+85tmTPL7/bz++usUFxevWZfY8fFxTp8+zVtvvcX09DQ3btwgk8kwMDDAunXr+KVf+iX8fj/d3d384Ac/MDvUGfd2KpXi5ZdfNst+r1y5wn/9r/8VVVUJBoMcO3aMzs5OADo6OmhtbaWxsdF8/pMnT3L69Gmeeuop6uvr1/QgXfH4euKSJGOTYse2DqZnPoPNnkexsvrTPwA6qCXgdHhpbmy7r+4wPp+PpqYmAHMWyOgO09DQQH19PYWFhebhiB6Ph0wmAyy+5t27d9PS0kJ5ebmZmBUWFlJeXo6maZSVlbF7926CwSBer3dND4kzlrSbm5vJ5/O0tLQQj8exWCwUFhZSUVFBXV0dDoeDoqIimpqacLvd5vkvxt9Ho1Gqq6uxWq0Eg0EzGC4sLACLA0mXy0VZWRmNjY14vV5JksQd7HY769evJ5NZIBwuxe7MY1nl9t8mHWojFmqqW2jduOWeTRHgZ+WwSz/Luq5js9l4+umnmZubo7q62twbVFpaSktLC16v12zw4nA4KCwsZOvWrWar/fc/ptfrpbGxkX/xL/4FkUhk2Qbv1fb+/Q82m41IJMKWLVvw+XwEg0Fzf5PFYqGqqoqCggJKSkpoa2vDarXi9/uJRqPmfe/z+WhsbMTv95NOp4HFsqDa2loqKyvN976yspI9e/ZQWlrKzMyM+Z653W7C4TCNjY0rLhsUTwbjPK7NmzeTySYYGd2IzaGuTZzQFxOluhILzY2Lh77eT6JifDe63W6SyaT5PVtdXU1tbS02mw1N06ivr+f555+noaGBcDhsNkOyWq14vd77Wkl6GJxOJ+Fw2NxjbJS+Gve1Ud4fjUbp6OggHo+jaRqKouB0OvH7/WzevJlgMEg6naakpITXX38dXdcpLCxctr+oqKjojiSoqakJm81GaWmpVKEI0xOXJBnlai0t69A03dzkv5bP73K5zC/hlX6xFhQUUFNTsyz5MZavy8rK8Hg8ZjemhoYGioqKzMPe7HY7oVDojrOBQqEQuq5TUlJilugA5gDgfs5BeBj8fj/r1q2jpKTE3HgeCATM/UrGwKa+vp5QKGQ2cvB4PBQWFhIIBMwBkPHzxhcBYJYgRiIRioqKpHWnuCubzUZ1dTU2m43y8mqzbe3aPb+VkpJSs7HBg7JYLGzYsIFsNmuuCNntdoqLi7Hb7UQiEfL5vNne2u/3U1pa+oETN06nk2g0yic/+Un8fv8jvX+MOF5XV0coFFpWGqwoCi0tLSSTyTsOjA4Gg2ZjB4/HQ1NTE9FolEwmY272DoVCyzrueb1eamtrCQQCTE1Nkc1mzVgSDAYllnxEWa1W6uvrsdlsTE9vNPf6rhWbzUpFRSXl5eX3FSf8fj/19fVEIhHS6bS5Ch0MBs0VUavVSm1tLX6/n40bNy77faNMzxhzrBWj+cP745PVajUPhTXi2pYtW8yyfGNy1O/3U1JSYk7ulJaW8sorr9z1UPni4uI7jgipr6+nuLiYYDBoVvMIYdGfwKPEjc1/RhKxli/RmOk0lsjlRhPi8WNsBjY2/68l48Bkq9Uqe12EeEwZjQPy+bzZHGQtLR1HSJIuxKPxxCZJjwtJkoR4/DwuMULigxCPL4kTQny0PZFJkhBCCCGEEEI8KFnDFUIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQoglJEkSQgghhBBCiCUkSRJCCCGEEEKIJSRJEkIIIYQQQogl/n8YFlYdavXd9wAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# show the legend\n", + "image = mpimg.imread(\"./martin/tiles/legend.png\")\n", + "fig, ax = plt.subplots(figsize=(25, 8))\n", + "ax.imshow(image)\n", + "ax.set_axis_off()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e67f5f73-6c5d-44ba-b8c9-6376307445f6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/initial_parameters/lstm.ipynb b/examples/initial_parameters/lstm.ipynb new file mode 100644 index 0000000..b23a37d --- /dev/null +++ b/examples/initial_parameters/lstm.ipynb @@ -0,0 +1,213 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LSTM support\n", + "\n", + "The following notebook contains a walkthrough of the support for the LSTM module\n", + "\n", + "The notebook is organized in the following sections:\n", + "1. Icefabric API support\n", + "2. Icefabric CLI support" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Running imports\n", + "import json\n", + "import os\n", + "import threading\n", + "import zipfile\n", + "from pathlib import Path\n", + "\n", + "import httpx\n", + "from pyprojroot import here\n", + "\n", + "#Change directory to project root\n", + "os.chdir(here())\n", + "print(f\"directory changed to {here()}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Starting the API locally\n", + "def run_api():\n", + " \"\"\"Starts the icefabric API locally\"\"\"\n", + " !python -m app.main --catalog sql\n", + "\n", + "\n", + "threading.Thread(target=run_api).start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will start the API on localhost:8000. This can be visited at: http://localhost:8000/docs \n", + "\n", + "![Icefabric API](../../docs/img/icefabric_api.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To test the LSTM endpoint, we can use an example API call. This will return all config entries in JSON form. Each item in the output is the BMI Config contents for a specific catchment upstream of USGS 01010000" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make the request\n", + "response = httpx.get(\n", + " \"http://0.0.0.0:8000/v1/modules/lstm/\",\n", + " params={\n", + " \"identifier\": \"01010000\", # the Gauge ID we're testing\n", + " \"domain\": \"conus_hf\", # The CONUS domain\n", + " \"use_schaake\": \"false\", # Specifying we're not using Schaake for the ice fraction setting\n", + " },\n", + " timeout=60.0, # GLUE API requests can be slow depending on the network speed. 
Adding a 30s timeout to ensure requests go through\n", + ")\n", + "\n", + "print(f\"Status code: {response.status_code}\")\n", + "print(json.dumps(response.json(), indent=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CLI\n", + "\n", + "Now that we can return to the user all of the information / IPEs for a module, we can use the icefabric CLI to generate config files to disk for the user\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "Path(\"/tmp/lstm_output_test\").mkdir(exist_ok=True, parents=True)\n", + "\n", + "# Ensure the current working dir is where your `.pyiceberg.yaml` file is located\n", + "!uv run icefabric params --gauge 01010000 --nwm-module lstm --domain conus_hf --catalog sql --output /tmp/lstm_output_test" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that these are created, let's view the contents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with zipfile.ZipFile(\"/tmp/lstm_output_test/configs.zip\", \"r\") as f:\n", + " print(\"Files in archive:\")\n", + " for file_info in f.filelist:\n", + " print(f\" {file_info.filename} ({file_info.file_size} bytes)\")\n", + " f.extractall(\"/tmp/lstm_output_test/configs\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's view one of their contents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "files = Path(\"/tmp/lstm_output_test/configs\").glob(\"*\")\n", + "first_file = list(files)[0]\n", + "content = first_file.read_text()\n", + "print(content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also view the metadata.json file that was created. This will contain additional information about the query parameters used to make these configs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "metadata = Path(\"/tmp/lstm_output_test/configs/metadata.json\")\n", + "content = json.loads(metadata.read_text())\n", + "\n", + "print(content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Through either the API, or CLI, any modeler can create a BMI Config file for LSTM that is compatible with NextGen. 
Now, let's clean up the `/tmp/` dir" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!rm -rf /tmp/lstm_output_test" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/initial_parameters/soil_freeze_thaw.ipynb b/examples/initial_parameters/soil_freeze_thaw.ipynb new file mode 100644 index 0000000..1e8bfeb --- /dev/null +++ b/examples/initial_parameters/soil_freeze_thaw.ipynb @@ -0,0 +1,211 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Soil Freeze-Thaw support\n", + "\n", + "The following notebook contains a walkthrough of the support for the Soil Freeze-Thaw (SFT) module\n", + "\n", + "The notebook is organized in the following sections:\n", + "1. Icefabric API support\n", + "2. Icefabric CLI support" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Running imports\n", + "import json\n", + "import os\n", + "import threading\n", + "import zipfile\n", + "from pathlib import Path\n", + "\n", + "import httpx\n", + "from pyprojroot import here\n", + "\n", + "os.chdir(here())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Starting the API locally\n", + "def run_api():\n", + " \"\"\"Starts the icefabric API locally\"\"\"\n", + " !python -m app.main --catalog sql\n", + "\n", + "\n", + "threading.Thread(target=run_api).start()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will start the API on localhost:8000. This can be visited at: http://localhost:8000/docs \n", + "\n", + "![Icefabric API](../../docs/img/icefabric_api.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To test the SFT endpoint, we can use an example API call. This will return all config entries in JSON form. Each item in the output is the BMI Config contents for a specific catchment upstream of USGS 01010000" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make the request\n", + "response = httpx.get(\n", + " \"http://0.0.0.0:8000/v1/modules/sft/\",\n", + " params={\n", + " \"identifier\": \"01010000\", # the Gauge ID we're testing\n", + " \"domain\": \"conus_hf\", # The CONUS domain\n", + " \"use_schaake\": \"false\", # Specifying we're not using Schaake for the ice fraction setting\n", + " },\n", + " timeout=60.0, # GLUE API requests can be slow depending on the network speed. 
Adding a 30s timeout to ensure requests go through\n", + ")\n", + "\n", + "print(f\"Status code: {response.status_code}\")\n", + "print(json.dumps(response.json(), indent=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CLI\n", + "\n", + "Now that we can return to the user all of the information / IPEs for a module, we can use the icefabric CLI to generate config files to disk for the user\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "Path(\"/tmp/sft_output_test\").mkdir(exist_ok=True, parents=True)\n", + "\n", + "# Ensure the current working dir is where your `.pyiceberg.yaml` file is located\n", + "!uv run icefabric params --gauge 01010000 --module sft --domain conus --catalog sql --output /tmp/sft_output_test" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that these are created, let's view the contents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with zipfile.ZipFile(\"/tmp/sft_output_test/configs.zip\", \"r\") as f:\n", + " print(\"Files in archive:\")\n", + " for file_info in f.filelist:\n", + " print(f\" {file_info.filename} ({file_info.file_size} bytes)\")\n", + " f.extractall(\"/tmp/sft_output_test/configs\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's view one of their contents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "files = Path(\"/tmp/sft_output_test/configs\").glob(\"*\")\n", + "first_file = list(files)[0]\n", + "content = first_file.read_text()\n", + "print(content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also view the metadata.json file that was created. This will contain additional information about the query parameters used to make these configs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "metadata = Path(\"/tmp/sft_output_test/configs/metadata.json\")\n", + "content = json.loads(metadata.read_text())\n", + "\n", + "print(content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Through either the API, or CLI, any modeler can create a BMI Config file for SFT that is compatible with NextGen. 
Now, let's clean up the `/tmp/` dir" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!rm -rf /tmp/sft_output_test" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/module_ipes/modules_ipes.ipynb b/examples/module_ipes/modules_ipes.ipynb new file mode 100644 index 0000000..11abd3f --- /dev/null +++ b/examples/module_ipes/modules_ipes.ipynb @@ -0,0 +1,405 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Demo: Generation & extraction of the IPEs for the NWM Modules\n", + "\n", + "Generate & extract relevant IPEs for the NWM modules.\n", + "\n", + "__Pre-requisites:__\n", + "\n", + "- `.env` with your AWS credentials" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "## Modules/Libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "from pathlib import Path\n", + "\n", + "from pyiceberg.catalog import load_catalog\n", + "\n", + "from icefabric.helpers import load_creds, load_pyiceberg_config\n", + "from icefabric.modules import (\n", + " get_cfe_parameters,\n", + " get_lasam_parameters,\n", + " get_lstm_parameters,\n", + " get_noahowp_parameters,\n", + " get_sacsma_parameters,\n", + " get_smp_parameters,\n", + " get_snow17_parameters,\n", + " get_topmodel_parameters,\n", + " get_troute_parameters,\n", + " get_ueb_parameters,\n", + ")\n", + "from icefabric.schemas.hydrofabric import HydrofabricDomains\n", + "\n", + "# dir is where the .env file is located\n", + "load_creds()\n", + "\n", + "# Loading the local pyiceberg config settings\n", + "pyiceberg_config = load_pyiceberg_config()" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "## User Arguments" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# Assumes your S3 .env file is set up for the NGWPC Test Account\n", + "catalog = load_catalog(\"glue\", **{\"type\": \"glue\", \"glue.region\": \"us-east-1\"})\n", + "\n", + "# Picking a gauge in CONUS and loading upstream connections\n", + "domain = HydrofabricDomains.CONUS\n", + "upstream_connections_path = Path(f\"data/hydrofabric/{domain.value}_upstream_connections.json\")\n", + "\n", + "if not upstream_connections_path.exists():\n", + " raise FileNotFoundError\n", + "\n", + "with open(upstream_connections_path) as f:\n", + " data = json.load(f)\n", + " print(\n", + " f\"Loading upstream connections generated on: {data['_metadata']['generated_at']} \"\n", + " f\"from snapshot id: {data['_metadata']['iceberg']['snapshot_id']}\"\n", + " )\n", + " upstream_dict = data[\"upstream_connections\"]\n", + "\n", + "# Pick the gauge identifier\n", + "# identifier = 'gages-06710385'\n", + "identifier = \"gages-11280000\"" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## TopModel" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "topmodel_pydantic_models = get_topmodel_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict\n", + ")\n", + "topmodel_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## OWP NOAH MODULAR" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "noahowp_pydantic_models = get_noahowp_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict\n", + ")\n", + "noahowp_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## LASAM" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "lasam_pydantic_models = get_lasam_parameters(\n", + " catalog=catalog,\n", + " namespace=domain.value,\n", + " identifier=identifier,\n", + " upstream_dict=upstream_dict,\n", + " sft_included=True,\n", + " soil_params_file=\"vG_default_params_HYDRUS.dat\",\n", + ")\n", + "lasam_pydantic_models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "lasam_pydantic_models = get_lasam_parameters(\n", + " catalog=catalog,\n", + " namespace=domain.value,\n", + " identifier=identifier,\n", + " upstream_dict=upstream_dict,\n", + " sft_included=False,\n", + " soil_params_file=\"vG_default_params_HYDRUS.dat\",\n", + ")\n", + "lasam_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "## LSTM" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "lstm_pydantic_models = get_lstm_parameters(\n", + " catalog=catalog,\n", + " namespace=domain.value,\n", + " identifier=identifier,\n", + " upstream_dict=upstream_dict,\n", + ")\n", + "lstm_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "## SMP" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "module = \"CFE-S\" # , 'CFE-X', 'LASAM', 'TopModel'\n", + "smp_pydantic_models = get_smp_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict, module=module\n", + ")\n", + "smp_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "## Snow17" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "snow17_pydantic_models = get_snow17_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict, envca=False\n", + ")\n", + "\n", + "snow17_pydantic_models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "snow17_pydantic_models = get_snow17_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict, envca=True\n", + ")\n", + "snow17_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "## SAC SMA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + 
"metadata": {}, + "outputs": [], + "source": [ + "sacsma_pydantic_models = get_sacsma_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict, envca=False\n", + ")\n", + "sacsma_pydantic_models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "sacsma_pydantic_models = get_sacsma_parameters(\n", + " catalog=catalog, namespace=domain.value, identifier=identifier, upstream_dict=upstream_dict, envca=True\n", + ")\n", + "sacsma_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "## T-Route" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "troute_pydantic_models = get_troute_parameters(\n", + " catalog=catalog,\n", + " namespace=domain.value,\n", + " identifier=identifier,\n", + " upstream_dict=upstream_dict,\n", + ")\n", + "troute_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "## UEB" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "ueb_pydantic_models = get_ueb_parameters(\n", + " catalog, namespace=domain.value, identifier=identifier, envca=False, upstream_dict=upstream_dict\n", + ")\n", + "\n", + "ueb_pydantic_models" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "## CFE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "cfex_pydantic_models = get_cfe_parameters(\n", + " catalog, namespace=domain.value, identifier=identifier, module=\"CFE-X\", upstream_dict=upstream_dict\n", + ")\n", + "cfex_pydantic_models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28", + "metadata": {}, + "outputs": [], + "source": [ + "cfes_pydantic_models = get_cfe_parameters(\n", + " catalog,\n", + " namespace=domain.value,\n", + " identifier=identifier,\n", + " module=\"CFE-S\",\n", + " upstream_dict=upstream_dict,\n", + " sft_included=True,\n", + ")\n", + "cfes_pydantic_models" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..88fc749 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,70 @@ +site_name: Icefabric +site_description: A version controlled method for storing multi-dimensional and tabular data through Pyiceberg and icechunk +repo_url: https://github.com/NGWPC/icefabric +repo_name: NGWPC/icefabric +theme: + name: material + features: + - navigation.footer + - navigation.indexes + - navigation.sections + - navigation.tabs + - navigation.top + - navigation.tracking + - search.suggest + icon: + repo: fontawesome/brands/github + palette: + - scheme: light blue + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate + toggle: + icon: material/brightness-4 + name: Switch to light mode + +markdown_extensions: + - def_list + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.arithmatex: + generic: 
true + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed: + alternate_style: true + - pymdownx.inlinehilite + - pymdownx.snippets + - admonition + - footnotes + - pymdownx.details + - pymdownx.superfences + - pymdownx.mark + - attr_list + - md_in_html + +plugins: + - search + +nav: + - Home: + - index.md + - Description: description.md + - User Guide: + - user_guide/index.md + - API: user_guide/icefabric_api.md + - Tools: user_guide/icefabric_tools.md + - Terraform S3 Glue Endpoint: user_guide/terraform.md + - Data Support: + - datasets/index.md + - Hydrofabric: datasets/hydrofabric.md + - RAS XS: datasets/ras_xs.md + - Module Support: + - modules/index.md + - SFT: modules/sft.md diff --git a/py.typed b/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..e0ab7ea --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,191 @@ +[build-system] +build-backend = "hatchling.build" +requires = ["hatchling", "hatch-vcs"] + +[tool.hatch] +version.source = "vcs" +build.hooks.vcs.version-file = "src/icefabric/_version.py" + +[tool.hatch.build.targets.wheel] +packages = ["src/icefabric", "app", "tools"] + +[project] +name = "icefabric" +version = "2025.7.1" +description = "An Apache Iceberg + Icechunk implementation of Hydrofabric data services" +readme = "README.md" +requires-python = ">=3.11" +license = { file = "LICENSE" } +authors = [ + { name = "Tadd Bindas", email = "tadd.bindas@ertcorp.com" }, + { name = "Sylvia Chin", email = "sylvia.c.chin@rtx.com" }, + { name = "Daniel Cumpton", email = "dcumpton@rtx.com" }, + { name = "Quercus Hamlin", email = "qhamlin@asrcfederal.com" }, + { name = "Brock Hinkson", email = "brock.w.hinkson@rtx.com" }, + { name = "Farshid Rahmani", email = "Farshid.Rahmani@rtx.com" }, +] +maintainers = [ + { name = "Tadd Bindas", email = "tadd.bindas@ertcorp.com" }, + { name = "Sylvia Chin", email = "sylvia.c.chin@rtx.com" }, + { name = "Daniel Cumpton", email = "dcumpton@rtx.com" }, + { name = "Quercus Hamlin", email = "qhamlin@asrcfederal.com" }, + { name = "Brock Hinkson", email = "brock.w.hinkson@rtx.com" }, + { name = "Farshid Rahmani", email = "Farshid.Rahmani@rtx.com" }, +] + +dependencies = [ + "ambiance==1.3.1", + "click==8.2.1", + "dask==2025.5.1", + "fiona==1.10.1", + "fsspec==2025.5.1", + "geodatasets==2024.8.0", + "geopandas==1.0.1", + "h5netcdf==1.6.1", + "h5py==3.13.0", + "icechunk==1.1.4", + "ipywidgets==8.1.6", + "jupytergis==0.5.0", + "jupyterlab==4.4.3", + "kerchunk==0.2.8", + "mapclassify==2.9.0", + "matplotlib==3.10.3", + "netCDF4==1.7.2", + "numpy==2.3.2", + "pandas==2.2.3", + "polars==1.31.0", + "pyarrow==20.0.0", + "pyiceberg[s3fs,glue,sql-sqlite]==0.9.1", + "pyprojroot==0.3.0", + "python-dotenv==1.1.0", + "rioxarray==0.19.0", + "rustworkx==0.16.0", + "s3fs==2025.5.1", + "scipy==1.15.3", + "tifffile==2025.5.21", + "tqdm==4.67.1", + "virtualizarr==1.3.2", + "xarray==2025.07.1", + "zarr==3.1.0", +] + +[project.scripts] +icefabric = "icefabric.cli.main:cli" + +[project.optional-dependencies] +docs = [ + "mkdocs-material==9.6.14", + "sympy==1.14.0" +] + +[dependency-groups] +app = [ + "fastapi[standard]>=0.115.12", + "pydantic==2.11.5", + "pydantic-settings==2.9.1", +] +dev = [ + "pre-commit==3.8.0", + "ruff==0.11.13", + "black==25.1.0", + "mypy==1.15.0", + "nbstripout==0.8.1", +] +examples = [ + "folium==0.19.5", + 
"httpx==0.28.1", + "ipykernel==6.29.5", + "jupyterlab==4.4.3", + "jupytergis==0.5.0", + "matplotlib==3.10.3", +] +tests = [ + "moto==5.1.8", + "pytest==8.4.1", + "pytest-asyncio==1.1.0", + "pytest-cov==6.1.1", +] + +[tool.uv] +default-groups = ["app", "dev", "examples", "tests"] + +[tool.black] +line-length = 110 +skip-string-normalization = true +preview = true +target-version = ['py311'] + +[tool.ruff] +line-length = 110 +exclude = [".csv", "LICENSE", ".tf", ".tfvars"] +lint.select = [ + "F", # Errors detected by Pyflakes + "E", # Error detected by Pycodestyle + "W", # Warning detected by Pycodestyle + "I", # isort + "D", # pydocstyle + "B", # flake8-bugbear + "Q", # flake8-quotes + "TID", # flake8-tidy-imports + "C4", # flake8-comprehensions + "BLE", # flake8-blind-except + "UP", # pyupgrade + "RUF100", # Report unused noqa directives +] +lint.ignore = [ + # line too long -> we accept long comment lines; black gets rid of long code lines + "E501", + # Do not assign a lambda expression, use a def -> lambda expression assignments are convenient + "E731", + # allow I, O, l as variable names -> I is the identity matrix + "E741", + # Missing docstring in public package + "D104", + # Missing docstring in public module + "D100", + # Missing docstring in __init__ + "D107", + # Errors from function calls in argument defaults. These are fine when the result is immutable. + "B008", + # __magic__ methods are are often self-explanatory, allow missing docstrings + "D105", + # first line should end with a period [Bug: doesn't work with single-line docstrings] + "D400", + # First line should be in imperative mood; try rephrasing + "D401", + ## Disable one in each pair of mutually incompatible rules + # We don't want a blank line before a class docstring + "D203", + # We want docstrings to start immediately after the opening triple quote + "D213", + # Bare except okay for passing + "E722", +] + +[tool.ruff.lint.isort] +known-first-party = ["app", "icefabric"] + +[tool.ruff.lint.pydocstyle] +convention = "numpy" + +[tool.ruff.lint.per-file-ignores] +"docs/*" = ["I"] +"tests/*" = ["D"] +"*/__init__.py" = ["F401"] + +[tool.mypy] +python_version = "3.11" +warn_return_any = false +disallow_any_unimported = false +warn_unused_configs = true +strict_optional = true +ignore_missing_imports = true +check_untyped_defs = true +disallow_untyped_defs = true +no_implicit_optional = true +show_error_codes = true +warn_unused_ignores = true + +[tool.pytest.ini_options] +pythonpath = ["tests", "src/icefabric_api", "src/icefabric_tools"] +testpaths = ["src/icefabric_api/tests", "src/icefabric_tools/tests", "tests"] diff --git a/src/icefabric/__init__.py b/src/icefabric/__init__.py new file mode 100644 index 0000000..89919dc --- /dev/null +++ b/src/icefabric/__init__.py @@ -0,0 +1,6 @@ +"""An Apache Iceberg + Icechunk implementation of Hydrofabric data services""" + +from . 
import builds, cli, helpers, hydrofabric, modules, ras_xs, schemas, ui +from ._version import __version__ + +__all__ = ["__version__", "builds", "cli", "hydrofabric", "helpers", "modules", "schemas", "ui", "ras_xs"] diff --git a/src/icefabric/builds/__init__.py b/src/icefabric/builds/__init__.py new file mode 100644 index 0000000..fcfd6bf --- /dev/null +++ b/src/icefabric/builds/__init__.py @@ -0,0 +1,14 @@ +"""Functions / objects to be used for building tables/objects""" + +from .build import build_iceberg_table +from .graph_connectivity import load_upstream_json, read_edge_attrs, read_node_attrs +from .icechunk_s3_module import IcechunkRepo, S3Path + +__all__ = [ + "build_iceberg_table", + "load_upstream_json", + "read_edge_attrs", + "read_node_attrs", + "IcechunkRepo", + "S3Path", +] diff --git a/src/icefabric/builds/build.py b/src/icefabric/builds/build.py new file mode 100644 index 0000000..8f5c969 --- /dev/null +++ b/src/icefabric/builds/build.py @@ -0,0 +1,43 @@ +"""Build scripts for pyiceberg""" + +import pyarrow.parquet as pq +from pyiceberg.catalog import Catalog + + +def build_iceberg_table( + catalog: Catalog, parquet_file: str, namespace: str, table_name: str, location: str +) -> None: + """Builds the hydrofabric catalog based on the .pyiceberg.yaml config and defined parquet files. + + Creates a new Iceberg table from a parquet file if the table doesn't already exist. + If the table exists, the function will skip the build process and print a message. + + Parameters + ---------- + catalog : Catalog + The Apache Iceberg Catalog instance used to manage tables + parquet_file : str + Path to the parquet file to be loaded into the Iceberg table + namespace : str + The namespace (database/schema) where the table will be created + table_name : str + The name of the table to be created in the catalog + location : str + The storage location where the Iceberg table data will be stored + + Notes + ----- + - The function will automatically infer the schema from the parquet file + - If the table already exists, no action is taken and a message is printed + - The parquet data is appended to the newly created Iceberg table + """ + if catalog.table_exists(f"{namespace}.{table_name}"): + print(f"Table {table_name} already exists. 
Skipping build") + else: + arrow_table = pq.read_table(parquet_file) + iceberg_table = catalog.create_table( + f"{namespace}.{table_name}", + schema=arrow_table.schema, + location=location, + ) + iceberg_table.append(arrow_table) diff --git a/src/icefabric/builds/graph_connectivity.py b/src/icefabric/builds/graph_connectivity.py new file mode 100644 index 0000000..766277d --- /dev/null +++ b/src/icefabric/builds/graph_connectivity.py @@ -0,0 +1,148 @@ +"""Creates a rustworkx graph containing network table information""" + +from datetime import UTC, datetime +from pathlib import Path + +import polars as pl +import rustworkx as rx +from pyiceberg.catalog import Catalog +from tqdm import tqdm + + +def _build_graph(flowpaths: pl.LazyFrame, network: pl.LazyFrame) -> rx.PyDiGraph: + """A function to build a rustworkx graph for getting upstream river segments + + Parameters + ---------- + flowpaths: pl.LazyFrame + The flowpaths table in LazyFrame mode + network: pl.LazyFrame + The network table in LazyFrame mode + + Return + ------ + rx.PyDiGraph + The rustworkx graph object + """ + fp = flowpaths.select([pl.col("id"), pl.col("toid")]).collect() + + if "wb-0" not in fp["id"].to_list(): + wb0_df = pl.DataFrame({"id": ["wb-0"], "toid": [None]}) + fp = pl.concat([fp, wb0_df], how="vertical") + + fp = fp.lazy() + + network_table = network.select([pl.col("id"), pl.col("toid")]).collect() + network_table = network_table.filter(pl.col("id").str.starts_with("wb-").not_()) + + fp = fp.with_row_index(name="idx").collect() + network_table = network_table.unique(subset=["id"]) + + _values = zip(fp["idx"], fp["toid"], strict=False) + fp = dict(zip(fp["id"], _values, strict=True)) + + # define network as a dictionary of nexus ids to downstream flowpath ids + network_dict = dict(zip(network_table["id"], network_table["toid"], strict=True)) + + graph = rx.PyDiGraph(check_cycle=False, node_count_hint=len(fp), edge_count_hint=len(fp)) + gidx = graph.add_nodes_from(fp.keys()) + for idx in tqdm(gidx, desc="Building network graph"): + id = graph.get_node_data(idx) + nex = fp[id][1] # the downstream nexus id + terminal = False + ds_wb = network_dict.get(nex) + if ds_wb is None: + # we found a terminal nexus + terminal = True + if not terminal: + graph.add_edge(idx, fp[ds_wb][0], nex) + + return graph + + +def serialize_node_attrs(node_data): + """Convert node data to string key-value pairs""" + return {"data": str(node_data)} + + +def serialize_edge_attrs(edge_data): + """Convert edge data to string key-value pairs""" + return {"data": str(edge_data)} + + +def read_node_attrs(node_data): + """Convert node data to an output list""" + return node_data["data"] + + +def read_edge_attrs(edge_data): + """Convert edge data to an output list""" + return edge_data["data"] + + +def load_upstream_json(catalog: Catalog, namespaces: list[str], output_path: Path) -> dict[str, rx.PyDiGraph]: + """Builds an upstream lookup graph and save to JSON file + + Parameters + ---------- + catalog : str + The pyiceberg catalog + namespaces : str + the hydrofabric namespaces to read from + output_file : Path + Where the json file should be saved + """ + graph_dict = {} + for namespace in namespaces: + output_file = output_path / f"{namespace}_graph_network.json" + network_table = catalog.load_table(f"{namespace}.network") + flowpaths_table = catalog.load_table(f"{namespace}.flowpaths") + if not output_file.exists(): + graph = _build_graph(flowpaths=flowpaths_table.to_polars(), network=network_table.to_polars()) + graph.attrs = { + 
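# Provenance attributes: compared later against the tables' current snapshot IDs to decide whether a cached graph on disk is stale and must be rebuilt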
"generated_at": datetime.now(UTC).isoformat(), + "catalog_name": catalog.name, + "flowpath_snapshot_id": str(flowpaths_table.current_snapshot().snapshot_id), + "network_snapshot_id": str(network_table.current_snapshot().snapshot_id), + } + output_path.parent.mkdir(parents=True, exist_ok=True) + rx.node_link_json( + graph, + path=str(output_file), + graph_attrs=lambda attrs: dict(attrs), + edge_attrs=serialize_edge_attrs, + node_attrs=serialize_node_attrs, + ) + else: + print(f"Loading existing network graph from disk for: {namespace}") + graph: rx.PyDiGraph = rx.from_node_link_json_file( + str(output_file), + edge_attrs=read_edge_attrs, + node_attrs=read_node_attrs, + ) # type: ignore + uses_updated_network_table = graph.attrs["network_snapshot_id"] == str( + network_table.current_snapshot().snapshot_id + ) + uses_updated_flowpaths_table = graph.attrs["flowpath_snapshot_id"] == str( + flowpaths_table.current_snapshot().snapshot_id + ) + if not uses_updated_network_table or not uses_updated_flowpaths_table: + graph = _build_graph( + flowpaths=flowpaths_table.to_polars(), + network=network_table.to_polars(), + ) + graph.attrs = { + "generated_at": datetime.now(UTC).isoformat(), + "catalog_name": catalog.name, + "flowpath_snapshot_id": str(flowpaths_table.current_snapshot().snapshot_id), + "network_snapshot_id": str(network_table.current_snapshot().snapshot_id), + } + rx.node_link_json( + graph, + path=str(output_file), + graph_attrs=lambda attrs: attrs, # using the graph's own attributes + edge_attrs=serialize_edge_attrs, + node_attrs=serialize_node_attrs, + ) + graph_dict[namespace] = graph + return graph_dict diff --git a/src/icefabric/builds/iceberg_table.py b/src/icefabric/builds/iceberg_table.py new file mode 100644 index 0000000..61f1855 --- /dev/null +++ b/src/icefabric/builds/iceberg_table.py @@ -0,0 +1,268 @@ +import os + +import pyarrow as pa +import pyarrow.parquet as pq +import s3fs +from pyiceberg.catalog import load_catalog +from pyiceberg.schema import Schema +from pyiceberg.types import ( + BinaryType, + BooleanType, + DoubleType, + LongType, + NestedField, + StringType, +) + + +class IcebergTable: + """ + Create a Iceberg table per parquet file w/ its inherited schema set. + + Note: Allows for user to have the option to read parquets from S3 or locally. It is okay to expect + following warning statements throughout process: "Iceberg does not have a dictionary type. will be inferred as string on read." + + """ + + def __init__(self) -> None: + # Generate folder for iceberg catalog + if not os.path.exists(f"{os.getcwd()}/iceberg_catalog"): + os.makedirs(f"{os.getcwd()}/iceberg_catalog") + + # Initialize namespace to be set for Iceberg catalog + self.namespace = "" + + def read_data_dirs(self, data_dir: str) -> list: + """ + Extract the list of parquet directories. + + Args: + data_dir (str): Parent directory of the parquet files. + Note: All the ml_auxiliary_data parquet + files are save under same filenames, + but categorized by 'vpuid' conditions. + + Return (list): List of directories associated with each parquet file. + + """ + parquet_list = [] + for folder, _subfolders, files in os.walk(data_dir): + if folder != data_dir: + for file in files: + parquet_list.append(f"{folder}/{file}") + + return parquet_list + + def read_data(self, parquet_file_path: str) -> pa.Table: + """ + Load a single parquet as a Pyarrow table. + + Args: + parquet_file_path (str): Directory of a single parquet. + + + Return: A Pyarrow table. 
+ + """ + data = pq.read_table(parquet_file_path) + + return data + + def establish_catalog( + self, catalog_name: str, namespace: str, catalog_settings: dict[str, str] | None = None + ) -> None: + """ + Creates a new Iceberg catalog. + + Defaults to saving in ./iceberg_catalog/{catalog_name}_catalog.db if no uri + specified in catalog_settings + Specify 'uri' and 'warehouse' to select location for catalog and files + + Args: + catalog_name (str): Name of the catalog to be created. + Default: 'dev' for development catalog + namespace (str): Name of namespace. + catalog_settings (str): Optional catalog settings accepted by pyiceberg.load_catalog() + + Return: None + + """ + # Check if catalog settings exist, if not initialize a URI and warehouse to default location + if not catalog_settings or not isinstance(catalog_settings, dict): + catalog_settings = {} + catalog_settings["uri"] = ( + f"sqlite:///iceberg_catalog/{catalog_name}_catalog.db" + if "uri" not in catalog_settings.keys() + else catalog_settings["uri"] + ) + catalog_settings["warehouse"] = ( + "file://iceberg_catalog" + if "warehouse" not in catalog_settings.keys() + else catalog_settings["warehouse"] + ) + + # Establish a new Iceberg catalog & its configuration + self.catalog = load_catalog( + name=catalog_name, + **catalog_settings, + ) + + # Establish namespace to be create w/in catalog + self.namespace = namespace + if self.namespace not in self.catalog.list_namespaces(): + self.catalog.create_namespace(self.namespace) + + return + + def convert_pyarrow_to_iceberg_schema(self, arrow_schema: Schema) -> Schema: + """ + Translate a given Pyarrow schema into a schema acceptable by Iceberg. + + Args: + arrow_schema (object): Pyarrow schema read from the loaded + parquet of interest. + + Return (Iceberge.Schema): Iceberg schema + + """ + fields = [] + for idx in range(len(arrow_schema)): + # Extraction of the datatype & name of each schema row + field_name = arrow_schema.field(idx).name + arrow_type = arrow_schema.field(idx).type + + # Iceberg datatypes to pyarrow datatypes + if pa.types.is_int32(arrow_type): + iceberg_type = LongType() + elif pa.types.is_string(arrow_type): + iceberg_type = StringType() + elif pa.types.is_float64(arrow_type): + iceberg_type = DoubleType() + elif pa.types.is_int64(arrow_type): + iceberg_type = LongType() + elif pa.types.is_boolean(arrow_type): + iceberg_type = BooleanType() + elif pa.types.is_binary(arrow_type): + iceberg_type = BinaryType() + elif pa.types.is_dictionary(arrow_type): + if pa.types.is_string(arrow_type.value_type): + iceberg_type = StringType() + elif pa.types.is_int32(arrow_type.value_type): + iceberg_type = LongType() + else: + raise ValueError(f"Unsupported PyArrow type: {arrow_type}") + + # Establish the new schema acceptable to Iceberg + fields.append( + NestedField(field_id=idx + 1, required=False, name=field_name, field_type=iceberg_type) + ) + # Iceberg schema + schema = Schema(*fields) + + return schema + + def create_table_for_parquet(self, iceberg_tablename: str, data_table: pa.Table, schema: Schema) -> None: + """ + Convert parquet Pyarrow table to iceberg table & allocate Iceberg catalog under the ./iceberg_catalog directory. + + Args: + iceberg_tablename (str): Name of the Iceberg table to be created. + + data_table (object): Pyarrow table + + schema (object): Unique Iceberg schema to be set for the Iceberg table. + + namespace (str): Namespace for which the Iceberg table will reside within + the Iceberg catalog. 
+ + Return: None + + """ + # Create an Iceberg table + iceberg_table = self.catalog.create_table( + identifier=f"{self.namespace}.{iceberg_tablename}", schema=schema + ) + + # Updates the Iceberg table with data of interest. + iceberg_table.append(data_table) + + return + + def create_table_for_all_parquets(self, parquet_files: list[str], app_name: str = "mip-xs") -> None: + """ + Convert parquets to Iceberg tables - each w/ their inherited schema. + + Args: + parquet_files (list): List of directories of the parquet files. + + app_name (str): Application to create Iceberg tables. + Options: 'mip-xs' & 'bathymetry_ml_auxiliary' + + Return: None + + Note: The sourced data structures for the data in 'mip-xs' & + 'bathymetry_ml_auxiliary' S3 buckets differ. + + """ + for _idx, parquet_file in enumerate(parquet_files): + if app_name == "mip_xs": + iceberg_tablename = f"{os.path.split(os.path.split(parquet_file)[1])[1].split('.')[0]}" + + elif app_name == "bathymetry_ml_auxiliary": + iceberg_tablename = f"{os.path.split(os.path.split(parquet_file)[0])[1]}" + + data_table = self.read_data(parquet_file) + data_pyarrow_schema = data_table.schema + schema = self.convert_pyarrow_to_iceberg_schema(data_pyarrow_schema) + self.create_table_for_parquet(iceberg_tablename, data_table, schema) + return + + def create_table_for_all_s3parquets(self, app_name: str, bucket_name: str) -> None: + """ + Convert parquets from S3 to Iceberg tables - each w/ their inherited schema. + + Parameters + ---------- + app_name : str + Application to create Iceberg tables. + Options: 'mip_xs', 'ble_xs' & 'bathymetry_ml_auxiliary' + bucket_name : str + S3 bucket name. + + Returns + ------- + None + + """ + fs = s3fs.S3FileSystem( + key=os.environ["AWS_ACCESS_KEY_ID"], + secret=os.environ["AWS_SECRET_ACCESS_KEY"], + token=os.environ["AWS_SESSION_TOKEN"], + ) + glob_patterns = { + "mip_xs": f"{bucket_name}/full_mip_xs_data/**/*.parquet", + "ble_xs": f"{bucket_name}/full_ble_xs_data/**/*.parquet", + "bathymetry_ml_auxiliary": f"{bucket_name}/ml_auxiliary_data/**/*.parquet", + } + if app_name not in glob_patterns: + raise KeyError(f"App {app_name} not supported. Please add your app to the glob_patterns") + + # Table Name Factory + parquet_files = fs.glob(glob_patterns[app_name]) + pyarrow_tables = {} + for file_path in parquet_files: + if app_name in {"mip_xs", "ble_xs"}: + # Extracts the HUC as the table name + table_name = file_path.split("/")[-1].removesuffix(".parquet") + elif app_name in {"bathymetry_ml_auxiliary"}: + # Extract vpuid from directory structure + table_name = file_path.split("/")[-2] + else: + raise KeyError(f"App {app_name} not supported. 
Please add your app the table name factory") + s3_uri = f"s3://{file_path}" + pyarrow_tables[table_name] = pq.read_table(s3_uri, filesystem=fs) + + for table_name, data_table in pyarrow_tables.items(): + schema = self.convert_pyarrow_to_iceberg_schema(data_table.schema) + self.create_table_for_parquet(table_name, data_table, schema) diff --git a/src/icefabric/builds/icechunk_s3_module.py b/src/icefabric/builds/icechunk_s3_module.py new file mode 100644 index 0000000..4aeae8a --- /dev/null +++ b/src/icefabric/builds/icechunk_s3_module.py @@ -0,0 +1,448 @@ +""" +NGWPC Icechunk interface module + +Module containing classes/methods pertaining +to S3 pathing and Icechunk repos +""" + +import subprocess +import warnings +from pathlib import Path +from typing import Any + +import icechunk as ic +import xarray as xr +from icechunk.xarray import to_icechunk + + +class S3Path: + """ + Class representing an S3 path. + + Corresponds to an S3 bucket, prefix and region + + Parameters + ---------- + bucket: str + The bucket of the S3 path. + prefix: str + The S3 path (minus the bucket). + region: str + The S3 region the bucket/path belongs to. Defaults to 'us-east-1'. + """ + + bucket: str + prefix: str + region: str + + def __init__(self, bucket: str, prefix: str, region: str | None = "us-east-1"): + self.bucket = bucket + self.prefix = prefix + self.region = region # type: ignore + + def __str__(self): + """Returns the full S3 path""" + return f"s3://{self.bucket}/{self.prefix}" + + def partial_path(self): + """Returns the S3 path without the 'S3://' prefix""" + return f"{self.bucket}/{self.prefix}" + + +class IcechunkRepo: + """ + Class representing an S3 bucket or local icechunk store + + Parameters + ---------- + location: S3Path | Path + The S3Path or local path of the repo. + repo: ic.Repository + The icechunk repo, derived from the bucket, prefix, and region. S3 + credentials are provided from the environment. + virtual_chunks: list[ic.VirtualChunkContainer] | None + A list of virtual chunk containers corresponding to reference data + for virtualized stores. Allows icechunk to reference S3 locations + in virtualized datasets. + """ + + location: S3Path | Path + repo: ic.Repository + virtual_chunks: list[ic.VirtualChunkContainer] | None + + def __init__(self, location: S3Path | Path, virtual_chunk_mapping: list[dict[str, str]] | None = None): + self.location = location + self.virtual_chunks = self.gen_virtual_chunk_containers(virtual_chunk_mapping) + self.repo = self.open_repo() + + def open_repo(self) -> ic.Repository: + """ + Opens an icechunk repo + + Using the class instance parameters, open and assign an icechunk repo corresponding + to the setup (bucket, prefix, region, etc.) 
+ + Returns + ------- + ic.Repository + Icechunk repo corresponding to the S3 bucket path defined in the instance + """ + if isinstance(self.location, S3Path): + storage_config = ic.s3_storage( + bucket=self.location.bucket, + prefix=self.location.prefix, + region=self.location.region, + from_env=True, + ) + credentials = ic.containers_credentials({self.location.bucket: ic.s3_credentials(from_env=True)}) + config = ic.RepositoryConfig.default() + if self.virtual_chunks: + for vcc in self.virtual_chunks: + config.set_virtual_chunk_container(vcc) + else: + # self.location is a Path + storage_config = ic.local_filesystem_storage(str(self.location)) + credentials = None + config = None + + repo = ic.Repository.open_or_create(storage_config, config, credentials) + return repo + + def delete_repo(self, quiet: bool | None = False): + """ + Deletes the entire icechunk repo from S3. + + Parameters + ---------- + quiet : bool | None, optional + Suppresses AWS CLI output. By default False + """ + del_command = ["aws", "s3", "rm", str(self.location), "--recursive"] + if quiet: + del_command.append("--quiet") + subprocess.call(del_command) + print(f"Icechunk repo @ {str(self.location)} in its entirety was successfully deleted.") + + def gen_virtual_chunk_containers( + self, virtual_chunk_mapping: list[dict[str, str]] | None = None + ) -> list[ic.VirtualChunkContainer]: + """ + Create a list of virtual chunk containers + + Given a list of dictionaries mapping out virtual chunks, generate + and return a list of VirtualChunkContainers + + Parameters + ---------- + virtual_chunk_mapping : list[dict[str, str]] | None, optional + A list of dictionaries, each entry mapping out a single + virtual chunk definition. Should include a bucket and region. + By default None + + Returns + ------- + list[ic.VirtualChunkContainer] + A list of VirtualChunkContainers corresponding to the list of passed-in + dict mappings. + """ + v_chunks = None + if virtual_chunk_mapping: + v_chunks = [ + self.set_up_virtual_chunk_container(vc["bucket"], vc["region"]) + for vc in virtual_chunk_mapping + ] + return v_chunks + + def create_session( + self, read_only: bool | None = True, snap_id: str | None = None, branch: str | None = "main" + ) -> ic.Session: + """ + Open a session under the repo defined by an instance of IcechunkS3Repo + + Parameters + ---------- + read_only : bool | None, optional + Denotes if the session will be read-only or writable. By default True + snap_id: str | None, optional + The Snapshot ID of a specific commit to base the session on. Leave out if you want the + latest. By default None + branch : str | None, optional + Icechunk repo branch to be opened. By default "main" + + Returns + ------- + ic.Session + Icechunk repo session. Writable or read-only based on parameters. Branch + can be configured. + """ + if read_only: + if snap_id: + return self.repo.readonly_session(snapshot_id=snap_id) + else: + return self.repo.readonly_session(branch) + return self.repo.writable_session(branch) + + def retrieve_dataset(self, branch: str | None = "main", snap_id: str | None = None) -> xr.Dataset: + """ + Returns the repo's store contents as an Xarray dataset + + Parameters + ---------- + branch : str | None, optional + Icechunk repo branch to be opened, by default "main" + snap_id : str | None, optional + The Snapshot ID of a specific commit you want to retrieve. Leave out if you want the + latest. By default None. 
+ + Returns + ------- + xr.Dataset + Xarray dataset representation of the Icechunk store + """ + if snap_id: + session = self.create_session(read_only=True, branch=branch, snap_id=snap_id) + else: + session = self.create_session(read_only=True, branch=branch) + ds = xr.open_zarr(session.store, consolidated=False, chunks={}) + + # geotiff rasters saved in zarr need to be convereted to spatial-aware xarray with rioxarray + if "spatial_ref" in ds.data_vars: + ds.rio.write_crs(ds.spatial_ref.spatial_ref, inplace=True) + + return ds + + def retrieve_rollback_to_snapshot(self, snap_id: str, branch: str | None = "main") -> xr.Dataset: + """Retrieves the repo data a specific snapshot ID""" + return self.retrieve_dataset(branch=branch, snap_id=snap_id) + + def retrieve_rollback_n_snapshots(self, n: int, branch: str | None = "main") -> xr.Dataset: + """Retrieves the repo data from snapshot(s) ago""" + try: + snap_id = list(self.repo.ancestry(branch=branch))[n].id + except IndexError: + print(f"Rolled back too far! Branch ({branch}) has fewer previous commits than was specified") + return self.retrieve_rollback_to_snapshot(snap_id, branch=branch) + + def retrieve_prev_snapshot(self, branch: str | None = "main") -> xr.Dataset: + """Retrieves the repo data one snapshot ago""" + return self.retrieve_rollback_n_snapshots(n=1, branch=branch) + + def write_dataset( + self, ds: xr.Dataset, commit: str, virtualized: bool | None = False, branch: str | None = "main" + ): + """ + Given a dataset, push a new commit alongisde the data to the icechunk store + + Parameters + ---------- + ds : xr.Dataset + Dataset to be commited to the icechunk store. + commit : str + Commit message that will accompany the dataset push. + virtualized : bool | None, optional + Designates if the dataset to be written is virtualized. Affects + how it's written to icechunk. By default False + branch : str | None, optional + Icechunk repo branch to be pushed. By default "main". + """ + session = self.create_session(read_only=False, branch=branch) + if virtualized: + ds.virtualize.to_icechunk(session.store) + else: + to_icechunk(ds, session) + snapshot = session.commit(commit) + print(f"Dataset is uploaded. Commit: {snapshot}") + + def append_virt_data_to_store( + self, vds: xr.Dataset, append_dim: str, commit: str, branch: str | None = "main" + ): + """ + Add new data to the store + + Given a virtualized dataset, push a new commit to append + data to an existing icechunk store. The data will be + appended on a specified dimension. + + Parameters + ---------- + vds : xr.Dataset + The virtualized dataset to be appended to the + existing icechunk store. + append_dim : str + What dimension the dataset will be appended on. Likely + time or year, etc. + commit : str + Commit message that will accompany the dataset addition. + branch : str | None, optional + Icechunk repo branch to be pushed. By default "main". + """ + session = self.create_session(read_only=False, branch=branch) + vds.virtualize.to_icechunk(session.store, append_dim=append_dim) + snapshot = session.commit(commit) + print(f"Dataset has been appended on the {append_dim} dimension. 
Commit: {snapshot}") + + def create_new_branch_from_snapshot(self, name: str, snap_id: str): + """Create a new branch that is based on a specific snapshot ID""" + self.repo.create_branch(name, snapshot_id=snap_id) + + def create_new_branch(self, name: str, origin: str | None = "main"): + """Create a new branch that is based on the most recent snapshot on a given branch""" + branch_latest_snap_id = self.repo.lookup_branch(origin) + self.create_new_branch_from_snapshot(name, snap_id=branch_latest_snap_id) + + def print_history(self, branch: str | None = "main"): + """ + Prints a nicely-formatted summary of the history of the icechunk repo branch. + + Parameters + ---------- + branch : str | None, optional + The branch whose history will be printed. By default "main" + """ + for ancestor in self.repo.ancestry(branch=branch): + print(f"Snapshot ID:\t{ancestor.id}") + print(f"Timestamp:\t{ancestor.written_at}") + print(f"Message:\t{ancestor.message}\n") + + def retrieve_and_convert_to_tif( + self, + dest: str | Path, + var_name: str = None, + branch: str | None = "main", + compress: str = "lzw", + tiled: bool = True, + minx: float | None = None, + miny: float | None = None, + maxx: float | None = None, + maxy: float | None = None, + profile_kwargs: dict[Any, Any] = None, + ) -> None: + """A function to retrieve a raster icechunk dataset and download as a tif. + + Parameters + ---------- + dest : str | Path + Destination file path for tiff + var_name : str, optional + Name of xarray variable to be used for raster data, by default None + branch : str | None, optional + Icechunk repo branch to be opened, by default "main" + compress : str, optional + Specify a compression type for raster, by default "lzw" + tiled : bool, optional + Specify if raster should be tiled or not. Cloud-Optimized Geotiffs (COG) must be tiled, by default True + minx : float | None, optional + Specify a bounding box minimum x. Must have all [minx, miny, maxx, maxy] specified, by default None + miny : float | None, optional + Specify a bounding box minimum y. Must have all [minx, miny, maxx, maxy] specified, by default None + maxx : float | None, optional + Specify a bounding box maximum x. Must have all [minx, miny, maxx, maxy] specified, by default None + maxy : float | None, optional + Specify a bounding box maximum x. Must have all [minx, miny, maxx, maxy] specified, by default None + profile_kwargs : dict[Any, Any], optional + Any additional profile keywords accepted by GDAL geotiff driver + (https://gdal.org/en/stable/drivers/raster/gtiff.html#creation-options), by default None + + + Raises + ------ + AttributeError + If an xarray dataset does not have a "band" attribute in coordinates, the file is not deemed a raster + and will raise error. 
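Examples + -------- + Illustrative call on an existing repo instance; the destination path and variable name are placeholders: + + >>> repo.retrieve_and_convert_to_tif("output.tif", var_name="my_variable")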
+ """ + ds = self.retrieve_dataset(branch=branch) + + if "band" not in ds.coords.dims: + raise AttributeError("Dataset needs a 'band' coordinate to export geotiff") + + # infer variable name if none provided - MAY HAVE UNEXPECTED RESULTS + if not var_name: + var_name = self._infer_var_name_for_geotiff(list(ds.data_vars.variables)) + + # initialize keywords dict if none + profile_kwargs = {} if not profile_kwargs else profile_kwargs + + # clip to window + if minx and miny and maxx and maxy: + subset = ds.rio.clip_box(minx=minx, miny=miny, maxx=maxx, maxy=maxy) + subset[var_name].rio.to_raster(dest, compress=compress, tiled=tiled, **profile_kwargs) + del subset + print(f"Saved clipped window to {dest}") + + else: + ds[var_name].rio.to_raster(dest, compress=compress, tiled=tiled, **profile_kwargs) + del ds + print(f"Saved dataset to {dest}") + + def _infer_var_name_for_geotiff(self, variable_list: list) -> str: + """Infer a variable name for saving a geotiff from xarray variables + + Picks the first variable that isn't 'spatial_ref'. In zarr, 'spatial_ref' from CRS is moved + from coordinates to variables. We want a variable that is not it. + This arbitarily picks the first variable. + + Parameters + ---------- + variable_list : list + Output of list(ds.data_vars.variables) + + Returns + ------- + str + Variable name to use for geotif generation + """ + if "spatial_ref" in variable_list: + variable_list.remove("spatial_ref") + var_name = variable_list[0] + warnings.warn( + UserWarning, + f"Inferring xarray variable name {var_name} for raster data. This may have unintended consequences." + "Open dataset separately to check variable names to insure correct output.", + stacklevel=2, + ) + return var_name + + @staticmethod + def create_local_virtual_chunk_container(path: str) -> ic.VirtualChunkContainer: + """ + Create a virtual chunk container from a mapping for local files. + + Parameters + ---------- + path : str + The local path to the files which need to be virtualized + + Returns + ------- + ic.VirtualChunkContainer + A definition of a virtual chunk that the icechunk repo + uses to define access to virtualized data. + """ + abs_path = str(Path(path).resolve()) + store_config = ic.local_filesystem_store(abs_path) + return ic.VirtualChunkContainer(f"file://{abs_path}", store_config) + + @staticmethod + def set_up_virtual_chunk_container(bucket: str, region: str) -> ic.VirtualChunkContainer: + """ + Create a virtual chunk container from a mapping + + Given an S3 bucket/region, generate and return a VirtualChunkContainer + so Icechunk can point to virtualized data inside S3 buckets. + + Parameters + ---------- + bucket : str + The S3 bucket the virtual chunk points to. + region : str + The region of the S3 bucket. + + Returns + ------- + ic.VirtualChunkContainer + A definition of a virtual chunk that the icechunk repo + uses to define access to virtualized data. + """ + return ic.VirtualChunkContainer( + name=bucket, url_prefix=f"s3://{bucket}/", store=ic.s3_store(region=region) + ) diff --git a/src/icefabric/builds/map_fim_data.py b/src/icefabric/builds/map_fim_data.py new file mode 100644 index 0000000..e7cb254 --- /dev/null +++ b/src/icefabric/builds/map_fim_data.py @@ -0,0 +1,621 @@ +import collections +import json +import os +import re +import warnings + +import geopandas +import numpy as np +import pandas as pd + +warnings.filterwarnings("ignore") + + +class MapData: + """ + Maps FIM MIP & BLE XS datasets to relevant IDs & categorize by HUC. 
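Each model's cross-sections are tied back to NWM identifiers ("reach_id" / "network_to_id") using the conflation JSON shipped with that model.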
+ + At this time, ensure FIM datasets are saved to local disk. + """ + + def __init__(self, data_dir: str, subfolder_key_prefix: str) -> None: + self.data_dir = data_dir + self.subfolder_key_prefix = subfolder_key_prefix + + # Parent directory of the FIM files. + # Note: All the jsons & geopackages are relevant + # to map the files to IDs. + self.fim_data_dirs: list[str] = [] + + # List of directories associated with each file type of + # the FIM data sample (e.g. geopackage of a given model @ HUC#, json, + # source_models.gpkg, ripple.gpkg) + self.model_gpkg_dirs: list[str] = [] + self.src_models_gpkg_dirs: list[str] = [] + self.rip_gpkg_dirs: list[str] = [] + self.gpkg_dirs: list[str] = [] + self.json_dirs: list[str] = [] + self.xs_df_list: list[geopandas.GeoDataFrame] = [] + + # Variables to be used later + self.model_gpkg_tablenames: list[str] = [] + self.src_models_gpkg_tablenames: list[str] = [] + self.rip_gpkg_tablenames: list[str] = [] + self.gpkg_tablenames: list[str] = [] + self.json_tablenames: list[str] = [] + + self.id2json: dict = collections.defaultdict(dict) + self.model_id2gpkg: dict = collections.defaultdict(dict) + self.us_ref_dict: dict = collections.defaultdict(dict) + self.ds_ref_dict: dict = collections.defaultdict(dict) + self.rip_huc2gpkg: dict = collections.defaultdict(dict) + self.groupbyriver_dict: dict = collections.defaultdict(dict) + self.crs_dict: dict = collections.defaultdict(dict) + self.consolidated_id2xs: geopandas.GeoDataFrame = geopandas.GeoDataFrame() + + # NOTE Commenting out as the manipulated states between these function calls may be needed. Not wise to run all data preprocessing inside the initialization function + # self.read_data_dirs() + # self.cat_data_dirs(self.subfolder_key_prefix) + # self.map_model2huc() + # self.filter_model2huc_map( + # keys_to_drop={"metrics", "low_flow", "high_flow", "eclipsed", "lengths", "coverage"} + # ) + + # # Generate maps of model_id & HUC # to xs (for both us & ds cross-section) + # # to reach ID & "network_to_id" from each model @ HUC's json file + # self.map_modelhuc_xs2ids() + + # # Generate maps of model_id & HUC # to gpkg from each model @ HUC's geopackage + # self.map_model2huc_gpkg() + + # # Generate maps of HUC # to ripple gpkg + # self.map_huc2ripple_gpkg() + + # # Map IDs to each model's cross-section instance + # self.map_model_xs2ids() + + # # [Optional: Per HUC, save each river's set of XS data as geoparquetss & geopackages] + # self.save_xs_data() + + # # Save map of inherited CRS to HUC, model_id, river name + # self.save_crs_map() + + # # Consolidated all HEC RAS models' cross-sections featuring IDs + # self.consolidate_id2xs_dfs() + + # # Save HEC RAS models' cross-sections consolidated by HUC as geoparquets & geopackages + # # TODO: does this need to be called with a `xs_data_type` ? + # self.save_xsbyhuc_data() + + def read_data_dirs(self) -> None: + """ + Extract the list of FIM data sample's directories. + + Args: + None + + Return (list): List of directories associated with each file type of + the FIM data sample. + + """ + for folder, _subfolders, files in os.walk(self.data_dir): + if folder != self.data_dir: + for file in files: + self.fim_data_dirs.append(f"{folder}/{file}") + + return + + def cat_data_dirs(self, subfolder_key_prefix: str) -> None: + """ + Categorize FIM data sample files. 
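Files are grouped by filename pattern into ripple.gpkg files, per-model XS geopackages, source_models.gpkg files, and conflation JSONs.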
+ + Args: + subfolder_key_prefix (str): Prefix of the FIM subfolder's data of interest + Options: 'mip' or 'ble' + + Return: None + + """ + # Extract a list of directories corresponding to each set of files + for x in self.fim_data_dirs: + # Covers all HEC-RAS models gpkg featuring 1D model flowlines per HUC (contains reach_id & nwm_to_id + # for network layer & reaches layer. The rating curves layer only has reach_id. The models layer + # contains collection_id & model_id) + if re.search("ripple.gpkg", x): + self.rip_gpkg_dirs.append(x) + t = re.search(f"/{subfolder_key_prefix}(.*)", x) + rip_gpkg_tblname = t.group() # type: ignore[union-attr] + self.rip_gpkg_tablenames.append(rip_gpkg_tblname.lstrip("/").replace("/", "_")) + + # Covers all HEC-RAS models gpkg featuring XS per HUC (contains model_id) + elif ( + not x.endswith("source_models.gpkg") + and not x.endswith(".json") + and not re.search("ripple.gpkg", x) + ): + self.model_gpkg_dirs.append(x) + t = re.search(f"/{subfolder_key_prefix}(.*)", x) + model_gpkg_tblname = t.group() # type: ignore[union-attr] + self.model_gpkg_tablenames.append(model_gpkg_tblname.lstrip("/").replace("/", "_")) + + # Covers all HEC-RAS models gpkg featuring 1D model flowlines per HUC (contains model_id & their HEC-RAS 1D model flowlines) + elif x.endswith("source_models.gpkg"): + self.src_models_gpkg_dirs.append(x) + t = re.search(f"/{subfolder_key_prefix}(.*)", x) + src_models_gpkg_tblname = t.group() # type: ignore[union-attr] + self.src_models_gpkg_tablenames.append(src_models_gpkg_tblname.lstrip("/").replace("/", "_")) + + # Covers all HEC-RAS models + Ripple gpkg per HUC + if x.endswith(".gpkg"): + self.gpkg_dirs.append(x) + t = re.search(f"/{subfolder_key_prefix}(.*)", x) + gpkg_tblname = t.group() # type: ignore[union-attr] + self.gpkg_tablenames.append(gpkg_tblname.lstrip("/").replace("/", "_")) + + # Covers each HEC-RAS models' result of conflating its model w/ the NWM network + elif x.endswith(".json"): + self.json_dirs.append(x) + t = re.search(f"/{subfolder_key_prefix}(.*)", x) + json_tblname = t.group() # type: ignore[union-attr] + self.json_tablenames.append(json_tblname.lstrip("/").replace("/", "_")) + + return + + def drop_nested_keys(self, map_dict: dict, keys_to_drop: dict) -> dict | list: + """ + Drop keys irrelevant for linking each XS to IDs + + Args: + map_dict (dict): Dictionary to filter + + keys_to_drop (dict): List of keys irrelevant for linking each XS to IDs. + + Return: None + + """ + if isinstance(map_dict, dict): + return { + k: self.drop_nested_keys(v, keys_to_drop) + for k, v in map_dict.items() + if k not in keys_to_drop + } + elif isinstance(map_dict, list): + return [self.drop_nested_keys(i, keys_to_drop) for i in map_dict] + else: + return map_dict + + def map_model2huc(self) -> None: + """ + Map each conflation json file to their corresponding model ID & HUC #. + + Args: + None + + Return: None + + """ + for x in self.json_dirs: + # Note: model_ids found in each src source_models.gpkg is featured is declared as + # sub-foldername of where model gpkg file resides + model_id = x.split("/")[-2] + huc_num = x.split("/")[-4].split("_")[1] + self.id2json[model_id][huc_num] = {} + try: + with open(x) as f: + json2dict = json.loads(f.read()) + self.id2json[model_id][huc_num].update(json2dict) + except: + pass + + return + + def filter_model2huc_map(self, keys_to_drop: dict) -> None: + """ + Extract only relevant keys from model2huc map for linking each XS to a feature ID. 
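+
+        Example:
+            The filtering is delegated to ``drop_nested_keys``; a small illustrative
+            dictionary shows the effect.
+
+            >>> md.drop_nested_keys(
+            ...     {"reaches": {"123": {"metrics": 0.9, "network_to_id": "456"}}},
+            ...     {"metrics"},
+            ... )
+            {'reaches': {'123': {'network_to_id': '456'}}}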
+ + Args: + keys_to_drop (dict): List of keys irrelevant for linking each XS to IDs. + (e.g. {'metrics','low_flow', 'high_flow', 'eclipsed', + 'lengths', 'coverage'}) + + Return: None + + """ + self.id2json = self.drop_nested_keys(self.id2json, keys_to_drop) + + return + + def map_modelhuc_xs2ids(self) -> None: + """ + Parse JSONs & map model_id & HUC # to xs to reach ID & "network_to_id" + + Args: + None + + Return: None + + Note: Per model @ HUC cross-section layer, the attribute of interest is "river_reach_rs" + in order to link the IDs to each individual cross-section & their associated xs_id. + + To map each cross-section of a model @ HUC#, there has to be a shared attribute between a + model @ HUC#'s cross section w/in its XS layer & the details provided within a model @ HUC#'s + conflation json file. + + - Each conflation json file reveals ... + - Per reach, there is a set of cross-sections. + - Within each model's cross-section (XS) layers, there are a set of cross-section + instances - each instance featuring a unique "thalweg" (aka "min_elevation"), + "xs_max_elevation" (aka "max_elevation"), "reach_id" ("reaches"), & "river_station" (aka "xs_id") + + - Each model @ HUC#'s XS layer contains a collection of cross-section instancees. + Thus, each unique cross-section w/in a given model @ HUC#'s XS layer will need to be mapped in + such a way to allow each cross-section to be associated with a feature ID (aka "reach_id" and/or + "network_to_id"). + + - "river_reach_rs" is formatted differently across models' XS layers, however multplie keys + referenced in the conflation jsons can be referenced to obtain the "river_reach_rs" from the jsons + As a result, the mapping of IDs to each model's cross-section instance will be based on the info. + extracted from a model @ HUC#'s conflation.json + + - There can be multiple reach_ids tied to same nwm_to_id (aka "network_to_id). + + """ + # Keys to join values from that makes up the 'river_reach_rs' reflected in each model's XS layer + keys_to_join = ["river", "reach", "xs_id"] + for model_id, huc_dict in self.id2json.items(): + for huc_num, reach_dict in huc_dict.items(): + for reach_id, v_dict in reach_dict["reaches"].items(): + # Joining the attribute because each model's xs layer features three atttrib concat (to be used as reference) + if "us_xs" in v_dict: + usxs_joined_values = " ".join(str(v_dict["us_xs"][key]) for key in keys_to_join) + if "min_elevation" in v_dict["us_xs"]: + us_xs_min_elev = v_dict["us_xs"]["min_elevation"] + if "max_elevation" in v_dict["us_xs"]: + us_xs_max_elev = v_dict["us_xs"]["max_elevation"] + + if "ds_xs" in v_dict: + dsxs_joined_values = " ".join(str(v_dict["ds_xs"][key]) for key in keys_to_join) + if "min_elevation" in v_dict["ds_xs"]: + ds_xs_min_elev = v_dict["ds_xs"]["min_elevation"] + if "max_elevation" in v_dict["ds_xs"]: + ds_xs_max_elev = v_dict["ds_xs"]["max_elevation"] + + if "network_to_id" in v_dict: + nwm2id = v_dict["network_to_id"] + + # Generated maps of model_id & HUC # to xs (for both us & ds cross-section) + # to reach ID & "network_to_id" + self.us_ref_dict[(model_id, huc_num)].update( + {(usxs_joined_values, us_xs_min_elev, us_xs_max_elev): [reach_id, nwm2id]} + ) + self.ds_ref_dict[(model_id, huc_num)].update( + {(dsxs_joined_values, ds_xs_min_elev, ds_xs_max_elev): [reach_id, nwm2id]} + ) + + return + + def map_model2huc_gpkg(self) -> None: + """ + Map model ID & HUC # to each HEC-RAS model's geopackage. 
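+
+        Example:
+            Illustrative only; ``model_id`` and ``huc_num`` stand in for values taken
+            from the FIM directory structure.
+
+            >>> md.map_model2huc_gpkg()
+            >>> xs_layer = md.model_id2gpkg[(model_id, huc_num)]["XS"]  # cross-section GeoDataFrame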
+ + Args: + None + + Return: None + + Note: model_ids found in each source_models.gpkg is featured in last + sub-foldername of where model gpkg file resides + + """ + # Each HEC-RAS model gpkg per model per HUC + for x in self.model_gpkg_dirs: + model_id = x.split("/")[-2] + huc_num = x.split("/")[-4].split("_")[1] + self.model_id2gpkg[(model_id, huc_num)] = {"XS": None} + self.model_id2gpkg[(model_id, huc_num)] = {"XS concave hull": None} + self.model_id2gpkg[(model_id, huc_num)] = {"River": None} + + try: + self.model_id2gpkg[(model_id, huc_num)].update( + {"XS": geopandas.read_file(x, engine="pyogrio", use_arrow=True, layer="XS")} + ) + + self.model_id2gpkg[(model_id, huc_num)].update( + {"River": geopandas.read_file(x, engine="pyogrio", use_arrow=True, layer="River")} + ) + self.model_id2gpkg[(model_id, huc_num)].update( + { + "XS concave hull": geopandas.read_file( + x, engine="pyogrio", use_arrow=True, layer="XS concave hull" + ) + } + ) + except: + pass + + return + + def map_huc2ripple_gpkg(self) -> None: + """ + Map HUC # to ripple geopackage (features HEC RAS 1D model flowlines). + + Args: + None + + Return: None + + Note: ripple.gpkg features the HEC RAS 1D model flowlines categorized by HUC #. + + """ + for x in self.rip_gpkg_dirs: + huc_num = x.split("/")[-2].split("_")[1] + self.rip_huc2gpkg[huc_num] = {"reaches": None} + self.rip_huc2gpkg[huc_num] = {"rating curves": None} + self.rip_huc2gpkg[huc_num] = {"network": None} + self.rip_huc2gpkg[huc_num] = {"models": None} + self.rip_huc2gpkg[huc_num] = {"metadata": None} + self.rip_huc2gpkg[huc_num] = {"rating_curves_no_map": None} + self.rip_huc2gpkg[huc_num] = {"processing": None} + + try: + self.rip_huc2gpkg[huc_num].update( + {"reaches": geopandas.read_file(x, engine="pyogrio", use_arrow=True, layer="reaches")} + ) + self.rip_huc2gpkg[huc_num].update( + { + "rating_curves": geopandas.read_file( + x, engine="pyogrio", use_arrow=True, layer="rating_curves" + ) + } + ) + self.rip_huc2gpkg[huc_num].update( + {"network": geopandas.read_file(x, engine="pyogrio", use_arrow=True, layer="network")} + ) + self.rip_huc2gpkg[huc_num].update( + {"models": geopandas.read_file(x, engine="pyogrio", use_arrow=True, layer="models")} + ) + self.rip_huc2gpkg[huc_num].update( + {"metadata": geopandas.read_file(x, engine="pyogrio", use_arrow=True, layer="metadata")} + ) + self.rip_huc2gpkg[huc_num].update( + { + "rating_curves_no_map": geopandas.read_file( + x, engine="pyogrio", use_arrow=True, layer="rating_curves_no_map" + ) + } + ) + self.rip_huc2gpkg[huc_num].update( + { + "processing": geopandas.read_file( + x, engine="pyogrio", use_arrow=True, layer="processing" + ) + } + ) + except: + pass + return + + def map_model_xs2ids(self) -> None: + """ + Map each cross-section instance featured in HEC-RAS model's cross-section layer to their corresponding IDs. + + Args: + None + + Return: None + + """ + for (model_id, huc_num), model_gpkg_dict in self.model_id2gpkg.items(): + df = model_gpkg_dict["XS"] + df["huc"] = huc_num + df["model_id"] = model_id + array_of_lists = [[None, None] for _ in range(len(df))] + df["us_ids"] = pd.DataFrame([array_of_lists]).T + df["ds_ids"] = pd.DataFrame([array_of_lists]).T + + # Covers us_xs + if (model_id, huc_num) in self.us_ref_dict: + df["us_ids"] = df.set_index(["river_reach_rs", "thalweg", "xs_max_elevation"]).index.map( + self.us_ref_dict[(model_id, huc_num)].get + ) + else: + print( + f"The model_id @ HUC# ({(model_id, huc_num)}) IS NOT featured in current model @ HUC's conflation json file." 
+ ) + continue + + # Covers ds_xs + if (model_id, huc_num) in self.ds_ref_dict: + df["ds_ids"] = df.set_index(["river_reach_rs", "thalweg", "xs_max_elevation"]).index.map( + self.ds_ref_dict[(model_id, huc_num)].get + ) + else: + print( + f"The model_id @ HUC# ({(model_id, huc_num)}) IS NOT featured in current model @ HUC's conflation json file." + ) + continue + + # Extracts & appends reach_id & network_to_id to each model @ HUC's unique XS + # Should the ids not be available in the conflation, must initialize columns + us_id_df = df["us_ids"].apply(pd.Series) + if us_id_df.shape[1] == 0: + us_id_df = pd.DataFrame(np.nan, index=range(us_id_df.shape[0]), columns=[0, 1]) + us_id_df.columns = ["us_reach_id", "us_network_to_id"] + + # Should the ids not be available in the conflation, must initialize columns + ds_id_df = df["ds_ids"].apply(pd.Series) + if ds_id_df.shape[1] == 0: + ds_id_df = pd.DataFrame(np.nan, index=range(ds_id_df.shape[0]), columns=[0, 1]) + ds_id_df.columns = ["ds_reach_id", "ds_network_to_id"] + + # Fill any nan to string + us_id_df[["us_reach_id", "us_network_to_id"]] = us_id_df[ + ["us_reach_id", "us_network_to_id"] + ].fillna("None") + ds_id_df[["ds_reach_id", "ds_network_to_id"]] = ds_id_df[ + ["ds_reach_id", "ds_network_to_id"] + ].fillna("None") + df = df.fillna("None") + df = pd.concat([df, us_id_df, ds_id_df], axis=1) + df = df.drop(["us_ids", "ds_ids"], axis=1) + + model_gpkg_dict["XS"] = df + self.xs_df_list.append(model_gpkg_dict["XS"]) + + return + + def save_xs_data(self) -> None: + """ + Consolidate HEC-RAS models cross-sections based on HUC & river & save to storage + + Args: + None + + Return: None + + Note: These saved parquet files will preserve each river @ HUC's inherited CRS. + + """ + for (model_id, huc_num), _model_gpkg_dict in self.model_id2gpkg.items(): + # Generate data folder per HUC + if not os.path.exists(f"{os.getcwd()}/xs_data/huc_{huc_num}"): + os.makedirs(f"{os.getcwd()}/xs_data/huc_{huc_num}") + + # Save each river's geopandas as a geoparquet & geopackage under each HUC folder + grouped_xslayers = self.model_id2gpkg[(model_id, huc_num)]["XS"].groupby(["river"]) + for river_name in set(self.model_id2gpkg[(model_id, huc_num)]["XS"]["river"]): + filterbyriver = grouped_xslayers.get_group(river_name) + + # Generate map of each river's set of XS to HUC & model ID to be used as a + # look-up reference + self.groupbyriver_dict[huc_num].update({model_id: filterbyriver}) + + # Save XS as geoparquet per river per HUC + filterbyriver.to_parquet( + f"{os.getcwd()}/xs_data/huc_{huc_num}/{river_name}.parquet", engine="pyarrow" + ) + + # Save XS as geopackage per river per HUC + filterbyriver.to_file(f"{os.getcwd()}/xs_data/huc_{huc_num}/{river_name}.gpkg", driver="GPKG") + + return + + def save_crs_map(self, output_dir=None) -> None: + """ + Consolidate HEC-RAS models cross-sections based on HUC & river & save to storage + + Args: + None + + Return: None + + Note: This saved pickle file will map each river @ HUC's inherited CRS for one to + analyze & reference. 
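+
+        Example:
+            Illustrative only; with the default ``output_dir`` the map is written to
+            ``./xs_data/crs_map/crs_mapping.json``.
+
+            >>> md.save_crs_map()
+            >>> md.crs_dict[(huc_num, model_id)]  # CRS strings keyed by river name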
+ + """ + if output_dir is None: + output_dir = os.getcwd() + for (model_id, huc_num), _model_gpkg_dict in self.model_id2gpkg.items(): + # Generate data folder per HUC + if not os.path.exists(f"{os.getcwd()}/xs_data/crs_map"): + os.makedirs(f"{os.getcwd()}/xs_data/crs_map") + + # Generate map of the CRS to each river's geopandas per HUC + grouped_xslayers = self.model_id2gpkg[(model_id, huc_num)]["XS"].groupby(["river"]) + for river_name in set(self.model_id2gpkg[(model_id, huc_num)]["XS"]["river"]): + filterbyriver = grouped_xslayers.get_group(river_name) + self.crs_dict[(huc_num, model_id)].update( + { + river_name: ( + f"""ESPG: { + str(filterbyriver.crs.to_epsg()) if filterbyriver.crs.to_epsg() else None + }, {filterbyriver.crs.name},""" + ) + } + ) + + # Save map of inherited CRS to HUC, model_id, river name as JSON + with open(f"{output_dir}/xs_data/crs_map/crs_mapping.json", "w") as handle: + json.dump(self.crs_dict, handle, indent=2, ensure_ascii=False) + + return + + def consolidate_id2xs_dfs(self) -> None: + """ + Consolidate HEC-RAS models cross-sections featuring their corresponding IDs. + + Args: + None + + Return: None + + Note: A set CRS standard is needed in order to consolidates all XS layers of all models + under a single dataframe to maintain a consistent CRS. This consolidation will check if the XS + dataframes have a consistent CRS if it does not then it will not consolidate the XS dataframes. + As of 05/14/25, only CRS in this condition is considering the + NAD83, but additional CRS can be added to this methods as new findings are made. + + """ + crs_list = [] + for xs_df in self.xs_df_list: + if "NAD83" in xs_df.crs.name or "NAD_1983" in xs_df.crs.name: + # Convert all GeoDataFrames in the list to the target CRS + crs_list.append("EPSG:5070") + else: + print(False) + + # Will consolidate ONLY if the CRS is consistent across XS geodpandas dataframes + if len(set(crs_list)) == 1: + target_crs = str(np.unique(crs_list)[0]) + self.consolidated_id2xs = geopandas.GeoDataFrame( + pd.concat([xs_df.to_crs(target_crs) for xs_df in self.xs_df_list], ignore_index=True) + ) + print( + f"The consolidated XS geopandas dataframes now has a standardized CRS of:\n{self.consolidated_id2xs.crs.name}" + ) + else: + print( + "Cannot consolidate XS geodpandas dataframes because the CRS is inconsistent across XS geodpandas dataframes." + ) + + return + + def save_xsbyhuc_data(self, output_dir=None) -> None: + """ + Consolidate HEC-RAS models cross-sections based on HUC & save to storage + + Args: + output_dir (str, optional): Directory to save output files. Defaults to current working directory. + + Return: None + + Note: These saved parquet files will be the transformed CRS of all XS per HUC to ensure + a consistent standardized CRS. 
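+
+        Example:
+            Illustrative only; ``consolidate_id2xs_dfs`` must have populated
+            ``self.consolidated_id2xs`` first.
+
+            >>> md.consolidate_id2xs_dfs()
+            >>> md.save_xsbyhuc_data(output_dir=".")  # writes xs_data/<prefix>_<huc>/huc_<huc>.parquet and .gpkg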
+ + """ + if output_dir is None: + output_dir = os.getcwd() + + unique_huc_nums = set(self.consolidated_id2xs["huc"]) + for huc_num in unique_huc_nums: + # Generate data folder per HUC using the subfolder_key_prefix + output_folder = f"{output_dir}/xs_data/{self.subfolder_key_prefix}_{huc_num}" + if not os.path.exists(output_folder): + os.makedirs(output_folder) + + # Filter consolidated XS geopanda dataframe by HUC + filterbyhuc = self.consolidated_id2xs[self.consolidated_id2xs["huc"] == huc_num] + + # Save XS as geoparquet per HUC + filterbyhuc["thalweg"] = filterbyhuc["thalweg"].astype(str) + filterbyhuc["xs_max_elevation"] = filterbyhuc["xs_max_elevation"].astype(str) + + # Define file paths using the subfolder_key_prefix + parquet_path = f"{output_folder}/huc_{huc_num}.parquet" + gpkg_path = f"{output_folder}/huc_{huc_num}.gpkg" + + # Save files + filterbyhuc.to_parquet(parquet_path, engine="pyarrow") + filterbyhuc.to_file(gpkg_path, driver="GPKG") + + print(f"Saved: {self.subfolder_key_prefix}_{huc_num}") diff --git a/src/icefabric/cli/__init__.py b/src/icefabric/cli/__init__.py new file mode 100644 index 0000000..f3097c6 --- /dev/null +++ b/src/icefabric/cli/__init__.py @@ -0,0 +1,6 @@ +from pyiceberg.catalog import Catalog, load_catalog + + +def get_catalog(_catalog: str = "glue") -> Catalog: + """Gets the pyiceberg catalog reference""" + return load_catalog(_catalog) diff --git a/src/icefabric/cli/hydrofabric.py b/src/icefabric/cli/hydrofabric.py new file mode 100644 index 0000000..567452e --- /dev/null +++ b/src/icefabric/cli/hydrofabric.py @@ -0,0 +1,103 @@ +"""Contains all click CLI code for the hydrofabric""" + +from pathlib import Path + +import click +import geopandas as gpd +from pyprojroot import here + +from icefabric.builds.graph_connectivity import load_upstream_json +from icefabric.cli import get_catalog +from icefabric.helpers import load_creds +from icefabric.hydrofabric.subset import subset_hydrofabric, subset_hydrofabric_vpu +from icefabric.schemas.hydrofabric import HydrofabricDomains, IdType + +load_creds() + + +@click.command() +@click.option( + "--catalog", + type=click.Choice(["glue", "sql"], case_sensitive=False), + default="glue", + help="The pyiceberg catalog type", +) +@click.option( + "--identifier", + type=str, + required=True, + help="The specific ID you are querying the system from", +) +@click.option( + "--id-type", + type=click.Choice([e.value for e in IdType], case_sensitive=False), + required=True, + help="The ID type you are querying", +) +@click.option( + "--domain", + type=click.Choice([e.value for e in HydrofabricDomains], case_sensitive=False), + required=True, + help="The domain you are querying", +) +@click.option( + "--layers", + multiple=True, + default=["divides", "flowpaths", "network", "nexus"], + help="The layers to include in the geopackage. Will always include ['divides', 'flowpaths', 'network', 'nexus']", +) +@click.option( + "--output-file", + "-o", + type=click.Path(path_type=Path), + default=Path.cwd() / "subset.gpkg", + help="Output file. 
Defaults to ${CWD}/subset.gpkg", +) +def subset( + catalog: str, + identifier: str, + id_type: str, + domain: str, + layers: tuple[str], + output_file: Path, +): + """Subsets the hydrofabric based on a unique identifier""" + id_type_enum = IdType(id_type) + _catalog = get_catalog(catalog) + + connectivity_graphs = load_upstream_json( + catalog=_catalog, + namespaces=[domain], + output_path=here() / "data", + ) + + layers_list = list(layers) if layers else ["divides", "flowpaths", "network", "nexus"] + + if id_type_enum == IdType.VPU_ID: + output_layers = subset_hydrofabric_vpu( + catalog=_catalog, + layers=layers_list, + namespace=domain, + vpu_id=identifier, + ) + + else: + output_layers = subset_hydrofabric( + catalog=_catalog, + identifier=identifier, + id_type=id_type_enum, + layers=layers_list, + namespace=domain, + graph=connectivity_graphs[domain], + ) + + output_file.parent.mkdir(parents=True, exist_ok=True) + + if output_file: + for table_name, _layer in output_layers.items(): + if len(_layer) > 0: # Only save non-empty layers + gpd.GeoDataFrame(_layer).to_file(output_file, layer=table_name, driver="GPKG") + else: + print(f"Warning: {table_name} layer is empty") + + click.echo(f"Hydrofabric file created successfully in the following folder: {output_file}") diff --git a/src/icefabric/cli/main.py b/src/icefabric/cli/main.py new file mode 100644 index 0000000..2eae897 --- /dev/null +++ b/src/icefabric/cli/main.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +"""Main CLI entry point for icefabric""" + +import click + +from icefabric import __version__ +from icefabric.cli.hydrofabric import subset +from icefabric.cli.modules import params +from icefabric.cli.streamflow import streamflow_observations + + +def get_version(): + """Get the version of the icefabric package.""" + return __version__ + + +@click.group() +@click.version_option(version=get_version()) +@click.pass_context +def cli(ctx): + """ + Ice fabric tools and utilities. + + A comprehensive toolkit for working with ice fabric data, + hydrofabric processing, and related geospatial operations. + """ + ctx.ensure_object(dict) + + +# Add subcommands +cli.add_command(params) +cli.add_command(subset) +cli.add_command(streamflow_observations) + +# Main entry point for when run as a script +if __name__ == "__main__": + cli() diff --git a/src/icefabric/cli/modules.py b/src/icefabric/cli/modules.py new file mode 100644 index 0000000..d3aea7b --- /dev/null +++ b/src/icefabric/cli/modules.py @@ -0,0 +1,173 @@ +"""Contains all click CLI code for NWM modules""" + +from pathlib import Path + +import click +from dotenv import load_dotenv +from pyprojroot import here + +from icefabric._version import __version__ +from icefabric.builds.graph_connectivity import load_upstream_json +from icefabric.cli import get_catalog +from icefabric.helpers.io import _create_config_zip +from icefabric.modules import NWMModules, SmpModules, config_mapper, modules_with_extra_args +from icefabric.schemas.hydrofabric import HydrofabricDomains +from icefabric.schemas.modules import IceFractionScheme + +load_dotenv() + + +def validate_options(ctx, param, value): + """Validates options are only used with their respective modules""" + if value is not None: + module_choice = ctx.params.get("nwm_module") + try: + if param.name not in modules_with_extra_args[module_choice]: + raise click.BadParameter( + f"'{param.opts[0]}' is inappropriate for the '{module_choice}' module." 
+ ) + except KeyError as err: + raise KeyError( + f"The '{module_choice}' module can't be used with non-standard (gage id, domain, etc.) arguments." + ) from err + return value + + +@click.command() +@click.option( + "--gauge", + type=str, + help="The Gauge ID to subset the Hydrofabric from and get upstream catchment information", +) +@click.option( + "--nwm-module", + "nwm_module", + type=click.Choice([module.value for module in NWMModules], case_sensitive=False), + help="The module to create initial parameter config files for", +) +@click.option( + "--domain", + type=click.Choice([e.value for e in HydrofabricDomains], case_sensitive=False), + required=True, + help="The domain you are querying", +) +@click.option( + "--catalog", + type=click.Choice(["glue", "sql"], case_sensitive=False), + default="glue", + help="The pyiceberg catalog type", +) +@click.option( + "--ice-fraction", + "use_schaake", + type=click.Choice(IceFractionScheme, case_sensitive=False), + default=None, + help="(SFT only) - The ice fraction scheme used. Defaults to False to use Xinanjiang", + callback=validate_options, +) +@click.option( + "--envca", + type=bool, + default=None, + help="(Snow-17/SAC-SMA only) - If source is ENVCA, then set to True. Defaults to False.", + callback=validate_options, +) +@click.option( + "--smp-extra-module", + "extra_module", + type=click.Choice(SmpModules, case_sensitive=False), + default=None, + help="(SMP only) - Name of another module to be used alongisde SMP to fill out additional parameters.", + callback=validate_options, +) +@click.option( + "--cfe-version", + "cfe_version", + type=click.Choice(["CFE-X", "CFE-S"], case_sensitive=False), + default=None, + help="the CFE module type (e.g. CFE-X, CFE-S) for which determines whether to use Shaake or Xinanjiang for surface partitioning.", + callback=validate_options, +) +@click.option( + "--sft-included", + type=bool, + default=None, + help='(LASAM only) - Denotes that SFT is in the "dep_modules_included" definition as declared in the HF API repo', + callback=validate_options, +) +@click.option( + "--soil-params-file", + type=str, + default=None, + help="(LASAM only) - Name of the Van Genuchton soil parameters file", + callback=validate_options, +) +@click.option( + "--output", + "-o", + type=click.Path(path_type=Path), + default=Path.cwd(), + help="Output path for the zip file. 
Defaults to current directory", +) +def params( + gauge: str, + nwm_module: str, + domain: str, + catalog: str, + use_schaake: IceFractionScheme, + envca: bool, + extra_module: SmpModules, + sft_included: bool, + cfe_version: str, + soil_params_file: str, + output: Path, +): + """Returns a zip file containing all config files requested by a specific module""" + # TODO - Remove the below if statement once topoflow has IPE/BMI generation + if nwm_module == "topoflow": + raise NotImplementedError("Topoflow not implemented yet") + + ice_fraction_enum = ( + IceFractionScheme[use_schaake.upper()] if use_schaake else IceFractionScheme.XINANJIANG + ) # Defaults to Xinanjiang + use_schaake = True if use_schaake == IceFractionScheme.SCHAAKE else False + _catalog = get_catalog(catalog) + graph = load_upstream_json( + catalog=_catalog, + namespaces=[domain], + output_path=here() / "data", + )[domain] + + ipe_kwargs = {} + ipe_kwargs["catalog"] = _catalog + ipe_kwargs["namespace"] = domain + ipe_kwargs["identifier"] = f"gages-{gauge}" + ipe_kwargs["graph"] = graph + + if nwm_module in modules_with_extra_args: + for extra_arg in modules_with_extra_args[nwm_module]: + if locals()[extra_arg] is not None: + ipe_kwargs[extra_arg] = locals()[extra_arg] + + get_param_func = config_mapper[nwm_module] + configs = get_param_func(**ipe_kwargs) + + output.parent.mkdir(parents=True, exist_ok=True) + + zip_metadata_kwargs = { + "gauge_id": gauge, + "domain": domain, + "version": __version__, + "module": nwm_module, + "catalog_type": catalog, + } + if nwm_module == "sft": + zip_metadata_kwargs["ice_fraction"] = ice_fraction_enum.value + + _create_config_zip( + configs=configs, + output_path=output, + kwargs=zip_metadata_kwargs, + ) + + click.echo(f"Config files created successfully in the following folder: {output}") diff --git a/src/icefabric/cli/streamflow.py b/src/icefabric/cli/streamflow.py new file mode 100644 index 0000000..af33376 --- /dev/null +++ b/src/icefabric/cli/streamflow.py @@ -0,0 +1,114 @@ +"""Contains all click CLI code for NWM modules""" + +from pathlib import Path + +import click +import icechunk +import numpy as np +import polars as pl +import xarray as xr +from dotenv import load_dotenv + +from icefabric.schemas.hydrofabric import StreamflowDataSources + +load_dotenv() + +BUCKET = "edfs-data" +PREFIX = "streamflow_observations" +TIME_FORMATS = [ + "%Y", + "%Y-%m", + "%Y-%m-%d", + "%Y-%m-%d %H", + "%Y-%m-%d %H:%M", + "%Y-%m-%d %H:%M:%S", + "%Y-%m-%dT%H:%M:%S.%f", +] +MIN_DT = str(np.datetime64("1678-01-01T00:00:00.000000", "ms")) +MAX_DT = str(np.datetime64("2262-04-11T23:47:16.854775", "ms")) + + +def validate_file_extension(ctx, param, value): + """Validates that the output path is a CSV file""" + if value and not value.suffix == ".csv": + raise click.BadParameter("Output file path must have a CSV ('.csv') extension.") + return value + + +@click.command() +@click.option( + "--data-source", + "-d", + type=click.Choice([module.value for module in StreamflowDataSources], case_sensitive=False), + help="The data source for the USGS gage id", +) +@click.option( + "--gage-id", + "-g", + type=str, + help="The Gauge ID you want the hourly streamflow data from", +) +@click.option( + "--start-date", + "-s", + type=click.DateTime(formats=TIME_FORMATS), + default=MIN_DT, + help="Start of the hourly streamflow timestamp range to be included. 
If not specified, time range defaults to having no minimum.", +) +@click.option( + "--end-date", + "-e", + type=click.DateTime(formats=TIME_FORMATS), + default=MAX_DT, + help="End of the hourly streamflow timestamp range to be included. If not specified, time range defaults to having no maximum.", +) +@click.option( + "--output-file", + "-o", + type=click.Path(path_type=Path, dir_okay=False), + default=None, + help="Output CSV file. Must be a CSV file extension. Defaults to ${CWD}/${gage-id}.csv", + callback=validate_file_extension, +) +def streamflow_observations( + data_source: str, gage_id: str, start_date: str, end_date: str, output_file: Path +): + """Generates a CSV file containing the hourly streamflow data for a specific gage ID""" + if not output_file: + output_file = Path.cwd() / f"{gage_id}.csv" + ic_store_type = "usgs" if data_source == "USGS" else "envca_cadwr_txdot" + + # Get the data from the icechunk store + storage_config = icechunk.s3_storage( + bucket=BUCKET, prefix=f"{PREFIX}/{ic_store_type}_observations", region="us-east-1", from_env=True + ) + repo = icechunk.Repository.open(storage_config) + session = repo.writable_session("main") + ds = xr.open_zarr(session.store, consolidated=False) + + # Slice the dataset to greatly reduce dataframe conversion time + ds = ds.sel(time=slice(start_date, end_date), id=gage_id) + + # Convert xarray dataset to pandas dataframe for greater control in querying the data + df = ds.to_dataframe().reset_index() + pl_df = pl.from_pandas(df) + + # Filter the dataframe based on gage ID and start/end time. + # Don't include any entries with null q_cms entries + pl_df = pl_df.filter(pl.col("gage_type") == data_source) + pl_df = pl_df.filter(pl.col("id") == gage_id) + pl_df = pl_df.filter((pl.col("time") >= start_date) & (pl.col("time") <= end_date)) + pl_df = pl_df.drop_nulls(subset=["q_cms"]) + + # Check if the data has any entries in the 'q_cms_denoted_3' column (if so, include in CSV) + has_q_cms_denoted_3_vals = (~pl_df["q_cms_denoted_3"].is_null()).any() + + # Pare down the number of CSV columns for output + if has_q_cms_denoted_3_vals: + pl_df_reordered = pl_df.select(["time", "q_cms", "q_cms_denoted_3"]) + else: + pl_df_reordered = pl_df.select(["time", "q_cms"]) + + # Write finalized CSV file + pl_df_reordered.write_csv(output_file, datetime_format="%Y-%m-%d %H:%M:%S") + click.echo(f"CSV file created successfully (output path: {output_file})!") diff --git a/src/icefabric/helpers/__init__.py b/src/icefabric/helpers/__init__.py new file mode 100644 index 0000000..0aacd21 --- /dev/null +++ b/src/icefabric/helpers/__init__.py @@ -0,0 +1,42 @@ +"""Helper functions designed to assist with managing data. 
Similar to util functions""" + +from .arch_weather_file_utils import ( + add_time_dim_to_datasets, + extract_dates_from_archival_files, + get_archival_weather_files, + load_tiff_file, + virtualize_and_concat_archival_files_on_time, +) +from .creds import load_creds +from .geopackage import table_to_geopandas, to_geopandas +from .io import load_pyiceberg_config +from .nc_conv_utils import conv_nc, convert_files_to_netcdf4, convert_nc_files_from_s3 +from .rise import ( + EXT_RISE_BASE_URL, + RISE_HEADERS, + basemodel_to_query_string, + make_get_req_to_rise, + make_sync_get_req_to_rise, +) +from .topobathy_ic_to_tif import convert_topobathy_to_tiff + +__all__ = [ + "get_archival_weather_files", + "load_tiff_file", + "virtualize_and_concat_archival_files_on_time", + "extract_dates_from_archival_files", + "add_time_dim_to_datasets", + "load_creds", + "table_to_geopandas", + "to_geopandas", + "convert_files_to_netcdf4", + "convert_nc_files_from_s3", + "conv_nc", + "convert_topobathy_to_tiff", + "basemodel_to_query_string", + "make_get_req_to_rise", + "make_sync_get_req_to_rise", + "EXT_RISE_BASE_URL", + "RISE_HEADERS", + "load_pyiceberg_config", +] diff --git a/src/icefabric/helpers/arch_weather_file_utils.py b/src/icefabric/helpers/arch_weather_file_utils.py new file mode 100644 index 0000000..49d948f --- /dev/null +++ b/src/icefabric/helpers/arch_weather_file_utils.py @@ -0,0 +1,282 @@ +""" +Moduile interacting with archival weather files + +Utilities for loading/supplying/interacting/virtualizing +archival weather files, mainly to interface with icechunk +""" + +import os +from pathlib import Path + +import fsspec +import pandas as pd +import rioxarray as rxr +import xarray as xr +from tqdm import tqdm +from virtualizarr import open_virtual_dataset + +from icefabric.builds.icechunk_s3_module import S3Path +from icefabric.schemas.topobathy import FileType + + +def load_tiff_file(fp: str, attr_name: str) -> xr.Dataset | xr.DataArray | list[xr.Dataset]: + """ + Loads a GEOTIFF + + Takes a local filepath to a TIFF and loads it into an Xarray + Dataset object. Could also return a Dataarray or list of + Datasets. + + Parameters + ---------- + fp : str + File path to the TIFF that will be returned as a dataset. + attr_name : str + Name of the attribute of interest. Ex: "elevation". + Note: Not all rasters will be elevation in future. + + Returns + ------- + xr.Dataset | xr.DataArray | list[xr.Dataset] + The Xarray representation of the TIFF file. + + Raises + ------ + FileNotFoundError + """ + if os.path.exists(fp) is False: + raise FileNotFoundError(f"Cannot find: {fp}") + ds = rxr.open_rasterio(fp) + ds = ds.to_dataset(name=attr_name) + return ds + + +def get_archival_weather_files( + loc: str | S3Path, file_type: FileType | None = None, manual_file_pattern: str | None = None +) -> list[str]: + """ + Collect files from a directory + + Given a directory in either local or S3 storage, return all files of a specific type or + matching a file pattern. + + Parameters + ---------- + loc : str | S3Path + Directory housing the files - could be a string for a local path, or an S3Path object + for files on an S3 bucket + file_type : FileType | None, optional + The file type to be collected - NETCDF, TIF, etc. Must be given if no manual pattern is + supplied. By default None + manual_file_pattern : str | None, optional + If supplied, will collect filepaths according to the file pattern instead of by + filetype. Should be parsable by an fsspec.glob() call. 
By default None + + Returns + ------- + list[str] + A list of all retrieved filepaths associated with the directory, file type and/or + manual file pattern. + + Raises + ------ + ValueError + """ + sort_prefix = "" + if type(loc) is S3Path: + sort_prefix = "s3://" + fs = fsspec.filesystem("s3") + else: + loc = os.path.abspath(loc) + fs = fsspec.implementations.local.LocalFileSystem() + + if manual_file_pattern: + print(manual_file_pattern) + files = fs.glob(f"{str(loc)}/{manual_file_pattern}") + else: + if not file_type: + raise ValueError("Must supply a file_type argument if no manual_file_pattern is given") + files = fs.glob(f"{str(loc)}/*{file_type.value}") + files = sorted([f"{sort_prefix}{f}" for f in files]) + return files + + +def extract_dates_from_archival_files( + file_paths: list[str] | list[Path], file_pattern: str, just_year: bool | None = False +) -> list[pd.DatetimeIndex] | list[int]: + """ + Pull dates out of list of file names + + Extracts and returns a sorted list of datetimes corresponding to the provided + list of filepaths. + + Parameters + ---------- + file_paths: list[str] + List of filepaths. File names should correspond to a datetime, and + should contain the datetime embedded in the filename. + file_pattern: str + Matching pattern used to extract the datetime. Format should match the files, + with an asterisk replacing the datetime section of the filename. + just_year: bool | None, optional + If supplied, will only extract year values in int form. + + Returns + ------- + list[DatetimeIndex] | list[int] + Sorted list of the DatetimeIndexes extracted from the filenames. Same + length and ordering as the list of filepaths. Could also just be years + as ints. + """ + pre, post = file_pattern.split("*")[0:2] + files = [fp.split("/")[-1] for fp in file_paths] + dates = [f.replace(pre, "").replace(post, "") for f in files] + if just_year: + date_dims = [pd.date_range(d, periods=1).year for d in sorted(dates)] + else: + date_dims = [pd.date_range(d, periods=1) for d in sorted(dates)] + return date_dims + + +def _virtualize_datasets( + file_list: list[str], loadable_vars: list[str] | None = None, testing_file_quantity: int | None = None +) -> list[xr.Dataset]: + """ + Virtualize archival weather files + + Takes a list of archival weather filepaths and converts each to a virtual dataset. + NOTE: May take a very long time to process if the filelist is long or large. + + Parameters + ---------- + file_list : list[str] + List of archival weather filepaths, each of which will be converted to a virtual dataset. + loadable_vars : list[str] | None, optional + List of variables to open as lazy numpy/dask arrays instead of instances of + ManifestArray. By default None. + testing_file_quantity : int | None, optional + Include if you want to test the virtualization with a subset of files. + Only opens the number specified, starting with the first in the list. By default None + + Returns + ------- + list[xr.Dataset] + List of the virtualized datasets derived from the provided archival weather files. 
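+
+    Examples
+    --------
+    A minimal sketch; the directory and ``loadable_vars`` below are placeholders.
+
+    >>> files = get_archival_weather_files("./archive", file_type=FileType.NETCDF)
+    >>> vds = _virtualize_datasets(files, loadable_vars=["time"], testing_file_quantity=2)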
+ """ + if testing_file_quantity: + file_list = file_list[:testing_file_quantity] + v_datasets = [] + for i in tqdm( + range(len(file_list)), + desc="Opening files as virtual datasets.", + unit="files", + ncols=125, + colour="#37B6BD", + ): + v_datasets.append( + open_virtual_dataset(filepath=file_list[i], indexes={}, loadable_variables=loadable_vars) + ) + return v_datasets + + +def add_time_dim_to_datasets( + timeless_datasets: list[xr.Dataset], + datetimes: list[pd.DatetimeIndex], + just_year: bool | None = False, + testing_file_quantity: int | None = None, +) -> list[xr.Dataset]: + """ + Add time dimension to a collection of data + + Expands each entry in a list of virtualized datasets with a single time dimension + + Parameters + ---------- + timeless_datasets : list[xr.Dataset] + List of virtualized datasets missing time dimensions. + datetimes : list[pd.DatetimeIndex] + List of the DatetimeIndexes that will be added onto ```timeless_datasets```. + Should be the same length and ordering. + just_year: bool | None, optional + Include if your datetimes list is only years in int format. Will add a 'year' dimension + instead. By default False. + testing_file_quantity : int | None, optional + Include if you want to only add the time dimension to a subset of the virtualized datasets. + Only opens the number specified, starting with the first in the list. + NOTE: Make sure the length of ```timeless_datasets``` and ```datetimes``` are equal after + accounting for this. By default None. + + Returns + ------- + list[xr.Dataset] + List of the virtualized datasets with newly added time dimensions. + """ + if testing_file_quantity: + datetimes = datetimes[:testing_file_quantity] + v_datasets = [ + d.expand_dims(year=t) if just_year else d.expand_dims(time=t) + for d, t in zip(timeless_datasets, datetimes, strict=False) + ] + return v_datasets + + +def virtualize_and_concat_archival_files_on_time( + location: str | S3Path, + file_date_pattern: str, + file_type: FileType | None = None, + manual_file_pattern: str | None = None, + just_year: bool | None = False, + loadable_vars: list[str] | None = None, + testing_file_quantity: int | None = None, +) -> xr.Dataset: + """ + Virtualize a collection of weather files and combine them + + Per a given local file directory or S3 bucket directory, collect every archival + weather file (with time data only in the filename) and virtualize and concatenate + the set on a time dimension. Produces a single virtualized dataset. + + Parameters + ---------- + lcoation : str | S3Path + Directory housing the files - could be a string for a local path, or an S3Path object + for files on an S3 bucket + file_date_pattern : str + Matching pattern used to extract the datetime. Format should match the files, + with an asterisk replacing the datetime section of the filename. + file_type: FileType | None, optional + The file type to be collected - NETCDF, TIF, etc. Must be given if no manual pattern is + supplied. By default None + manual_file_pattern : str | None, optional + If supplied, will collect filepaths according to the file pattern instead of by + filetype. Should be parsable by an fsspec.glob() call. By default None. + just_year: bool | None, optional + Include if your filenames only contain years. Will add a 'year' dimension + instead when virtualizing. By default False. + loadable_vars : list[str] | None, optional + List of dataset variables to open as lazy numpy/dask arrays when virtualizing, + instead of instances of ManifestArray. 
Leads to data duplication, but is necessary + in some cases. By default None. + testing_file_quantity : int | None, optional + Include if you want to test the virtualization with a subset files. + Only opens the number specified, starting with the first. Useful for virtualizing + smaller groups of files when testing or debugging. By default None. + + Returns + ------- + xr.Dataset + The fully time-concatenated, virtualized dataset. + """ + arch_files = get_archival_weather_files( + loc=location, file_type=file_type, manual_file_pattern=manual_file_pattern + ) + datetimes = extract_dates_from_archival_files(arch_files, file_date_pattern, just_year=just_year) + timeless_datasets = _virtualize_datasets(arch_files, loadable_vars, testing_file_quantity) + time_added_datasets = add_time_dim_to_datasets( + timeless_datasets, datetimes, just_year, testing_file_quantity + ) + concat_dim = "year" if just_year else "time" + final_dataset = xr.concat( + time_added_datasets, dim=concat_dim, coords="minimal", compat="override", combine_attrs="override" + ) + return final_dataset diff --git a/src/icefabric/helpers/creds.py b/src/icefabric/helpers/creds.py new file mode 100644 index 0000000..883ffe0 --- /dev/null +++ b/src/icefabric/helpers/creds.py @@ -0,0 +1,29 @@ +"""A file for cred helpers""" + +import os + +from dotenv import load_dotenv +from pyprojroot import here + + +def load_creds(): + """Loads the .env and .pyiceberg.yaml files from the project root + + Parameters + ---------- + dir : Path + The directory where the creds exist + + Raises + ------ + FileNotFoundError + The .pyiceberg.yaml file does not exist + """ + load_dotenv(dotenv_path=here() / ".env") + pyiceberg_file = here() / ".pyiceberg.yaml" + if pyiceberg_file.exists(): + os.environ["PYICEBERG_HOME"] = str(pyiceberg_file) + else: + raise FileNotFoundError( + "Cannot find .pyiceberg.yaml. Please download this from NGWPC confluence or create " + ) diff --git a/src/icefabric/helpers/geopackage.py b/src/icefabric/helpers/geopackage.py new file mode 100644 index 0000000..c9e2585 --- /dev/null +++ b/src/icefabric/helpers/geopackage.py @@ -0,0 +1,70 @@ +"""Contains all api functions that can be called outside of the icefabric_tools package""" + +import geopandas as gpd +import pandas as pd +from pyiceberg.expressions import BooleanExpression +from pyiceberg.table import ALWAYS_TRUE, Table + + +def table_to_geopandas( + table: Table, + row_filter: str | BooleanExpression = ALWAYS_TRUE, + case_sensitive: bool | None = True, + snapshot_id: int | None = None, + limit: int | None = None, +) -> gpd.GeoDataFrame: + """Converts a table to a geopandas dataframe + + Parameters + ---------- + table : Table + The iceberg table you are trying to read from + row_filter : str | None, optional + A string or BooleanExpression that describes the desired rows, by default "" + case_sensitive : bool | None, optional + If True column matching is case sensitive, by default True + snapshot_id : int | None, optional + Optional Snapshot ID to time travel to. + If None, scans the table as of the current snapshot ID, by default None + limit : int | None, optional + An integer representing the number of rows to return in the scan result. 
+ If None, fetches all matching rows., by default None + + Returns + ------- + gpd.DataFrame + The resulting queried row, but in a geodataframe + """ + df = table.scan( + row_filter=row_filter, + case_sensitive=case_sensitive, + snapshot_id=snapshot_id, + limit=limit, + ).to_pandas() + return to_geopandas(df) + + +def to_geopandas(df: pd.DataFrame, crs: str = "EPSG:5070") -> gpd.GeoDataFrame: + """Converts the geometries in a pandas df to a geopandas dataframe + + Parameters + ---------- + df: pd.DataFrame + The iceberg table you are trying to read from + crs: str, optional + A string representing the CRS to set in the gdf, by default "EPSG:5070" + + Returns + ------- + gpd.DataFrame + The resulting queried row, but in a geodataframe + + Raises + ------ + ValueError + Raised if the table does not have a geometry column + """ + if "geometry" not in df.columns: + raise ValueError("The provided table does not have a geometry column.") + + return gpd.GeoDataFrame(df, geometry=gpd.GeoSeries.from_wkb(df["geometry"]), crs=crs) diff --git a/src/icefabric/helpers/io.py b/src/icefabric/helpers/io.py new file mode 100644 index 0000000..60a527a --- /dev/null +++ b/src/icefabric/helpers/io.py @@ -0,0 +1,74 @@ +import json +import tempfile +import zipfile +from pathlib import Path +from typing import Any + +import yaml +from pyprojroot import here +from tqdm import tqdm + +from icefabric.schemas.modules import NWMProtocol + + +def _create_config_zip(configs: list[NWMProtocol], output_path: Path, **kwargs): + """Creates a zip file of BMI configs with a metadata.json file containing query information + + Parameters + ---------- + configs : list[NWMProtocol] + The list of config NWMProtocol models + output_path : Path + The output path location to write zip files + """ + with tempfile.TemporaryDirectory() as temp_dir_str: + temp_dir = Path(temp_dir_str) + config_files = [] + + # Write config files + for config in tqdm(configs, desc="Creating a config file", total=len(configs), ncols=140): + file_path = config.model_dump_config(temp_dir) + config_files.append(file_path) + + # Create metadata file + metadata_path = temp_dir / "metadata.json" + with metadata_path.open("w", encoding="UTF-8") as f: + json.dump(kwargs["kwargs"], f) # Removes the root from the dict + config_files.append(metadata_path) + + output_file = output_path / "configs.zip" + + with zipfile.ZipFile(output_file, "w", zipfile.ZIP_DEFLATED) as f: + for file_path in config_files: + archive_name = file_path.name + f.write(file_path, archive_name) + + +def load_pyiceberg_config() -> dict[str, Any]: + """Reads a .pyiceberg.yaml config file to memory + + Parameters + ---------- + cwd : Path + the path to the .pyiceberg.yaml file + + Returns + ------- + dict[str, Any] + The pyiceberg yaml file + + Raises + ------ + FileNotFoundError + Can't find the YAML file in the CWD + yaml.YAMLError + Error parsing the YAML file + """ + try: + with open(here() / ".pyiceberg.yaml", encoding="utf-8") as file: + data = yaml.safe_load(file) + return data if data is not None else {} + except FileNotFoundError as e: + raise FileNotFoundError(f".pyiceberg YAML file not found in pwd: {here()}") from e + except yaml.YAMLError as e: + raise yaml.YAMLError(f"Error parsing .pyiceberg YAML file: {e}") from e diff --git a/src/icefabric/helpers/nc_conv_utils.py b/src/icefabric/helpers/nc_conv_utils.py new file mode 100644 index 0000000..185f500 --- /dev/null +++ b/src/icefabric/helpers/nc_conv_utils.py @@ -0,0 +1,147 @@ +"""Utilities for converting NETCDF3 files into 
NETCDF4""" + +import os +import subprocess + +from netCDF4 import Dataset as NCDataset +from tqdm import tqdm + +import icefabric.helpers.arch_weather_file_utils as awf_utils +from icefabric.builds.icechunk_s3_module import S3Path + + +def convert_files_to_netcdf4( + files: list[str], + new_dir: str | None = "", + fn_prefix: str | None = "", + fn_suffix: str | None = "", +): + """ + Convert collection of NCDF3 files to v4 + + Given a list of NETCDF3 files, convert them all to NETCDF4 and store them + in a new directory. Pre/suffixes can be specified as well for the new file + names. Will be stored alongside the old files, or in a sibling directory if + a dir name is provided. + + Parameters + ---------- + files : list[str] + The list of NETCDF3 filepaths to be converted. + new_dir : str | None, optional + If provided, will store the newly converted files + in a different directory. It will create the directory + in the same relative path as all the files to be converted. + By default "" + fn_prefix : str | None, optional + If provided, will prepend a prefix to the new file names. By default "". + fn_suffix : str | None, optional + If provided, will append a suffix to the new file names. By default "". + """ + if not new_dir and not fn_prefix and not fn_suffix: + fn_suffix = "_new" + for f in files: + dir_path, basename = os.path.split(f) + basename, ext = os.path.splitext(basename) + if not os.path.exists(os.path.join(dir_path, new_dir)): + os.makedirs(os.path.join(dir_path, new_dir)) + dir_path = os.path.join(dir_path, new_dir) + conv_nc(f, f"{dir_path}/{fn_prefix}{basename}{fn_suffix}{ext}") + + +def convert_nc_files_from_s3( + orig: S3Path, + dest: S3Path, + manual_file_pattern: str | None = None, + testing_file_quantity: int | None = None, +): + """ + Convert NETCDF3 collection from S3 location to v4 + + Given an S3 path populated with NETCDF3 files, sequentially + DL & convert them to NETCDF4, then re-upload them to a different + S3 path. All files created on local filesystems are deleted as the + process runs. + + Parameters + ---------- + orig : S3Path + S3 path containing the files to be converted + dest : S3Path + S3 path where the newly-converetd files will be + uploaded. + manual_file_pattern : str | None, optional + If given, will supply a manual file pattern to + when gathering the filepaths for conversion. May be + useful to only include subsets of files. By default None. + testing_file_quantity : int | None, optional + Include if you want to test the conversion with a subset files. + Only opens the number specified, starting with the first. By default None. 
+ """ + temp_down_dir = os.path.join(os.getcwd(), ".tmp") + temp_conv_dir = os.path.join(os.getcwd(), ".tmp/conv") + nc3_file_list = awf_utils.get_archival_weather_files( + loc=orig, file_type=awf_utils.FileType.NETCDF, manual_file_pattern=manual_file_pattern + ) + if testing_file_quantity: + nc3_file_list = nc3_file_list[:testing_file_quantity] + os.makedirs(temp_down_dir, exist_ok=True) + os.makedirs(temp_conv_dir, exist_ok=True) + + for i in tqdm( + range(len(nc3_file_list)), + desc="Converting netcdf files from S3", + unit="files", + ncols=125, + colour="#37B6BD", + ): + nc3_file = nc3_file_list[i].removesuffix("s3://").split("/")[-1] + down_path = os.path.join(temp_down_dir, nc3_file) + conv_path = os.path.join(temp_conv_dir, "conv.nc") + subprocess.call(["aws", "s3", "cp", nc3_file_list[i], down_path, "--quiet"]) + conv_nc(down_path, conv_path, quiet=True) + subprocess.call(["aws", "s3", "cp", conv_path, f"{str(dest)}/{nc3_file}", "--quiet"]) + subprocess.call(["rm", down_path, "-f"]) + subprocess.call(["rm", conv_path, "-f"]) + + +def conv_nc(orig_file_path: str, new_file_path: str, quiet: bool | None = False): + """ + Given a NETCDF3-formatted file, convert it to NETCDF4. + + Parameters + ---------- + orig_file_path : str + NETCDF3 filepath. + new_file_path : str + Filepath where the converted file will end up. + quiet : bool | None, optional + Provide if no print/log statements are desired. + By default False. + """ + # Open the NetCDF3 file in read mode + nc3_data = NCDataset(orig_file_path, mode="r", format="NETCDF3_CLASSIC") + + # Create a new NetCDF4 file in write mode + nc4_data = NCDataset(new_file_path, mode="w", format="NETCDF4") + + # Copy global attributes + for attr_name in nc3_data.ncattrs(): + nc4_data.setncattr(attr_name, nc3_data.getncattr(attr_name)) + + # Copy dimensions + for dim_name, dim in nc3_data.dimensions.items(): + nc4_data.createDimension(dim_name, len(dim) if not dim.isunlimited() else None) + + # Copy variables + for var_name, var in nc3_data.variables.items(): + nc4_var = nc4_data.createVariable(var_name, var.datatype, var.dimensions) + nc4_var.setncatts({attr_name: var.getncattr(attr_name) for attr_name in var.ncattrs()}) + nc4_var[:] = var[:] + + # Close both files + nc3_data.close() + nc4_data.close() + + if not quiet: + print(f"Conversion from NetCDF3 to NetCDF4 completed: {new_file_path}") diff --git a/src/icefabric/helpers/rise.py b/src/icefabric/helpers/rise.py new file mode 100644 index 0000000..00d73d7 --- /dev/null +++ b/src/icefabric/helpers/rise.py @@ -0,0 +1,85 @@ +"""A file to assist with querying data from the RISE app""" + +from typing import Any +from urllib.parse import urlencode + +import httpx +from pydantic import BaseModel + +from icefabric.schemas.rise_parameters import PARAM_CONV + +EXT_RISE_BASE_URL = "https://data.usbr.gov/rise/api" +RISE_HEADERS = {"accept": "application/vnd.api+json"} + + +def basemodel_to_query_string(model: BaseModel) -> str: + """ + Encodes a basemodel into the querying string portion of a GET request. + + Also uses the PARAM_CONV definition to convert parameter names that are + invalid in python. + """ + filtered_params = model.model_dump(exclude_none=True) + for k in PARAM_CONV.keys(): + if k in filtered_params: + filtered_params[PARAM_CONV[k]] = filtered_params.pop(k) + q_str = urlencode(filtered_params) + if q_str != "": + q_str = f"?{q_str}" + return q_str + + +async def make_get_req_to_rise(full_url: str): + """ + Makes an asynchronous GET request to the RISE API. 
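+
+    Example (illustrative; the endpoint and query parameters are placeholders):
+
+    >>> import asyncio
+    >>> resp = asyncio.run(make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/catalog-item?itemsPerPage=5"))
+    >>> resp["status_code"]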
+ + Returns a response dict with the status code and the message body. If + the response is an error from RISE, the original code and message is + returned as well. + """ + rise_response = {} + async with httpx.AsyncClient() as client: + try: + rise_response = {"status_code": 200, "detail": ""} + print(f"Making GET request to RISE (full URL): {full_url}") + resp = await client.get(full_url, headers=RISE_HEADERS, timeout=15) + resp.raise_for_status() + rise_response["detail"] = resp.json() + except httpx.HTTPError as err: + print(f"RISE API returned an HTTP error: {err}") + rise_response["status_code"] = int(err.response.status_code) + rise_response["detail"] = err.response.text + return rise_response + + +def make_sync_get_req_to_rise(full_url: str) -> dict[str, Any]: + """ + Makes a synchronous GET request to the RISE API. + + Returns a response dict with the status code and the message body. If + the response is an error from RISE, the original code and message is + returned as well. + + Parameters + ---------- + full_url : str + The complete URL to make the GET request to + + Returns + ------- + dict[str, Any] + Dictionary containing 'status_code' and 'detail' keys + """ + rise_response = {} + try: + rise_response = {"status_code": 200, "detail": ""} + print(f"Making GET request to RISE (full URL): {full_url}") + + resp = httpx.get(full_url, headers=RISE_HEADERS, timeout=15) + resp.raise_for_status() + rise_response["detail"] = resp.json() + except httpx.HTTPError as err: + print(f"RISE API returned an HTTP error: {err}") + rise_response["status_code"] = int(err.response.status_code) + rise_response["detail"] = err.response.text + return rise_response diff --git a/src/icefabric/helpers/topobathy_ic_to_tif.py b/src/icefabric/helpers/topobathy_ic_to_tif.py new file mode 100644 index 0000000..4ee6057 --- /dev/null +++ b/src/icefabric/helpers/topobathy_ic_to_tif.py @@ -0,0 +1,23 @@ +import os + +from tqdm import tqdm + +from icefabric.builds.icechunk_s3_module import IcechunkRepo +from icefabric.schemas.topobathy import NGWPCLocations + + +def convert_topobathy_to_tiff(output_dir: str, ic_rasters: list[str]) -> None: + """Converts topobathy layers from icechunk to tiff for use in tiles + + Parameters + ---------- + output_dir : str + Directory to save outputs to + ic_rasters : list[NGWPCLocations] + list of NGWPCLocation raster paths. eg. [NGWPCLocations[TOPO_AK_30M_IC].path] + """ + for ic_raster in tqdm(ic_rasters, desc="Downloading IC Rasters to .tif"): + repo = IcechunkRepo(location=NGWPCLocations[ic_raster].path) + output = os.path.join(output_dir, f"{str.split(str(NGWPCLocations[ic_raster].path), '/')[-1]}.tif") + + repo.retrieve_and_convert_to_tif(dest=output, var_name="elevation") diff --git a/src/icefabric/hydrofabric/__init__.py b/src/icefabric/hydrofabric/__init__.py new file mode 100644 index 0000000..88909ec --- /dev/null +++ b/src/icefabric/hydrofabric/__init__.py @@ -0,0 +1,9 @@ +"""Helper functions designed to assist with managing data. 
Similar to util functions""" + +import json +from pathlib import Path + +from .origin import find_origin +from .subset import subset_hydrofabric + +__all__ = ["find_origin", "subset_hydrofabric"] diff --git a/src/icefabric/hydrofabric/origin.py b/src/icefabric/hydrofabric/origin.py new file mode 100644 index 0000000..807d3da --- /dev/null +++ b/src/icefabric/hydrofabric/origin.py @@ -0,0 +1,72 @@ +"""Finds the origin of the Hydrofabric id""" + +import polars as pl +from polars import LazyFrame + +from icefabric.schemas.hydrofabric import IdType + + +def find_origin( + network_table: LazyFrame, + identifier: str | float, + id_type: IdType = IdType.HL_URI, + return_all: bool = False, +) -> pl.DataFrame: + """Find an origin point in the hydrofabric network. + + This function handles the case where multiple records match the identifier. + It follows the R implementation to select a single origin point based on + the minimum hydroseq value. + + Parameters + ---------- + network_table : LazyFrame + The HF network table from the hydrofabric catalog + identifier : str | float + The unique identifier you want to find the origin of + id_type : IdType, optional + The network table column you can query from, by default "hl_uri" + return_all: bool, False + Returns all origin points (for subsetting) + + Returns + ------- + pd.DataFrame + The origin row from the network table + + Raises + ------ + ValueError + The provided identifier is not supported + ValueError + No origin for the point is found + ValueError + Multiple origins for the point are found + """ + # Get all matching records + origin_candidates = ( + network_table.filter(pl.col(id_type.value).is_not_null() & (pl.col(id_type.value) == identifier)) + .select(["id", "toid", "vpuid", "hydroseq", "poi_id", "hl_uri"]) + .collect() + ) + + if origin_candidates.height == 0: + raise ValueError(f"No origin found for {id_type}='{identifier}'") + + origin = origin_candidates.unique() + + if not return_all: + # Find the record with minimum hydroseq if column exists + if "hydroseq" in origin.columns: + # Check if there are multiple unique hydroseq values + unique_hydroseq = origin.select(pl.col("hydroseq").unique()) + if unique_hydroseq.height > 1: + # Sort by hydroseq and take the first row (minimum) + origin = origin.sort("hydroseq").slice(0, 1) + + # Check for multiple origins after processing + if origin.height > 1: + origin_ids = origin.get_column("id").to_list() + raise ValueError(f"Multiple origins found: {origin_ids}") + + return origin diff --git a/src/icefabric/hydrofabric/subset.py b/src/icefabric/hydrofabric/subset.py new file mode 100644 index 0000000..95121b4 --- /dev/null +++ b/src/icefabric/hydrofabric/subset.py @@ -0,0 +1,347 @@ +"""Functional hydrofabric subset implementation using pre-computed upstream lookup table with Polars""" + +import geopandas as gpd +import pandas as pd +import polars as pl +import rustworkx as rx +from pyiceberg.catalog import Catalog +from pyiceberg.expressions import EqualTo, In + +from icefabric.helpers.geopackage import to_geopandas +from icefabric.hydrofabric.origin import find_origin +from icefabric.schemas.hydrofabric import UPSTREAM_VPUS, IdType + + +def get_upstream_segments(origin: str, graph: rx.PyDiGraph) -> set[str]: + """Subsets the hydrofabric to find all upstream watershed boundaries upstream of the origin fp + + Parameters + ---------- + origin: str + The starting point where we're tracing upstream + graph: rx.PyDiGraph + a dictionary which preprocesses all toid -> id relationships + + Returns + 
------- + set[str] + The watershed boundary connections that make up the subset + """ + indices = graph.node_indices() + data_list = graph.nodes() + node_to_index = dict(zip(data_list, indices, strict=False)) + + start_idx = node_to_index.get(origin) + + if start_idx is None: + return set() + + upstream_indices = rx.bfs_predecessors(graph, start_idx) + flattened = set() + for key, values in upstream_indices: + flattened.add(key) + flattened.update(values) + + return flattened + + +def subset_layers( + catalog: Catalog, + namespace: str, + layers: list[str], + upstream_ids: set[str], + vpu_id: str, +) -> dict[str, pd.DataFrame | gpd.GeoDataFrame]: + """Efficiently subset a layer using Polars and the upstream IDs + + Parameters + ---------- + catalog : Catalog + The pyiceberg catalog + namespace : str + the domain / namespace we're reading from in the catalog + layers : list[str] + The layers to read into a file + upstream_ids : set[str] + Upstream IDs queried + vpu_id : str + VPU of query + + Returns + ------- + dict[str, pd.DataFrame | gpd.GeoDataFrame] + Dictionary of layer name to dataframe + """ + # Ensuring there are always divides, flowpaths, network, and nexus layers + if layers is None: + layers = [] + layers.extend(["divides", "flowpaths", "network", "nexus"]) + layers = list(set(layers)) + + upstream_ids_list = list(upstream_ids) + + # Create VPU filter + if vpu_id in UPSTREAM_VPUS: + # Use upstream VPUs mapping if available + vpu_filter = In("vpuid", UPSTREAM_VPUS[vpu_id]) + else: + # Use single VPU filter + vpu_filter = EqualTo("vpuid", vpu_id) + + print("Subsetting network layer") + network = catalog.load_table(f"{namespace}.network").scan(row_filter=vpu_filter).to_polars() + filtered_network = network.filter( + pl.col("id").is_in(upstream_ids_list) | pl.col("toid").is_in(upstream_ids_list) + ).with_columns( + pl.col("poi_id").map_elements(lambda x: str(int(x)) if x is not None else None, return_dtype=pl.Utf8) + ) + valid_hf_id = ( + filtered_network.select(pl.col("hf_id").drop_nulls().unique().cast(pl.Float64)).to_series().to_list() + ) + + print("Subsetting flowpaths layer") + flowpaths = catalog.load_table(f"{namespace}.flowpaths").scan(row_filter=vpu_filter).to_polars() + filtered_flowpaths = flowpaths.filter(pl.col("id").is_in(upstream_ids_list)) + assert filtered_flowpaths.height > 0, "No flowpaths found" + filtered_flowpaths_geo = to_geopandas(filtered_flowpaths.to_pandas()) + + print("Subsetting nexus layer") + valid_toids = filtered_flowpaths.filter(pl.col("toid").is_not_null()).get_column("toid").to_list() + assert valid_toids, "No nexus points found" + nexus = catalog.load_table(f"{namespace}.nexus").scan(row_filter=vpu_filter).to_polars() + filtered_nexus_points = nexus.filter(pl.col("id").is_in(valid_toids)).with_columns( + pl.col("poi_id").map_elements(lambda x: str(int(x)) if x is not None else None, return_dtype=pl.Utf8) + ) + filtered_nexus_points_geo = to_geopandas(filtered_nexus_points.to_pandas()) + + print("Subsetting divides layer") + valid_divide_ids = ( + filtered_network.filter(pl.col("divide_id").is_not_null()).get_column("divide_id").unique().to_list() + ) + assert valid_divide_ids, "No valid divide_ids found" + divides = catalog.load_table(f"{namespace}.divides").scan(row_filter=vpu_filter).to_polars() + filtered_divides = divides.filter(pl.col("divide_id").is_in(valid_divide_ids)) + filtered_divides_geo = to_geopandas(filtered_divides.to_pandas()) + + output_layers = { + "flowpaths": filtered_flowpaths_geo, + "nexus": filtered_nexus_points_geo, + 
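+        # The four core layers (flowpaths, nexus, divides, network) are always present;
+        # any optional layers requested via `layers` are appended below.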
"divides": filtered_divides_geo, + "network": filtered_network.to_pandas(), # Convert to pandas for final output + } + + if "lakes" in layers: + print("Subsetting lakes layer") + lakes = catalog.load_table(f"{namespace}.lakes").scan(row_filter=vpu_filter).to_polars() + filtered_lakes = lakes.filter(pl.col("hf_id").is_in(valid_hf_id)) + filtered_lakes_geo = to_geopandas(filtered_lakes.to_pandas()) + output_layers["lakes"] = filtered_lakes_geo + + if "divide-attributes" in layers: + print("Subsetting divide-attributes layer") + divides_attr = ( + catalog.load_table(f"{namespace}.divide-attributes").scan(row_filter=vpu_filter).to_polars() + ) + filtered_divide_attr = divides_attr.filter(pl.col("divide_id").is_in(valid_divide_ids)) + output_layers["divide-attributes"] = filtered_divide_attr.to_pandas() + + if "flowpath-attributes" in layers: + print("Subsetting flowpath-attributes layer") + flowpath_attr = ( + catalog.load_table(f"{namespace}.flowpath-attributes").scan(row_filter=vpu_filter).to_polars() + ) + filtered_flowpath_attr = flowpath_attr.filter(pl.col("id").is_in(upstream_ids_list)) + output_layers["flowpath-attributes"] = filtered_flowpath_attr.to_pandas() + + if "flowpath-attributes-ml" in layers: + print("Subsetting flowpath-attributes-ml layer") + flowpath_attr_ml = ( + catalog.load_table(f"{namespace}.flowpath-attributes-ml").scan(row_filter=vpu_filter).to_polars() + ) + filtered_flowpath_attr_ml = flowpath_attr_ml.filter(pl.col("id").is_in(upstream_ids_list)) + output_layers["flowpath-attributes-ml"] = filtered_flowpath_attr_ml.to_pandas() + + if "pois" in layers: + print("Subsetting pois layer") + pois = catalog.load_table(f"{namespace}.pois").scan(row_filter=vpu_filter).to_polars() + filtered_pois = pois.filter(pl.col("id").is_in(upstream_ids_list)) + output_layers["pois"] = filtered_pois.to_pandas() + + if "hydrolocations" in layers: + print("Subsetting hydrolocations layer") + hydrolocations = ( + catalog.load_table(f"{namespace}.hydrolocations").scan(row_filter=vpu_filter).to_polars() + ) + filtered_hydrolocations = hydrolocations.filter(pl.col("id").is_in(upstream_ids_list)) + output_layers["hydrolocations"] = filtered_hydrolocations.to_pandas() + + return output_layers + + +def subset_hydrofabric_vpu( + catalog: Catalog, + namespace: str, + layers: list[str], + vpu_id: str, +) -> dict[str, pd.DataFrame | gpd.GeoDataFrame]: + """Subsets layers by VPU ID + + Parameters + ---------- + catalog : Catalog + The pyiceberg catalog + namespace : str + the domain / namespace we're reading from in the catalog + layers : list[str] + The layers to read into a file + vpu_id : str + Desired VPU + + Returns + ------- + dict[str, pd.DataFrame | gpd.GeoDataFrame] + Dictionary of layer name to dataframe + """ + # Ensuring there are always divides, flowpaths, network, and nexus layers + if layers is None: + layers = [] + layers.extend(["divides", "flowpaths", "network", "nexus"]) + layers = list(set(layers)) + + # Use single VPU filter + vpu_filter = EqualTo("vpuid", vpu_id) + + print("Subsetting network layer") + network = catalog.load_table(f"{namespace}.network").scan(row_filter=vpu_filter).to_polars() + filtered_network = network.with_columns( + pl.col("poi_id").map_elements(lambda x: str(int(x)) if x is not None else None, return_dtype=pl.Utf8) + ) + valid_hf_id = ( + filtered_network.select(pl.col("hf_id").drop_nulls().unique().cast(pl.Float64)).to_series().to_list() + ) + assert filtered_network.height > 0, "No network records found" + + print("Subsetting flowpaths layer") + 
filtered_flowpaths = catalog.load_table(f"{namespace}.flowpaths").scan(row_filter=vpu_filter).to_polars() + + assert filtered_flowpaths.height > 0, "No flowpaths found" + filtered_flowpaths_geo = to_geopandas(filtered_flowpaths.to_pandas()) + + print("Subsetting nexus layer") + nexus = catalog.load_table(f"{namespace}.nexus").scan(row_filter=vpu_filter).to_polars() + filtered_nexus_points = nexus.with_columns( + pl.col("poi_id").map_elements(lambda x: str(int(x)) if x is not None else None, return_dtype=pl.Utf8) + ) + filtered_nexus_points_geo = to_geopandas(filtered_nexus_points.to_pandas()) + + print("Subsetting divides layer") + valid_divide_ids = ( + filtered_network.filter(pl.col("divide_id").is_not_null()).get_column("divide_id").unique().to_list() + ) + assert valid_divide_ids, "No valid divide_ids found" + divides = catalog.load_table(f"{namespace}.divides").scan(row_filter=vpu_filter).to_polars() + filtered_divides_geo = to_geopandas(divides.to_pandas()) + + output_layers = { + "flowpaths": filtered_flowpaths_geo, + "nexus": filtered_nexus_points_geo, + "divides": filtered_divides_geo, + "network": filtered_network.to_pandas(), # Convert to pandas for final output + } + + if "lakes" in layers: + print("Subsetting lakes layer") + lakes = catalog.load_table(f"{namespace}.lakes").scan(row_filter=vpu_filter).to_polars() + filtered_lakes = lakes.filter(pl.col("hf_id").is_in(valid_hf_id)) + filtered_lakes_geo = to_geopandas(filtered_lakes.to_pandas()) + output_layers["lakes"] = filtered_lakes_geo + + if "divide-attributes" in layers: + print("Subsetting divide-attributes layer") + divides_attr = ( + catalog.load_table(f"{namespace}.divide-attributes").scan(row_filter=vpu_filter).to_polars() + ) + filtered_divide_attr = divides_attr.filter(pl.col("divide_id").is_in(valid_divide_ids)) + output_layers["divide-attributes"] = filtered_divide_attr.to_pandas() + + if "flowpath-attributes" in layers: + print("Subsetting flowpath-attributes layer") + output_layers["flowpath-attributes"] = ( + catalog.load_table(f"{namespace}.flowpath-attributes").scan(row_filter=vpu_filter).to_pandas() + ) + + if "flowpath-attributes-ml" in layers: + print("Subsetting flowpath-attributes-ml layer") + output_layers["flowpath-attributes-ml"] = ( + catalog.load_table(f"{namespace}.flowpath-attributes-ml").scan(row_filter=vpu_filter).to_pandas() + ) + + if "pois" in layers: + print("Subsetting pois layer") + output_layers["pois"] = ( + catalog.load_table(f"{namespace}.pois").scan(row_filter=vpu_filter).to_pandas() + ) + + if "hydrolocations" in layers: + print("Subsetting hydrolocations layer") + output_layers["hydrolocations"] = ( + catalog.load_table(f"{namespace}.hydrolocations").scan(row_filter=vpu_filter).to_pandas() + ) + + return output_layers + + +def subset_hydrofabric( + catalog: Catalog, + identifier: str | float, + id_type: IdType, + layers: list[str], + namespace: str, + graph: rx.PyDiGraph, +) -> dict[str, pd.DataFrame | gpd.GeoDataFrame]: + """ + Main subset function using pre-computed upstream lookup + + Parameters + ---------- + catalog : Catalog + PyIceberg catalog + identifier : str | float + The identifier to subset around + id_type : str + Type of identifier + layers : List[str] + List of layers to subset + namespace : str + Domain name / namespace + upstream_dict : Dict[str, Set[str]] + Pre-computed upstream lookup dictionary + + Returns + ------- + Dict[str, pd.DataFrame | gpd.GeoDataFrame] + Dictionary of layer names to their subsetted dataframes + """ + print(f"Starting subset for 
{identifier}") + + network_table = catalog.load_table(f"{namespace}.network").to_polars() + origin_row = find_origin(network_table, identifier, id_type, return_all=True) + origin_ids = origin_row.select(pl.col("id")).to_series() + to_ids = origin_row.select(pl.col("toid")).to_series() + vpu_id = origin_row.select(pl.col("vpuid")).to_series()[0] # only need the first + upstream_ids = set() + for origin_id, to_id in zip(origin_ids, to_ids, strict=False): + print(f"Found origin flowpath: {origin_id}") + _upstream_ids = get_upstream_segments(origin_id, graph) + upstream_ids |= _upstream_ids # in-place union + if len(upstream_ids) == 0: + upstream_ids.add(origin_id) # Ensuring the origin WB is captured + else: + upstream_ids.add(to_id) # Adding the nexus point to ensure it's captured in the network table + print(f"Tracking {len(upstream_ids)} total upstream segments") + + output_layers = subset_layers( + catalog=catalog, namespace=namespace, layers=layers, upstream_ids=upstream_ids, vpu_id=vpu_id + ) + + return output_layers diff --git a/src/icefabric/modules/__init__.py b/src/icefabric/modules/__init__.py new file mode 100644 index 0000000..26396fa --- /dev/null +++ b/src/icefabric/modules/__init__.py @@ -0,0 +1,87 @@ +"""Contains helper functions to support NWM modules""" + +import enum + +from .create_ipes import ( + get_cfe_parameters, + get_lasam_parameters, + get_lstm_parameters, + get_noahowp_parameters, + get_sacsma_parameters, + get_sft_parameters, + get_smp_parameters, + get_snow17_parameters, + get_topmodel_parameters, + get_topoflow_parameters, + get_troute_parameters, + get_ueb_parameters, +) +from .rnr import get_rnr_segment + + +class NWMModules(enum.Enum): + """A list of all supported NWM Modules""" + + SFT = "sft" + LSTM = "lstm" + LASAM = "lasam" + NOAHOWP = "noah_owp" + SMP = "smp" + SNOW17 = "snow17" + SACSMA = "sacsma" + TROUTE = "troute" + TOPMODEL = "topmodel" + TOPOFLOW = "topoflow" + UEB = "ueb" + CFE = "cfe" + + +class SmpModules(str, enum.Enum): + """Enum class for defining acceptable inputs for the SMP 'module' variable""" + + cfe_s = "CFE-S" + cfe_x = "CFE-X" + lasam = "LASAM" + topmodel = "TopModel" + + +modules_with_extra_args = { + "sft": ["use_schaake"], + "snow17": ["envca"], + "sacsma": ["envca"], + "smp": ["module"], + "cfe": ["cfe_version"], + "lasam": ["sft_included", "soil_params_file"], +} + + +config_mapper = { + "sft": get_sft_parameters, + "lstm": get_lstm_parameters, + "lasam": get_lasam_parameters, + "noah_owp": get_noahowp_parameters, + "smp": get_smp_parameters, + "snow17": get_snow17_parameters, + "sacsma": get_sacsma_parameters, + "troute": get_troute_parameters, + "topmodel": get_topmodel_parameters, + "topoflow": get_topoflow_parameters, + "ueb": get_ueb_parameters, + "cfe": get_cfe_parameters, +} + +__all__ = [ + "get_sft_parameters", + "get_rnr_segment", + "get_lstm_parameters", + "get_lasam_parameters", + "get_noahowp_parameters", + "get_smp_parameters", + "get_snow17_parameters", + "get_sacsma_parameters", + "get_troute_parameters", + "get_topmodel_parameters", + "get_topoflow_parameters", + "get_ueb_parameters", + "get_cfe_parameters", +] diff --git a/src/icefabric/modules/create_ipes.py b/src/icefabric/modules/create_ipes.py new file mode 100644 index 0000000..6b13b78 --- /dev/null +++ b/src/icefabric/modules/create_ipes.py @@ -0,0 +1,999 @@ +import collections +import json + +import geopandas as gpd +import pandas as pd +import polars as pl +import rustworkx as rx +from ambiance import Atmosphere +from pyiceberg.catalog import Catalog 
+from pyproj import Transformer + +from icefabric.hydrofabric import subset_hydrofabric +from icefabric.schemas.hydrofabric import IdType +from icefabric.schemas.modules import ( + CFE, + LASAM, + LSTM, + SFT, + SMP, + UEB, + CalibratableScheme, + CFEValues, + IceFractionScheme, + NoahOwpModular, + SacSma, + SacSmaValues, + Snow17, + SoilScheme, + Topmodel, + Topoflow, + TRoute, + UEBValues, +) + + +def _get_mean_soil_temp() -> float: + """Returns an avg soil temp of 45 degrees F converted to Kelvin. This equation is just a reasonable estimate per new direction (EW: 07/2025) + + Returns + ------- + float + The mean soil temperature + """ + return (45 - 32) * 5 / 9 + 273.15 + + +def get_sft_parameters( + catalog: Catalog, + namespace: str, + identifier: str, + graph: rx.PyDiGraph, + use_schaake: bool = False, +) -> list[SFT]: + """Creates the initial parameter estimates for the SFT module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + use_schaake : bool, optional + A setting to determine if Shaake should be used for ice fraction, by default False + + Returns + ------- + list[SFT] + The list of all initial parameters for catchments using SFT + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = {"smcmax": "mean.smcmax", "bexp": "mode.bexp", "psisat": "geom_mean.psisat"} + + df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] # Keep the divide_id + for param_name, prefix in attr.items(): + # Find all columns that start with the prefix + matching_cols = [col for col in df.columns if col.startswith(prefix)] + if matching_cols: + # Calculate mean across matching columns for each row. + # NOTE: this assumes an even weighting. TODO: determine if we need to have weighted averaging + expressions.append( + pl.concat_list([pl.col(col) for col in matching_cols]).list.mean().alias(f"{param_name}_avg") + ) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}_avg")) + result_df = df.select(expressions) + mean_temp = _get_mean_soil_temp() + pydantic_models = [] + for row_dict in result_df.iter_rows(named=True): + # Instantiate the Pydantic model for each row + model_instance = SFT( + catchment=row_dict["divide_id"], + smcmax=row_dict["smcmax_avg"], + b=row_dict["bexp_avg"], + satpsi=row_dict["psisat_avg"], + ice_fraction_scheme=IceFractionScheme.XINANJIANG + if use_schaake is False + else IceFractionScheme.SCHAAKE, + soil_temperature=[ + mean_temp for _ in range(4) + ], # Assuming 45 degrees in all layers. TODO: Fix this as this doesn't make sense + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_snow17_parameters( + catalog: Catalog, namespace: str, identifier: str, envca: bool, graph: rx.PyDiGraph +) -> list[Snow17]: + """Creates the initial parameter estimates for the Snow17 module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + envca : bool, optional + If source is ENVCA, then set to True - otherwise False. 
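+    graph : rx.PyDiGraph
+        Pre-computed flowpath connectivity graph passed through to subset_hydrofabric.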
+ + Returns + ------- + list[Snow17] + The list of all initial parameters for catchments using Snow17 + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = {"elevation_mean": "mean.elevation", "lat": "centroid_y", "lon": "centroid_x"} + + # Extraction of relevant features from divide attributes layer + # & convert to polar + divide_attr_df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] + for param_name, prefix in attr.items(): + # Find all columns that start with the prefix + matching_cols = [col for col in divide_attr_df.columns if col.startswith(prefix)] + if matching_cols: + expressions.append(pl.concat([pl.col(col) for col in matching_cols]).alias(f"{param_name}")) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}")) + + divide_attr_df = divide_attr_df.select(expressions) + + # Extraction of relevant features from divides layer + divides_df = gauge["divides"][["divide_id", "areasqkm"]] + + # Ensure final result aligns properly based on each instances divide ids + result_df = pd.merge(divide_attr_df.to_pandas(), divides_df, on="divide_id", how="left") + + # Convert elevation from cm to m + result_df["elevation_mean"] = result_df["elevation_mean"] * 0.01 + + # Convert CRS to WGS84 (EPSG4326) + crs = gauge["divides"].crs + transformer = Transformer.from_crs(crs, 4326) + wgs84_latlon = transformer.transform(result_df["lon"], result_df["lat"]) + result_df["lon"] = wgs84_latlon[0] + result_df["lat"] = wgs84_latlon[1] + + # Default parameter values used only for CONUS + result_df["mfmax"] = CalibratableScheme.MFMAX.value + result_df["mfmin"] = CalibratableScheme.MFMIN.value + result_df["uadj"] = CalibratableScheme.UADJ.value + + if namespace == "conus_hf" and not envca: + divides_list = result_df["divide_id"] + domain = namespace.split("_")[0] + table_name = f"divide_parameters.snow-17_{domain}" + params_df = catalog.load_table(table_name).to_polars() + conus_param_df = params_df.filter(pl.col("divide_id").is_in(divides_list)).collect().to_pandas() + result_df.drop(columns=["mfmax", "mfmin", "uadj"], inplace=True) + result_df = pd.merge(conus_param_df, result_df, on="divide_id", how="left") + + pydantic_models = [] + for _, row_dict in result_df.iterrows(): + model_instance = Snow17( + catchment=row_dict["divide_id"], + hru_id=row_dict["divide_id"], + hru_area=row_dict["areasqkm"], + latitude=row_dict["lat"], + elev=row_dict["elevation_mean"], + mf_max=row_dict["mfmax"], + mf_min=row_dict["mfmin"], + uadj=row_dict["uadj"], + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_smp_parameters( + catalog: Catalog, + namespace: str, + identifier: str, + graph: rx.PyDiGraph, + extra_module: str | None = None, +) -> list[SMP]: + """Creates the initial parameter estimates for the SMP module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + extra_module : str, optional + A setting to determine if a module should be specified to obtain additional SMP parameters. 
+ Available modules declared for addt'l SMP parameters: 'CFE-S', 'CFE-X', 'LASAM', 'TopModel' + + Returns + ------- + list[SMP] + The list of all initial parameters for catchments using SMP + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = {"smcmax": "mean.smcmax", "bexp": "mode.bexp", "psisat": "geom_mean.psisat"} + + df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] # Keep the divide_id + for param_name, prefix in attr.items(): + # Find all columns that start with the prefix + matching_cols = [col for col in df.columns if col.startswith(prefix)] + if matching_cols: + # Calculate mean across matching columns for each row. + # NOTE: this assumes an even weighting. TODO: determine if we need to have weighted averaging + expressions.append( + pl.concat_list([pl.col(col) for col in matching_cols]).list.mean().alias(f"{param_name}_avg") + ) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}_avg")) + result_df = df.select(expressions) + + # Initializing parameters dependent to unique modules + soil_storage_model = "NA" + soil_storage_depth = "NA" + water_table_based_method = "NA" + soil_moisture_profile_option = "NA" + soil_depth_layers = "NA" + water_depth_layers = "NA" + water_table_depth = "NA" + + if extra_module: + if extra_module == "CFE-S" or extra_module == "CFE-X": + soil_storage_model = SoilScheme.CFE_SOIL_STORAGE.value + soil_storage_depth = SoilScheme.CFE_STORAGE_DEPTH.value + elif extra_module == "TopModel": + soil_storage_model = SoilScheme.TOPMODEL_SOIL_STORAGE.value + water_table_based_method = SoilScheme.TOPMODEL_WATER_TABLE_METHOD.value + elif extra_module == "LASAM": + soil_storage_model = SoilScheme.LASAM_SOIL_STORAGE.value + soil_moisture_profile_option = SoilScheme.LASAM_SOIL_MOISTURE.value + soil_depth_layers = SoilScheme.LASAM_SOIL_DEPTH_LAYERS.value + water_table_depth = SoilScheme.LASAM_WATER_TABLE_DEPTH.value + else: + raise ValueError(f"Passing unsupported module into endpoint: {extra_module}") + + pydantic_models = [] + for row_dict in result_df.iter_rows(named=True): + # Instantiate the Pydantic model for each row + model_instance = SMP( + catchment=row_dict["divide_id"], + smcmax=row_dict["smcmax_avg"], + b=row_dict["bexp_avg"], + satpsi=row_dict["psisat_avg"], + soil_storage_model=soil_storage_model, + soil_storage_depth=soil_storage_depth, + water_table_based_method=water_table_based_method, + soil_moisture_profile_option=soil_moisture_profile_option, + soil_depth_layers=soil_depth_layers, + water_depth_layers=water_depth_layers, + water_table_depth=water_table_depth, + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_lstm_parameters(catalog: Catalog, namespace: str, identifier: str, graph: rx.PyDiGraph) -> list[LSTM]: + """Creates the initial parameter estimates for the LSTM module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + + Returns + ------- + list[LSTM] + The list of all initial parameters for catchments using LSTM + + *Note: Per HF API, the following attributes for LSTM does not carry any relvant information: + 'train_cfg_file' & basin_name' -- remove if desire + """ + gauge: dict[str, 
pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = { + "slope": "mean.slope", + "elevation_mean": "mean.elevation", + "lat": "centroid_y", + "lon": "centroid_x", + } + + # Extraction of relevant features from divide attributes layer + # & convert to polar + divide_attr_df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] + for param_name, prefix in attr.items(): + # Extract only the relevant attribute(s) + matching_cols = [col for col in divide_attr_df.columns if col == prefix] + if matching_cols: + expressions.append(pl.concat([pl.col(col) for col in matching_cols]).alias(f"{param_name}")) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}")) + + divide_attr_df = divide_attr_df.select(expressions) + + # Extraction of relevant features from divides layer + divides_df = gauge["divides"][["divide_id", "areasqkm"]] + + # Ensure final result aligns properly based on each instances divide ids + result_df = pd.merge(divide_attr_df.to_pandas(), divides_df, on="divide_id", how="left") + + # Convert elevation from cm to m + result_df["elevation_mean"] = result_df["elevation_mean"] * 0.01 + + # Convert CRS to WGS84 (EPSG4326) + crs = gauge["divides"].crs + transformer = Transformer.from_crs(crs, 4326) + wgs84_latlon = transformer.transform(result_df["lon"], result_df["lat"]) + result_df["lon"] = wgs84_latlon[0] + result_df["lat"] = wgs84_latlon[1] + + pydantic_models = [] + for _, row_dict in result_df.iterrows(): + # Instantiate the Pydantic model for each row + model_instance = LSTM( + catchment=row_dict["divide_id"], + area_sqkm=row_dict["areasqkm"], + basin_id=identifier, + elev_mean=row_dict["elevation_mean"], + lat=row_dict["lat"], + lon=row_dict["lon"], + slope_mean=row_dict["slope"], + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_lasam_parameters( + catalog: Catalog, + namespace: str, + identifier: str, + sft_included: bool, + graph: rx.PyDiGraph, + soil_params_file: str = "vG_default_params_HYDRUS.dat", +) -> list[LASAM]: + """Creates the initial parameter estimates for the LASAM module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + sft_included: bool + True if SFT is in the "dep_modules_included" definition as declared in HF API repo. + soil_params_file: str + Name of the Van Genuchton soil parameters file. Note: This is the filename that gets returned by HF API's utility script + get_hydrus_data(). 
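+    graph: rx.PyDiGraph
+        Pre-computed flowpath connectivity graph passed through to subset_hydrofabric.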
+ + Returns + ------- + list[LASAM] + The list of all initial parameters for catchments using LASAM + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = {"soil_type": "mode.ISLTYP"} + + # Extraction of relevant features from divide attributes layer + # & convert to polar + divide_attr_df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] + for param_name, prefix in attr.items(): + # Extract only the relevant attribute(s) + matching_cols = [col for col in divide_attr_df.columns if col == prefix] + if matching_cols: + expressions.append(pl.concat([pl.col(col) for col in matching_cols]).alias(f"{param_name}")) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}")) + + result_df = divide_attr_df.select(expressions) + + pydantic_models = [] + for row_dict in result_df.iter_rows(named=True): + # Instantiate the Pydantic model for each row + model_instance = LASAM( + catchment=row_dict["divide_id"], + soil_params_file=soil_params_file, # TODO figure out why this exists? + layer_soil_type=str(row_dict["soil_type"]), + sft_coupled=sft_included, + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_noahowp_parameters( + catalog: Catalog, namespace: str, identifier: str, graph: rx.PyDiGraph +) -> list[NoahOwpModular]: + """Creates the initial parameter estimates for the Noah OWP Modular module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + + Returns + ------- + list[NoahOwpModular] + The list of all initial parameters for catchments using NoahOwpModular + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = { + "slope": "mean.slope", + "aspect": "circ_mean.aspect", + "lat": "centroid_y", + "lon": "centroid_x", + "soil_type": "mode.ISLTYP", + "veg_type": "mode.IVGTYP", + } + + # Extraction of relevant features from divide attributes layer + # & convert to polar + divide_attr_df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] + for param_name, prefix in attr.items(): + # Extract only the relevant attribute(s) + matching_cols = [col for col in divide_attr_df.columns if col == prefix] + if matching_cols: + expressions.append(pl.concat([pl.col(col) for col in matching_cols]).alias(f"{param_name}")) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}")) + + result_df = divide_attr_df.select(expressions).to_pandas() + + # Convert CRS to WGS84 (EPSG4326) + crs = gauge["divides"].crs + transformer = Transformer.from_crs(crs, 4326) + wgs84_latlon = transformer.transform(result_df["lon"], result_df["lat"]) + result_df["lon"] = wgs84_latlon[0] + result_df["lat"] = wgs84_latlon[1] + + pydantic_models = [] + for _, row_dict in result_df.iterrows(): + # Instantiate the Pydantic model for each row + model_instance = NoahOwpModular( + catchment=row_dict["divide_id"], + lat=row_dict["lat"], + lon=row_dict["lon"], + terrain_slope=row_dict["slope"], + 
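+            # The circ_mean.aspect attribute is passed straight through as the terrain
+            # azimuth (assumed to already be in the units NoahOwpModular expects).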
azimuth=row_dict["aspect"], + isltyp=row_dict["soil_type"], + vegtyp=row_dict["veg_type"], + sfctyp=2 if row_dict["veg_type"] == 16 else 1, + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_sacsma_parameters( + catalog: Catalog, namespace: str, identifier: str, envca: bool, graph: rx.PyDiGraph +) -> list[SacSma]: + """Creates the initial parameter estimates for the SAC SMA module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + + envca : bool, optional + If source is ENVCA, then set to True - otherwise False. + + Returns + ------- + list[SacSma] + The list of all initial parameters for catchments using SacSma + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + + # Extraction of relevant features from divides layer + pd.options.mode.chained_assignment = None + result_df = gauge["divides"][["divide_id", "areasqkm"]] + + # Default parameter values used only for CONUS + result_df["uztwm"] = SacSmaValues.UZTWM.value + result_df["uzfwm"] = SacSmaValues.UZFWM.value + result_df["lztwm"] = SacSmaValues.LZTWM.value + result_df["lzfpm"] = SacSmaValues.LZFPM.value + result_df["lzfsm"] = SacSmaValues.LZFSM.value + result_df["adimp"] = SacSmaValues.ADIMP.value + result_df["uzk"] = SacSmaValues.UZK.value + result_df["lzpk"] = SacSmaValues.LZPK.value + result_df["lzsk"] = SacSmaValues.LZSK.value + result_df["zperc"] = SacSmaValues.ZPERC.value + result_df["rexp"] = SacSmaValues.REXP.value + result_df["pctim"] = SacSmaValues.PCTIM.value + result_df["pfree"] = SacSmaValues.PFREE.value + result_df["riva"] = SacSmaValues.RIVA.value + result_df["side"] = SacSmaValues.SIDE.value + result_df["rserv"] = SacSmaValues.RSERV.value + + if namespace == "conus_hf" and not envca: + divides_list = result_df["divide_id"] + domain = namespace.split("_")[0] + table_name = f"divide_parameters.sac-sma_{domain}" + params_df = catalog.load_table(table_name).to_polars() + conus_param_df = params_df.filter(pl.col("divide_id").is_in(divides_list)).collect().to_pandas() + result_df.drop( + columns=[ + "uztwm", + "uzfwm", + "lztwm", + "lzfpm", + "lzfsm", + "uzk", + "lzpk", + "lzsk", + "zperc", + "rexp", + "pfree", + ], + inplace=True, + ) + result_df = pd.merge(conus_param_df, result_df, on="divide_id", how="left") + + pydantic_models = [] + for _, row_dict in result_df.iterrows(): + # Instantiate the Pydantic model for each row + # *Note: The HF API declares hru_id as the divide id, but to remain consistent + # keeping catchment arg. 
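+        # Only the parameters the CONUS divide_parameters table can override are passed
+        # explicitly below; adimp, pctim, riva, side and rserv are not passed and are
+        # expected to come from the SacSma schema defaults.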
+ model_instance = SacSma( + catchment=row_dict["divide_id"], + hru_id=row_dict["divide_id"], + hru_area=row_dict["areasqkm"], + uztwm=row_dict["uztwm"], + uzfwm=row_dict["uzfwm"], + lztwm=row_dict["lztwm"], + lzfpm=row_dict["lzfpm"], + lzfsm=row_dict["lzfsm"], + uzk=row_dict["uzk"], + lzpk=row_dict["lzpk"], + lzsk=row_dict["lzsk"], + zperc=row_dict["zperc"], + rexp=row_dict["rexp"], + pfree=row_dict["pfree"], + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_troute_parameters( + catalog: Catalog, namespace: str, identifier: str, graph: rx.PyDiGraph +) -> list[TRoute]: + """Creates the initial parameter estimates for the T-Route + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + + Returns + ------- + list[TRoute] + The list of all initial parameters for catchments using TRoute + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + + # Extraction of relevant features from divide attributes layer + divide_attr_df = pd.DataFrame(gauge["divide-attributes"]) + nwtopo_param = collections.defaultdict(dict) + nwtopo_param["supernetwork_parameters"].update({"geo_file_path": f"gauge_{identifier}.gpkg"}) + nwtopo_param["waterbody_parameters"].update( + {"level_pool": {"level_pool_waterbody_parameter_file_path": f"gauge_{identifier}.gpkg"}} + ) + + pydantic_models = [] + for _, row_dict in divide_attr_df.iterrows(): + model_instance = TRoute(catchment=row_dict["divide_id"], nwtopo_param=nwtopo_param) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_topmodel_parameters( + catalog: Catalog, namespace: str, identifier: str, graph: rx.PyDiGraph +) -> list[Topmodel]: + """Creates the initial parameter estimates for the Topmodel + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + + Returns + ------- + list[Topmodel] + The list of all initial parameters for catchments using Topmodel + + *Note: + + - Per HF API SME, relevant information presented here will only source info that was + written to the HF API's {divide_id}_topmodel_subcat.dat & {divide_id}_topmodel_params.dat + files. + + - The divide_id is the same as catchment, but will return divide_id variable name here + since expected from HF API - remove if needed. 
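+
+    - The twi values come from the "dist_4.twi" divide attribute, a JSON-encoded
+      distribution that is parsed with json.loads; its length supplies
+      num_topodex_values.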
+ """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = {"twi": "dist_4.twi"} + + # Extraction of relevant features from divide attributes layer + # & convert to polar + divide_attr_df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] + for param_name, prefix in attr.items(): + # Extract only the relevant attribute(s) + matching_cols = [col for col in divide_attr_df.columns if col == prefix] + if matching_cols: + expressions.append(pl.concat([pl.col(col) for col in matching_cols]).alias(f"{param_name}")) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}")) + + divide_attr_df = divide_attr_df.select(expressions) + + # Extraction of relevant features from divides layer + divides_df = gauge["divides"][["divide_id", "lengthkm"]] + + # Ensure final result aligns properly based on each instances divide ids + result_df = pd.merge(divide_attr_df.to_pandas(), divides_df, on="divide_id", how="left") + + pydantic_models = [] + for _idx, row_dict in result_df.iterrows(): + twi_json = json.loads(row_dict["twi"]) + model_instance = Topmodel( + catchment=row_dict["divide_id"], + divide_id=row_dict["divide_id"], + twi=twi_json, + num_topodex_values=len(twi_json), + dist_from_outlet=round(row_dict["lengthkm"] * 1000), + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_topoflow_parameters( + catalog: Catalog, namespace: str, identifier: str, graph: rx.PyDiGraph +) -> list[Topoflow]: + """Creates the initial parameter estimates for the Topoflow module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : str + the hydrofabric namespace + identifier : str + the gauge identifier + + Returns + ------- + list[Topoflow] + The list of all initial parameters for catchments using Topoflow + + *Note: This is a placeholder for Topoflow as the generation of IPEs for + Topoflow does not exist currently. + """ + raise NotImplementedError("Topoflow not implemented yet") + + +def get_ueb_parameters( + catalog: Catalog, + namespace: str, + identifier: str, + envca: bool, + graph: rx.PyDiGraph, +) -> list[UEB]: + """Creates the initial parameter estimates for the UEB module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : HydrofabricDomains + the hydrofabric namespace + identifier : str + the gauge identifier + envca : bool, optional + If source is ENVCA, then set to True - otherwise False. 
+ + Returns + ------- + list[UEB] + The list of all initial parameters for catchments using UEB + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + attr = { + "slope": "mean.slope", + "aspect": "circ_mean.aspect", + "elevation": "mean.elevation", + "lat": "centroid_y", + "lon": "centroid_x", + } + + # Extraction of relevant features from divide attributes layer + # & convert to polar + divide_attr_df = pl.DataFrame(gauge["divide-attributes"]) + expressions = [pl.col("divide_id")] + for param_name, prefix in attr.items(): + # Find all columns that start with the prefix + matching_cols = [col for col in divide_attr_df.columns if col == prefix] + if matching_cols: + expressions.append(pl.concat([pl.col(col) for col in matching_cols]).alias(f"{param_name}")) + else: + # Default to 0.0 if no matching columns found + expressions.append(pl.lit(0.0).alias(f"{param_name}")) + + result_df = divide_attr_df.select(expressions).to_pandas() + + # Convert elevation from cm to m + result_df["elevation"] = result_df["elevation"] * 0.01 + + # Convert CRS to WGS84 (EPSG4326) + crs = gauge["divides"].crs + transformer = Transformer.from_crs(crs, 4326) + wgs84_latlon = transformer.transform(result_df["lon"], result_df["lat"]) + result_df["lon"] = wgs84_latlon[0] + result_df["lat"] = wgs84_latlon[1] + + # Default parameter values used only for CONUS + result_df["jan_temp_range"] = UEBValues.JAN_TEMP.value + result_df["feb_temp_range"] = UEBValues.FEB_TEMP.value + result_df["mar_temp_range"] = UEBValues.MAR_TEMP.value + result_df["apr_temp_range"] = UEBValues.APR_TEMP.value + result_df["may_temp_range"] = UEBValues.MAY_TEMP.value + result_df["jun_temp_range"] = UEBValues.JUN_TEMP.value + result_df["jul_temp_range"] = UEBValues.JUL_TEMP.value + result_df["aug_temp_range"] = UEBValues.AUG_TEMP.value + result_df["sep_temp_range"] = UEBValues.SEP_TEMP.value + result_df["oct_temp_range"] = UEBValues.OCT_TEMP.value + result_df["nov_temp_range"] = UEBValues.NOV_TEMP.value + result_df["dec_temp_range"] = UEBValues.DEC_TEMP.value + + if namespace == "conus_hf" and not envca: + divides_list = result_df["divide_id"] + domain = namespace.split("_")[0] + table_name = f"divide_parameters.ueb_{domain}" + params_df = catalog.load_table(table_name).to_polars() + conus_param_df = params_df.filter(pl.col("divide_id").is_in(divides_list)).collect().to_pandas() + col2drop = [col for col in result_df.columns if col.endswith("_temp_range")] + result_df.drop(columns=col2drop, inplace=True) + result_df = pd.merge(conus_param_df, result_df, on="divide_id", how="left") + result_df.rename( + columns={ + "b01": "jan_temp_range", + "b02": "feb_temp_range", + "b03": "mar_temp_range", + "b04": "apr_temp_range", + "b05": "may_temp_range", + "b06": "jun_temp_range", + "b07": "jul_temp_range", + "b08": "aug_temp_range", + "b09": "sep_temp_range", + "b10": "oct_temp_range", + "b11": "nov_temp_range", + "b12": "dec_temp_range", + }, + inplace=True, + ) + + pydantic_models = [] + for _, row_dict in result_df.iterrows(): + model_instance = UEB( + catchment=row_dict["divide_id"], + aspect=row_dict["aspect"], + slope=row_dict["slope"], + longitude=row_dict["lon"], + latitude=row_dict["lat"], + elevation=row_dict["elevation"], + standard_atm_pressure=round(Atmosphere(row_dict["elevation"]).pressure[0], 4), + 
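+            # ambiance.Atmosphere takes the elevation in metres (hence the cm -> m
+            # conversion above); .pressure gives the standard-atmosphere pressure in Pa.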
jan_temp_range=row_dict["jan_temp_range"], + feb_temp_range=row_dict["feb_temp_range"], + mar_temp_range=row_dict["mar_temp_range"], + apr_temp_range=row_dict["apr_temp_range"], + may_temp_range=row_dict["may_temp_range"], + jun_temp_range=row_dict["jun_temp_range"], + jul_temp_range=row_dict["jul_temp_range"], + aug_temp_range=row_dict["aug_temp_range"], + sep_temp_range=row_dict["sep_temp_range"], + oct_temp_range=row_dict["oct_temp_range"], + nov_temp_range=row_dict["nov_temp_range"], + dec_temp_range=row_dict["dec_temp_range"], + ) + pydantic_models.append(model_instance) + return pydantic_models + + +def get_cfe_parameters( + catalog: Catalog, + namespace: str, + identifier: str, + cfe_version: str, + graph: rx.PyDiGraph, + sft_included: bool = False, +) -> list[CFE]: + """Creates the initial parameter estimates for the CFE module + + Parameters + ---------- + catalog : Catalog + the pyiceberg lakehouse catalog + namespace : HydrofabricDomains + the hydrofabric namespace + identifier : str + the gauge identifier + cfe_version: str + the CFE module type (e.g. CFE-X, CFE-S) for which determines whether + to use Shaake or Xinanjiang for surface partitioning. + sft_included: bool + True if SFT is in the "dep_modules_included" definition as declared in HF API repo. + + Returns + ------- + list[CFE] + The list of all initial parameters for catchments using CFE + """ + gauge: dict[str, pd.DataFrame | gpd.GeoDataFrame] = subset_hydrofabric( + catalog=catalog, + identifier=identifier, + id_type=IdType.HL_URI, + namespace=namespace, + layers=["flowpaths", "nexus", "divides", "divide-attributes", "network"], + graph=graph, + ) + + # CFE + df = pd.DataFrame(gauge["divide-attributes"]) + divides_list = df["divide_id"] + domain = namespace.split("_")[0] + table_name = f"divide_parameters.cfe-x_{domain}" + params_df = catalog.load_table(table_name).to_polars() + conus_param_df = params_df.filter(pl.col("divide_id").is_in(divides_list)).collect().to_pandas() + df = pd.merge(conus_param_df, df, on="divide_id", how="left") + + if cfe_version == "CFE-X": + surface_partitioning_scheme = CFEValues.XINANJIANG.value + urban_decimal_fraction = CFEValues.URBAN_FRACT.value + is_sft_coupled = "NA" + elif cfe_version == "CFE-S": + surface_partitioning_scheme = CFEValues.SCHAAKE.value + a_Xinanjiang_inflection_point_parameter = "NA" + b_Xinanjiang_shape_parameter = "NA" + x_Xinanjiang_shape_parameter = "NA" + urban_decimal_fraction = "NA" + if sft_included: + is_sft_coupled = 1 + else: + is_sft_coupled = 0 + else: + raise ValueError(f"Passing unsupported cfe_version into endpoint: {cfe_version}") + + pydantic_models = [] + for _, row_dict in df.iterrows(): + # Instantiate the Pydantic model for each row + model_instance = CFE( + catchment=row_dict["divide_id"], + surface_partitioning_scheme=surface_partitioning_scheme, + is_sft_coupled=str(is_sft_coupled), + soil_params_b=row_dict["mode.bexp_soil_layers_stag=1"], + soil_params_satdk=row_dict["geom_mean.dksat_soil_layers_stag=1"], + soil_params_satpsi=row_dict["geom_mean.psisat_soil_layers_stag=1"], + soil_params_slop=row_dict["mean.slope_1km"], + soil_params_smcmax=row_dict["mean.smcmax_soil_layers_stag=1"], + soil_params_wltsmc=row_dict["mean.smcwlt_soil_layers_stag=1"], + max_gw_storage=row_dict["mean.Zmax"], + Cgw=row_dict["mean.Coeff"], + expon=row_dict["mode.Expon"], + a_Xinanjiang_inflection_point_parameter=str(row_dict["a_Xinanjiang_inflection_point_parameter"]) + if cfe_version == "CFE-X" + else a_Xinanjiang_inflection_point_parameter, + 
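+            # For CFE-S the three Xinanjiang parameters fall back to the "NA" placeholders
+            # set above; only CFE-X reads them from the divide_parameters table.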
b_Xinanjiang_shape_parameter=str(row_dict["b_Xinanjiang_shape_parameter"]) + if cfe_version == "CFE-X" + else b_Xinanjiang_shape_parameter, + x_Xinanjiang_shape_parameter=str(row_dict["x_Xinanjiang_shape_parameter"]) + if cfe_version == "CFE-X" + else x_Xinanjiang_shape_parameter, + urban_decimal_fraction=str(urban_decimal_fraction), + refkdt=row_dict["mean.refkdt"], + ) + pydantic_models.append(model_instance) + return pydantic_models diff --git a/src/icefabric/modules/rnr.py b/src/icefabric/modules/rnr.py new file mode 100644 index 0000000..df3f24e --- /dev/null +++ b/src/icefabric/modules/rnr.py @@ -0,0 +1,125 @@ +"""A file to hold all replace and route (RnR) geospatial scripts""" + +import geopandas as gpd +import numpy as np +import pandas as pd +import polars as pl +from pyiceberg.catalog import Catalog +from pyiceberg.expressions import And, EqualTo, In, LessThanOrEqual + +from icefabric.helpers.geopackage import table_to_geopandas, to_geopandas +from icefabric.schemas.hydrofabric import UPSTREAM_VPUS + + +def get_rnr_segment( + catalog: Catalog, reach_id: str, domain="conus_hf" +) -> dict[str, pd.DataFrame | gpd.GeoDataFrame]: + """Returns a geopackage subset from the hydrofabric based on RnR rules + + Parameters + ---------- + catalog : Catalog + The iceberg catalog of the hydrofabric + reach_id : str + The reach_id, or hf_id, from the NWPS API + + Returns + ------- + dict[str, pd.DataFrame | gpd.GeoDataFrame] + a dictionary of dataframes and geodataframes containing HF layers + """ + network = catalog.load_table(f"{domain}.network") + origin_row = network.scan(row_filter=f"hf_id = {reach_id}").to_pandas() + + vpu_id = origin_row["vpuid"].iloc[0] + if vpu_id in UPSTREAM_VPUS: + vpu_filter = In("vpuid", UPSTREAM_VPUS[vpu_id]) + else: + vpu_filter = EqualTo("vpuid", vpu_id) + + flowpaths = catalog.load_table(f"{domain}.flowpaths").scan(row_filter=vpu_filter).to_polars() + lakes = catalog.load_table(f"{domain}.lakes").scan(row_filter=vpu_filter).to_polars() + + pois = catalog.load_table(f"{domain}.pois") + hydrolocations = catalog.load_table(f"{domain}.hydrolocations") + divides = catalog.load_table(f"{domain}.divides") + nexus = catalog.load_table(f"{domain}.nexus") + flowpath_attr = catalog.load_table(f"{domain}.flowpath-attributes") + divides_attr = catalog.load_table(f"{domain}.divide-attributes") + + mainstem_expression = EqualTo("hf_mainstem", origin_row["hf_mainstem"].iloc[0]) + hydroseq_expression = LessThanOrEqual("hydroseq", origin_row["hydroseq"].iloc[0]) + + combined_filter = And(And(mainstem_expression, hydroseq_expression), vpu_filter) + + # Find all streams with the same stream order + mainstem_features = network.scan(row_filter=combined_filter).to_polars() + segment_flowpaths = flowpaths.filter( + pl.col("divide_id").is_in(mainstem_features["divide_id"].unique().implode()) + ) + joined_df = mainstem_features.join(segment_flowpaths, on="divide_id", how="full") + stream_order = joined_df.filter(pl.col("hf_id") == int(reach_id))["order"].item() + filtered_flowpaths = segment_flowpaths.filter(pl.col("order") == stream_order) + + # Find any lakes contained in the RnR segment + poi_ids = filtered_flowpaths["poi_id"].filter(filtered_flowpaths["poi_id"].is_not_null()).cast(pl.Int64) + filtered_lakes = lakes.filter(pl.col("poi_id").is_in(poi_ids.implode())) + + if filtered_lakes.shape[0] > 0: + # Ensuring we break connectivity at lakes + lake_ids = filtered_lakes["hf_id"].filter(filtered_lakes["hf_id"].is_not_null()) + network_rows = 
mainstem_features.filter(pl.col("hf_id").is_in(lake_ids.implode())) + upstream_lake = network_rows[ + "hf_hydroseq" + ].max() # since hydroseq decreases as you go downstream, we want the upstream most value + mainstem_features = mainstem_features.filter(pl.col("hf_hydroseq").ge(upstream_lake)) + segment_flowpaths = flowpaths.filter( + pl.col("divide_id").is_in(mainstem_features["divide_id"].unique().implode()) + ) + joined_df = mainstem_features.join(segment_flowpaths, on="divide_id", how="full") + stream_order = joined_df.filter(pl.col("hf_id") == int(reach_id))["order"].item() + filtered_flowpaths = segment_flowpaths.filter(pl.col("order") == stream_order) + + poi_ids = ( + filtered_flowpaths["poi_id"].filter(filtered_flowpaths["poi_id"].is_not_null()).cast(pl.Int64) + ) + filtered_lakes = lakes.filter(pl.col("poi_id").is_in(poi_ids.implode())) + + # Convert output to geopandas + filtered_nexus_points = table_to_geopandas( + table=nexus, row_filter=In("id", filtered_flowpaths["toid"].to_numpy().tolist()) + ) + filtered_divides = table_to_geopandas( + table=divides, row_filter=In("divide_id", filtered_flowpaths["divide_id"].to_numpy().tolist()) + ) + filtered_divide_attr = divides_attr.scan( + row_filter=In("divide_id", filtered_flowpaths["divide_id"].to_numpy().tolist()) + ).to_pandas() + filtered_flowpath_attr = flowpath_attr.scan( + row_filter=In("id", filtered_flowpaths["id"].to_numpy().tolist()) + ).to_pandas() + filtered_pois = pois.scan(row_filter=In("poi_id", poi_ids.to_numpy().tolist())).to_pandas() + filtered_hydrolocations = hydrolocations.scan( + row_filter=In("poi_id", poi_ids.to_numpy().tolist()) + ).to_pandas() + filtered_flowpaths = to_geopandas(filtered_flowpaths.to_pandas()) + filtered_network = network.scan( + row_filter=In( + "id", np.concatenate([filtered_flowpaths["toid"].to_numpy(), filtered_flowpaths["id"].to_numpy()]) + ) + ).to_pandas() + filtered_lakes = to_geopandas(filtered_lakes.to_pandas()) + + layers = { + "flowpaths": filtered_flowpaths, + "nexus": filtered_nexus_points, + "divides": filtered_divides, + "divide-attributes": filtered_divide_attr, + "network": filtered_network, + "pois": filtered_pois, + "flowpath-attributes": filtered_flowpath_attr, + "hydrolocations": filtered_hydrolocations, + } + if len(filtered_lakes) > 0: + layers["lakes"] = filtered_lakes + return layers diff --git a/src/icefabric/ras_xs/__init__.py b/src/icefabric/ras_xs/__init__.py new file mode 100644 index 0000000..1c20b1b --- /dev/null +++ b/src/icefabric/ras_xs/__init__.py @@ -0,0 +1,5 @@ +"""Helper functions designed to assist with managing data.""" + +from .subset import subset_xs + +__all__ = ["subset_xs"] diff --git a/src/icefabric/ras_xs/subset.py b/src/icefabric/ras_xs/subset.py new file mode 100644 index 0000000..80a7d78 --- /dev/null +++ b/src/icefabric/ras_xs/subset.py @@ -0,0 +1,79 @@ +"""A file to hold ras cross-section tools""" + +from pathlib import Path + +import geopandas as gpd +import pandas as pd +from botocore.exceptions import ClientError +from pyiceberg.catalog import Catalog +from pyiceberg.expressions import And, EqualTo, GreaterThanOrEqual, LessThanOrEqual +from pyiceberg.expressions.literals import literal +from shapely.geometry import Polygon + +from icefabric.helpers.geopackage import to_geopandas +from icefabric.schemas.ras_xs import XsType + + +def subset_xs( + catalog: Catalog, + xstype: XsType = XsType.CONFLATED, + identifier: str | None = None, + bbox: Polygon | None = None, + output_file: Path | None = None, +) -> dict[str, pd.DataFrame | 
gpd.GeoDataFrame] | None: + """Returns a geopackage subset from the RAS XS iceberg catalog. + + This function delivers a subset of the cross-sectional data by filtering the data with either a + given flowpath identifier or bounding box. Collects all relevant cross-sectional information in a + geopackage. + + Parameters + ---------- + catalog : Catalog + The iceberg catalog containing the RAS XS data + xstype : XsType, optional + The schema of cross-sectional data to subset (conflated or representative). + identifier : str, optional + The flowpath ID. Used when subsetting on flowpath ID. + bbox : Polygon, optional + A lat/lon bounding box for subsetting based on geospatial location. + output_file : Path, optional + The output file path where the geopackage will be saved. + + Returns + ------- + GeoDataFrame + Subset of the cross-sectional data based on identifiers. + """ + try: + xs_table = catalog.load_table(f"ras_xs.{xstype.value}") + except ClientError as e: + msg = "AWS Test account credentials expired. Can't access remote S3 Table" + print(msg) + raise e + + # Filter prior to pandas conversion, to save time/memory + if identifier: + filter_cond = EqualTo("flowpath_id", literal(identifier)) + elif bbox: + min_lat, min_lon, max_lat, max_lon = bbox.bounds + filter_cond = And( + GreaterThanOrEqual("min_y", min_lat), + GreaterThanOrEqual("min_x", min_lon), + LessThanOrEqual("max_y", max_lat), + LessThanOrEqual("max_x", max_lon), + ) + else: + raise ValueError("Please either supply an identifier or bounding box to subset the dataset.") + xs_scan = xs_table.scan(row_filter=filter_cond) + df = xs_scan.to_pandas() + + data_gdf = to_geopandas(df) + + # Save data. + if output_file: + if len(data_gdf) > 0: + gpd.GeoDataFrame(data_gdf).to_file(output_file, layer="ras_xs", driver="GPKG") + else: + print("Warning: Dataframe is empty") + return data_gdf diff --git a/src/icefabric/schemas/__init__.py b/src/icefabric/schemas/__init__.py new file mode 100644 index 0000000..3224d04 --- /dev/null +++ b/src/icefabric/schemas/__init__.py @@ -0,0 +1,94 @@ +"""Contains helper functions to support NWM modules""" + +import json +from pathlib import Path + +from .hydrofabric import UPSTREAM_VPUS, HydrofabricDomains, IdType +from .iceberg_tables.conus_reference import ReferenceDivides, ReferenceFlowpaths +from .iceberg_tables.hydrofabric import ( + DivideAttributes, + Divides, + FlowpathAttributes, + FlowpathAttributesML, + Flowpaths, + Hydrolocations, + Lakes, + Network, + Nexus, + POIs, +) +from .iceberg_tables.hydrofabric_snapshots import HydrofabricSnapshot +from .iceberg_tables.ras_xs import ConflatedRasXS, RepresentativeRasXS +from .modules import ( + CFE, + LASAM, + LSTM, + SFT, + SMP, + UEB, + Albedo, + CalibratableScheme, + IceFractionScheme, + NoahOwpModular, + SacSma, + SacSmaValues, + Snow17, + SoilScheme, + Topmodel, + Topoflow, + TRoute, +) +from .ras_xs import XsType +from .rise_parameters import ( + PARAM_CONV, + CatItemParams, + CatRecParams, + LocItemParams, +) +from .topobathy import FileType, NGWPCLocations, NGWPCTestLocations + +__all__ = [ + "ConflatedRasXS", + "ReferenceDivides", + "ReferenceFlowpaths", + "RepresentativeRasXS", + "DivideAttributes", + "Divides", + "FlowpathAttributes", + "FlowpathAttributesML", + "Flowpaths", + "POIs", + "Network", + "Nexus", + "Lakes", + "Hydrolocations", + "HydrofabricSnapshot", + "UPSTREAM_VPUS", + "IdType", + "HydrofabricDomains", + "SFT", + "IceFractionScheme", + "Albedo", + "Snow17", + "CalibratableScheme", + "SMP", + "SoilScheme", + "SacSma", + 
"SacSmaValues", + "LSTM", + "LASAM", + "NoahOwpModular", + "TRoute", + "UEB", + "CFE", + "Topmodel", + "Topoflow", + "FileType", + "NGWPCLocations", + "NGWPCTestLocations", + "XsType", + "PARAM_CONV", + "CatItemParams", + "CatRecParams", + "LocItemParams", +] diff --git a/src/icefabric/schemas/hydrofabric.py b/src/icefabric/schemas/hydrofabric.py new file mode 100644 index 0000000..ae2f204 --- /dev/null +++ b/src/icefabric/schemas/hydrofabric.py @@ -0,0 +1,62 @@ +"""Contains all schemas and enums for the NGWPC Enterprise Hydrofabric""" + +from enum import Enum + + +class IdType(str, Enum): + """All queriable HF fields. + + Attributes + ---------- + HL_URI : str + Hydrolocation URI identifier + HF_ID : str + Hydrofabric ID identifier + ID : str + Generic ID identifier + POI_ID : str + Point of Interest ID identifier + """ + + HL_URI = "hl_uri" + HF_ID = "hf_id" + ID = "id" + POI_ID = "poi_id" + VPU_ID = "vpu_id" + + +class HydrofabricDomains(str, Enum): + """The domains used when querying the hydrofabric + + Attributes + ---------- + AK : str + Alaska + CONUS : str + Conterminous United States + GL : str + The US Great Lakes + HI : str + Hawai'i + PRVI : str + Puerto Rico, US Virgin Islands + """ + + AK = "ak_hf" + CONUS = "conus_hf" + GL = "gl_hf" + HI = "hi_hf" + PRVI = "prvi_hf" + + +class StreamflowDataSources(str, Enum): + """The data sources used for hourly streamflow data""" + + USGS = "USGS" + ENVCA = "ENVCA" + CADWR = "CADWR" + TXDOT = "TXDOT" + + +# For catchments that may extend in many VPUs +UPSTREAM_VPUS: dict[str, list[str]] = {"08": ["11", "10U", "10L", "08", "07", "05"]} diff --git a/src/icefabric/schemas/iceberg_tables/__init__.py b/src/icefabric/schemas/iceberg_tables/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/icefabric/schemas/iceberg_tables/conus_reference.py b/src/icefabric/schemas/iceberg_tables/conus_reference.py new file mode 100644 index 0000000..bcab691 --- /dev/null +++ b/src/icefabric/schemas/iceberg_tables/conus_reference.py @@ -0,0 +1,175 @@ +"""Contains the PyIceberg Table schema for the conus reference fabric""" + +import pyarrow as pa +from pyiceberg.schema import Schema +from pyiceberg.types import BinaryType, BooleanType, DoubleType, NestedField, StringType + + +class ReferenceFlowpaths: + """The schema for the reference_flowpaths table containing flowpath data""" + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "flowpath_id", + "VPUID", + "reachcode", + "frommeas", + "tomeas", + "burnline_event", + "reversed", + "source", + "flowpath_toid", + "lengthkm", + "areasqkm", + "totdasqkm", + "terminalpa", + "dnhydroseq", + "hydroseq", + "mainstemlp", + "dnlevelpat", + "pathlength", + "terminalfl", + "streamorder", + "startflag", + "geometry", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. 
+
+        Returns
+        -------
+        Schema
+            PyIceberg schema for reference_flowpaths
+        """
+        return Schema(
+            NestedField(1, "flowpath_id", StringType(), required=True),
+            NestedField(2, "VPUID", StringType(), required=False),
+            NestedField(3, "reachcode", StringType(), required=False),
+            NestedField(4, "frommeas", DoubleType(), required=False),
+            NestedField(5, "tomeas", DoubleType(), required=False),
+            NestedField(6, "burnline_event", BooleanType(), required=False),
+            NestedField(7, "reversed", BooleanType(), required=False),
+            NestedField(8, "source", StringType(), required=False),
+            NestedField(9, "flowpath_toid", DoubleType(), required=False),
+            NestedField(10, "lengthkm", DoubleType(), required=False),
+            NestedField(11, "areasqkm", DoubleType(), required=False),
+            NestedField(12, "totdasqkm", DoubleType(), required=False),
+            NestedField(13, "terminalpa", DoubleType(), required=False),
+            NestedField(14, "dnhydroseq", DoubleType(), required=False),
+            NestedField(15, "hydroseq", DoubleType(), required=False),
+            NestedField(16, "mainstemlp", DoubleType(), required=False),
+            NestedField(17, "dnlevelpat", DoubleType(), required=False),
+            NestedField(18, "pathlength", DoubleType(), required=False),
+            NestedField(19, "terminalfl", DoubleType(), required=False),
+            NestedField(20, "streamorder", DoubleType(), required=False),
+            NestedField(21, "startflag", DoubleType(), required=False),
+            NestedField(22, "geometry", BinaryType(), required=False),
+            identifier_field_ids=[1],
+        )
+
+    @classmethod
+    def arrow_schema(cls) -> pa.Schema:
+        """Returns the PyArrow Schema object.
+
+        Returns
+        -------
+        pa.Schema
+            PyArrow schema for reference_flowpaths
+        """
+        return pa.schema(
+            [
+                pa.field("flowpath_id", pa.string(), nullable=False),
+                pa.field("VPUID", pa.string(), nullable=True),
+                pa.field("reachcode", pa.string(), nullable=True),
+                pa.field("frommeas", pa.float64(), nullable=True),
+                pa.field("tomeas", pa.float64(), nullable=True),
+                pa.field("burnline_event", pa.bool_(), nullable=True),
+                pa.field("reversed", pa.bool_(), nullable=True),
+                pa.field("source", pa.string(), nullable=True),
+                pa.field("flowpath_toid", pa.float64(), nullable=True),
+                pa.field("lengthkm", pa.float64(), nullable=True),
+                pa.field("areasqkm", pa.float64(), nullable=True),
+                pa.field("totdasqkm", pa.float64(), nullable=True),
+                pa.field("terminalpa", pa.float64(), nullable=True),
+                pa.field("dnhydroseq", pa.float64(), nullable=True),
+                pa.field("hydroseq", pa.float64(), nullable=True),
+                pa.field("mainstemlp", pa.float64(), nullable=True),
+                pa.field("dnlevelpat", pa.float64(), nullable=True),
+                pa.field("pathlength", pa.float64(), nullable=True),
+                pa.field("terminalfl", pa.float64(), nullable=True),
+                pa.field("streamorder", pa.float64(), nullable=True),
+                pa.field("startflag", pa.float64(), nullable=True),
+                pa.field("geometry", pa.binary(), nullable=True),
+            ]
+        )
+
+
+class ReferenceDivides:
+    """The schema for the reference_divides table containing divide data"""
+
+    @classmethod
+    def columns(cls) -> list[str]:
+        """Returns the columns associated with this schema
+
+        Returns
+        -------
+        list[str]
+            The schema columns
+        """
+        return [
+            "divide_id",
+            "vpuid",
+            "areasqkm",
+            "has_flowpath",
+            "flowpath_id",
+            "geometry",
+        ]
+
+    @classmethod
+    def schema(cls) -> Schema:
+        """Returns the PyIceberg Schema object.
+
+        Returns
+        -------
+        Schema
+            PyIceberg schema for reference_divides
+        """
+        return Schema(
+            NestedField(1, "divide_id", StringType(), required=True),
+            NestedField(2, "vpuid", StringType(), required=False),
+            NestedField(3, "areasqkm", DoubleType(), required=False),
+            NestedField(4, "has_flowpath", DoubleType(), required=False),
+            NestedField(5, "flowpath_id", StringType(), required=False),
+            NestedField(6, "geometry", BinaryType(), required=False),
+            identifier_field_ids=[1],
+        )
+
+    @classmethod
+    def arrow_schema(cls) -> pa.Schema:
+        """Returns the PyArrow Schema object.
+
+        Returns
+        -------
+        pa.Schema
+            PyArrow schema for reference_divides
+        """
+        return pa.schema(
+            [
+                pa.field("divide_id", pa.string(), nullable=False),
+                pa.field("vpuid", pa.string(), nullable=True),
+                pa.field("areasqkm", pa.float64(), nullable=True),
+                pa.field("has_flowpath", pa.float64(), nullable=True),
+                pa.field("flowpath_id", pa.string(), nullable=True),
+                pa.field("geometry", pa.binary(), nullable=True),
+            ]
+        )
diff --git a/src/icefabric/schemas/iceberg_tables/hydrofabric.py b/src/icefabric/schemas/iceberg_tables/hydrofabric.py
new file mode 100644
index 0000000..be476b2
--- /dev/null
+++ b/src/icefabric/schemas/iceberg_tables/hydrofabric.py
@@ -0,0 +1,1334 @@
+"""Contains the PyIceberg Table schemas for Hydrofabric v2.2 data model tables"""
+
+import pyarrow as pa
+from pyiceberg.schema import Schema
+from pyiceberg.types import BinaryType, BooleanType, DoubleType, IntegerType, NestedField, StringType
+
+
+class DivideAttributes:
+    """The schema for divide attributes table
+
+    Attributes
+    ----------
+    divide_id : str
+        Unique divide identifier
+    mode.bexp_soil_layers_stag=1 : float
+        Beta Parameter: soil C-H B exponent for soil layer 1 (mode)
+    mode.bexp_soil_layers_stag=2 : float
+        Beta Parameter: soil C-H B exponent for soil layer 2 (mode)
+    mode.bexp_soil_layers_stag=3 : float
+        Beta Parameter: soil C-H B exponent for soil layer 3 (mode)
+    mode.bexp_soil_layers_stag=4 : float
+        Beta Parameter: soil C-H B exponent for soil layer 4 (mode)
+    mode.ISLTYP : float
+        Dominant soil type category (mode)
+    mode.IVGTYP : float
+        Dominant vegetation type category (mode)
+    geom_mean.dksat_soil_layers_stag=1 : float
+        Saturated Soil Connectivity for soil layer 1 (geometric mean) [mm/h]
+    geom_mean.dksat_soil_layers_stag=2 : float
+        Saturated Soil Connectivity for soil layer 2 (geometric mean) [mm/h]
+    geom_mean.dksat_soil_layers_stag=3 : float
+        Saturated Soil Connectivity for soil layer 3 (geometric mean) [mm/h]
+    geom_mean.dksat_soil_layers_stag=4 : float
+        Saturated Soil Connectivity for soil layer 4 (geometric mean) [mm/h]
+    geom_mean.psisat_soil_layers_stag=1 : float
+        Saturated soil matric potential for soil layer 1 (geometric mean) [kPa]
+    geom_mean.psisat_soil_layers_stag=2 : float
+        Saturated soil matric potential for soil layer 2 (geometric mean) [kPa]
+    geom_mean.psisat_soil_layers_stag=3 : float
+        Saturated soil matric potential for soil layer 3 (geometric mean) [kPa]
+    geom_mean.psisat_soil_layers_stag=4 : float
+        Saturated soil matric potential for soil layer 4 (geometric mean) [kPa]
+    mean.cwpvt : float
+        Empirical canopy wind parameter [1/m]
+    mean.mfsno : float
+        Snowmelt m parameter (unitless)
+    mean.mp : float
+        Slope of Conductance to photosynthesis relationship (unitless)
+    mean.refkdt : float
+        Parameter in the surface runoff parameterization (unitless)
+    mean.slope_1km : float
+        Slope [0-1] at 1km resolution [degrees]
+    mean.smcmax_soil_layers_stag=1 : float
+        Saturated value of 
soil moisture [volumetric] for soil layer 1 [m/m] + mean.smcmax_soil_layers_stag=2 : float + Saturated value of soil moisture [volumetric] for soil layer 2 [m/m] + mean.smcmax_soil_layers_stag=3 : float + Saturated value of soil moisture [volumetric] for soil layer 3 [m/m] + mean.smcmax_soil_layers_stag=4 : float + Saturated value of soil moisture [volumetric] for soil layer 4 [m/m] + mean.smcwlt_soil_layers_stag=1 : float + Wilting point soil moisture [volumetric] for soil layer 1 [m/m] + mean.smcwlt_soil_layers_stag=2 : float + Wilting point soil moisture [volumetric] for soil layer 2 [m/m] + mean.smcwlt_soil_layers_stag=3 : float + Wilting point soil moisture [volumetric] for soil layer 3 [m/m] + mean.smcwlt_soil_layers_stag=4 : float + Wilting point soil moisture [volumetric] for soil layer 4 [m/m] + mean.vcmx25 : float + Maximum rate of carboxylation at 25 C [μmol/m²/s] + mean.Coeff : float + Groundwater Coefficient [m³/s] + mean.Zmax : float + The total height of the baseflow "bucket" [mm] + mode.Expon : float + Groundwater Exponent (unitless) + centroid_x : float + X coordinates of divide centroid [units of CRS] + centroid_y : float + Y coordinates of divide centroid [units of CRS] + mean.impervious : float + Percent Impervious Surface [percent] + mean.elevation : float + Elevation from DEM [meters] + mean.slope : float + Slope computed from DEM [0-100] [degrees] + circ_mean.aspect : float + Aspect computed from DEM [degrees] + dist_4.twi : str + Topographic Wetness Index quartile distribution (unitless) + vpuid : str + Vector Processing Unit ID + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "divide_id", + "mode.bexp_soil_layers_stag=1", + "mode.bexp_soil_layers_stag=2", + "mode.bexp_soil_layers_stag=3", + "mode.bexp_soil_layers_stag=4", + "mode.ISLTYP", + "mode.IVGTYP", + "geom_mean.dksat_soil_layers_stag=1", + "geom_mean.dksat_soil_layers_stag=2", + "geom_mean.dksat_soil_layers_stag=3", + "geom_mean.dksat_soil_layers_stag=4", + "geom_mean.psisat_soil_layers_stag=1", + "geom_mean.psisat_soil_layers_stag=2", + "geom_mean.psisat_soil_layers_stag=3", + "geom_mean.psisat_soil_layers_stag=4", + "mean.cwpvt", + "mean.mfsno", + "mean.mp", + "mean.refkdt", + "mean.slope_1km", + "mean.smcmax_soil_layers_stag=1", + "mean.smcmax_soil_layers_stag=2", + "mean.smcmax_soil_layers_stag=3", + "mean.smcmax_soil_layers_stag=4", + "mean.smcwlt_soil_layers_stag=1", + "mean.smcwlt_soil_layers_stag=2", + "mean.smcwlt_soil_layers_stag=3", + "mean.smcwlt_soil_layers_stag=4", + "mean.vcmx25", + "mean.Coeff", + "mean.Zmax", + "mode.Expon", + "centroid_x", + "centroid_y", + "mean.impervious", + "mean.elevation", + "mean.slope", + "circ_mean.aspect", + "dist_4.twi", + "vpuid", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. 
+ + Returns + ------- + Schema + PyIceberg schema for divide attributes table + """ + return Schema( + NestedField(1, "divide_id", StringType(), required=True), + NestedField(2, "mode.bexp_soil_layers_stag=1", DoubleType(), required=False), + NestedField(3, "mode.bexp_soil_layers_stag=2", DoubleType(), required=False), + NestedField(4, "mode.bexp_soil_layers_stag=3", DoubleType(), required=False), + NestedField(5, "mode.bexp_soil_layers_stag=4", DoubleType(), required=False), + NestedField(6, "mode.ISLTYP", DoubleType(), required=False), + NestedField(7, "mode.IVGTYP", DoubleType(), required=False), + NestedField(8, "geom_mean.dksat_soil_layers_stag=1", DoubleType(), required=False), + NestedField(9, "geom_mean.dksat_soil_layers_stag=2", DoubleType(), required=False), + NestedField(10, "geom_mean.dksat_soil_layers_stag=3", DoubleType(), required=False), + NestedField(11, "geom_mean.dksat_soil_layers_stag=4", DoubleType(), required=False), + NestedField(12, "geom_mean.psisat_soil_layers_stag=1", DoubleType(), required=False), + NestedField(13, "geom_mean.psisat_soil_layers_stag=2", DoubleType(), required=False), + NestedField(14, "geom_mean.psisat_soil_layers_stag=3", DoubleType(), required=False), + NestedField(15, "geom_mean.psisat_soil_layers_stag=4", DoubleType(), required=False), + NestedField(16, "mean.cwpvt", DoubleType(), required=False), + NestedField(17, "mean.mfsno", DoubleType(), required=False), + NestedField(18, "mean.mp", DoubleType(), required=False), + NestedField(19, "mean.refkdt", DoubleType(), required=False), + NestedField(20, "mean.slope_1km", DoubleType(), required=False), + NestedField(21, "mean.smcmax_soil_layers_stag=1", DoubleType(), required=False), + NestedField(22, "mean.smcmax_soil_layers_stag=2", DoubleType(), required=False), + NestedField(23, "mean.smcmax_soil_layers_stag=3", DoubleType(), required=False), + NestedField(24, "mean.smcmax_soil_layers_stag=4", DoubleType(), required=False), + NestedField(25, "mean.smcwlt_soil_layers_stag=1", DoubleType(), required=False), + NestedField(26, "mean.smcwlt_soil_layers_stag=2", DoubleType(), required=False), + NestedField(27, "mean.smcwlt_soil_layers_stag=3", DoubleType(), required=False), + NestedField(28, "mean.smcwlt_soil_layers_stag=4", DoubleType(), required=False), + NestedField(29, "mean.vcmx25", DoubleType(), required=False), + NestedField(30, "mean.Coeff", DoubleType(), required=False), + NestedField(31, "mean.Zmax", DoubleType(), required=False), + NestedField(32, "mode.Expon", DoubleType(), required=False), + NestedField(33, "centroid_x", DoubleType(), required=False), + NestedField(34, "centroid_y", DoubleType(), required=False), + NestedField(35, "mean.impervious", DoubleType(), required=False), + NestedField(36, "mean.elevation", DoubleType(), required=False), + NestedField(37, "mean.slope", DoubleType(), required=False), + NestedField(38, "circ_mean.aspect", DoubleType(), required=False), + NestedField(39, "dist_4.twi", StringType(), required=False), + NestedField(40, "vpuid", StringType(), required=True), + identifier_field_ids=[1, 40], + ) + + @classmethod + def arrow_schema(cls): + """Returns the PyArrow Schema object. 
+ + Returns + ------- + pa.Schema + PyArrow schema for divide attributes table + """ + fields = [ + pa.field("divide_id", pa.string(), nullable=False), + pa.field("mode.bexp_soil_layers_stag=1", pa.float64()), + pa.field("mode.bexp_soil_layers_stag=2", pa.float64()), + pa.field("mode.bexp_soil_layers_stag=3", pa.float64()), + pa.field("mode.bexp_soil_layers_stag=4", pa.float64()), + pa.field("mode.ISLTYP", pa.float64()), + pa.field("mode.IVGTYP", pa.float64()), + pa.field("geom_mean.dksat_soil_layers_stag=1", pa.float64()), + pa.field("geom_mean.dksat_soil_layers_stag=2", pa.float64()), + pa.field("geom_mean.dksat_soil_layers_stag=3", pa.float64()), + pa.field("geom_mean.dksat_soil_layers_stag=4", pa.float64()), + pa.field("geom_mean.psisat_soil_layers_stag=1", pa.float64()), + pa.field("geom_mean.psisat_soil_layers_stag=2", pa.float64()), + pa.field("geom_mean.psisat_soil_layers_stag=3", pa.float64()), + pa.field("geom_mean.psisat_soil_layers_stag=4", pa.float64()), + pa.field("mean.cwpvt", pa.float64()), + pa.field("mean.mfsno", pa.float64()), + pa.field("mean.mp", pa.float64()), + pa.field("mean.refkdt", pa.float64()), + pa.field("mean.slope_1km", pa.float64()), + pa.field("mean.smcmax_soil_layers_stag=1", pa.float64()), + pa.field("mean.smcmax_soil_layers_stag=2", pa.float64()), + pa.field("mean.smcmax_soil_layers_stag=3", pa.float64()), + pa.field("mean.smcmax_soil_layers_stag=4", pa.float64()), + pa.field("mean.smcwlt_soil_layers_stag=1", pa.float64()), + pa.field("mean.smcwlt_soil_layers_stag=2", pa.float64()), + pa.field("mean.smcwlt_soil_layers_stag=3", pa.float64()), + pa.field("mean.smcwlt_soil_layers_stag=4", pa.float64()), + pa.field("mean.vcmx25", pa.float64()), + pa.field("mean.Coeff", pa.float64()), + pa.field("mean.Zmax", pa.float64()), + pa.field("mode.Expon", pa.float64()), + pa.field("centroid_x", pa.float64()), + pa.field("centroid_y", pa.float64()), + pa.field("mean.impervious", pa.float64()), + pa.field("mean.elevation", pa.float64()), + pa.field("mean.slope", pa.float64()), + pa.field("circ_mean.aspect", pa.float64()), + pa.field("dist_4.twi", pa.string()), + pa.field("vpuid", pa.string(), nullable=False), + ] + + return pa.schema(fields) + + +class Divides: + """The schema for divides table + + Attributes + ---------- + divide_id : str + Unique divide identifier + toid : str + Flowpath id where water flows + type : str + Divide Type, one of coastal, internal, network + ds_id : float + Most Downstream flowpath element adjacent to internal divides + areasqkm : float + Incremental Areas of Divide [square kilometers] + vpuid : str + Vector Processing Unit ID + id : str + Unique flowpath identifier + lengthkm : float + Length in kilometers of Flowpath + tot_drainage_areasqkm : float + Total Upstream Drainage Area [square kilometers] + has_flowline : bool + Does divide have an associated flowpath + geometry : binary + Spatial Geometry (POLYGON format) + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "divide_id", + "toid", + "type", + "ds_id", + "areasqkm", + "vpuid", + "id", + "lengthkm", + "tot_drainage_areasqkm", + "has_flowline", + "geometry", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. 
+ + Returns + ------- + Schema + PyIceberg schema for divides table + """ + return Schema( + NestedField(1, "divide_id", StringType(), required=True), + NestedField(2, "toid", StringType(), required=False), + NestedField(3, "type", StringType(), required=False), + NestedField(4, "ds_id", DoubleType(), required=False), + NestedField(5, "areasqkm", DoubleType(), required=False), + NestedField(6, "vpuid", StringType(), required=True), + NestedField(7, "id", StringType(), required=False), + NestedField(8, "lengthkm", DoubleType(), required=False), + NestedField(9, "tot_drainage_areasqkm", DoubleType(), required=False), + NestedField(10, "has_flowline", BooleanType(), required=False), + NestedField(11, "geometry", BinaryType(), required=False), + identifier_field_ids=[1, 6], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. + + Returns + ------- + pa.Schema + PyArrow schema for divides table + """ + return pa.schema( + [ + pa.field("divide_id", pa.string(), nullable=False), + pa.field("toid", pa.string(), nullable=True), + pa.field("type", pa.string(), nullable=True), + pa.field("ds_id", pa.float64(), nullable=True), + pa.field("areasqkm", pa.float64(), nullable=True), + pa.field("vpuid", pa.string(), nullable=False), + pa.field("id", pa.string(), nullable=True), + pa.field("lengthkm", pa.float64(), nullable=True), + pa.field("tot_drainage_areasqkm", pa.float64(), nullable=True), + pa.field("has_flowline", pa.bool_(), nullable=True), + pa.field("geometry", pa.binary(), nullable=True), + ] + ) + + +class FlowpathAttributes: + """The schema for flowpath attributes table + + Attributes + ---------- + link : str + Identical to id, but naming needed for t-route + to : str + Identical to toid, but naming needed for t-route + Length_m : float + Length of flowpath id [meters] + Y : float + Estimated depth (m) associated with TopWdth + n : float + Manning's in channel roughness + nCC : float + Compound Channel Manning's roughness + BtmWdth : float + Bottom width of channel [meters] + TopWdth : float + Top Width [meters] + TopWdthCC : float + Compound Channel Top Width [meters] + ChSlp : float + Channel side slope + alt : float + Elevation in meters, at the headwater node, taken from the 3DEP 10m DEM + So : float + Slope [meters/meters], computed from the 3DEP 10m DEM + MusX : float + Muskingum routing parameter + MusK : float + Muskingum routing time [seconds] + gage : str + If there is a gage, the hl_link is stored + gage_nex_id : str + The downstream nexus associated with the gage + WaterbodyID : str + If there is a waterbody, the hl_link is stored + waterbody_nex_id : str + The downstream nexus associated with the waterbody + id : str + Unique flowpath identifier + toid : str + Flowpath id where water flows + vpuid : str + Vector Processing Unit ID + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "link", + "to", + "Length_m", + "Y", + "n", + "nCC", + "BtmWdth", + "TopWdth", + "TopWdthCC", + "ChSlp", + "alt", + "So", + "MusX", + "MusK", + "gage", + "gage_nex_id", + "WaterbodyID", + "waterbody_nex_id", + "id", + "toid", + "vpuid", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. 
+ + Returns + ------- + Schema + PyIceberg schema for flowpath attributes table + """ + return Schema( + NestedField(1, "link", StringType(), required=False), + NestedField(2, "to", StringType(), required=False), + NestedField(3, "Length_m", DoubleType(), required=False), + NestedField(4, "Y", DoubleType(), required=False), + NestedField(5, "n", DoubleType(), required=False), + NestedField(6, "nCC", DoubleType(), required=False), + NestedField(7, "BtmWdth", DoubleType(), required=False), + NestedField(8, "TopWdth", DoubleType(), required=False), + NestedField(9, "TopWdthCC", DoubleType(), required=False), + NestedField(10, "ChSlp", DoubleType(), required=False), + NestedField(11, "alt", DoubleType(), required=False), + NestedField(12, "So", DoubleType(), required=False), + NestedField(13, "MusX", DoubleType(), required=False), + NestedField(14, "MusK", DoubleType(), required=False), + NestedField(15, "gage", StringType(), required=False), + NestedField(16, "gage_nex_id", StringType(), required=False), + NestedField(17, "WaterbodyID", StringType(), required=False), + NestedField(18, "waterbody_nex_id", StringType(), required=False), + NestedField(19, "id", StringType(), required=True), + NestedField(20, "toid", StringType(), required=True), + NestedField(21, "vpuid", StringType(), required=True), + identifier_field_ids=[19, 20, 21], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. + + Returns + ------- + pa.Schema + PyArrow schema for flowpath attributes table + """ + return pa.schema( + [ + pa.field("link", pa.string(), nullable=True), + pa.field("to", pa.string(), nullable=True), + pa.field("Length_m", pa.float64(), nullable=True), + pa.field("Y", pa.float64(), nullable=True), + pa.field("n", pa.float64(), nullable=True), + pa.field("nCC", pa.float64(), nullable=True), + pa.field("BtmWdth", pa.float64(), nullable=True), + pa.field("TopWdth", pa.float64(), nullable=True), + pa.field("TopWdthCC", pa.float64(), nullable=True), + pa.field("ChSlp", pa.float64(), nullable=True), + pa.field("alt", pa.float64(), nullable=True), + pa.field("So", pa.float64(), nullable=True), + pa.field("MusX", pa.float64(), nullable=True), + pa.field("MusK", pa.float64(), nullable=True), + pa.field("gage", pa.string(), nullable=True), + pa.field("gage_nex_id", pa.string(), nullable=True), + pa.field("WaterbodyID", pa.string(), nullable=True), + pa.field("waterbody_nex_id", pa.string(), nullable=True), + pa.field("id", pa.string(), nullable=False), + pa.field("toid", pa.string(), nullable=False), + pa.field("vpuid", pa.string(), nullable=False), + ] + ) + + +class FlowpathAttributesML: + """The schema for machine learning flowpath attributes table + + Attributes + ---------- + link : str + Identical to id, but naming needed for t-route + to : str + Identical to toid, but naming needed for t-route + Length_m : float + Length of flowpath id [meters] + alt : float + Elevation in meters, at the headwater node, taken from the 3DEP 10m DEM + So : float + Slope [meters/meters], computed from the 3DEP 10m DEM + MusX : float + Muskingum routing parameter + MusK : float + Muskingum routing time [seconds] + gage : str + If there is a gage, the hl_link is stored + gage_nex_id : str + The downstream nexus associated with the gage + WaterbodyID : str + If there is a waterbody, the hl_link is stored + waterbody_nex_id : str + The downstream nexus associated with the waterbody + id : str + Unique flowpath identifier + toid : str + Flowpath id where water flows + vpuid : str + 
Vector Processing Unit ID + n : float + Manning's in channel roughness + BtmWdth : float + Bottom width of channel [meters] + TopWdth : float + Top Width [meters] + ChSlp : float + Channel side slope + nCC : float + Compound Channel Manning's roughness + TopWdthCC : float + Compound Channel Top Width [meters] + Y : float + Estimated depth (m) associated with TopWdth + YCC : float + Estimated depth (m) associated with TopWdthCC + dingman_r : float + Estimated channel shape parameter + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "link", + "to", + "Length_m", + "alt", + "So", + "MusX", + "MusK", + "gage", + "gage_nex_id", + "WaterbodyID", + "waterbody_nex_id", + "id", + "toid", + "vpuid", + "n", + "BtmWdth", + "TopWdth", + "ChSlp", + "nCC", + "TopWdthCC", + "Y", + "YCC", + "dingman_r", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for flowpath attributes ML table + """ + return Schema( + NestedField(1, "link", StringType(), required=False), + NestedField(2, "to", StringType(), required=False), + NestedField(3, "Length_m", DoubleType(), required=False), + NestedField(4, "alt", DoubleType(), required=False), + NestedField(5, "So", DoubleType(), required=False), + NestedField(6, "MusX", DoubleType(), required=False), + NestedField(7, "MusK", DoubleType(), required=False), + NestedField(8, "gage", StringType(), required=False), + NestedField(9, "gage_nex_id", StringType(), required=False), + NestedField(10, "WaterbodyID", StringType(), required=False), + NestedField(11, "waterbody_nex_id", StringType(), required=False), + NestedField(12, "id", StringType(), required=True), + NestedField(13, "toid", StringType(), required=True), + NestedField(14, "vpuid", StringType(), required=True), + NestedField(15, "n", DoubleType(), required=False), + NestedField(16, "BtmWdth", DoubleType(), required=False), + NestedField(17, "TopWdth", DoubleType(), required=False), + NestedField(18, "ChSlp", DoubleType(), required=False), + NestedField(19, "nCC", DoubleType(), required=False), + NestedField(20, "TopWdthCC", DoubleType(), required=False), + NestedField(21, "Y", DoubleType(), required=False), + NestedField(22, "YCC", DoubleType(), required=False), + NestedField(23, "dingman_r", DoubleType(), required=False), + identifier_field_ids=[12, 13, 14], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. 
+ + Returns + ------- + pa.Schema + PyArrow schema for flowpath attributes ML table + """ + return pa.schema( + [ + pa.field("link", pa.string(), nullable=True), + pa.field("to", pa.string(), nullable=True), + pa.field("Length_m", pa.float64(), nullable=True), + pa.field("alt", pa.float64(), nullable=True), + pa.field("So", pa.float64(), nullable=True), + pa.field("MusX", pa.float64(), nullable=True), + pa.field("MusK", pa.float64(), nullable=True), + pa.field("gage", pa.string(), nullable=True), + pa.field("gage_nex_id", pa.string(), nullable=True), + pa.field("WaterbodyID", pa.string(), nullable=True), + pa.field("waterbody_nex_id", pa.string(), nullable=True), + pa.field("id", pa.string(), nullable=False), + pa.field("toid", pa.string(), nullable=False), + pa.field("vpuid", pa.string(), nullable=False), + pa.field("n", pa.float64(), nullable=True), + pa.field("BtmWdth", pa.float64(), nullable=True), + pa.field("TopWdth", pa.float64(), nullable=True), + pa.field("ChSlp", pa.float64(), nullable=True), + pa.field("nCC", pa.float64(), nullable=True), + pa.field("TopWdthCC", pa.float64(), nullable=True), + pa.field("Y", pa.float64(), nullable=True), + pa.field("YCC", pa.float64(), nullable=True), + pa.field("dingman_r", pa.float64(), nullable=True), + ] + ) + + +class Flowpaths: + """The schema for flowpaths table + + Attributes + ---------- + id : str + Unique flowpath identifier + toid : str + Flowpath id where water flows + mainstem : float + Persistent Mainstem Identifier + order : float + Stream order (Strahler) + hydroseq : int + Hydrologic Sequence + lengthkm : float + Length in kilometers of Flowpath + areasqkm : float + Incremental Areas of Divide [square kilometers] + tot_drainage_areasqkm : float + Total Upstream Drainage Area [square kilometers] + has_divide : bool + Does Flowpath ID have an associated divide + divide_id : str + Unique divide identifier + poi_id : str + Unique Point of Interest identifier + vpuid : str + Vector Processing Unit ID + geometry : binary + Spatial Geometry (LINESTRING format) + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "id", + "toid", + "mainstem", + "order", + "hydroseq", + "lengthkm", + "areasqkm", + "tot_drainage_areasqkm", + "has_divide", + "divide_id", + "poi_id", + "vpuid", + "geometry", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for flowpaths table + """ + return Schema( + NestedField(1, "id", StringType(), required=True), + NestedField(2, "toid", StringType(), required=True), + NestedField(3, "mainstem", DoubleType(), required=False), + NestedField(4, "order", DoubleType(), required=False), + NestedField(5, "hydroseq", IntegerType(), required=False), + NestedField(6, "lengthkm", DoubleType(), required=False), + NestedField(7, "areasqkm", DoubleType(), required=False), + NestedField(8, "tot_drainage_areasqkm", DoubleType(), required=False), + NestedField(9, "has_divide", BooleanType(), required=False), + NestedField(10, "divide_id", StringType(), required=True), + NestedField(11, "poi_id", StringType(), required=False), + NestedField(12, "vpuid", StringType(), required=True), + NestedField(13, "geometry", BinaryType(), required=False), + identifier_field_ids=[1, 2, 10, 12], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. 
+ + Returns + ------- + pa.Schema + PyArrow schema for flowpaths table + """ + return pa.schema( + [ + pa.field("id", pa.string(), nullable=False), + pa.field("toid", pa.string(), nullable=False), + pa.field("mainstem", pa.float64(), nullable=True), + pa.field("order", pa.float64(), nullable=True), + pa.field("hydroseq", pa.int32(), nullable=True), + pa.field("lengthkm", pa.float64(), nullable=True), + pa.field("areasqkm", pa.float64(), nullable=True), + pa.field("tot_drainage_areasqkm", pa.float64(), nullable=True), + pa.field("has_divide", pa.bool_(), nullable=True), + pa.field("divide_id", pa.string(), nullable=False), + pa.field("poi_id", pa.string(), nullable=True), + pa.field("vpuid", pa.string(), nullable=False), + pa.field("geometry", pa.binary(), nullable=True), + ] + ) + + +class Hydrolocations: + """The schema for hydrolocations table + + Attributes + ---------- + poi_id : int + Unique Point of Interest identifier + id : str + Unique flowpath identifier + nex_id : str + Unique nexus ID + hf_id : float + Unique ID of the source (hf_source) reference hydrofabric + hl_link : str + Unique ID of the hydrolocations in the reference dataset + hl_reference : str + Native dataset that hydrolocation was extracted from + hl_uri : str + Concatenation of hl_reference and hl_link + hl_source : str + Where is the data source from? USGS-NOAA Reference Fabric, or, NOAA-OWP + hl_x : float + X coordinate of the hydrolocation + hl_y : float + Y coordinate of the hydrolocation + vpuid : str + Vector Processing Unit ID + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "poi_id", + "id", + "nex_id", + "hf_id", + "hl_link", + "hl_reference", + "hl_uri", + "hl_source", + "hl_x", + "hl_y", + "vpuid", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for hydrolocations table + """ + return Schema( + NestedField(1, "poi_id", IntegerType(), required=False), + NestedField(2, "id", StringType(), required=True), + NestedField(3, "nex_id", StringType(), required=True), + NestedField(4, "hf_id", DoubleType(), required=False), + NestedField(5, "hl_link", StringType(), required=False), + NestedField(6, "hl_reference", StringType(), required=False), + NestedField(7, "hl_uri", StringType(), required=False), + NestedField(8, "hl_source", StringType(), required=False), + NestedField(9, "hl_x", DoubleType(), required=False), + NestedField(10, "hl_y", DoubleType(), required=False), + NestedField(11, "vpuid", StringType(), required=True), + identifier_field_ids=[2, 3, 11], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. 
+ + Returns + ------- + pa.Schema + PyArrow schema for hydrolocations table + """ + return pa.schema( + [ + pa.field("poi_id", pa.int32(), nullable=True), + pa.field("id", pa.string(), nullable=False), + pa.field("nex_id", pa.string(), nullable=False), + pa.field("hf_id", pa.float64(), nullable=True), + pa.field("hl_link", pa.string(), nullable=True), + pa.field("hl_reference", pa.string(), nullable=True), + pa.field("hl_uri", pa.string(), nullable=True), + pa.field("hl_source", pa.string(), nullable=True), + pa.field("hl_x", pa.float64(), nullable=True), + pa.field("hl_y", pa.float64(), nullable=True), + pa.field("vpuid", pa.string(), nullable=False), + ] + ) + + +class Lakes: + """The schema for lakes table + + Attributes + ---------- + lake_id : float + Unique NWS Lake ID (taken from NHDPlus) + LkArea : float + Area associated with lake_id [square kilometers] + LkMxE : float + Maximum lake elevation [meters above sea level] + WeirC : float + Weir coefficient + WeirL : float + Weir length [meters] + OrificeC : float + Orifice coefficient + OrificeA : float + Orifice cross-sectional area [square meters] + OrificeE : float + Orifice elevation [meters above sea level] + WeirE : float + Weir elevation [meters above sea level] + ifd : float + Initial fraction water depth + Dam_Length : float + Length of the dam in meters + domain : str + Domain of Hydrofabric (conus, hi, gl, ak, prvi) + poi_id : int + Unique Point of Interest identifier + hf_id : float + Unique ID of the source (hf_source) reference hydrofabric + reservoir_index_AnA : float + Reservoir release data type for AnA configuration; Level pool = 1, USGS-persistence = 2, USACE-persistence = 3, RFC-forecasts = 4 + reservoir_index_Extended_AnA : float + Reservoir release data type for extended AnA configuration; Level pool = 1, USGS-persistence = 2, USACE-persistence = 3, RFC-forecasts = 4 + reservoir_index_GDL_AK : float + Reservoir release data type for Alaska domain; Level pool = 1, USGS-persistence = 2, USACE-persistence = 3, RFC-forecasts = 4, APRFC-GDL = 5 + reservoir_index_Medium_Range : float + Reservoir release data type for extended medium range configuration; Level pool = 1, USGS-persistence = 2, USACE-persistence = 3, RFC-forecasts = 4 + reservoir_index_Short_Range : float + Reservoir release data type for extended short range configuration; Level pool = 1, USGS-persistence = 2, USACE-persistence = 3, RFC-forecasts = 4 + res_id : str + Unique Reservoir Identifier + vpuid : str + Vector Processing Unit ID + lake_x : float + X coordinate of the lake centroid + lake_y : float + Y coordinate of the lake centroid + geometry : binary + Spatial Geometry (POLYGON format) + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "lake_id", + "LkArea", + "LkMxE", + "WeirC", + "WeirL", + "OrificeC", + "OrificeA", + "OrificeE", + "WeirE", + "ifd", + "Dam_Length", + "domain", + "poi_id", + "hf_id", + "reservoir_index_AnA", + "reservoir_index_Extended_AnA", + "reservoir_index_GDL_AK", + "reservoir_index_Medium_Range", + "reservoir_index_Short_Range", + "res_id", + "vpuid", + "lake_x", + "lake_y", + "geometry", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. 
+ + Returns + ------- + Schema + PyIceberg schema for lakes table + """ + return Schema( + NestedField(1, "lake_id", DoubleType(), required=False), + NestedField(2, "LkArea", DoubleType(), required=False), + NestedField(3, "LkMxE", DoubleType(), required=False), + NestedField(4, "WeirC", DoubleType(), required=False), + NestedField(5, "WeirL", DoubleType(), required=False), + NestedField(6, "OrificeC", DoubleType(), required=False), + NestedField(7, "OrificeA", DoubleType(), required=False), + NestedField(8, "OrificeE", DoubleType(), required=False), + NestedField(9, "WeirE", DoubleType(), required=False), + NestedField(10, "ifd", DoubleType(), required=False), + NestedField(11, "Dam_Length", DoubleType(), required=False), + NestedField(12, "domain", StringType(), required=False), + NestedField(13, "poi_id", IntegerType(), required=True), + NestedField(14, "hf_id", DoubleType(), required=False), + NestedField(15, "reservoir_index_AnA", DoubleType(), required=False), + NestedField(16, "reservoir_index_Extended_AnA", DoubleType(), required=False), + NestedField(17, "reservoir_index_GDL_AK", DoubleType(), required=False), + NestedField(18, "reservoir_index_Medium_Range", DoubleType(), required=False), + NestedField(19, "reservoir_index_Short_Range", DoubleType(), required=False), + NestedField(20, "res_id", StringType(), required=False), + NestedField(21, "vpuid", StringType(), required=True), + NestedField(22, "lake_x", DoubleType(), required=False), + NestedField(23, "lake_y", DoubleType(), required=False), + NestedField(24, "geometry", BinaryType(), required=False), + identifier_field_ids=[13, 21], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. + + Returns + ------- + pa.Schema + PyArrow schema for lakes table + """ + return pa.schema( + [ + pa.field("lake_id", pa.float64(), nullable=True), + pa.field("LkArea", pa.float64(), nullable=True), + pa.field("LkMxE", pa.float64(), nullable=True), + pa.field("WeirC", pa.float64(), nullable=True), + pa.field("WeirL", pa.float64(), nullable=True), + pa.field("OrificeC", pa.float64(), nullable=True), + pa.field("OrificeA", pa.float64(), nullable=True), + pa.field("OrificeE", pa.float64(), nullable=True), + pa.field("WeirE", pa.float64(), nullable=True), + pa.field("ifd", pa.float64(), nullable=True), + pa.field("Dam_Length", pa.float64(), nullable=True), + pa.field("domain", pa.string(), nullable=True), + pa.field("poi_id", pa.int32(), nullable=False), + pa.field("hf_id", pa.float64(), nullable=True), + pa.field("reservoir_index_AnA", pa.float64(), nullable=True), + pa.field("reservoir_index_Extended_AnA", pa.float64(), nullable=True), + pa.field("reservoir_index_GDL_AK", pa.float64(), nullable=True), + pa.field("reservoir_index_Medium_Range", pa.float64(), nullable=True), + pa.field("reservoir_index_Short_Range", pa.float64(), nullable=True), + pa.field("res_id", pa.string(), nullable=True), + pa.field("vpuid", pa.string(), nullable=False), + pa.field("lake_x", pa.float64(), nullable=True), + pa.field("lake_y", pa.float64(), nullable=True), + pa.field("geometry", pa.binary(), nullable=True), + ] + ) + + +class Network: + """The schema for network table + + Attributes + ---------- + id : str + Unique flowpath identifier + toid : str + Flowpath id where water flows + divide_id : str + Unique divide identifier + ds_id : float + Most Downstream flowpath element adjacent to internal divides + mainstem : float + Persistent Mainstem Identifier + hydroseq : float + Hydrologic Sequence + hf_source : str + 
Source of the reference hydrofabric + hf_id : float + Unique ID of the source (hf_source) reference hydrofabric + lengthkm : float + Length in kilometers of Flowpath + areasqkm : float + Incremental Areas of Divide [square kilometers] + tot_drainage_areasqkm : float + Total Upstream Drainage Area [square kilometers] + type : str + Feature type, one of coastal, internal, network + vpuid : str + Vector Processing Unit ID + hf_hydroseq : float + Source hydrofabric hydrosequence + hf_lengthkm : float + Source hydrofabric length in kilometers + hf_mainstem : float + Source hydrofabric mainstem + topo : str + Topological information + poi_id : float + Unique Point of Interest identifier + hl_uri : str + Concatenation of hl_reference and hl_link + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "id", + "toid", + "divide_id", + "ds_id", + "mainstem", + "hydroseq", + "hf_source", + "hf_id", + "lengthkm", + "areasqkm", + "tot_drainage_areasqkm", + "type", + "vpuid", + "hf_hydroseq", + "hf_lengthkm", + "hf_mainstem", + "topo", + "poi_id", + "hl_uri", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for network table + """ + return Schema( + NestedField(1, "id", StringType(), required=False), + NestedField(2, "toid", StringType(), required=False), + NestedField(3, "divide_id", StringType(), required=False), + NestedField(4, "ds_id", DoubleType(), required=False), + NestedField(5, "mainstem", DoubleType(), required=False), + NestedField(6, "hydroseq", DoubleType(), required=False), + NestedField(7, "hf_source", StringType(), required=False), + NestedField(8, "hf_id", DoubleType(), required=False), + NestedField(9, "lengthkm", DoubleType(), required=False), + NestedField(10, "areasqkm", DoubleType(), required=False), + NestedField(11, "tot_drainage_areasqkm", DoubleType(), required=False), + NestedField(12, "type", StringType(), required=False), + NestedField(13, "vpuid", StringType(), required=False), + NestedField(14, "hf_hydroseq", DoubleType(), required=False), + NestedField(15, "hf_lengthkm", DoubleType(), required=False), + NestedField(16, "hf_mainstem", DoubleType(), required=False), + NestedField(17, "topo", StringType(), required=False), + NestedField(18, "poi_id", DoubleType(), required=False), + NestedField(19, "hl_uri", StringType(), required=False), + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. 
+ + Returns + ------- + pa.Schema + PyArrow schema for network table + """ + return pa.schema( + [ + pa.field("id", pa.string(), nullable=True), + pa.field("toid", pa.string(), nullable=True), + pa.field("divide_id", pa.string(), nullable=True), + pa.field("ds_id", pa.float64(), nullable=True), + pa.field("mainstem", pa.float64(), nullable=True), + pa.field("hydroseq", pa.float64(), nullable=True), + pa.field("hf_source", pa.string(), nullable=True), + pa.field("hf_id", pa.float64(), nullable=True), + pa.field("lengthkm", pa.float64(), nullable=True), + pa.field("areasqkm", pa.float64(), nullable=True), + pa.field("tot_drainage_areasqkm", pa.float64(), nullable=True), + pa.field("type", pa.string(), nullable=True), + pa.field("vpuid", pa.string(), nullable=True), + pa.field("hf_hydroseq", pa.float64(), nullable=True), + pa.field("hf_lengthkm", pa.float64(), nullable=True), + pa.field("hf_mainstem", pa.float64(), nullable=True), + pa.field("topo", pa.string(), nullable=True), + pa.field("poi_id", pa.float64(), nullable=True), + pa.field("hl_uri", pa.string(), nullable=True), + ] + ) + + +class Nexus: + """The schema for nexus table + + Attributes + ---------- + id : str + Unique flowpath identifier + toid : str + Flowpath id where water flows + type : str + Nexus type, one of coastal, internal, network + vpuid : str + Vector Processing Unit ID + poi_id : float + Unique Point of Interest identifier + geometry : binary + Spatial Geometry (POINT format) + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "id", + "toid", + "type", + "vpuid", + "poi_id", + "geometry", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for nexus table + """ + return Schema( + NestedField(1, "id", StringType(), required=True), + NestedField(2, "toid", StringType(), required=True), + NestedField(3, "type", StringType(), required=False), + NestedField(4, "vpuid", StringType(), required=True), + NestedField(5, "poi_id", DoubleType(), required=False), + NestedField(6, "geometry", BinaryType(), required=False), + identifier_field_ids=[1, 2, 4], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. + + Returns + ------- + pa.Schema + PyArrow schema for nexus table + """ + return pa.schema( + [ + pa.field("id", pa.string(), nullable=False), + pa.field("toid", pa.string(), nullable=False), + pa.field("type", pa.string(), nullable=True), + pa.field("vpuid", pa.string(), nullable=False), + pa.field("poi_id", pa.float64(), nullable=True), + pa.field("geometry", pa.binary(), nullable=True), + ] + ) + + +class POIs: + """The schema for points of interest (POIs) table + + Attributes + ---------- + poi_id : int + Unique Point of Interest identifier + id : str + Unique flowpath identifier + nex_id : str + Unique nexus ID + vpuid : str + Vector Processing Unit ID + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "poi_id", + "id", + "nex_id", + "vpuid", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. 
+ + Returns + ------- + Schema + PyIceberg schema for POIs table + """ + return Schema( + NestedField(1, "poi_id", IntegerType(), required=True), + NestedField(2, "id", StringType(), required=True), + NestedField(3, "nex_id", StringType(), required=True), + NestedField(4, "vpuid", StringType(), required=True), + identifier_field_ids=[1, 2, 3, 4], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. + + Returns + ------- + pa.Schema + PyArrow schema for POIs table + """ + return pa.schema( + [ + pa.field("poi_id", pa.int32(), nullable=False), + pa.field("id", pa.string(), nullable=False), + pa.field("nex_id", pa.string(), nullable=False), + pa.field("vpuid", pa.string(), nullable=False), + ] + ) diff --git a/src/icefabric/schemas/iceberg_tables/hydrofabric_snapshots.py b/src/icefabric/schemas/iceberg_tables/hydrofabric_snapshots.py new file mode 100644 index 0000000..ff0322d --- /dev/null +++ b/src/icefabric/schemas/iceberg_tables/hydrofabric_snapshots.py @@ -0,0 +1,96 @@ +"""Contains the PyIceberg Table schema for all hydrofabric layers""" + +import pyarrow as pa +from pyiceberg.schema import Schema +from pyiceberg.types import LongType, NestedField, StringType + + +class HydrofabricSnapshot: + """The schema containing all snapshots of the layers for the Hydrofabric. This is used to version control many layers + + Attributes + ---------- + - domain + - divide-attributes + - divides + - flowpath-attributes + - flowpath-attributes-ml + - flowpaths + - hydrolocations + - lakes + - network + - nexus + - pois + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "domain", + "divide-attributes", + "divides", + "flowpath-attributes", + "flowpath-attributes-ml", + "flowpaths", + "hydrolocations", + "lakes", + "network", + "nexus", + "pois", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for Hydrofabric + """ + return Schema( + NestedField(1, "domain", StringType(), required=True), + NestedField(2, "divide-attributes", LongType(), required=False), + NestedField(3, "divides", LongType(), required=False), + NestedField(4, "flowpath-attributes", LongType(), required=False), + NestedField(5, "flowpath-attributes-ml", LongType(), required=False), + NestedField(6, "flowpaths", LongType(), required=False), + NestedField(7, "hydrolocations", LongType(), required=False), + NestedField(8, "lakes", LongType(), required=False), + NestedField(9, "network", LongType(), required=False), + NestedField(10, "nexus", LongType(), required=False), + NestedField(11, "pois", LongType(), required=False), + identifier_field_ids=[1], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. 
+
+        Returns
+        -------
+        pa.Schema
+            PyArrow schema for Hydrofabric
+        """
+        return pa.schema(
+            [
+                pa.field("domain", pa.string(), nullable=False),
+                pa.field("divide-attributes", pa.int64(), nullable=True),
+                pa.field("divides", pa.int64(), nullable=True),
+                pa.field("flowpath-attributes", pa.int64(), nullable=True),
+                pa.field("flowpath-attributes-ml", pa.int64(), nullable=True),
+                pa.field("flowpaths", pa.int64(), nullable=True),
+                pa.field("hydrolocations", pa.int64(), nullable=True),
+                pa.field("lakes", pa.int64(), nullable=True),
+                pa.field("network", pa.int64(), nullable=True),
+                pa.field("nexus", pa.int64(), nullable=True),
+                pa.field("pois", pa.int64(), nullable=True),
+            ]
+        )
diff --git a/src/icefabric/schemas/iceberg_tables/ras_xs.py b/src/icefabric/schemas/iceberg_tables/ras_xs.py
new file mode 100644
index 0000000..1314425
--- /dev/null
+++ b/src/icefabric/schemas/iceberg_tables/ras_xs.py
@@ -0,0 +1,260 @@
+"""Contains the PyIceberg Table schema for Extracted RAS-XS mapped to the hydrofabric"""
+
+import pyarrow as pa
+from pyiceberg.schema import Schema
+from pyiceberg.types import BinaryType, DoubleType, NestedField, StringType
+
+
+class RepresentativeRasXS:
+    """The schema for representative RAS XS extracted to the hydrofabric
+
+    Attributes
+    ----------
+    - flowpath_id: The flowpath id the RAS XS aligns to in the reference hydrofabric
+    - r: Dingman's R coefficient (-)
+    - TW: Channel Top width (ft)
+    - Y: Channel depth (ft)
+    - source_river_station: Original river station from source dataset
+    - river_station: River station from median cross-section within the flowpath
+    - model: The submodel from which the XS was extracted. ex: '/ble_05119_Pulaski/submodels/15312271/15312271.gpkg'
+    - ftype: Feature type classification. ex: ['StreamRiver', 'CanalDitch', 'ArtificialPath', 'Connector', None, 'Pipeline']
+    - streamorde: Stream order of the mapped reference flowpath
+    - geometry: Binary Linestring geometry data (WKB format)
+    - metdata_units: Metadata units
+    - epsg: EPSG coordinate system code
+    - crs_units: Coordinate reference system units
+    - min_x: The minimum longitude associated with the linestring geometry data
+    - min_y: The minimum latitude associated with the linestring geometry data
+    - max_x: The maximum longitude associated with the linestring geometry data
+    - max_y: The maximum latitude associated with the linestring geometry data
+    """
+
+    @classmethod
+    def columns(cls) -> list[str]:
+        """Returns the columns associated with this schema
+
+        Returns
+        -------
+        list[str]
+            The schema columns
+        """
+        return [
+            "flowpath_id",
+            "r",
+            "TW",
+            "Y",
+            "source_river_station",
+            "river_station",
+            "model",
+            "ftype",
+            "streamorde",
+            "geometry",
+            "metdata_units",
+            "epsg",
+            "crs_units",
+            "min_x",
+            "min_y",
+            "max_x",
+            "max_y",
+        ]
+
+    @classmethod
+    def schema(cls) -> Schema:
+        """Returns the PyIceberg Schema object.
+
+        Returns
+        -------
+        Schema
+            PyIceberg schema for RAS XS table
+        """
+        return Schema(
+            NestedField(1, "flowpath_id", StringType(), required=True),
+            NestedField(2, "r", DoubleType(), required=False),
+            NestedField(3, "TW", DoubleType(), required=False),
+            NestedField(4, "Y", DoubleType(), required=False),
+            NestedField(5, "source_river_station", DoubleType(), required=False),
+            NestedField(6, "river_station", DoubleType(), required=False),
+            NestedField(7, "model", StringType(), required=False),
+            NestedField(8, "ftype", StringType(), required=False),
+            NestedField(9, "streamorde", DoubleType(), required=False),
+            NestedField(10, "geometry", BinaryType(), required=False),
+            NestedField(11, "metdata_units", StringType(), required=False),
+            NestedField(12, "epsg", DoubleType(), required=False),
+            NestedField(13, "crs_units", StringType(), required=False),
+            NestedField(14, "min_x", DoubleType(), required=False),
+            NestedField(15, "min_y", DoubleType(), required=False),
+            NestedField(16, "max_x", DoubleType(), required=False),
+            NestedField(17, "max_y", DoubleType(), required=False),
+            identifier_field_ids=[1],
+        )
+
+    @classmethod
+    def arrow_schema(cls) -> pa.Schema:
+        """Returns the PyArrow Schema object.
+
+        Returns
+        -------
+        pa.Schema
+            PyArrow schema for RAS XS table
+        """
+        return pa.schema(
+            [
+                pa.field("flowpath_id", pa.string(), nullable=False),
+                pa.field("r", pa.float64(), nullable=True),
+                pa.field("TW", pa.float64(), nullable=True),
+                pa.field("Y", pa.float64(), nullable=True),
+                pa.field("source_river_station", pa.float64(), nullable=True),
+                pa.field("river_station", pa.float64(), nullable=True),
+                pa.field("model", pa.string(), nullable=True),
+                pa.field("ftype", pa.string(), nullable=True),
+                pa.field("streamorde", pa.float64(), nullable=True),
+                pa.field("geometry", pa.binary(), nullable=True),
+                pa.field("metdata_units", pa.string(), nullable=True),
+                pa.field("epsg", pa.float64(), nullable=True),
+                pa.field("crs_units", pa.string(), nullable=True),
+                pa.field("min_x", pa.float64(), nullable=True),
+                pa.field("min_y", pa.float64(), nullable=True),
+                pa.field("max_x", pa.float64(), nullable=True),
+                pa.field("max_y", pa.float64(), nullable=True),
+            ]
+        )
+
+
+class ConflatedRasXS:
+    """The schema for conflated RAS XS extracted to the hydrofabric
+
+    Attributes
+    ----------
+    - flowpath_id: The flowpath id the RAS XS aligns to in the reference hydrofabric
+    - Ym: Mean depth (ft)
+    - TW: Channel Top width (ft)
+    - A: Cross-sectional area (sq ft)
+    - r: Dingman's R coefficient (-)
+    - river_station: River station from median cross-section within the flowpath
+    - source_river_station: Original river station from source dataset
+    - model: The submodel from which the XS was extracted
+    - domain: Domain information
+    - river_reach_rs: River reach and river station identifier
+    - source_river: Source river name
+    - source_reach: Source reach name
+    - station_elevation_points: Cross-section elevation points as JSON string
+    - bank_stations: Bank station locations as JSON string
+    - ftype: Feature type classification
+    - streamorde: Stream order of the mapped reference flowpath
+    - geometry: Binary Linestring geometry data (WKB format)
+    - metdata_units: Metadata units
+    - epsg: EPSG coordinate system code
+    - crs_units: Coordinate reference system units
+    - min_x: The minimum longitude associated with the linestring geometry data
+    - min_y: The minimum latitude associated with the linestring geometry data
+    - max_x: The maximum longitude associated with the linestring geometry 
data + - max_y: The maximum latitude associated with the linestring geometry data + """ + + @classmethod + def columns(cls) -> list[str]: + """Returns the columns associated with this schema + + Returns + ------- + list[str] + The schema columns + """ + return [ + "Ym", + "TW", + "flowpath_id", + "river_station", + "model", + "A", + "r", + "domain", + "river_reach_rs", + "source_river", + "source_reach", + "source_river_station", + "station_elevation_points", + "bank_stations", + "metdata_units", + "epsg", + "crs_units", + "ftype", + "streamorde", + "geometry", + "min_x", + "min_y", + "max_x", + "max_y", + ] + + @classmethod + def schema(cls) -> Schema: + """Returns the PyIceberg Schema object. + + Returns + ------- + Schema + PyIceberg schema for RAS XS table + """ + return Schema( + NestedField(1, "Ym", DoubleType(), required=False), + NestedField(2, "TW", DoubleType(), required=False), + NestedField(3, "flowpath_id", StringType(), required=True), + NestedField(4, "river_station", DoubleType(), required=False), + NestedField(5, "model", StringType(), required=False), + NestedField(6, "A", DoubleType(), required=False), + NestedField(7, "r", DoubleType(), required=False), + NestedField(8, "domain", StringType(), required=False), + NestedField(9, "river_reach_rs", StringType(), required=False), + NestedField(10, "source_river", StringType(), required=False), + NestedField(11, "source_reach", StringType(), required=False), + NestedField(12, "source_river_station", DoubleType(), required=False), + NestedField(13, "station_elevation_points", StringType(), required=False), + NestedField(14, "bank_stations", StringType(), required=False), + NestedField(15, "metdata_units", StringType(), required=False), + NestedField(16, "epsg", DoubleType(), required=False), + NestedField(17, "crs_units", StringType(), required=False), + NestedField(18, "ftype", StringType(), required=False), + NestedField(19, "streamorde", DoubleType(), required=False), + NestedField(20, "geometry", BinaryType(), required=False), + NestedField(21, "min_x", DoubleType(), required=False), + NestedField(22, "min_y", DoubleType(), required=False), + NestedField(23, "max_x", DoubleType(), required=False), + NestedField(24, "max_y", DoubleType(), required=False), + identifier_field_ids=[3], + ) + + @classmethod + def arrow_schema(cls) -> pa.Schema: + """Returns the PyArrow Schema object. 
+
+        Returns
+        -------
+        pa.Schema
+            PyArrow schema for RAS XS table
+        """
+        return pa.schema(
+            [
+                pa.field("Ym", pa.float64(), nullable=True),
+                pa.field("TW", pa.float64(), nullable=True),
+                pa.field("flowpath_id", pa.string(), nullable=False),
+                pa.field("river_station", pa.float64(), nullable=True),
+                pa.field("model", pa.string(), nullable=True),
+                pa.field("A", pa.float64(), nullable=True),
+                pa.field("r", pa.float64(), nullable=True),
+                pa.field("domain", pa.string(), nullable=True),
+                pa.field("river_reach_rs", pa.string(), nullable=True),
+                pa.field("source_river", pa.string(), nullable=True),
+                pa.field("source_reach", pa.string(), nullable=True),
+                pa.field("source_river_station", pa.float64(), nullable=True),
+                pa.field("station_elevation_points", pa.string(), nullable=True),
+                pa.field("bank_stations", pa.string(), nullable=True),
+                pa.field("metdata_units", pa.string(), nullable=True),
+                pa.field("epsg", pa.float64(), nullable=True),
+                pa.field("crs_units", pa.string(), nullable=True),
+                pa.field("ftype", pa.string(), nullable=True),
+                pa.field("streamorde", pa.float64(), nullable=True),
+                pa.field("geometry", pa.binary(), nullable=True),
+                pa.field("min_x", pa.float64(), nullable=True),
+                pa.field("min_y", pa.float64(), nullable=True),
+                pa.field("max_x", pa.float64(), nullable=True),
+                pa.field("max_y", pa.float64(), nullable=True),
+            ]
+        )
diff --git a/src/icefabric/schemas/modules.py b/src/icefabric/schemas/modules.py
new file mode 100644
index 0000000..132db6e
--- /dev/null
+++ b/src/icefabric/schemas/modules.py
@@ -0,0 +1,1305 @@
+"""A file to host schemas for all NWM modules. Based on the table from https://confluence.nextgenwaterprediction.com/pages/viewpage.action?spaceKey=NGWPC&title=BMI+Exchange+Items+and+Module+Parameters"""
+
+import enum
+from pathlib import Path
+from typing import Literal, Protocol
+
+from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+
+class NWMProtocol(Protocol):
+    """Protocol defining the interface that configuration NWM BaseModel classes should implement."""
+
+    def to_bmi_config(self) -> list[str]:
+        """Converts the contents of the base class to a BMI config for that specific module"""
+        ...
+
+    def model_dump_config(self, output_path: Path) -> Path:
+        """Outputs the BaseModel to a BMI Config file"""
+        ...
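Every module model defined below (SFT, Snow17, SMP, SacSma, LSTM, LASAM, NoahOwpModular, TRoute, Topmodel, Topoflow, UEB, and CFE) provides these two methods, so downstream code can generate BMI config files without knowing which module it is holding. A minimal usage sketch, assuming the package is importable as icefabric.schemas.modules; the catchment ID and soil parameter values are illustrative placeholders, not real data:

```python
from pathlib import Path

from icefabric.schemas.modules import SFT, IceFractionScheme, NWMProtocol


def write_configs(modules: list[NWMProtocol], out_dir: Path) -> list[Path]:
    """Write one BMI config file per module, relying only on the shared protocol."""
    out_dir.mkdir(parents=True, exist_ok=True)
    return [m.model_dump_config(out_dir) for m in modules]


# Placeholder catchment ID and soil parameters, purely for illustration
sft = SFT(
    catchment="cat-12345",
    smcmax=0.439,
    b=4.05,
    satpsi=0.355,
    ice_fraction_scheme=IceFractionScheme.SCHAAKE,
    soil_temperature=[280.0, 281.0, 282.0, 284.0],
)

print(write_configs([sft], Path("./configs")))
# e.g. [PosixPath('configs/cat-12345_bmi_config_sft.txt')]
```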
+ + +class IceFractionScheme(str, enum.Enum): + """The ice fraction scheme to be used in SFT""" + + SCHAAKE = "Schaake" + XINANJIANG = "Xinanjiang" + + +class SFT(BaseModel): + """Pydantic model for SFT (Snow Freeze Thaw) module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + verbosity: str = Field(default="none", description="Verbosity level") + soil_moisture_bmi: int = Field(default=1, description="Soil moisture BMI parameter") + end_time: str = Field(default="1.[d]", description="End time with units") + dt: str = Field(default="1.0[h]", description="Time step with units") + soil_params_smcmax: float = Field(..., description="Maximum soil moisture content", alias="smcmax") + soil_params_b: float = Field(..., description="Soil moisture retention curve parameter (bexp)", alias="b") + soil_params_satpsi: float = Field(..., description="Saturated soil suction (psisat)", alias="satpsi") + soil_params_quartz: float = Field(default=1.0, description="Quartz content", alias="quartz") + ice_fraction_scheme: IceFractionScheme = Field(..., description="Ice fraction scheme") + soil_z: list[float] = Field(default=[0.1, 0.3, 1.0, 2.0], description="Soil depth layers in meters") + soil_temperature: list[float] = Field(..., description="Soil temperature in Kelvin for each layer") + + @field_validator("soil_temperature") + @classmethod + def validate_soil_temperature_length(cls, v, info): + """Ensure soil_temperature has same length as soil_z""" + # Get soil_z from the data being validated + soil_z = info.data.get("soil_z", [0.1, 0.3, 1.0, 2.0]) + if len(v) != len(soil_z): + raise ValueError(f"soil_temperature must have {len(soil_z)} values to match soil_z layers") + return v + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + temp_values = ",".join([str(temp) for temp in self.soil_temperature]) + z_values = ",".join([str(z) for z in self.soil_z]) + + return [ + f"verbosity={self.verbosity}", + f"soil_moisture_bmi={self.soil_moisture_bmi}", + f"end_time={self.end_time}", + f"dt={self.dt}", + f"soil_params.smcmax={self.soil_params_smcmax}", + f"soil_params.b={self.soil_params_b}", + f"soil_params.satpsi={self.soil_params_satpsi}", + f"soil_params.quartz={self.soil_params_quartz}", + f"ice_fraction_scheme={self.ice_fraction_scheme.value}", + f"soil_z={z_values}[m]", + f"soil_temperature={temp_values}[K]", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + sft_bmi_file = output_path / f"{self.catchment}_bmi_config_sft.txt" + with open(sft_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return sft_bmi_file + + +class AlbedoValues(enum.Enum): + """A class to store land cover-derived albedo. + + Update land cover classes and corresponding values here. + Values are [0, 100] + """ + + snow = 0.75 + ice = 0.3 + other = 0.2 + + +class Albedo(BaseModel): + """A model to handle `/topoflow/albedo` inputs and outputs. + + Note: + This Literal will fail static type checking due to dynamically created values. + However, generating dynamically keeps this function DRY and creates the appropriate API inputs. 
+ If changes to albedo values are needed, they are only made in `AlbedoValues`. `Albedo` will never change. + """ + + landcover: Literal[tuple(AlbedoValues._member_names_)] + + def get_landcover_albedo(v: str): + """Return the albedo value""" + return getattr(AlbedoValues, v) + + +class CalibratableScheme(str, enum.Enum): + """The calibratable values to be used in Snow17""" + + MFMAX = 1.00 + MFMIN = 0.2 + UADJ = 0.05 + + +class Snow17(BaseModel): + """Pydantic model for Snow-17 module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + hru_id: str = Field(..., description="Unique divide identifier") + hru_area: float = Field(..., description="Incremental areas of divide") + latitude: float = Field(..., description="Y coordinates of divide centroid") + elev: float = Field(..., description="Elevation from DEM") + scf: float = Field(default=1.100, description="Snow Correction Factor") + mf_max: float = Field(default=1.00, description="Maximum non-rain melt factor") + mf_min: float = Field(default=0.20, description="Minimum non-rain melt factor") + uadj: float = Field(default=0.05, description="Average wind function for rain on snow") + si: float = Field(default=500.00, description="100% snow cover threshold") + pxtemp: float = Field(default=1.000, description="Precipitation vs Snow threshold temperature") + nmf: float = Field(default=0.150, description="maximum negative melt factor") + tipm: float = Field(default=0.100, description="Antecedent snow temperature index") + mbase: float = Field(default=0.000, description="Base Temperature for non-rain melt factor") + plwhc: float = Field(default=0.030, description="Percent liquid water holding capacity") + daygm: float = Field(default=0.000, description="Daily ground melt") + adc1: float = Field(default=0.050, description="areal depletion curve, WE/Ai=0") + adc2: float = Field(default=0.100, description="areal depletion curve, WE/Ai=0.1") + adc3: float = Field(default=0.200, description="areal depletion curve, WE/Ai=0.2") + adc4: float = Field(default=0.300, description="areal depletion curve, WE/Ai=0.3") + adc5: float = Field(default=0.400, description="areal depletion curve, WE/Ai=0.4") + adc6: float = Field(default=0.500, description="areal depletion curve, WE/Ai=0.5") + adc7: float = Field(default=0.600, description="areal depletion curve, WE/Ai=0.6") + adc8: float = Field(default=0.700, description="areal depletion curve, WE/Ai=0.7") + adc9: float = Field(default=0.800, description="areal depletion curve, WE/Ai=0.8") + adc10: float = Field(default=0.900, description="areal depletion curve, WE/Ai=0.9") + adc11: float = Field(default=1.000, description="areal depletion curve, WE/Ai=1.0") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + return [ + f"hru_id: {self.hru_id}", + f"hru_area: {self.hru_area}", + f"latitude: {self.latitude}", + f"elev: {self.elev}", + f"scf: {self.scf}", + f"mf_max: {self.mf_max}", + f"mf_min: {self.mf_min}", + f"uadj: {self.uadj}", + f"si: {self.si}", + f"pxtemp: {self.pxtemp}", + f"nmf: {self.nmf}", + f"tipm: {self.tipm}", + f"mbase: {self.mbase}", + f"plwhc: {self.plwhc}", + f"daygm: {self.daygm}", + f"adc1: {self.adc1}", + f"adc2: {self.adc2}", + f"adc3: {self.adc3}", + f"adc4: {self.adc4}", + f"adc5: {self.adc5}", + f"adc6: {self.adc6}", + f"adc7: {self.adc7}", + f"adc8: {self.adc8}", + f"adc9: {self.adc9}", + f"adc10: {self.adc10}", + f"adc11: 
{self.adc11}", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + snow17_bmi_file = output_path / f"{self.catchment}_bmi_config_snow17.txt" + with open(snow17_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return snow17_bmi_file + + +class SoilScheme(str, enum.Enum): + """The calibratable scheme to be used in SMP""" + + CFE_SOIL_STORAGE = "conceptual" + CFE_STORAGE_DEPTH = "2.0" + TOPMODEL_SOIL_STORAGE = "TopModel" + TOPMODEL_WATER_TABLE_METHOD = "flux-based" + LASAM_SOIL_STORAGE = "layered" + LASAM_SOIL_MOISTURE = "constant" + LASAM_SOIL_DEPTH_LAYERS = "2.0" + LASAM_WATER_TABLE_DEPTH = "10[m]" + + +class SMP(BaseModel): + """Pydantic model for SMP module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + verbosity: str = Field(default="none", description="Verbosity level") + soil_params_smcmax: float = Field(..., description="Maximum soil moisture content", alias="smcmax") + soil_params_b: float = Field(..., description="Soil moisture retention curve parameter (bexp)", alias="b") + soil_params_satpsi: float = Field(..., description="Saturated soil suction (psisat)", alias="satpsi") + soil_z: list[float] = Field(default=[0.1, 0.3, 1.0, 2.0], description="Soil depth layers in meters") + soil_moisture_fraction_depth: float = Field( + default=0.4, description="Soil moisture fraction depth in meters" + ) + soil_storage_model: str = Field( + default="NA", + description="If conceptual, conceptual models are used for computing the soil moisture profile (e.g., CFE). If layered, layered-based soil moisture models are used (e.g., LGAR). If topmodel, topmodel's variables are used", + ) + soil_storage_depth: str = Field( + default="none", + description="Depth of the soil reservoir model (e.g., CFE). Note: this depth can be different from the depth of the soil moisture profile which is based on soil_z", + ) + water_table_based_method: str = Field( + default="NA", + description="Needed if soil_storage_model = topmodel. flux-based uses an iterative scheme, and deficit-based uses catchment deficit to compute soil moisture profile", + ) + soil_moisture_profile_option: str = Field( + default="NA", + description="Constant for layered-constant profile. linear for linearly interpolated values between two consecutive layers. Needed if soil_storage_model = layered", + ) + soil_depth_layers: str = Field( + default="NA", description="Absolute depth of soil layers. 
Needed if soil_storage_model = layered" + ) + water_table_depth: str = Field(default="NA", description="N/A") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + z_values = ",".join([str(z) for z in self.soil_z]) + + return [ + f"verbosity={self.verbosity}", + f"soil_params.smcmax={self.soil_params_smcmax}", + f"soil_params.b={self.soil_params_b}", + f"soil_params.satpsi={self.soil_params_satpsi}", + f"soil_z={z_values}[m]", + f"soil_moisture_fraction_depth={self.soil_moisture_fraction_depth}[m]", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + smp_bmi_file = output_path / f"{self.catchment}_bmi_config_smp.txt" + with open(smp_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return smp_bmi_file + + +class SacSmaValues(str, enum.Enum): + """The values to be used in SAC SMA""" + + UZTWM = 75.0 + UZFWM = 30.0 + LZTWM = 150.0 + LZFPM = 300.0 + LZFSM = 150.0 + ADIMP = 0.0 + UZK = 0.3 + LZPK = 0.01 + LZSK = 0.1 + ZPERC = 100.0 + REXP = 2.0 + PCTIM = 0.0 + PFREE = 0.1 + RIVA = 0.0 + SIDE = 0.0 + RSERV = 0.3 + + +class SacSma(BaseModel): + """Pydantic model for SAC SMA module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + hru_id: str = Field(..., description="Unique divide identifier") + hru_area: float = Field(..., description="Incremental areas of divide") + uztwm: float = Field( + default=float(SacSmaValues.UZTWM.value), description="Maximum upper zone tension water" + ) + uzfwm: float = Field(default=float(SacSmaValues.UZFWM.value), description="Maximum upper zone free water") + lztwm: float = Field( + default=float(SacSmaValues.LZTWM.value), description="Maximum lower zone tension water" + ) + lzfpm: float = Field( + default=float(SacSmaValues.LZFPM.value), description="Maximum lower zone free water, primary" + ) + lzfsm: float = Field( + default=float(SacSmaValues.LZFSM.value), description="Maximum lower zone free water, secondary" + ) + adimp: float = Field( + default=float(SacSmaValues.ADIMP.value), description="Additional 'impervious' area due to saturation" + ) + uzk: float = Field(default=float(SacSmaValues.UZK.value), description="Upper zone recession coefficient") + lzpk: float = Field( + default=float(SacSmaValues.LZPK.value), description="Lower zone recession coefficient, primary" + ) + lzsk: float = Field( + default=float(SacSmaValues.LZSK.value), description="Lower zone recession coefficient, secondary" + ) + zperc: float = Field( + default=float(SacSmaValues.ZPERC.value), description="Minimum percolation rate coefficient" + ) + rexp: float = Field(default=float(SacSmaValues.REXP.value), description="Percolation equation exponent") + pctim: float = Field( + default=float(SacSmaValues.PCTIM.value), description="Minimum percent impervious area" + ) + pfree: float = Field( + default=float(SacSmaValues.PFREE.value), + description="Percent percolating directly to lower zone free water", + ) + riva: float = Field( + default=float(SacSmaValues.RIVA.value), description="Percent of the basin that is riparian area" + ) + side: float = Field( + default=float(SacSmaValues.SIDE.value), + description="Portion of the baseflow which does not go to the 
stream", + ) + rserv: float = Field( + default=float(SacSmaValues.RSERV.value), + description="Percent of lower zone free water not transferable to the lower zone tension water", + ) + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + return [ + f"hru_id: {self.hru_id}", + f"hru_area: {self.hru_area}", + f"uztwm: {self.uztwm}", + f"uzfwm: {self.uzfwm}", + f"lztwm: {self.lztwm}", + f"lzfpm: {self.lzfpm}", + f"lzfsm: {self.lzfsm}", + f"adimp: {self.adimp}", + f"uzk: {self.uzk}", + f"lzpk: {self.lzpk}", + f"lzsk: {self.lzsk}", + f"zperc: {self.zperc}", + f"rexp: {self.rexp}", + f"pctim: {self.pctim}", + f"pfree: {self.pfree}", + f"riva: {self.riva}", + f"side: {self.side}", + f"rserv: {self.rserv}", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + sacsma_bmi_file = output_path / f"{self.catchment}_bmi_config_sacsma.txt" + with open(sacsma_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return sacsma_bmi_file + + +class LSTM(BaseModel): + """ + Pydantic model for LSTM module configuration + + *Note: Per HF API, the following attributes for LSTM does not carry any relvant information: + 'train_cfg_file' & basin_name' -- remove if desire + + """ + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + area_sqkm: float = Field(..., description="Allows bmi to adjust a weighted output") + basin_id: str = Field( + ..., description="Refer to https://github.com/NOAA-OWP/lstm/blob/master/bmi_config_files/README.md" + ) + basin_name: str = Field( + default="", + description="Refer to https://github.com/NOAA-OWP/lstm/blob/master/bmi_config_files/README.md", + ) + elev_mean: float = Field(..., description="Catchment mean elevation (m) above sea level") + inital_state: str = Field( + default="zero", description="This is an option to set the initial states of the model to zero." + ) + lat: float = Field(..., description="Latitude") + lon: float = Field(..., description="Longitude") + slope_mean: float = Field(..., description="Catchment mean slope (m km−1)") + timestep: str = Field( + default="1 hour", + description="Refer to https://github.com/NOAA-OWP/lstm/blob/master/bmi_config_files/README.md", + ) + train_cfg_file: str = Field( + default="", + description="This is a configuration file used when training the model. It has critical information on the LSTM architecture and should not be altered.", + ) + verbose: str = Field( + default="0", description="Change to 1 in order to print additional BMI information during runtime." 
+ ) + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + return [ + f"area_sqkm: {self.area_sqkm}", + f"basin_id: {self.basin_id}", + f"basin_name: {self.basin_name}", + f"elev_mean: {self.elev_mean}", + f"inital_state: {self.inital_state}", + f"lat: {self.lat}", + f"lon: {self.lon}", + f"slope_mean: {self.slope_mean}", + f"timestep: {self.timestep}", + f"train_cfg_file: {self.train_cfg_file}", + f"verbose: {self.verbose}", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + lstm_bmi_file = output_path / f"{self.catchment}_bmi_config_lstm.txt" + with open(lstm_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return lstm_bmi_file + + +class LASAM(BaseModel): + """Pydantic model for LASAM module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + verbosity: str = Field(default="none", description="NA") + soil_params_file: str = Field(..., description="Full path to vG_default_params.dat") + layer_thickness: str = Field(default="200.0[cm]", description="Thickness of each layer (array)") + initial_psi: str = Field(default="2000.0[cm]", description="NA") + timestep: str = Field(default="300[sec]", description="NA") + endtime: str = Field(default="1000[hr]", description="NA") + forcing_resolution: str = Field(default="3600[sec]", description="NA") + ponded_depth_max: str = Field( + default="1.1[cm]", + description="Maximum amount of ponded water that is allowed to accumulate on the soil surface", + ) + use_closed_form_G: bool = Field(default=False, description="NA") + layer_soil_type: float = Field(default="", description="Type of each soil layer (array)") + max_soil_types: int = Field(default=15, description="NA") + wilting_point_psi: str = Field( + default="15495.0[cm]", description="Wilting point (the amount of water not available for plants)" + ) + field_capacity_psi: str = Field( + default="340.9[cm]", + description="Capillary head corresponding to volumetric water content at which gravity drainage becomes slower", + ) + giuh_ordinates: list[float] = Field(default=[0.06, 0.51, 0.28, 0.12, 0.03], description="giuh") + calib_params: bool = Field(default=True, description="NA") + adaptive_timestep: bool = Field(default=True, description="NA") + sft_coupled: bool = Field(..., description="NA") + soil_z: list[float] = Field(default=[10, 30, 100.0, 200.0], description="NA") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + z_values = ",".join([str(z) for z in self.soil_z]) + giuh_ordinates = ",".join([str(giuh) for giuh in self.giuh_ordinates]) + + return [ + f"verbosity={self.verbosity}", + f"soil_params_file={self.soil_params_file}", + f"layer_thickness={self.layer_thickness}", + f"initial_psi={self.initial_psi}", + f"timestep={self.timestep}", + f"endtime={self.endtime}", + f"forcing_resolution={self.forcing_resolution}", + f"ponded_depth_max={self.ponded_depth_max}", + f"use_closed_form_G={self.use_closed_form_G}", + f"layer_soil_type={self.layer_soil_type}", + f"max_soil_types={self.max_soil_types}", + f"wilting_point_psi={self.wilting_point_psi}", + f"field_capacity_psi={self.field_capacity_psi}", + 
f"giuh_ordinates={giuh_ordinates}", + f"calib_params={self.calib_params}", + f"adaptive_timestep={self.adaptive_timestep}", + f"sft_coupled={self.sft_coupled}", + f"soil_z={z_values}[cm]", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + lasam_bmi_file = output_path / f"{self.catchment}_bmi_config_lasam.txt" + with open(lasam_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return lasam_bmi_file + + +class NoahOwpModular(BaseModel): + """Pydantic model for Noah OWP module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + dt: float = Field(default=3600.0, description="Timestep [seconds]") + startdate: str = Field(default="202408260000", description="UTC time start of simulation (YYYYMMDDhhmm)") + enddate: str = Field(default="202408260000", description="# UTC time end of simulation (YYYYMMDDhhmm)") + forcing_filename: str = Field(default=".", description="File containing forcing data") + output_filename: str = Field(default=".", description="NA") + parameter_dir: str = Field(default="test", description="NA") + general_table: str = Field(default="GENPARM.TBL", description="General param tables and misc params") + soil_table: str = Field(default="SOILPARM.TBL", description="Soil param table") + noahowp_table: str = Field(default="MPTABLE.TBL", description="Model param tables (includes veg)") + soil_class_name: str = Field(default="STAS", description="Soil class data source - 'STAS' or 'STAS-RUC'") + veg_class_name: str = Field( + default="USGS", description="Vegetation class data source - 'MODIFIED_IGBP_MODIS_NOAH' or 'USGS'" + ) + lat: float = Field(..., description="Latitude [degrees] (-90 to 90)") + lon: float = Field(..., description="Longitude [degrees] (-180 to 180)") + terrain_slope: float = Field(..., description="Terrain slope [degrees]") + azimuth: float = Field(..., description="Terrain azimuth or aspect [degrees clockwise from north]") + ZREF: float = Field(default=10.0, description="Measurement height for wind speed (m)") + rain_snow_thresh: float = Field( + default=0.5, description="Rain-snow temperature threshold (degrees Celsius)" + ) + precip_phase_option: int = Field(default=6, description="NA") + snow_albedo_option: int = Field(default=1, description="NA") + dynamic_veg_option: int = Field(default=4, description="NA") + runoff_option: int = Field(default=3, description="NA") + drainage_option: int = Field(default=8, description="NA") + frozen_soil_option: int = Field(default=1, description="NA") + dynamic_vic_option: int = Field(default=1, description="NA") + radiative_transfer_option: int = Field(default=3, description="NA") + sfc_drag_coeff_option: int = Field(default=1, description="NA") + canopy_stom_resist_option: int = Field(default=1, description="NA") + crop_model_option: int = Field(default=0, description="NA") + snowsoil_temp_time_option: int = Field(default=3, description="NA") + soil_temp_boundary_option: int = Field(default=2, description="NA") + supercooled_water_option: int = Field(default=1, description="NA") + stomatal_resistance_option: int = Field(default=1, description="NA") + evap_srfc_resistance_option: int = Field(default=4, description="NA") + subsurface_option: int = 
Field(default=2, description="NA") + isltyp: float = Field(..., description="Soil texture class") + nsoil: int = Field(default=4, description="Number of soil levels") + nsnow: int = Field(default=3, description="Number of snow levels") + nveg: int = Field(default=27, description="Number of vegetation type") + vegtyp: int = Field(..., description="Vegetation type") + croptype: int = Field( + default=0, description="Crop type (0 = no crops; this option is currently inactive)" + ) + sfctyp: int = Field(..., description="Land surface type, 1:soil, 2:lake") + soilcolor: int = Field(default=4, description="Soil color code") + dzsnso: list[float] = Field( + default=[0.0, 0.0, 0.0, 0.1, 0.3, 0.6, 1.0], description="Level thickness [m]" + ) + sice: list[float] = Field(default=[0.0, 0.0, 0.0, 0.0], description="Initial soil ice profile [m3/m3]") + sh2o: list[float] = Field(default=[0.3, 0.3, 0.3, 0.3], description="Initial soil liquid profile [m3/m3]") + zwt: int = Field(default=-2.0, description="Initial water table depth below surface [m] ") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + dzsnso = ",".join([str(th_lvl) for th_lvl in self.dzsnso]) + sice = ",".join([str(ice) for ice in self.sice]) + sh2o = ",".join([str(h2o) for h2o in self.sh2o]) + + return [ + f"dt={self.dt} [s]", + f"startdate={self.startdate}", + f"enddate={self.enddate}", + f"forcing_filename={self.forcing_filename}", + f"output_filename={self.output_filename}", + f"parameter_dir={self.parameter_dir}", + f"general_table={self.general_table}", + f"soil_table={self.soil_table}", + f"noahowp_table={self.noahowp_table}", + f"soil_class_name={self.soil_class_name}", + f"veg_class_name={self.veg_class_name}", + f"lat={self.lat} [degrees]", + f"lon={self.lon} [degrees]", + f"terrain_slope={self.terrain_slope} [degrees]", + f"azimuth={self.azimuth} [degrees clockwise from north]", + f"ZREF={self.ZREF} [m]", + f"rain_snow_thresh={self.rain_snow_thresh} [C]", + f"precip_phase_option={self.precip_phase_option}", + f"snow_albedo_option={self.snow_albedo_option}", + f"dynamic_veg_option={self.dynamic_veg_option}", + f"runoff_option={self.runoff_option}", + f"drainage_option={self.drainage_option}", + f"frozen_soil_option={self.frozen_soil_option}", + f"dynamic_vic_option={self.dynamic_vic_option}", + f"radiative_transfer_option={self.radiative_transfer_option}", + f"sfc_drag_coeff_option={self.sfc_drag_coeff_option}", + f"canopy_stom_resist_option={self.canopy_stom_resist_option}", + f"crop_model_option={self.crop_model_option}", + f"snowsoil_temp_time_option={self.snowsoil_temp_time_option}", + f"soil_temp_boundary_option={self.soil_temp_boundary_option}", + f"supercooled_water_option={self.supercooled_water_option}", + f"stomatal_resistance_option={self.stomatal_resistance_option}", + f"evap_srfc_resistance_option={self.evap_srfc_resistance_option}", + f"subsurface_option={self.subsurface_option}", + f"isltyp={self.isltyp}", + f"nsoil={self.nsoil}", + f"nsnow={self.nsnow}", + f"nveg={self.nveg}", + f"vegtyp={self.vegtyp}", + f"croptype={self.croptype}", + f"sfctyp={self.sfctyp}", + f"soilcolor={self.soilcolor}", + f"dzsnso={dzsnso} [m]", + f"sice={sice} [m3/m3]", + f"sh2o={sh2o} [m3/m3]", + f"zwt={self.zwt} [m]", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written 
config file + """ + file_output = self.to_bmi_config() + noahowp_bmi_file = output_path / f"{self.catchment}_bmi_config_noahowp.txt" + with open(noahowp_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return noahowp_bmi_file + + +# TODO: integrate TRoute-config class for creating/validating +class TRoute(BaseModel): + """Pydantic model for T-Route module configuration""" + + # Default values + bmi_param = { + "flowpath_columns": ["id", "toid", "lengthkm"], + "attributes_columns": [ + "attributes_id", + "rl_gages", + "rl_NHDWaterbodyComID", + "MusK", + "MusX", + "n", + "So", + "ChSlp", + "BtmWdth", + "nCC", + "TopWdthCC", + "TopWdth", + ], + "waterbody_columns": [ + "hl_link", + "ifd", + "LkArea", + "LkMxE", + "OrificeA", + "OrificeC", + "OrificeE", + "WeirC", + "WeirE", + "WeirL", + ], + "network_columns": ["network_id", "hydroseq", "hl_uri"], + } + + log_param = {"showtiming": True, "log_level": "DEBUG"} + + ntwk_columns = { + "key": "id", + "downstream": "toid", + "dx": "lengthkm", + "n": "n", + "ncc": "nCC", + "s0": "So", + "bw": "BtmWdth", + "waterbody": "rl_NHDWaterbodyComID", + "gages": "rl_gages", + "tw": "TopWdth", + "twcc": "TopWdthCC", + "musk": "MusK", + "musx": "MusX", + "cs": "ChSlp", + "alt": "alt", + } + + dupseg = [ + "717696", + "1311881", + "3133581", + "1010832", + "1023120", + "1813525", + "1531545", + "1304859", + "1320604", + "1233435", + "11816", + "1312051", + "2723765", + "2613174", + "846266", + "1304891", + "1233595", + "1996602", + "2822462", + "2384576", + "1021504", + "2360642", + "1326659", + "1826754", + "572364", + "1336910", + "1332558", + "1023054", + "3133527", + "3053788", + "3101661", + "2043487", + "3056866", + "1296744", + "1233515", + "2045165", + "1230577", + "1010164", + "1031669", + "1291638", + "1637751", + ] + + nwtopo_param = { + "supernetwork_parameters": { + "network_type": "HYFeaturesNetwork", + "geo_file_path": "", + "columns": ntwk_columns, + "duplicate_wb_segments": dupseg, + }, + "waterbody_parameters": { + "break_network_at_waterbodies": True, + "level_pool": {"level_pool_waterbody_parameter_file_path": ""}, + }, + } + + res_da = { + "reservoir_persistence_da": { + "reservoir_persistence_usgs": False, + "reservoir_persistence_usace": False, + }, + "reservoir_rfc_da": { + "reservoir_rfc_forecasts": False, + "reservoir_rfc_forecasts_time_series_path": None, + "reservoir_rfc_forecasts_lookback_hours": 28, + "reservoir_rfc_forecasts_offset_hours": 28, + "reservoir_rfc_forecast_persist_days": 11, + }, + "reservoir_parameter_file": None, + } + + stream_da = { + "streamflow_nudging": False, + "diffusive_streamflow_nudging": False, + "gage_segID_crosswalk_file": None, + } + + comp_param = { + "parallel_compute_method": "by-subnetwork-jit-clustered", + "subnetwork_target_size": 10000, + "cpu_pool": 16, + "compute_kernel": "V02-structured", + "assume_short_ts": True, + "restart_parameters": {"start_datetime": ""}, + "forcing_parameters": { + "qts_subdivisions": 12, + "dt": 300, + "qlat_input_folder": ".", + "qlat_file_pattern_filter": "nex-*", + "nts": 5, + "max_loop_size": divmod(5 * 300, 3600)[0] + 1, + }, + "data_assimilation_parameters": { + "usgs_timeslices_folder": None, + "usace_timeslices_folder": None, + "timeslice_lookback_hours": 48, + "qc_threshold": 1, + "streamflow_da": stream_da, + "reservoir_da": res_da, + }, + } + + output_param = { + "stream_output": { + "stream_output_directory": ".", + "stream_output_time": divmod(5 * 300, 3600)[0] + 1, + "stream_output_type": ".nc", + "stream_output_internal_frequency": 60, + } + } 
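Note on the hard-coded defaults above: forcing_parameters assumes nts = 5 routing timesteps of dt = 300 seconds, and both max_loop_size and stream_output_time are derived from those same literals as the number of whole hours covered, plus one. A quick check of that arithmetic:

```python
dt = 300   # seconds per routing timestep (forcing_parameters["dt"])
nts = 5    # number of timesteps per loop (forcing_parameters["nts"])

# divmod(total_seconds, 3600)[0] counts complete hours; the +1 pads up to the next hour
hours = divmod(nts * dt, 3600)[0] + 1
assert hours == 1  # matches max_loop_size and stream_output_time above
```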
+
+    model_config = ConfigDict(validate_by_name=True, validate_by_alias=True)
+    catchment: str = Field(..., description="The catchment ID")
+    bmi_param: dict = Field(default=bmi_param, description="BMI Parameters")
+    log_param: dict = Field(default=log_param, description="Log Parameters")
+    nwtopo_param: dict = Field(default=nwtopo_param, description="Network Topology Parameters")
+    comp_param: dict = Field(default=comp_param, description="Compute Parameters")
+    res_da: dict = Field(default=res_da, description="Res DA parameters for computation")
+    stream_da: dict = Field(default=stream_da, description="Stream parameters for computation")
+    output_param: dict = Field(default=output_param, description="Output Parameters")
+    ntwk_columns: dict = Field(default=ntwk_columns, description="A network topology set of parameters")
+    dupseg: list[str] = Field(default=dupseg, description="A network topology set of parameters")
+
+    def to_bmi_config(self) -> list[str]:
+        """Convert the model back to the original config file format"""
+        return [
+            f"bmi_parameters: {self.bmi_param}",
+            f"log_parameters: {self.log_param}",
+            f"network_topology_parameters: {self.nwtopo_param}",
+            f"compute_parameters: {self.comp_param}",
+            f"output_param: {self.output_param}",
+        ]
+
+    def model_dump_config(self, output_path: Path) -> Path:
+        """Outputs the BaseModel to a BMI Config file
+
+        Parameters
+        ----------
+        output_path : Path
+            The path for the config file to be written to
+
+        Returns
+        -------
+        Path
+            The path to the written config file
+        """
+        file_output = self.to_bmi_config()
+        troute_bmi_file = output_path / f"{self.catchment}_bmi_config_troute.txt"
+        with open(troute_bmi_file, "w") as f:
+            f.write("\n".join(file_output))
+        return troute_bmi_file
+
+
+class Topmodel(BaseModel):
+    """Pydantic model for Topmodel module configuration"""
+
+    model_config = ConfigDict(validate_by_name=True, validate_by_alias=True)
+    catchment: str = Field(..., description="The catchment ID")
+    divide_id: str = Field(..., description="The divide ID")
+    num_sub_catchments: int = Field(default=1, description="Number of sub catchments")
+    imap: int = Field(default=1, description="NA")
+    yes_print_output: int = Field(default=1, description="NA")
+    twi: list[dict] = Field(default=[{"twi": "dist_4.twi"}], description="NA")
+    num_topodex_values: int = Field(..., description="NA")
+    area: int = Field(default=1, description="NA")
+    num_channels: int = Field(default=1, description="Number of channels")
+    cum_dist_area_with_dist: float = Field(default=1.0, description="NA")
+    dist_from_outlet: float = Field(..., description="NA")
+    szm: float = Field(default=0.0125, description="Exponential decline parameter of transmissivity")
+    t0: float = Field(
+        default=0.000075, description="Downslope transmissivity when the soil is saturated to the surface"
+    )
+    td: float = Field(default=20, description="Unsaturated zone time delay per unit storage deficit")
+    chv: float = Field(default=1000, description="Average channel flow velocity")
+    rv: float = Field(default=1000, description="Internal overland flow routing velocity")
+    srmax: float = Field(default=0.04, description="Maximum root zone storage deficit")
+    Q0: float = Field(default=0.0000328, description="Initial subsurface flow per unit area")
+    sr0: float = Field(default=0, description="Initial root zone storage deficit below field capacity (m)")
+    infex: float = Field(
+        default=0,
+        description="Whether to call subroutine to do infiltration excess calcs. Not typically appropriate in catchments where TOPMODEL is applicable (i.e., shallow highly permeable soils). 0 = FALSE (default)",
+    )
+    xk0: float = Field(default=2, description="Surface soil hydraulic conductivity")
+    hf: float = Field(default=0.1, description="Wetting front suction for Green & Ampt solution.")
+    dth: float = Field(default=0.1, description="Water content change across the wetting front")
+
+    def to_bmi_config(self) -> list[str]:
+        """Convert the model back to the original config file format"""
+        return [
+            f"catchment={self.catchment}",
+            f"divide_id={self.divide_id}",
+            f"num_sub_catchments={self.num_sub_catchments}",
+            f"imap={self.imap}",
+            f"yes_print_output={self.yes_print_output}",
+            f"twi={self.twi}",
+            f"num_topodex_values={self.num_topodex_values}",
+            f"area={self.area}",
+            f"num_channels={self.num_channels}",
+            f"cum_dist_area_with_dist={self.cum_dist_area_with_dist}",
+            f"dist_from_outlet={self.dist_from_outlet}",
+            f"szm={self.szm}",
+            f"t0={self.t0}",
+            f"td={self.td}",
+            f"chv={self.chv}",
+            f"rv={self.rv}",
+            f"srmax={self.srmax}",
+            f"Q0={self.Q0}",
+            f"sr0={self.sr0}",
+            f"infex={self.infex}",
+            f"xk0={self.xk0}",
+            f"hf={self.hf}",
+            f"dth={self.dth}",
+        ]
+
+    def model_dump_config(self, output_path: Path) -> Path:
+        """Outputs the BaseModel to a BMI Config file
+
+        Parameters
+        ----------
+        output_path : Path
+            The path for the config file to be written to
+
+        Returns
+        -------
+        Path
+            The path to the written config file
+        """
+        file_output = self.to_bmi_config()
+        topmodel_bmi_file = output_path / f"{self.catchment}_bmi_config_topmodel.txt"
+        with open(topmodel_bmi_file, "w") as f:
+            f.write("\n".join(file_output))
+        return topmodel_bmi_file
+
+
+class Topoflow(BaseModel):
+    """
+    Pydantic model for Topoflow module configuration
+
+    *Note: This is a placeholder for Topoflow's BaseModel as the generation of IPEs for
+    Topoflow does not exist currently.
+ + """ + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + return [ + f"catchment={self.catchment}", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + topoflow_bmi_file = output_path / f"{self.catchment}_bmi_config_topoflow.txt" + with open(topoflow_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return topoflow_bmi_file + + +class UEBValues(str, enum.Enum): + """The calibratable values to be used in UEB""" + + JAN_TEMP = 11.04395 + FEB_TEMP = 11.79382 + MAR_TEMP = 12.72711 + APR_TEMP = 13.67701 + MAY_TEMP = 13.70334 + JUN_TEMP = 13.76782 + JUL_TEMP = 13.90212 + AUG_TEMP = 13.9958 + SEP_TEMP = 14.04895 + OCT_TEMP = 13.44001 + NOV_TEMP = 11.90162 + DEC_TEMP = 10.71597 + + +class UEB(BaseModel): + """Pydantic model for UEB module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + aspect: float = Field(..., description="Aspect computed from DEM") + slope: float = Field(..., description="Slope") + longitude: float = Field(..., description="X coordinates of divide centroid") + latitude: float = Field(..., description="Y coordinates of divide centroid") + elevation: float = Field(..., description="Elevation from DEM") + standard_atm_pressure: float = Field(..., description="Standard atmospheric pressuure (atm)") + jan_temp_range: float = Field(default=UEBValues.JAN_TEMP.value, description="Average temperature") + feb_temp_range: float = Field(default=UEBValues.FEB_TEMP.value, description="Average temperature") + mar_temp_range: float = Field(default=UEBValues.MAR_TEMP.value, description="Average temperature") + apr_temp_range: float = Field(default=UEBValues.APR_TEMP.value, description="Average temperature") + may_temp_range: float = Field(default=UEBValues.MAY_TEMP.value, description="Average temperature") + jun_temp_range: float = Field(default=UEBValues.JUN_TEMP.value, description="Average temperature") + jul_temp_range: float = Field(default=UEBValues.JUL_TEMP.value, description="Average temperature") + aug_temp_range: float = Field(default=UEBValues.AUG_TEMP.value, description="Average temperature") + sep_temp_range: float = Field(default=UEBValues.SEP_TEMP.value, description="Average temperature") + oct_temp_range: float = Field(default=UEBValues.OCT_TEMP.value, description="Average temperature") + nov_temp_range: float = Field(default=UEBValues.NOV_TEMP.value, description="Average temperature") + dec_temp_range: float = Field(default=UEBValues.DEC_TEMP.value, description="Average temperature") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + return [ + f"aspect: {self.aspect}", + f"slope: {self.slope}", + f"longitude: {self.longitude}", + f"latitude: {self.latitude}", + f"elevation: {self.elevation}", + f"standard_atm_pressure: {self.standard_atm_pressure}", + f"jan_temp_range: {self.jan_temp_range}", + f"feb_temp_range: {self.feb_temp_range}", + f"mar_temp_range: {self.mar_temp_range}", + f"apr_temp_range: {self.apr_temp_range}", + f"may_temp_range: 
{self.may_temp_range}", + f"jun_temp_range: {self.jun_temp_range}", + f"jul_temp_range: {self.jul_temp_range}", + f"aug_temp_range: {self.aug_temp_range}", + f"sep_temp_range: {self.sep_temp_range}", + f"oct_temp_range: {self.oct_temp_range}", + f"nov_temp_range: {self.nov_temp_range}", + f"dec_temp_range: {self.dec_temp_range}", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() + ueb_bmi_file = output_path / f"{self.catchment}_bmi_config_ueb.txt" + with open(ueb_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return ueb_bmi_file + + +class CFEValues(str, enum.Enum): + """The default values & schemes to be used in UEB""" + + FORCINGFILE = "BMI" + VERBOSITY = 1 + SRFC_RUNOFF_SCHEME = "GIUH" + DEBUG = 0 + NUM_TIMESTEPS = 1 + ICE_CONTENT_THR = 0.15 + SCHAAKE = "Schaake" + XINANJIANG = "Xinanjiang" + A_XINANJIANG_INFLECT = -0.2 + B_XINANJIANG_SHAPE = 0.66 + X_XINANJIANG_SHAPE = 0.02 + SOIL_EXPON = 1 + SOIL_EXPON_SECONDARY = 1 + MAX_GIUH_STORAGE = 0.05 + GW_STORAGE = 0.05 + ALPHA_FC = 0.33 + SOIL_STORAGE = 0.5 + K_NASH = 0.003 + K_LF = 0.01 + NASH_STORAGE = [0.0, 0.0] + GIUH = [0.55, 0.25, 0.2] + URBAN_FRACT = 0.01 + SOIL_DEPTH = 2 + SOIL_WLTSMC = 0.439 + SOIL_SMCMAX = 0.439 + SOIL_SLOP = 0.05 + SOIL_SATPSI = 0.355 + SOIL_SATDK = 0.00000338 + SOIL_B = 4.05 + CGW = 0.000018 + EXPON = 3 + REFKDT = 1 + + +class CFE(BaseModel): + """Pydantic model for CFE module configuration""" + + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + catchment: str = Field(..., description="The catchment ID") + forcing_file: str = Field(default=CFEValues.FORCINGFILE.value, description="NA") + verbosity: int = Field(default=CFEValues.VERBOSITY.value, description="NA") + surface_partitioning_scheme: str = Field(..., description="Selects Xinanjiang or Schaake") + surface_runoff_scheme: str = Field( + default=CFEValues.SRFC_RUNOFF_SCHEME.value, + description="Accepts 1 or GIUH for GIUH and 2 or NASH_CASCADE for Nash Cascade; default is GIUH, version 1 is GIUH, Version 2 is Nash", + ) + debug: int = Field(default=CFEValues.DEBUG.value, description="NA") + num_timesteps: int = Field(default=CFEValues.NUM_TIMESTEPS.value, description="NA") + is_sft_coupled: str = Field( + ..., + description="Optional. Turns on/off the CFE coupling with the SoilFreezeThaw. If this parameter is defined to be True (or 1) in the config file and surface_partitioning_scheme=Schaake, then ice_content_threshold also needs to be defined in the config file.", + ) + ice_content_thresh: float = Field( + default=CFEValues.ICE_CONTENT_THR.value, + description="Optional. This represents the ice content above which soil is impermeable. 
If this is_sft_couple is defined to be True (or 1) in the config file and surface_partitioning_scheme=Schaake, then this also needs to be defined in the config file.", + ) + soil_params_b: float = Field( + default=CFEValues.SOIL_B.value, + description="Beta exponent on Clapp-Hornberger (1978) soil water relations", + ) + soil_params_satdk: float = Field( + default=CFEValues.SOIL_SATDK.value, description="Saturated hydraulic conductivity" + ) + soil_params_satpsi: float = Field( + default=CFEValues.SOIL_SATPSI.value, description="Saturated capillary head" + ) + soil_params_slop: float = Field( + default=CFEValues.SOIL_SLOP.value, + description="This factor (0-1) modifies the gradient of the hydraulic head at the soil bottom. 0=no-flow.", + ) + soil_params_smcmax: float = Field( + default=CFEValues.SOIL_SMCMAX.value, + description="Saturated soil moisture content (Maximum soil moisture content)", + ) + soil_params_wltsmc: float = Field( + default=CFEValues.SOIL_WLTSMC.value, + description="Wilting point soil moisture content (< soil_params.smcmax)", + ) + soil_params_expon: float = Field( + default=CFEValues.SOIL_EXPON.value, + description="Optional; defaults to 1, This parameter defines the soil reservoirs to be linear, Use linear reservoirs", + ) + soil_params_expon_secondary: float = Field( + default=CFEValues.SOIL_EXPON_SECONDARY.value, + description=" Optional; defaults to 1, This parameter defines the soil reservoirs to be linear, Use linear reservoirs", + ) + max_gw_storage: float = Field( + default=CFEValues.MAX_GIUH_STORAGE.value, description="Maximum storage in the conceptual reservoir" + ) + Cgw: float = Field(default=CFEValues.CGW.value, description="Primary outlet coefficient") + expon: float = Field( + default=CFEValues.EXPON.value, + description="Exponent parameter for nonlinear ground water reservoir (1.0 for linear reservoir)", + ) + gw_storage: float = Field( + default=CFEValues.GW_STORAGE.value, + description="Initial condition for groundwater reservoir - it is the ground water as a decimal fraction of the maximum groundwater storage (max_gw_storage) for the initial timestep", + ) + alpha_fc: float = Field( + default=CFEValues.ALPHA_FC.value, description="Alpha at fc for clapp hornberger (field capacity)" + ) + soil_storage: float = Field( + default=CFEValues.SOIL_STORAGE.value, + description="Initial condition for soil reservoir - it is the water in the soil as a decimal fraction of maximum soil water storage (smcmax x depth) for the initial timestep. 
Default = 0.5", + ) + K_nash: float = Field( + default=CFEValues.K_NASH.value, + description="Nash Config param for lateral subsurface runoff (Nash discharge to storage ratio)", + ) + K_lf: float = Field(default=CFEValues.K_LF.value, description="Nash Config param - primary reservoir") + nash_storage: list[float] = Field( + default=CFEValues.NASH_STORAGE.value, description="Nash Config param - secondary reservoir" + ) + giuh_ordinates: list[float] = Field( + default=CFEValues.GIUH.value, + description="Giuh (geomorphological instantaneous unit hydrograph) ordinates in dt time steps", + ) + a_Xinanjiang_inflection_point_parameter: str = Field( + default=CFEValues.A_XINANJIANG_INFLECT.value, + description="When surface_water_partitioning_scheme=Xinanjiang", + ) + b_Xinanjiang_shape_parameter: str = Field( + default=CFEValues.B_XINANJIANG_SHAPE.value, + description="When surface_water_partitioning_scheme=Xinanjiang", + ) + x_Xinanjiang_shape_parameter: str = Field( + default=CFEValues.X_XINANJIANG_SHAPE.value, + description="When surface_water_partitioning_scheme=Xinanjiang", + ) + urban_decimal_fraction: str = Field(..., description="When surface_water_partitioning_scheme=Xinanjiang") + refkdt: float = Field( + default=CFEValues.REFKDT.value, + description="Reference Soil Infiltration Parameter (used in runoff formulation)", + ) + soil_params_depth: float = Field(default=CFEValues.SOIL_DEPTH.value, description="Soil depth") + + def to_bmi_config(self) -> list[str]: + """Convert the model back to the original config file format""" + nash_storage = ",".join([str(n) for n in self.nash_storage]) + giuh_ordinates = ",".join([str(giuh) for giuh in self.giuh_ordinates]) + return [ + f"forcing_file: {self.forcing_file}", + f"verbosity: {self.verbosity}", + f"surface_partitioning_scheme: {self.surface_partitioning_scheme}", + f"surface_runoff_scheme: {self.surface_runoff_scheme}", + f"debug: {self.debug}", + f"num_timesteps: {self.num_timesteps}", + f"is_sft_coupled: {self.is_sft_coupled}", + f"ice_content_thresh: {self.ice_content_thresh}", + f"soil_params.b: {self.soil_params_b}", + f"soil_params.satdk: {self.soil_params_satdk}[m/s]", + f"soil_params.satpsi: {self.soil_params_satpsi}[m]", + f"soil_params.slop: {self.soil_params_slop}[m/m]", + f"soil_params.smcmax: {self.soil_params_smcmax}[m/m]", + f"soil_params.wltsmc: {self.soil_params_wltsmc}[m/m]", + f"soil_params.expon: {self.soil_params_expon}", + f"soil_params.expon_secondary: {self.soil_params_expon_secondary}", + f"max_gw_storage: {self.max_gw_storage}[m]", + f"Cgw: {self.Cgw}[m/hr]", + f"expon: {self.expon}", + f"gw_storage: {self.gw_storage}[m/m]", + f"alpha_fc: {self.alpha_fc}", + f"soil_storage: {self.soil_storage}[m/m]", + f"K_nash: {self.K_nash}[1/m]", + f"K_lf: {self.K_lf}", + f"nash_storage: {nash_storage}", + f"giuh_ordinates: {giuh_ordinates}", + f"a_Xinanjiang_inflection_point_parameter: {self.a_Xinanjiang_inflection_point_parameter}", + f"b_Xinanjiang_shape_parameter: {self.b_Xinanjiang_shape_parameter}", + f"x_Xinanjiang_shape_parameter: {self.x_Xinanjiang_shape_parameter}", + f"urban_decimal_fraction: {self.urban_decimal_fraction}", + f"refkdt: {self.refkdt}", + f"soil_params.depth: {self.soil_params_depth}[m]", + ] + + def model_dump_config(self, output_path: Path) -> Path: + """Outputs the BaseModel to a BMI Config file + + Parameters + ---------- + output_path : Path + The path for the config file to be written to + + Returns + ------- + Path + The path to the written config file + """ + file_output = self.to_bmi_config() 
+ cfe_bmi_file = output_path / f"{self.catchment}_bmi_config_cfe.txt" + with open(cfe_bmi_file, "w") as f: + f.write("\n".join(file_output)) + return cfe_bmi_file diff --git a/src/icefabric/schemas/ras_xs.py b/src/icefabric/schemas/ras_xs.py new file mode 100644 index 0000000..16daf88 --- /dev/null +++ b/src/icefabric/schemas/ras_xs.py @@ -0,0 +1,19 @@ +"""Contains all schemas and enums for the RAS cross-sections""" + +from enum import Enum + + +class XsType(str, Enum): + """The domains used when querying the cross-sections. + + Attributes + ---------- + CONFLATED : str + HEC-RAS data mapped to nearest hydrofabric flowpath. + REPRESENTATIVE : str + The median, representative, cross-sections - derived from + the conflated data set. Used as training/testing inputs for RiverML. + """ + + CONFLATED = "conflated" + REPRESENTATIVE = "representative" diff --git a/src/icefabric/schemas/rise_parameters.py b/src/icefabric/schemas/rise_parameters.py new file mode 100644 index 0000000..9aac95c --- /dev/null +++ b/src/icefabric/schemas/rise_parameters.py @@ -0,0 +1,103 @@ +from pydantic import BaseModel + +# Conversion dict for python-incompatible names found in the RISE API +PARAM_CONV = { + "orderUpdateDate": "order[updateDate]", + "orderRecordTitle": "order[recordTitle]", + "orderLocationName": "order[locationName]", + "orderId": "order[id]", + "orderFulltext": "order[fulltext]", + "orderLikematch": "order[likematch]", + "orderDateTime": "order[dateTime]", + "updateDateBefore": "updateDate[before]", + "updateDateStrictlyBefore": "updateDate[strictly_before]", + "updateDateAfter": "updateDate[after]", + "updateDateStrictlyAfter": "updateDate[strictly_after]", + "dateTimeBefore": "dateTime[before]", + "dateTimeStrictlyBefore": "dateTime[strictly_before]", + "dateTimeAfter": "dateTime[after]", + "dateTimeStrictlyAfter": "dateTime[strictly_after]", + "catalogItemIsModeled": "catalogItem.isModeled", +} + + +class CatItemParams(BaseModel): + """Parameters for the catalog-item resource""" + + page: int | None = 1 + itemsPerPage: int | None = 25 + id: str | None = None + hasItemStructure: bool | None = None + + +class CatRecParams(BaseModel): + """Parameters for the catalog-record resource""" + + page: int | None = 1 + itemsPerPage: int | None = 25 + updateDateBefore: str | None = None + updateDateStrictlyBefore: str | None = None + updateDateAfter: str | None = None + updateDateStrictlyAfter: str | None = None + id: str | None = None + stateId: str | None = None + regionId: str | None = None + unifiedRegionId: str | None = None + locationTypeId: str | None = None + themeId: str | None = None + itemStructureId: str | None = None + generationEffortId: str | None = None + search: str | None = None + hasItemStructure: bool | None = None + hasCatalogItems: bool | None = None + orderUpdateDate: str | None = None + orderRecordTitle: str | None = None + orderId: str | None = None + orderFulltext: str | None = None + orderLikematch: str | None = None + + +class LocItemParams(BaseModel): + """Parameters for the location resource""" + + page: int | None = 1 + itemsPerPage: int | None = 25 + id: str | None = None + stateId: str | None = None + regionId: str | None = None + locationTypeId: str | None = None + themeId: str | None = None + parameterId: str | None = None + parameterTimestepId: str | None = None + parameterGroupId: str | None = None + itemStructureId: str | None = None + unifiedRegionId: str | None = None + generationEffortId: str | None = None + search: str | None = None + catalogItemsIsModeled: bool | 
None = None + hasItemStructure: bool | None = None + hasCatalogItems: bool | None = None + orderUpdateDate: str | None = None + orderLocationName: str | None = None + orderId: str | None = None + orderFulltext: str | None = None + orderLikematch: str | None = None + + +class ResParams(BaseModel): + """Parameters for the result resource""" + + page: int | None = 1 + itemsPerPage: int | None = 25 + id: str | None = None + itemId: str | None = None + modelRunId: str | None = None + locationId: str | None = None + parameterId: str | None = None + dateTimeBefore: str | None = None + dateTimeStrictlyBefore: str | None = None + dateTimeAfter: str | None = None + dateTimeStrictlyAfter: str | None = None + orderDateTime: str | None = None + orderId: str | None = None + catalogItemIsModeled: bool | None = None diff --git a/src/icefabric/schemas/topobathy.py b/src/icefabric/schemas/topobathy.py new file mode 100644 index 0000000..752f042 --- /dev/null +++ b/src/icefabric/schemas/topobathy.py @@ -0,0 +1,81 @@ +"""Contains all schemas and enums for topobathy definitions in the NGWPC S3""" + +from enum import Enum + +from icefabric.builds.icechunk_s3_module import S3Path + +TOPO_BP = "surface/nws-topobathy" +TOPO_NOS = f"{TOPO_BP}/nws-nos-surveys" + + +class FileType(Enum): + """ + Archival weather file types + + Enum class for instantiating different archival weather file + formats. Used when virtualizing and collecting files. + """ + + GEOTIFF = ".tif" + NETCDF = ".nc" + + +class NGWPCLocations(Enum): + """ + Important NGWPC S3 locations + + Enum class for instantiating S3Paths corresponding to the + icechunk stores, as well as the reference locations for virtualized + stores. + """ + + SNODAS_REF = ("ngwpc-forcing", "snodas_nc_v4") + SNODAS_V3 = ("ngwpc-forcing", "snodas_nc") + SNODAS_IC = ("hydrofabric-data", "forcing/snodas") + NLCD_REF = ("ngwpc-hydrofabric", "NLCD_Land_Cover_CONUS") + NLCD_IC = ("hydrofabric-data", "land-cover/NLCD-Land-Cover") + TOPO_AK_10M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_alaska_10m") + TOPO_AK_30M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_alaska_30m") + TOPO_CONUS_ATL_GULF_30M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_conus_atlantic_gulf_30m") + TOPO_CONUS_PAC_30M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_conus_pacific_30m") + TOPO_GREAT_LAKES_30M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_great_lakes_30m") + TOPO_HA_10M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_hawaii_10m") + TOPO_HA_30M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_hawaii_30m") + TOPO_PR_USVI_10M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_pr_usvi_10m") + TOPO_PR_USVI_30M_IC = ("hydrofabric-data", f"{TOPO_BP}/tbdem_pr_usvi_30m") + TOPO_ALBEMARLE_SOUND_IC = ("hydrofabric-data", f"{TOPO_NOS}/Albemarle_Sound_NOS_NCEI") + TOPO_CHESAPEAKE_BAY_IC = ("hydrofabric-data", f"{TOPO_NOS}/Chesapeake_Bay_NOS_NCEI") + TOPO_MOBILE_BAY_IC = ("hydrofabric-data", f"{TOPO_NOS}/Mobile_Bay_NOS_NCEI") + TOPO_TANGIER_SOUND_IC = ("hydrofabric-data", f"{TOPO_NOS}/Tangier_Sound_NOS_NCEI") + + def __init__(self, bucket, prefix): + self.path = S3Path(bucket, prefix) + + +class NGWPCTestLocations(Enum): + """ + Important NGWPC S3 locations + + Enum class for instantiating S3Paths corresponding to the + icechunk stores, as well as the reference locations for virtualized + stores. 
+ """ + + # SNODAS_IC = ("edfs-data", "forcing/snodas") # commenting out until data can be moved from data to test bucket + NLCD_IC = ("edfs-data", "land-cover/NLCD-Land-Cover") + TOPO_AK_10M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_alaska_10m") + TOPO_AK_30M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_alaska_30m") + TOPO_CONUS_ATL_GULF_30M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_conus_atlantic_gulf_30m") + TOPO_CONUS_PAC_30M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_conus_pacific_30m") + TOPO_GREAT_LAKES_30M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_great_lakes_30m") + TOPO_HA_10M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_hawaii_10m") + TOPO_HA_30M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_hawaii_30m") + TOPO_PR_USVI_10M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_pr_usvi_10m") + TOPO_PR_USVI_30M_IC = ("edfs-data", f"{TOPO_BP}/tbdem_pr_usvi_30m") + TOPO_ALBEMARLE_SOUND_IC = ("edfs-data", f"{TOPO_NOS}/Albemarle_Sound_NOS_NCEI") + TOPO_CHESAPEAKE_BAY_IC = ("edfs-data", f"{TOPO_NOS}/Chesapeake_Bay_NOS_NCEI") + TOPO_MOBILE_BAY_IC = ("edfs-data", f"{TOPO_NOS}/Mobile_Bay_NOS_NCEI") + TOPO_TANGIER_SOUND_IC = ("edfs-data", f"{TOPO_NOS}/Tangier_Sound_NOS_NCEI") + + def __init__(self, bucket, prefix): + self.path = S3Path(bucket, prefix) diff --git a/src/icefabric/ui/__init__.py b/src/icefabric/ui/__init__.py new file mode 100644 index 0000000..091f21d --- /dev/null +++ b/src/icefabric/ui/__init__.py @@ -0,0 +1,17 @@ +"""Contains helper functions to support NWM modules""" + +from .geo_utils import ( + create_time_series_widget, + get_geopackage_uri, + get_hydrofabric_gages, + get_observational_uri, + get_streamflow_data, +) + +__all__ = [ + "create_time_series_widget", + "get_geopackage_uri", + "get_observational_uri", + "get_streamflow_data", + "get_hydrofabric_gages", +] diff --git a/src/icefabric/ui/geo_utils.py b/src/icefabric/ui/geo_utils.py new file mode 100644 index 0000000..c07cc28 --- /dev/null +++ b/src/icefabric/ui/geo_utils.py @@ -0,0 +1,208 @@ +import httpx +import ipywidgets as widgets +import matplotlib.pyplot as plt +import pandas as pd +from pyiceberg import expressions +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.exceptions import NoSuchTableError + +from icefabric.schemas.hydrofabric import HydrofabricDomains + + +def get_streamflow_data( + catalog_name: str, + gage_id: str | None = None, + row_filter: str | None = None, + snapshot_id: int | None = None, + **kwargs, +) -> pd.DataFrame: + """Gets streamflow data for the Jupyter UI to display + + Parameters + ---------- + gage_id : str + The gauge ID you are looking to view + catalog_name : str, optional + The pyiceberg catalog name + row_filter : str | None, optional + The row filter to specify a start/end time, by default None + snapshot_id : int | None, optional + the snapshot ID to , by default None + **kwargs + the pyiceberg.yaml file settings + + Returns + ------- + pd.DataFrame + The streamflow output for the specified gauge + + Raises + ------ + NoSuchTableError + There is no existing record for the streamflow values + """ + catalog = load_catalog( + name=catalog_name, + type=kwargs[catalog_name]["type"], + uri=kwargs[catalog_name]["uri"], + warehouse=kwargs[catalog_name]["warehouse"], + ) + try: + table = catalog.load_table("streamflow_observations.usgs_hourly") + if row_filter is None: + df = table.scan(snapshot_id=snapshot_id).to_pandas() + else: + df = table.scan(row_filter=row_filter, snapshot_id=snapshot_id).to_pandas() + if gage_id is not None: + return df[["time", gage_id]] + else: + return df + except NoSuchTableError as e: + msg = 
"No table/namespace found for streamflow_observations.usgs_hourly in the catalog" + print(msg) + raise NoSuchTableError from e + + +def get_hydrofabric_gages( + catalog: Catalog, domain: HydrofabricDomains = HydrofabricDomains.CONUS +) -> list[str]: + """Returns the hydrofabric gages within the network table + + Parameters + ---------- + catalog : Catalog + the pyiceberg warehouse reference + domain : HydrofabricDomains, optional + the hydrofabric domain, by default HydrofabricDomains.CONUS + + Returns + ------- + list[str] + The list of all gages in the conus-hf + """ + return ( + catalog.load_table(f"{domain.value}.network") + .scan(row_filter=expressions.StartsWith("hl_uri", "gages-")) + .to_pandas() + .drop_duplicates(subset="hl_uri", keep="first")[("hl_uri")] + .tolist() + ) + + +def get_observational_uri( + gage_id: str, source: str = "USGS", domain: str = "CONUS", version: str = "2.1", timeout=None +) -> str: + """Fetch observational data URI from the NextGen Water Prediction API. + + Retrieves the URI for observational streamflow data for a specific gage + from the NextGen Water Prediction hydrofabric API. + + Parameters + ---------- + gage_id : str + The gage identifier (e.g., USGS station ID). + source : str, default "USGS" + Data source provider for the observational data. + domain : str, default "CONUS" + Geographic domain name for the data request. + version : str, default "2.1" + API version to use for the request. + timeout : float or None, optional + Request timeout in seconds. If None, uses httpx default. + + Returns + ------- + str + The URI pointing to the observational dataset. + """ + base_url = f"https://hydroapi.oe.nextgenwaterprediction.com/hydrofabric/{version}/observational" + params = {"gage_id": gage_id, "source": source, "domain": domain} + + response = httpx.get(base_url, params=params, timeout=timeout) + response.raise_for_status() # Raise an error if request failed + data = response.json() + + return data["uri"] + + +def get_geopackage_uri( + gage_id: str, source: str = "USGS", domain: str = "CONUS", version: str = "2.2", timeout=None +) -> str: + """Fetch GeoPackage URI for a gage from the NextGen Water Prediction API. + + Retrieves the URI for a hydrofabric GeoPackage containing network topology + and catchment boundaries for a specific gage from the NextGen API. + + Parameters + ---------- + gage_id : str + The gage identifier for which to retrieve the hydrofabric GeoPackage. + source : str, default "USGS" + Data source provider for the gage data. + domain : str, default "CONUS" + Geographic domain name for the hydrofabric request. + version : str, default "2.2" + Hydrofabric version to retrieve. + timeout : float or None, optional + Request timeout in seconds. If None, uses httpx default. + + Returns + ------- + str + The URI pointing to the GeoPackage file in S3 storage. + """ + base_url = "https://hydroapi.oe.nextgenwaterprediction.com/hydrofabric/geopackages" + params = {"gage_id": gage_id, "source": source, "domain": domain, "version": version} + + response = httpx.get(base_url, params=params, timeout=timeout) + response.raise_for_status() + data = response.json() + + return data["uri"] + + +def create_time_series_widget( + df: pd.DataFrame, flow_col: str, point_size: float = 30, time_col: str = "time" +): + """ + Creates an interactive time series plot using matplotlib and ipywidgets. 
+
+    Parameters
+    ----------
+    df : pd.DataFrame
+        DataFrame containing the datetime and streamflow columns to plot
+    flow_col : str
+        Name of the streamflow (discharge) column, in cms
+    point_size : float, optional
+        Marker size for the scatter points, by default 30
+    time_col : str, optional
+        Name of the datetime column, by default "time"
+    """
+    start_slider = widgets.SelectionSlider(
+        options=df[time_col].dt.strftime("%Y-%m-%d %H:%M:%S").tolist(),
+        description="Start:",
+        layout=widgets.Layout(width="95%"),
+    )
+
+    end_slider = widgets.SelectionSlider(
+        options=df[time_col].dt.strftime("%Y-%m-%d %H:%M:%S").tolist(),
+        description="End:",
+        layout=widgets.Layout(width="95%"),
+    )
+
+    @widgets.interact(start=start_slider, end=end_slider)
+    def plot_flow(start, end):
+        start_dt = pd.to_datetime(start)
+        end_dt = pd.to_datetime(end)
+
+        if start_dt >= end_dt:
+            print("Warning: Start must be before End.")
+            return
+
+        filtered = df[(df[time_col] >= start_dt) & (df[time_col] <= end_dt)]
+
+        fig, ax = plt.subplots(figsize=(10, 4))
+        ax.scatter(filtered[time_col], filtered[flow_col], s=point_size, label="Flow rate (cms)", alpha=0.7)
+        ax.set_xlabel("Date Time")
+        ax.set_ylabel("Discharge (cms)")
+        ax.set_title("Streamflow Time Series (Scatter Plot)")
+        ax.grid(True)
+        ax.legend()
+        plt.xticks(rotation=45)
+        plt.tight_layout()
+        plt.show()
diff --git a/terraform/README.md b/terraform/README.md
new file mode 100644
index 0000000..13940e0
--- /dev/null
+++ b/terraform/README.md
@@ -0,0 +1,137 @@
+# AWS S3 Tables with Apache Iceberg - Terraform Implementation
+
+This directory contains PoC Terraform IaC for deploying Apache Iceberg tables using the AWS S3 Tables service with AWS Glue catalog integration. It also contains a basic demo/test Python script (`iceberg_demo.py`) used to verify the deployment.
+
+## Architecture Overview
+
+The infrastructure creates:
+- **AWS S3 Tables bucket** - Managed storage for Iceberg table data
+- **S3 Tables namespace and table** - Logical organization for tables
+- **AWS Glue Catalog database** - Metadata storage for table schemas
+- **Lake Formation permissions** - Access control and governance
+- **IAM policies** - Secure access between services
+
+## Prerequisites
+
+### AWS Requirements
+- AWS CLI configured with appropriate credentials (older versions may not support AWS S3 Tables)
+- Terraform >= 1.0
+- AWS Account with permissions (basically Admin due to IAM requirements) for:
+  - S3 Tables
+  - AWS Glue
+  - Lake Formation
+  - IAM
+
+### ⚠️ Critical: Enable S3 Table Buckets Integration
+
+**This step must be completed before running Terraform**, otherwise the deployment will fail.
+
+1. Navigate to the [S3 Table Buckets Console](https://console.aws.amazon.com/s3tables/home) in your target region
+2. Locate the section titled **"Integration with AWS analytics services"**
+3. Click the **"Enable integration"** button
+4. Confirm that the integration status shows **"Enabled"** for your deployment region
+
+This integration allows services like Athena, Glue, Redshift, and EMR to interact with S3 Table Buckets. Without this step, your Iceberg tables won't be accessible through these analytics services.
+
+> **Note**: This is a one-time setup per AWS region. Once enabled, all future S3 Table Buckets in that region will have access to AWS analytics services integration.
+
+### Python Requirements
+- Python 3.8+
+- pyiceberg Python module with dependencies
+- boto3 (for the AWS SDK)
+
+## Quick Start
+
+### 1. Deploy the Infrastructure
+
+Create a `terraform.tfvars` file, replacing the values below as appropriate for your environment and deployment:
+
+```hcl
+env = "dev"
+application = "myapp"
+team = "NGWPC"
+region = "us-east-1"
+identity_center_role_arn = "arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_DataRole"
+
+# Optional: Specify Lake Formation admins
+lakeformation_admin_arns = [
+  "arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_DataRole",
+  "arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_Admin"
+]
+```
+
+Deploy the infrastructure:
+
+```bash
+terraform init
+terraform plan
+terraform apply
+```
+
+### 2. Set Environment Variables
+
+After deployment, set these environment variables for the Python demo:
+
+```bash
+# From Terraform outputs
+export ICEBERG_WAREHOUSE_PATH=$(terraform output -raw s3tables_table_warehouse_location)
+export AWS_DEFAULT_REGION="us-east-1"
+```
+
+### 3. Install Python Dependencies
+
+Install these in your preferred active virtual environment:
+
+```bash
+pip install pyiceberg boto3 mypy_boto3_glue pyarrow
+```
+
+### 4. Run the Demo
+
+```bash
+python iceberg_demo.py
+```
+
+## Terraform Configuration
+
+### Variables
+
+| Variable | Description | Type | Default | Required |
+|----------|-------------|------|---------|----------|
+| `region` | AWS region | string | `us-east-1` | No |
+| `env` | Environment name (test/oe/other) | string | - | Yes |
+| `application` | Application name | string | - | Yes |
+| `team` | Team name (for future tagging if supported) | string | - | Yes |
+| `identity_center_role_arn` | IAM role ARN for accessing resources | string | - | Yes |
+| `lakeformation_admin_arns` | Lake Formation administrator ARNs | list(string) | `[]` | No |
+
+### Outputs
+
+| Output | Description |
+|--------|-------------|
+| `s3tables_bucket_arn` | ARN of the S3 Tables bucket |
+| `s3tables_table_warehouse_location` | Warehouse location for Iceberg tables (developers need this value) |
+| `glue_database_name` | Name of the Glue catalog database |
+| `lakeformation_admins` | List of Lake Formation administrators |
+
+## Python Integration
+
+### Basic Usage
+
+The provided `iceberg_demo.py` demonstrates:
+- Connecting to the AWS Glue catalog
+- Creating/loading Iceberg tables
+- A very basic schema definition
+
+### Configuration
+
+The Python script uses these environment variables:
+- `ICEBERG_WAREHOUSE_PATH` - S3 Tables warehouse location
+- `AWS_REGION` - AWS region for services
+- `AWS_DEFAULT_REGION` - Default AWS region
+
+## Permissions and Security
+
+### Lake Formation Integration
+
+The infrastructure automatically configures basic Lake Formation settings; these can be made much more granular in the future:
+- Database-level permissions for the specified Identity Center role (SoftwareEngineersFull)
+- Table-level permissions are supported, but have not been tested
diff --git a/terraform/environments/test/.terraform.lock.hcl b/terraform/environments/test/.terraform.lock.hcl
new file mode 100644
index 0000000..abf9ba6
--- /dev/null
+++ b/terraform/environments/test/.terraform.lock.hcl
@@ -0,0 +1,24 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "5.99.1" + hashes = [ + "h1:967WCGUW/vgrjUMBvC+HCie1DVgOXHwUkhm2ng3twJw=", + "zh:00b0a61c6d295300f0aa7a79a7d40e9f836164f1fff816d38324c148cd846887", + "zh:1ee9d5ccb67378704642db62113ac6c0d56d69408a9c1afb9a8e14b095fc0733", + "zh:2035977ed418dcb18290785c1eeb79b7133b39f718c470346e043ac48887ffc7", + "zh:67e3ca1bf7061900f81cf958d5c771a2fd6048c2b185bec7b27978349b173a90", + "zh:87fadbe5de7347ede72ad879ff8d8d9334103cd9aa4a321bb086bfac91654944", + "zh:901d170c457c2bff244a2282d9de595bdb3ebecc33a2034c5ce8aafbcff66db9", + "zh:92c07d6cf530679565b87934f9f98604652d787968cce6a3d24c148479b7e34b", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:a7d4803b4c5ff17f029f8b270c91480442ece27cec7922c38548bcfea2ac2d26", + "zh:afda848da7993a07d29018ec25ab6feda652e01d4b22721da570ce4fcc005292", + "zh:baaf16c98b81bad070e0908f057a97108ecd6e8c9f754d7a79b18df4c8453279", + "zh:c3dd496c5014427599d6b6b1c14c7ebb09a15df78918ae0be935e7bfa83b894c", + "zh:e2b84c1d40b3f2c4b1d74bf170b9e932983b61bac0e6dab2e36f5057ddcc997f", + "zh:e49c92cb29c53b4573ed4d9c946486e6bcfc1b63f1aee0c79cc7626f3d9add03", + "zh:efae8e339c4b13f546e0f96c42eb95bf8347de22e941594849b12688574bf380", + ] +} diff --git a/terraform/environments/test/backend.tf b/terraform/environments/test/backend.tf new file mode 100644 index 0000000..b781b31 --- /dev/null +++ b/terraform/environments/test/backend.tf @@ -0,0 +1,8 @@ +terraform { + backend "s3" { + bucket = "ngwpc-infra-test" + key = "terraform/icefabric/test/edfs/terraform.tfstate" + region = "us-east-1" + encrypt = true # Encrypt the state file + } +} diff --git a/terraform/environments/test/main.tf b/terraform/environments/test/main.tf new file mode 100644 index 0000000..55aa3bd --- /dev/null +++ b/terraform/environments/test/main.tf @@ -0,0 +1,28 @@ +provider "aws" { + region = var.region +} + +module "iceberg" { + source = "../../modules/iceberg" # Path to the iceberg module + env = var.env + team = var.team + application = var.application + identity_center_role_arn = var.identity_center_role_arn + lakeformation_admin_arns = var.lakeformation_admin_arns +} + +output "s3tables_bucket_arn" { + value = module.iceberg.s3tables_bucket_arn +} + +output "s3tables_table_warehouse_location" { + value = module.iceberg.s3tables_table_warehouse_location +} + +output "glue_database_name" { + value = module.iceberg.glue_database_name +} + +output "lakeformation_admins" { + value = module.iceberg.lakeformation_admins +} diff --git a/terraform/environments/test/terraform.tfvars b/terraform/environments/test/terraform.tfvars new file mode 100644 index 0000000..71c8ea7 --- /dev/null +++ b/terraform/environments/test/terraform.tfvars @@ -0,0 +1,8 @@ +# Test Deploy +env = "test" +application = "icefabric" +region = "us-east-1" +team = "EDFS" +identity_center_role_arn = "arn:aws:iam::591210920133:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_SoftwareEngineersFull_529cc40c0355e893" +lakeformation_admin_arns = ["arn:aws:iam::591210920133:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_SoftwareEngineersFull_529cc40c0355e893", + "arn:aws:iam::591210920133:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_AdminLogin_fe24f9db156f282d"] diff --git a/terraform/environments/test/variables.tf b/terraform/environments/test/variables.tf new file mode 100644 index 0000000..dc5c863 --- /dev/null +++ b/terraform/environments/test/variables.tf @@ -0,0 +1,33 @@ +# variables.tf + +variable "region" { + description = "AWS region" + type = string + 
default = "us-east-1" +} + +variable "env" { + description = "Environment name" + type = string +} + +variable "application" { + description = "Application name" + type = string +} + +variable "team" { + description = "Team name" + type = string +} + +variable "identity_center_role_arn" { + description = "ARN of the Identity Center role that will access the resources" + type = string +} + +variable "lakeformation_admin_arns" { + description = "List of ARNs to set as Lake Formation administrators" + type = list(string) + default = [] # Will be populated with current account or specific users/roles +} diff --git a/terraform/iceberg_demo.py b/terraform/iceberg_demo.py new file mode 100644 index 0000000..e113e04 --- /dev/null +++ b/terraform/iceberg_demo.py @@ -0,0 +1,59 @@ +# ruff: noqa: BLE001 +import os + +from pyiceberg.catalog import load_catalog +from pyiceberg.schema import Schema +from pyiceberg.types import LongType, NestedField, StringType + +# Define warehouse path as a variable at the top (replace with your actual path or load dynamically) +WAREHOUSE_PATH = os.getenv("ICEBERG_WAREHOUSE_PATH") +TABLE_SUBPATH = "tables/icefabric" # Please avoid restricted directories + +# Debug: Print AWS region and Warehouse environment variables +print(f"AWS_DEFAULT_REGION from env: {os.getenv('AWS_DEFAULT_REGION')}") +print(f"Using warehouse path: {WAREHOUSE_PATH}") + +# Configure the catalog to use AWS Glue +config = { + "type": "glue", + "s3.endpoint": "s3.us-east-1.amazonaws.com", + "warehouse": f"{WAREHOUSE_PATH}", + "region": "us-east-1", + "glue_region": "us-east-1", +} +print(f"Catalog configuration: {config}") + +try: + catalog = load_catalog("glue", **config) + print("Catalog loaded successfully") +except Exception as e: + print(f"Error loading catalog: {e}") + raise + +# Define a schema for the Iceberg table (used if creating the table) +schema = Schema( + NestedField(field_id=1, name="id", field_type=LongType(), is_optional=False), + NestedField(field_id=2, name="name", field_type=StringType(), is_optional=True), +) + +# Load or create a table in the Glue catalog, pointing to S3 for storage +try: + table = catalog.load_table(("icefabric_db", "icefabric")) + print(f"Table loaded: {str(table)}") +except Exception as e: + print(f"Table not found, creating it: {e}") + try: + table = catalog.create_table( + identifier=("icefabric_db", "icefabric"), schema=schema, location=f"{TABLE_SUBPATH}" + ) + print(f"Table created: {str(table)}") + except Exception as create_error: + print(f"Error creating table: {create_error}") + raise + +# Example: List tables in the catalog to verify +try: + tables = catalog.list_tables("icefabric_db") + print(f"Tables in namespace: {tables}") +except Exception as list_error: + print(f"Error listing tables: {list_error}") diff --git a/terraform/modules/app_service/data.tf b/terraform/modules/app_service/data.tf new file mode 100644 index 0000000..0eae10d --- /dev/null +++ b/terraform/modules/app_service/data.tf @@ -0,0 +1,98 @@ +# modules/stac/data.tf + +# AWS Account +data "aws_caller_identity" "current" {} + +# ALB Service Account IDs for S3 Bucket Policy Permissions +variable "alb_service_account_ids" { + default = { + "us-east-1" = "127311923021" + "us-east-2" = "033677994240" + "us-west-1" = "027434742980" + "us-west-2" = "797873946194" + "us-gov-east-1" = "190560391635" + "us-gov-west-1" = "048591011584" + } +} + +# VPC +data "aws_vpc" "main" { + tags = { + Name = var.vpc_name + } +} + +# Subnets +data "aws_subnets" "private" { + filter { + name = "vpc-id" + 
values = [data.aws_vpc.main.id] + } + + filter { + name = "tag:Name" + values = [var.subnet_name_pattern] + } +} + +# AMI - ARM64 version +data "aws_ami" "ubuntu_arm" { + most_recent = true + owners = ["099720109477"] # Canonical + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-arm64-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } + + filter { + name = "state" + values = ["available"] + } +} + +# AMI +data "aws_ami" "ubuntu" { + most_recent = true + owners = ["099720109477"] # Canonical + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["x86_64"] + } + + filter { + name = "state" + values = ["available"] + } +} diff --git a/terraform/modules/app_service/main.tf b/terraform/modules/app_service/main.tf new file mode 100644 index 0000000..aeb71db --- /dev/null +++ b/terraform/modules/app_service/main.tf @@ -0,0 +1,682 @@ +locals { + common_tags = { + Application = var.app_name + Environment = var.environment + ManagedBy = "terraform" + } +} + +# Security Groups +# The security group for the instance should now only allow traffic from within the appropriate VPCs +resource "aws_security_group" "instance" { + name_prefix = "${var.app_name}-${var.environment}-instance" + description = "Security group for API instances" + vpc_id = data.aws_vpc.main.id + + ingress { + from_port = var.container_port + to_port = var.container_port + protocol = "tcp" + security_groups = var.is_test_env ? null : [aws_security_group.alb[0].id] + cidr_blocks = var.is_test_env ? concat([data.aws_vpc.main.cidr_block], var.additional_vpc_cidrs) : null + } + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = concat([data.aws_vpc.main.cidr_block], var.additional_vpc_cidrs) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}-instance" + }) + + lifecycle { + create_before_destroy = true + } +} + + +# The ALB security group should now only allow internal access +resource "aws_security_group" "alb" { + count = var.is_test_env ? 
0 : 1 + + name_prefix = "${var.app_name}-${var.environment}-alb" + description = "Security group for API load balancer" + vpc_id = data.aws_vpc.main.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = concat([data.aws_vpc.main.cidr_block], var.additional_vpc_cidrs) + } + + ingress { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = concat([data.aws_vpc.main.cidr_block], var.additional_vpc_cidrs) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}-alb" + }) + + lifecycle { + create_before_destroy = true + } +} + +# IAM Resources +resource "aws_iam_role" "instance_role" { + name = "${var.app_name}-${var.environment}-instance-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + } + ] + }) + + lifecycle { + create_before_destroy = true + } + + tags = local.common_tags +} + +resource "aws_lakeformation_permissions" "icefabric_tbl" { + principal = aws_iam_role.instance_role.arn + permissions = ["DESCRIBE", "SELECT"] + catalog_id = data.aws_caller_identity.current.account_id + + lf_tag_policy { + resource_type = "TABLE" + + expression { + key = "Team" + values = ["EDFS"] + } + + expression { + key = "Environment" + values = ["Test"] + } + } +} + +resource "aws_iam_role_policy" "instance_policy" { + name_prefix = "instance-policy" + role = aws_iam_role.instance_role.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ] + Resource = compact([ + var.ad_secret + ]) + }, + { + Sid = "CloudWatchLogsAccess" + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = [ + "${aws_cloudwatch_log_group.api_logs.arn}:*", + aws_cloudwatch_log_group.api_logs.arn + ] + }, + { + Sid = "LakeFormationDataAccess" + Effect = "Allow" + Action = "lakeformation:GetDataAccess" + Resource = "*" + }, + { + Sid = "GlueCatalogReadOnlyAccess" + Effect = "Allow" + Action = [ + "glue:GetDatabase", + "glue:GetDatabases", + "glue:GetTable", + "glue:GetTables", + "glue:GetPartitions" + ] + Resource = [ + var.glue_catalog_arn, + "arn:aws:glue:${var.aws_region}:${data.aws_caller_identity.current.account_id}:database/*", + "arn:aws:glue:${var.aws_region}:${data.aws_caller_identity.current.account_id}:table/*/*" + ] + }, + { + Sid = "S3IcebergDataAccess" + Effect = "Allow" + Action = [ + "s3:GetObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ] + Resource = [ + "${var.data_lake_bucket_arn}", + "${var.data_lake_bucket_arn}/*", + "${var.data_bucket_arn}", + "${var.data_bucket_arn}/*" + ] + } + ] + }) +} + +resource "aws_iam_instance_profile" "instance_profile" { + name = "${var.app_name}-${var.environment}-instance-profile" + role = aws_iam_role.instance_role.name + tags = local.common_tags +} + +resource "aws_iam_role_policy_attachment" "session_manager_logging" { + role = aws_iam_role.instance_role.id + policy_arn = var.session_manager_logging_policy_arn +} + +# Test Environment Resources +resource "aws_instance" "test_instance" { + count = var.is_test_env ? 1 : 0 + + ami = var.ami_id != null ? 
var.ami_id : data.aws_ami.ubuntu.id + instance_type = var.instance_type + + root_block_device { + volume_type = var.root_volume_type + volume_size = var.root_volume_size + encrypted = true + } + + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 1 + } + + iam_instance_profile = aws_iam_instance_profile.instance_profile.name + vpc_security_group_ids = [aws_security_group.instance.id] + subnet_id = data.aws_subnets.private.ids[0] # Use private subnet + associate_public_ip_address = false + + user_data_replace_on_change = true + user_data_base64 = base64encode(templatefile("${path.module}/templates/user_data.sh.tpl", { + aws_region = var.aws_region + container_port = var.container_port + s3_bucket = trimsuffix(var.data_lake_bucket_arn, "/*") + directory_id = var.directory_id, + directory_name = var.directory_name, + ad_secret = var.ad_secret, + ad_dns_1 = var.ad_dns_1, + ad_dns_2 = var.ad_dns_2, + log_group_name = aws_cloudwatch_log_group.api_logs.name + environment = var.environment + docker_image_uri = var.docker_image_uri + deployment_timestamp = var.deployment_timestamp + })) + + tags = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}" + }) + + lifecycle { + create_before_destroy = true + } +} + +# Production Environment Resources +resource "aws_launch_template" "app" { + count = var.is_test_env ? 0 : 1 + + name_prefix = "${var.app_name}-${var.environment}" + image_id = coalesce(var.ami_id, data.aws_ami.ubuntu.id) + instance_type = var.instance_type + update_default_version = true + + network_interfaces { + associate_public_ip_address = false + security_groups = [aws_security_group.instance.id] + delete_on_termination = true + } + + block_device_mappings { + device_name = "/dev/xvda" + ebs { + volume_size = var.root_volume_size + volume_type = var.root_volume_type + encrypted = true + kms_key_id = var.kms_key_arn + delete_on_termination = true + } + } + + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 1 + } + + iam_instance_profile { + name = aws_iam_instance_profile.instance_profile.name + } + + user_data = base64encode(templatefile("${path.module}/templates/user_data.sh.tpl", { + aws_region = var.aws_region + container_port = var.container_port + s3_bucket = trimsuffix(var.data_lake_bucket_arn, "/*") + directory_id = var.directory_id, + directory_name = var.directory_name, + ad_secret = var.ad_secret, + ad_dns_1 = "10.3.1.74", + ad_dns_2 = "10.3.0.60", + log_group_name = aws_cloudwatch_log_group.api_logs.name + environment = var.environment + docker_image_uri = var.docker_image_uri + deployment_timestamp = var.deployment_timestamp + })) + + monitoring { + enabled = true + } + + tag_specifications { + resource_type = "instance" + tags = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}" + }) + } + + tag_specifications { + resource_type = "volume" + tags = local.common_tags + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_autoscaling_group" "app" { + count = var.is_test_env ? 
0 : 1 + + name = "${var.app_name}-${var.environment}" + desired_capacity = var.asg_desired_capacity + max_size = var.asg_max_size + min_size = var.asg_min_size + target_group_arns = [aws_lb_target_group.app[0].arn] + vpc_zone_identifier = data.aws_subnets.private.ids + health_check_grace_period = 900 # Adjust depending on the App / Use Case instantiation time + health_check_type = "ELB" + + launch_template { + id = aws_launch_template.app[0].id + version = "$Latest" + } + + instance_refresh { + strategy = "Rolling" + preferences { + min_healthy_percentage = 100 + instance_warmup = 900 + checkpoint_delay = 900 + checkpoint_percentages = [25, 50, 75, 100] + } + } + + dynamic "tag" { + for_each = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}" + }) + content { + key = tag.key + value = tag.value + propagate_at_launch = true + } + } + + lifecycle { + create_before_destroy = true + ignore_changes = [desired_capacity] + } + + depends_on = [aws_lb.app] +} + +# Load Balancer Resources +resource "aws_lb" "app" { + count = var.is_test_env ? 0 : 1 + + name = "${var.app_name}-${var.environment}" + internal = true # Make ALB internal since we're in private subnets + load_balancer_type = "application" + security_groups = [aws_security_group.alb[0].id] + subnets = data.aws_subnets.private.ids # Use private subnets + idle_timeout = 600 # Default is 60 seconds, but some geopackage GETS take a long time. + + enable_deletion_protection = var.enable_deletion_protection + + access_logs { + bucket = aws_s3_bucket.alb_logs[0].id + prefix = "${var.app_name}-${var.environment}" + enabled = true + } + + tags = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}" + }) +} + +resource "aws_lb_target_group" "app" { + count = var.is_test_env ? 0 : 1 + + name = "${var.app_name}-${var.environment}" + port = var.container_port + protocol = "HTTP" + vpc_id = data.aws_vpc.main.id + + health_check { + enabled = true + healthy_threshold = 3 + interval = 30 + matcher = "200" # Accept 200 from the version endpoint + path = "/version/" + port = "traffic-port" + timeout = 10 + unhealthy_threshold = 3 + } + + tags = merge(local.common_tags, { + Name = "${var.app_name}-${var.environment}" + }) + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_lb_listener" "https" { + count = var.is_test_env || var.certificate_arn == null ? 0 : 1 + + load_balancer_arn = aws_lb.app[0].arn + port = 443 + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS-1-2-2017-01" + certificate_arn = var.certificate_arn + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.app[0].arn + } +} + +resource "aws_lb_listener" "http_redirect" { + count = var.is_test_env ? 0 : 1 + + load_balancer_arn = aws_lb.app[0].arn + port = 80 + protocol = "HTTP" + + default_action { + type = "redirect" + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +# ALB Logs Bucket +resource "aws_s3_bucket" "alb_logs" { + count = var.is_test_env ? 0 : 1 + bucket = "${var.app_name}-${var.environment}-alb-logs-${data.aws_caller_identity.current.account_id}" + + lifecycle { + prevent_destroy = false + } + + tags = local.common_tags +} + +resource "aws_s3_bucket_versioning" "alb_logs" { + count = var.is_test_env ? 0 : 1 + bucket = aws_s3_bucket.alb_logs[0].id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "alb_logs" { + count = var.is_test_env ? 
0 : 1 + bucket = aws_s3_bucket.alb_logs[0].id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = var.kms_key_arn == null ? "AES256" : "aws:kms" + kms_master_key_id = var.kms_key_arn # Will use AES256 if null + } + } +} + +resource "aws_s3_bucket_lifecycle_configuration" "alb_logs" { + count = var.is_test_env ? 0 : 1 + bucket = aws_s3_bucket.alb_logs[0].id + + rule { + id = "cleanup_old_logs" + status = "Enabled" + + # Filter block with an empty prefix applies the rule to all objects + filter { + prefix = "" + } + + expiration { + days = 90 + } + } +} + +resource "aws_s3_bucket_policy" "alb_logs" { + count = var.is_test_env ? 0 : 1 + bucket = aws_s3_bucket.alb_logs[0].id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::${lookup(var.alb_service_account_ids, var.aws_region)}:root" + } + Action = "s3:PutObject" + Resource = [ + "${aws_s3_bucket.alb_logs[0].arn}/*", + ] + }, + { + Effect = "Allow" + Principal = { + Service = "delivery.logs.amazonaws.com" + } + Action = "s3:PutObject" + Resource = [ + "${aws_s3_bucket.alb_logs[0].arn}/*", + ] + Condition = { + StringEquals = { + "s3:x-amz-acl": "bucket-owner-full-control" + } + } + }, + { + Effect = "Allow" + Principal = { + Service = "delivery.logs.amazonaws.com" + } + Action = "s3:GetBucketAcl" + Resource = aws_s3_bucket.alb_logs[0].arn + }, + { + Effect = "Deny" + Principal = "*" + Action = "s3:*" + Resource = [ + aws_s3_bucket.alb_logs[0].arn, + "${aws_s3_bucket.alb_logs[0].arn}/*" + ] + Condition = { + Bool = { + "aws:SecureTransport": "false" + } + } + } + ] + }) +} + +resource "aws_s3_bucket_public_access_block" "alb_logs" { + count = var.is_test_env ? 0 : 1 + bucket = aws_s3_bucket.alb_logs[0].id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +# Route 53 Records +resource "aws_route53_record" "test" { + count = var.is_test_env ? 1 : 0 + + zone_id = var.hosted_zone_id + name = "${var.app_name}.${var.environment}.nextgenwaterprediction.com" + type = "A" + ttl = 300 + + records = [ + aws_instance.test_instance[0].private_ip + ] +} + +resource "aws_route53_record" "app" { + count = var.is_test_env ? 0 : 1 + + zone_id = var.hosted_zone_id + name = "${var.app_name}.${var.environment}.nextgenwaterprediction.com" + type = "A" + + alias { + name = aws_lb.app[0].dns_name + zone_id = aws_lb.app[0].zone_id + evaluate_target_health = true + } +} + +# CloudWatch Resources +resource "aws_cloudwatch_log_group" "api_logs" { + name = "/aws/ec2/${var.app_name}-${var.environment}" + retention_in_days = var.log_retention_days + + tags = local.common_tags +} + +resource "aws_cloudwatch_metric_alarm" "high_cpu" { + count = var.is_test_env || var.sns_alert_topic_arn == null ? 0 : 1 + + alarm_name = "${var.app_name}-${var.environment}-high-cpu" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 300 + statistic = "Average" + threshold = 80 + alarm_description = "High CPU utilization for ${var.app_name} in ${var.environment}" + alarm_actions = [var.sns_alert_topic_arn] + ok_actions = [var.sns_alert_topic_arn] + + dimensions = { + AutoScalingGroupName = aws_autoscaling_group.app[0].name + } + + tags = local.common_tags +} + +resource "aws_cloudwatch_metric_alarm" "high_memory" { + count = var.is_test_env || var.sns_alert_topic_arn == null ? 
0 : 1 + + alarm_name = "${var.app_name}-${var.environment}-high-memory" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + metric_name = "MemoryUtilization" + namespace = "System/Linux" + period = 300 + statistic = "Average" + threshold = 80 + alarm_description = "High memory utilization for ${var.app_name} in ${var.environment}" + alarm_actions = [var.sns_alert_topic_arn] + ok_actions = [var.sns_alert_topic_arn] + + dimensions = { + AutoScalingGroupName = aws_autoscaling_group.app[0].name + } + + tags = local.common_tags +} + +resource "aws_cloudwatch_metric_alarm" "high_5xx_errors" { + count = var.is_test_env || var.sns_alert_topic_arn == null ? 0 : 1 + + alarm_name = "${var.app_name}-${var.environment}-high-5xx" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + metric_name = "HTTPCode_Target_5XX_Count" + namespace = "AWS/ApplicationELB" + period = 300 + statistic = "Sum" + threshold = 10 + alarm_description = "High 5XX error count for ${var.app_name} in ${var.environment}" + alarm_actions = [var.sns_alert_topic_arn] + ok_actions = [var.sns_alert_topic_arn] + + dimensions = { + LoadBalancer = aws_lb.app[0].arn_suffix + } + + tags = local.common_tags +} \ No newline at end of file diff --git a/terraform/modules/app_service/outputs.tf b/terraform/modules/app_service/outputs.tf new file mode 100644 index 0000000..2f935ef --- /dev/null +++ b/terraform/modules/app_service/outputs.tf @@ -0,0 +1,49 @@ +output "instance_security_group_id" { + description = "ID of the instance security group" + value = aws_security_group.instance.id +} + +output "alb_security_group_id" { + description = "ID of the ALB security group" + value = var.is_test_env ? null : aws_security_group.alb[0].id +} + +output "instance_role_arn" { + description = "ARN of the instance IAM role" + value = aws_iam_role.instance_role.arn +} + +output "cloudwatch_log_group_name" { + description = "Name of the CloudWatch log group" + value = aws_cloudwatch_log_group.api_logs.name +} + +output "alb_dns_name" { + description = "DNS name of the ALB" + value = var.is_test_env ? null : aws_lb.app[0].dns_name +} + +output "endpoint" { + description = "API endpoint URL" + value = var.is_test_env ? "http://${aws_route53_record.test[0].name}" : "https://${aws_route53_record.app[0].name}" +} + +output "asg_name" { + description = "Name of the Auto Scaling Group" + value = var.is_test_env ? null : aws_autoscaling_group.app[0].name +} + +output "instance_id" { + description = "ID of the test instance (test environment only)" + value = var.is_test_env ? aws_instance.test_instance[0].id : null +} + +output "alb_logs_bucket" { + description = "Name of the S3 bucket storing ALB logs" + value = var.is_test_env ? null : aws_s3_bucket.alb_logs[0].id +} + +output "ami_id" { + description = "AMI ID being used for EC2 instances" + value = var.ami_id != null ? 
var.ami_id : data.aws_ami.ubuntu.id +} diff --git a/terraform/modules/app_service/templates/user_data.sh.tpl b/terraform/modules/app_service/templates/user_data.sh.tpl new file mode 100644 index 0000000..12c10dc --- /dev/null +++ b/terraform/modules/app_service/templates/user_data.sh.tpl @@ -0,0 +1,315 @@ +#!/bin/bash +set -euo pipefail + +# DEBUG logs everything to a file including secrets so only use for DEBUG +#exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 +#set -x + + +# === Fetch instance metadata === +TOKEN=$(curl -sX PUT "http://169.254.169.254/latest/api/token" \ + -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") +LOCAL_IP=$(curl -sH "X-aws-ec2-metadata-token: $TOKEN" \ + http://169.254.169.254/latest/meta-data/local-ipv4) +INSTANCE_ID=$(curl -sH "X-aws-ec2-metadata-token: $TOKEN" \ + http://169.254.169.254/latest/meta-data/instance-id) + +# === DNS and Hostname Configuration === +AD_DNS_1="${ad_dns_1}" +AD_DNS_2="${ad_dns_2}" +DOMAIN_NAME="${directory_name}" +REALM_NAME=$(echo "$DOMAIN_NAME" | tr '[:lower:]' '[:upper:]') + +# === Helper Functions === +# Wait for APT: Implemented when I found intermittant user data failures +# due to apt being locked by cloud-init or unattended-upgrades +wait_for_apt_lock() { + for i in {1..20}; do + if ! fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; then + return + fi + echo "Waiting for APT lock... ($i)" + sleep 5 + done + echo "APT lock timeout." + exit 1 +} + +fetch_secret() { + local arn="$1" + for i in {1..10}; do + secret=$(aws secretsmanager get-secret-value \ + --secret-id "$arn" \ + --region "${aws_region}" \ + --query SecretString \ + --output text 2>/dev/null) || true + + if [[ -n "$secret" ]]; then + echo "$secret" + return 0 + fi + sleep 5 + done + echo "Failed to fetch secret from $arn" + exit 1 +} + +# Use netplan for persistent DNS configuration +INTERFACE=$(ip route | grep default | sed -e "s/^.*dev.//" -e "s/.proto.*//") +cat > /etc/netplan/99-custom-dns.yaml < /etc/hosts < /etc/krb5.conf < /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json </dev/null || true) +if [ -n "$CONF_FILE" ]; then + echo "Found and disabled override in: $CONF_FILE" + # Comment out line to disable override + sed -i 's/^\s*PasswordAuthentication\s\+no/#&/' "$CONF_FILE" +fi + +# Restart SSHD +echo "Restarting SSH service..." +systemctl restart sshd + +# === Join Domain Configuration === +#AD_OU = "OU=Computers,OU=nextgenwater,DC=nextgenwaterprediction,DC=com" +AD_SECRET_ARN="${ad_secret}" + +# Group to grant ssh access (no sudo though, currently) +AD_GROUP="SoftwareEngineersFull" + +# Fetch domain join credentials +echo "Fetching AD credentials..." +AD_CREDS=$(fetch_secret "$AD_SECRET_ARN") +AD_USER=$(echo "$AD_CREDS" | jq -r .UserID) +AD_PASS=$(echo "$AD_CREDS" | jq -r .Password) + +# Use direct adcli join command. +# I couldn't get the realm commands to play nice with our DNS. +echo "Joining domain using adcli..." 
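+# The AD password is piped in via --stdin-password below so it never appears in
+# the process argument list; the join targets the domain controller at AD_DNS_1 directly.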
+echo "$AD_PASS" | adcli join \ + --verbose \ + --domain "$DOMAIN_NAME" \ + --domain-realm "$REALM_NAME" \ + --domain-controller "$AD_DNS_1" \ + --computer-name "$COMPUTER_NAME" \ + --login-type user \ + --login-user "$AD_USER" \ + --stdin-password + +# === Configure Access and SSSD === +# Configure SSSD for user/group lookups and authentication +cat > /etc/sssd/sssd.conf < /opt/icefabric/.env < /opt/icefabric/docker-compose.yml <> /app/logs/app.log 2>&1" + healthcheck: + test: ["CMD", "curl", "-f", "--head", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 120s +EOF + +# === Compose Systemd Service === +cat > /etc/systemd/system/icefabric.service < /etc/logrotate.d/icefabric <<'EOF' +/opt/icefabric/logs/*.log { + rotate 7 + daily + compress + size 50M + missingok + delaycompress + copytruncate +} +EOF + +echo "User data complete for ${environment} instance at $LOCAL_IP" \ No newline at end of file diff --git a/terraform/modules/app_service/variables.tf b/terraform/modules/app_service/variables.tf new file mode 100644 index 0000000..564f3a9 --- /dev/null +++ b/terraform/modules/app_service/variables.tf @@ -0,0 +1,235 @@ +variable "environment" { + description = "Environment name (test or oe)" + type = string + + validation { + condition = contains(["test", "oe"], var.environment) + error_message = "Environment must be either 'test' or 'oe'." + } +} + +variable "aws_region" { + description = "AWS region to deploy resources" + type = string +} + +variable "app_name" { + description = "Name of the application for resource naming" + type = string +} + +variable "docker_image_uri" { + description = "The full URI of the Docker image to deploy (e.g., ghcr.io/ngwpc/icefabric:latest)" + type = string +} + +variable "vpc_name" { + description = "Name of the VPC to deploy into" + type = string + default = "main" +} + +variable "subnet_name_pattern" { + description = "Pattern to match for target subnets in the VPC" + type = string + default = "App*" +} + +variable "data_lake_bucket_arn" { + description = "ARN of the S3 bucket for the Iceberg data lake." + type = string +} + +variable "data_bucket_arn" { + description = "ARN of the S3 bucket for app data." + type = string +} + +variable "glue_catalog_arn" { + description = "ARN of the Glue Catalog." + type = string +} + +variable "hosted_zone_id" { + description = "Route53 hosted zone ID for DNS records" + type = string +} + +variable "ami_id" { + description = "AMI ID for EC2 instances. If not provided, latest ubuntu-jammy-22.04-amd64-server AMI will be used." + type = string + default = null + + validation { + condition = var.ami_id == null || can(regex("^ami-[a-f0-9]{17}$", var.ami_id)) + error_message = "If provided, AMI ID must be valid (e.g., ami-123456789abcdef01)." 
+ } +} + +variable "instance_type" { + description = "EC2 instance type" + type = string +} + +variable "root_volume_type" { + description = "Type of root volume (gp2, gp3, io1, etc.)" + type = string + default = "gp3" +} + +variable "root_volume_size" { + description = "Size of root volume in GB" + type = number + default = 20 +} + +variable "is_test_env" { + description = "Whether this is a test environment (true for single instance, false for ASG/ALB)" + type = bool +} + +variable "asg_min_size" { + description = "Minimum number of instances in the ASG" + type = number + default = 1 +} + +variable "asg_max_size" { + description = "Maximum number of instances in the ASG" + type = number + default = 3 +} + +variable "asg_desired_capacity" { + description = "Desired number of instances in the ASG" + type = number + default = 2 +} + +variable "certificate_arn" { + description = "ARN of ACM certificate for HTTPS. Required only for non-test, load balanced environments." + type = string + default = null + + validation { + condition = var.certificate_arn == null || can(regex("^arn:aws:acm:[a-z0-9-]+:\\d{12}:certificate/[a-zA-Z0-9-]+$", var.certificate_arn)) + error_message = "If provided, must be a valid ACM certificate ARN." + } +} + +variable "container_port" { + description = "Port that the container listens on" + type = number + default = 8000 +} + +variable "health_check_path" { + description = "Path for ALB health check" + type = string + default = "/health" +} + +variable "health_check_interval" { + description = "Interval for health checks (in seconds)" + type = number + default = 15 +} + +variable "health_check_timeout" { + description = "Timeout for health checks (in seconds)" + type = number + default = 5 +} + +variable "health_check_healthy_threshold" { + description = "Number of consecutive successful health checks before considering target healthy" + type = number + default = 2 +} + +variable "health_check_unhealthy_threshold" { + description = "Number of consecutive failed health checks before considering target unhealthy" + type = number + default = 2 +} + +variable "log_retention_days" { + description = "Number of days to retain CloudWatch logs" + type = number + default = 30 +} + +variable "sns_alert_topic_arn" { + description = "SNS topic ARN for CloudWatch alarms. Required only for non-test environments." + type = string + default = null + + validation { + condition = var.sns_alert_topic_arn == null || can(regex("^arn:aws:sns:[a-z0-9-]+:\\d{12}:[a-zA-Z0-9-_]+$", var.sns_alert_topic_arn)) + error_message = "If provided, must be a valid SNS topic ARN." + } +} + +variable "enable_deletion_protection" { + description = "Enable deletion protection for ALB" + type = bool + default = true +} + +variable "kms_key_arn" { + description = "ARN of KMS key for encryption. If not provided, AWS managed keys will be used." + type = string + default = null +} + +variable "session_manager_logging_policy_arn" { + description = "ARN of the Session Manager logging policy" + type = string + default = "arn:aws:iam::591210920133:policy/AWSAccelerator-SessionManagerLogging" +} + +variable "additional_vpc_cidrs" { + description = "List of additional VPC CIDR blocks that should have access to the instance in test environment" + type = list(string) + default = [] + + validation { + condition = alltrue([for cidr in var.additional_vpc_cidrs : can(regex("^([0-9]{1,3}\\.){3}[0-9]{1,3}/[0-9]{1,2}$", cidr))]) + error_message = "All CIDR blocks must be valid IPv4 CIDR notation (e.g., '10.0.0.0/16')." 
+ } +} + +variable "directory_id" { + description = "ID of the AWS Managed Microsoft AD directory for Windows instances" + type = string + default = "" +} + +variable "directory_name" { + description = "Fully qualified domain name of the AWS Managed Microsoft AD" + type = string + default = "" +} + +variable "ad_secret" { + description = "ARN of the Secrets Manager secret containing AD join credentials" + type = string +} + +variable "ad_dns_1" { + description = "Primary DNS server IP address for the AWS Managed Microsoft AD" + type = string + default = "" +} + +variable "ad_dns_2" { + description = "Secondary DNS server IP address for the AWS Managed Microsoft AD" + type = string + default = "" +} + +variable "deployment_timestamp" { + description = "Timestamp to force redeployment of the container (format: YYYYMMDDHHMMSS)" + type = string + default = null +} \ No newline at end of file diff --git a/terraform/modules/iceberg/main.tf b/terraform/modules/iceberg/main.tf new file mode 100644 index 0000000..229e27a --- /dev/null +++ b/terraform/modules/iceberg/main.tf @@ -0,0 +1,142 @@ +provider "aws" { + region = var.region +} + +data "aws_caller_identity" "current" {} + +resource "aws_s3tables_table_bucket" "icefabric" { + name = "${var.env}-${var.application}" +} + +resource "aws_s3tables_namespace" "icefabric" { + namespace = var.application + table_bucket_arn = aws_s3tables_table_bucket.icefabric.arn +} + +resource "aws_s3tables_table" "icefabric" { + name = var.application + namespace = aws_s3tables_namespace.icefabric.namespace + table_bucket_arn = aws_s3tables_table_bucket.icefabric.arn + format = "ICEBERG" +} + +data "aws_iam_policy_document" "icefabric_bucket_policy_document" { + statement { + sid = "AllowGlueAccess" + effect = "Allow" + principals { + type = "Service" + identifiers = ["glue.amazonaws.com"] + } + actions = [ + "s3tables:*" + ] + resources = [ + "${aws_s3tables_table_bucket.icefabric.arn}/*", + aws_s3tables_table_bucket.icefabric.arn + ] + } + statement { + sid = "AllowAccountAccess" + effect = "Allow" + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + } + actions = [ + "s3tables:*" + ] + resources = [ + "${aws_s3tables_table_bucket.icefabric.arn}/*", + aws_s3tables_table_bucket.icefabric.arn + ] + } +} + +resource "aws_s3tables_table_bucket_policy" "icefabric_policy" { + resource_policy = data.aws_iam_policy_document.icefabric_bucket_policy_document.json + table_bucket_arn = aws_s3tables_table_bucket.icefabric.arn +} + +# AWS Glue Catalog Database for Iceberg Tables Metadata +resource "aws_glue_catalog_database" "icefabric_db" { + name = "icefabric_db" + description = "Glue database for Iceberg tables in ${var.application} namespace" +} + +# Grant Lake Formation permissions for the database +resource "aws_lakeformation_permissions" "database_permissions" { + principal = var.identity_center_role_arn + permissions = [ + "CREATE_TABLE", + "DESCRIBE", + "ALTER" + ] + permissions_with_grant_option = [ + "CREATE_TABLE", + "DESCRIBE", + "ALTER" + ] + database { + name = aws_glue_catalog_database.icefabric_db.name + } +} + +# EDFS currently plans to manage Tables outside of Terraform, but one can grant permissions +# for table(s) (if it exists or after creation) in terraform +#resource "aws_lakeformation_permissions" "table_permissions" { +# principal = var.identity_center_role_arn +# permissions = [ +# "SELECT", +# "INSERT", +# "DELETE", +# "ALTER", +# "DESCRIBE" +# ] +# permissions_with_grant_option = [ +# 
"SELECT", +# "INSERT", +# "DELETE", +# "ALTER", +# "DESCRIBE" +# ] +# table { +# database_name = aws_glue_catalog_database.icefabric_db.name +# name = "icefabric" +# } +#} + +# Set Lake Formation Data Lake Settings (initialize Lake Formation) +resource "aws_lakeformation_data_lake_settings" "main" { + # Define Lake Formation administrators (This shows up as a popup when you enter the console if we don't set it via IaC.) + admins = length(var.lakeformation_admin_arns) > 0 ? var.lakeformation_admin_arns : [ + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root" + ] + + # Optional: Allow external data filtering (for cross-account access) + allow_external_data_filtering = false + + # Optional: Allow full table external data access + allow_full_table_external_data_access = false + + # Trusted resource owners (for cross-account scenarios) + trusted_resource_owners = [data.aws_caller_identity.current.account_id] +} + + +# Outputs +output "s3tables_bucket_arn" { + value = aws_s3tables_table_bucket.icefabric.arn +} + +output "s3tables_table_warehouse_location" { + value = aws_s3tables_table.icefabric.warehouse_location +} + +output "glue_database_name" { + value = aws_glue_catalog_database.icefabric_db.name +} + +output "lakeformation_admins" { + value = aws_lakeformation_data_lake_settings.main.admins +} diff --git a/terraform/modules/iceberg/variables.tf b/terraform/modules/iceberg/variables.tf new file mode 100644 index 0000000..dc5c863 --- /dev/null +++ b/terraform/modules/iceberg/variables.tf @@ -0,0 +1,33 @@ +# variables.tf + +variable "region" { + description = "AWS region" + type = string + default = "us-east-1" +} + +variable "env" { + description = "Environment name" + type = string +} + +variable "application" { + description = "Application name" + type = string +} + +variable "team" { + description = "Team name" + type = string +} + +variable "identity_center_role_arn" { + description = "ARN of the Identity Center role that will access the resources" + type = string +} + +variable "lakeformation_admin_arns" { + description = "List of ARNs to set as Lake Formation administrators" + type = list(string) + default = [] # Will be populated with current account or specific users/roles +} diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/app/test_hydrofabric_router.py b/tests/app/test_hydrofabric_router.py new file mode 100644 index 0000000..ef97f7f --- /dev/null +++ b/tests/app/test_hydrofabric_router.py @@ -0,0 +1,11 @@ +import pytest + + +@pytest.mark.slow +def test_subset_hl_uri(remote_client, gauge_hf_uri: str): + """Test: /v1/hydrofabric/{gauge_hf_uri}/gpkg""" + response = remote_client.get( + f"/v1/hydrofabric/{gauge_hf_uri}/gpkg?id_type=hl_uri&domain=conus_hf&layers=divides&layers=flowpaths&layers=network&layers=nexus" + ) + + assert response.status_code == 200, f"Request failed with status {response.status_code}: {response.text}" diff --git a/tests/app/test_main.py b/tests/app/test_main.py new file mode 100644 index 0000000..ced3427 --- /dev/null +++ b/tests/app/test_main.py @@ -0,0 +1,4 @@ +def test_health_with_fixture(client): + """Test using the client fixture from conftest.py.""" + response = client.head("/health") + assert response.status_code == 200 diff --git a/tests/app/test_rise_router.py b/tests/app/test_rise_router.py new file mode 100644 index 0000000..19a6601 --- /dev/null +++ b/tests/app/test_rise_router.py @@ -0,0 +1,63 @@ +import json + +import pytest + +from app.routers.rise_wrappers.router 
import EXT_RISE_BASE_URL, make_get_req_to_rise + +resources = ["catalog-item", "catalog-record", "location", "result"] + +good_ids = ["10835", "4462", "3672", "3672"] + +bad_ids = ["0", "0", "0", "0"] + + +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize("resource_type,id", list(zip(resources, good_ids, strict=False))) +async def test_get_item_by_id_good(client, resource_type, id): + """Test all per-ID endpoints for IDs that exist""" + response = client.get(f"/v1/rise/{resource_type}/{id}") + assert response.status_code == 200 + rise_direct_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/{resource_type}/{id}") + assert json.loads(response.text) == rise_direct_response["detail"] + + +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize("resource_type,id", list(zip(resources, bad_ids, strict=False))) +async def test_get_item_by_id_bad(client, resource_type, id): + """Test all per-ID endpoints for IDs that do not exist""" + response = client.get(f"/v1/rise/{resource_type}/{id}") + assert response.status_code == 404 + rise_direct_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/{resource_type}/{id}") + assert json.loads(response.text)["detail"] == rise_direct_response["detail"] + + +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize("resource_type", resources) +async def test_get_collection(client, resource_type): + """Test every resource collection endpoint - all default parameters""" + # TODO - remove skip once the RISE api/result endpoint is no longer timing out + if resource_type == "result": + pytest.skip(f"Skipping {resource_type} endpoint test until RISE API endpoint stops timing out.") + response = client.get(f"/v1/rise/{resource_type}") + assert response.status_code == 200 + rise_direct_response = await make_get_req_to_rise(f"{EXT_RISE_BASE_URL}/{resource_type}") + assert json.loads(response.text)["data"] == rise_direct_response["detail"]["data"] + + +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize("resource_type", resources) +async def test_get_collection_set_of_ids(client, resource_type): + """Test every resource collection endpoint with a set of three IDs""" + # TODO - remove skip once the RISE api/result endpoint is no longer timing out + if resource_type == "result": + pytest.skip(f"Skipping {resource_type} endpoint test until RISE API endpoint stops timing out.") + response = client.get(f"/v1/rise/{resource_type}?id=3672,4462,10835") + assert response.status_code == 200 + rise_direct_response = await make_get_req_to_rise( + f"{EXT_RISE_BASE_URL}/{resource_type}?id=3672,4462,10835" + ) + assert json.loads(response.text)["data"] == rise_direct_response["detail"]["data"] diff --git a/tests/app/test_streamflow_observations_router.py b/tests/app/test_streamflow_observations_router.py new file mode 100644 index 0000000..aaff87a --- /dev/null +++ b/tests/app/test_streamflow_observations_router.py @@ -0,0 +1,86 @@ +from io import BytesIO, StringIO + +import pandas as pd +import pytest + + +@pytest.mark.integration +def test_sources_endpoint(remote_client): + """Test: GET /streamflow_observations/sources""" + response = remote_client.get("/v1/streamflow_observations/sources") + assert response.status_code == 200 + + data = response.json() + assert "available_sources" in data + assert "total_sources" in data + + sources = data["available_sources"] + usgs_source = next((s for s in sources if s["name"] == "usgs"), None) + assert usgs_source is not None + assert 
usgs_source["description"] == "USGS stream gauge hourly data" + assert usgs_source["units"] == "cms" + + +@pytest.mark.integration +def test_available_identifiers_example(remote_client): + """Test: GET /streamflow_observations/usgs/available""" + response = remote_client.get("/v1/streamflow_observations/usgs/available") + + assert response.status_code in [200, 500] # Will return if the PyIceberg DB exists in the /tmp/ dir + + if response.status_code == 200: + data = response.json() + assert "data_source" in data + assert "identifiers" in data + assert "total_identifiers" in data + assert data["data_source"] == "usgs" + + +@pytest.mark.integration +def test_available_identifiers_with_limit_example(remote_client): + """Test: GET /streamflow_observations/usgs/available?limit=50""" + response = remote_client.get("/v1/streamflow_observations/usgs/available?limit=50") + + assert response.status_code in [200, 500] + + if response.status_code == 200: + data = response.json() + assert data["showing"] <= 50 + + +@pytest.mark.integration +def test_csv_generation(remote_client, local_usgs_streamflow_csv): + """Test: GET /streamflow_observations/usgs/csv""" + response = remote_client.get( + "/v1/streamflow_observations/usgs/csv", + params={ + "identifier": "01010000", + "start_date": "2021-12-31T14:00:00", + "end_date": "2022-01-01T14:00:00", + }, + ) + + assert response.status_code in [200, 500] + + if response.status_code == 200: + df = pd.read_csv(StringIO(response.text)) + assert local_usgs_streamflow_csv.equals(df) + + +@pytest.mark.integration +def test_parquet_generation(remote_client, local_usgs_streamflow_parquet): + """Test: GET /streamflow_observations/usgs/parquet""" + response = remote_client.get( + "/v1/streamflow_observations/usgs/parquet", + params={ + "identifier": "01010000", + "start_date": "2021-12-31T14:00:00", + "end_date": "2022-01-01T14:00:00", + }, + ) + + assert response.status_code in [200, 500] + + if response.status_code == 200: + df = pd.read_parquet(BytesIO(response.content)) + assert local_usgs_streamflow_parquet.equals(df) diff --git a/tests/app/test_topoflow_albedo_router.py b/tests/app/test_topoflow_albedo_router.py new file mode 100644 index 0000000..ae09aa8 --- /dev/null +++ b/tests/app/test_topoflow_albedo_router.py @@ -0,0 +1,15 @@ +from icefabric.schemas.modules import AlbedoValues + + +def test_albedo_endpoint(remote_client): + """Test: GET /v2/modules/topoflow/albedo - all valid arguments""" + for landcover, albedo in AlbedoValues.__members__.items(): + response = remote_client.get(f"/v1/modules/topoflow/albedo?landcover={landcover}") + assert response.status_code == 200 + assert response.text == str(albedo.value) + + +def test_albedo_endpoint__422(remote_client): + """Test: GET /v2/modules/topoflow/albedo - fails validator""" + response = remote_client.get("/v1/modules/topoflow/albedo?landcover=nope") + assert response.status_code == 422 diff --git a/tests/app/test_xs_router.py b/tests/app/test_xs_router.py new file mode 100644 index 0000000..48fdca1 --- /dev/null +++ b/tests/app/test_xs_router.py @@ -0,0 +1,33 @@ +from pyiceberg.catalog import load_catalog + + +def test_xs_endpoint(remote_client): + """Test: GET /v1/ras_xs/{mip_hucid or ble_hucid}/dsreachid={reach_id}?xstype={mip or ble} - all valid arguments""" + catalog = load_catalog("glue", **{"type": "glue", "glue.region": "us-east-1"}) + mip_hucid_list = [tup[1] for tup in catalog.list_tables("mip_xs")] + ble_hucid_list = [tup[1] for tup in catalog.list_tables("ble_xs")] + + for mip_hucid in 
mip_hucid_list[:5]: + response = remote_client.get(f"/v1/ras_xs/{mip_hucid}?xstype=mip") + assert response.status_code == 200, "Incorrect response" + + reachid = list(set(catalog.load_table(f"mip_xs.{mip_hucid}").scan().to_pandas()["ds_reach_id"]))[0] + response2 = remote_client.get(f"/v1/ras_xs/{mip_hucid}/dsreachid={reachid}?xstype=mip") + assert response2.status_code == 200, "Incorrect response" + + for ble_hucid in ble_hucid_list[:5]: + response = remote_client.get(f"/v1/ras_xs/{ble_hucid}?xstype=ble") + assert response.status_code == 200, "Incorrect response" + + reachid = list(set(catalog.load_table(f"ble_xs.{ble_hucid}").scan().to_pandas()["ds_reach_id"]))[0] + response2 = remote_client.get(f"/v1/ras_xs/{ble_hucid}/dsreachid={reachid}?xstype=ble") + assert response2.status_code == 200, "Incorrect response" + + +def test_xs_subset_endpoint__422(remote_client): + """Test: GET /v1/ras_xs/08020203?xstype=NA - fails validator""" + response = remote_client.get("/v1/ras_xs/08020203?xstype=NA") + assert response.status_code == 422 + + response2 = remote_client.get("/v1/ras_xs/08020203/dsreachid=NA?xstype=NA") + assert response2.status_code == 422 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..9ab6442 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,911 @@ +import os +from pathlib import Path +from typing import Any + +import pandas as pd +import polars as pl +import pyarrow as pa +import pytest +import rustworkx as rx +from dotenv import load_dotenv +from fastapi.testclient import TestClient +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.expressions import EqualTo, In +from pyprojroot import here + +from app.main import app +from icefabric.builds import load_upstream_json +from icefabric.builds.graph_connectivity import read_edge_attrs, read_node_attrs +from icefabric.schemas import NGWPCTestLocations + +""" +Unified Mock PyIceberg Catalog Test Suite for Hydrofabric v2.2 Data using RustworkX Graph +""" + +# Load the sample graph from the actual graph file +SAMPLE_GRAPH: rx.PyDiGraph = rx.from_node_link_json_file( + str(here() / "tests/data/hi_hf_graph_network.json"), + edge_attrs=read_edge_attrs, + node_attrs=read_node_attrs, +) # type: ignore + + +class MockSnapshot: + """Mock Pyiceberg snapshot""" + + def __init__(self): + self.snapshot_id = "0" + + +class MockTable: + """Mock PyIceberg Table with realistic Hawaii hydrofabric data""" + + def __init__(self, table_name: str, data: pd.DataFrame): + self.table_name = table_name + self.data = data + self.current_snapshot = MockSnapshot + self._polars_data = pl.from_pandas(data).lazy() + + def scan(self, row_filter=None): + """Mock scan method that applies filters""" + return MockScan(self._polars_data, row_filter) + + def to_polars(self) -> pl.LazyFrame: + """Returns data as Polars DataFrame""" + return self._polars_data + + +class MockScan: + """Mock scan result that can be filtered and converted""" + + def __init__(self, data: pl.LazyFrame, row_filter=None): + self.data = data + self.row_filter = row_filter + + def _apply_filters(self) -> pl.DataFrame: + """Apply filters to the data and return filtered Polars DataFrame""" + if self.row_filter is None: + return self.data.collect() + + # Handle different filter types + if isinstance(self.row_filter, EqualTo): + column_name = self.row_filter.term.name + value = self.row_filter.literal.value + return self.data.filter(pl.col(column_name) == value).collect() + elif isinstance(self.row_filter, In): + column_name = 
self.row_filter.term.name
+            values = [lit.value for lit in self.row_filter.literals]
+            return self.data.filter(pl.col(column_name).is_in(values)).collect()
+
+        return self.data.collect()
+
+    def to_polars(self) -> pl.DataFrame:
+        """Apply filters and returns a Polars DataFrame"""
+        return self._apply_filters()
+
+    def to_pandas(self) -> pd.DataFrame:
+        """Apply filters and returns a Pandas DataFrame"""
+        return self._apply_filters().to_pandas()
+
+    def to_arrow(self) -> pa.Table:
+        """Apply filters and returns an Arrow table"""
+        # Convert the filtered frame via pandas so the Arrow table reflects any row filter
+        return pa.Table.from_pandas(self._apply_filters().to_pandas())
+
+
+class MockCatalog:
+    """Mock PyIceberg Catalog with sample Hawaii hydrofabric data based on RustworkX graph"""
+
+    def __init__(self, catalog_type: str = "glue"):
+        self.catalog_type = catalog_type
+        self.connectivity_graph = SAMPLE_GRAPH
+        self.name = "mock_hf"
+        self.tables = self._create_sample_tables()
+
+    def load_table(self, table_name: str) -> MockTable:
+        """Load a mock table by name"""
+        if table_name not in self.tables:
+            raise ValueError(f"Table {table_name} not found")
+        return self.tables[table_name]
+
+    def get_connectivity_graph(self, namespace: str = "mock_hf") -> rx.PyDiGraph:
+        """Get the pre-built connectivity graph for the given namespace"""
+        return self.connectivity_graph
+
+    def _extract_graph_relationships(self) -> dict[str, Any]:
+        """Extract connectivity relationships from the RustworkX graph"""
+        nodes = self.connectivity_graph.nodes()
+
+        # Build node relationships from the graph
+        node_to_downstream = {}
+
+        for edge_idx in self.connectivity_graph.edge_indices():
+            source_idx, target_idx = self.connectivity_graph.get_edge_endpoints_by_index(edge_idx)
+            source_node = self.connectivity_graph.get_node_data(source_idx)
+            target_node = self.connectivity_graph.get_node_data(target_idx)
+            nexus_id = self.connectivity_graph.get_edge_data_by_index(edge_idx)
+
+            node_to_downstream[source_node] = {"target": target_node, "nexus": nexus_id}
+
+        return {"nodes": nodes, "relationships": node_to_downstream}
+
+    def _create_sample_tables(self) -> dict[str, MockTable]:
+        """Create sample hydrofabric tables based on the RustworkX graph"""
+        tables = {}
+
+        # Extract relationships from the graph
+        graph_info = self._extract_graph_relationships()
+
+        # Network table - core connectivity data
+        network_data = self._create_network_data_from_graph(graph_info)
+        tables["mock_hf.network"] = MockTable("mock_hf.network", network_data)
+
+        # Flowpaths table - stream geometry
+        flowpaths_data = self._create_flowpaths_data(network_data)
+        tables["mock_hf.flowpaths"] = MockTable("mock_hf.flowpaths", flowpaths_data)
+
+        # Nexus table - connection points
+        nexus_data = self._create_nexus_data(network_data)
+        tables["mock_hf.nexus"] = MockTable("mock_hf.nexus", nexus_data)
+
+        # Divides table - watershed boundaries
+        divides_data = self._create_divides_data(network_data)
+        tables["mock_hf.divides"] = MockTable("mock_hf.divides", divides_data)
+
+        # Lakes table
+        lakes_data = self._create_lakes_data()
+        tables["mock_hf.lakes"] = MockTable("mock_hf.lakes", lakes_data)
+
+        # Attribute tables
+        tables["mock_hf.divide-attributes"] = MockTable(
+            "mock_hf.divide-attributes", self._create_divide_attributes(divides_data)
+        )
+        tables["mock_hf.flowpath-attributes"] = MockTable(
+            "mock_hf.flowpath-attributes", self._create_flowpath_attributes(flowpaths_data)
+        )
+        tables["mock_hf.flowpath-attributes-ml"] = MockTable(
+            "mock_hf.flowpath-attributes-ml", self._create_flowpath_attributes_ml()
+        )
+        tables["mock_hf.pois"] = MockTable("mock_hf.pois", 
self._create_pois_data(network_data)) + tables["mock_hf.hydrolocations"] = MockTable( + "mock_hf.hydrolocations", self._create_hydrolocations_data() + ) + + tables["divide_parameters.sac-sma_conus"] = MockTable( + "divide_parameters.sac-sma_conus", self._create_sac_sma_divide_parameters(network_data) + ) + tables["divide_parameters.snow-17_conus"] = MockTable( + "divide_parameters.snow-17_conus", self._create_snow17_divide_parameters(network_data) + ) + + return tables + + def _create_network_data_from_graph(self, graph_info: dict[str, Any]) -> pd.DataFrame: + """Create network data based on the actual RustworkX graph structure""" + network_data = [] + poi_counter = 0 + hydroseq_counter = 1 + + nodes = graph_info["nodes"] + relationships = graph_info["relationships"] + + # Create flowpath records for each node in the graph + for node_id in nodes: + if not node_id.startswith("wb-"): + continue + + wb_num = int(node_id.split("-")[1]) + + # Get downstream connection from graph + downstream_info = relationships.get(node_id, {}) + toid = downstream_info.get("nexus") # The nexus this wb flows to + + # Some records have POIs + has_poi = poi_counter % 10 == 0 # Every 10th watershed has a POI + poi_id = float(poi_counter) if has_poi else None + poi_counter += 1 + + # Create the flowpath record + record = { + "id": node_id, + "toid": toid, + "divide_id": f"cat-{wb_num}", + "ds_id": None, + "mainstem": None, + "poi_id": poi_id, + "hydroseq": float(hydroseq_counter), + "hf_source": "NHDPlusHR", + "hf_id": str(wb_num), + "lengthkm": round(0.5 + (wb_num % 100) / 10.0, 2), + "areasqkm": round(1.0 + (wb_num % 500) / 10.0, 2), + "tot_drainage_areasqkm": round(10.0 + (wb_num % 5000) / 10.0, 2), + "type": "waterbody", + "vpuid": "hi", + "topo": "fl-nex", + "hl_uri": f"gages-{wb_num:06d}" if has_poi else None, + } + + network_data.append(record) + hydroseq_counter += 1 + + # Create nexus records based on the edges in the graph + for source_node, info in relationships.items(): + if not source_node.startswith("wb-"): + continue + + nexus_id = info.get("nexus") + target_node = info.get("target") + + if nexus_id and target_node: + source_num = int(source_node.split("-")[1]) + + # Determine nexus type based on target + if target_node == "wb-0": + nexus_type = "terminal" + elif target_node.startswith("cnx-"): + nexus_type = "coastal" + else: + nexus_type = "network" + + nexus_record = { + "id": nexus_id, + "toid": target_node, + "divide_id": f"cat-{source_num}", + "ds_id": None, + "mainstem": None, + "poi_id": None, + "hydroseq": float(hydroseq_counter), + "hf_source": "NHDPlusHR", + "hf_id": str(source_num), + "lengthkm": 0.01 if nexus_type != "terminal" else 0.001, + "areasqkm": 0.001 if nexus_type != "terminal" else 0.0001, + "tot_drainage_areasqkm": 0.01 if nexus_type != "terminal" else 0.001, + "type": nexus_type, + "vpuid": "hi", + "topo": "fl-nex", + "hl_uri": None, + } + + network_data.append(nexus_record) + hydroseq_counter += 1 + + # Add wb-0 if not present (the ultimate outlet) + wb_ids = [record["id"] for record in network_data if record["id"] and record["id"].startswith("wb-")] + if "wb-0" not in wb_ids: + wb0_record = { + "id": "wb-0", + "toid": None, + "divide_id": "cat-0", + "ds_id": None, + "mainstem": None, + "poi_id": None, + "hydroseq": float(hydroseq_counter), + "hf_source": "NHDPlusHR", + "hf_id": "0", + "lengthkm": 0.0, + "areasqkm": 0.0, + "tot_drainage_areasqkm": 0.0, + "type": "outlet", + "vpuid": "hi", + "topo": "fl-nex", + "hl_uri": None, + } + network_data.append(wb0_record) + + return 
pd.DataFrame(network_data) + + def _create_flowpaths_data(self, network_df: pd.DataFrame) -> pd.DataFrame: + """Create sample flowpath geometry data""" + flowpaths = [] + + # Filter to only wb-* records (flowpaths) + wb_records = network_df[network_df["id"].str.startswith("wb-", na=False)] + + for _, row in wb_records.iterrows(): + # Create a simple LineString geometry as binary (matching real schema) + geometry_binary = b"\x01\x01\x00\x00\x00fl#\xd5g\xaf\x13\xc1\x96!\x8e\xa5\xfe\x14.A" + + flowpaths.append( + { + "id": row["id"], + "toid": row["toid"], + "mainstem": None, # DoubleType + "order": float(hash(row["id"]) % 7 + 1), # DoubleType, stream order 1-7 + "hydroseq": int(row["hydroseq"]) if row["hydroseq"] else None, # IntegerType + "lengthkm": row["lengthkm"], # DoubleType + "areasqkm": row["areasqkm"], # DoubleType + "tot_drainage_areasqkm": row["tot_drainage_areasqkm"], # DoubleType + "has_divide": True, # BooleanType + "divide_id": row["divide_id"], # StringType + "poi_id": str(int(row["poi_id"])) if pd.notna(row["poi_id"]) else None, # StringType + "vpuid": "hi", # StringType + "geometry": geometry_binary, # BinaryType + } + ) + + return pd.DataFrame(flowpaths) + + def _create_nexus_data(self, network_df: pd.DataFrame) -> pd.DataFrame: + """Create sample nexus point data matching Hawaii schema""" + nexus_points = [] + + # Create nexus points for toids + unique_toids = network_df["toid"].dropna().unique() + + counter = 1 + for toid in unique_toids: + if toid and not toid.endswith("_downstream"): + # Create mock binary geometry for Point (copying from a HY Nexus) + geometry_binary = b"\x01\x01\x00\x00\x00fl#\xd5g\xaf\x13\xc1\x96!\x8e\xa5\xfe\x14.A" + + # Determine type and poi_id + if toid.startswith("cnx-"): + nexus_type = "coastal" + poi_id = None + elif toid.startswith("nex-"): + nexus_type = "network" + poi_id = str(counter) if counter % 3 == 0 else None + elif toid.startswith("tnx-"): + nexus_type = "terminal" + poi_id = None + else: + nexus_type = "network" + poi_id = None + + nexus_points.append( + { + "id": toid, + "toid": "wb-0" if nexus_type == "coastal" else None, + "poi_id": poi_id, # StringType + "type": nexus_type, # StringType + "vpuid": "hi", # StringType + "geometry": geometry_binary, # BinaryType + } + ) + counter += 1 + + return pd.DataFrame(nexus_points) + + def _create_divides_data(self, network_df: pd.DataFrame) -> pd.DataFrame: + """Create sample watershed divide geometry data matching Hawaii schema""" + divides = [] + + # Filter to only wb-* records (flowpaths have divides) + wb_records = network_df[network_df["id"].str.startswith("wb-", na=False)] + + for _, row in wb_records.iterrows(): + # Create a simple polygon for the watershed boundary as binary + geometry_binary = b"\x01\x01\x00\x00\x00fl#\xd5g\xaf\x13\xc1\x96!\x8e\xa5\xfe\x14.A" + + # Determine toid and type based on actual network structure + if pd.isna(row["toid"]) or row["toid"] == "wb-0": + divide_type = "terminal" + toid = f"tnx-{hash(row['id']) % 1000000}" # Terminal nexus + elif row["toid"] and row["toid"].startswith("cnx-"): + divide_type = "coastal" + toid = row["toid"] + else: + divide_type = "network" + toid = row["toid"] + + divides.append( + { + "divide_id": row["divide_id"], + "toid": toid, + "type": divide_type, # StringType + "ds_id": None, # DoubleType + "areasqkm": row["areasqkm"], # DoubleType + "id": row["id"], # StringType + "lengthkm": row["lengthkm"], # DoubleType + "tot_drainage_areasqkm": row["tot_drainage_areasqkm"], # DoubleType + "has_flowline": True, # BooleanType + 
"vpuid": "hi", # StringType + "geometry": geometry_binary, # BinaryType + } + ) + + return pd.DataFrame(divides) + + # ... (rest of the helper methods remain the same) + def _create_lakes_data(self) -> pd.DataFrame: + """Create sample lakes data matching Hawaii schema""" + lakes = [] + + # Create a few lakes with realistic Hawaii schema + for i in range(5, 10): # Create 5 lakes + x = -100.0 + i * 0.02 + y = 40.0 + i * 0.02 + + # Create mock binary geometry for Point (lake centroid) + geometry_binary = b"\x01\x01\x00\x00\x00fl#\xd5g\xaf\x13\xc1\x96!\x8e\xa5\xfe\x14.A" + + lakes.append( + { + "lake_id": float(800020000 + i), # DoubleType + "LkArea": round(0.1 + i * 0.3, 2), # DoubleType + "LkMxE": round(90.0 + i * 50, 2), # DoubleType + "WeirC": 2.6, # DoubleType + "WeirL": 10.0, # DoubleType + "OrificeC": 0.6, # DoubleType + "OrificeA": 1.0, # DoubleType + "OrificeE": 10.0, # DoubleType + "WeirE": 15.0, # DoubleType + "ifd": 0.9, # DoubleType + "Dam_Length": 100.0, # DoubleType + "domain": "hi", # StringType + "poi_id": i + 1000, # IntegerType + "hf_id": float(8000010000000 + i), # DoubleType + "reservoir_index_AnA": None, # DoubleType + "reservoir_index_Extended_AnA": None, # DoubleType + "reservoir_index_GDL_AK": None, # DoubleType + "reservoir_index_Medium_Range": None, # DoubleType + "reservoir_index_Short_Range": None, # DoubleType + "res_id": f"res-{800020000 + i}", # StringType + "vpuid": "hi", # StringType + "lake_x": x, # DoubleType + "lake_y": y, # DoubleType + "geometry": geometry_binary, # BinaryType + } + ) + return pd.DataFrame(lakes) + + def _create_divide_attributes(self, divides_df: pd.DataFrame) -> pd.DataFrame: + """Create sample divide attributes data matching Hawaii schema""" + attributes = [] + for _, row in divides_df.iterrows(): + # Create realistic soil and vegetation parameters + attributes.append( + { + "divide_id": row["divide_id"], # StringType + "mode.bexp_soil_layers_stag.1": 7.457384, # DoubleType + "mode.bexp_soil_layers_stag.2": 7.457384, # DoubleType + "mode.bexp_soil_layers_stag.3": 7.457384, # DoubleType + "mode.bexp_soil_layers_stag.4": 7.457384, # DoubleType + "mode.ISLTYP": 1.0, # DoubleType + "mode.IVGTYP": 7.0, # DoubleType + "geom_mean.dksat_soil_layers_stag.1": 0.000012, # DoubleType + "geom_mean.dksat_soil_layers_stag.2": 0.000012, # DoubleType + "geom_mean.dksat_soil_layers_stag.3": 0.000012, # DoubleType + "geom_mean.dksat_soil_layers_stag.4": 0.000012, # DoubleType + "geom_mean.psisat_soil_layers_stag.1": -0.355872, # DoubleType + "geom_mean.psisat_soil_layers_stag.2": -0.355872, # DoubleType + "geom_mean.psisat_soil_layers_stag.3": -0.355872, # DoubleType + "geom_mean.psisat_soil_layers_stag.4": -0.355872, # DoubleType + "mean.cwpvt": 0.5, # DoubleType + "mean.mfsno": 2.5, # DoubleType + "mean.mp": 0.0, # DoubleType + "mean.refkdt": 3.0, # DoubleType + "mean.slope_1km": 0.1 + (hash(row["divide_id"]) % 50) / 500.0, # DoubleType + "mean.smcmax_soil_layers_stag.1": 0.476, # DoubleType + "mean.smcmax_soil_layers_stag.2": 0.476, # DoubleType + "mean.smcmax_soil_layers_stag.3": 0.476, # DoubleType + "mean.smcmax_soil_layers_stag.4": 0.476, # DoubleType + "mean.smcwlt_soil_layers_stag.1": 0.135, # DoubleType + "mean.smcwlt_soil_layers_stag.2": 0.135, # DoubleType + "mean.smcwlt_soil_layers_stag.3": 0.135, # DoubleType + "mean.smcwlt_soil_layers_stag.4": 0.135, # DoubleType + "mean.vcmx25": 45.0, # DoubleType + "mean.Coeff": 0.5, # DoubleType + "mean.Zmax": 1.0, # DoubleType + "mode.Expon": 3.0, # DoubleType + "X": -100.0 + (hash(row["divide_id"]) % 
1000) / 1000.0, # DoubleType + "Y": 40.0 + (hash(row["divide_id"]) % 500) / 500.0, # DoubleType + "mean.impervious": 0.1, # DoubleType + "mean.elevation": 500.0 + hash(row["divide_id"]) % 1000, # DoubleType + "mean.slope": 0.05 + (hash(row["divide_id"]) % 100) / 1000.0, # DoubleType + "circ_mean.aspect": 180.0, # DoubleType + "dist_4.twi": '[{"v":0.6137,"frequency":0.2501},{"v":2.558,"frequency":0.2499}]', # StringType + "vpuid": "hi", # StringType + } + ) + + return pd.DataFrame(attributes) + + def _create_sac_sma_divide_parameters(self, network_df: pd.DataFrame) -> pd.DataFrame: + """Create sample SAC-SMA divide parameters data matching CONUS schema""" + attributes = [] + wb_records = network_df[network_df["id"].str.startswith("wb-", na=False)] + + for _, row in wb_records.iterrows(): + # Create realistic SAC-SMA parameters using hash for reproducible variation + divide_hash = hash(row["divide_id"]) + attributes.append( + { + "divide_id": row["divide_id"], # StringType + "lzfpm": 80.0 + (divide_hash % 500) / 10.0, # DoubleType + "lzfsm": 5.0 + (divide_hash % 200) / 20.0, # DoubleType + "lzpk": 0.01 + (divide_hash % 50) / 2000.0, # DoubleType + "lzsk": 0.10 + (divide_hash % 100) / 1000.0, # DoubleType + "lztwm": 100.0 + (divide_hash % 600) / 10.0, # DoubleType + "pfree": 0.05 + (divide_hash % 200) / 1000.0, # DoubleType + "rexp": 1.0 + (divide_hash % 150) / 100.0, # DoubleType + "uzfwm": 20.0 + (divide_hash % 200) / 10.0, # DoubleType + "uzk": 0.30 + (divide_hash % 200) / 1000.0, # DoubleType + "uztwm": 30.0 + (divide_hash % 300) / 10.0, # DoubleType + "zperc": 40.0 + (divide_hash % 800) / 10.0, # DoubleType + } + ) + + return pd.DataFrame(attributes) + + def _create_snow17_divide_parameters(self, network_df: pd.DataFrame) -> pd.DataFrame: + """Create sample Snow-17 divide parameters data matching CONUS schema""" + attributes = [] + wb_records = network_df[network_df["id"].str.startswith("wb-", na=False)] + + for _, row in wb_records.iterrows(): + # Create realistic Snow-17 parameters using hash for reproducible variation + divide_hash = hash(row["divide_id"]) + attributes.append( + { + "divide_id": row["divide_id"], # StringType + "mfmax": 1.5 + (divide_hash % 300) / 1000.0, # DoubleType + "mfmin": 0.3 + (divide_hash % 100) / 1000.0, # DoubleType + "uadj": 0.05 + (divide_hash % 50) / 1000.0, # DoubleType + } + ) + + return pd.DataFrame(attributes) + + def _create_flowpath_attributes(self, flowpath_df: pd.DataFrame) -> pd.DataFrame: + """Create sample flowpath attributes data matching schema""" + attributes = [] + for _, row in flowpath_df.iterrows(): + attributes.append( + { + "link": row["id"], # StringType + "to": row["toid"], # StringType + "Length_m": (row["lengthkm"] * 1000) if row["lengthkm"] else None, # DoubleType + "Y": round(0.5 + (hash(row["id"]) % 100) / 200.0, 6), # DoubleType + "n": 0.035, # DoubleType - Manning's n + "nCC": 0.035, # DoubleType + "BtmWdth": 2.0, # DoubleType + "TopWdth": 10.0, # DoubleType + "TopWdthCC": 10.0, # DoubleType + "ChSlp": 0.1, # DoubleType + "alt": 1, # IntegerType + "So": 0.01 + (hash(row["id"]) % 50) / 1000.0, # DoubleType - slope + "MusX": 1800.0, # DoubleType + "MusK": 0.2, # DoubleType + "gage": None, # StringType + "gage_nex_id": None, # StringType + "WaterbodyID": None, # StringType + "waterbody_nex_id": None, # StringType + "id": row["id"], # StringType + "toid": row["toid"], # StringType + "vpuid": "hi", # StringType + } + ) + + return pd.DataFrame(attributes) + + def _create_flowpath_attributes_ml(self) -> pd.DataFrame: + """Create 
sample ML flowpath attributes data""" + # This table doesn't exist in the Hawaii schema based on the samples + # Return empty DataFrame with expected structure + return pd.DataFrame(columns=["id", "vpuid", "predicted_flow", "confidence"]) + + def _create_pois_data(self, network_df: pd.DataFrame) -> pd.DataFrame: + """Create sample points of interest data matching Hawaii schema""" + pois = [] + + # Create POIs for records that have poi_id + poi_records = network_df[network_df["poi_id"].notna()] + + for _, row in poi_records.iterrows(): + # Create corresponding nexus ID + if row["toid"] and row["toid"].startswith("nex-"): + nex_id = row["toid"] + else: + nex_id = f"tnx-{hash(row['id']) % 1000000}" + + pois.append( + { + "poi_id": int(row["poi_id"]), # IntegerType + "id": row["id"], # StringType + "nex_id": nex_id, # StringType + "vpuid": "hi", # StringType + } + ) + + return pd.DataFrame(pois) + + def _create_hydrolocations_data(self) -> pd.DataFrame: + """Create sample hydrolocations data matching Hawaii schema""" + hydrolocations = [] + + # Create some realistic hydrolocations based on graph nodes + nodes = self.connectivity_graph.nodes() + sample_nodes = [node for node in nodes if node.startswith("wb-")][:10] # Take first 10 + + for i, node_id in enumerate(sample_nodes): + wb_num = int(node_id.split("-")[1]) + + # Determine if this is a coastal or gage location + is_coastal = i % 3 == 0 + + if is_coastal: + hl_data = { + "poi_id": i + 20, + "id": node_id, + "nex_id": f"tnx-{1000000000 + wb_num}", + "hl_link": f"HI{i + 1}", + "hl_reference": "coastal", + "hl_source": "NOAAOWP", + "hl_uri": f"coastal-HI{i + 1}", + } + else: + hl_data = { + "poi_id": i + 20, + "id": node_id, + "nex_id": f"nex-{wb_num}", + "hl_link": f"167{wb_num:05d}", + "hl_reference": "gages", + "hl_source": "NWIS", + "hl_uri": f"gages-167{wb_num:05d}", + } + + hydrolocations.append( + { + **hl_data, + "hf_id": 8.000010e13, # DoubleType + "vpuid": "hi", # StringType + } + ) + + return pd.DataFrame(hydrolocations) + + +# Utility functions for test setup +def create_test_environment(tmp_path: Path) -> dict[str, Any]: + """Create a complete test environment with mock catalogs and graph data""" + + # Create a test graph file path + graph_file = tmp_path / "data" / "hydrofabric" / "hi_hf_graph_network.json" + graph_file.parent.mkdir(parents=True, exist_ok=True) + + # Create a simple test graph if the file doesn't exist + if not graph_file.exists(): + test_graph = rx.PyDiGraph() + # Add some test nodes and edges + node1 = test_graph.add_node("wb-1001") + node2 = test_graph.add_node("wb-1002") + node3 = test_graph.add_node("wb-0") + test_graph.add_edge(node1, node2, "nex-1001") + test_graph.add_edge(node2, node3, "nex-1002") + + # Add metadata + test_graph.attrs = { + "generated_at": "2024-01-01T00:00:00+00:00", + "catalog_name": "test", + "flowpath_snapshot_id": "test_snapshot_123", + "network_snapshot_id": "test_snapshot_456", + } + + # Save the test graph + from icefabric.builds.graph_connectivity import serialize_edge_attrs, serialize_node_attrs + + rx.node_link_json( + test_graph, + path=str(graph_file), + graph_attrs=lambda attrs: dict(attrs), + edge_attrs=serialize_edge_attrs, + node_attrs=serialize_node_attrs, + ) + + # Create mock catalogs + glue_catalog = MockCatalog("glue") + sql_catalog = MockCatalog("sql") + + return { + "glue_catalog": glue_catalog, + "sql_catalog": sql_catalog, + "graph_file": graph_file, + "tmp_path": tmp_path, + } + + +# Setting .env/.pyiceberg creds based on project root +env_path = Path.cwd() / 
".env" +load_dotenv(dotenv_path=env_path) +pyiceberg_file = Path.cwd() / ".pyiceberg.yaml" +if pyiceberg_file.exists(): + os.environ["PYICEBERG_HOME"] = str(here()) +else: + raise FileNotFoundError( + "Cannot find .pyiceberg.yaml. Please download this from NGWPC confluence or create " + ) + +# Test data constants +sample_hf_uri = [ + "gages-01010000", + "gages-02450825", + "gages-03173000", + "gages-04100500", + "gages-05473450", + "gages-06823500", + "gages-07060710", + "gages-08070000", + "gages-09253000", + "gages-10316500", + "gages-11456000", + "gages-12411000", + "gages-13337000", + "gages-14020000", +] + +test_ic_rasters = [f for f in NGWPCTestLocations._member_names_ if "TOPO" in f] +local_ic_rasters = [ + here() / "tests/data/topo_tifs/nws-nos-surveys/Albemarle_Sound_NOS_NCEI", + here() / "tests/data/topo_tifs/nws-nos-surveys/Chesapeake_Bay_NOS_NCEI", + here() / "tests/data/topo_tifs/nws-nos-surveys/Mobile_Bay_NOS_NCEI", + here() / "tests/data/topo_tifs/nws-nos-surveys/Tangier_Sound_NOS_NCEI", + here() / "tests/data/topo_tifs/tbdem_alaska_10m", + here() / "tests/data/topo_tifs/tbdem_alaska_30m", + here() / "tests/data/topo_tifs/tbdem_conus_atlantic_gulf_30m", + here() / "tests/data/topo_tifs/tbdem_conus_pacific_30m", + here() / "tests/data/topo_tifs/tbdem_great_lakes_30m", + here() / "tests/data/topo_tifs/tbdem_hawaii_10m", + here() / "tests/data/topo_tifs/tbdem_hawaii_30m", + here() / "tests/data/topo_tifs/tbdem_pr_usvi_10m", + here() / "tests/data/topo_tifs/tbdem_pr_usvi_30m", +] + + +# Pytest fixtures +@pytest.fixture +def mock_catalog(): + """Fixture providing a mock Glue catalog with RustworkX graph""" + return MockCatalog + + +@pytest.fixture +def sample_graph() -> rx.PyDiGraph: + """Fixture providing the sample RustworkX graph""" + return SAMPLE_GRAPH + + +@pytest.fixture +def temp_graph_file(tmp_path): + """Fixture creating a temporary graph file for testing""" + graph_file = tmp_path / "hi_hf_graph_network.json" + graph_file.parent.mkdir(parents=True, exist_ok=True) + + # Create a simple test graph + test_graph = rx.PyDiGraph() + node1 = test_graph.add_node("wb-1001") + node2 = test_graph.add_node("wb-1002") + node3 = test_graph.add_node("wb-0") + test_graph.add_edge(node1, node2, "nex-1001") + test_graph.add_edge(node2, node3, "nex-1002") + + test_graph.attrs = { + "generated_at": "2024-01-01T00:00:00+00:00", + "catalog_name": "test", + "flowpath_snapshot_id": "test_snapshot_123", + "network_snapshot_id": "test_snapshot_456", + } + + # Save using the same serialization functions + from icefabric.builds.graph_connectivity import serialize_edge_attrs, serialize_node_attrs + + rx.node_link_json( + test_graph, + path=str(graph_file), + graph_attrs=lambda attrs: dict(attrs), + edge_attrs=serialize_edge_attrs, + node_attrs=serialize_node_attrs, + ) + + return graph_file + + +@pytest.fixture(params=test_ic_rasters) +def ic_raster(request) -> str: + """Returns AWS S3 icechunk stores/rasters for checking correctness""" + return request.param + + +@pytest.fixture(params=local_ic_rasters) +def local_ic_raster(request) -> Path: + """Returns local icechunk stores/rasters for checking correctness""" + return request.param + + +@pytest.fixture(params=sample_hf_uri) +def gauge_hf_uri(request) -> str: + """Returns individual gauge identifiers for parameterized testing""" + return request.param + + +@pytest.fixture +def testing_dir() -> Path: + """Returns the testing data dir""" + return here() / "tests/data/" + + +@pytest.fixture(scope="session") +def remote_client(): + """Create a test 
client for the FastAPI app with real Glue catalog.""" + catalog = load_catalog("glue") + hydrofabric_namespaces = ["conus_hf", "ak_hf", "gl_hf", "hi_hf", "prvi_hf"] + app.state.catalog = catalog + app.state.network_graphs = load_upstream_json( + catalog=catalog, + namespaces=hydrofabric_namespaces, + output_path=here() / "data", + ) + return TestClient(app) + + +@pytest.fixture(scope="session") +def client(): + """Create a test client for the FastAPI app with mock catalog.""" + app.state.catalog = MockCatalog() # defaulting to use the mock catalog + return TestClient(app) + + +@pytest.fixture +def local_usgs_streamflow_csv(): + """Returns a locally downloaded CSV file from a specific gauge and time""" + file_path = here() / "tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.csv" + return pd.read_csv(file_path) + + +@pytest.fixture +def local_usgs_streamflow_parquet(): + """Returns a locally downloaded Parquet file from a specific gauge and time""" + file_path = here() / "tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.parquet" + return pd.read_parquet(file_path) + + +@pytest.fixture +def hydrofabric_catalog() -> Catalog: + """Returns an iceberg catalog object for the hydrofabric""" + return load_catalog("glue") + + +@pytest.fixture(params=["wb-1435", "wb-1440", "wb-1686", "wb-1961"]) +def test_wb_id(request): + """Fixture providing test watershed IDs for parameterized tests""" + return request.param + + +# Pytest configuration functions +def pytest_addoption(parser): + """Adds custom command line options for pytest""" + parser.addoption( + "--run-slow", + action="store_true", + default=False, + help="Run slow tests", + ) + parser.addoption( + "--run-local", + action="store_true", + default=False, + help="Run local tests", + ) + + +def pytest_collection_modifyitems(config, items): + """Modifies test collection based on command line options""" + if not config.getoption("--run-slow"): + skipper = pytest.mark.skip(reason="Only run when --run-slow is given") + for item in items: + if "slow" in item.keywords: + item.add_marker(skipper) + + if not config.getoption("--run-local"): + skipper = pytest.mark.skip(reason="Only run when --run-local is given") + for item in items: + if "local" in item.keywords: + item.add_marker(skipper) + + +def pytest_configure(config): + """Configure pytest markers.""" + config.addinivalue_line("markers", "slow: marks tests as slow tests") + config.addinivalue_line("markers", "local: marks tests as local tests") + config.addinivalue_line("markers", "performance: marks tests as performance tests") + config.addinivalue_line("markers", "integration: marks tests as integration tests") + config.addinivalue_line("markers", "unit: marks tests as unit tests") diff --git a/tests/data/gages-06710385.gpkg b/tests/data/gages-06710385.gpkg new file mode 100644 index 0000000..8f8a14a Binary files /dev/null and b/tests/data/gages-06710385.gpkg differ diff --git a/tests/data/sample_connections.json b/tests/data/sample_connections.json new file mode 100644 index 0000000..55c655d --- /dev/null +++ b/tests/data/sample_connections.json @@ -0,0 +1,74 @@ +{ + "_metadata": { + "generated_at": "2025-07-21T18:45:57.169868+00:00", + "iceberg": { + "catalog_name": "mock_glue", + "source_table": "mock_hf.network", + "snapshot_id": 1464906773857472435 + } + }, + "upstream_connections": { + "wb-2813": ["wb-2896"], + "wb-2815": ["wb-2931"], + "wb-1421": ["wb-1775"], + "wb-3019": ["wb-3126", "wb-3270"], + "wb-1496": ["wb-1495"], + "wb-1425": ["wb-1424"], + 
"wb-2034": ["wb-2072"], + "wb-3042": ["wb-3075"], + "wb-1890": ["wb-1889"], + "wb-2843": ["wb-2842", "wb-2927"], + "wb-2437": ["wb-2467"], + "wb-1914": ["wb-1913", "wb-1926"], + "wb-1962": ["wb-1961"], + "wb-2426": ["wb-2425", "wb-2483"], + "wb-1966": ["wb-1973"], + "wb-1559": ["wb-1641"], + "wb-1952": ["wb-1956"], + "wb-2738": ["wb-2737"], + "wb-3051": ["wb-3207"], + "wb-1971": ["wb-1970", "wb-2071"], + "wb-1917": ["wb-2056"], + "wb-1506": ["wb-1505"], + "wb-2402": ["wb-2401"], + "wb-1391": ["wb-1390"], + "wb-1399": ["wb-1769"], + "wb-2195": ["wb-2225"], + "wb-1432": ["wb-1431", "wb-1771", "wb-1675"], + "wb-2700": ["wb-2741", "wb-2699", "wb-2813"], + "wb-1618": ["wb-1727", "wb-1617"], + "wb-1365": ["wb-1364", "wb-1519"], + "wb-1653": ["wb-1718"], + "wb-1466": ["wb-1666"], + "wb-2717": ["wb-2758"], + "wb-2753": ["wb-2752"], + "wb-3052": ["wb-3051"], + "wb-1928": ["wb-1953"], + "wb-1363": ["wb-1469", "wb-1362"], + "wb-2736": ["wb-2735"], + "wb-2828": ["wb-2827"], + "wb-3151": ["wb-3150", "wb-3272"], + "wb-2218": ["wb-2311"], + "wb-2651": ["wb-2698"], + "wb-1679": ["wb-1678"], + "wb-2720": ["wb-2719"], + "wb-1899": ["wb-1898"], + "wb-1858": ["wb-1863"], + "wb-1487": ["wb-1760"], + "wb-2706": ["wb-2705", "wb-2890"], + "wb-2708": ["wb-2707"], + "wb-2207": ["wb-2206", "wb-2310"], + "wb-2643": ["wb-2943", "wb-2642"], + "wb-1593": ["wb-1634"], + "wb-1926": ["wb-1925"], + "wb-1415": ["wb-1576"], + "wb-2640": ["wb-2639"], + "wb-1451": ["wb-1450"], + "wb-1993": ["wb-2038"], + "wb-1397": ["wb-1396"], + "wb-2751": ["wb-2750"], + "wb-1861": ["wb-1866"], + "wb-2660": ["wb-2659", "wb-2954"], + "wb-3063": ["wb-3070", "wb-3062"] + } +} diff --git a/tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.csv b/tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.csv new file mode 100644 index 0000000..2853a97 --- /dev/null +++ b/tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.csv @@ -0,0 +1,26 @@ +q_cms,time +17.698,2021-12-31 14:00:00 +17.698,2021-12-31 15:00:00 +17.698,2021-12-31 16:00:00 +17.698,2021-12-31 17:00:00 +17.698,2021-12-31 18:00:00 +17.698,2021-12-31 19:00:00 +17.698,2021-12-31 20:00:00 +17.698,2021-12-31 21:00:00 +17.414831,2021-12-31 22:00:00 +17.414831,2021-12-31 23:00:00 +17.414831,2022-01-01 00:00:00 +17.414831,2022-01-01 01:00:00 +17.414831,2022-01-01 02:00:00 +17.414831,2022-01-01 03:00:00 +17.414831,2022-01-01 04:00:00 +17.414831,2022-01-01 05:00:00 +17.414831,2022-01-01 06:00:00 +17.414831,2022-01-01 07:00:00 +17.414831,2022-01-01 08:00:00 +17.414831,2022-01-01 09:00:00 +17.15998,2022-01-01 10:00:00 +17.15998,2022-01-01 11:00:00 +17.15998,2022-01-01 12:00:00 +17.15998,2022-01-01 13:00:00 +17.15998,2022-01-01 14:00:00 diff --git a/tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.parquet b/tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.parquet new file mode 100644 index 0000000..bf8419c Binary files /dev/null and b/tests/data/usgs_01010000_data_from_20211231_1400_to_20220101_1400.parquet differ diff --git a/tests/hydrofabric/__init__.py b/tests/hydrofabric/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/hydrofabric/test_build_graphs.py b/tests/hydrofabric/test_build_graphs.py new file mode 100644 index 0000000..dba716b --- /dev/null +++ b/tests/hydrofabric/test_build_graphs.py @@ -0,0 +1,483 @@ +"""Test suite for graph connectivity module""" + +import json +import tempfile +from datetime import UTC, datetime +from pathlib import Path +from unittest.mock import Mock + +import polars as 
pl +import pytest +import rustworkx as rx + +from icefabric.builds.graph_connectivity import ( + _build_graph, + load_upstream_json, + read_edge_attrs, + read_node_attrs, + serialize_edge_attrs, + serialize_node_attrs, +) + + +class TestBuildGraph: + """Test the _build_graph function""" + + @pytest.fixture + def sample_flowpaths_data(self): + """Sample flowpaths data for testing""" + return pl.DataFrame( + { + "id": ["wb-1", "wb-2", "wb-3", "wb-4", "wb-5"], + "toid": ["nex-1", "nex-2", "nex-3", "nex-4", None], # wb-5 is terminal + } + ).lazy() + + @pytest.fixture + def sample_network_data(self): + """Sample network data for testing""" + return pl.DataFrame( + { + "id": ["nex-1", "nex-2", "nex-3", "nex-4", "nex-5"], + "toid": ["wb-2", "wb-3", "wb-4", "wb-0", None], # nex-5 is terminal + } + ).lazy() + + @pytest.fixture + def simple_flowpaths_data(self): + """Simple linear flowpaths for basic testing""" + return pl.DataFrame( + { + "id": ["wb-1", "wb-2", "wb-3"], + "toid": ["nex-1", "nex-2", "nex-3"], + } + ).lazy() + + @pytest.fixture + def simple_network_data(self): + """Simple linear network for basic testing""" + return pl.DataFrame( + { + "id": ["nex-1", "nex-2", "nex-3"], + "toid": ["wb-2", "wb-3", "wb-0"], + } + ).lazy() + + def test_build_graph_basic_structure(self, simple_flowpaths_data, simple_network_data): + """Test that the graph has the correct basic structure""" + graph = _build_graph(simple_flowpaths_data, simple_network_data) + + # Check that all flowpaths are nodes + node_data = graph.nodes() + assert "wb-1" in node_data + assert "wb-2" in node_data + assert "wb-3" in node_data + assert "wb-0" in node_data # Should be added automatically + + # Check that we have the expected number of nodes + assert len(node_data) == 4 + + def test_build_graph_edges(self, simple_flowpaths_data, simple_network_data): + """Test that edges are created correctly""" + graph = _build_graph(simple_flowpaths_data, simple_network_data) + + # Check edges exist (wb-1 -> wb-2 -> wb-3 -> wb-0) + edges = graph.edge_list() + + # Should have edges connecting the linear chain + assert len(edges) >= 2 # At least wb-1->wb-2 and wb-2->wb-3 + + def test_build_graph_terminal_handling(self, sample_flowpaths_data, sample_network_data): + """Test that terminal nodes are handled correctly""" + graph = _build_graph(sample_flowpaths_data, sample_network_data) + + # wb-5 has no toid, so it should be a terminal node with no outgoing edges + nodes = graph.nodes() + assert "wb-5" in nodes + + # Check that wb-0 is automatically added + assert "wb-0" in nodes + + def test_build_graph_wb0_addition(self, simple_flowpaths_data, simple_network_data): + """Test that wb-0 is automatically added if not present""" + graph = _build_graph(simple_flowpaths_data, simple_network_data) + + nodes = graph.nodes() + assert "wb-0" in nodes + + def test_build_graph_empty_input(self): + """Test behavior with empty input""" + # Create empty DataFrames with proper string types to avoid schema issues + empty_flowpaths = pl.DataFrame( + {"id": pl.Series([], dtype=pl.Utf8), "toid": pl.Series([], dtype=pl.Utf8)} + ).lazy() + empty_network = pl.DataFrame( + {"id": pl.Series([], dtype=pl.Utf8), "toid": pl.Series([], dtype=pl.Utf8)} + ).lazy() + + graph = _build_graph(empty_flowpaths, empty_network) + + # Should still have wb-0 + nodes = graph.nodes() + assert "wb-0" in nodes + assert len(nodes) == 1 + + def test_build_graph_with_cycles_disabled(self, simple_flowpaths_data, simple_network_data): + """Test that the graph is created with cycle checking 
disabled""" + graph = _build_graph(simple_flowpaths_data, simple_network_data) + + # The graph should be created successfully even if there might be cycles + assert isinstance(graph, rx.PyDiGraph) + + def test_build_graph_node_data_preservation(self, simple_flowpaths_data, simple_network_data): + """Test that node data is preserved correctly""" + graph = _build_graph(simple_flowpaths_data, simple_network_data) + + # Each node should contain its flowpath ID as data + for node_idx in graph.node_indices(): + node_data = graph.get_node_data(node_idx) + assert isinstance(node_data, str) + assert node_data.startswith("wb-") + + def test_build_graph_edge_data(self, simple_flowpaths_data, simple_network_data): + """Test that edge data contains nexus information""" + graph = _build_graph(simple_flowpaths_data, simple_network_data) + + # Edges should have nexus IDs as data + for edge in graph.edge_indices(): + edge_data = graph.get_edge_data_by_index(edge) + if edge_data: # Some edges might not have data + assert isinstance(edge_data, str) + assert edge_data.startswith("nex-") + + +class TestSerializationFunctions: + """Test the serialization helper functions""" + + def test_serialize_node_attrs(self): + """Test node attribute serialization""" + test_data = "wb-123" + result = serialize_node_attrs(test_data) + + assert isinstance(result, dict) + assert "data" in result + assert result["data"] == "wb-123" + + def test_serialize_edge_attrs(self): + """Test edge attribute serialization""" + test_data = "nex-456" + result = serialize_edge_attrs(test_data) + + assert isinstance(result, dict) + assert "data" in result + assert result["data"] == "nex-456" + + def test_read_node_attrs(self): + """Test node attribute reading""" + test_input = {"data": "wb-789"} + result = read_node_attrs(test_input) + + assert result == "wb-789" + + def test_read_edge_attrs(self): + """Test edge attribute reading""" + test_input = {"data": "nex-101"} + result = read_edge_attrs(test_input) + + assert result == "nex-101" + + def test_serialization_roundtrip(self): + """Test that serialization and deserialization work together""" + original_node = "wb-test" + original_edge = "nex-test" + + # Serialize + serialized_node = serialize_node_attrs(original_node) + serialized_edge = serialize_edge_attrs(original_edge) + + # Deserialize + deserialized_node = read_node_attrs(serialized_node) + deserialized_edge = read_edge_attrs(serialized_edge) + + assert deserialized_node == original_node + assert deserialized_edge == original_edge + + +class TestLoadUpstreamJson: + """Test the load_upstream_json function""" + + @pytest.fixture + def mock_catalog(self): + """Create a mock catalog for testing""" + catalog = Mock() + catalog.name = "test_catalog" + + # Mock network table + mock_network_table = Mock() + mock_network_table.to_polars.return_value = pl.DataFrame( + {"id": ["nex-1", "nex-2"], "toid": ["wb-2", "wb-0"]} + ).lazy() + mock_snapshot = Mock() + mock_snapshot.snapshot_id = 12345 + mock_network_table.current_snapshot.return_value = mock_snapshot + + # Mock flowpaths table + mock_flowpaths_table = Mock() + mock_flowpaths_table.to_polars.return_value = pl.DataFrame( + {"id": ["wb-1", "wb-2"], "toid": ["nex-1", "nex-2"]} + ).lazy() + mock_flowpaths_table.current_snapshot.return_value = mock_snapshot + + # Configure catalog to return these tables + def load_table_side_effect(table_name): + if "network" in table_name: + return mock_network_table + elif "flowpaths" in table_name: + return mock_flowpaths_table + else: + raise 
ValueError(f"Unknown table: {table_name}") + + catalog.load_table.side_effect = load_table_side_effect + return catalog + + def test_load_upstream_json_creates_new_file(self, mock_catalog): + """Test that load_upstream_json creates a new file when none exists""" + with tempfile.TemporaryDirectory() as tmp_dir: + output_path = Path(tmp_dir) + namespaces = ["test_namespace"] + + result = load_upstream_json(mock_catalog, namespaces, output_path) + + # Check that the result contains the namespace + assert "test_namespace" in result + assert isinstance(result["test_namespace"], rx.PyDiGraph) + + # Check that the file was created + expected_file = output_path / "test_namespace_graph_network.json" + assert expected_file.exists() + + def test_load_upstream_json_loads_existing_file(self, mock_catalog): + """Test that load_upstream_json loads an existing file with current snapshots""" + with tempfile.TemporaryDirectory() as tmp_dir: + output_path = Path(tmp_dir) + namespace = "test_namespace" + output_file = output_path / f"{namespace}_graph_network.json" + + # Create the exact format that RustworkX actually produces + mock_graph_data = { + "directed": True, + "multigraph": True, + "attrs": { + "generated_at": datetime.now(UTC).isoformat(), + "catalog_name": "test_catalog", + "flowpath_snapshot_id": "12345", + "network_snapshot_id": "12345", + }, + "nodes": [{"id": 0, "data": {"data": "wb-1"}}, {"id": 1, "data": {"data": "wb-2"}}], + "links": [], + } + + output_file.parent.mkdir(parents=True, exist_ok=True) + with open(output_file, "w") as f: + json.dump(mock_graph_data, f) + + result = load_upstream_json(mock_catalog, [namespace], output_path) + + # Should load the existing file + assert namespace in result + assert isinstance(result[namespace], rx.PyDiGraph) + + def test_load_upstream_json_rebuilds_outdated_file(self, mock_catalog): + """Test that load_upstream_json rebuilds when snapshot IDs don't match""" + with tempfile.TemporaryDirectory() as tmp_dir: + output_path = Path(tmp_dir) + namespace = "test_namespace" + output_file = output_path / f"{namespace}_graph_network.json" + + # Create the exact format that RustworkX actually produces with outdated snapshots + mock_graph_data = { + "directed": True, + "multigraph": True, + "attrs": { + "generated_at": datetime.now(UTC).isoformat(), + "catalog_name": "test_catalog", + "flowpath_snapshot_id": "old_snapshot", + "network_snapshot_id": "old_snapshot", + }, + "nodes": [{"id": 0, "data": {"data": "wb-old"}}], + "links": [], + } + + output_file.parent.mkdir(parents=True, exist_ok=True) + with open(output_file, "w") as f: + json.dump(mock_graph_data, f) + + result = load_upstream_json(mock_catalog, [namespace], output_path) + + # Should rebuild the graph + assert namespace in result + assert isinstance(result[namespace], rx.PyDiGraph) + + def test_load_upstream_json_multiple_namespaces(self, mock_catalog): + """Test loading multiple namespaces""" + with tempfile.TemporaryDirectory() as tmp_dir: + output_path = Path(tmp_dir) + namespaces = ["namespace1", "namespace2"] + + result = load_upstream_json(mock_catalog, namespaces, output_path) + + # Should create graphs for both namespaces + assert len(result) == 2 + assert "namespace1" in result + assert "namespace2" in result + + # Both should be graphs + assert isinstance(result["namespace1"], rx.PyDiGraph) + assert isinstance(result["namespace2"], rx.PyDiGraph) + + +class TestGraphConnectivityIntegration: + """Integration tests for the graph connectivity module""" + + def 
test_realistic_hydrofabric_graph(self): + """Test with realistic hydrofabric data structure""" + # Create a more realistic dataset + flowpaths_data = pl.DataFrame( + { + "id": ["wb-1001", "wb-1002", "wb-1003", "wb-1004", "wb-1005"], + "toid": ["nex-1001", "nex-1002", "nex-1003", "nex-1004", None], + } + ).lazy() + + network_data = pl.DataFrame( + { + "id": ["nex-1001", "nex-1002", "nex-1003", "nex-1004", "nex-1005"], + "toid": ["wb-1002", "wb-1003", "wb-1004", "wb-0", None], + } + ).lazy() + + graph = _build_graph(flowpaths_data, network_data) + + # Verify the graph structure + nodes = graph.nodes() + assert len(nodes) == 6 # 5 flowpaths + wb-0 + + # Verify connectivity: wb-1001 -> wb-1002 -> wb-1003 -> wb-1004 -> wb-0 + # wb-1005 should be disconnected (terminal) + assert "wb-1001" in nodes + assert "wb-1002" in nodes + assert "wb-1003" in nodes + assert "wb-1004" in nodes + assert "wb-1005" in nodes + assert "wb-0" in nodes + + def test_graph_attributes_preservation(self): + """Test that graph attributes are preserved during serialization""" + flowpaths_data = pl.DataFrame( + { + "id": ["wb-1", "wb-2"], + "toid": ["nex-1", "nex-2"], + } + ).lazy() + + network_data = pl.DataFrame( + { + "id": ["nex-1", "nex-2"], + "toid": ["wb-2", "wb-0"], + } + ).lazy() + + graph = _build_graph(flowpaths_data, network_data) + + # Add some attributes + test_attrs = { + "generated_at": datetime.now(UTC).isoformat(), + "catalog_name": "test", + "flowpath_snapshot_id": "123", + "network_snapshot_id": "456", + } + graph.attrs = test_attrs + + # Test serialization and deserialization + with tempfile.TemporaryDirectory() as tmp_dir: + test_file = Path(tmp_dir) / "test_graph.json" + + # Serialize + rx.node_link_json( + graph, + path=str(test_file), + graph_attrs=lambda attrs: dict(attrs), + edge_attrs=serialize_edge_attrs, + node_attrs=serialize_node_attrs, + ) + + # Deserialize + loaded_graph = rx.from_node_link_json_file( + str(test_file), + edge_attrs=read_edge_attrs, + node_attrs=read_node_attrs, + ) + + # Check that attributes are preserved + assert loaded_graph.attrs == test_attrs + + +# Parametrized tests for edge cases +@pytest.mark.parametrize( + "flowpath_ids,nexus_ids,expected_nodes", + [ + (["wb-1"], ["nex-1"], 2), # Single flowpath + ([], [], 1), # Empty input (just wb-0) + (["wb-1", "wb-2", "wb-3"], ["nex-1", "nex-2"], 4), # More flowpaths than nexus + ], +) +def test_build_graph_edge_cases(flowpath_ids, nexus_ids, expected_nodes): + """Test various edge cases in graph building""" + # Handle empty case with proper string types + if not flowpath_ids: + flowpaths_data = pl.DataFrame( + { + "id": pl.Series([], dtype=pl.Utf8), + "toid": pl.Series([], dtype=pl.Utf8), + } + ).lazy() + else: + flowpaths_data = pl.DataFrame( + { + "id": flowpath_ids, + "toid": nexus_ids + [None] * (len(flowpath_ids) - len(nexus_ids)), + } + ).lazy() + + if not nexus_ids: + network_data = pl.DataFrame( + { + "id": pl.Series([], dtype=pl.Utf8), + "toid": pl.Series([], dtype=pl.Utf8), + } + ).lazy() + else: + network_data = pl.DataFrame( + { + "id": nexus_ids, + "toid": ["wb-0"] * len(nexus_ids), + } + ).lazy() + + graph = _build_graph(flowpaths_data, network_data) + assert len(graph.nodes()) == expected_nodes + + +def test_mock_catalog_integration(mock_catalog, tmp_path): + """Test that mock catalog works with the fixed graph building""" + catalog = mock_catalog("glue") + + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + + assert "mock_hf" in graph_dict + assert 
graph_dict["mock_hf"].num_nodes() > 0 + + # Graph should be valid + graph = graph_dict["mock_hf"] + assert isinstance(graph.nodes(), list) + assert len(graph.nodes()) > 0 diff --git a/tests/hydrofabric/test_find_origin.py b/tests/hydrofabric/test_find_origin.py new file mode 100644 index 0000000..33db5c7 --- /dev/null +++ b/tests/hydrofabric/test_find_origin.py @@ -0,0 +1,285 @@ +"""Tests for the find_origin function using the RustworkX graph-based mock catalog""" + +import polars as pl +import pytest + +from icefabric.hydrofabric.origin import find_origin +from icefabric.schemas.hydrofabric import IdType + + +class TestFindOrigin: + """Test the find_origin function with various identifier types""" + + def test_find_origin_success_with_hl_uri(self, mock_catalog): + """Test successfully finding an origin by hl_uri""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + # Get valid hl_uri values from the network table + hl_uri_records = network_table.filter(pl.col("hl_uri").is_not_null()).collect() + + if hl_uri_records.height > 0: + # Test with the first valid hl_uri + test_hl_uri = hl_uri_records["hl_uri"][0] + result = find_origin(network_table, test_hl_uri, IdType.HL_URI) + + assert result.height == 1 + assert result["id"][0].startswith("wb-") + + expected_columns = ["id", "toid", "vpuid", "hydroseq"] + assert all(col in result.columns for col in expected_columns) + else: + pytest.skip("No hl_uri values found in mock data") + + def test_find_origin_with_wb_id_type(self, mock_catalog): + """Test finding origin by watershed ID (wb-*)""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + # Get a valid wb-* ID from the network table + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + test_wb_id = wb_records["id"][0] + result = find_origin(network_table, test_wb_id, IdType.ID) + + assert result.height == 1 + assert result["id"][0] == test_wb_id + else: + pytest.skip("No wb-* IDs found in mock data") + + def test_find_origin_with_poi_id(self, mock_catalog): + """Test finding origin by POI ID""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + # Get valid POI ID values from the network table + poi_records = network_table.filter(pl.col("poi_id").is_not_null()).collect() + + if poi_records.height > 0: + # Test with the first valid POI ID + test_poi_id = float(poi_records["poi_id"][0]) + result = find_origin(network_table, test_poi_id, IdType.POI_ID) + + assert result.height == 1 + assert result["id"][0].startswith("wb-") + + # Verify the POI ID matches + assert float(result["poi_id"][0]) == test_poi_id + else: + pytest.skip("No POI IDs found in mock data") + + @pytest.mark.parametrize("id_type", [IdType.ID, IdType.HL_URI, IdType.POI_ID]) + def test_find_origin_with_sample_graph_data(self, mock_catalog, id_type): + """Test find_origin with various ID types from the RustworkX graph data""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + # Get appropriate test identifier based on type + if id_type == IdType.ID: + # Get first wb-* ID + candidates = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + if candidates.height == 0: + pytest.skip(f"No valid {id_type.value} identifiers found") + test_identifier = candidates["id"][0] + + elif id_type == IdType.HL_URI: + # Get first non-null hl_uri + candidates = 
network_table.filter(pl.col("hl_uri").is_not_null()).collect() + if candidates.height == 0: + pytest.skip(f"No valid {id_type.value} identifiers found") + test_identifier = candidates["hl_uri"][0] + + elif id_type == IdType.POI_ID: + # Get first non-null poi_id + candidates = network_table.filter(pl.col("poi_id").is_not_null()).collect() + if candidates.height == 0: + pytest.skip(f"No valid {id_type.value} identifiers found") + test_identifier = float(candidates["poi_id"][0]) + + # Test the find_origin function + result = find_origin(network_table, test_identifier, id_type) + + # Validate result + assert result.height == 1 + assert result["id"][0].startswith("wb-") + + # Verify the identifier matches in the result + if id_type == IdType.ID: + assert result["id"][0] == test_identifier + elif id_type == IdType.HL_URI: + assert result["hl_uri"][0] == test_identifier + elif id_type == IdType.POI_ID: + assert float(result["poi_id"][0]) == test_identifier + + def test_find_origin_handles_null_values(self): + """Test that function handles null values properly""" + # Create test data with null hl_uri values + test_data = pl.DataFrame( + [ + { + "id": "wb-NULL001", + "toid": "nex-NULL001", + "vpuid": "hi", + "hydroseq": 1000.0, + "hl_uri": None, + "poi_id": None, + "divide_id": "cat-NULL001", + "ds_id": None, + "mainstem": None, + "hf_source": "NHDPlusHR", + "hf_id": "1", + "lengthkm": 1.0, + "areasqkm": 1.0, + "tot_drainage_areasqkm": 1.0, + "type": "waterbody", + "topo": "fl-nex", + }, + { + "id": "wb-VALID001", + "toid": "nex-VALID001", + "vpuid": "hi", + "hydroseq": 2000.0, + "hl_uri": "ValidGage", + "poi_id": 123.0, + "divide_id": "cat-VALID001", + "ds_id": None, + "mainstem": None, + "hf_source": "NHDPlusHR", + "hf_id": "2", + "lengthkm": 1.0, + "areasqkm": 1.0, + "tot_drainage_areasqkm": 1.0, + "type": "waterbody", + "topo": "fl-nex", + }, + ] + ).lazy() + + # Should find the valid record by hl_uri + result = find_origin(test_data, "ValidGage", IdType.HL_URI) + assert result.height == 1 + assert result["id"][0] == "wb-VALID001" + + # Should find the valid record by POI ID + result = find_origin(test_data, 123.0, IdType.POI_ID) + assert result.height == 1 + assert result["id"][0] == "wb-VALID001" + + def test_find_origin_with_empty_dataframe(self): + """Test behavior with empty network table""" + empty_df = pl.DataFrame( + { + "id": pl.Series([], dtype=pl.Utf8), + "toid": pl.Series([], dtype=pl.Utf8), + "vpuid": pl.Series([], dtype=pl.Utf8), + "hydroseq": pl.Series([], dtype=pl.Float64), + "hl_uri": pl.Series([], dtype=pl.Utf8), + "poi_id": pl.Series([], dtype=pl.Float64), + } + ).lazy() + + with pytest.raises(ValueError, match=r"No origin found"): + find_origin(empty_df, "AnyIdentifier", IdType.HL_URI) + + def test_find_origin_nonexistent_identifier(self, mock_catalog): + """Test behavior when identifier doesn't exist""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + with pytest.raises(ValueError, match=r"No origin found"): + find_origin(network_table, "nonexistent-gage-999999", IdType.HL_URI) + + with pytest.raises(ValueError, match=r"No origin found"): + find_origin(network_table, "wb-999999", IdType.ID) + + with pytest.raises(ValueError, match=r"No origin found"): + find_origin(network_table, 999999.0, IdType.POI_ID) + + def test_find_origin_data_structure_validation(self, mock_catalog): + """Test that the returned data has the expected structure""" + catalog = mock_catalog("glue") + network_table = 
catalog.load_table("mock_hf.network").to_polars() + + # Get a valid wb-* ID from the graph-based data + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + test_wb_id = wb_records["id"][0] + result = find_origin(network_table, test_wb_id, IdType.ID) + + # Validate structure + assert isinstance(result, pl.DataFrame) + assert result.height == 1 + + # Check required columns exist + required_columns = ["id", "toid", "vpuid"] + for col in required_columns: + assert col in result.columns + + # Validate data types + assert isinstance(result["id"][0], str) + assert result["vpuid"][0] == "hi" # Should be Hawaii + + # Check that toid is either a string or None + toid_value = result["toid"][0] + assert toid_value is None or isinstance(toid_value, str) + else: + pytest.skip("No wb-* IDs found in mock data") + + def test_find_origin_with_multiple_vpu_handling(self, mock_catalog): + """Test that find_origin works correctly with VPU filtering""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + # All mock data should be Hawaii (vpuid = "hi") + vpu_values = network_table.select(pl.col("vpuid").unique()).collect()["vpuid"].to_list() + + # Should only have Hawaii VPU in mock data + assert "hi" in vpu_values + + # Test with a valid wb-* ID + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + test_wb_id = wb_records["id"][0] + result = find_origin(network_table, test_wb_id, IdType.ID) + + assert result.height == 1 + assert result["vpuid"][0] == "hi" + else: + pytest.skip("No wb-* IDs found in mock data") + + @pytest.mark.parametrize("invalid_type", ["invalid_type", None, 123]) + def test_find_origin_invalid_id_type(self, mock_catalog, invalid_type): + """Test that invalid ID types are handled appropriately""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + + # This should raise an error due to invalid IdType + with pytest.raises((ValueError, TypeError, AttributeError)): + find_origin(network_table, "wb-test", invalid_type) + + def test_find_origin_graph_consistency(self, mock_catalog): + """Test that find_origin works consistently with the RustworkX graph structure""" + catalog = mock_catalog("glue") + network_table = catalog.load_table("mock_hf.network").to_polars() + graph = catalog.get_connectivity_graph("mock_hf") + + # Get a node from the graph + graph_nodes = graph.nodes() + wb_nodes = [node for node in graph_nodes if node.startswith("wb-")] + + if wb_nodes: + test_node = wb_nodes[0] + + # This node should be findable in the network table + result = find_origin(network_table, test_node, IdType.ID) + + assert result.height == 1 + assert result["id"][0] == test_node + + # The result should have data that's consistent with being part of a graph + assert result["toid"][0] is None or isinstance(result["toid"][0], str) + else: + pytest.skip("No wb-* nodes found in graph") diff --git a/tests/hydrofabric/test_subset.py b/tests/hydrofabric/test_subset.py new file mode 100644 index 0000000..a892cd2 --- /dev/null +++ b/tests/hydrofabric/test_subset.py @@ -0,0 +1,500 @@ +"""Concise tests for subset hydrofabric functionality using RustworkX graph""" + +from pathlib import Path + +import geopandas as gpd +import pandas as pd +import polars as pl +import pytest +from pandas.testing import assert_series_equal + +from icefabric.builds.graph_connectivity import load_upstream_json +from 
icefabric.hydrofabric.subset import ( + get_upstream_segments, + subset_hydrofabric, + subset_hydrofabric_vpu, + subset_layers, +) +from icefabric.schemas.hydrofabric import IdType +from tests.conftest import MockCatalog + + +class TestGetUpstreamSegments: + """Test the upstream segment traversal logic""" + + def test_upstream_tracing_with_graph(self, test_wb_id, mock_catalog, tmp_path: Path) -> None: + """Test upstream tracing scenarios with the actual RustworkX graph""" + catalog = mock_catalog("glue") + + # Load the graph from the mock catalog + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get some nodes from the graph to test with + graph_nodes = graph.nodes() + wb_nodes = [node for node in graph_nodes if node.startswith("wb-")] + + if len(wb_nodes) >= 2: + # Test with the first available node + result = get_upstream_segments(test_wb_id, graph) + + # Result should be a set of node indices + assert isinstance(result, set) + assert len(result) >= 1 # At least the origin + else: + pytest.skip("Not enough wb- nodes in mock graph for testing") + + def test_upstream_tracing_nonexistent_node(self, mock_catalog, tmp_path: Path) -> None: + """Test upstream tracing with a nonexistent node""" + catalog = mock_catalog("glue") + + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Test with a nonexistent node + result = get_upstream_segments("wb-nonexistent-999999", graph) + + # Should return empty set for nonexistent nodes + assert isinstance(result, set) + assert len(result) == 0 + + +class TestSubsetLayers: + """Test the layer subsetting functionality""" + + @pytest.mark.parametrize( + "layers,expected_core_layers", + [ + (["network", "flowpaths", "nexus", "divides"], ["network", "flowpaths", "nexus", "divides"]), + ([], ["network", "flowpaths", "nexus", "divides"]), # Should add core layers + (["pois"], ["network", "flowpaths", "nexus", "divides", "pois"]), + ], + ) + def test_layer_inclusion( + self, mock_catalog: MockCatalog, layers: list[str], expected_core_layers: list[str] + ) -> None: + """Test that correct layers are included in results""" + catalog = mock_catalog("glue") + + # Get some valid upstream IDs from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height >= 2: + # Use the first few wb- IDs as upstream_ids + upstream_ids = set(wb_records["id"][:2].to_list()) + + result = subset_layers(catalog, "mock_hf", layers, upstream_ids, "hi") + + # Core layers should always be present + core_layers = ["network", "flowpaths", "nexus", "divides"] + for layer in core_layers: + assert layer in result + assert len(result[layer]) > 0 + + # Check data types + assert isinstance(result["network"], pd.DataFrame) + assert isinstance(result["flowpaths"], gpd.GeoDataFrame) + assert isinstance(result["nexus"], gpd.GeoDataFrame) + assert isinstance(result["divides"], gpd.GeoDataFrame) + else: + pytest.skip("Not enough wb- records in mock data") + + def test_empty_upstream_ids_raises_assertion(self, mock_catalog: MockCatalog) -> None: + """Test that empty upstream IDs cause assertion errors""" + catalog = mock_catalog("glue") + + with pytest.raises(AssertionError, match="No flowpaths found"): + subset_layers(catalog, "mock_hf", ["network"], set(), "hi") + + +class TestSubsetHydrofabric: + """Test the main 
subset hydrofabric function""" + + def test_successful_subsetting_with_wb_id( + self, test_wb_id: str, mock_catalog: MockCatalog, tmp_path: Path + ) -> None: + """Test successful subsetting with a watershed ID""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get a valid wb- ID from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + result = subset_hydrofabric( + catalog=catalog, + identifier=test_wb_id, + id_type=IdType.ID, + layers=["network", "flowpaths", "nexus", "divides"], + namespace="mock_hf", + graph=graph, + ) + + # Validate basic structure + assert isinstance(result, dict) + core_layers = ["network", "flowpaths", "nexus", "divides"] + for layer in core_layers: + assert layer in result + assert len(result[layer]) > 0 + + # Origin should be included in network + network_df = result["network"] + assert test_wb_id in network_df["id"].values + else: + pytest.skip("No wb- records found in mock data") + + def test_poi_id_subsetting(self, mock_catalog: MockCatalog, tmp_path: Path) -> None: + """Test POI ID subsetting""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get a valid POI ID from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + poi_records = network_table.filter(pl.col("poi_id").is_not_null()).collect() + + if poi_records.height > 0: + poi_id_value = float(poi_records["poi_id"][-1]) + + try: + result = subset_hydrofabric( + catalog=catalog, + identifier=poi_id_value, + id_type=IdType.POI_ID, + layers=["network", "flowpaths", "nexus", "divides"], + namespace="mock_hf", + graph=graph, + ) + + # Should have valid structure if successful + assert isinstance(result, dict) + assert "network" in result + assert len(result["network"]) > 0 + + except ValueError as e: + # POI might not exist or have issues, which is acceptable + if "No origin found" not in str(e): + raise + else: + pytest.skip("No POI records found in mock data") + + def test_hl_uri_subsetting(self, mock_catalog: MockCatalog, tmp_path: Path) -> None: + """Test HL_URI subsetting""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get a valid HL_URI from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + hl_records = network_table.filter(pl.col("hl_uri").is_not_null()).collect() + + if hl_records.height > 0: + test_hl_uri = hl_records["hl_uri"][-1] + + try: + result = subset_hydrofabric( + catalog=catalog, + identifier=test_hl_uri, + id_type=IdType.HL_URI, + layers=["network", "flowpaths", "nexus", "divides"], + namespace="mock_hf", + graph=graph, + ) + + # Should have valid structure if successful + assert isinstance(result, dict) + assert "network" in result + assert len(result["network"]) > 0 + + except ValueError as e: + # HL_URI might not exist or have issues, which is acceptable + if "No origin found" not in str(e): + raise + else: + pytest.skip("No HL_URI records found in mock data") + + def test_vpu_id_subsetting(self, mock_catalog: MockCatalog) -> 
None: + """Test VPUID subsetting""" + catalog = mock_catalog("glue") + + network_table = catalog.load_table("mock_hf.network").to_polars() + vpu_records = network_table.filter(pl.col("vpuid").is_not_null()).collect() + + if vpu_records.height > 0: + test_vpu = vpu_records["vpuid"][-1] + + try: + result = subset_hydrofabric_vpu( + catalog=catalog, + layers=["network", "flowpaths", "nexus", "divides"], + namespace="mock_hf", + vpu_id=test_vpu, + ) + + # Should have valid structure if successful and rows with only requested vpu + assert isinstance(result, dict) + assert "network" in result + assert len(result["network"]) > 0 + assert_series_equal( + result["network"]["vpuid"], + pd.Series( + name="vpuid", + data=[test_vpu for _i in range(len(result["network"]["vpuid"].index))], + index=result["network"]["vpuid"].index, + ), + ) + + except ValueError as e: + if "No origin found" not in str(e): + raise + else: + pytest.skip("No VPUID records found in mock data") + + def test_vpu_id_subsetting__none(self, mock_catalog: MockCatalog) -> None: + """Test VPUID subsetting - no rows found (invalid vpu)""" + catalog = mock_catalog("glue") + + network_table = catalog.load_table("mock_hf.network").to_polars() + vpu_records = network_table.filter(pl.col("vpuid").is_not_null()).collect() + + if vpu_records.height > 0: + # create fake vpu and assert it's not a valid vpu + # if this assertion fails, check mock data + vpus = vpu_records.select(pl.col("vpuid").unique()).to_series().to_list() + test_vpu = "fake_vpu" + assert test_vpu not in vpus + + with pytest.raises(AssertionError): + _result = subset_hydrofabric_vpu( + catalog=catalog, + layers=["network", "flowpaths", "nexus", "divides"], + namespace="mock_hf", + vpu_id=test_vpu, + ) + + else: + pytest.skip("No VPUID records found in mock data") + + def test_upstream_tracing_completeness( + self, test_wb_id: str, mock_catalog: MockCatalog, tmp_path: Path + ) -> None: + """Test that upstream tracing finds connected segments""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Find a node that has upstream connections + graph_nodes = graph.nodes() + wb_nodes = [node for node in graph_nodes if node.startswith("wb-")] + + if len(wb_nodes) > 1: + result = subset_hydrofabric( + catalog=catalog, + identifier=test_wb_id, + id_type=IdType.ID, + layers=["network"], + namespace="mock_hf", + graph=graph, + ) + + network_df = result["network"] + + # Origin should be included + assert test_wb_id in network_df["id"].values + + # Should have at least one record + assert len(network_df) >= 1 + else: + pytest.skip("Not enough wb- nodes for upstream testing") + + def test_nonexistent_identifier_raises_error(self, mock_catalog: MockCatalog, tmp_path: Path) -> None: + """Test that nonexistent identifier raises appropriate error""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + with pytest.raises(ValueError, match="No origin found"): + subset_hydrofabric( + catalog=catalog, + identifier="nonexistent-gage-999999", + id_type=IdType.HL_URI, + layers=["network"], + namespace="mock_hf", + graph=graph, + ) + + +class TestDataConsistency: + """Test data consistency and relationships between tables""" + + def test_table_relationship_consistency( + self, test_wb_id: str, mock_catalog: 
MockCatalog, tmp_path: Path + ) -> None: + """Test that relationships between tables follow the hydrofabric data model""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get a valid wb- ID from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + result = subset_hydrofabric( + catalog=catalog, + identifier=test_wb_id, + id_type=IdType.ID, + layers=["network", "flowpaths", "nexus", "divides", "pois"], + namespace="mock_hf", + graph=graph, + ) + + network_df = result["network"] + flowpaths_df = result["flowpaths"] + divides_df = result["divides"] + nexus_df = result["nexus"] + + # Basic data presence + assert len(network_df) > 0 + assert len(flowpaths_df) > 0 + assert len(divides_df) > 0 + assert len(nexus_df) > 0 + + # Key relationship: network.id ↔ flowpaths.id (same watershed) + network_ids = set(network_df["id"].values) + flowpath_ids = set(flowpaths_df["id"].values) + common_network_flowpath = network_ids.intersection(flowpath_ids) + assert len(common_network_flowpath) > 0, "Network and flowpaths should share watershed IDs" + + # Key relationship: network.divide_id → divides.divide_id + network_divide_ids = set(network_df["divide_id"].dropna().values) + divides_divide_ids = set(divides_df["divide_id"].values) + assert network_divide_ids.issubset(divides_divide_ids), ( + "All network divide_ids should exist in divides table" + ) + + # POI relationships if POIs exist + if "pois" in result and len(result["pois"]) > 0: + pois_df = result["pois"] + + # network.poi_id → pois.poi_id + network_poi_ids = set(network_df["poi_id"].dropna().astype(str).values) + pois_poi_ids = set(pois_df["poi_id"].astype(str).values) + poi_overlap = network_poi_ids.intersection(pois_poi_ids) + if len(network_poi_ids) > 0: + assert len(poi_overlap) > 0, "Network POI IDs should match POIs table POI IDs" + + # VPU consistency across all tables + for df_name, df in [ + ("network", network_df), + ("flowpaths", flowpaths_df), + ("nexus", nexus_df), + ("divides", divides_df), + ]: + if "vpuid" in df.columns: + vpu_values = set(df["vpuid"].dropna().values) + assert vpu_values == {"hi"} or len(vpu_values) == 0, ( + f"{df_name} should have consistent VPU values" + ) + else: + pytest.skip("No wb- records found in mock data") + + +class TestOptionalLayers: + """Test optional layer handling""" + + @pytest.mark.parametrize( + "optional_layers", + [ + ["divide-attributes"], + ["flowpath-attributes"], + ["pois"], + ["hydrolocations"], + ["divide-attributes", "pois"], # Multiple optional layers + ], + ) + def test_optional_layer_loading( + self, test_wb_id: str, mock_catalog: MockCatalog, optional_layers: list[str], tmp_path: Path + ) -> None: + """Test that optional layers can be loaded without errors""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get a valid wb- ID from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + all_layers = ["network", "flowpaths", "nexus", "divides"] + optional_layers + + result = subset_hydrofabric( + 
catalog=catalog, + identifier=test_wb_id, + id_type=IdType.ID, + layers=all_layers, + namespace="mock_hf", + graph=graph, + ) + + # Core layers should always be present + core_layers = ["network", "flowpaths", "nexus", "divides"] + for layer in core_layers: + assert layer in result + assert len(result[layer]) > 0 + + # Optional layers should be present if they have data, and be valid DataFrames + for layer in optional_layers: + if layer in result: + assert isinstance(result[layer], pd.DataFrame | gpd.GeoDataFrame) + # Don't assert length > 0 as some optional layers might legitimately be empty + else: + pytest.skip("No wb- records found in mock data") + + def test_lakes_layer_loading_separately( + self, test_wb_id: str, mock_catalog: MockCatalog, tmp_path: Path + ) -> None: + """Test lakes layer separately since it has different filtering logic""" + catalog = mock_catalog("glue") + + # Get the connectivity graph + graph_dict = load_upstream_json(catalog=catalog, namespaces=["mock_hf"], output_path=tmp_path) + graph = graph_dict["mock_hf"] + + # Get a valid wb- ID from the mock data + network_table = catalog.load_table("mock_hf.network").to_polars() + wb_records = network_table.filter(pl.col("id").str.starts_with("wb-")).collect() + + if wb_records.height > 0: + result = subset_hydrofabric( + catalog=catalog, + identifier=test_wb_id, + id_type=IdType.ID, + layers=["network", "flowpaths", "nexus", "divides", "lakes"], + namespace="mock_hf", + graph=graph, + ) + + # Core layers should be present + assert "network" in result + assert "lakes" in result + assert isinstance(result["lakes"], gpd.GeoDataFrame) + else: + pytest.skip("No wb- records found in mock data") diff --git a/tests/integration/test_icechunk.py b/tests/integration/test_icechunk.py new file mode 100644 index 0000000..b633b4b --- /dev/null +++ b/tests/integration/test_icechunk.py @@ -0,0 +1,16 @@ +import pytest +import xarray as xr + +from icefabric.builds import IcechunkRepo +from icefabric.schemas import NGWPCTestLocations + +ic_list = NGWPCTestLocations._member_names_ +params = [pytest.param(getattr(NGWPCTestLocations, name), id=f"Icechunk {name}") for name in ic_list] + + +@pytest.mark.parametrize("ic", params) +def test_icechunk_repo(ic: NGWPCTestLocations) -> None: + """Confirm icechunk repos are valid""" + ic_repo = IcechunkRepo(location=ic.path) + ic_data = ic_repo.retrieve_dataset() + assert isinstance(ic_data, xr.core.dataset.Dataset) diff --git a/tests/integration/test_topobathy_icechunk.py b/tests/integration/test_topobathy_icechunk.py new file mode 100644 index 0000000..d43b248 --- /dev/null +++ b/tests/integration/test_topobathy_icechunk.py @@ -0,0 +1,90 @@ +import os +from pathlib import Path + +import numpy as np +import pytest +import rasterio +from dotenv import load_dotenv +from pyprojroot import here + +from icefabric.builds import IcechunkRepo +from icefabric.schemas import NGWPCTestLocations + +load_dotenv() + + +@pytest.mark.slow +def test_topobathy(ic_raster: str) -> None: + """This test is SLOW. It will temporarily download all topobathy layers, up to 9 GB individually. + To run, call `pytest tests --run-slow` + Corrupted rasters will load correctly in xarray but incorrectly in rasterio and cannot be exported. + This test checks that when exported, a dataset has values that are non-no data. 
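+    Concretely, the exported GeoTIFF must contain at least one pixel that is not the nodata value and must not be constant (min != max).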
+    """
+    data_dir = here() / "tests/data"
+    os.makedirs(data_dir, exist_ok=True)
+
+    temp_path = data_dir / "temp_raster.tif"
+
+    local_creds_file = here() / ".env"
+    if local_creds_file.exists() is False:
+        pytest.skip("Skipping as AWS creds are not available")
+
+    try:
+        # export icechunk zarr to geotiff raster
+        repo = IcechunkRepo(location=NGWPCTestLocations[ic_raster].path)
+        ds = repo.retrieve_dataset()
+        raster = ds.elevation
+        raster.rio.to_raster(temp_path, tiled=True, compress="LZW", bigtiff="YES")
+
+        # open raster version
+        with rasterio.open(temp_path, "r") as f:
+            profile = f.profile
+            ras = f.read(1)
+
+        # assert all values are not nodata
+        total_nd = np.where(ras == profile["nodata"], 1, 0).sum()
+        assert total_nd != ras.size
+        assert ras.min() != ras.max()
+
+    finally:
+        if os.path.exists(temp_path):
+            os.remove(temp_path)
+
+
+@pytest.mark.local
+def test_local_topobathy(local_ic_raster: Path) -> None:
+    """Tests local topobathy against local icechunk stores
+
+    Parameters
+    ----------
+    local_ic_raster : Path
+        Path to the local icechunk store holding the topobathy raster
+    """
+    data_dir = here() / "tests/data"
+    os.makedirs(data_dir, exist_ok=True)
+
+    temp_path = data_dir / "temp_raster.tif"
+
+    if local_ic_raster.exists() is False:
+        pytest.skip("Local file for topobathy missing. Skipping test")
+
+    try:
+        # export icechunk zarr to geotiff raster
+        repo = IcechunkRepo(local_ic_raster)
+        ds = repo.retrieve_dataset()
+        raster = ds.elevation
+        raster.rio.to_raster(temp_path, tiled=True, compress="LZW", bigtiff="YES")
+
+        # open raster version
+        with rasterio.open(temp_path, "r") as f:
+            profile = f.profile
+            ras = f.read(1)
+
+        # assert all values are not nodata
+        total_nd = np.where(ras == profile["nodata"], 1, 0).sum()
+        assert total_nd != ras.size
+        assert ras.min() != ras.max()
+
+    finally:
+        if os.path.exists(temp_path):
+            os.remove(temp_path)
diff --git a/tests/ipes/__init__.py b/tests/ipes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/ipes/test_module_ipes.py b/tests/ipes/test_module_ipes.py
new file mode 100644
index 0000000..f212d19
--- /dev/null
+++ b/tests/ipes/test_module_ipes.py
@@ -0,0 +1,154 @@
+import polars as pl
+import pytest
+
+from icefabric.modules import (
+    get_lasam_parameters,
+    get_lstm_parameters,
+    get_noahowp_parameters,
+    get_sacsma_parameters,
+    get_smp_parameters,
+    get_snow17_parameters,
+    get_topmodel_parameters,
+    get_troute_parameters,
+)
+
+
+@pytest.fixture
+def test_identifiers(mock_catalog):
+    """Fixture that provides test identifiers for parameterization"""
+    catalog = mock_catalog("glue")
+    domain = "mock_hf"
+
+    # Get identifiers once for all tests
+    network_table = catalog.load_table(f"{domain}.network").to_polars()
+    identifiers = (
+        network_table.select(pl.col("hl_uri"))
+        .filter(pl.col("hl_uri").is_not_null())
+        .collect()
+        .to_pandas()
+        .values.squeeze()
+    )
+    return identifiers
+
+
+def test_topmodel_parameters(mock_catalog, sample_graph, test_identifiers):
+    """Test Topmodel parameter generation and attribute count for all identifiers"""
+    catalog = mock_catalog("glue")
+    namespace = "mock_hf"
+    for identifier in test_identifiers:
+        topmodel_models = get_topmodel_parameters(
+            catalog,
+            namespace,
+            identifier,
+            graph=sample_graph,
+        )
+
+        assert len(topmodel_models) > 0, f"No Topmodel parameters generated for {identifier}"
+
+
+def test_noahowp_parameters(mock_catalog, sample_graph, test_identifiers):
+    """Test Noah OWP Modular parameter generation and attribute count for all identifiers"""
+    catalog = mock_catalog("glue")
+    namespace
= "mock_hf" + for identifier in test_identifiers: + noahowp_models = get_noahowp_parameters( + catalog, + namespace, + identifier, + graph=sample_graph, + ) + + assert len(noahowp_models) > 0, f"No Noah OWP parameters generated for {identifier}" + + +def test_troute_parameters(mock_catalog, sample_graph, test_identifiers): + """Test T-Route parameter generation and attribute count for all identifiers""" + mock_catalog = mock_catalog("glue") + namespace = "mock_hf" + for identifier in test_identifiers: + troute_models = get_troute_parameters( + mock_catalog, + namespace, + identifier, + graph=sample_graph, + ) + + assert len(troute_models) > 0, f"No T-Route parameters generated for {identifier}" + + +@pytest.mark.parametrize("sft_included", [False, True]) +def test_lasam_parameters(mock_catalog, sample_graph, test_identifiers, sft_included): + """Test LASAM parameter generation with different sft_included values""" + catalog = mock_catalog("glue") + namespace = "mock_hf" + for identifier in test_identifiers: + lasam_models = get_lasam_parameters( + catalog, + namespace, + identifier, + sft_included=sft_included, + soil_params_file="vG_default_params_HYDRUS.dat", + graph=sample_graph, + ) + + assert len(lasam_models) > 0, f"No LASAM parameters generated for {identifier}" + + +@pytest.mark.parametrize("envca", [True, False]) +def test_snow17_parameters(mock_catalog, sample_graph, test_identifiers, envca): + """Test Snow17 parameter generation with different envca values""" + catalog = mock_catalog("glue") + namespace = "mock_hf" + for identifier in test_identifiers: + snow17_models = get_snow17_parameters( + catalog, + namespace, + identifier, + envca=envca, + graph=sample_graph, + ) + + assert len(snow17_models) > 0, f"No Snow17 parameters generated for {identifier}" + + +@pytest.mark.parametrize("envca", [True, False]) +def test_sacsma_parameters(mock_catalog, sample_graph, test_identifiers, envca): + """Test SAC-SMA parameter generation with different envca values""" + catalog = mock_catalog("glue") + namespace = "mock_hf" + for identifier in test_identifiers: + sacsma_models = get_sacsma_parameters(catalog, namespace, identifier, envca=envca, graph=sample_graph) + + assert len(sacsma_models) > 0, f"No SAC-SMA parameters generated for {identifier}" + + +@pytest.mark.parametrize("module_type", ["TopModel", "CFE-S", "CFE-X", "LASAM"]) +def test_smp_parameters(mock_catalog, sample_graph, test_identifiers, module_type): + """Test SMP parameter generation for different modules""" + catalog = mock_catalog("glue") + namespace = "mock_hf" + for identifier in test_identifiers: + smp_models = get_smp_parameters( + catalog, + namespace, + identifier, + extra_module=module_type, + graph=sample_graph, + ) + + assert len(smp_models) > 0, f"No SMP parameters generated for {identifier} with {module_type}" + + +def test_lstm_parameters(mock_catalog, sample_graph, test_identifiers): + """Test LSTM parameter generation and attribute count for all identifiers""" + catalog = mock_catalog("glue") + namespace = "mock_hf" + for identifier in test_identifiers: + lstm_models = get_lstm_parameters( + catalog, + namespace, + identifier, + graph=sample_graph, + ) + + assert len(lstm_models) > 0, f"No LSTM parameters generated for {identifier}" diff --git a/tools/R/GL_divide_attr/build_gpkg.R b/tools/R/GL_divide_attr/build_gpkg.R new file mode 100644 index 0000000..9b0caf9 --- /dev/null +++ b/tools/R/GL_divide_attr/build_gpkg.R @@ -0,0 +1,35 @@ +library(sf) + +gpkg_in <- 
'/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg' +gpkg_out <- 'gl_nextgen_divide_attr.gpkg' + +flowpaths <- st_read(gpkg_in, layer='flowpaths') +divides <- st_read(gpkg_in, layer='divides') +nexus <- st_read(gpkg_in, layer='nexus') +pois <- st_read(gpkg_in, layer='pois') +hydrolocations <- st_read(gpkg_in, layer='hydrolocations') +network <- st_read(gpkg_in, layer='network') +print('*******') +flowpath_attributes <- st_read(gpkg_in, layer='flowpath-attributes') + +print('read divide attributes') +divide_attributes <- read.csv('all.csv') + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +print('project layer') + +flowpaths <- st_transform(flowpaths, crs) +divides <- st_transform(divides, crs) +nexus <- st_transform(nexus, crs) +pois <- st_transform(pois, crs) +hydrolocations <- st_transform(hydrolocations, crs) + +st_write(flowpaths, gpkg_out, 'flowpaths') +st_write(divides, gpkg_out, 'divides', append=TRUE) +st_write(nexus, gpkg_out, 'nexus', append=TRUE) +st_write(pois, gpkg_out, 'pois', append=TRUE) +st_write(hydrolocations, gpkg_out, 'hydrolocations', append=TRUE) +st_write(network, gpkg_out, 'network', append=TRUE) +st_write(flowpath_attributes, gpkg_out, 'flowpath-attributes', append=TRUE) +st_write(divide_attributes, gpkg_out, 'divide-attributes', append=TRUE) diff --git a/tools/R/GL_divide_attr/dem/aspect.R b/tools/R/GL_divide_attr/dem/aspect.R new file mode 100644 index 0000000..cb652b9 --- /dev/null +++ b/tools/R/GL_divide_attr/dem/aspect.R @@ -0,0 +1,19 @@ +library(raster) +library(terra) +library(zonal) +library(sf) + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') +div <- st_transform(div,crs) + +aspect_r <- rast('usgs_250m_aspect_crop.tif') + +aspect_r <- project(aspect_r, crs) + +aspect <- execute_zonal(aspect_r,div,ID='divide_id',join=FALSE,fun=circular_mean) + +names(aspect) <- c('divide_id','circ_mean.aspect') + +write.csv(aspect, 'aspect.csv', row.names=FALSE) diff --git a/tools/R/GL_divide_attr/dem/elevation.R b/tools/R/GL_divide_attr/dem/elevation.R new file mode 100644 index 0000000..bac96aa --- /dev/null +++ b/tools/R/GL_divide_attr/dem/elevation.R @@ -0,0 +1,20 @@ +library(raster) +library(terra) +library(zonal) +library(sf) + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') +div <- st_transform(div,crs) + +elevation_r <- rast('usgs_250m_elev_v1.tif') + +elevation_r <- project(elevation_r, crs) + +v <- values(elevation_r)*100 +values(elevation_r) <- v + +elevation <- execute_zonal(elevation_r,div,ID='divide_id',join=FALSE) + +write.csv(elevation, 'elevation.csv', row.names=FALSE) diff --git a/tools/R/GL_divide_attr/dem/slope.R b/tools/R/GL_divide_attr/dem/slope.R new file mode 100644 index 0000000..61f3bc7 --- /dev/null +++ b/tools/R/GL_divide_attr/dem/slope.R @@ -0,0 +1,19 @@ +library(raster) +library(terra) +library(zonal) +library(sf) + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') +div <- st_transform(div,crs) + +slope_r <- rast('usgs_250m_slope_crop.tif') + +#slope_r 
<- project(slope_r, crs) + +slope <- execute_zonal(slope_r,div,ID='divide_id',join=FALSE) + +names(slope) <- c('divide_id','mean.slope') + +write.csv(slope, 'slope.csv', row.names=FALSE) diff --git a/tools/R/GL_divide_attr/gw/create_cfe_init_bmi_config.R b/tools/R/GL_divide_attr/gw/create_cfe_init_bmi_config.R new file mode 100644 index 0000000..f62c949 --- /dev/null +++ b/tools/R/GL_divide_attr/gw/create_cfe_init_bmi_config.R @@ -0,0 +1,51 @@ +create_cfe_init_bmi_config = function(basins, data_dir){ + +# Derive initial parameters for CFE based on NWM v3 parameter files and +# create the BMI config files for each catchment in the selected basins + +#rm(list=ls()) + +library(terra) +library(zonal) +library(data.table) +library(sf) +library(ncdf4) +library(raster) +library(rwrfhydro) + +group <- 1 + +# Groundwater parameters +message("processing groundwater parameters ...") +gw_file <- paste0(data_dir, "GWBUCKPARM_CONUS_FullRouting.nc") +gwparm <- GetNcdfFile(gw_file,quiet=TRUE) +gwvars <- names(gwparm) +gwparm <- data.table(gwparm) +setkey(gwparm, ComID) + +# bucket/catchment mapping +gwWeightFile <- paste0(data_dir,"gwbuck_to_maskid_basins_group_",group,".txt") +dtWgt <- read.table(gwWeightFile, header=TRUE, sep="\t", stringsAsFactors=FALSE) +dtWgt <- data.table(dtWgt) +setkey(dtWgt, ComID) +gwparm <- merge(gwparm, dtWgt, all.x=TRUE, all.y=FALSE, suffixes=c("", ".cat"), by="ComID") +dtGwPars <- subset(gwparm, !is.na(cat_id)) + +# add divide_id from the crosswalk table +cwt <- read.table(paste0(data_dir,"raster_id_crosswalk_basins_group_1.csv"), header=TRUE, sep=",", + colClasses=rep("character",7),stringsAsFactors=FALSE) +cwt$cat_id <- as.integer(cwt$cat_id) +dtGwPars <- merge(dtGwPars,cwt[,c("divide_id","gage","cat_id")],by="cat_id") +dtGwPars <- dtGwPars[,c("Coeff","Expon","Zmax","sumwt","divide_id","gage"),with=FALSE] + +# compute weighted mean +dtGwPars1 <- dtGwPars[,.(Coeff=sum(Coeff*sumwt)/sum(sumwt), + Expon=sum(Expon*sumwt)/sum(sumwt), + #Zmax=sum(Zmax*sumwt)/sum(sumwt)), + #Zmax=sum(Zmax*sumwt)/sum(sumwt)/1000*10), + Zmax=sum(Zmax*sumwt)/sum(sumwt)/1000), #to be confirmed, Zmax for NMW is in mm (but m for CFE) + by=.(divide_id,gage)] +names(dtGwPars1) <- c("divide_id","gage","Cgw","expon","max_gw_storage") + +write.csv(dtGwPars1, 'gw.csv') +} diff --git a/tools/R/GL_divide_attr/gw/create_crosswalk_gwbucket_catchement.R b/tools/R/GL_divide_attr/gw/create_crosswalk_gwbucket_catchement.R new file mode 100644 index 0000000..c1541af --- /dev/null +++ b/tools/R/GL_divide_attr/gw/create_crosswalk_gwbucket_catchement.R @@ -0,0 +1,54 @@ +# create a crosswalk table to match the NWM groudwater bucket ID (ComID) to the catchments of selected basins +# in order to properly transfer the NWM ground water parameters + +rm(list=ls()) + +library(raster) +library(data.table) +library(rwrfhydro) + +# mask file of the basins (created by rasterize_basins.R) +run1 <- "basins_group_1" +maskFile <- paste0("../data/masks/",run1,".tif") + +# NWM spatial weights file +wtFile <- "../data/spatialweights_1km_LongRange_NWMv3.0.nc" + +# Read rasters +mask1 <- raster(maskFile) +mask1[mask1<=0] <- NA + +# Output file (the crosswalk) +gwOutFile <- paste0("../data/gwbuck_to_maskid_",run1,".txt") + +# GW Buckets +wts <- ReadWtFile(wtFile) +wts <- wts[[1]] +ids <- unique(wts$IDmask) +dimy <- dim(mask1)[1] + +# Convert matrix to data frame of indices and values +bas4join <- data.frame(which(!is.na(as.matrix(mask1)), arr.ind=TRUE)) +bas4join$catid <- c(as.matrix(mask1)[!is.na(as.matrix(mask1))]) +# Assign ij, referenced 
from (1,1) at lower left corner to match spatial weight file +bas4join$i_index <- bas4join$col +bas4join$j_index <- as.integer(dimy+1-bas4join$row) + +# Join to weights table +bas4join <- data.table(bas4join) +wts <- data.table(wts) +setkey(bas4join, i_index, j_index) +setkey(wts, i_index, j_index) +wts <- merge(wts, bas4join, by=c("i_index", "j_index"), all.x=TRUE, all.y=FALSE) + +# Aggregate weights +setkey(wts, IDmask, catid) +wts.sum <- wts[, list(sumwt=sum(weight)), by=c("IDmask", "catid")] +#Slower: wts.sum.max <- wts.sum[, ':=' (whichMax = sumwt == max(.SD$sumwt)), by="IDmask"] +wts.sum.max <- wts.sum[wts.sum[, .I[sumwt == max(sumwt)], by=IDmask]$V1] +gwOut <- data.frame(wts.sum.max[!is.na(catid),]) +names(gwOut) <- c("ComID", "cat_id", "sumwt") + +write.table(gwOut, file=gwOutFile, sep="\t", row.names=FALSE) + + diff --git a/tools/R/GL_divide_attr/gw/create_crosswalk_gwbucket_catchment.R b/tools/R/GL_divide_attr/gw/create_crosswalk_gwbucket_catchment.R new file mode 100644 index 0000000..ab947db --- /dev/null +++ b/tools/R/GL_divide_attr/gw/create_crosswalk_gwbucket_catchment.R @@ -0,0 +1,55 @@ +# create a crosswalk table to match the NWM groudwater bucket ID (ComID) to the catchments of selected basins +# in order to properly transfer the NWM ground water parameters + +create_crosswalk_gwbucket_catchment = function(data_dir){ + +#rm(list=ls()) + +library(raster) +library(data.table) +library(rwrfhydro) + +# mask file of the basins (created by rasterize_basins.R) +run1 <- "basins_group_1" +maskFile <- paste0(data_dir,run1,".tif") + +# NWM spatial weights file +wtFile <- paste0(data_dir,"spatialweights_1km_LongRange_NWMv3.0.nc") + +# Read rasters +mask1 <- raster(maskFile) +mask1[mask1<=0] <- NA + +# Output file (the crosswalk) +gwOutFile <- paste0(data_dir,"gwbuck_to_maskid_",run1,".txt") + +# GW Buckets +wts <- ReadWtFile(wtFile) +wts <- wts[[1]] +ids <- unique(wts$IDmask) +dimy <- dim(mask1)[1] + +# Convert matrix to data frame of indices and values +bas4join <- data.frame(which(!is.na(as.matrix(mask1)), arr.ind=TRUE)) +bas4join$catid <- c(as.matrix(mask1)[!is.na(as.matrix(mask1))]) +# Assign ij, referenced from (1,1) at lower left corner to match spatial weight file +bas4join$i_index <- bas4join$col +bas4join$j_index <- as.integer(dimy+1-bas4join$row) + +# Join to weights table +bas4join <- data.table(bas4join) +wts <- data.table(wts) +setkey(bas4join, i_index, j_index) +setkey(wts, i_index, j_index) +wts <- merge(wts, bas4join, by=c("i_index", "j_index"), all.x=TRUE, all.y=FALSE) + +# Aggregate weights +setkey(wts, IDmask, catid) +wts.sum <- wts[, list(sumwt=sum(weight)), by=c("IDmask", "catid")] +#Slower: wts.sum.max <- wts.sum[, ':=' (whichMax = sumwt == max(.SD$sumwt)), by="IDmask"] +wts.sum.max <- wts.sum[wts.sum[, .I[sumwt == max(sumwt)], by=IDmask]$V1] +gwOut <- data.frame(wts.sum.max[!is.na(catid),]) +names(gwOut) <- c("ComID", "cat_id", "sumwt") + +write.table(gwOut, file=gwOutFile, sep="\t", row.names=FALSE) +} diff --git a/tools/R/GL_divide_attr/gw/rasterize_basins.R b/tools/R/GL_divide_attr/gw/rasterize_basins.R new file mode 100644 index 0000000..fdec141 --- /dev/null +++ b/tools/R/GL_divide_attr/gw/rasterize_basins.R @@ -0,0 +1,56 @@ +# rasterize selected basins given the gpkg (GDAL needs to be installed) + +rasterize_basins = function(basins, data_dir){ + +#rm(list=ls()) + +#library(rgdal) +library(raster) +library(sf) + +# NWM domain projection +prjstr <- "+proj=lcc +lat_1=30 +lat_2=60 +lat_0=40.0000076293945 +lon_0=-97 +x_0=0 +y_0=0 +a=6370000 +b=6370000 
+units=m +no_defs" + +# basin group +group <- 1 +#basins <- c("01123000","01350080","14141500","14187000") +str1 <- paste0("basins_group_",group) + +# hydrofabric file for the basins (all catchments together) +sf1 <- data.frame() +for (gage1 in basins) { +# str_gage1 <- ifelse(substr(gage1,1,1)=="0",substr(gage1,2,nchar(gage1)),gage1) + str_gage1 <- gage1 +# hydro_file <- paste0(data_dir,"gauge_",str_gage1,".gpkg") + hydro_file <- 'GL_all.gpkg' + sf0 <- read_sf(hydro_file, "GL_all") + sf0$gage <- gage1 + sf1 <- rbind(sf1,sf0) +} +sf1$cat_id <- 1:nrow(sf1) + +# transform projection +sf1 <- st_transform(sf1,crs(prjstr)) + +# write to shapefile +shp_file <- paste0(data_dir,str1,".shp") +st_write(sf1,shp_file,append=FALSE) + +# create raster using gdal_rasterize +file1 <- paste0(data_dir,str1,".tif") +system(paste0("cp ",data_dir,"geogrid_1km_blank.tif ",file1)) +while(!file.exists(file1)) Sys.sleep(1) + +system(paste0("gdal_rasterize -a cat_id -l ",str1," ",shp_file," ", file1)) + +# plot the raster to check +png(paste0(str1,".png")) +r1 <- raster(file1) +plot(r1) +dev.off() + +# save the raster_id / catchment id crosswalk table for later use in transferring NWM groundwater parameters +sf1$geom <- NULL +write.csv(sf1,file=paste0(data_dir,"raster_id_crosswalk_",str1,".csv"),quote=FALSE, row.names=FALSE) + +} diff --git a/tools/R/GL_divide_attr/gw/run_create_cfe_init_bmi_config.R b/tools/R/GL_divide_attr/gw/run_create_cfe_init_bmi_config.R new file mode 100644 index 0000000..29760f0 --- /dev/null +++ b/tools/R/GL_divide_attr/gw/run_create_cfe_init_bmi_config.R @@ -0,0 +1,19 @@ +#An interface between the Rscript command line arguments and the R functions +#args = commandArgs(trailingOnly = TRUE) + +source('create_cfe_init_bmi_config.R') +source('rasterize_basins.R') +source('create_crosswalk_gwbucket_catchment.R') + +#gage_ids <- eval(parse(text=args[1])) +#data_dir <- args[2] + +gage_ids <- '02AD010' +data_dir <- '/workspace/GL_attr/GW/data/' + +print("Running rasterize_basins") +rasterize_basins(gage_ids, data_dir) +print("Running create_crosswalk_gwbucket_catchment") +create_crosswalk_gwbucket_catchment(data_dir) +print("Running create_cfe_init_bmi_config") +create_cfe_init_bmi_config(gage_ids, data_dir) diff --git a/tools/R/GL_divide_attr/merge_all.R b/tools/R/GL_divide_attr/merge_all.R new file mode 100644 index 0000000..d987b37 --- /dev/null +++ b/tools/R/GL_divide_attr/merge_all.R @@ -0,0 +1,30 @@ +library(tidyverse) + +xy <- read.csv('xy/xy.csv') +soil_veg_type <- read.csv('soil_veg_type/soil_veg_type.csv') +nwm_soil <- read.csv('nwm_soil/nwm_soil.csv') +twi <- read.csv('twi/twi.csv') +elev <- read.csv('dem/elevation.csv') +slope <- read.csv('dem/slope.csv') +aspect <- read.csv('dem/aspect.csv') +gw <- read.csv('gw/gw.csv') + +gw <- gw %>% select(-gage) + +all_dfs <- list(xy, soil_veg_type, nwm_soil, twi, elev, slope, aspect, gw) + +all <- all_dfs %>% reduce(full_join, by='divide_id') + +all$mean.Coeff[is.na(all$mean.Coeff)] <- 0.05 +all$mean.Zmax[is.na(all$mean.Zmax)] <- 1 +all$mode.Expon[is.na(all$mode.Expon)] <- 1 + +all <- all %>% mutate(vpuid='gl') +all <- all %>% mutate(mean.impervious=0) + +all <- all %>% select(-X) + +print(str(all)) + +write.csv(all,'all.csv', row.names=FALSE) + diff --git a/tools/R/GL_divide_attr/nwm_soil/build_attr_nwm_soil.R b/tools/R/GL_divide_attr/nwm_soil/build_attr_nwm_soil.R new file mode 100644 index 0000000..888d6d7 --- /dev/null +++ b/tools/R/GL_divide_attr/nwm_soil/build_attr_nwm_soil.R @@ -0,0 +1,68 @@ +library(raster) +library(zonal) +library(sf) 
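+# Note on the aggregation below: the nwm_soil_func table pairs each raster with its
+# zonal statistic (mode for bexp, geometric mean for dksat and psisat, arithmetic mean
+# otherwise), and variables stored with four soil layers are aggregated one layer at a time.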
+library(terra) +library(tidyverse) + +div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div <- st_transform(div,crs) + +# Get NWM Soil attributes + +nwm_soil_names <- c('bexp', 'dksat', 'psisat', 'cwpvt', 'mfsno', 'mp', 'refkdt', 'slope_1km', 'smcmax', 'smcwlt', 'vcmx25') +nwm_soil_func <- c('mode', 'geom_mean', 'geom_mean', 'mean', 'mean', 'mean', 'mean', 'mean', 'mean', 'mean', 'mean') +nwm_soil_layers <-c(4,4,4,1,1,1,1,1,4,4,1) + +soil <- data.frame() + +for (x in 1:length(nwm_soil_names)){ + + name <- nwm_soil_names[x] + func <- nwm_soil_func[x] + layers <- nwm_soil_layers[x] + + for (layer in 1:layers){ + + if (layers == 1) { + rasterfile <- paste(name,'tif',sep='.') + + } else if (layers > 1) { + rasterfile <- paste(name,layer,sep='_') + rasterfile <- paste(rasterfile,'tif',sep='.') + } + + print(paste('processing:',rasterfile,sep=' ')) + r <- rast(rasterfile) + #r <- project(r,crs) + + if (func == 'mean') {attr_zonal <- execute_zonal(r,div,ID='divide_id',join=FALSE)} + if (func == 'mode') {attr_zonal <- execute_zonal(r,div,ID='divide_id',join=FALSE, fun=mode)} + if (func == 'geom_mean') {attr_zonal <- execute_zonal(r,div,ID='divide_id',join=FALSE, fun=geometric_mean)} + + if (layers == 1) { + col_name <- paste(func,name,sep='.') + } else if (layers > 1) { + soil_layers <- paste('soil_layers_stag=',layer,sep='') + col_name <- paste(func,name,sep='.') + col_name <- paste(col_name,soil_layers,sep='_') + } + + names(attr_zonal) <- c('divide_id',col_name) + print(x) + print(layer) + + if ((x == 1) & (layer == 1)) { + soil <- attr_zonal + print('first') + } else { + soil <- merge(soil,attr_zonal,by='divide_id') + print('next') + } + + } + +} +write.csv(soil,'nwm_soil.csv',row.names=FALSE) diff --git a/tools/R/GL_divide_attr/nwm_soil/nwm_soil.R b/tools/R/GL_divide_attr/nwm_soil/nwm_soil.R new file mode 100644 index 0000000..dfdc447 --- /dev/null +++ b/tools/R/GL_divide_attr/nwm_soil/nwm_soil.R @@ -0,0 +1,58 @@ +library(ncdf4) +library(raster) +library(sf) +library(terra) + +#div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/conus_nextgen.gpkg', layer='divides') +#div_unique <- div[!duplicated(div$divide_id),] + +#div_unique = st_read('hf2.2_divides.gpkg',layer='hf2.2_divides') + +filename <- 'soilproperties_CONUS_FullRouting.nc' +nwm_nc <- nc_open(filename) +nc_names <- names(nwm_nc$var) + +ext <- extent(363197.3,2006436.2,144063.1,1810826.8) + +mask <- raster('final_combined_calib_v3.tif') + +for (var in nc_names) { + print(var) + var_nc <- ncvar_get(nwm_nc,var) + + if(length(dim(var_nc)) == 2){ + + var_nc[var_nc == -9999] <- NA + var_nc <- t(var_nc)[nrow(t(var_nc)):1,] + var_r <- raster(var_nc) + crs(var_r) <- crs(mask) + extent(var_r) <- extent(mask) + res(var_r) <- res(mask) + var_r <- crop(var_r,ext) + raster_file <- paste(var,'tif',sep='.') + writeRaster(var_r, raster_file, overwrite=TRUE) + + } else if (length(dim(var_nc)) == 3) { + + num_layers <- dim(var_nc)[3] + for(x in 1:num_layers){ + + var_nc[var_nc == -9999] <- NA + print(x) + var_nc_layer <- var_nc[,,x] + var_nc_layer <- t(var_nc_layer)[nrow(t(var_nc_layer)):1,] + var_r <- raster(var_nc_layer) + crs(var_r) <- crs(mask) + extent(var_r) <- extent(mask) + res(var_r) <- res(mask) + var_r <- crop(var_r,ext) + raster_file <- paste(var,x,sep='_') + raster_file <- paste(raster_file,'tif',sep='.') + writeRaster(var_r, raster_file, overwrite=TRUE) + 
} + + } + +} + + diff --git a/tools/R/GL_divide_attr/soil_veg_type/build_attr_soil_veg_type.R b/tools/R/GL_divide_attr/soil_veg_type/build_attr_soil_veg_type.R new file mode 100644 index 0000000..f43058c --- /dev/null +++ b/tools/R/GL_divide_attr/soil_veg_type/build_attr_soil_veg_type.R @@ -0,0 +1,30 @@ +library(raster) +library(zonal) +library(sf) +library(terra) +library(tidyverse) + +#crs <- 'EPSG:4326' +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') +div <- st_transform(div,crs) + + +# Get soil and vegetation types +print('soil and veg type') +soil_type_r <- rast('ISLTYP.tif') +veg_type_r <- rast('IVGTYP.tif') +#soil_type_r <- project(soil_type_r,crs) +#veg_type_r <- project(veg_type_r,crs) + +print(st_crs(div)) +print(crs(veg_type_r)) + +soil_type <- execute_zonal(soil_type_r,div,ID='divide_id',join=FALSE, fun=mode) +veg_type <- execute_zonal(veg_type_r,div,ID='divide_id',join=FALSE, fun=mode) + +soil_veg_type <- merge(soil_type,veg_type,by='divide_id') +names(soil_veg_type) <- c('divide_id','ISLTYP','IVGTYP') +write.csv(soil_veg_type,'soil_veg_type.csv',row.names=FALSE) + diff --git a/tools/R/GL_divide_attr/soil_veg_type/soil_veg_type.R b/tools/R/GL_divide_attr/soil_veg_type/soil_veg_type.R new file mode 100644 index 0000000..82746a4 --- /dev/null +++ b/tools/R/GL_divide_attr/soil_veg_type/soil_veg_type.R @@ -0,0 +1,32 @@ +library(ncdf4) +library(raster) +library(sf) +library(terra) + +#div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/conus_nextgen.gpkg', layer='divides') +#div_unique <- div[!duplicated(div$divide_id),] + +#div_unique = st_read('hf2.2_divides.gpkg',layer='hf2.2_divides') + +filename <- 'wrfinput_CONUS.nc' +nwm_nc <- nc_open(filename) +nc_names <- names(nwm_nc$var) + +ext <- extent(363197.3,2006436.2,144063.1,1810826.8) + +mask <- raster('final_combined_calib_v3.tif') + +nc_vars <- c('ISLTYP', 'IVGTYP') + +for (var in nc_vars){ + var_nc <- ncvar_get(nwm_nc,var) + var_nc[var_nc == -9999] <- NA + var_nc <- t(var_nc)[nrow(t(var_nc)):1,] + var_r <- raster(var_nc) + crs(var_r) <- crs(mask) + extent(var_r) <- extent(mask) + res(var_r) <- res(mask) + var_r <- crop(var_r,ext) + raster_file <- paste(var,'tif',sep='.') + writeRaster(var_r, raster_file, overwrite=TRUE) +} diff --git a/tools/R/GL_divide_attr/twi/build_attr_twi.R b/tools/R/GL_divide_attr/twi/build_attr_twi.R new file mode 100644 index 0000000..8f0353b --- /dev/null +++ b/tools/R/GL_divide_attr/twi/build_attr_twi.R @@ -0,0 +1,18 @@ +library(sf) +library(terra) +library(zonal) + +div <- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div <- st_transform(div,crs) + +r <- rast('twi.tiff') + +r <- project(r,crs) + +twi <- execute_zonal(r,div,ID='divide_id',join=FALSE,fun=equal_population_distribution) + +write.csv(twi,'twi.csv',row.names=FALSE) +_ diff --git a/tools/R/GL_divide_attr/xy/build_attr_xy.R b/tools/R/GL_divide_attr/xy/build_attr_xy.R new file mode 100644 index 0000000..9b72749 --- /dev/null +++ b/tools/R/GL_divide_attr/xy/build_attr_xy.R @@ -0,0 +1,21 @@ +library(raster) +library(zonal) +library(sf) +library(terra) +library(tidyverse) + +crs <- '+proj=lcc +lat_0=40.0000076293945 +lon_0=-97 +lat_1=30 +lat_2=60 +x_0=0 +y_0=0 +R=6370000 +units=m +no_defs' + +div 
<- st_read('/Hydrofabric/data/hydrofabric/v2.2/nextgen/CONUS/gl_nextgen.gpkg',layer='divides') + +div <- st_transform(div,crs) + +# Get xy coordinates for divide centroids +print('xy') +xy <- st_point_on_surface(div) +xy <- xy %>% select(c('divide_id','geom')) %>% mutate(coords = st_coordinates(geom)) +xy <- data.frame(xy) +xy <- select(xy,c('divide_id','coords')) +#xy %>% select(geom) %>% mutate(geom = NULL) +print(xy) +write.csv(xy,'xy.csv',row.names=FALSE) diff --git a/tools/hydrofabric/download_hydrofabric_gpkg.py b/tools/hydrofabric/download_hydrofabric_gpkg.py new file mode 100644 index 0000000..1e7861c --- /dev/null +++ b/tools/hydrofabric/download_hydrofabric_gpkg.py @@ -0,0 +1,92 @@ +"""A script to download the Hydrofabric to disk as a geopackage""" + +import argparse +from pathlib import Path + +import geopandas as gpd +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.exceptions import NoSuchTableError +from tqdm import tqdm + +from icefabric.helpers import load_creds + +load_creds() + + +def download_hydrofabric(catalog: Catalog, namespace: str, output_folder: Path, crs: str) -> None: + """Build the RAS XS table in a PyIceberg warehouse. + + Parameters + ---------- + catalog : Catalog + The PyIceberg catalog object + namespace: str + Path to the parquet file to upload to the warehouse + output_folder: Path + Output directory for saving the hydrofabric gpkg + crs: str + A string representing the CRS to set in the gdf + """ + layers = [ + "divide-attributes", + "divides", + "flowpath-attributes", + "flowpath-attributes-ml", + "flowpaths", + "hydrolocations", + "lakes", + "network", + "nexus", + "pois", + ] + output_layers = {} + for layer in tqdm(layers, desc=f"Exporting {namespace} tables", total=len(layers)): + try: + table = catalog.load_table(f"{namespace}.{layer}") + df = table.scan().to_pandas() + if "geometry" in df.columns: + output_layers[layer] = gpd.GeoDataFrame( + df, geometry=gpd.GeoSeries.from_wkb(df["geometry"]), crs=crs + ) + else: + output_layers[layer] = df + except NoSuchTableError: + print(f"No table found for layer: {layer}.") + + output_folder.mkdir(exist_ok=True) + output_file = output_folder / f"{namespace}.gpkg" + print("Saving hydrofabric to disk") + for table_name, _layer in output_layers.items(): + gpd.GeoDataFrame(_layer).to_file(output_file, layer=table_name, driver="GPKG") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Download the latest HF production snapshot") + + parser.add_argument( + "--catalog", + choices=["sql", "glue"], + default="sql", + help="Catalog type to use (default: sql for local build)", + ) + parser.add_argument( + "--namespace", type=str, required=True, help="The hydrofabric namespace (domain) to download" + ) + parser.add_argument( + "--output-folder", + type=Path, + default=Path.cwd(), + help="Output directory for saving the hydrofabric gpkg", + ) + parser.add_argument( + "--crs", + type=str, + default="EPSG:5070", + help="The CRS to save the outputted .gpkg to (default is EPSG:5070). 
Change for oCONUS domains.", + ) + args = parser.parse_args() + + catalog = load_catalog(args.catalog) + download_hydrofabric( + catalog=catalog, namespace=args.namespace, output_folder=args.output_folder, crs=args.crs + ) diff --git a/tools/hydrofabric/download_reference_fabric.py b/tools/hydrofabric/download_reference_fabric.py new file mode 100644 index 0000000..c396eca --- /dev/null +++ b/tools/hydrofabric/download_reference_fabric.py @@ -0,0 +1,76 @@ +"""A script to download the Reference Fabric to disk as a geopackage""" + +import argparse +from pathlib import Path + +import geopandas as gpd +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.exceptions import NoSuchTableError +from tqdm import tqdm + +from icefabric.helpers import load_creds + +load_creds() + +NAMESPACE = "conus_reference" + + +def download_reference_fabric(catalog: Catalog, output_folder: Path, crs: str) -> None: + """Build the RAS XS table in a PyIceberg warehouse. + + Parameters + ---------- + catalog : Catalog + The PyIceberg catalog object + output_folder: Path + Output directory for saving the hydrofabric gpkg + crs: str + A string representing the CRS to set in the gdf + """ + layers = ["reference_divides", "reference_flowpaths"] + output_layers = {} + for layer in tqdm(layers, desc=f"Exporting {NAMESPACE} tables", total=len(layers)): + try: + table = catalog.load_table(f"{NAMESPACE}.{layer}") + df = table.scan().to_pandas() + if "geometry" in df.columns: + output_layers[layer] = gpd.GeoDataFrame( + df, geometry=gpd.GeoSeries.from_wkb(df["geometry"]), crs=crs + ) + else: + output_layers[layer] = df + except NoSuchTableError: + print(f"No table found for layer: {layer}.") + + output_folder.mkdir(exist_ok=True) + output_file = output_folder / f"{NAMESPACE}.gpkg" + print("Saving reference fabric to disk") + for table_name, _layer in output_layers.items(): + gpd.GeoDataFrame(_layer).to_file(output_file, layer=table_name, driver="GPKG") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Download the latest HF production snapshot") + + parser.add_argument( + "--catalog", + choices=["sql", "glue"], + default="sql", + help="Catalog type to use (default: sql for local build)", + ) + parser.add_argument( + "--output-folder", + type=Path, + default=Path.cwd(), + help="Output directory for saving the hydrofabric gpkg", + ) + parser.add_argument( + "--crs", + type=str, + default="EPSG:5070", + help="The CRS to save the outputted .gpkg to (default is EPSG:5070)", + ) + args = parser.parse_args() + + catalog = load_catalog(args.catalog) + download_reference_fabric(catalog=catalog, output_folder=args.output_folder, crs=args.crs) diff --git a/tools/hydrofabric/gpkg_to_parquet.py b/tools/hydrofabric/gpkg_to_parquet.py new file mode 100644 index 0000000..fc875c2 --- /dev/null +++ b/tools/hydrofabric/gpkg_to_parquet.py @@ -0,0 +1,93 @@ +"""A simple script to convert the v2.2 hydrofabric to parquet""" + +import argparse +from pathlib import Path + +import geopandas as gpd +import pyarrow as pa +from pyarrow import parquet as pq +from pyogrio.errors import DataLayerError + +from icefabric.helpers import load_creds +from icefabric.schemas import ( + DivideAttributes, + Divides, + FlowpathAttributes, + FlowpathAttributesML, + Flowpaths, + Hydrolocations, + Lakes, + Network, + Nexus, + POIs, +) + +load_creds() + + +def gpkg_to_parquet(input_file: Path, output_folder: Path) -> None: + """Convert geopackage to parquet file. 
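+
+    Geometry columns are serialized to WKB so each layer can be written as a plain parquet table that matches the icefabric arrow schemas.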
+ + Parameters + ---------- + input_file : Path + Path to the geopackage file to convert + output_folder : Path + Directory where the parquet file will be saved + + Raises + ------ + FileNotFoundError + If the input file doesn't exist + """ + layers = [ + ("divide-attributes", DivideAttributes), + ("divides", Divides), + ("flowpath-attributes-ml", FlowpathAttributesML), + ("flowpath-attributes", FlowpathAttributes), + ("flowpaths", Flowpaths), + ("hydrolocations", Hydrolocations), + ("lakes", Lakes), + ("network", Network), + ("nexus", Nexus), + ("pois", POIs), + ] + for layer, schema in layers: + if not input_file.exists(): + raise FileNotFoundError(f"Input file not found: {input_file}") + + print(f"Converting {layer} to parquet") + + output_folder.mkdir(parents=True, exist_ok=True) + + try: + gdf = gpd.read_file(input_file, layer=layer) + except DataLayerError: + print(f"No layer existing for: {layer}") + continue + if "geometry" in gdf.columns: + # NOTE there will be an warning as we're overriding the geometry. This is fine for now + gdf["geometry"] = gdf["geometry"].to_wkb() + + # Create PyArrow table with schema validation + table = pa.Table.from_pandas(gdf[schema.columns()], schema=schema.arrow_schema()) + + # Write parquet file + output_path = output_folder / f"{layer}.parquet" + pq.write_table(table, output_path) + print(f"Successfully converted to {output_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Convert geopackage files to parquet format") + + parser.add_argument("--gpkg", type=Path, required=True, help="Path to the geopackage file to convert") + parser.add_argument( + "--output-folder", + type=Path, + default=Path.cwd(), + help="Output directory for parquet file (default is cwd)", + ) + + args = parser.parse_args() + gpkg_to_parquet(input_file=args.gpkg, output_folder=args.output_folder) diff --git a/tools/iceberg/README.md b/tools/iceberg/README.md new file mode 100644 index 0000000..473dcef --- /dev/null +++ b/tools/iceberg/README.md @@ -0,0 +1,70 @@ +### Pushing to prod + +This folder is set up to create/update production iceberg S3 Tables on the AWS test account. + +To ensure that we don't accidentally override data before our code for data manipulation is accurate, the following strategy is being proposed. There will be three steps to data promotion from Dev → Test → Prod + +Dev +Pyiceberg namespaces and tables can both exist in a centralized location and a local directory on a user's system. Through using the `export_catalog.py` script, a namespace can be locally created in the /tmp directory using SQLlite/DuckDB. Since these local tables are detached from the production data lake, the code/data is safe for all development/manipulation + +Test +Once code is vetted and merged into Github, tests of code services be done locally through read-only queries of the production glue catalog. These can be done through changing the catalog name from sql to glue . + +Prod +After code passes test, we can begin to update the Production glue catalog with any data that was manipulated by the code. The S3 table will be updated, with the new snapshot noted with the specific delivery. Any snapshots which require additional backups will be downloaded into the edfs-data/ into a folder specified by the snapshot ID. + +Production promotion + +When writing update scripts, it's important to know the location of the data that you're writing to. Uploading data to S3 Tables tracks the file location, which all should live on the Test account. 
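+
+A minimal read-only check against the production glue catalog might look like the sketch below (the `conus_hf.network` table is only an illustrative example; any namespace/layer pair in the catalog works the same way):
+
+```python
+from pyiceberg.catalog import load_catalog
+
+from icefabric.helpers import load_creds
+
+load_creds()  # pick up AWS credentials from the local .env file
+
+catalog = load_catalog("glue")  # read-only query against the production catalog
+network = catalog.load_table("conus_hf.network").scan().to_pandas()
+print(network.head())
+```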
+ +### Example workflow +This workflow shows how to update the CONUS hydrofabric namespace. + +#### Build/Create +*Note*: this assumes you have a `.gpkg` file that you'd like to upload to PyIceberg. +1. Write the geopackage to parquet +```sh +python tools/hydrofabric/gpkg_to_parquet.py --gpkg conus_hf.gpkg --output-folder /tmp/hf +``` + +2. Build a local warehouse for testing using the `sql` catalog +```sh +python tools/iceberg/build_hydrofabric.py --catalog sql --files /tmp/hf --domain conus +``` + +3. Test that this works and confirm with a team member in peer review + +4. Update the `glue` endpoint +```sh +python tools/iceberg/build_hydrofabric.py --catalog glue --files /tmp/hf --domain conus +``` + +#### Update +1. Export the namespace you are looking to update from the S3 Tables so you have a local dev warehouse +```sh +python tools/iceberg/export_catalog.py --namespace conus_hf +``` + +2. Download the gpkg so you can make changes +```sh +python tools/hydrofabric/download_hydrofabric_gpkg.py --namespace conus_hf +``` + +3. Make changes to the geopackage + +4. Write the geopackage to parquet +```sh +python tools/hydrofabric/gpkg_to_parquet.py --gpkg patch_conus_hf.gpkg --output-folder /tmp/hf +``` + +5. Update the local warehouse table, filling in the layer name and the path to its parquet file +```sh +python tools/iceberg/update_hydrofabric.py --layer <layer> --file /tmp/hf/<layer>.parquet --domain conus +``` + +6. Once the data is updated and verified, confirm with a team member that the data is correct; then prod can be updated +```sh +python tools/iceberg/update_hydrofabric.py --catalog glue --layer <layer> --file /tmp/hf/<layer>.parquet --domain conus +``` diff --git a/tools/iceberg/build_conus_reference.py b/tools/iceberg/build_conus_reference.py new file mode 100644 index 0000000..e123729 --- /dev/null +++ b/tools/iceberg/build_conus_reference.py @@ -0,0 +1,66 @@ +import argparse + +import pyarrow.parquet as pq +from pyiceberg.catalog import load_catalog +from pyiceberg.transforms import IdentityTransform + +from icefabric.helpers import load_creds +from icefabric.schemas import ReferenceDivides, ReferenceFlowpaths + +load_creds() + + +def build_table(catalog_type: str, file_dir: str): + """Builds the conus_reference namespace and tables + + Parameters + ---------- + catalog_type : str + The catalog type to use. sql is local, glue is production + file_dir : str + The directory containing the conus_reference parquet files + """ + catalog = load_catalog(catalog_type) + namespace = "conus_reference" + catalog.create_namespace_if_not_exists(namespace) + layers = [("reference_flowpaths", ReferenceFlowpaths), ("reference_divides", ReferenceDivides)] + for layer, schema in layers: + print(f"building layer: {layer}") + try: + table = pq.read_table(f"{file_dir}/{layer}.parquet", schema=schema.arrow_schema()) + except FileNotFoundError: + print(f"Cannot find {layer} in the given file dir {file_dir}") + continue + if catalog.table_exists(f"{namespace}.{layer}"): + print(f"Table {layer} already exists. Skipping build") + else: + iceberg_table = catalog.create_table( + f"{namespace}.{layer}", + schema=schema.schema(), + location=f"s3://edfs-data/icefabric_catalog/{namespace.lower()}/{layer}", + ) + partition_spec = iceberg_table.spec() + if "vpuid" in table.column_names: + col = "vpuid" + else: + col = "VPUID" + if len(partition_spec.fields) == 0: + with iceberg_table.update_spec() as update: + update.add_field(col, IdentityTransform(), "vpuid_partition") + iceberg_table.append(table) + + print(f"Build complete. 
Files written into metadata store on {catalog.name} @ {namespace}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script to build a pyiceberg catalog for the conus reference fabric" + ) + parser.add_argument( + "--catalog", + choices=["sql", "glue"], + default="sql", + help="Catalog type to use (default: sql for local build)", + ) + parser.add_argument("--files", help="The directory containing the conus_reference parquet files") + + args = parser.parse_args() + build_table(catalog_type=args.catalog, file_dir=args.files) diff --git a/tools/iceberg/build_fema_ble.py b/tools/iceberg/build_fema_ble.py new file mode 100644 index 0000000..97cbb9b --- /dev/null +++ b/tools/iceberg/build_fema_ble.py @@ -0,0 +1,48 @@ +import argparse +from pathlib import Path + +from dotenv import load_dotenv +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NamespaceAlreadyExistsError + +from icefabric.builds import build_iceberg_table + +load_dotenv() + + +def build_table(file_dir: str): + """Builds the FEMA BLE XS tables + + Parameters + ---------- + file_dir : str + The directory to hydrofabric parquet files + """ + catalog = load_catalog("glue") # Using an AWS Glue Endpoint + namespace = "ble_xs" + try: + catalog.create_namespace(namespace) + except NamespaceAlreadyExistsError: + print(f"Namespace {namespace} already exists") + for folder in Path(file_dir).glob("ble_*"): + huc_number = folder.name.split("_", 1)[1] + print(f"building HUC XS: {huc_number}") + build_iceberg_table( + catalog=catalog, + parquet_file=f"{file_dir}/ble_{huc_number}/huc_{huc_number}.parquet", + namespace=namespace, + table_name=huc_number, + location=f"s3://edfs-data/icefabric_catalog/{namespace}/{huc_number}", + ) + print(f"Build successful. Files written into metadata store @ {catalog.name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script to build a pyiceberg catalog in the S3 Table for the FEMA-BLE data" + ) + + parser.add_argument("--files", help="The local file dir where the files are located") + + args = parser.parse_args() + build_table(file_dir=args.files) diff --git a/tools/iceberg/build_fema_mip.py b/tools/iceberg/build_fema_mip.py new file mode 100644 index 0000000..226e0ca --- /dev/null +++ b/tools/iceberg/build_fema_mip.py @@ -0,0 +1,48 @@ +import argparse +from pathlib import Path + +from dotenv import load_dotenv +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NamespaceAlreadyExistsError + +from icefabric.builds import build_iceberg_table + +load_dotenv() + + +def build_table(file_dir: str): + """Builds the FEMA MIP XS tables + + Parameters + ---------- + file_dir : str + The directory to hydrofabric parquet files + """ + catalog = load_catalog("glue") # Using an AWS Glue Endpoint + namespace = "mip_xs" + try: + catalog.create_namespace(namespace) + except NamespaceAlreadyExistsError: + print(f"Namespace {namespace} already exists") + for folder in Path(file_dir).glob("mip_*"): + huc_number = folder.name.split("_", 1)[1] + print(f"building HUC XS: {huc_number}") + build_iceberg_table( + catalog=catalog, + parquet_file=f"{file_dir}/mip_{huc_number}/huc_{huc_number}.parquet", + namespace=namespace, + table_name=huc_number, + location=f"s3://edfs-data/icefabric_catalog/{namespace}/{huc_number}", + ) + print(f"Build successful. 
Files written into metadata store @ {catalog.name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script to build a pyiceberg catalog in the S3 Table for the FEMA-MIP data" + ) + + parser.add_argument("--files", help="The local file dir where the files are located") + + args = parser.parse_args() + build_table(file_dir=args.files) diff --git a/tools/iceberg/build_hydrofabric.py b/tools/iceberg/build_hydrofabric.py new file mode 100644 index 0000000..67bfaeb --- /dev/null +++ b/tools/iceberg/build_hydrofabric.py @@ -0,0 +1,144 @@ +"""A file to create/build the hydrofabric table and snapshots for a specific domain""" + +import argparse +import os +import warnings +from pathlib import Path + +import pyarrow as pa +import pyarrow.parquet as pq +import yaml +from pyiceberg.catalog import load_catalog +from pyiceberg.transforms import IdentityTransform + +from icefabric.helpers import load_creds +from icefabric.schemas import ( + DivideAttributes, + Divides, + FlowpathAttributes, + FlowpathAttributesML, + Flowpaths, + HydrofabricSnapshot, + Hydrolocations, + Lakes, + Network, + Nexus, + POIs, +) + +# Loading credentials, setting path to save outputs +load_creds() +with open(os.environ["PYICEBERG_HOME"]) as f: + CONFIG = yaml.safe_load(f) +WAREHOUSE = Path(CONFIG["catalog"]["sql"]["warehouse"].replace("file://", "")) +WAREHOUSE.mkdir(parents=True, exist_ok=True) + +LOCATION = { + "glue": "s3://edfs-data/icefabric_catalog", + "sql": CONFIG["catalog"]["sql"]["warehouse"], +} + +# Suppress threading cleanup warnings +warnings.filterwarnings("ignore", category=ResourceWarning) + + +def build_hydrofabric(catalog_type: str, file_dir: str, domain: str): + """Builds the hydrofabric Iceberg tables + + Parameters + ---------- + catalog_type : str + the type of catalog. sql is local, glue is production + file_dir : str + where the files are located + domain : str + the HF domain to be built + """ + catalog = load_catalog(catalog_type) + namespace = f"{domain}_hf" + catalog.create_namespace_if_not_exists(namespace) + layers = [ + ("divide-attributes", DivideAttributes), + ("divides", Divides), + ("flowpath-attributes-ml", FlowpathAttributesML), + ("flowpath-attributes", FlowpathAttributes), + ("flowpaths", Flowpaths), + ("hydrolocations", Hydrolocations), + ("lakes", Lakes), + ("network", Network), + ("nexus", Nexus), + ("pois", POIs), + ] + snapshots = {} + snapshots["domain"] = domain + for layer, schema in layers: + print(f"Building layer: {layer}") + try: + table = pq.read_table(f"{file_dir}/{layer}.parquet", schema=schema.arrow_schema()) + except FileNotFoundError: + print(f"Cannot find {layer} in the given file dir {file_dir}") + continue + if catalog.table_exists(f"{namespace}.{layer}"): + print(f"Table {layer} already exists. 
Skipping build") + current_snapshot = catalog.load_table(f"{namespace}.{layer}").current_snapshot() + snapshots[layer] = current_snapshot.snapshot_id + else: + iceberg_table = catalog.create_table( + f"{namespace}.{layer}", + schema=schema.schema(), + location=f"{LOCATION[catalog_type]}/{namespace.lower()}/{layer}", + ) + partition_spec = iceberg_table.spec() + if len(partition_spec.fields) == 0: + with iceberg_table.update_spec() as update: + update.add_field("vpuid", IdentityTransform(), "vpuid_partition") + iceberg_table.append(table) + current_snapshot = iceberg_table.current_snapshot() + snapshots[layer] = current_snapshot.snapshot_id + + snapshot_namespace = "hydrofabric_snapshots" + snapshot_table = f"{snapshot_namespace}.id" + catalog.create_namespace_if_not_exists(snapshot_namespace) + if catalog.table_exists(snapshot_table): + tbl = catalog.load_table(snapshot_table) + else: + tbl = catalog.create_table( + snapshot_table, + schema=HydrofabricSnapshot.schema(), + location=f"{LOCATION[catalog_type]}/{snapshot_namespace}", + ) + df = pa.Table.from_pylist([snapshots], schema=HydrofabricSnapshot.arrow_schema()) + tbl.append(df) + tbl.manage_snapshots().create_tag(tbl.current_snapshot().snapshot_id, "base").commit() + print(f"Build complete. Files written into metadata store on {catalog.name} @ {namespace}") + print(f"Snapshots written to: {snapshot_namespace}.id") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Build a PyIceberg catalog in the S3 Table for the Hydrofabric" + ) + + parser.add_argument( + "--catalog", + choices=["sql", "glue"], + default="sql", + help="Catalog type to use (default: sql for local build)", + ) + parser.add_argument( + "--files", + type=Path, + required=True, + help="Path to the folder containing Hydrofabric parquet files", + ) + parser.add_argument( + "--domain", + type=str, + required=True, + choices=["conus", "ak", "hi", "prvi", "gl"], + help="The hydrofabric domain to be used for the namespace", + ) + + args = parser.parse_args() + + build_hydrofabric(catalog_type=args.catalog, file_dir=args.files, domain=args.domain) diff --git a/tools/iceberg/build_iceberg_bathymetry_ml_auxiliary.py b/tools/iceberg/build_iceberg_bathymetry_ml_auxiliary.py new file mode 100644 index 0000000..dae4afa --- /dev/null +++ b/tools/iceberg/build_iceberg_bathymetry_ml_auxiliary.py @@ -0,0 +1,75 @@ +import argparse +from pathlib import Path + +from dotenv import load_dotenv +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NamespaceAlreadyExistsError + +from icefabric.builds import build_iceberg_table + +load_dotenv() + + +def build_table(file_dir: Path): + """Builds the bathymetric channel data for the Hydrofabric + + Parameters + ---------- + file_dir : str + The directory to hydrofabric parquet files + """ + catalog = load_catalog("glue") # Using an AWS Glue Endpoint + namespace = "bathymetry_ml_auxiliary" + try: + catalog.create_namespace(namespace) + except NamespaceAlreadyExistsError: + print(f"Namespace {namespace} already exists") + + layers = [ + "vpuid=01", + "vpuid=02", + "vpuid=03N", + "vpuid=03S", + "vpuid=03W", + "vpuid=04", + "vpuid=05", + "vpuid=06", + "vpuid=07", + "vpuid=08", + "vpuid=09", + "vpuid=10L", + "vpuid=10U", + "vpuid=11", + "vpuid=12", + "vpuid=13", + "vpuid=14", + "vpuid=15", + "vpuid=16", + "vpuid=17", + ] + + for layer in layers: + print(f"building layer: {layer}") + # The following warning is expected: + # Iceberg does not have a dictionary type. will be inferred as int32 on read. 
+ # Arrow will make columns with a single, non-unique, value into a dictionary for ease of writing/loading + # Thus, when writing to pyiceberg it needs to remove that. + build_iceberg_table( + catalog=catalog, + parquet_file=f"{file_dir}/{layer}/ml_auxiliary_data.parquet", + namespace=namespace, + table_name=layer, + location=f"s3://edfs-data/icefabric_catalog/{namespace}/{layer}", + ) + print(f"Build successful. Files written into metadata store @ {catalog.name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script to build a pyiceberg catalog for bathymetric data in the S3 Table" + ) + + parser.add_argument("--files", help="The local file dir where the files are located") + + args = parser.parse_args() + build_table(file_dir=args.files) diff --git a/tools/iceberg/build_parameters.py b/tools/iceberg/build_parameters.py new file mode 100644 index 0000000..c56d424 --- /dev/null +++ b/tools/iceberg/build_parameters.py @@ -0,0 +1,55 @@ +import argparse + +from dotenv import load_dotenv +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NamespaceAlreadyExistsError + +from icefabric.builds import build_iceberg_table + +load_dotenv() + + +def build_table(file_dir): + """Builds the divide parameters namespace and tables + + Parameters + ---------- + file_dir : str + The directory to hydrofabric parquet files + """ + parquet_tables = { + "CFE-X_params_Alaska_2.2.parquet": "CFE-X_Alaska", + "CFE-X_params_GL_2.2.parquet": "CFE-X_GL", + "CFE-X_params_Puerto_Rico_2.2.parquet": "CFE-X_Puerto_Rico", + "snow17_params_2.2.parquet": "Snow-17_CONUS", + "CFE-X_params_CONUS_2.2.parquet": "CFE-X_CONUS", + "CFE-X_params_Hawaii_2.2.parquet": "CFE-X_Hawaii", + "sac_sma_params_2.2.parquet": "Sac-SMA_CONUS", + "ueb_params.parquet": "UEB_CONUS", + } + + catalog = load_catalog("glue") # Using an AWS Glue Endpoint + namespace = "divide_parameters" + try: + catalog.create_namespace(namespace) + except NamespaceAlreadyExistsError: + print(f"Namespace {namespace} already exists") + + for file in parquet_tables.keys(): + build_iceberg_table( + catalog=catalog, + parquet_file=f"{file_dir}/{file}", + namespace=namespace, + table_name=parquet_tables[file], + location=f"s3://edfs-data/icefabric_catalog/{namespace}{parquet_tables[file]}", + ) + print(f"Build successful. 
Files written into metadata store @ {catalog.name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="A script to build a pyiceberg catalog in the S3 Table") + + parser.add_argument("--files", help="The local file dir where the files are located") + + args = parser.parse_args() + build_table(file_dir=args.files) diff --git a/tools/iceberg/build_ras_xs.py b/tools/iceberg/build_ras_xs.py new file mode 100644 index 0000000..7dfc848 --- /dev/null +++ b/tools/iceberg/build_ras_xs.py @@ -0,0 +1,140 @@ +import argparse +import os +from pathlib import Path + +import pyarrow as pa +import pyarrow.parquet as pq +import yaml +from pyiceberg.catalog import load_catalog + +from icefabric.helpers import load_creds +from icefabric.schemas import ConflatedRasXS, RepresentativeRasXS + +load_creds() +with open(os.environ["PYICEBERG_HOME"]) as f: + CONFIG = yaml.safe_load(f) +WAREHOUSE = Path(CONFIG["catalog"]["sql"]["warehouse"].replace("file://", "")) +WAREHOUSE.mkdir(parents=True, exist_ok=True) + +LOCATION = { + "glue": "s3://edfs-data/icefabric_catalog", + "sql": CONFIG["catalog"]["sql"]["warehouse"], +} +NAMESPACE = "ras_xs" + + +def build_table(catalog_type: str, file_path: Path, schema_type: str, overwrite: bool) -> None: + """Build the RAS XS table in a PyIceberg warehouse. + + Parameters + ---------- + catalog : str + The PyIceberg catalog type + file_path : Path + Path to the parquet file to upload to the warehouse + schema_type : str + The schema to validate against. Either representative XS or all conflated XS + + Raises + ------ + FileNotFoundError + If the parquet file doesn't exist + """ + if not file_path.exists(): + raise FileNotFoundError(f"Cannot find file: {file_path}") + + print(f"Processing file: {file_path}") + catalog = load_catalog(catalog_type) + catalog.create_namespace_if_not_exists(NAMESPACE) + + table_identifier = f"{NAMESPACE}.{schema_type}" + if catalog.table_exists(table_identifier): + if not overwrite: + print(f"Table {table_identifier} already exists. 
Skipping build") + return + else: + print(f"Table {table_identifier} will be overwritten.") + + print("Building XS table") + + if schema_type == "representative": + schema = RepresentativeRasXS.schema() + pa_schema = RepresentativeRasXS.arrow_schema() + elif schema_type == "conflated": + schema = ConflatedRasXS.schema() + pa_schema = ConflatedRasXS.arrow_schema() + else: + raise ValueError("Schema not found for your inputted XS file") + + if not overwrite: + iceberg_table = catalog.create_table( + table_identifier, + schema=schema, + location=LOCATION[catalog_type], + ) + else: + iceberg_table = catalog.load_table(table_identifier) + with iceberg_table.update_schema() as update: + update.union_by_name(schema) + + # Load data and create table + parquet_file = pq.ParquetFile(file_path) + if not overwrite: + for batch in parquet_file.iter_batches(batch_size=500000): + print("Adding batch...") + arrow_table = pa.Table.from_batches([batch]) + arrow_table = arrow_table.cast(pa_schema) + iceberg_table.append(arrow_table) + print("Batch appended to iceberg table...") + else: + first_thru = True + for batch in parquet_file.iter_batches(batch_size=500000): + print("Adding batch...") + arrow_table = pa.Table.from_batches([batch]) + arrow_table = arrow_table.cast(pa_schema) + if first_thru: + print("Overwriting table with first batch...") + iceberg_table.overwrite(arrow_table) + first_thru = False + else: + iceberg_table.append(arrow_table) + print("Batch appended to iceberg table...") + + print(f"Build successful. Files written to metadata store @ {catalog.name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Build a PyIceberg catalog in the S3 Table for FEMA-BLE data" + ) + parser.add_argument( + "--catalog", + choices=["sql", "glue"], + default="sql", + help="Catalog type to use (default: sql for local build)", + ) + parser.add_argument( + "--schema", + type=str, + choices=["representative", "conflated"], + required=True, + help="The schema to validate against. 
Either representative XS or all conflated XS", + ) + parser.add_argument( + "--file", + type=Path, + required=True, + help="Path to the parquet file containing processed cross sections", + ) + parser.add_argument( + "--overwrite", + type=bool, + required=False, + default=False, + help="Flag to indicate that it is okay to overwrite an existing table", + ) + args = parser.parse_args() + + build_table( + catalog_type=args.catalog, file_path=args.file, schema_type=args.schema, overwrite=args.overwrite + ) diff --git a/tools/iceberg/build_usgs_streamflow_observations.py b/tools/iceberg/build_usgs_streamflow_observations.py new file mode 100644 index 0000000..70c7b11 --- /dev/null +++ b/tools/iceberg/build_usgs_streamflow_observations.py @@ -0,0 +1,42 @@ +import argparse + +from dotenv import load_dotenv +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NamespaceAlreadyExistsError + +from icefabric.builds import build_iceberg_table + +load_dotenv() + + +def build_table(file_dir): + """Builds the streamflow observation namespace and tables + + Parameters + ---------- + file_dir : str + The directory to hydrofabric parquet files + """ + catalog = load_catalog("glue") # Using an AWS Glue Endpoint + namespace = "streamflow_observations" + try: + catalog.create_namespace(namespace) + except NamespaceAlreadyExistsError: + print(f"Namespace {namespace} already exists") + build_iceberg_table( + catalog=catalog, + parquet_file=f"{file_dir}/usgs_hourly.parquet", + namespace=namespace, + table_name="usgs_hourly", + location=f"s3://edfs-data/icefabric_catalog/{namespace}/usgs_hourly", + ) + print(f"Build successful. Files written into metadata store @ {catalog.name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="A script to build a pyiceberg catalog in the S3 Table") + + parser.add_argument("--files", help="The local file dir where the files are located") + + args = parser.parse_args() + build_table(file_dir=args.files) diff --git a/tools/iceberg/export_catalog.py b/tools/iceberg/export_catalog.py new file mode 100644 index 0000000..b3548d5 --- /dev/null +++ b/tools/iceberg/export_catalog.py @@ -0,0 +1,96 @@ +"""A file to export the glue catalog to sqllite""" + +import argparse +import os +from pathlib import Path + +import pyarrow as pa +import yaml +from pyiceberg.catalog import load_catalog +from pyiceberg.exceptions import NamespaceAlreadyExistsError +from pyiceberg.transforms import IdentityTransform +from tqdm import tqdm + +from icefabric.helpers import load_creds +from icefabric.schemas import HydrofabricSnapshot + +load_creds() + + +def export(namespace: str, snapshot: int | None = None): + """Exports the catalog to a local SQL file based on the .pyiceberg.yaml in the project root + + Parameters + ---------- + namespace : str + The namespace to be exported + snapshot : str | None, optional + The snapshot ID to export from, by default None and using the latest + """ + # Creates the local dir for the warehouse if it does not exist + with open(os.environ["PYICEBERG_HOME"]) as f: + config = yaml.safe_load(f) + + warehouse = Path(config["catalog"]["sql"]["warehouse"].replace("file://", "")) + warehouse.mkdir(parents=True, exist_ok=True) + + glue_catalog = load_catalog("glue") + local_catalog = load_catalog("sql") + try: + local_catalog.create_namespace(namespace) + except NamespaceAlreadyExistsError as e: + print("Cannot Export Catalog. 
Already exists") + raise NamespaceAlreadyExistsError from e + namespace_tables = glue_catalog.list_tables(namespace=namespace) + + # Saving new snapshots for local + snapshots = {} + if "hf" in namespace: + is_hf = True + domain = namespace.split("_")[0] + snapshots["domain"] = domain + else: + is_hf = False + + for _, table in tqdm(namespace_tables, desc=f"Exporting {namespace} tables", total=len(namespace_tables)): + _table = glue_catalog.load_table(f"{namespace}.{table}").scan(snapshot_id=snapshot) + _arrow = _table.to_arrow() + iceberg_table = local_catalog.create_table_if_not_exists( + f"{namespace}.{table}", + schema=_arrow.schema, + ) + if namespace == "conus_hf": + # Partitioning the CONUS HF data + partition_spec = iceberg_table.spec() + if len(partition_spec.fields) == 0: + with iceberg_table.update_spec() as update: + update.add_field("vpuid", IdentityTransform(), "vpuid_partition") + iceberg_table.append(_arrow) + if is_hf: + snapshots[table] = iceberg_table.current_snapshot().snapshot_id + + if is_hf: + local_catalog.create_namespace_if_not_exists("hydrofabric_snapshots") + if local_catalog.table_exists("hydrofabric_snapshots.id"): + tbl = local_catalog.load_table("hydrofabric_snapshots.id") + else: + tbl = local_catalog.create_table( + "hydrofabric_snapshots.id", + schema=HydrofabricSnapshot.schema(), + ) + df = pa.Table.from_pylist([snapshots], schema=HydrofabricSnapshot.arrow_schema()) + tbl.append(df) + print(f"Exported {namespace} into local pyiceberg DB") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script to export the S3 tables catalog based on a namespace and snapshot id. If no snapshot, assuming the latest" + ) + + parser.add_argument("--namespace", help="The namespace repo that is being exported") + parser.add_argument("--snapshot", help="The snapshot ID for the namespace") + + args = parser.parse_args() + export(namespace=args.namespace, snapshot=args.snapshot) + # export(namespace="conus_hf") diff --git a/tools/iceberg/migrate_catalog.py b/tools/iceberg/migrate_catalog.py new file mode 100644 index 0000000..440d20e --- /dev/null +++ b/tools/iceberg/migrate_catalog.py @@ -0,0 +1,114 @@ +from pathlib import Path + +import pyarrow.parquet as pq +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.table import Table + +from icefabric.helpers import load_creds + +load_creds() + + +def migrate_table_in_place( + catalog: Catalog, table_name: str, new_location: str, backup_location: str | None = None +) -> Table: + """A script to migrate/move pyiceberg tables to a new location and to save to disk + + Parameters + ---------- + catalog : Catalog + The pyiceberg catalog + table_name : str + The table name to be moved. 
Ex: conus_hf.network + new_location : str + the new S3 location in glue for the table data to reside + backup_location : str | None, optional + Where to save a backup parquet file in case the move goes wrong, by default None + + Returns + ------- + Table + The moved pyiceberg table in memory + """ + print(f"WARNING: This will drop and recreate table {table_name}") + print(f"New location will be: {new_location}") + + old_table = catalog.load_table(table_name) + schema = old_table.schema() + partition_spec = old_table.spec() + sort_order = old_table.sort_order() + properties = old_table.properties + + # Export all data + print("Exporting data...") + all_data = old_table.scan().to_arrow() + row_count = len(all_data) + print(f"Exported {row_count} rows") + + # Optional: Save to backup location first + if backup_location: + print(f"Saving backup to {backup_location}") + # You could write to S3 or local file here + Path(backup_location).parent.mkdir(exist_ok=True) + pq.write_table(all_data, backup_location) + + print(f"Dropping table {table_name}") + catalog.drop_table(table_name) + + try: + # Create new table with new location + print(f"Creating new table at {new_location}") + new_table = catalog.create_table( + identifier=table_name, # Same name + schema=schema, + location=new_location, # New location + partition_spec=partition_spec, + sort_order=sort_order, + properties=properties, + ) + # Import the data + print("Importing data to new table...") + + # For Hydrofabric. Partition based on VPU + # if len(partition_spec.fields) == 0: + # with new_table.update_spec() as update: + # update.add_field("vpuid", IdentityTransform(), "vpuid_partition") + new_table.append(all_data) + return new_table + + except Exception as e: + print(f"CRITICAL ERROR during migration: {str(e)}") + print("Table has been dropped but recreation failed!") + print("You may need to restore from backup") + raise + + +if __name__ == "__main__": + catalog = load_catalog("glue") + # namespace = "gl_hf" + # layers = [ + # "divide-attributes", + # "divides", + # "flowpath-attributes-ml", + # "flowpath-attributes", + # "flowpaths", + # "hydrolocations", + # "lakes", + # "network", + # "nexus", + # "pois", + # ] + namespace = "streamflow_observations" + for _, table_name in catalog.list_tables(namespace): + table_to_migrate = f"{namespace}.{table_name}" + new_location = f"s3://edfs-data/icefabric_catalog/{namespace}/{table_name}" + + table = migrate_table_in_place( + catalog=catalog, + table_name=table_to_migrate, + new_location=new_location, + backup_location=f"/tmp/backup/{namespace}/{table_name}.parquet", + ) + + print(f"Successfully migrated {table_name} to {new_location}") + print(f"Sample output: {table.scan().to_pandas().head()}") diff --git a/tools/iceberg/update_hydrofabric.py b/tools/iceberg/update_hydrofabric.py new file mode 100644 index 0000000..a0b6db3 --- /dev/null +++ b/tools/iceberg/update_hydrofabric.py @@ -0,0 +1,150 @@ +"""A file to update the hydrofabric table and snapshots for a specific domain""" + +import argparse +import os +import warnings +from pathlib import Path + +import pyarrow as pa +import pyarrow.parquet as pq +import yaml +from pyiceberg.catalog import load_catalog +from pyiceberg.expressions import EqualTo + +from icefabric.helpers import load_creds +from icefabric.schemas import ( + DivideAttributes, + Divides, + FlowpathAttributes, + FlowpathAttributesML, + Flowpaths, + HydrofabricSnapshot, + Hydrolocations, + Lakes, + Network, + Nexus, + POIs, +) + +# Loading credentials, setting path to save 
outputs +load_creds() +with open(os.environ["PYICEBERG_HOME"]) as f: + CONFIG = yaml.safe_load(f) + +LOCATION = { + "glue": "s3://edfs-data/icefabric_catalog", + "sql": CONFIG["catalog"]["sql"]["warehouse"], +} + +# Suppress threading cleanup warnings +warnings.filterwarnings("ignore", category=ResourceWarning) + + +def update_hydrofabric(catalog_type: str, layer: str, file: Path, domain: str, tag: str | None = None): + """A script to update the hydrofabric for a specific layer + + Parameters + ---------- + catalog_type : str + specifying either a local (sql) or remote (glue) endpoint + layer : str + The layer of the hydrofabric to update + file : Path + the path to the updated HF parquet file + domain : str + the domain to update + tag : str | None, optional + A tag to place on the updates, by default None + + Raises + ------ + FileNotFoundError + Cannot find inputted parquet file + """ + catalog = load_catalog(catalog_type) + namespace = f"{domain}_hf" + layers = { + "divide-attributes": DivideAttributes, + "divides": Divides, + "flowpath-attributes-ml": FlowpathAttributesML, + "flowpath-attributes": FlowpathAttributes, + "flowpaths": Flowpaths, + "hydrolocations": Hydrolocations, + "lakes": Lakes, + "network": Network, + "nexus": Nexus, + "pois": POIs, + } + snapshots = {} + snapshots["domain"] = domain + schema = layers[layer] + print(f"Updating layer: {layer}") + try: + table = pq.read_table(file, schema=schema.arrow_schema()) + except FileNotFoundError as e: + print("Cannot find input file") + raise FileNotFoundError from e + if catalog.table_exists(f"{namespace}.{layer}"): + iceberg_table = catalog.load_table(f"{namespace}.{layer}") + iceberg_table.overwrite(table) # TODO See issue #81 for support of upsert + current_snapshot = iceberg_table.current_snapshot() + snapshots[layer] = current_snapshot.snapshot_id + print() + else: + print(f"{layer} not found in S3 Tables. Please run `build_hydrofabric.py` First prior to updating") + + snapshot_namespace = "hydrofabric_snapshots" + tbl = catalog.load_table(f"{snapshot_namespace}.id") + df = tbl.scan(row_filter=EqualTo("domain", domain)).to_pandas() + df[layer] = snapshots[layer] + table = pa.Table.from_pandas(df, preserve_index=False, schema=HydrofabricSnapshot.arrow_schema()) + tbl.upsert(table) + if tag is not None: + tbl.manage_snapshots().create_tag(tbl.current_snapshot().snapshot_id, tag).commit() + print(f"Build complete. 
Files written into metadata store on {catalog.name} @ {namespace}") + print(f"Snapshots written to: {snapshot_namespace}.id") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Update a PyIceberg catalog in the S3 Table for the Hydrofabric" + ) + + parser.add_argument( + "--catalog", + choices=["sql", "glue"], + default="sql", + help="Catalog type to use (default: sql for local build)", + ) + parser.add_argument( + "--layer", + type=str, + required=True, + help="the layer to be updated in S3 Tables", + ) + parser.add_argument( + "--file", + type=Path, + required=True, + help="Path to the file for the updated Hydrofabric parquet file", + ) + parser.add_argument( + "--domain", + type=str, + required=True, + choices=["conus", "ak", "hi", "prvi", "gl"], + help="The hydrofabric domain to be used for the namespace", + ) + parser.add_argument( + "--tag", + type=str, + required=False, + help="A tag to add to the snapshot table update", + ) + + args = parser.parse_args() + + update_hydrofabric( + catalog_type=args.catalog, layer=args.layer, file=args.file, domain=args.domain, tag=args.tag + ) + # update_hydrofabric(catalog_type="sql", layer="network", file="/Users/taddbindas/projects/NGWPC/icefabric/data/hydrofabric/update_tmp/network.parquet", domain="conus") diff --git a/tools/icechunk/build_landcover_stores.py b/tools/icechunk/build_landcover_stores.py new file mode 100644 index 0000000..04e3137 --- /dev/null +++ b/tools/icechunk/build_landcover_stores.py @@ -0,0 +1,92 @@ +"""A file to build the landcover stores using Icechunk""" + +import argparse +from pathlib import Path + +import icechunk as ic +import xarray as xr +from tqdm import tqdm +from virtualizarr import open_virtual_dataset + +from icefabric.helpers import ( + add_time_dim_to_datasets, + extract_dates_from_archival_files, +) + + +def build_landcover_store(virtual_files: str, output_path) -> None: + """Creates a landcover store based on the NLCD data + + Parameters + ---------- + virtual_files : str + The path to where the virtual files live + output_path : _type_ + _description_ + """ + abs_path = Path(virtual_files).resolve() + + # Create virtual chunk container + store_config = ic.local_filesystem_store(str(abs_path)) + container = ic.VirtualChunkContainer(f"file://{abs_path}", store_config) + + # Set up credentials + credentials = ic.containers_credentials({f"file://{abs_path}": None}) + + # Create config and add container + config = ic.RepositoryConfig.default() + config.set_virtual_chunk_container(container) + + # Create storage for the repo + storage = ic.local_filesystem_storage(str(Path(output_path).resolve())) + + # Create/open repository with correct class name + nlcd_repo = ic.Repository.open_or_create( + storage=storage, + config=config, + authorize_virtual_chunk_access=credentials, + ) + + # Get Files + files = sorted([str(f) for f in abs_path.glob("*")]) + datetimes = extract_dates_from_archival_files(files, "Annual_NLCD_LndCov_*_CU_C1V0.tif", just_year=True) + + # Virtualize Data + datasets = [] + for i in tqdm( + range(len(files)), + desc="Opening files as Virtual Datasets", + unit="files", + ncols=125, + colour="#37B6BD", + ): + datasets.append( + open_virtual_dataset( + filepath=files[i], + indexes={}, + ) + ) + time_added_datasets = add_time_dim_to_datasets(datasets, datetimes, just_year=True) + ds = xr.concat( + time_added_datasets, dim="year", coords="minimal", compat="override", combine_attrs="override" + ) + + # Write to icechunk + session = nlcd_repo.writable_session("main") + 
store = session.store # A zarr store + ds.virtualize.to_icechunk(store) + _ = session.commit("Initial Commit: Building landcover store") + + print("Successfully wrote to icechunk") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="A script to build a pyiceberg catalog in the S3 Table") + + parser.add_argument( + "virtual_files", type=str, help="The Path to the files we're virtualizing into icechunk" + ) + parser.add_argument("output_path", type=str, help="The Path to where the repo should be created") + + args = parser.parse_args() + build_landcover_store(args.virtual_files, args.output_path) diff --git a/tools/icechunk/convert_to_tif.py b/tools/icechunk/convert_to_tif.py new file mode 100644 index 0000000..1f97407 --- /dev/null +++ b/tools/icechunk/convert_to_tif.py @@ -0,0 +1,17 @@ +"""A function to convert the topobathy data into a tif file""" + +import os + +from icefabric.helpers import convert_topobathy_to_tiff, load_creds +from icefabric.schemas import NGWPCLocations + +load_creds() + +if __name__ == "__main__": + output_dir = "./temp_tb_data" + os.makedirs(output_dir, exist_ok=True) + + # all 30 m topobathy layers + ic_rasters = [f for f in NGWPCLocations._member_names_ if "TOPO" and "30M" in f] + + convert_topobathy_to_tiff(output_dir, ic_rasters) diff --git a/tools/icechunk/get_envca_cadwr_txdot_gages_from_s3.py b/tools/icechunk/get_envca_cadwr_txdot_gages_from_s3.py new file mode 100644 index 0000000..8f0fe37 --- /dev/null +++ b/tools/icechunk/get_envca_cadwr_txdot_gages_from_s3.py @@ -0,0 +1,84 @@ +import os +import shutil +import subprocess + +import pandas as pd + +GAGE_TYPE_CORRELATION = {"ENVCA": "ENVCA", "CADWR": "CADWR", "USGS": "TXDOT"} + +if __name__ == "__main__": + # This path is in the NGWPC AWS Data account + conus_root_path = "s3://ngwpc-hydrofabric/2.1/CONUS" + gage_collection_dir = "data/gage_data/" + hourly_discharge_final_collection = "data/gage_csv_files" + if not os.path.exists(gage_collection_dir): + os.makedirs(gage_collection_dir) + print("Downloading ENVCA/CADWR/TXDOT gages...") + with open("data/envca_cadwr_txdot_gages.txt") as file: + for line in file: + gage_id = line.strip().split()[0] + gage_type = line.strip().split()[1] + print(f"Collecting: {gage_id} ({gage_type})") + s3_dir_path = f"2.1/CONUS/{gage_id}/*" + command = [ + "aws", + "s3", + "cp", + f"{conus_root_path}/{gage_id}", + gage_collection_dir, + "--recursive", + ] + print(" ".join(map(str, command))) + subprocess.call(command) + else: + print("Skipping data retrieval...") + if not os.path.exists(hourly_discharge_final_collection): + os.makedirs(hourly_discharge_final_collection) + print( + "Collecting list of hourly discharge CSV files - only collecting the latest if there are multiples" + ) + gage_file_dict = {} + for root, dirs, files in os.walk(gage_collection_dir): + dirs.sort() + for file in files: + gage_type_fp_portion = root.split("/")[-2] + gage_type = GAGE_TYPE_CORRELATION[gage_type_fp_portion] + if file in gage_file_dict: + print(f"Found more recent gage collection file: {os.path.join(root, file)}") + gage_file_dict.pop(file) + gage_file_dict[file] = (os.path.join(root, file), gage_type) + + for file, (fp, g_type) in gage_file_dict.items(): + new_file_name = f"{g_type}_{file}" + shutil.copy(fp, os.path.join(hourly_discharge_final_collection, new_file_name)) + else: + print("Skipping data consolidation...") + if not os.path.exists("envca_cadwr_txdot.zarr"): + csv_files = [file for file in os.listdir(hourly_discharge_final_collection) if 
file.endswith(".csv")] + + # Read and concatenate all CSV files into a single DataFrame + dataframes = [] + for file in csv_files: + gage_type, gage_id = file.split("_")[0], file.split("_")[1] + df = pd.read_csv(os.path.join(hourly_discharge_final_collection, file), header=0).assign( + id=gage_id, gage_type=gage_type + ) + dataframes.append(df) + combined_df = pd.concat(dataframes, ignore_index=True, axis=0) + combined_df["dateTime"] = pd.to_datetime( + combined_df["dateTime"], format="mixed", utc=True + ).dt.tz_localize(None) + combined_df.rename(columns={"dateTime": "time"}, inplace=True) + combined_df.set_index(["id", "time"], inplace=True) + + print("Converting to xarray dataset...") + dataset = combined_df.to_xarray() + dataset.coords["id"] = dataset.coords["id"].astype(str) + dataset["gage_type"] = dataset["gage_type"].astype(str) + + print("Saving to zarr store...") + dataset.to_zarr("envca_cadwr_txdot.zarr", mode="w") + + print("CSV files have been successfully combined into a Zarr store!") + else: + print("Zarr store already exists - skipping CSV consolidation/conversion...") diff --git a/tools/icechunk/icechunk_s3_migration.py b/tools/icechunk/icechunk_s3_migration.py new file mode 100644 index 0000000..456231a --- /dev/null +++ b/tools/icechunk/icechunk_s3_migration.py @@ -0,0 +1,72 @@ +import argparse +import os + +from dotenv import load_dotenv + +from icefabric.schemas import NGWPCLocations + +from .icechunk_testing import topo_push_test + +load_dotenv() + + +def migrate_all_icechunks(attr_name: str) -> None: + """Converts & migrates TIFFs from local to cloud. + + Parameters + ---------- + attr_name: str + Attribute name of interest for a given TIFF. + """ + # Extract list of TIFFs from local + local_list = [] + for _, _, files in os.walk("./"): + for file in files: + if file.endswith(".tiff"): + local_list.append(file.lower()) + + # Maps the S3 paths to TIFFs + topo_fn2s3_map = {} + for fn in local_list: + if "hawaii_10m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_HA_10M_IC.path] = "tbdem_hawaii_10m.tiff" + elif "hawaii_30m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_HA_30M_IC.path] = "tbdem_hawaii_30m.tiff" + elif "conus_atlantic_gulf_30m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_CONUS_ATL_GULF_30M_IC.path] = ( + "tbdem_conus_atlantic_gulf_30m.tiff" + ) + elif "conus_pacific_30m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_CONUS_PAC_30M_IC.path] = "tbdem_conus_pacific_30m.tiff" + elif "pr_usvi_30m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_PR_USVI_30M_IC.path] = "tbdem_pr_usvi_30m.tiff" + elif "pr_usvi_10m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_PR_USVI_10M_IC.path] = "tbdem_pr_usvi_10m.tiff" + elif "alaska_10m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_AK_10M_IC.path] = "tbdem_alaska_10m.tiff" + elif "alaska_30m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_AK_30M_IC.path] = "tbdem_alaska_30m.tiff" + elif "great_lakes_30m" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_GREAT_LAKES_30M_IC.path] = "tbdem_great_lakes_30m.tiff" + elif "albemarle_sound_nos_ncei" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_ALBEMARLE_SOUND_IC.path] = "Albemarle_Sound_NOS_NCEI.tiff" + elif "chesapeake_bay_nos_ncei" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_CHESAPEAKE_BAY_IC.path] = "Chesapeake_Bay_NOS_NCEI.tiff" + elif "mobile_bay_nos_ncei" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_MOBILE_BAY_IC.path] = "Mobile_Bay_NOS_NCEI.tiff" + elif "tangier_sound_nos_ncei-002" in fn: + topo_fn2s3_map[NGWPCLocations.TOPO_TANGIER_SOUND_IC.path] = "Tangier_Sound_NOS_NCEI-002.tiff" + + # Migration 
of all captured TIFFs to cloud. + for s3_path, fn in topo_fn2s3_map.items(): + if fn: + topo_push_test(tiff_fp=f"./{fn}", attr_name=attr_name, new_ic_repo=s3_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="A script to write an icechunk store") + + parser.add_argument("--attr", help="The attribute that is to be built to icechunk") + + args = parser.parse_args() + migrate_all_icechunks(attr_name=args.attr) diff --git a/tools/icechunk/icechunk_testing.py b/tools/icechunk/icechunk_testing.py new file mode 100644 index 0000000..690a526 --- /dev/null +++ b/tools/icechunk/icechunk_testing.py @@ -0,0 +1,180 @@ +""" +Icechunk examples + +General example/testing code for Icechunk functionality. Covers archival weather +file virtualization/concatenation & uploading/appending/retrieving data to/from repos. +""" + +from dotenv import load_dotenv + +import icefabric.helpers.nc_conv_utils as ncc_utils +from icefabric.builds import IcechunkRepo, S3Path +from icefabric.helpers import ( + get_archival_weather_files, + load_tiff_file, + virtualize_and_concat_archival_files_on_time, +) +from icefabric.schemas import FileType, NGWPCLocations + +ICECHUNK_STORES = [loc for loc in NGWPCLocations if "_IC" in loc.name] +load_dotenv() + + +def output_icechunk_stores(): + """Print out each icechunk store""" + for loc in ICECHUNK_STORES: + print(f"{loc.name}\n {loc.path}") + + +def snodas_virtualize_push_test( + new_ic_repo: S3Path, test_quant: int | None = None, clean_up: bool | None = False +): + """ + Push a collection of SNODAS NETCDFs + + Take the collection of SNODAS NETCDF files, virtualize and concatenate + them, and create a new IC repo to store the combined dataset. + + Specify clean-up to delete the icechunk repo after creation. + + NOTE: Take care to not overwrite an existing IC repo + """ + snodas_repo = IcechunkRepo( + location=new_ic_repo, virtual_chunk_mapping=[{"bucket": "ngwpc-forcing", "region": "us-east-1"}] + ) + nc_virt_ds = virtualize_and_concat_archival_files_on_time( + location=NGWPCLocations.SNODAS_REF.path, + file_date_pattern="zz_ssmv11034tS__T0001TTNATS*05HP001.nc", + file_type=FileType.NETCDF, + loadable_vars=["crs"], + testing_file_quantity=test_quant, + ) + snodas_repo.write_dataset(ds=nc_virt_ds, virtualized=True, commit="first commit") + print(snodas_repo.retrieve_dataset()) + if clean_up: + snodas_repo.delete_repo(quiet=True) + + +def snodas_yearly_virt_append_test( + new_ic_repo: S3Path, years: range, test_quant: int | None = None, clean_up: bool | None = False +): + """ + Incrementally push a by-year-collection of SNODAS NETCDFs + + Take the collection of SNODAS NETCDF files and virtualize/concatenate + them on a yearly basis. The year range is supplied. The data is virtualized + and concatted individually by year, and each year is appended to the IC repo. + + Specify clean-up to delete the icechunk repo after creation. 
+ + NOTE: Take care to not overwrite an existing IC repo + """ + snodas_repo = IcechunkRepo( + location=new_ic_repo, virtual_chunk_mapping=[{"bucket": "ngwpc-forcing", "region": "us-east-1"}] + ) + for y in years: + nc_virt_ds = virtualize_and_concat_archival_files_on_time( + location=NGWPCLocations.SNODAS_REF.path, + file_date_pattern="zz_ssmv11034tS__T0001TTNATS*05HP001.nc", + manual_file_pattern=f"zz_ssmv11034tS__T0001TTNATS{y}*.nc", + file_type=FileType.NETCDF, + loadable_vars=["crs"], + testing_file_quantity=test_quant, + ) + if y == min(years): + snodas_repo.write_dataset(ds=nc_virt_ds, virtualized=True, commit="first commit") + else: + snodas_repo.append_virt_data_to_store( + vds=nc_virt_ds, append_dim="time", commit=f"appended new data from the year {y}" + ) + del nc_virt_ds + print(snodas_repo.retrieve_dataset()) + if clean_up: + snodas_repo.delete_repo(quiet=True) + + +def land_cover_virtualize_push_test( + new_ic_repo: S3Path, test_quant: int | None = None, clean_up: bool | None = False +): + """ + Push a collection of NLCD GEOTIFFs + + Take the collection of NLCD GEOTIFF files, virtualize and concatenate + them, and create a new IC repo to store the combined dataset. + + Specify clean-up to delete the icechunk repo after creation. + + NOTE: Take care to not overwrite an existing IC repo + """ + nlcd_repo = IcechunkRepo( + location=new_ic_repo, virtual_chunk_mapping=[{"bucket": "ngwpc-hydrofabric", "region": "us-east-1"}] + ) + nlcd_vrt_ds = virtualize_and_concat_archival_files_on_time( + location=NGWPCLocations.NLCD_REF.path, + file_date_pattern="Annual_NLCD_LndCov_*_CU_C1V0.tif", + file_type=FileType.GEOTIFF, + just_year=True, + testing_file_quantity=test_quant, + ) + nlcd_repo.write_dataset(ds=nlcd_vrt_ds, virtualized=True, commit="first commit") + print(nlcd_repo.retrieve_dataset()) + if clean_up: + nlcd_repo.delete_repo(quiet=True) + + +def topo_push_test(tiff_fp: str, attr_name: str, new_ic_repo: S3Path, clean_up: bool | None = False): + """ + Push a topobathy GEOTIFF + + Take a topobathy GEOTIFF file and create a new IC repo + containing that file's contents. 'repo_dir' specifies + the IC repo name under the base topobathy S3 path + + Specify clean-up to delete the icechunk repo after creation. 
+ + NOTE: Take care to not overwrite an existing IC repo + """ + topo_repo = IcechunkRepo(location=new_ic_repo) + topo_ds = load_tiff_file(tiff_fp, attr_name) + topo_repo.write_dataset(ds=topo_ds, commit="first commit") + print(topo_repo.retrieve_dataset()) + if clean_up: + topo_repo.delete_repo(quiet=True) + + +def get_nc_by_year(years: range): + """Return all SNODAS reference files for a given year range""" + files = [] + for y in years: + print(y) + files += get_archival_weather_files( + loc=NGWPCLocations.SNODAS_REF.path, + file_type=FileType.NETCDF, + manual_file_pattern=f"zz_ssmv11034tS__T0001TTNATS{y}*.nc", + ) + return files + + +def conv_nc_by_year(year: str, test_quant: int | None = None): + """Convert original NETCDF3 SNODAS files into NETCDF4.""" + ncc_utils.convert_nc_files_from_s3( + orig=NGWPCLocations.SNODAS_V3.path, + dest=NGWPCLocations.SNODAS_REF.path, + manual_file_pattern=f"zz_ssmv11034tS__T0001TTNATS{year}*.nc", + testing_file_quantity=test_quant, + ) + + +if __name__ == "__main__": + snodas_virtualize_push_test( + new_ic_repo=S3Path("hydrofabric-data", "ic_testing/snodas_test"), test_quant=5, clean_up=True + ) + snodas_yearly_virt_append_test( + new_ic_repo=S3Path("hydrofabric-data", "ic_testing/snodas_yearly_append_test"), + years=range(2012, 2016), + test_quant=3, + clean_up=True, + ) + land_cover_virtualize_push_test( + new_ic_repo=S3Path("hydrofabric-data", "ic_testing/nlcd_test"), test_quant=5, clean_up=True + ) diff --git a/tools/icechunk/push_or_pull_envca_cadwr_txdot_icechunk.py b/tools/icechunk/push_or_pull_envca_cadwr_txdot_icechunk.py new file mode 100644 index 0000000..f5d2a09 --- /dev/null +++ b/tools/icechunk/push_or_pull_envca_cadwr_txdot_icechunk.py @@ -0,0 +1,26 @@ +import icechunk +import xarray as xr +from dotenv import load_dotenv +from icechunk.xarray import to_icechunk + +load_dotenv() + +bucket = "edfs-data" +prefix = "streamflow_observations/usgs_observations" +storage_config = icechunk.s3_storage(bucket=bucket, prefix=prefix, region="us-east-1", from_env=True) +try: + repo = icechunk.Repository.create(storage_config) + session = repo.writable_session("main") + ds = xr.open_zarr("usgs.zarr") + to_icechunk(ds, session) + snapshot = session.commit("Uploaded all USGS gages to the store") + print(f"All data is uploaded. Commit: {snapshot}") +except icechunk.IcechunkError as e: + if "repositories can only be created in clean prefixes" in e.message: + print("usgs_observations icechunk store already exists. 
Pulling it down now...") + repo = icechunk.Repository.open(storage_config) + session = repo.writable_session("main") + ds = xr.open_zarr(session.store, consolidated=False) + print(ds) + else: + print(f"Unexpected Icechunk error: {e}") diff --git a/tools/ipes/get_module_parameters.py b/tools/ipes/get_module_parameters.py new file mode 100644 index 0000000..05ac679 --- /dev/null +++ b/tools/ipes/get_module_parameters.py @@ -0,0 +1,52 @@ +"""A sample script to generate CFE IPEs""" + +import argparse + +import geopandas as gpd +import pandas as pd +from dotenv import load_dotenv + +# param_file = "../src/icefabric_api/data/cfe_params.csv" +# gpkg_file = "../src/icefabric_tools/test/data/gages-08070000.gpkg" +load_dotenv() + + +def create_module_params(param_file: str, gpkg_file: str) -> None: + """Creates module initial parameter estimates + + Parameters + ---------- + param_file : str + the initial parameters file + gpkg_file : str + the hydrofabric gpkg file + """ + divides = gpd.read_file(gpkg_file, layer="divides") + divides = divides["divide_id"].to_list() + + module_params = pd.read_csv(param_file) + param_values = module_params[["name", "default_value"]] + + for divide in divides: + cfg_file = f"{divide}_bmi_cfg_cfe.txt" + f = open(cfg_file, "x") + + for _, row in param_values.iterrows(): + key = row["name"] + value = row["default_value"] + f.write(f"{key}={value}\n") + + f.close() + + params_calibratable = module_params.loc[module_params["calibratable"] == "TRUE"] + params_calibratable.to_json("out.json", orient="split") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="A script to make initial parameter estimates") + + parser.add_argument("--params", help="The initial parameters file") + parser.add_argument("--gpkg", help="The hydrofabric gpkg file") + + args = parser.parse_args() + create_module_params(param_file=args.params, gpkg_file=args.gpkg) diff --git a/tools/ras_xs/R/fema_xs_processing.R b/tools/ras_xs/R/fema_xs_processing.R new file mode 100644 index 0000000..d8cf823 --- /dev/null +++ b/tools/ras_xs/R/fema_xs_processing.R @@ -0,0 +1,215 @@ +#!/usr/bin/env Rscript + +# FEMA BLE and MIP Cross-Section Processing Script - ALL SUBMODELS +# Author: Lynker-Spatial, Raytheon +# Converts FEMA flood model cross-sections to representative channel geometry features + +# Define path to FEMA BLE data and output directory +fema <- 'mip_full_collection/' +ref_path <- "sc_reference_fabric.gpkg" +outdir <- "riverML5" +dir.create(outdir, showWarnings = FALSE) + +library(sf) +library(dplyr) +library(glue) +library(DescTools) +library(tidyr) +library(data.table) +library(purrr) +library(nhdplusTools) + +clean_elev <- function(elev_vec, threshold = 100) { +for (i in which(elev_vec > threshold)) { +if (i > 1 && i < length(elev_vec)) { +elev_vec[i] <- mean(c(elev_vec[i - 1], elev_vec[i + 1]), na.rm = TRUE) +} else if (i == 1) { +elev_vec[i] <- elev_vec[i + 1] +} else if (i == length(elev_vec)) { +elev_vec[i] <- elev_vec[i - 1] +} +} +elev_vec +} + +# Helper function to compute channel area (CA) from a transect +.findCA <- function(df, depth) { + Y <- NULL + t <- filter(df, Y <= depth) + + suppressWarnings({ + x <- pmax( + 0, + DescTools::AUC(x = t$x, y = rep(depth, nrow(t)), absolutearea = FALSE) - + DescTools::AUC(x = t$x, y = t$Y, absolutearea = FALSE) + ) + }) + + ifelse(is.na(x), 0, x) +} + +# Get all BLE subdirectories excluding previous riverML runs +ble <- list.dirs(fema, recursive = FALSE) +ble <- ble[!grepl('riverML', ble)] +subs = list() +for(b in 
1:length(ble)) { + dir <- grep('submodels', list.dirs(ble[b], recursive = FALSE), value = TRUE) + # Find all GPKG submodel files and extract metadata + subs[[b]] <- length(list.files(dir, recursive = TRUE, pattern = ".gpkg$", full.names = TRUE)) + message(b) +} + +# Loop through each BLE HUC directory +for (b in 1:length(ble)) { + + dir <- grep('submodels', list.dirs(ble[b], recursive = FALSE), value = TRUE) + + # Find all GPKG submodel files and extract metadata + subs <- list.files(dir, recursive = TRUE, pattern = ".gpkg$", full.names = TRUE) |> + as.data.frame() |> + setNames("file") |> + mutate( + reach = gsub('.*/', '', file), + reach = gsub('.gpkg', '', reach), + name = gsub('/submodels', "", gsub(fema, "", dir)) + ) + + outdir_here <- glue::glue("{outdir}/{subs$name[1]}.gpkg") + + if (file.exists(outdir_here)) { + message("\tAlready processed ", basename(subs$name[1]), " - skipping") + } else { + message("Processing ", basename(ble[b]), " (", b ," in ", length(ble), ")") + + subs_data <- list() + + for (v in 1:nrow(subs)) { + message("\tProcessing ", basename(subs$file[v]), " (", v ," in ", nrow(subs), ")") + + transects <- read_sf(subs$file[v], 'XS') |> + st_transform(5070) + + ll <- list() + + for (j in 1:nrow(transects)) { + # Clean and parse station-elevation point strings + cleaned <- gsub("\\[|\\]|\\(|\\)", "", transects$station_elevation_points[j]) + cleaned <- strsplit(cleaned, ", ")[[1]] + + df <- as.data.frame(matrix(as.numeric(cleaned), ncol = 2, byrow = TRUE)) + names(df) <- c("x", "Y") + + # Parse left and right bank station locations + pins <- transects$bank_stations[j] %>% + gsub("\\[|\\]|\\'", "", .) |> + strsplit(",\\s*") |> + unlist() |> + as.numeric() + + # Subset and clean elevation data + result <- dplyr::filter(df, dplyr::between(x, pins[1], pins[2])) + result$Y <- clean_elev(result$Y) + + if (nrow(result) <= 2 | diff(range(result$Y)) < .25) { + warning("No channel in transect ", j, " for ", basename(subs$file[v])) + } else { + result$Ym <- max(result$Y) - min(result$Y) + result$TW <- max(result$x) - min(result$x) + result$flowpath_id <- subs$reach[v] + result$river_station <- transects$river_station[j] + result$model = subs$file[v] + result$A <- .findCA(result, max(result$Y)) + result$r <- result$A / ((result$Ym * result$TW) - result$A) + result$domain <- subs$name[v] + + ll[[j]] <- dplyr::distinct(dplyr::select(result, -x, -Y)) |> + slice(1) |> + left_join( + select(transects[j,], + river_station, river_reach_rs, + source_river, source_reach, source_river_station, station_elevation_points, bank_stations ), + by = c('river_station') + ) |> + st_as_sf() + } + } + + df = tryCatch({read_sf(subs$file[v], 'metadata') |> filter(key == "units")}, + error = function(e) { + data.frame(value = NA) + }) + + tmp = df |> + mutate(flowpath_id = subs$reach[v], + epsg = st_crs(read_sf(subs$file[v], 'XS'))$epsg, + crs_units = st_crs(read_sf(subs$file[v], 'XS'))$units) |> + select(flowpath_id, metdata_units = value, epsg, crs_units) + + tmp2 = bind_rows(ll) + + if(nrow(tmp2) > 0 & nrow(tmp) > 0) { + subs_data[[v]] <- left_join(tmp2, tmp, by = "flowpath_id") + } else { + subs_data[[v]] <- NULL + } + } + + huc_xs <- data.table::rbindlist(subs_data) |> + tibble() + + if (nrow(huc_xs) == 0) { + warning("No channels in submodel ", v, " for ", subs$reach[v]) + } else { + huc_xs <- st_as_sf(huc_xs) + + # Compute representative XS features per flowpath + representive_features <- huc_xs |> + tidyr::drop_na(flowpath_id) |> + dplyr::group_by(flowpath_id) |> + arrange(river_station) |> + 
dplyr::summarise( + r = mean(r[is.finite(r)]), + TW = mean(TW), + Y = mean(Ym), + geom = geom[ceiling(n()/2)], + source_river_station = source_river_station[ceiling(n()/2)], + river_station = river_station[ceiling(n()/2)], + model = model[ceiling(n()/2)] + ) + + # Write output layers + write_sf(huc_xs, outdir_here, layer = "XS") + write_sf(representative_features, outdir_here, layer = "representative_xs") + + read_sf( + ref_path, "reference_flowpaths", + wkt_filter = st_as_text(st_as_sfc(st_bbox(st_union(huc_xs)))) + ) |> + write_sf(outdir_here, layer = "reference_fabric") + } + } +} + +# Load and export final dataset for representative XS +xs <- purrr::map(list.files(outdir, full.names = TRUE), + ~read_sf(.x, 'representative_xs')) + +y <- nhdplusTools::get_vaa(c('ftype', 'streamorde')) |> + mutate(comid = as.character(comid)) + +out_xs <- bind_rows(xs) |> + left_join(y, by = c('flowpath_id' = 'comid')) + +write_sf(out_xs, "riverML_ripple_beta_representative.gpkg") + +# Load and export final dataset for all XS +xs <- purrr::map(list.files(outdir, full.names = TRUE), + ~read_sf(.x, 'XS')) + +y <- nhdplusTools::get_vaa(c('ftype', 'streamorde')) |> + mutate(comid = as.character(comid)) + +out_xs <- bind_rows(xs) |> + left_join(y, by = c('flowpath_id' = 'comid')) + +write_sf(out_xs, "riverML_ripple_beta_all_xs.gpkg") diff --git a/tools/ras_xs/gpkg_to_parquet.py b/tools/ras_xs/gpkg_to_parquet.py new file mode 100644 index 0000000..3ec3cb8 --- /dev/null +++ b/tools/ras_xs/gpkg_to_parquet.py @@ -0,0 +1,79 @@ +"""A simple script to convert RAS XS geopackages to parquet""" + +import argparse +from pathlib import Path + +import geopandas as gpd +import pyarrow as pa +from pyarrow import parquet as pq + +from icefabric.schemas import ConflatedRasXS, RepresentativeRasXS + + +def gpkg_to_parquet(input_file: Path, output_folder: Path, schema: str) -> None: + """Convert geopackage to parquet file. + + Parameters + ---------- + input_file : Path + Path to the geopackage file to convert + output_folder : Path + Directory where the parquet file will be saved + schema : str + The schema to validate against. Either representative XS or all conflated XS + + Raises + ------ + FileNotFoundError + If the input file doesn't exist + """ + if not input_file.exists(): + raise FileNotFoundError(f"Input file not found: {input_file}") + + print(f"Converting {input_file} to parquet") + + output_folder.mkdir(parents=True, exist_ok=True) + + gdf = gpd.read_file(input_file) + gdf = gdf.drop_duplicates() # drop duplicates + + # NOTE: there will be a warning as we're overwriting the geometry column with WKB. 
This is fine for now + gdf["geometry"] = gdf["geometry"].to_wkb() + + # Create PyArrow table with schema validation + if schema == "representative": + table = pa.Table.from_pandas( + gdf[RepresentativeRasXS.columns()], schema=RepresentativeRasXS.arrow_schema() + ) + elif schema == "conflated": + table = pa.Table.from_pandas(gdf[ConflatedRasXS.columns()], schema=ConflatedRasXS.arrow_schema()) + else: + raise ValueError(f"Unknown schema '{schema}'. Expected 'representative' or 'conflated'") + + # Write parquet file + output_path = output_folder / f"{input_file.stem}.parquet" + pq.write_table(table, output_path) + + print(f"Successfully converted to {output_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Convert geopackage files to parquet format") + + parser.add_argument("--gpkg", type=Path, required=True, help="Path to the geopackage file to convert") + parser.add_argument( + "--schema", + type=str, + choices=["representative", "conflated"], + required=True, + help="The schema to validate against. Either representative XS or all conflated XS", + ) + parser.add_argument( + "--output-folder", + type=Path, + default=Path.cwd(), + help="Output directory for parquet file (default is cwd)", + ) + + args = parser.parse_args() + gpkg_to_parquet(input_file=args.gpkg, output_folder=args.output_folder, schema=args.schema) diff --git a/tools/troute/usbr_data_retrieval.py b/tools/troute/usbr_data_retrieval.py new file mode 100644 index 0000000..1130e74 --- /dev/null +++ b/tools/troute/usbr_data_retrieval.py @@ -0,0 +1,315 @@ +"""A script to retrieve USBR reservoir release data from RISE and write T-Route TimeSlice files""" + +import argparse +from datetime import datetime +from io import StringIO +from pathlib import Path +from typing import Any + +import httpx +import pandas as pd +import xarray as xr + +from icefabric.helpers import EXT_RISE_BASE_URL, RISE_HEADERS, make_sync_get_req_to_rise + + +def valid_date(s: str) -> datetime: + """Validates that a string is a date and parses it into a datetime + + Parameters + ---------- + s : str + the date string + + Returns + ------- + datetime + the parsed datetime + + Raises + ------ + ValueError + Not a valid date + """ + try: + return datetime.strptime(s, "%Y-%m-%d") + except ValueError as e: + msg = f"Not a valid date: '{s}'. Expected format: YYYY-MM-DD." + raise ValueError(msg) from e + + +""" +USGS: +---- +>>> xr.open_dataset("/Users/taddbindas/projects/NGWPC/icefabric/data/2023-04-01_00_00_00.15min.usgsTimeSlice.ncdf") + Size: 3kB +Dimensions: (stationIdInd: 57) +Dimensions without coordinates: stationIdInd +Data variables: + stationId (stationIdInd) |S15 855B ... + time (stationIdInd) |S19 1kB ... + discharge (stationIdInd) float32 228B ... + discharge_quality (stationIdInd) int16 114B ... + queryTime (stationIdInd) datetime64[ns] 456B ... +Attributes: + fileUpdateTimeUTC: 2023-04-01_04:54:15 + sliceCenterTimeUTC: 2023-04-01_00:00:00 + sliceTimeResolutionMinutes: 15 + +USACE +----- +>>> xr.open_dataset("/Users/taddbindas/projects/NGWPC/icefabric/data/2023-04-01_00_00_00.15min.usaceTimeSlice.ncdf", decode_times=False) + Size: 0B +Dimensions: (stationIdInd: 0) +Dimensions without coordinates: stationIdInd +Data variables: + stationId (stationIdInd) |S15 0B ... + time (stationIdInd) |S19 0B ... + discharge (stationIdInd) float32 0B ... + discharge_quality (stationIdInd) int16 0B ... + queryTime (stationIdInd) int32 0B ... 
+Attributes: + fileUpdateTimeUTC: 2023-04-01_04:55:00 + sliceCenterTimeUTC: 2023-04-01_00:00:00 + sliceTimeResolutionMinutes: 15 +""" + + +def write_ds(df: pd.DataFrame, params: dict[str, Any], location_id: str, info: str, output_folder: Path) -> None: + """Writes the newly obtained USBR data to disk in the T-Route specified format""" + timestep = df["Timestep"].values[0] + if timestep == "daily": + df["Datetime (UTC)"] = pd.to_datetime(df["Datetime (UTC)"]) + df_indexed = df.set_index("Datetime (UTC)") + + # Creates an extended 15-minute range from 00:00 of the first day to 00:00 of the day after the last day + start_date = df_indexed.index.min().normalize() # gets YYYY-MM-DD 00:00:00 + end_date = df_indexed.index.max().normalize() + pd.Timedelta(days=1) # gets YYYY-MM-DD 00:00:00 + interpolated_index = pd.date_range(start=start_date, end=end_date, freq="15min")[ + :-1 + ] # Remove the final timestamp to end at 23:45 + + # Reindex with nearest interpolation + df_extended = df_indexed.reindex(interpolated_index, method="nearest") + df = df_extended.reset_index() + df = df.rename(columns={"index": "Datetime (UTC)"}) # reset_index() names the interpolated column "index" + + # Re-index on the interpolated 15-minute timestamps + df["Datetime (UTC)"] = pd.to_datetime(df["Datetime (UTC)"]) + df_indexed = df.set_index("Datetime (UTC)") + else: + raise ValueError(f"Cannot interpolate non-daily values. Timestep is: {timestep}") + + # Create a separate file for each 15-minute timestamp + for timestamp, row in df_indexed.iterrows(): + # Format timestamp for filename: YYYY-MM-DD_HH:MM:SS + time_str = timestamp.strftime("%Y-%m-%d_%H:%M:%S") + file_name = f"{time_str}.15min.usbrTimeSlice.ncdf" + + # Create arrays for this single timestamp + stationId = xr.DataArray( + data=[str(location_id).encode("utf-8")], # Single station as array + dims=["stationIdInd"], + attrs={"long_name": info, "units": "-"}, + ) + + time_array = xr.DataArray( + data=[time_str.encode("utf-8")], # Single timestamp as array + dims=["stationIdInd"], + attrs={"long_name": "YYYY-MM-DD_HH:mm:ss UTC", "units": "UTC"}, + ) + + discharge = xr.DataArray( + data=[row["Result"]], # single value as array + dims=["stationIdInd"], + attrs={ + "long_name": "Discharge", + "units": "ft^3/s", + }, + ) + + discharge_quality = xr.DataArray( + data=[100], + dims=["stationIdInd"], + attrs={ + "long_name": "Discharge quality flag", + "units": "-", + }, + ) + + queryTime = xr.DataArray( + data=[int(pd.Timestamp.now().timestamp())], # Unix timestamp as integer + dims=["stationIdInd"], + attrs={ + "long_name": "Query time as unix timestamp", + "units": "seconds since 1970-01-01", + }, + ) + + # Create the dataset matching USGS TimeSlice format + ds = xr.Dataset( + data_vars={ + "stationId": stationId, + "time": time_array, + "discharge": discharge, + "discharge_quality": discharge_quality, + "queryTime": queryTime, + }, + attrs={ + "fileUpdateTimeUTC": pd.Timestamp.now().strftime("%Y-%m-%d_%H:%M:%S"), + "sliceCenterTimeUTC": time_str, + "sliceTimeResolutionMinutes": 15, + "usbr_catalog_item_id": params.get("itemId", ""), + }, + ) + + # Save individual file + output_file = output_folder / file_name + ds.to_netcdf(output_file) + print(f"Created: {output_file}") + + +def result_to_file(location_id: str, start_date: str, end_date: str, output_folder: Path) -> None: + """Calls the USBR API and formats the response for t-route + + Parameters + ---------- + location_id : str + the usbr reservoir ID + output_folder : Path + The path to the folder to dump the outputs + """ + rise_response = 
make_sync_get_req_to_rise(f"{EXT_RISE_BASE_URL}/location/{location_id}") + try: + if rise_response["status_code"] == 200: + relationships = rise_response["detail"]["data"]["relationships"] + catalog_records = relationships["catalogRecords"]["data"] + else: + print(f"Error reading location: {rise_response['status_code']}") + raise ValueError(f"Error reading location: {rise_response['status_code']}") + except KeyError as e: + msg = f"Cannot find record for location_id: {location_id}" + print(msg) + raise KeyError(msg) from e + + all_catalog_items = [] + for record in catalog_records: + try: + record_id = record["id"].split("/rise/api/")[-1] + record_response = make_sync_get_req_to_rise(f"{EXT_RISE_BASE_URL}/{record_id}") + + if record_response["status_code"] == 200: + relationships = record_response["detail"]["data"]["relationships"] + catalog_items = relationships["catalogItems"]["data"] + all_catalog_items.extend(catalog_items) + else: + print(f"Error reading record: {record_response['status_code']}") + raise ValueError(f"Error reading record: {record_response['status_code']}") + except KeyError as e: + msg = f"Cannot find item for record: {record}" + print(msg) + raise KeyError(msg) from e + + valid_items = [] + info = [] + for item in all_catalog_items: + try: + item_id = item["id"].split("/rise/api/")[-1] + item_response = make_sync_get_req_to_rise(f"{EXT_RISE_BASE_URL}/{item_id}") + if item_response["status_code"] == 200: + attributes = item_response["detail"]["data"]["attributes"] + parameter_group = attributes["parameterGroup"] + parameter_unit = attributes["parameterUnit"] + parameter_name = attributes["parameterName"] + if ( + parameter_group == "Lake/Reservoir Outflow" + and parameter_name == "Lake/Reservoir Release - Total" + and parameter_unit == "cfs" + ): + # Currently only supporting reservoir releases in cfs + valid_items.append(attributes["_id"]) + info.append(attributes["itemTitle"]) + else: + print(f"Error reading item: {item_response['status_code']}") + raise ValueError(f"Error reading item: {item_response['status_code']}") + except KeyError as e: + msg = f"Cannot find data for item: {item}" + print(msg) + raise KeyError(msg) from e + + # Assert that exactly one matching catalog item was found + assert len(valid_items) > 0, "Cannot find reservoir data. No releases found for location" + assert len(valid_items) == 1, ( + "Cannot determine correct catalog id. Multiple entries. Please see development team" + ) + + item = valid_items[0] + _info = info[0] + # Build parameters + params = { + "type": "csv", + "itemId": item, + "before": end_date, + "after": start_date, + "order": "ASC", + } + + # Build the URL + base_url = f"{EXT_RISE_BASE_URL}/result/download" + param_string = "&".join([f"{k}={v}" for k, v in params.items()]) + download_url = f"{base_url}?{param_string}" + + # Download the CSV, then write the xarray files + response = httpx.get(download_url, headers=RISE_HEADERS, timeout=15).content + csv_string = response.decode("utf-8") + lines = csv_string.split("\n") + start_row = None + for i, line in enumerate(lines): + if "#SERIES DATA#" in line: + start_row = i + 1 # Use the row after "#SERIES DATA#" as the header + break + + if start_row is not None: + df = pd.read_csv(StringIO(csv_string), skiprows=start_row) + else: + raise NotImplementedError("'#SERIES DATA#' header not found in the downloaded CSV") 
+ write_ds(df, params, location_id=location_id, info=_info, output_folder=output_folder) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Convert USBR reservoir outflows into TimeSlice data to be used in USBR reservoir persistence" + ) + + parser.add_argument( + "--location-id", + type=str, + required=True, + help="The RISE location ID to pull reservoir release data from", + ) + parser.add_argument( + "--start-date", + type=str, + required=True, + help="The start date (YYYY-MM-DD) for pulling reservoir release data", + ) + parser.add_argument( + "--end-date", + type=str, + required=True, + help="The end date (YYYY-MM-DD) for pulling reservoir release data", + ) + parser.add_argument( + "--output-folder", + type=Path, + default=Path.cwd(), + help="Output directory for the TimeSlice netCDF files (default is cwd)", + ) + + args = parser.parse_args() + result_to_file( + location_id=args.location_id, + start_date=args.start_date, + end_date=args.end_date, + output_folder=args.output_folder, + )
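Usage sketch (editorial note, not part of the patch above): the snippet below shows one way the new USBR retrieval CLI could be driven end to end and its output spot-checked. The location ID, date range, and output directory are placeholders, and it assumes the icefabric package, its RISE helpers, and network access to RISE are available locally.

# Hypothetical invocation of tools/troute/usbr_data_retrieval.py; the location ID and
# dates below are placeholders, not values taken from the patch.
import subprocess
from pathlib import Path

import xarray as xr

out_dir = Path("usbr_timeslices")
out_dir.mkdir(exist_ok=True)  # the script writes into, but does not create, this folder

subprocess.run(
    [
        "python",
        "tools/troute/usbr_data_retrieval.py",
        "--location-id", "1234",        # placeholder RISE location ID
        "--start-date", "2023-04-01",   # YYYY-MM-DD, matching the script's expected format
        "--end-date", "2023-04-02",
        "--output-folder", str(out_dir),
    ],
    check=True,
)

# Each 15-minute timestamp becomes its own <timestamp>.15min.usbrTimeSlice.ncdf file
first_slice = sorted(out_dir.glob("*.15min.usbrTimeSlice.ncdf"))[0]
ds = xr.open_dataset(first_slice, decode_times=False)
print(ds["stationId"].values, ds["discharge"].values)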
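Similarly, a minimal sketch of the RAS XS conversion flow, assuming the representative-XS geopackage written by fema_xs_processing.R is present in the working directory; the input and output paths are illustrative only.

# Hypothetical invocation of tools/ras_xs/gpkg_to_parquet.py against the gpkg produced
# by fema_xs_processing.R; paths are assumptions for illustration.
import subprocess
from pathlib import Path

import pyarrow.parquet as pq

gpkg = Path("riverML_ripple_beta_representative.gpkg")
out_dir = Path("parquet_out")  # created by the script if it does not exist

subprocess.run(
    [
        "python",
        "tools/ras_xs/gpkg_to_parquet.py",
        "--gpkg", str(gpkg),
        "--schema", "representative",
        "--output-folder", str(out_dir),
    ],
    check=True,
)

# The script names the parquet file after the input geopackage's stem
table = pq.read_table(out_dir / f"{gpkg.stem}.parquet")
print(table.schema)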