From 6250172f9b6b279cfaab53691cd642df82c6b367 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 30 Aug 2023 14:56:18 +0200 Subject: [PATCH 01/83] [Range] use monotonic range calc for reshapes --- src/qonnx/util/range_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qonnx/util/range_analysis.py b/src/qonnx/util/range_analysis.py index f58093c0..074cdc5b 100644 --- a/src/qonnx/util/range_analysis.py +++ b/src/qonnx/util/range_analysis.py @@ -244,7 +244,7 @@ def calc_range_outdtype(node, model, range_dict): "Conv": calc_conv_range, "QuantMaxNorm": calc_range_outdtype, "Flatten": propagate_range, - "Reshape": propagate_range, + "Reshape": calc_monotonic_range, "Quant": calc_monotonic_range, "BipolarQuant": calc_monotonic_range, "Mul": calc_monotonic_range, From 64d95ac94f2c3aeb1de55a82e6793d151a52f9de Mon Sep 17 00:00:00 2001 From: shashwat1198 Date: Sun, 22 Oct 2023 10:12:04 +0100 Subject: [PATCH 02/83] QuantLSTM ONNX representation --- notebooks/4_quant_lstm.ipynb | 2934 ++++++++++++++++++++++++++++++++++ 1 file changed, 2934 insertions(+) create mode 100644 notebooks/4_quant_lstm.ipynb diff --git a/notebooks/4_quant_lstm.ipynb b/notebooks/4_quant_lstm.ipynb new file mode 100644 index 00000000..72cac7e9 --- /dev/null +++ b/notebooks/4_quant_lstm.ipynb @@ -0,0 +1,2934 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5ef5f772-f48a-4bb1-bb68-4e8e9236fd2e", + "metadata": {}, + "source": [ + "# QuantLSTM - ONNX (QCDQ) representation" + ] + }, + { + "cell_type": "markdown", + "id": "e5a747f9-fd74-4ebc-8d74-17bf06ff2d48", + "metadata": {}, + "source": [ + "This notebook is divided into `five` parts:\n", + "\n", + "
Part 1 : Introduction to LSTMs.\n", + "
\n", + "
Part 2 : Model creation with brevitas QuantLSTM layer. \n", + "
\n", + "
Part 3 : Build ONNX model representing the LSTM computation used to process a single input with `QCDQ quantization` (weights/inputs/activations) \n", + "
\n", + "
Part 4 : Integration of the QCDQ-LSTM graph with the `SCAN` operator. This operator allows cyclic computations (required for state updates in recurrent neural networks) that are currently not supported in ONNX.\n", + "
\n", + "
Part 5 : Functional verification of the `QCDQ-LSTM` model with brevitas `QuantLSTM` model output." + ] + }, + { + "cell_type": "markdown", + "id": "69ae7154-8cf3-4ee7-88c3-3bec0550008a", + "metadata": {}, + "source": [ + "# Introduction to LSTM's " + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "e7a903ef-1680-4a20-8c61-267884b76c96", + "metadata": {}, + "source": [ + "`LSTM’s (Long Short-Term Memory)` are sequential neural networks that are capable of learning long term dependencies especially in sequence prediction problems. They are deployed in machine translation, speech recognition, image captioning and especially used for time-series analysis applications.\n", + "\n", + "LSTM's have `feedback connections`, unlike conventional feed-forward neural networks (where the compute path goes only in the forward direction). This makes them capable of processing time-series data like video streams or analyzing network traffic patterns.\n", + "Such feedback connections though also make their hardware implementations complicated as they require state updates unlike feed-forward neural networks.\n", + "
\n", + "
\n", + "The LSTM compute requires the following six compute equations:\n", + "$$\n", + " f_t = \\sigma (W_f * x_t + U_f * H_{t-1} + b_f) \n", + "$$\n", + "$$\n", + " i_t = \\sigma (W_i * x_t + U_i * H_{t-1} + b_i)\n", + "$$\n", + "$$\n", + " \\tilde{C_t} = tanh(W_c * x_t + U_c * H_{t-1} + b_c)\n", + "$$\n", + "$$\n", + " o_t = \\sigma (W_o * x_t + U_o * H_{t-1} + b_o)\n", + "$$\n", + "$$\n", + " C_t = f_t \\odot C_{t-1} + i_t \\odot \\tilde{C_t}\n", + "$$\n", + "$$\n", + " H_t = tanh(C_t) \\odot o_t \n", + "$$\n", + "\n", + "The first four equations represent the `gate computations`.\n", + "We compute the `cell state` and the `hidden state` in the last two equations respectively. \n", + "These two states are then fed back into the LSTM cell for the computation of the next input." + ] + }, + { + "cell_type": "markdown", + "id": "70d052c8-e5cd-4eb1-89e5-f8ae956cb853", + "metadata": {}, + "source": [ + "# QuantLSTM model creation" + ] + }, + { + "cell_type": "markdown", + "id": "6a64be7c", + "metadata": {}, + "source": [ + "In the 2nd part of the notebook, we will create a single layer `QuantLSTM` model in brevitas. We will evaluate with a given set of inputs. We then export this model to `QONNX` so that the same parameters (weights/biases/scales) can be extracted and used in the `QCDQ-LSTM` implementation." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "84d66548-365d-46a5-9eaa-bb767085f9aa", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'\n" + ] + } + ], + "source": [ + "# We import the required libraries to execute different functions in the notebook.\n", + "# The first four imports are required to build the QuantLSTM model in brevitas. 
\n", + "# The model created will then be exported to QONNX and it's parameters used in the QCDQ implementation.\n", + "\n", + "import torch\n", + "from torch import nn\n", + "from brevitas.nn import QuantLSTM\n", + "from brevitas.export import export_onnx_qcdq\n", + "\n", + "#We need the onnx and onnx helper nodes to build the onnx graph for the LSTM compute.\n", + "import onnx\n", + "from onnx import numpy_helper\n", + "from onnx.helper import make_tensor_value_info, make_node, make_graph, make_model, make_tensor\n", + "#onnxruntime will be used to execute our onnx model.\n", + "import onnxruntime as rt \n", + "from qonnx.util.basic import qonnx_make_model\n", + "#numpy allows us to manipulate outputs from the brevitas and the ONNX model\n", + "import numpy as np \n", + "# Netron visualization tool will help us view interactable graphs\n", + "import netron" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "23a7682c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "quant_input_supplied to brevitas = tensor([[-1.0000, -0.5000, -1.0000, 0.5156, -1.0000, 0.9922, -0.8047, -1.0000,\n", + " 0.2188, 0.9922]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.7266, -0.9531, 0.9922, 0.9922, -1.0000, 0.9922, -0.7734, -1.0000,\n", + " -0.0859, 0.6250]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.6719, -1.0000, 0.0547, -0.5234, -0.0000, 0.1250, -1.0000, 0.3047,\n", + " -0.0312, -1.0000]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-1.0000, -0.1797, 0.3516, -0.1328, -1.0000, -1.0000, 0.8750, -0.2812,\n", + " 0.4844, -0.3203]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.6719, -0.1484, 0.5078, 0.5312, -0.2969, 0.1719, -1.0000, 0.4688,\n", + " -0.2500, 0.8672]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas 
= tensor([[ 0.3125, 0.9922, 0.8281, -0.4297, -1.0000, 0.9922, -1.0000, 0.9922,\n", + " -1.0000, 0.2578]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.3125, -1.0000, -0.4688, 0.2656, -1.0000, -1.0000, -1.0000, -0.7266,\n", + " 0.9922, 0.8984]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.5625, 0.8359, -1.0000, 0.1875, -1.0000, -1.0000, 0.1562, 0.3438,\n", + " 0.6172, -1.0000]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-1.0000, -0.0781, 0.3203, 0.1797, -1.0000, -0.1875, 0.9219, -0.4609,\n", + " -0.3125, 0.2031]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.8750, -1.0000, 0.6016, -1.0000, -0.7656, -0.1484, 0.9922, 0.6406,\n", + " -1.0000, 0.9922]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.9922, -1.0000, 0.5078, -1.0000, -1.0000, 0.4453, -1.0000, 0.6719,\n", + " -1.0000, -1.0000]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.0703, -1.0000, -0.6797, -1.0000, -1.0000, -0.8750, -0.6797, 0.3672,\n", + " -0.5938, -0.2031]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.6641, 0.9922, 0.1641, 0.9922, 0.9922, -1.0000, -1.0000, 0.9922,\n", + " 0.3438, 0.4688]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.1875, 0.0000, -0.2812, -1.0000, -1.0000, -0.0391, 0.0781, 0.9922,\n", + " -0.2188, 0.9922]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.2578, 0.9922, -1.0000, 0.4297, -0.7500, 0.2891, -1.0000, -1.0000,\n", + " 0.6484, 0.3828]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.3594, -0.0000, -1.0000, 0.4688, -0.2734, -1.0000, -0.2969, 0.9922,\n", + " 0.9922, 0.9062]])\n", + "----------------------------\n", + "quant_input_supplied 
to brevitas = tensor([[-0.0938, -1.0000, 0.1016, -0.7109, -0.3203, 0.7578, 0.9922, 0.3359,\n", + " 0.1328, 0.4062]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.4141, -0.6328, -0.7422, 0.9609, -0.9062, -0.4297, 0.7031, 0.9922,\n", + " -1.0000, -0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.3203, -1.0000, -0.7109, 0.3281, 0.6016, -0.2031, -0.6172, 0.7031,\n", + " -0.5078, -1.0000]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-1.0000, -0.2500, -0.9766, -1.0000, 0.3984, -0.6484, -1.0000, 0.7188,\n", + " 0.9922, 0.9453]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[-0.5234, 0.9922, -0.3984, 0.1328, -0.0625, -0.8047, -0.1562, -0.1250,\n", + " -0.1172, 0.6328]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.0547, 0.0156, 0.0703, -0.8750, -1.0000, 0.5156, -0.0938, -0.2969,\n", + " -0.9922, 0.9922]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.9922, -1.0000, 0.3438, 0.9922, 0.1328, 0.2891, 0.0469, -0.3438,\n", + " -0.9531, -1.0000]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.2969, -1.0000, 0.1250, -1.0000, -0.5469, -1.0000, 0.5000, 0.7344,\n", + " -1.0000, 0.7109]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[ 0.4219, 0.4922, 0.7266, 0.0078, 0.0469, 0.9844, -0.5391, -0.0781,\n", + " 0.9922, -1.0000]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied 
to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 
0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", + " 0.7969]])\n", + "----------------------------\n", + "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", 
+ " 0.7969]])\n", + "----------------------------\n", + "[[[ 0.1484375 -0.0078125 0.0390625 0.140625 0.0078125 0.\n", + " 0.109375 -0.09375 0.0390625 -0.0625 0.015625 -0.1171875\n", + " 0.1015625 0.03125 0.1640625 -0.015625 -0.0234375 -0.015625\n", + " -0.046875 0.0078125]]\n", + "\n", + " [[ 0.2109375 -0.0234375 0.03125 0.2109375 0.0234375 -0.015625\n", + " 0.1875 -0.1484375 0.046875 -0.09375 0.0234375 -0.1640625\n", + " 0.1484375 0.0625 0.2578125 -0.015625 -0.03125 -0.0234375\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2421875 -0.0390625 0.015625 0.25 0.03125 -0.0234375\n", + " 0.234375 -0.1796875 0.0546875 -0.109375 0.015625 -0.1875\n", + " 0.1796875 0.09375 0.3125 0. -0.03125 -0.03125\n", + " -0.078125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.0390625 0.015625 0.265625 0.0390625 -0.03125\n", + " 0.265625 -0.1875 0.0546875 -0.125 0.015625 -0.1953125\n", + " 0.1953125 0.1171875 0.3359375 0.015625 -0.03125 -0.0390625\n", + " -0.078125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", + " 0.2890625 -0.1953125 0.0546875 -0.125 0.015625 -0.203125\n", + " 0.203125 0.125 0.359375 0.0234375 -0.03125 -0.046875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.2734375 0.046875 -0.0390625\n", + " 0.296875 -0.1953125 0.0546875 -0.1328125 0.015625 -0.203125\n", + " 0.2109375 0.1328125 0.3671875 0.03125 -0.0234375 -0.046875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.015625 0.28125 0.0546875 -0.046875\n", + " 0.3046875 -0.1953125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.2109375 0.140625 0.375 0.0390625 -0.0234375 -0.046875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.0546875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.2109375 0.140625 0.3828125 0.0390625 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 
-0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.2109375 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 
0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]]\n" + ] + } + 
], + "source": [ + "# In this block of code we will create the QuantLSTM model using the brevitas layer\n", + "torch.manual_seed(0) #Setting the manual seeds to 0 for consistency in outputs.\n", + "\n", + "# Initializing attributes that can be changed accordingly depending on users requirements.\n", + "\n", + "num_inputs = 25 #Defining the number of inputs \n", + "num_features_brevitas = 10 #This attribute defines number of features each input comprises of\n", + "num_hidden_cells_brevitas = 20 #This attribute defines the number of hidden cells in the QuantLSTM layer\n", + "\n", + "# Creating a sequential model\n", + "\n", + "model_lstm = nn.Sequential( \n", + " QuantLSTM(input_size = num_features_brevitas, hidden_size = num_hidden_cells_brevitas, bias_quant=None) \n", + " ) #No other feature described here implies quantization of inputs/parameters/activations to 8-bits.\n", + "model_lstm.eval() #Setting the model to eval mode to make sure all the parameters and scales are frozen and not updated on runtime.\n", + "export_path = './quant_lstm_quantization_qcdq.onnx' #Setting export path for the model\n", + "export_onnx_qcdq(model_lstm,(torch.randn(num_inputs, 1, num_features_brevitas)), opset_version=14, export_path=export_path) #Exporting the model to QCDQ representation. 
\n", + "\n", + "# Creating a test input to execute the above created model\n", + "\n", + "in_qcdq_node = np.empty([num_inputs,1,num_features_brevitas],dtype=np.float32).reshape([num_inputs,1,num_features_brevitas])\n", + "in_qcdq_node.fill(0.8) #Using the fill function to fill the numpy array with a value of 0.8\n", + "test_input = torch.from_numpy(in_qcdq_node) #Converting the array to a torch tensor\n", + "brevitas_output = model_lstm(test_input) #Executing the model with the set input\n", + "brevitas_output = brevitas_output[0].detach().numpy()\n", + "print(brevitas_output)" + ] + }, + { + "cell_type": "markdown", + "id": "347ef1f5-36e8-4103-9b13-efa7fe93eb5e", + "metadata": {}, + "source": [ + "`Abbreviations` : Short-forms defined in the next code block can be referenced here for definitions.\n", + "\n", + "* Wi = \"Weight matrix for the input gate\" (Similarily for the other three gates)\n", + "* Ui = \"Recurrence matrix for the input gate\" (Similarily for the other three gates)\n", + "* bi = \"Bias for the input gate\" (Similarily for the other three gates)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0bfbf5a3-8556-4190-a28f-4fe9859c55a9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.layers.0.0.input_gate_params.bias\n", + "(20,)\n", + "[-0.02587563 -0.18425222 -0.18189065 0.02914573 -0.21827428 0.0595416\n", + " -0.20598626 -0.15559138 -0.04639753 -0.2133838 0.18059207 0.18321364\n", + " -0.11679631 0.04684116 0.11439164 0.07105622 -0.02995344 -0.21090843\n", + " 0.1625932 -0.19612479] , 0\n", + "-------------------------\n", + "0.layers.0.0.input_gate_params.input_weight.weight\n", + "(20, 10)\n", + "[[-4.14119214e-02 1.38706667e-02 -7.36431107e-02 -8.17852393e-02\n", + " -1.93256751e-01 1.23205660e-02 -2.53894478e-02 1.94940954e-01\n", + " -7.36160800e-02 1.72829047e-01]\n", + " [ 1.05855539e-02 -1.00462548e-01 -5.31778559e-02 -2.53751595e-02\n", + " 2.31616711e-03 
-3.68398018e-02 6.63604736e-02 1.84143797e-01\n", + " 3.51473056e-02 8.09932351e-02]\n", + " [ 1.38081744e-01 4.81988601e-02 1.03076197e-01 1.17293097e-01\n", + " 2.09298924e-01 -2.04075590e-01 7.65163079e-02 -1.01319486e-02\n", + " -4.01576199e-02 -8.62098187e-02]\n", + " [ 1.34432539e-01 2.04552680e-01 -1.82483241e-01 1.20810278e-01\n", + " 1.54187992e-01 3.90806384e-02 2.63404008e-03 1.72071218e-01\n", + " 6.62961556e-03 -5.57729751e-02]\n", + " [-1.65121444e-02 7.17408881e-02 5.59775345e-02 -1.20642958e-02\n", + " 7.05851838e-02 6.02219440e-02 -1.81134686e-01 5.57176135e-02\n", + " 1.36812523e-01 2.56436393e-02]\n", + " [-2.04101056e-02 1.71289816e-01 -1.95361048e-01 -1.02062307e-01\n", + " -1.01068199e-01 1.93207934e-01 -2.16277346e-01 2.21768115e-02\n", + " -2.16605455e-01 -7.35303294e-03]\n", + " [ 8.33466202e-02 -5.22914641e-02 2.17063010e-01 7.11822009e-04\n", + " -1.14001475e-01 5.76605424e-02 1.16289847e-01 -4.44249017e-04\n", + " 1.91289768e-01 -1.41524345e-01]\n", + " [ 9.54081938e-02 1.26971915e-01 1.11063533e-01 -8.20205314e-05\n", + " 6.38317242e-02 -1.75422058e-01 -1.75476715e-01 -1.38986288e-02\n", + " -2.80253254e-02 1.66033790e-01]\n", + " [ 1.62366882e-01 1.51616067e-01 -1.02419287e-01 -1.75539613e-01\n", + " -2.09742919e-01 8.09257179e-02 -2.01488122e-01 -2.23217383e-01\n", + " -1.13006435e-01 -1.88792080e-01]\n", + " [-8.81207064e-02 -1.40770882e-01 -1.14718042e-01 2.12588429e-01\n", + " -4.21379767e-02 1.85490459e-01 4.96126944e-03 -2.87544206e-02\n", + " -6.54680878e-02 -1.59840211e-01]\n", + " [-1.79656431e-01 1.54830217e-01 -6.89065754e-02 -2.18012080e-01\n", + " 2.05210581e-01 4.14780807e-03 -1.49626598e-01 -1.75766915e-01\n", + " -1.87781662e-01 -1.96070760e-01]\n", + " [ 2.02346548e-01 1.54175445e-01 1.82888191e-02 -1.90574318e-01\n", + " -5.84847443e-02 -2.10055038e-01 7.70593956e-02 -5.93719892e-02\n", + " -4.78506237e-02 -6.97683394e-02]\n", + " [ 1.04838371e-01 1.21036001e-01 4.89832126e-02 -2.80011501e-02\n", + " -2.20977236e-02 
-3.90723767e-03 -1.66511953e-01 2.18188778e-01\n", + " -9.64377001e-02 1.30095944e-01]\n", + " [-1.25353500e-01 1.50110642e-03 7.65467212e-02 -2.05311388e-01\n", + " 1.02568395e-01 -1.71158642e-01 3.12034953e-02 -4.43410687e-02\n", + " 1.28176615e-01 2.17323676e-01]\n", + " [ 5.03933132e-02 -6.38488680e-03 -1.10784821e-01 8.33686888e-02\n", + " -1.07626989e-01 9.23645869e-02 -9.69173536e-02 1.51675642e-01\n", + " 1.71514452e-01 1.37112319e-01]\n", + " [ 2.23987759e-03 1.03696242e-01 -2.03757793e-01 1.81339085e-01\n", + " -5.80957830e-02 8.15173239e-02 -3.78652588e-02 -7.50842392e-02\n", + " -1.05006970e-01 1.44231498e-01]\n", + " [-1.21653110e-01 -3.94320451e-02 1.12798467e-01 2.25366149e-02\n", + " -1.88142627e-01 -2.22348958e-01 -1.08711593e-01 2.06236228e-01\n", + " -1.58990204e-01 1.23237595e-01]\n", + " [ 1.60061240e-01 -9.26844329e-02 -9.87462699e-02 -1.60870835e-01\n", + " 3.48785594e-02 -3.12594734e-02 1.08638955e-02 9.69918296e-02\n", + " 9.38790441e-02 -7.05472827e-02]\n", + " [ 1.53575651e-02 5.31169996e-02 4.75974986e-03 4.47460003e-02\n", + " -9.05808210e-02 1.83284596e-01 -2.29354147e-02 -2.86094397e-02\n", + " -2.00689927e-01 -1.62085444e-01]\n", + " [ 6.95567206e-03 -3.45815569e-02 -1.12424992e-01 1.17047116e-01\n", + " -2.00185552e-02 7.86398575e-02 1.88336477e-01 -1.02802545e-01\n", + " -1.10053055e-01 -4.49331515e-02]] , 1\n", + "-------------------------\n", + "0.layers.0.0.input_gate_params.hidden_weight.weight\n", + "(20, 20)\n", + "[[-1.89352538e-02 -1.11839756e-01 -5.36844507e-02 -6.44523604e-03\n", + " 1.00301303e-01 2.06872717e-01 1.65582791e-01 2.36654170e-02\n", + " -1.40909785e-02 5.72774969e-02 -9.12800338e-03 -2.93454379e-02\n", + " 7.68917575e-02 -1.81926534e-01 -1.90163419e-01 9.05744440e-04\n", + " -6.77747875e-02 -1.10600702e-01 -2.08165124e-01 1.49785221e-01]\n", + " [-8.90937075e-03 -1.20138384e-01 -9.10849124e-02 5.87869175e-02\n", + " -1.62167445e-01 1.43613769e-02 -2.75748386e-03 7.61744976e-02\n", + " 8.87038633e-02 
-1.46100059e-01 9.65513662e-02 1.68849513e-01\n", + " 1.43956831e-02 1.13917463e-01 -8.46547335e-02 4.44148518e-02\n", + " 6.53375536e-02 -1.03280008e-01 1.38058737e-01 -2.11419612e-01]\n", + " [-8.39947835e-02 -1.31567493e-01 -1.32741287e-01 -1.35494858e-01\n", + " -2.10702628e-01 3.83746810e-02 -4.42331657e-02 -1.88279316e-01\n", + " -9.19632221e-05 -3.72487307e-02 9.22437534e-02 -1.75148100e-01\n", + " -6.29062578e-02 4.60259691e-02 9.47839618e-02 1.69158224e-02\n", + " 6.05970472e-02 2.23524958e-01 -7.74600878e-02 1.52398065e-01]\n", + " [ 1.92612275e-01 -1.97806209e-01 5.40891960e-02 1.26661941e-01\n", + " -3.48797850e-02 1.23408221e-01 7.60573195e-03 1.70228094e-01\n", + " 4.81458148e-03 -1.43158093e-01 1.69815615e-01 6.65016174e-02\n", + " 1.90237820e-01 5.55088967e-02 1.18736811e-01 1.39421389e-01\n", + " 3.76524106e-02 -5.19809462e-02 4.61825170e-02 -1.55909836e-01]\n", + " [ 7.63913197e-03 -7.18704611e-02 1.41373863e-02 -1.77042618e-01\n", + " 1.36628836e-01 -2.06302434e-01 9.57576782e-02 1.47258580e-01\n", + " -2.04934716e-01 2.02031001e-01 -1.66225716e-01 -4.39088680e-02\n", + " 1.15872569e-01 -7.09063411e-02 1.99275032e-01 -9.86447409e-02\n", + " -2.99374424e-02 -1.46168455e-01 -1.03737742e-01 2.18205780e-01]\n", + " [ 1.68166518e-01 1.64642967e-02 1.83855016e-02 -1.89751670e-01\n", + " 1.68811426e-01 -3.35250199e-02 -9.32650268e-02 -1.77951321e-01\n", + " 1.83845311e-01 1.06031545e-01 1.34684831e-01 2.31534615e-02\n", + " -1.51732951e-01 9.15970504e-02 2.57883817e-02 7.50367939e-02\n", + " -5.56799732e-02 -1.05523452e-01 1.83565930e-01 7.49567226e-02]\n", + " [-9.07528847e-02 1.99678559e-02 -4.86066155e-02 -1.91221125e-02\n", + " 1.25389591e-01 -1.77972749e-01 2.02371553e-01 1.50499865e-01\n", + " 1.92136504e-04 -9.14627835e-02 4.55915295e-02 -1.48007214e-01\n", + " 1.45243973e-01 -1.18256845e-01 4.27256078e-02 -2.19991282e-01\n", + " 1.07079633e-01 1.51370272e-01 1.67834863e-01 1.82519276e-02]\n", + " [ 1.32025823e-01 7.62412176e-02 1.49954304e-01 
1.26183063e-01\n", + " -1.95639879e-01 2.35728398e-02 -7.62314126e-02 -1.06771380e-01\n", + " 1.56516239e-01 -3.20035741e-02 3.47357877e-02 1.40789405e-01\n", + " 1.50514722e-01 1.19332708e-01 -3.90392952e-02 -1.99321926e-01\n", + " -2.14659125e-01 7.02862144e-02 -2.65357876e-03 -1.41277447e-01]\n", + " [ 9.76564139e-02 2.02965632e-01 1.29328549e-01 -3.15438919e-02\n", + " 3.02148778e-02 -1.42630830e-01 1.05540812e-01 -1.73283800e-01\n", + " 1.54376432e-01 -1.02132224e-01 -8.86853859e-02 -1.87295631e-01\n", + " -5.40727489e-02 -2.16292981e-02 -1.03067294e-01 1.59174219e-01\n", + " 1.28328785e-01 -1.97347268e-01 -2.23675612e-02 7.51795396e-02]\n", + " [ 2.15735227e-01 -5.34672327e-02 1.37278914e-01 -1.25270970e-02\n", + " -8.57628211e-02 1.36838645e-01 -1.99253812e-01 1.87337860e-01\n", + " 2.23344907e-01 -6.10500947e-02 8.83295834e-02 2.22981662e-01\n", + " 6.74140528e-02 8.74451399e-02 8.21070075e-02 -9.14832279e-02\n", + " 5.45820408e-02 -1.19176529e-01 1.90940976e-01 -9.58186984e-02]\n", + " [ 5.11176400e-02 -6.47741258e-02 1.11825228e-01 3.68577940e-03\n", + " 1.22950912e-01 -6.05489872e-02 -1.31215081e-01 8.57292935e-02\n", + " -1.25841707e-01 -1.83588028e-01 8.63927826e-02 -1.34484172e-01\n", + " -8.40481222e-02 -5.58335669e-02 1.58777572e-02 -7.74438009e-02\n", + " -8.04765150e-02 -5.62009923e-02 1.56701818e-01 6.69540018e-02]\n", + " [-1.07652791e-01 -1.54563770e-01 5.18102152e-03 7.16358349e-02\n", + " -4.67919558e-03 1.30897254e-01 1.88077956e-01 6.55371249e-02\n", + " 7.37451240e-02 1.29728526e-01 -7.66031295e-02 3.96637134e-02\n", + " 1.80782616e-01 -1.07077263e-01 1.74031202e-02 -8.74211192e-02\n", + " -1.71936572e-01 1.18438050e-01 1.78673968e-01 -1.20800309e-01]\n", + " [ 8.38049129e-02 6.85676187e-02 8.73105526e-02 1.23087496e-01\n", + " 2.08757341e-01 1.69717655e-01 -1.95658267e-01 -8.76599625e-02\n", + " 1.18758187e-01 -1.27650708e-01 4.39067073e-02 -9.58611295e-02\n", + " 4.44106422e-02 1.09106824e-01 7.02822655e-02 1.62435979e-01\n", + " 
-2.69077457e-02 1.21389672e-01 7.22895712e-02 -7.04701096e-02]\n", + " [-1.57925934e-01 2.04573229e-01 -6.66687265e-02 1.68426275e-01\n", + " 1.40947536e-01 -9.00426600e-03 -1.84701070e-01 1.80013608e-02\n", + " -1.08096078e-01 5.81858531e-02 -8.88810679e-02 1.72345534e-01\n", + " -2.01746121e-01 -6.01959564e-02 3.52624580e-02 2.13314164e-02\n", + " 1.83701098e-01 -7.06517771e-02 -1.78495154e-01 1.48046315e-01]\n", + " [ 6.24824539e-02 1.47299409e-01 -1.32342920e-01 -1.31334439e-01\n", + " -9.03252959e-02 1.58978552e-02 7.57712200e-02 -1.28496692e-01\n", + " -2.10528076e-02 -3.86467576e-02 2.04027027e-01 -8.06416422e-02\n", + " 2.16690734e-01 -1.37144789e-01 -9.21397135e-02 -1.68184295e-01\n", + " 1.64731190e-01 -1.53769597e-01 9.25582647e-02 -8.21671411e-02]\n", + " [ 2.22826257e-01 3.15412283e-02 -1.94183901e-01 3.84835452e-02\n", + " 2.71859560e-02 -2.16274336e-01 4.48757894e-02 2.13342309e-01\n", + " 6.43487200e-02 -1.18915108e-03 -4.63541821e-02 5.94213046e-02\n", + " -9.96202976e-02 2.20200241e-01 1.93298727e-01 1.04461670e-01\n", + " -8.32887441e-02 -2.09956676e-01 -1.28724366e-01 2.17411697e-01]\n", + " [-2.05243871e-01 -2.13502616e-01 -1.61161683e-02 7.11405650e-02\n", + " -2.22554103e-01 -2.07601383e-01 1.21570053e-03 -7.50053376e-02\n", + " 1.55782372e-01 6.41999543e-02 -1.94095746e-01 -2.01538876e-01\n", + " 1.53562352e-01 -3.96501981e-02 -9.78184044e-02 7.04318583e-02\n", + " -4.39465865e-02 1.06939368e-01 5.67044728e-02 -9.68158469e-02]\n", + " [-1.79218486e-01 1.21047780e-01 -1.34345368e-01 -2.47318167e-02\n", + " 3.05733737e-02 -1.30131751e-01 1.21804118e-01 -1.57282248e-01\n", + " 5.49192652e-02 2.39149425e-02 8.20437744e-02 -2.19451547e-01\n", + " 1.29167549e-02 1.09009661e-01 -1.43156886e-01 5.53317666e-02\n", + " 8.76156322e-04 1.89696804e-01 -4.73480262e-02 1.52765575e-03]\n", + " [-9.72549468e-02 -5.51085509e-02 6.40134960e-02 -2.15656430e-01\n", + " 1.69629768e-01 1.60795882e-01 9.46965069e-02 1.67391464e-01\n", + " -6.96057901e-02 
5.09320870e-02 1.13759311e-02 -1.54622883e-01\n", + " -8.59646648e-02 -7.93827102e-02 -5.52875437e-02 -1.98549107e-01\n", + " -1.57260388e-01 -2.12343093e-02 -3.40157561e-02 -2.02978238e-01]\n", + " [ 4.77774814e-02 1.21752672e-01 1.86222807e-01 1.88188314e-01\n", + " -1.56248853e-01 -7.16619864e-02 -1.06078379e-01 4.10118401e-02\n", + " 5.99195063e-02 4.97494638e-02 1.30669191e-01 1.17969945e-01\n", + " -1.20020248e-01 1.53502032e-01 1.50838137e-01 2.95910202e-02\n", + " -1.94543302e-01 -1.37143746e-01 6.23138808e-02 7.73103088e-02]] , 2\n", + "-------------------------\n", + "0.layers.0.0.forget_gate_params.bias\n", + "(20,)\n", + "[ 0.20850217 0.11380532 0.08104482 -0.00762655 0.15247074 -0.08138975\n", + " 0.0910454 -0.10650107 -0.00208706 0.13215044 0.10260209 -0.05017841\n", + " -0.00283135 -0.12413156 0.10357434 0.15046087 0.07697045 -0.21637587\n", + " -0.16006967 0.14969489] , 3\n", + "-------------------------\n", + "0.layers.0.0.forget_gate_params.input_weight.weight\n", + "(20, 10)\n", + "[[-0.03201701 0.13732338 0.16482215 -0.06550063 -0.13119501 -0.2103679\n", + " 0.08553377 0.11468438 -0.0387658 -0.21708311]\n", + " [-0.14402747 -0.01204806 0.10205487 -0.07492673 -0.14435105 -0.15566948\n", + " 0.2000676 0.08097311 -0.1815501 -0.13809344]\n", + " [-0.18981868 0.03235186 -0.09079897 -0.00075695 -0.0353742 -0.1957324\n", + " -0.19982079 -0.17343585 -0.09364887 0.03477862]\n", + " [-0.10515709 -0.00797041 -0.02678433 0.20449734 -0.10193561 0.21008612\n", + " -0.17165995 -0.18656294 0.07271551 -0.13013807]\n", + " [ 0.11469334 -0.12370986 0.17608246 0.21651667 0.01431521 0.04778921\n", + " 0.20847315 0.13255776 -0.19520605 -0.00715788]\n", + " [-0.20184483 0.17081025 -0.04095714 -0.00155866 -0.13738167 -0.12158713\n", + " 0.02901981 0.18449156 -0.1123966 0.02112942]\n", + " [ 0.20241037 0.20039941 -0.04371644 0.20957804 0.08143061 0.20365277\n", + " 0.00663433 -0.1895056 -0.06086665 0.06706649]\n", + " [ 0.1192437 -0.22275887 0.17393245 -0.20059223 
0.13101582 0.22062524\n", + " 0.05510434 -0.0422016 0.12311912 -0.06636703]\n", + " [-0.16563286 -0.15869099 0.10513588 0.1707739 0.00905446 -0.2168069\n", + " -0.21971782 -0.05049207 0.12070725 -0.1490105 ]\n", + " [ 0.06027115 -0.12221678 0.18192975 -0.05859193 -0.04659947 -0.19612114\n", + " -0.20028274 0.01511241 0.03615525 0.12080745]\n", + " [-0.19552828 0.03918052 -0.03230212 0.1311668 -0.1016731 0.06661848\n", + " 0.09010674 0.11232612 -0.07669472 0.07195909]\n", + " [-0.04382298 0.06021269 -0.13749652 -0.17768005 -0.18290731 -0.1405653\n", + " -0.09463658 0.03328432 -0.04891114 -0.12729394]\n", + " [ 0.00187842 -0.07061429 0.13783802 -0.18416376 -0.08253521 -0.1436971\n", + " 0.02759105 0.01219904 -0.0128632 0.22186181]\n", + " [-0.08530237 -0.03213883 0.05777045 0.18662488 0.16948868 0.02554451\n", + " -0.08459641 0.07345897 0.14069013 -0.00477207]\n", + " [ 0.12276765 0.18300453 -0.11980148 -0.04943415 -0.20131664 0.05132969\n", + " 0.15936238 -0.04342245 0.03568069 0.07144996]\n", + " [-0.00476937 0.17384104 0.0325843 -0.21979333 -0.18465139 -0.22154187\n", + " 0.00921626 0.12087465 -0.02950055 0.20104776]\n", + " [-0.04022751 0.04571649 0.20163535 0.11316557 -0.00713371 0.2153832\n", + " -0.1335971 0.08328808 0.14121595 -0.13845547]\n", + " [-0.21004361 0.07152335 -0.08483391 -0.1128413 0.04447659 -0.16221067\n", + " 0.2011128 -0.02007227 -0.07161061 0.18693109]\n", + " [ 0.06226142 0.04260208 -0.10691333 0.21311398 -0.06810362 0.18598051\n", + " -0.016437 0.11216957 0.15722302 -0.1664758 ]\n", + " [-0.14903465 -0.22111452 0.16127922 0.19229865 -0.08172148 -0.10951796\n", + " 0.03742959 0.12038527 0.05519409 -0.04660187]] , 4\n", + "-------------------------\n", + "0.layers.0.0.forget_gate_params.hidden_weight.weight\n", + "(20, 20)\n", + "[[-0.14223064 0.19124371 -0.14481081 -0.21607104 -0.08928006 0.04458899\n", + " 0.0831126 0.08646142 -0.12953514 -0.08581803 -0.09943341 -0.10828371\n", + " -0.18833804 0.04577223 -0.06502874 -0.2152229 -0.13056786 
-0.13428617\n", + " -0.09645564 -0.13816758]\n", + " [-0.03877772 0.08013236 -0.18096809 -0.01915519 -0.06435173 -0.11432081\n", + " -0.0496515 -0.09477154 0.00718846 -0.16141057 0.04240454 0.20530063\n", + " 0.18528308 -0.10025615 0.06892193 -0.21135406 0.18826427 -0.22283866\n", + " -0.19982089 -0.20071597]\n", + " [-0.20765333 0.03028304 -0.05912894 0.05351972 -0.01383548 -0.00480333\n", + " -0.08078498 -0.13266474 -0.18721604 0.11282834 -0.11529152 -0.04547688\n", + " 0.10860465 -0.05537887 -0.05637903 -0.14906646 -0.19131811 0.10732386\n", + " -0.05044974 0.14060505]\n", + " [ 0.01471702 -0.00028402 -0.20187245 0.0049368 -0.0505344 -0.12759772\n", + " -0.05175107 0.01168989 -0.16848378 0.03718214 0.15558895 0.04417289\n", + " 0.21344449 0.10434435 -0.17634727 -0.08801483 -0.05380939 0.06689031\n", + " -0.00637761 0.17993565]\n", + " [ 0.02597556 -0.14161254 -0.08197778 -0.18603216 -0.061655 0.10993782\n", + " 0.00215927 -0.21323241 -0.19348647 0.08106777 -0.19626026 -0.1783532\n", + " -0.1333177 0.21312374 -0.06358164 -0.09219337 -0.15098219 0.14304285\n", + " -0.03610551 0.04311918]\n", + " [ 0.05341741 0.06306308 0.14312816 0.01160373 0.02312934 -0.01452105\n", + " -0.17375752 -0.05117204 0.21281871 -0.15847513 -0.14112028 -0.22188812\n", + " 0.013559 -0.20914444 -0.11453009 0.20604049 0.09261008 0.11913135\n", + " 0.03828845 -0.19001652]\n", + " [-0.10404866 -0.18102278 -0.13826925 0.076148 -0.06201827 0.2185227\n", + " -0.16299975 -0.19082828 0.2207899 -0.19316407 0.19027402 0.06021235\n", + " -0.20380671 0.1947569 -0.06087566 -0.09220145 -0.17443547 -0.1891369\n", + " 0.04978558 -0.21964009]\n", + " [ 0.09188584 -0.05525529 0.0784739 -0.05474811 0.07732737 -0.00610806\n", + " 0.06572182 -0.09097287 -0.15380703 0.02847747 -0.14272346 -0.13861606\n", + " -0.21501313 -0.07127416 -0.14941145 0.17413448 0.1611419 0.05305404\n", + " 0.18168166 0.10766982]\n", + " [-0.21064265 -0.022373 -0.03629636 -0.13576584 0.06368566 -0.06979065\n", + " -0.10692404 
-0.00260666 -0.14866948 0.18506847 0.14149404 0.21166477\n", + " -0.03960523 0.07302888 -0.00899392 -0.18503006 0.10116354 -0.15618756\n", + " -0.08071785 -0.10013654]\n", + " [-0.21814388 0.00802042 0.03663212 -0.01662389 0.1644524 0.01072139\n", + " -0.0407296 -0.12196475 -0.13280123 -0.03179033 -0.1312358 -0.14750735\n", + " -0.02957479 -0.03948133 -0.13649467 0.13065115 0.18963577 -0.15246144\n", + " 0.09794185 -0.10375587]\n", + " [-0.02321799 0.20873794 0.02861272 -0.21320319 0.20555921 -0.00946067\n", + " -0.11196752 -0.11808899 0.19175017 0.00377388 0.12350584 0.14696068\n", + " -0.08678884 0.01897924 -0.14464125 0.18672368 -0.11824197 0.14852415\n", + " 0.05665502 0.1379358 ]\n", + " [-0.1575466 -0.00695391 0.11586404 -0.00892534 -0.0032084 0.10896464\n", + " -0.16712412 -0.04483069 0.10185106 0.10966767 0.20768207 -0.04423303\n", + " 0.05298113 -0.11002054 -0.03752897 -0.11225442 0.16570821 0.0013621\n", + " 0.09096613 0.12299404]\n", + " [ 0.04166875 0.02379598 -0.01636612 -0.1894117 0.03602695 -0.04953878\n", + " -0.18794785 0.20833082 -0.02383836 -0.11159918 -0.21768506 -0.20595226\n", + " 0.08515022 -0.1020775 -0.09659212 -0.12938367 0.18049696 -0.05375253\n", + " 0.14493793 0.17751718]\n", + " [-0.17336273 0.16682073 -0.04269946 0.21416363 0.11421449 -0.21660405\n", + " 0.04154139 0.07860353 -0.08111839 0.16956337 -0.1851744 -0.07095176\n", + " 0.2130592 0.21838497 0.11170101 -0.13348123 -0.19239157 -0.1818077\n", + " -0.05589887 0.12667239]\n", + " [ 0.07079396 -0.02715501 0.20110089 0.17559125 -0.10450983 -0.09683432\n", + " -0.00262346 0.04640241 -0.00160075 0.08632647 0.15427703 -0.04031902\n", + " 0.10981148 0.03041176 0.08583194 0.09205452 -0.05976621 -0.09969731\n", + " 0.09557738 -0.14316456]\n", + " [ 0.1173941 -0.1434708 0.15340208 0.08971985 -0.05478028 0.12781222\n", + " -0.07363954 0.04763815 0.06583516 0.02283663 0.04490386 -0.00443905\n", + " -0.0645991 0.1247524 0.08819748 0.08340425 0.15096036 -0.11699554\n", + " -0.0519524 
-0.00637345]\n", + " [ 0.18044722 -0.1780605 -0.12826072 -0.05326315 -0.19100511 -0.17666493\n", + " 0.15317535 0.01043098 -0.17988645 -0.03692174 -0.00735149 -0.07949581\n", + " -0.18703558 0.12169496 -0.02761802 0.21831468 -0.17125311 -0.12275734\n", + " -0.01161703 -0.15571442]\n", + " [ 0.16295849 0.17292082 0.2025731 -0.14115438 0.15909635 0.15525764\n", + " -0.08897205 0.02453648 0.10655329 0.16001071 -0.20884806 0.2226173\n", + " -0.05621968 0.09110746 -0.13887972 -0.17207511 -0.15143432 0.13178375\n", + " -0.11029776 0.12998497]\n", + " [ 0.0675995 0.08894558 -0.04973555 -0.07073203 -0.10462123 -0.12498911\n", + " 0.20617247 -0.01215215 -0.09589054 -0.20804486 0.0097276 -0.22196051\n", + " -0.00263305 0.14118703 -0.12879056 0.12285849 -0.07132839 -0.1719783\n", + " -0.22146888 0.11108326]\n", + " [-0.1710799 0.10918202 0.03201576 0.12152903 -0.16808327 0.19554281\n", + " -0.22271936 -0.16972543 0.13409424 0.00759949 -0.12556304 -0.04690479\n", + " -0.19899549 -0.194607 -0.04797396 0.17057896 0.06677905 0.04216573\n", + " -0.05926214 0.20352075]] , 5\n", + "-------------------------\n", + "0.layers.0.0.cell_gate_params.bias\n", + "(20,)\n", + "[ 0.00214154 0.07550146 0.00355405 0.03489293 0.07456551 0.17159154\n", + " 0.12870987 0.0286169 0.08939798 -0.06724557 0.15284362 0.06277069\n", + " 0.16875166 -0.03491265 -0.18256952 0.04417255 0.09094475 0.18067895\n", + " 0.08666804 0.08261736] , 6\n", + "-------------------------\n", + "0.layers.0.0.cell_gate_params.input_weight.weight\n", + "(20, 10)\n", + "[[ 0.17794745 -0.07684495 0.19742867 0.11464191 0.14933479 0.15947415\n", + " -0.18268393 0.11646748 0.20825341 -0.15708849]\n", + " [-0.01916463 -0.1364658 -0.05399449 0.03332363 0.11960924 -0.06491657\n", + " -0.21173826 0.12073942 0.12545025 -0.04053707]\n", + " [ 0.19142465 0.17237733 -0.04928424 0.00863487 0.03938841 -0.04381773\n", + " -0.05508858 -0.10093604 -0.12716216 0.11167222]\n", + " [-0.06639788 -0.10727276 0.19697405 0.03575112 0.16133724 
0.2037714\n", + " -0.03149954 0.03335407 0.20731461 -0.15384933]\n", + " [-0.06704343 0.03181893 -0.01517017 0.05953267 0.11757869 -0.09199598\n", + " 0.01741112 0.20230028 -0.1265286 -0.15163381]\n", + " [-0.17148444 0.13366292 -0.20509928 -0.1087402 0.15102275 -0.13404797\n", + " 0.1818403 -0.10452814 0.03537463 0.02927051]\n", + " [-0.00548471 0.13927223 0.18991414 -0.13961166 0.12540615 0.0597448\n", + " -0.00416681 -0.15634763 0.06633033 0.1623022 ]\n", + " [-0.19193047 -0.20651296 -0.21982425 0.05166686 -0.06424998 -0.06945844\n", + " 0.20821334 -0.05703437 -0.14200093 0.02011372]\n", + " [-0.12272914 -0.06551553 0.11811562 0.05160707 -0.1534436 0.21288224\n", + " 0.15128401 -0.15242937 0.09739923 0.09188432]\n", + " [-0.16044928 -0.1571494 -0.18515183 0.09960561 0.03895786 0.09450045\n", + " -0.09821384 0.1681353 0.02855213 -0.17842196]\n", + " [-0.056282 0.11411482 0.04916727 -0.03420792 -0.15622441 -0.13909872\n", + " 0.19286813 -0.12808998 0.15845725 -0.07484471]\n", + " [ 0.00223508 -0.21774605 -0.07268656 0.18849593 -0.20075409 0.11251042\n", + " -0.188184 0.03261365 -0.20273004 -0.17701481]\n", + " [-0.18051723 -0.07753571 0.03044572 -0.16394225 0.05667006 0.13467607\n", + " 0.18228398 0.19799176 0.14722027 -0.06584404]\n", + " [-0.02060739 0.19784163 0.11123517 -0.05929887 0.16882291 -0.19541554\n", + " 0.1913779 0.12510933 -0.16400692 -0.18237662]\n", + " [ 0.17486629 0.22059093 0.01951262 -0.08737109 0.12732458 0.1008788\n", + " -0.0279066 0.17902343 0.14493623 0.05574536]\n", + " [ 0.11610299 -0.20945168 -0.10473937 0.02451142 0.06080827 -0.03056943\n", + " 0.08443112 0.06811719 -0.20665829 0.07052966]\n", + " [-0.01818041 -0.15387398 0.00754629 -0.05499369 -0.11874414 -0.20375879\n", + " 0.18706112 -0.13579562 0.0300329 0.17913137]\n", + " [-0.02817055 -0.14655502 -0.21633011 0.03715306 -0.11219743 0.01630673\n", + " 0.07142475 -0.06335549 0.1516163 -0.02909804]\n", + " [-0.08923855 -0.14784832 0.06784268 -0.13824603 0.04700406 -0.02822138\n", + " 
0.1536749 -0.10962173 -0.11015368 -0.02889775]\n", + " [-0.13657494 0.08524874 -0.08190698 0.09174035 0.12977527 0.13057181\n", + " -0.04105001 0.12203032 -0.11840606 -0.22279048]] , 7\n", + "-------------------------\n", + "0.layers.0.0.cell_gate_params.hidden_weight.weight\n", + "(20, 20)\n", + "[[-2.12806370e-02 -1.62129834e-01 -1.73234463e-01 5.68399914e-02\n", + " 1.91077381e-01 -8.79967287e-02 -1.26489419e-02 -1.62001878e-01\n", + " 3.90813835e-02 6.37496263e-02 -3.43248062e-02 1.70126632e-01\n", + " -1.79964885e-01 -3.00010163e-02 -1.24117516e-01 1.96340203e-01\n", + " 1.89398184e-01 2.19951704e-01 2.05728129e-01 8.85609612e-02]\n", + " [-1.71218976e-01 -1.51676044e-01 5.36037646e-02 -1.99636862e-01\n", + " 1.41561761e-01 9.72114205e-02 5.33513576e-02 -1.95168942e-01\n", + " 1.62662312e-01 -2.36655492e-02 -9.38338637e-02 1.16747312e-01\n", + " 1.88960433e-02 -9.94693190e-02 5.23358434e-02 -1.49113968e-01\n", + " 2.07823291e-01 1.95990741e-01 1.03123404e-01 1.18294187e-01]\n", + " [-2.22277910e-01 -1.24300212e-01 -2.15169474e-01 -1.16545178e-01\n", + " -1.85386583e-01 1.64590582e-01 1.20638609e-01 1.31684974e-01\n", + " -9.92668644e-02 1.70430213e-01 -3.23111340e-02 -5.79339787e-02\n", + " 1.20397158e-01 1.48079455e-01 -1.60713032e-01 2.12880254e-01\n", + " -2.25685220e-02 5.95554635e-02 -2.22653463e-01 2.48931386e-02]\n", + " [-1.10666625e-01 -1.40009314e-01 -9.33616757e-02 -1.04158348e-03\n", + " -6.37013763e-02 -1.43241197e-01 1.60099015e-01 6.65228367e-02\n", + " -2.08098441e-01 4.69054580e-02 5.49288094e-02 8.21655430e-03\n", + " 5.42974621e-02 -1.87213402e-02 9.77927893e-02 -1.57414630e-01\n", + " -9.53418463e-02 1.67505234e-01 -1.38533488e-01 1.09708525e-01]\n", + " [ 2.06897184e-01 -2.04468444e-01 -9.79631692e-02 1.90820277e-01\n", + " -1.35208331e-02 4.41430137e-02 3.18236202e-02 9.21481624e-02\n", + " -9.21330750e-02 2.90291384e-02 1.52316689e-01 -1.88640561e-02\n", + " -2.05149427e-01 7.72908777e-02 -5.70836812e-02 -4.71739881e-02\n", + " 
1.16618834e-01 3.91878746e-02 -1.35271400e-01 -1.03187911e-01]\n", + " [-3.39903794e-02 -5.52454554e-02 -4.73374985e-02 -1.52837262e-01\n", + " 1.61986634e-01 1.15967356e-01 4.41279002e-02 5.06293550e-02\n", + " 2.61772387e-02 1.67198420e-01 5.05979806e-02 3.40624861e-02\n", + " -1.22919112e-01 7.45933205e-02 -2.09194586e-01 7.05230013e-02\n", + " -1.93819985e-01 -9.25445408e-02 1.18050657e-01 -1.33182898e-01]\n", + " [ 1.78052112e-01 -1.23547316e-01 2.11798310e-01 6.89183101e-02\n", + " -9.69009325e-02 1.36373073e-01 -1.98024541e-01 -1.41652852e-01\n", + " -1.40091866e-01 2.94355899e-02 2.19678022e-02 -1.92325816e-01\n", + " 2.15771765e-01 -2.13701205e-04 -1.19405292e-01 5.34111727e-03\n", + " -9.59839672e-02 6.16913289e-02 8.09477344e-02 -6.34285584e-02]\n", + " [ 1.30358534e-02 1.33047834e-01 -1.45440847e-01 -4.98616323e-02\n", + " -3.29875015e-02 -1.47941127e-01 1.82121564e-02 8.21812730e-03\n", + " -1.80613607e-01 4.58700024e-02 2.13425189e-01 1.18935056e-01\n", + " -1.21292830e-01 2.04682201e-01 -1.53705969e-01 -1.13691926e-01\n", + " 9.86314118e-02 1.77888468e-01 2.13384852e-01 1.92508563e-01]\n", + " [-1.23128124e-01 5.11671938e-02 -1.40405849e-01 4.93797194e-03\n", + " 1.85259327e-01 1.10102132e-01 -2.06472665e-01 -9.62342396e-02\n", + " -1.88666239e-01 1.05334759e-01 -2.83857696e-02 -1.63461700e-01\n", + " -7.14522004e-02 7.33797774e-02 2.07014289e-02 2.09811881e-01\n", + " -2.96870619e-03 7.03370497e-02 -6.77365363e-02 2.66825557e-02]\n", + " [ 8.01036973e-03 1.92074046e-01 9.36935991e-02 -1.27431735e-01\n", + " -1.98687479e-01 -2.12748200e-01 -8.12046453e-02 2.89045740e-02\n", + " 2.10361689e-01 -2.19703875e-02 8.74281824e-02 1.13642633e-01\n", + " -1.71282887e-01 -1.84971020e-01 8.47281963e-02 1.04225203e-01\n", + " -1.04119189e-01 3.50410007e-02 -2.18935862e-01 2.81849946e-03]\n", + " [ 5.48111200e-02 2.11656699e-03 -3.54930870e-02 9.30717662e-02\n", + " -6.14620335e-02 1.66451484e-01 -1.92599118e-01 -1.27790585e-01\n", + " -1.86674312e-01 
-2.02230543e-01 1.65771663e-01 -5.53366169e-02\n", + " -1.75649151e-01 4.63781990e-02 -1.69327542e-01 1.15589779e-02\n", + " 1.06298663e-01 -4.72831465e-02 1.14950888e-01 4.58941013e-02]\n", + " [-1.79431096e-01 4.40098420e-02 1.44146204e-01 -5.18364720e-02\n", + " 2.11329088e-02 2.85264328e-02 1.92284174e-02 5.81263304e-02\n", + " -2.14094386e-01 1.69653893e-01 9.75249708e-02 2.76133306e-02\n", + " 4.06875163e-02 -1.80331707e-01 -6.38444126e-02 -9.72616393e-03\n", + " 5.31534106e-02 -1.22661509e-01 2.37256587e-02 -6.93958476e-02]\n", + " [ 1.62758812e-01 -1.91935405e-01 2.33742520e-02 1.51492402e-01\n", + " -1.73671409e-01 -6.40887721e-03 1.03327051e-01 9.02309865e-02\n", + " 2.62962040e-02 9.03898776e-02 -1.55875593e-01 1.86238810e-01\n", + " 4.98715229e-03 1.44541100e-01 4.94662710e-02 -2.48756800e-02\n", + " 9.57791656e-02 2.12270051e-01 2.20569506e-01 -1.88220173e-01]\n", + " [ 1.35616167e-02 -1.60633817e-01 1.30284145e-01 1.60526067e-01\n", + " -1.57016143e-01 -1.29234986e-02 1.54731110e-01 1.47872686e-01\n", + " -1.68123141e-01 1.50136366e-01 -3.95872369e-02 -1.90171361e-01\n", + " 4.45422679e-02 1.04169942e-01 1.34101674e-01 -1.52035385e-01\n", + " -1.61954522e-01 -1.50239438e-01 1.26720712e-01 -1.95428118e-01]\n", + " [-1.88556593e-03 -6.57092705e-02 9.76277590e-02 4.39127870e-02\n", + " -1.12915963e-01 3.90566476e-02 2.05778107e-01 3.68154384e-02\n", + " -1.10807024e-01 7.48633966e-03 -2.05102757e-01 -1.43465236e-01\n", + " -4.15345095e-02 -1.39340952e-01 1.89353585e-01 4.34043780e-02\n", + " 1.73192978e-01 -5.09172641e-02 -3.10981516e-02 5.64037636e-02]\n", + " [-6.64871484e-02 -7.62214959e-02 -2.19352797e-01 1.68453470e-01\n", + " 2.02370644e-01 -2.21398085e-01 -7.39822015e-02 -1.69133484e-01\n", + " -9.07677040e-02 1.70234248e-01 1.19611956e-01 -1.73501018e-02\n", + " 9.55028459e-02 6.67780936e-02 1.22115597e-01 -1.79690495e-01\n", + " 6.91184700e-02 -2.11776465e-01 -1.47058472e-01 -8.33279863e-02]\n", + " [-2.17858739e-02 -2.11018786e-01 
5.56494808e-03 3.57002839e-02\n", + " -8.87419507e-02 7.25275800e-02 1.95392817e-01 -3.81953120e-02\n", + " -1.19088188e-01 -1.98077247e-01 -1.63278311e-01 -1.23674117e-01\n", + " -1.65306747e-01 -8.79110843e-02 1.23181596e-01 6.99715093e-02\n", + " 2.01542184e-01 2.22007304e-01 -8.05223361e-02 -8.75686854e-02]\n", + " [ 3.05994693e-02 -1.78054109e-01 1.21623978e-01 -4.02442813e-02\n", + " -1.87232435e-01 -1.68819025e-01 -1.54080361e-01 6.14588112e-02\n", + " 1.71410367e-01 1.77153081e-01 -6.15712442e-02 -1.29883334e-01\n", + " -9.92444977e-02 -1.52750149e-01 -5.76506779e-02 -2.01948732e-01\n", + " 1.19517274e-01 -2.10457653e-01 -1.39095634e-01 1.50062576e-01]\n", + " [-1.67259946e-01 5.34564890e-02 1.67486787e-01 2.20412284e-01\n", + " 1.13142729e-01 -6.00084551e-02 1.27776846e-01 -7.37963570e-03\n", + " -6.89469650e-02 7.28242099e-04 5.01570366e-02 1.49932787e-01\n", + " 9.38621163e-02 1.06770106e-01 3.34510244e-02 -1.12544857e-02\n", + " 9.38917845e-02 5.37824407e-02 -2.13967159e-01 3.61516774e-02]\n", + " [-9.93019715e-02 -1.18578210e-01 8.64755288e-02 4.57250476e-02\n", + " 3.78663242e-02 -1.06075369e-01 1.03322893e-01 2.09839717e-01\n", + " 2.73554083e-02 9.19082835e-02 -1.96176514e-01 1.32933155e-01\n", + " 7.76783228e-02 1.00741126e-01 9.32467878e-02 -5.88140823e-02\n", + " -1.34220198e-02 2.16287613e-01 1.63621128e-01 -1.60278752e-01]] , 8\n", + "-------------------------\n", + "0.layers.0.0.output_gate_params.bias\n", + "(20,)\n", + "[ 0.17741492 0.22254053 0.02940683 -0.17445402 0.04334408 -0.04515981\n", + " 0.16077036 -0.21483785 0.05722176 -0.00262266 0.01760296 0.15381731\n", + " 0.0040394 -0.18002152 -0.13043821 -0.08953302 0.02384774 0.08628984\n", + " -0.04173774 -0.08825271] , 9\n", + "-------------------------\n", + "0.layers.0.0.output_gate_params.input_weight.weight\n", + "(20, 10)\n", + "[[ 9.81200710e-02 -2.17414662e-01 1.56252235e-01 -2.59936582e-02\n", + " 1.55592158e-01 1.68960407e-01 2.38872208e-02 7.07329437e-02\n", + " -1.26473457e-01 
1.60210714e-01]\n", + " [ 1.30875960e-01 -3.51194218e-02 8.71568248e-02 -1.25249382e-02\n", + " 1.74701765e-01 9.20466036e-02 1.63019851e-01 -2.03253865e-01\n", + " 2.17866078e-01 8.33117217e-02]\n", + " [ 1.08713590e-01 4.98261265e-02 1.46862045e-01 2.10508242e-01\n", + " -1.90491565e-02 -1.83473915e-01 2.05329910e-01 -4.71567698e-02\n", + " -1.07840233e-01 1.37649149e-01]\n", + " [ 1.24790154e-01 2.99369618e-02 -1.40363071e-02 -4.27761748e-02\n", + " 2.05027208e-01 1.36240214e-01 1.33165866e-01 1.42589167e-01\n", + " -1.17026694e-01 4.66880240e-02]\n", + " [-1.93439931e-01 1.29910931e-01 -2.21640781e-01 -2.23473564e-01\n", + " -2.21031293e-01 1.37891039e-01 2.32707467e-02 5.08490019e-04\n", + " 3.55657227e-02 -8.46242681e-02]\n", + " [-6.79011941e-02 -1.50619775e-01 -5.46085611e-02 -1.37593433e-01\n", + " 5.88322058e-03 1.75689265e-01 -1.84854001e-01 1.09963417e-01\n", + " -1.66318297e-01 -9.26456451e-02]\n", + " [ 4.37250473e-02 3.84753868e-02 1.83374569e-01 -8.36465479e-05\n", + " -8.51647705e-02 -9.24766734e-02 6.55569835e-03 -1.67666823e-01\n", + " -1.75320774e-01 -9.56731290e-02]\n", + " [ 5.74407633e-03 -1.51010871e-01 -1.27642184e-01 1.59654185e-01\n", + " 2.06639260e-01 -7.00415373e-02 -1.91840678e-01 -8.56086463e-02\n", + " 9.02482048e-02 7.25704432e-02]\n", + " [-6.93180412e-02 -1.96934849e-01 -6.72358871e-02 -4.99973148e-02\n", + " 1.28766835e-01 -1.10879898e-01 1.34200945e-01 3.10183968e-02\n", + " -3.74761075e-02 -1.99273914e-01]\n", + " [ 2.20759660e-01 -3.98728549e-02 1.40693069e-01 -1.15664735e-01\n", + " -2.17755169e-01 -1.78237423e-01 -1.14595190e-01 -7.12116584e-02\n", + " -3.15762796e-02 1.86491266e-01]\n", + " [-2.06223264e-01 1.11605875e-01 1.88149154e-01 1.43918453e-03\n", + " -1.39450610e-01 7.15188682e-03 5.30482270e-02 9.89372358e-02\n", + " -6.79695681e-02 -7.67354444e-02]\n", + " [-1.05491146e-01 -2.16275647e-01 7.85326734e-02 -1.69050053e-01\n", + " -1.07421041e-01 -2.30107992e-03 1.72379389e-01 1.98816836e-01\n", + " -1.62642673e-01 
1.93931282e-01]\n", + " [ 2.00302720e-01 1.80637628e-01 1.94676816e-02 1.79588884e-01\n", + " 1.08642928e-01 -1.60451204e-01 -1.17858045e-01 4.20530513e-03\n", + " -1.58465564e-01 -7.36296773e-02]\n", + " [ 1.80281103e-01 1.04106739e-01 1.94734529e-01 1.71422120e-03\n", + " -1.14017285e-01 1.47993699e-01 1.64847951e-02 3.76562215e-02\n", + " -9.47417393e-02 9.18511599e-02]\n", + " [-1.65143967e-01 1.78432971e-01 1.95620790e-01 8.06822702e-02\n", + " 1.74128443e-01 1.35722205e-01 -8.53993148e-02 -1.93941638e-01\n", + " 2.94244476e-02 1.40397370e-01]\n", + " [-2.28753053e-02 1.88145563e-02 1.65735826e-01 9.23255607e-02\n", + " 1.67166159e-01 3.28338295e-02 2.50651501e-02 -5.34861833e-02\n", + " -3.77333388e-02 -1.18839331e-01]\n", + " [ 1.49498299e-01 2.03940362e-01 8.29838291e-02 6.35351241e-03\n", + " -7.38137364e-02 -2.20774114e-01 -4.14042696e-02 -1.58739850e-01\n", + " -1.65080443e-01 -4.42778133e-02]\n", + " [-4.39881422e-02 4.51072417e-02 -1.62074581e-01 1.60696968e-01\n", + " -2.03583151e-01 -1.05898663e-01 -8.48927200e-02 1.37860607e-02\n", + " 9.24347416e-02 -5.89275286e-02]\n", + " [ 3.48980725e-02 -5.29355779e-02 -8.79468024e-02 -3.12774107e-02\n", + " 4.50214110e-02 -2.17200696e-01 -1.55640006e-01 1.74693078e-01\n", + " 1.01111621e-01 -5.97870257e-03]\n", + " [ 7.06157601e-03 3.08655780e-02 5.19711897e-02 -1.52664930e-01\n", + " -6.09524250e-02 -2.05220923e-01 -1.75796479e-01 -4.20728028e-02\n", + " -2.95243543e-02 1.74893185e-01]] , 10\n", + "-------------------------\n", + "0.layers.0.0.output_gate_params.hidden_weight.weight\n", + "(20, 20)\n", + "[[ 0.03851524 -0.03625689 -0.00619491 0.12488268 -0.06773603 -0.0418019\n", + " -0.04485707 -0.18031046 -0.03125188 -0.20671144 -0.12019279 -0.14232881\n", + " 0.16657048 -0.20598304 0.21545227 0.08384079 -0.15111198 0.18525589\n", + " -0.0492739 -0.18939163]\n", + " [-0.03105276 0.11050874 -0.21741039 -0.01675669 0.09098183 -0.08714523\n", + " 0.02036562 -0.0876366 -0.15001732 0.17511557 -0.1587715 
-0.00262151\n", + " 0.07447443 -0.12496222 0.10796666 -0.18569624 0.21355589 0.09958527\n", + " -0.03165689 -0.18600492]\n", + " [ 0.00689578 0.0793154 -0.12144296 -0.02816021 -0.22284126 -0.22354037\n", + " -0.02428471 0.187102 -0.01052416 0.07010341 -0.08937916 -0.07301357\n", + " -0.02457852 -0.11304034 0.13682817 0.13944101 -0.17383203 0.06858449\n", + " -0.09237309 -0.12858376]\n", + " [-0.02727968 -0.0693544 -0.12731954 0.03295429 0.12762886 -0.03450404\n", + " -0.01564156 0.01682661 -0.09610138 0.11838 0.2063172 -0.02043679\n", + " 0.01520035 0.18016809 0.18314716 -0.16634111 -0.10355289 -0.21934243\n", + " 0.13695723 0.17452586]\n", + " [-0.08138426 0.07172713 0.05416519 -0.19238184 0.0892937 0.10971964\n", + " 0.00491766 0.02293088 0.05196048 0.16108814 0.19757238 0.03213832\n", + " 0.09531388 -0.05850127 0.13331535 -0.08795608 -0.18431664 0.1049106\n", + " 0.08293276 0.0492176 ]\n", + " [ 0.09513766 0.02660845 0.0761021 0.09111597 -0.12062387 -0.01198089\n", + " 0.03369791 -0.03394864 -0.188005 0.02121117 0.13665509 -0.11958458\n", + " 0.21953909 0.0509951 0.09510146 -0.08634473 -0.18291326 -0.08321758\n", + " 0.00683159 -0.10189173]\n", + " [ 0.19913672 -0.14311586 -0.15060481 -0.0793146 0.20060927 -0.10224532\n", + " 0.20686573 0.10745841 -0.03397548 0.11565119 0.10630453 -0.11381406\n", + " -0.04603498 0.21659105 0.12819836 -0.10921414 -0.0601254 0.12532982\n", + " 0.11351746 0.01772486]\n", + " [-0.14387828 -0.16492477 -0.04719649 0.08221286 -0.02383876 -0.18695372\n", + " -0.05480145 0.22319667 -0.18481532 -0.17354017 0.14056584 0.22249034\n", + " -0.21510145 -0.20223859 -0.06991865 0.22294378 -0.1269095 0.01911828\n", + " 0.18253623 -0.0791588 ]\n", + " [-0.06857247 -0.15009233 0.0085855 0.20870976 0.0914357 0.157171\n", + " -0.01481424 -0.03551737 -0.03994827 0.12753342 -0.02932107 -0.19100396\n", + " -0.07851914 0.08750965 0.21801063 -0.04065894 -0.19468635 -0.16464569\n", + " -0.1759353 0.09013668]\n", + " [ 0.16482699 0.06612828 0.07709847 
0.14567545 0.15288451 0.13352284\n", + " 0.12504087 0.06050573 0.11541758 -0.1534312 -0.14473058 0.06013739\n", + " 0.03479816 -0.19657765 -0.16289718 -0.17800786 0.17759389 0.14619377\n", + " -0.11769552 0.033738 ]\n", + " [-0.05143119 0.19438726 -0.20252845 -0.16313015 -0.18616724 0.13013433\n", + " -0.11177826 0.13318242 0.07558636 -0.10929734 -0.06023749 -0.09048979\n", + " 0.09864956 -0.08967353 0.07588523 0.01597441 -0.17857382 -0.1405619\n", + " -0.1550431 0.1171688 ]\n", + " [ 0.0484514 -0.00562237 -0.1331447 -0.22155127 -0.07913139 -0.17113578\n", + " -0.22241357 -0.21326728 -0.14605871 -0.21737726 0.069704 0.08366753\n", + " 0.0901287 -0.22259942 0.13826938 0.04359518 0.11433873 -0.05495736\n", + " 0.10737925 -0.21207204]\n", + " [ 0.0761621 0.17731208 0.09399657 -0.21077465 -0.06277167 -0.02776839\n", + " 0.11715963 -0.08461329 0.03216063 -0.07849736 -0.03552182 -0.00445118\n", + " -0.1283987 -0.15520401 0.1845957 0.18787426 -0.00676964 0.19354711\n", + " 0.17230819 -0.14084579]\n", + " [-0.08885217 -0.15358365 0.07229424 0.00565505 -0.03066478 0.16602065\n", + " -0.08740129 -0.12237797 -0.15895672 -0.11375529 0.21551864 -0.10871551\n", + " -0.06152614 0.10078279 -0.17173737 -0.13572007 0.16457646 -0.08576282\n", + " -0.1160312 -0.02892987]\n", + " [-0.03186222 0.04086494 0.08197901 -0.17241116 0.2032053 -0.21259488\n", + " 0.07573222 -0.06309208 -0.09442816 0.20916638 -0.2154794 0.01527144\n", + " 0.1432838 0.19990316 -0.18904059 0.02694101 0.22123207 -0.21902935\n", + " 0.0546164 -0.14010552]\n", + " [ 0.03629959 -0.20227122 0.11001531 -0.04960475 0.13363701 -0.0033625\n", + " -0.03187283 -0.05428797 -0.2047436 -0.09497944 0.00742607 -0.1729926\n", + " 0.19623755 -0.14542621 -0.08711543 -0.02990268 -0.1811355 -0.00176668\n", + " -0.10767633 -0.1871676 ]\n", + " [ 0.00548474 0.19795649 0.05506302 0.18442854 -0.0021867 -0.07804751\n", + " 0.1802177 -0.11907462 -0.20685978 0.0489392 0.11143997 -0.13366425\n", + " 0.07870162 -0.07933193 -0.02713096 
-0.04951058 -0.04782786 -0.18194063\n", + " 0.05480235 -0.05881837]\n", + " [ 0.17097771 0.03732251 -0.18287036 -0.17010981 -0.11653572 0.10708019\n", + " -0.14437075 -0.10229405 0.04059571 -0.15502611 -0.11010965 0.20276332\n", + " -0.11821949 -0.07449946 0.1599237 0.05010674 0.17550889 -0.19699533\n", + " 0.11176885 -0.03420243]\n", + " [-0.14325288 -0.09576999 -0.21628909 0.15468563 -0.04290593 -0.2192564\n", + " 0.19123225 0.14483131 0.09245753 0.21885075 0.20192903 0.20897363\n", + " 0.2002456 0.18172018 0.05853782 -0.01872608 0.00850361 -0.09292599\n", + " 0.10506337 0.00647802]\n", + " [ 0.05275466 -0.14403579 -0.08419433 0.16763861 0.02174832 0.07716487\n", + " -0.1952104 -0.09575427 -0.00569092 -0.0234643 0.14273825 -0.06748112\n", + " 0.18662164 -0.04324729 0.08697162 -0.15742545 0.03795354 -0.21800253\n", + " -0.19185208 -0.14310952]] , 11\n", + "-------------------------\n", + "/0/layers.0.0/output_quant/export_handler/Constant_output_0\n", + "()\n", + "0.0078125 , 12\n", + "-------------------------\n", + "/0/layers.0.0/output_quant/export_handler/Constant_1_output_0\n", + "()\n", + "0 , 13\n", + "-------------------------\n", + "/0/layers.0.0/output_quant/export_handler/Constant_2_output_0\n", + "()\n", + "8.0 , 14\n", + "-------------------------\n", + "/0/layers.0.0/input_weight/weight_quant/export_handler/Constant_output_0\n", + "()\n", + "0.001760039 , 15\n", + "-------------------------\n", + "/0/layers.0.0/input_weight/weight_quant/export_handler/Constant_1_output_0\n", + "()\n", + "-127 , 16\n", + "-------------------------\n", + "/0/layers.0.0/input_weight/weight_quant/export_handler/Constant_2_output_0\n", + "()\n", + "127 , 17\n", + "-------------------------\n", + "/0/layers.0.0/input_weight/weight_quant/export_handler_1/Constant_output_0\n", + "()\n", + "0.0017542557 , 18\n", + "-------------------------\n", + "/0/layers.0.0/input_weight/weight_quant/export_handler_2/Constant_output_0\n", + "()\n", + "0.0017601603 , 19\n", + 
"-------------------------\n", + "/0/layers.0.0/input_weight/weight_quant/export_handler_3/Constant_output_0\n", + "()\n", + "0.0017546351 , 20\n", + "-------------------------\n", + "onnx.brevitas::QuantLSTMCell_48\n", + "(1, 20)\n", + "[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] , 21\n", + "-------------------------\n", + "/0/layers.0.0/export_handler/Constant_output_0\n", + "()\n", + "0.003921569 , 22\n", + "-------------------------\n", + "/0/layers.0.0/export_handler/Constant_1_output_0\n", + "()\n", + "0 , 23\n", + "-------------------------\n", + "/0/layers.0.0/Constant_output_0\n", + "(1,)\n", + "[0] , 24\n", + "-------------------------\n", + "/0/layers.0.0/Constant_1_output_0\n", + "(1,)\n", + "[0] , 25\n", + "-------------------------\n" + ] + } + ], + "source": [ + "# In this block of code we store all the parameters (weight matrices, recurrence matrices, biases, scales and zero-points) that we will need to import in the QCDQ implementation.\n", + "# Importing the exported quantized model from brevitas\n", + "brevitas_lstm_export = onnx.load(\"./quant_lstm_quantization_qcdq.onnx\")\n", + "parameters = brevitas_lstm_export.graph.initializer #Extracting all the parameters from the loaded graph\n", + "\n", + "# In this loop we will be printing all the parameters to correctly import the parameters values to the right variables\n", + "for i in range(len(parameters)):\n", + " w = numpy_helper.to_array(parameters[i])\n", + " print (brevitas_lstm_export.graph.initializer[i].name)\n", + " print(w.shape)\n", + " print(w,',',i)\n", + " print(\"-------------------------\")\n", + " \n", + "# Storing the extracted parameters (weights/biases/scales) to the right variables depending on the order in which they are exported. 
\n", + "# The abbreviation described in the above block will help in understanding what each variable denotes\n", + "\n", + "bi_val = numpy_helper.to_array(parameters[0])\n", + "Wi_val = numpy_helper.to_array(parameters[1])\n", + "Ui_val = numpy_helper.to_array(parameters[2])\n", + "bf_val = numpy_helper.to_array(parameters[3])\n", + "Wf_val = numpy_helper.to_array(parameters[4])\n", + "Uf_val = numpy_helper.to_array(parameters[5])\n", + "bc_val = numpy_helper.to_array(parameters[6])\n", + "Wc_val = numpy_helper.to_array(parameters[7])\n", + "Uc_val = numpy_helper.to_array(parameters[8])\n", + "bo_val = numpy_helper.to_array(parameters[9])\n", + "Wo_val = numpy_helper.to_array(parameters[10])\n", + "Uo_val = numpy_helper.to_array(parameters[11])\n", + "# Scalar values can either be int or float\n", + "inp_scale_val = float(numpy_helper.to_array(parameters[12])) \n", + "w1_scale_val = float(numpy_helper.to_array(parameters[15]))\n", + "w2_scale_val = float(numpy_helper.to_array(parameters[18]))\n", + "w3_scale_val = float(numpy_helper.to_array(parameters[19]))\n", + "w4_scale_val = float(numpy_helper.to_array(parameters[20]))\n", + "eq_scale_val_1 = float(numpy_helper.to_array(parameters[12]))\n", + "eq_scale_val_2 = float(numpy_helper.to_array(parameters[22]))" + ] + }, + { + "cell_type": "markdown", + "id": "10237589-f84e-423a-829e-3e2c2e806ed7", + "metadata": {}, + "source": [ + "# LSTM ONNX model" + ] + }, + { + "cell_type": "markdown", + "id": "367547b8", + "metadata": {}, + "source": [ + "In the 3rd part of the notebook, we will construct the `QCDQ-LSTM` model with standard ONNX operators. 
After loading all the parameters in the above block we can now start building our ONNX model with QCDQ quantization to represent the LSTM computations described in part-1.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "02fe4d94-af24-4d5e-a809-7d8c49e7fd90", + "metadata": {}, + "outputs": [], + "source": [ + "# Setting parameters : Matching the input output lengths exported from brevitas\n", + "num_features = 10\n", + "num_hidden_cells = 20\n", + "activation_bit_width = 8\n", + "\n", + "# The below two parameters are for the 'Clip' operation. \n", + "# Clip node parameters\n", + "max_clip_val = (2 ** (activation_bit_width -1) - 1)\n", + "min_clip_val = -(2 ** (activation_bit_width -1) - 1)\n", + "\n", + "# Zero-point datatype decides the datatype of the output tensor for the quantization operations hence we defined two. One for signed and other for unsigned.\n", + "# Zero point values for quantization\n", + "zero_point_signed_val = 0\n", + "zero_point_unsigned_val = 0" + ] + }, + { + "cell_type": "markdown", + "id": "15098a9e-4187-4987-82cc-275eba650923", + "metadata": {}, + "source": [ + "`Abbreviations` : These describe different short-forms used in the next two blocks.\n", + "\n", + "* ql = \"QuantizeLinear\"\n", + "* dql = \"DequantizeLinear\"\n", + "* clp = \"Clip\"\n", + "* id = \"Identity\"\n", + "* matmul = \"Matrix Multiplication\"\n", + "* el_mul = \"Elementwise Multiplication\"\n", + "* sig = \"Sigmoid\"" + ] + }, + { + "cell_type": "markdown", + "id": "f2edc0cc", + "metadata": {}, + "source": [ + "We start defining the model by defining the `inputs` and `outputs` defined as value_info tensors in ONNX.\n", + "For LSTMs we need three inputs : `inputs`, `previous hidden state` and `previous cell state`. \n", + "We get three outputs : `hidden_state`, `cell_state` and `concatenated_hidden_states`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "02761646-4c6d-440f-8e90-4935beebab56", + "metadata": {}, + "outputs": [], + "source": [ + "# Defining the inputs 'value info' tensors for the compute graph.\n", + "hidden_state = make_tensor_value_info(\"h_t-1\",onnx.TensorProto.FLOAT, [num_hidden_cells,1])\n", + "cell_state = make_tensor_value_info(\"c_t-1\", onnx.TensorProto.FLOAT, [num_hidden_cells,1])\n", + "inputs = make_tensor_value_info(\"inp\",onnx.TensorProto.FLOAT, [num_features,1])\n", + "\n", + "#Output value info tensor definitions\n", + "out_hidden_state = make_tensor_value_info(\"h_t\", onnx.TensorProto.FLOAT, [num_hidden_cells,1])\n", + "out_cell_state = make_tensor_value_info(\"c_t\", onnx.TensorProto.FLOAT, [num_hidden_cells,1])\n", + "out_hidden_state_concat = make_tensor_value_info(\"h_t_concat\", onnx.TensorProto.FLOAT, [num_hidden_cells,1])#maybe this will have one more dimension" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "c08e5a23-ef2e-4bca-9293-c800350c2c62", + "metadata": {}, + "outputs": [], + "source": [ + "# Once we have defined the inputs and outputs, we will now start defining the operations in the LSTM compute graph.\n", + "# We start by quantizing the input with the standard QDQ operation which is 8-bit quantization. 
\n", + "# Note: For quantization to lower bit-width's we can use the clip node.\n", + "\n", + "# Input quantization\n", + "ql_input = make_node(\"QuantizeLinear\", inputs=[\"inp\",\"inp_scale\",\"zero_point_signed\"], outputs=[\"ql_input_out\"],name=\"ql_input\")\n", + "id_0_input = make_node(\"Identity\", inputs=[\"ql_input_out\"], outputs=[\"first_input_out\"], name=\"id_0_input\")\n", + "dql_input = make_node(\"DequantizeLinear\", inputs=[\"ql_input_out\", 'inp_scale', \"zero_point_signed\"], outputs=[\"dql_input_out\"],name=\"dql_input\")\n", + "\n", + "# Quantization of the four weight matrices showing QCDQ quantization\n", + "ql_w1 = make_node(\"QuantizeLinear\", inputs=[\"W_f\",\"scale_f\",\"zero_point_signed\"], outputs=[\"ql_wf_out\"], name=\"ql_w1\")\n", + "clp_w1 = make_node(\"Clip\", inputs=[\"ql_wf_out\",\"min\",\"max\"], outputs=[\"clp_wf\"], name=\"clp_w1\")\n", + "dql_w1 = make_node(\"DequantizeLinear\", inputs=[\"clp_wf\",\"scale_f\",\"zero_point_signed\"], outputs=[\"dql_wf_out\"], name=\"dql_w1\")\n", + "\n", + "ql_w2 = make_node(\"QuantizeLinear\", inputs=[\"W_i\",\"scale_i\",\"zero_point_signed\"], outputs=[\"ql_wi_out\"], name=\"ql_w2\")\n", + "clp_w2 = make_node(\"Clip\", inputs=[\"ql_wi_out\",\"min\",\"max\"], outputs=[\"clp_wi\"], name=\"clp_w2\")\n", + "dql_w2 = make_node(\"DequantizeLinear\", inputs=[\"clp_wi\",\"scale_i\",\"zero_point_signed\"], outputs=[\"dql_wi_out\"], name=\"dql_w2\")\n", + "\n", + "ql_w3 = make_node(\"QuantizeLinear\", inputs=[\"W_c\",\"scale_c\",\"zero_point_signed\"], outputs=[\"ql_wc_out\"], name=\"ql_w3\")\n", + "clp_w3 = make_node(\"Clip\", inputs=[\"ql_wc_out\",\"min\",\"max\"], outputs=[\"clp_wc\"], name=\"clp_w3\")\n", + "dql_w3 = make_node(\"DequantizeLinear\", inputs=[\"clp_wc\",\"scale_c\",\"zero_point_signed\"], outputs=[\"dql_wc_out\"], name=\"dql_w3\")\n", + "\n", + "ql_w4 = make_node(\"QuantizeLinear\", inputs=[\"W_o\",\"scale_o\",\"zero_point_signed\"], outputs=[\"ql_wo_out\"], name=\"ql_w4\")\n", + 
"clp_w4 = make_node(\"Clip\", inputs=[\"ql_wo_out\",\"min\",\"max\"], outputs=[\"clp_wo\"], name=\"clp_w4\")\n", + "dql_w4 = make_node(\"DequantizeLinear\", inputs=[\"clp_wo\",\"scale_o\",\"zero_point_signed\"], outputs=[\"dql_wo_out\"], name=\"dql_w4\")\n", + "\n", + "# Quantizations for the four recurrence weight matrices showing QCDQ quantization\n", + "ql_u1 = make_node(\"QuantizeLinear\", inputs=[\"U_f\",\"scale_f\",\"zero_point_signed\"], outputs=[\"ql_uf_out\"], name=\"ql_u1\")\n", + "clp_u1 = make_node(\"Clip\", inputs=[\"ql_uf_out\",\"min\",\"max\"], outputs=[\"clp_uf\"], name=\"clp_u1\")\n", + "dql_u1 = make_node(\"DequantizeLinear\", inputs=[\"clp_uf\",\"scale_f\",\"zero_point_signed\"], outputs=[\"dql_uf_out\"], name=\"dql_u1\")\n", + "\n", + "ql_u2 = make_node(\"QuantizeLinear\", inputs=[\"U_i\",\"scale_i\",\"zero_point_signed\"], outputs=[\"ql_ui_out\"], name=\"ql_u2\")\n", + "clp_u2 = make_node(\"Clip\", inputs=[\"ql_ui_out\",\"min\",\"max\"], outputs=[\"clp_ui\"], name=\"clp_u2\")\n", + "dql_u2 = make_node(\"DequantizeLinear\", inputs=[\"clp_ui\",\"scale_i\",\"zero_point_signed\"], outputs=[\"dql_ui_out\"], name=\"dql_u2\")\n", + "\n", + "ql_u3 = make_node(\"QuantizeLinear\", inputs=[\"U_c\",\"scale_c\",\"zero_point_signed\"], outputs=[\"ql_uc_out\"], name=\"ql_u3\")\n", + "clp_u3 = make_node(\"Clip\", inputs=[\"ql_uc_out\",\"min\",\"max\"], outputs=[\"clp_uc\"], name=\"clp_u3\")\n", + "dql_u3 = make_node(\"DequantizeLinear\", inputs=[\"clp_uc\",\"scale_c\",\"zero_point_signed\"], outputs=[\"dql_uc_out\"], name=\"dql_u3\")\n", + "\n", + "ql_u4 = make_node(\"QuantizeLinear\", inputs=[\"U_o\",\"scale_o\",\"zero_point_signed\"], outputs=[\"ql_uo_out\"], name=\"ql_u4\")\n", + "clp_u4 = make_node(\"Clip\", inputs=[\"ql_uo_out\",\"min\",\"max\"], outputs=[\"clp_uo\"], name=\"clp_u4\")\n", + "dql_u4 = make_node(\"DequantizeLinear\", inputs=[\"clp_uo\",\"scale_o\",\"zero_point_signed\"], outputs=[\"dql_uo_out\"], name=\"dql_u4\")\n", + "\n", + "# Once we 
have quantized the weights and inputs we can now start defining the operations for the 6 LSTM equations.\n", + "# The first four gate equations have a very similar compute structure. We define the first four gate computations in this order : Forget, Input, Output, Cell \n", + "\n", + "# 1st Equation : Forget gate\n", + "matmul_1_e1 = make_node(\"MatMul\", inputs=[\"dql_wf_out\",\"dql_input_out\"], outputs=[\"out_m1_e1\"], name=\"matmul_1_e1\")\n", + "matmul_2_e1 = make_node(\"MatMul\", inputs=[\"dql_uf_out\",\"h_t-1\"], outputs=[\"out_m2_e1\"],name=\"matmul_2_e1\")\n", + "add_1_e1 = make_node(\"Add\", inputs=[\"out_m1_e1\",\"out_m2_e1\"], outputs=[\"out_add1_e1\"],name=\"add_1_e1\")\n", + "add_2_e1 = make_node(\"Add\", inputs=[\"out_add1_e1\",\"b_f\"], outputs=[\"f_t_ba\"],name=\"add_2_e1\")\n", + "ql_1_e1 = make_node(\"QuantizeLinear\", inputs=[\"f_t_ba\",\"scale_3\",\"zero_point_signed\"], outputs=[\"f_t_ql1\"],name=\"ql_1_e1\")\n", + "dql_1_e1 = make_node(\"DequantizeLinear\", inputs=[\"f_t_ql1\", \"scale_4\", \"zero_point_signed\"], outputs=[\"f_t_dql1\"], name=\"dql_1_e1\")\n", + "sig_f_e1 = make_node(\"Sigmoid\", inputs=[\"f_t_dql1\"], outputs=[\"f_t\"],name=\"sig_f_e1\")\n", + "ql_2_e1 = make_node(\"QuantizeLinear\", inputs=[\"f_t\",\"scale_4\",\"zero_point_unsigned\"], outputs=[\"f_t_ql2\"],name=\"ql_2_e1\")\n", + "dql_2_e1 = make_node(\"DequantizeLinear\", inputs=[\"f_t_ql2\", \"scale_4\", \"zero_point_unsigned\"], outputs=[\"f_t_dql2\"], name=\"dql_2_e1\")\n", + "\n", + "# 2nd Equation : Input gate\n", + "matmul_1_e2 = make_node(\"MatMul\", inputs=[\"dql_wi_out\",\"dql_input_out\"], outputs=[\"out_m1_e2\"], name=\"matmul_1_e2\")\n", + "matmul_2_e2 = make_node(\"MatMul\", inputs=[\"dql_ui_out\",\"h_t-1\"], outputs=[\"out_m2_e2\"],name=\"matmul_2_e2\")\n", + "add_1_e2 = make_node(\"Add\", inputs=[\"out_m1_e2\",\"out_m2_e2\"], outputs=[\"out_add1_e2\"],name=\"add_1_e2\")\n", + "add_2_e2 = make_node(\"Add\", inputs=[\"out_add1_e2\",\"b_i\"], 
outputs=[\"i_t_ba\"],name=\"add_2_e2\")\n", + "ql_1_e2 = make_node(\"QuantizeLinear\", inputs=[\"i_t_ba\",\"scale_1\",\"zero_point_signed\"], outputs=[\"i_t_ql1\"],name=\"ql_1_e2\")\n", + "dql_1_e2 = make_node(\"DequantizeLinear\", inputs=[\"i_t_ql1\",\"scale_1\", \"zero_point_signed\"], outputs=[\"i_t_dql1\"], name=\"dql_1_e2\")\n", + "sig_i_e2 = make_node(\"Sigmoid\", inputs=[\"i_t_dql1\"], outputs=[\"i_t\"],name=\"sig_i_e2\")\n", + "ql_2_e2 = make_node(\"QuantizeLinear\", inputs=[\"i_t\",\"scale_2\",\"zero_point_unsigned\"], outputs=[\"i_t_ql2\"],name=\"ql_2_e2\")\n", + "dql_2_e2 = make_node(\"DequantizeLinear\", inputs=[\"i_t_ql2\", \"scale_2\", \"zero_point_unsigned\"], outputs=[\"i_t_dql2\"], name=\"dql_2_e2\")\n", + "\n", + "# 3rd Equation : Output gate\n", + "matmul_1_e3 = make_node(\"MatMul\", inputs=[\"dql_wo_out\",\"dql_input_out\"], outputs=[\"out_m1_e3\"], name=\"matmul_1_e3\")\n", + "matmul_2_e3 = make_node(\"MatMul\", inputs=[\"dql_uo_out\",\"h_t-1\"], outputs=[\"out_m2_e3\"],name=\"matmul_2_e3\")\n", + "add_1_e3 = make_node(\"Add\", inputs=[\"out_m1_e3\",\"out_m2_e3\"], outputs=[\"out_add1_e3\"],name=\"add_1_e3\")\n", + "add_2_e3 = make_node(\"Add\", inputs=[\"out_add1_e3\",\"b_o\"], outputs=[\"o_t_ba\"],name=\"add_2_e3\" )\n", + "ql_1_e3 = make_node(\"QuantizeLinear\", inputs=[\"o_t_ba\",\"scale_7\",\"zero_point_signed\"], outputs=[\"o_t_ql1\"],name=\"ql_1_e3\")\n", + "dql_1_e3 = make_node(\"DequantizeLinear\", inputs=[\"o_t_ql1\",\"scale_7\", \"zero_point_signed\"], outputs=[\"o_t_dql1\"], name=\"dql_1_e3\")\n", + "sig_o_e3 = make_node(\"Sigmoid\", inputs=[\"o_t_dql1\"], outputs=[\"o_t\"],name=\"sig_o_e3\")\n", + "ql_2_e3 = make_node(\"QuantizeLinear\", inputs=[\"o_t\",\"scale_8\",\"zero_point_unsigned\"], outputs=[\"o_t_ql2\"],name=\"ql_2_e3\")\n", + "dql_2_e3 = make_node(\"DequantizeLinear\", inputs=[\"o_t_ql2\", \"scale_8\", \"zero_point_unsigned\"], outputs=[\"o_t_dql2\"], name=\"dql_2_e3\")\n", + "\n", + "# 4th Equation : Cell gate\n", + 
"matmul_1_e4 = make_node(\"MatMul\", inputs=[\"dql_wc_out\",\"dql_input_out\"], outputs=[\"out_m1_e4\"], name=\"matmul_1_e4\")\n", + "matmul_2_e4 = make_node(\"MatMul\", inputs=[\"dql_uc_out\",\"h_t-1\"], outputs=[\"out_m2_e4\"],name=\"matmul_2_e4\")\n", + "add_1_e4 = make_node(\"Add\", inputs=[\"out_m1_e4\",\"out_m2_e4\"], outputs=[\"out_add1_e4\"],name=\"add_1_e4\")\n", + "add_2_e4 = make_node(\"Add\", inputs=[\"out_add1_e4\",\"b_c\"], outputs=[\"c_t_ba\"],name=\"add_2_e4\")\n", + "ql_1_e4 = make_node(\"QuantizeLinear\", inputs=[\"c_t_ba\",\"scale_5\",\"zero_point_signed\"], outputs=[\"c_t_ql1\"],name=\"ql_1_e4\")\n", + "dql_1_e4 = make_node(\"DequantizeLinear\", inputs=[\"c_t_ql1\",\"scale_5\", \"zero_point_signed\"], outputs=[\"c_t_dql1\"], name=\"dql_1_e4\")\n", + "tanh_c_e4 = make_node(\"Tanh\", inputs=[\"c_t_dql1\"], outputs=[\"c_t_partial\"],name=\"tanh_c_e4\")\n", + "ql_2_e4 = make_node(\"QuantizeLinear\", inputs=[\"c_t_partial\",\"scale_6\",\"zero_point_signed\"], outputs=[\"c_t_ql2\"],name=\"ql_2_e4\")\n", + "dql_2_e4 = make_node(\"DequantizeLinear\", inputs=[\"c_t_ql2\", \"scale_6\", \"zero_point_signed\"], outputs=[\"c_t_dql2\"], name=\"dql_2_e4\")\n", + "\n", + "# Once we have the first four gate computations we can procedd with the computation of the cell_state and the hidden_state in the 5th and the 6th equations.\n", + "# 5th Equation : Cell state compute\n", + "el_mul_1_e5 = make_node(\"Mul\", inputs=[\"f_t_dql2\",\"c_t-1\"], outputs=[\"out_el_mul1_e5\"],name=\"el_mul_1_e5\")\n", + "ql_1_e5 = make_node(\"QuantizeLinear\", inputs=[\"out_el_mul1_e5\",\"scale_9\",\"zero_point_signed\"], outputs=[\"fifth_ql1\"],name=\"ql_1_e5\")\n", + "dql_1_e5 = make_node(\"DequantizeLinear\", inputs=[\"fifth_ql1\",\"scale_9\", \"zero_point_signed\"], outputs=[\"fifth_dql1\"], name=\"dql_1_e5\")\n", + "el_mul_2_e5 = make_node(\"Mul\", inputs=[\"i_t_dql2\",\"c_t_dql2\"], outputs=[\"out_el_mul2_e5\"], name=\"el_mul_2_e5\") \n", + "ql_2_e5 = 
make_node(\"QuantizeLinear\", inputs=[\"out_el_mul2_e5\",\"scale_9\",\"zero_point_signed\"], outputs=[\"fifth_ql2\"],name=\"ql_2_e5\")\n", + "dql_2_e5 = make_node(\"DequantizeLinear\", inputs=[\"fifth_ql2\",\"scale_9\", \"zero_point_signed\"], outputs=[\"fifth_dql2\"], name=\"dql_2_e5\")\n", + "add_1_e5 = make_node(\"Add\", inputs=[\"fifth_dql1\",\"fifth_dql2\"], outputs=[\"c_t\"], name=\"add_1_e5\") #-----------------> The first output is computed here.\n", + "ql_3_e5 = make_node(\"QuantizeLinear\", inputs=[\"c_t\",\"scale_9\",\"zero_point_signed\"], outputs=[\"h_t_ql\"], name=\"ql_3_e5\")\n", + "dql_3_e5 = make_node(\"DequantizeLinear\", inputs=[\"h_t_ql\",\"scale_9\",\"zero_point_signed\"], outputs=[\"h_t_dql\"], name=\"dql_3_e5\")\n", + "\n", + "# 6th Equation : Hidden state compute\n", + "tanh_node_e6 = make_node(\"Tanh\", inputs=[\"h_t_dql\"], outputs=[\"out_tanh_e6\"], name=\"tanh_node_e6\") \n", + "ql_1_e6 = make_node(\"QuantizeLinear\", inputs=[\"out_tanh_e6\",\"scale_10\",\"zero_point_signed\"], outputs=[\"sixth_ql1\"], name=\"ql_1_e6\")\n", + "dql_1_e6 = make_node(\"DequantizeLinear\", inputs=[\"sixth_ql1\",\"scale_10\",\"zero_point_signed\"], outputs=[\"sixth_dql1\"], name=\"dql_1_e6\")\n", + "el_mul_1_e6 = make_node(\"Mul\", inputs=[\"sixth_dql1\",\"o_t_dql2\"], outputs=[\"h_t_inter\"], name=\"el_mul_1_e6\")#h_t_inter\n", + "ql_2_e6 = make_node(\"QuantizeLinear\", inputs=[\"h_t_inter\",\"scale_11\",\"zero_point_signed\"], outputs=[\"sixth_ql2\"], name=\"ql_2_e6\")\n", + "dql_2_e6 = make_node(\"DequantizeLinear\", inputs=[\"sixth_ql2\",\"scale_11\",\"zero_point_signed\"], outputs=[\"h_t\"], name=\"dql_2_e6\") #-----------------> The second output is computed here.\n", + "id_1_e6 = make_node(\"Identity\", inputs=[\"h_t\"], outputs=[\"h_t_concat\"], name=\"id_1_e6\") #-----------------> The third output is computed here." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3d10867f", + "metadata": {}, + "source": [ + "After defining the above operations we now connect them and create a graph with the help of onnx.helper `make_graph` utility function" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "79839558-8752-4fc8-9b0e-8fed47c91701", + "metadata": {}, + "outputs": [], + "source": [ + "lstm_body = make_graph(\n", + " nodes=[\n", + " ql_input,\n", + " dql_input, \n", + " ql_w1,\n", + " clp_w1, \n", + " dql_w1,\n", + " ql_w2,\n", + " clp_w2, \n", + " dql_w2,\n", + " ql_w3,\n", + " clp_w3, \n", + " dql_w3,\n", + " ql_w4,\n", + " clp_w4, \n", + " dql_w4,\n", + " ql_u1,\n", + " clp_u1, \n", + " dql_u1,\n", + " ql_u2,\n", + " clp_u2,\n", + " dql_u2, \n", + " ql_u3,\n", + " clp_u3,\n", + " dql_u3, \n", + " ql_u4,\n", + " clp_u4,\n", + " dql_u4, \n", + " matmul_1_e1,\n", + " matmul_2_e1, \n", + " add_1_e1, \n", + " add_2_e1,\n", + " ql_1_e1,\n", + " dql_1_e1,\n", + " sig_f_e1,\n", + " ql_2_e1, \n", + " dql_2_e1, \n", + " matmul_1_e2,\n", + " matmul_2_e2, \n", + " add_1_e2, \n", + " add_2_e2,\n", + " ql_1_e2,\n", + " dql_1_e2,\n", + " sig_i_e2,\n", + " ql_2_e2, \n", + " dql_2_e2, \n", + " matmul_1_e3,\n", + " matmul_2_e3, \n", + " add_1_e3, \n", + " add_2_e3,\n", + " ql_1_e3,\n", + " dql_1_e3,\n", + " sig_o_e3,\n", + " ql_2_e3, \n", + " dql_2_e3, \n", + " matmul_1_e4,\n", + " matmul_2_e4, \n", + " add_1_e4, \n", + " add_2_e4,\n", + " ql_1_e4,\n", + " dql_1_e4,\n", + " tanh_c_e4,\n", + " ql_2_e4, \n", + " dql_2_e4, \n", + " el_mul_1_e5,\n", + " ql_1_e5, \n", + " dql_1_e5,\n", + " el_mul_2_e5,\n", + " ql_2_e5,\n", + " dql_2_e5,\n", + " add_1_e5,\n", + " ql_3_e5, \n", + " dql_3_e5,\n", + " tanh_node_e6,\n", + " ql_1_e6, \n", + " dql_1_e6,\n", + " el_mul_1_e6,\n", + " ql_2_e6,\n", + " dql_2_e6, \n", + " id_1_e6\n", + " ],\n", + " name = \"qcdq-lsmt-body\",\n", + " inputs=[hidden_state,cell_state,inputs], #The order in which the inputs are defined here should match the input 
order when the scan node is defined.\n", + " outputs = [out_hidden_state, out_cell_state, out_hidden_state_concat],\n", + " value_info=[\n", + " make_tensor_value_info(\"ql_input_out\",onnx.TensorProto.INT8, [num_features,1]),\n", + " make_tensor_value_info(\"dql_input_out\",onnx.TensorProto.FLOAT, [num_features,1]),\n", + " make_tensor_value_info(\"out_m1_e1\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m2_e1\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_add1_e1\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"f_t_ba\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"f_t_ql1\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"f_t_dql1\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"f_t_ql2\",onnx.TensorProto.UINT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"f_t_dql2\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m1_e2\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m2_e2\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_add1_e2\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"i_t_ba\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"i_t_ql1\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"i_t_dql1\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"i_t_ql2\",onnx.TensorProto.UINT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"i_t_dql2\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m1_e3\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m2_e3\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " 
make_tensor_value_info(\"out_add1_e3\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"o_t_ba\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"o_t_ql1\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"o_t_dql1\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"o_t_ql2\",onnx.TensorProto.UINT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"o_t_dql2\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m1_e4\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_m2_e4\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_add1_e4\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"c_t_ba\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"c_t_ql1\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"c_t_dql1\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"c_t_ql2\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"c_t_dql2\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"f_t\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"i_t\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"o_t\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"c_t_partial\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_el_mul1_e5\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_el_mul2_e5\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"fifth_ql1\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"fifth_dql1\", onnx.TensorProto.FLOAT, 
[num_hidden_cells,1]),\n", + " make_tensor_value_info(\"fifth_ql2\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"fifth_dql2\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"h_t_ql\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"h_t_dql\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"out_tanh_e6\",onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"sixth_ql1\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"sixth_dql1\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"sixth_ql2\",onnx.TensorProto.INT8, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"h_t_inter\", onnx.TensorProto.FLOAT, [num_hidden_cells,1]),\n", + " make_tensor_value_info(\"ql_wf_out\", onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"dql_wf_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"ql_wi_out\", onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"dql_wi_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"ql_wc_out\", onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"dql_wc_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"ql_wo_out\", onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"dql_wo_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"ql_uf_out\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"dql_uf_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"ql_ui_out\",onnx.TensorProto.INT8, 
[num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"dql_ui_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"ql_uc_out\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"dql_uc_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"ql_uo_out\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"dql_uo_out\",onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"clp_wf\",onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"clp_wi\",onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"clp_wc\",onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"clp_wo\",onnx.TensorProto.INT8, [num_hidden_cells,num_features]),\n", + " make_tensor_value_info(\"clp_uf\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]), \n", + " make_tensor_value_info(\"clp_ui\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"clp_uc\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]),\n", + " make_tensor_value_info(\"clp_uo\",onnx.TensorProto.INT8, [num_hidden_cells,num_hidden_cells]),\n", + " ],\n", + " initializer=[\n", + " # Initializing the weight and recurrecne matrices\n", + " make_tensor('W_f',onnx.TensorProto.FLOAT, [num_hidden_cells,num_features], (Wf_val)),\n", + " make_tensor('U_f',onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells], (Uf_val)),\n", + " make_tensor('b_f',onnx.TensorProto.FLOAT, [num_hidden_cells,1], (bf_val)),\n", + " make_tensor('W_i',onnx.TensorProto.FLOAT, [num_hidden_cells,num_features], (Wi_val)),\n", + " make_tensor('U_i',onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells], (Ui_val)),\n", + " make_tensor('b_i',onnx.TensorProto.FLOAT, 
[num_hidden_cells,1], (bi_val)),\n", + " make_tensor('W_o',onnx.TensorProto.FLOAT, [num_hidden_cells,num_features], (Wo_val)),\n", + " make_tensor('U_o',onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells], (Uo_val)),\n", + " make_tensor('b_o',onnx.TensorProto.FLOAT, [num_hidden_cells,1], (bo_val)),\n", + " make_tensor('W_c',onnx.TensorProto.FLOAT, [num_hidden_cells,num_features], (Wc_val)),\n", + " make_tensor('U_c',onnx.TensorProto.FLOAT, [num_hidden_cells,num_hidden_cells], (Uc_val)),\n", + " make_tensor('b_c',onnx.TensorProto.FLOAT, [num_hidden_cells,1], (bc_val)),\n", + " # Input scale value\n", + " make_tensor('inp_scale',onnx.TensorProto.FLOAT, [],[inp_scale_val]),\n", + " # Scale weight values\n", + " make_tensor('scale_i',onnx.TensorProto.FLOAT, [],[w1_scale_val]),\n", + " make_tensor('scale_c',onnx.TensorProto.FLOAT, [],[w2_scale_val]),\n", + " make_tensor('scale_o',onnx.TensorProto.FLOAT, [],[w3_scale_val]),\n", + " make_tensor('scale_f',onnx.TensorProto.FLOAT, [],[w4_scale_val]),\n", + " # Scale values for the six equations\n", + " make_tensor('scale_1',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_2',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]), \n", + " make_tensor('scale_3',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_test',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_4',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_5',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_6',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_7',onnx.TensorProto.FLOAT, [],[eq_scale_val_2]), \n", + " make_tensor('scale_8',onnx.TensorProto.FLOAT, [],[eq_scale_val_2]),\n", + " make_tensor('scale_9',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " make_tensor('scale_10',onnx.TensorProto.FLOAT, [],[eq_scale_val_2]),\n", + " make_tensor('scale_11',onnx.TensorProto.FLOAT, [],[eq_scale_val_1]),\n", + " # Scales for 
zero-points : Zero-point datatype defines the dataype of the output for that quantization\n", + " make_tensor('zero_point_signed',onnx.TensorProto.INT8,[],[zero_point_signed_val]),\n", + " make_tensor('zero_point_unsigned',onnx.TensorProto.UINT8,[],[zero_point_unsigned_val]),\n", + " # Introducing scalars for the clip operators.\n", + " make_tensor('min', onnx.TensorProto.INT8, [], [min_clip_val]),\n", + " make_tensor('max', onnx.TensorProto.INT8, [], [max_clip_val]),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "b1b16751", + "metadata": {}, + "source": [ + "The above created graph can now be converted into a qonnx model with the `qonnx_make_model` utility. We save the model with `onnx.save` utility and then view it in Netron with the help of `showInNetron` utility. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c6ec7b2a-456d-4452-97ec-df9a471d5391", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Serving './lstm_full_graph.onnx' at http://localhost:8080\n" + ] + }, + { + "data": { + "text/plain": [ + "('localhost', 8080)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "lstm_model = qonnx_make_model(lstm_body, producer_name=\"QuantizeLSTM_scan\")\n", + "onnx.save(lstm_model, './lstm_full_graph.onnx')\n", + "netron.start('./lstm_full_graph.onnx')" + ] + }, + { + "cell_type": "markdown", + "id": "40b49257", + "metadata": {}, + "source": [ + "In this block of code we execute the onnx graph to check that it can execute without any errors. We perform it's functional verification in the later part of the notebook." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "db5892bc-ac8d-4972-afcf-20bf880f5e86", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[array([[ 0.1484375],\n", + " [-0.0078125],\n", + " [ 0.0390625],\n", + " [ 0.140625 ],\n", + " [ 0.015625 ],\n", + " [ 0. ],\n", + " [ 0.1015625],\n", + " [-0.1015625],\n", + " [ 0.0390625],\n", + " [-0.0625 ],\n", + " [ 0.015625 ],\n", + " [-0.125 ],\n", + " [ 0.1015625],\n", + " [ 0.03125 ],\n", + " [ 0.1640625],\n", + " [-0.015625 ],\n", + " [-0.0234375],\n", + " [-0.015625 ],\n", + " [-0.046875 ],\n", + " [ 0.0078125]], dtype=float32), array([[ 0.2421875],\n", + " [-0.0078125],\n", + " [ 0.0625 ],\n", + " [ 0.2421875],\n", + " [ 0.03125 ],\n", + " [ 0.0078125],\n", + " [ 0.2265625],\n", + " [-0.234375 ],\n", + " [ 0.0859375],\n", + " [-0.1328125],\n", + " [ 0.0390625],\n", + " [-0.2421875],\n", + " [ 0.1875 ],\n", + " [ 0.0546875],\n", + " [ 0.296875 ],\n", + " [-0.03125 ],\n", + " [-0.0546875],\n", + " [-0.03125 ],\n", + " [-0.109375 ],\n", + " [ 0.0234375]], dtype=float32), array([[ 0.1484375],\n", + " [-0.0078125],\n", + " [ 0.0390625],\n", + " [ 0.140625 ],\n", + " [ 0.015625 ],\n", + " [ 0. ],\n", + " [ 0.1015625],\n", + " [-0.1015625],\n", + " [ 0.0390625],\n", + " [-0.0625 ],\n", + " [ 0.015625 ],\n", + " [-0.125 ],\n", + " [ 0.1015625],\n", + " [ 0.03125 ],\n", + " [ 0.1640625],\n", + " [-0.015625 ],\n", + " [-0.0234375],\n", + " [-0.015625 ],\n", + " [-0.046875 ],\n", + " [ 0.0078125]], dtype=float32)]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-10-20 11:07:46.350885612 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'scale_test'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 11:07:46.370978980 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'scale_test'. 
It is not used by any node and should be removed from the model.\n" + ] + } + ], + "source": [ + "# Before the model can be executed, its opset version needs to be set to a minimum of '14' to accommodate clip nodes with INT8 and UINT8 input. Otherwise ONNX cannot create an execution session and we get errors.\n", + "lstm_model.opset_import[0].version = 14\n", + "\n", + "# Creating the inference session for the updated model here\n", + "sess = rt.InferenceSession(lstm_model.SerializeToString())\n", + "\n", + "# Defining dummy inputs and the model parameters for dummy execution\n", + "X_inp = np.empty([num_features,1],dtype=np.float32).reshape([num_features,1])\n", + "X_inp.fill(0.8)\n", + "hidden_state_input = np.zeros((num_hidden_cells, 1)).astype(np.float32)\n", + "cell_state_input = np.zeros((num_hidden_cells, 1)).astype(np.float32)\n", + "\n", + "# Assigning the above defined values to the input dictionary of the ONNX model.\n", + "input_dict = {}\n", + "input_dict[\"inp\"] = X_inp\n", + "input_dict[\"h_t-1\"] = hidden_state_input\n", + "input_dict[\"c_t-1\"] = cell_state_input \n", + "\n", + "# Setting up the inference session and executing the onnx model here.\n", + "sess = rt.InferenceSession(lstm_model.SerializeToString())\n", + "output = sess.run(None, input_dict)\n", + "print(output)" + ] + }, + { + "cell_type": "markdown", + "id": "5d2b5a1e-654e-46a5-9d4f-8708611a6d1e", + "metadata": {}, + "source": [ + "# SCAN Operation Integration" + ] + }, + { + "cell_type": "markdown", + "id": "7365329a-f3d2-4f74-8e2f-9076771e07a7", + "metadata": {}, + "source": [ + "### Introduction to ONNX Scan operation\n", + "Observations regarding the `Scan` operator in ONNX:\n", + "\n", + "1. `Scan` can be used to iterate over one or more scan input tensors constructing zero or more scan output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map and zip.\n", + "2. 
The attribute `body` in the node must be a graph specifying the computation to be performed in every iteration.\n", + "3. Input is the current values of the `state variables` and the current `iterated element` of the scan input. Returns values of the `state variables` and the `scan output element tensors`. (Can be greater than 1)\n", + "4. The values of the scan output tensors are concatenated over all the iterations to produce the scan output values of the scan construct.\n", + "5. The properties that make a scan node unique and different from a normal compute node are:\n", + "* Allows update of state variable after each input computation; to be used in the processing of the next input.\n", + "* It needs to scan your inputs row by row or column by column; then keep computing the output with the updated hidden state for every input; while storing all the intermediate outputs in the form of hidden states.\n", + "\n", + "More information regarding this op can be found in these links:\n", + "\n", + "* https://github.com/onnx/onnx/blob/main/docs/Operators.md#Scan\n", + "* https://onnx.ai/onnx/intro/python.html#scan" + ] + }, + { + "cell_type": "markdown", + "id": "17f247f7", + "metadata": {}, + "source": [ + "The `Scan` operation is essentially a container operator which will consume the LSTM graph that we created above in its body.\n", + "To create it, we need to define separate input and output value info tensors just for the Scan operator. We will then follow the same steps as the `QCDQ-LSTM` graph creation to convert the above graph into an executable ONNX model.\n", + "

\n", + "We start by defining the input and output value info tensors for the `scan_graph` creation. These tensors act as the wrapper to the previously defined graph.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "700a93a8-f757-4fa1-88dd-47a3f2a7f171", + "metadata": {}, + "outputs": [], + "source": [ + "# Inputs\n", + "scan_input = make_tensor_value_info(\"scan_input\",onnx.TensorProto.FLOAT, [None,num_features,1])#X ; scan input. Here None defines the variable number of inputs that can be supplied for input processing.\n", + "scan_hidden_state = make_tensor_value_info(\"scan_hidden_state\",onnx.TensorProto.FLOAT, [num_hidden_cells,1])# h_t-1\n", + "scan_cell_state = make_tensor_value_info(\"scan_cell_state\",onnx.TensorProto.FLOAT, [num_hidden_cells,1])# c_t-1\n", + "\n", + "# Outputs\n", + "scan_out_hidden_state = make_tensor_value_info(\"scan_out_hidden_state\", onnx.TensorProto.FLOAT, [num_hidden_cells,1])#h_t\n", + "scan_out_cell_state = make_tensor_value_info(\"scan_out_cell_state\", onnx.TensorProto.FLOAT, [num_hidden_cells,1])#c_t\n", + "scan_out_hidden_state_concat = make_tensor_value_info(\"scan_out_hidden_state_concat\", onnx.TensorProto.FLOAT, [None,num_hidden_cells,1])" + ] + }, + { + "cell_type": "markdown", + "id": "572f191e", + "metadata": {}, + "source": [ + "We will now create the scan operator utilizing the `make_node` utility from ONNX.\n", + "Note, in the body of the operation we have included the `lstm_body` graph we created in the above steps."
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "111fdce4-464f-40c1-ac4d-3022b05f153e", + "metadata": {}, + "outputs": [], + "source": [ + "scan_node_lstm = make_node(\n", + " \"Scan\", \n", + " inputs=[\"scan_hidden_state\",\"scan_cell_state\",\"scan_input\"], \n", + " outputs=[\"scan_out_hidden_state\",\"scan_out_cell_state\",\"scan_out_hidden_state_concat\"], \n", + " num_scan_inputs=1,\n", + " body=lstm_body, domain=''\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "ea8a05d9", + "metadata": {}, + "source": [ + "We can now define the graph for the scan operator utilizing the `make_graph` utility." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "4668cf2b-524e-4768-8dc8-9d619f6273da", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Serving './lstm_scan_node_model.onnx' at http://localhost:8081\n", + "[]\n" + ] + } + ], + "source": [ + "scan_lstm_node_graph = make_graph(\n", + " nodes = [scan_node_lstm],\n", + " name=\"lstm-scan-node\",\n", + " inputs=[scan_hidden_state,scan_cell_state,scan_input],#h_t-1, c_t-1, X\n", + " outputs=[scan_out_hidden_state,scan_out_cell_state,scan_out_hidden_state_concat]#h_t,c_t,h_t_concat\n", + ")\n", + "\n", + "# Creating the model from the above created graph and saving it.\n", + "lstm_scan_node_model = qonnx_make_model(scan_lstm_node_graph, producer_name=\"scan-lstm\")\n", + "onnx.save(lstm_scan_node_model, './lstm_scan_node_model.onnx')\n", + "netron.start('./lstm_scan_node_model.onnx')\n", + "\n", + "#Checking the model for any errors\n", + "onnx.checker.check_model(lstm_scan_node_model)\n", + "print(lstm_scan_node_model.graph.value_info)\n", + "\n", + "#Conversion to version 14 of onnx to accomodate clip nodes as done for the LSTM graph also.\n", + "lstm_scan_node_model.opset_import[0].version = 14" + ] + }, + { + "cell_type": "markdown", + "id": "0673e335", + "metadata": {}, + "source": [ + "Now that we have the SCAN based 
quantized LSTM model ready, we can now go forward and test it with the same sets of inputs we used for the testing of the brevitas model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "818d2a81-686f-4a4a-8e78-17dbf75d8451", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final Hidden State [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "------------------------\n", + "Final Cell State [[ 0.421875 ]\n", + " [-0.078125 ]\n", + " [ 0.0234375]\n", + " [ 0.4921875]\n", + " [ 0.1484375]\n", + " [-0.09375 ]\n", + " [ 0.75 ]\n", + " [-0.59375 ]\n", + " [ 0.1171875]\n", + " [-0.3125 ]\n", + " [ 0.0390625]\n", + " [-0.421875 ]\n", + " [ 0.3984375]\n", + " [ 0.2578125]\n", + " [ 0.828125 ]\n", + " [ 0.0625 ]\n", + " [-0.0703125]\n", + " [-0.109375 ]\n", + " [-0.1484375]\n", + " [ 0.0234375]]\n", + "------------------------\n", + "All Hidden States [[[ 0.1484375]\n", + " [-0.0078125]\n", + " [ 0.0390625]\n", + " [ 0.140625 ]\n", + " [ 0.015625 ]\n", + " [ 0. 
]\n", + " [ 0.1015625]\n", + " [-0.1015625]\n", + " [ 0.0390625]\n", + " [-0.0625 ]\n", + " [ 0.015625 ]\n", + " [-0.125 ]\n", + " [ 0.1015625]\n", + " [ 0.03125 ]\n", + " [ 0.1640625]\n", + " [-0.015625 ]\n", + " [-0.0234375]\n", + " [-0.015625 ]\n", + " [-0.046875 ]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.203125 ]\n", + " [-0.0234375]\n", + " [ 0.03125 ]\n", + " [ 0.2109375]\n", + " [ 0.0234375]\n", + " [-0.015625 ]\n", + " [ 0.1875 ]\n", + " [-0.1484375]\n", + " [ 0.046875 ]\n", + " [-0.09375 ]\n", + " [ 0.0234375]\n", + " [-0.1640625]\n", + " [ 0.1484375]\n", + " [ 0.0703125]\n", + " [ 0.2578125]\n", + " [-0.015625 ]\n", + " [-0.03125 ]\n", + " [-0.0234375]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.2265625]\n", + " [-0.03125 ]\n", + " [ 0.015625 ]\n", + " [ 0.2421875]\n", + " [ 0.03125 ]\n", + " [-0.0234375]\n", + " [ 0.234375 ]\n", + " [-0.1796875]\n", + " [ 0.0546875]\n", + " [-0.109375 ]\n", + " [ 0.0234375]\n", + " [-0.1875 ]\n", + " [ 0.1796875]\n", + " [ 0.09375 ]\n", + " [ 0.2734375]\n", + " [ 0. 
]\n", + " [-0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.0703125]\n", + " [ 0.015625 ]]\n", + "\n", + " [[ 0.234375 ]\n", + " [-0.0390625]\n", + " [ 0.015625 ]\n", + " [ 0.2578125]\n", + " [ 0.0390625]\n", + " [-0.03125 ]\n", + " [ 0.25 ]\n", + " [-0.1875 ]\n", + " [ 0.0546875]\n", + " [-0.125 ]\n", + " [ 0.015625 ]\n", + " [-0.1953125]\n", + " [ 0.1953125]\n", + " [ 0.1171875]\n", + " [ 0.2734375]\n", + " [ 0.015625 ]\n", + " [-0.03125 ]\n", + " [-0.0390625]\n", + " [-0.078125 ]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.2421875]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0390625]\n", + " [-0.03125 ]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.1328125]\n", + " [ 0.015625 ]\n", + " [-0.1953125]\n", + " [ 0.203125 ]\n", + " [ 0.1328125]\n", + " [ 0.2734375]\n", + " [ 0.0234375]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.078125 ]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.2421875]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.046875 ]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.1328125]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.2421875]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.046875 ]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.2421875]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " 
[ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.2421875]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", 
+ " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + 
" [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 
]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]\n", + "\n", + " [[ 0.25 ]\n", + " [-0.046875 ]\n", + " [ 0.015625 ]\n", + " [ 0.2734375]\n", + " [ 0.0546875]\n", + " [-0.0390625]\n", + " [ 0.25 ]\n", + " [-0.1953125]\n", + " [ 0.0546875]\n", + " [-0.140625 ]\n", + " [ 0.015625 ]\n", + " [-0.203125 ]\n", + " [ 0.203125 ]\n", + " [ 0.140625 ]\n", + " [ 0.2734375]\n", + " [ 0.03125 ]\n", + " [-0.03125 ]\n", + " [-0.046875 ]\n", + " [-0.0703125]\n", + " [ 0.0078125]]]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-10-20 10:50:38.892379706 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'scale_test'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894726380 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_uo_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894741924 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wf_out'. 
It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894750521 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_ui_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894758793 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'max'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894767212 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_c'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894775093 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_c'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894782542 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_i'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894790413 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_uc_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894797986 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_i'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894805922 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wi_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894813725 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_o'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894821378 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_f'. 
It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894829187 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_o'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894837744 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_uf_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894845343 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wc_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894852862 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_f'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894861070 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wo_out'. It is not used by any node and should be removed from the model.\n", + "2023-10-20 10:50:38.894868719 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'min'. It is not used by any node and should be removed from the model.\n" + ] + } + ], + "source": [ + "# Defining the values of the varibales to test the execution of the scan model\n", + "num_inputs = 25\n", + "\n", + "#Initializing the initial values of the hidden state and the cell state. 
\n", + "# Also assigning the same input as the one used for the brevitas execution.\n", + "\n", + "hidden_state_inp = np.zeros((num_hidden_cells, 1)).astype(np.float32)#'h_t-1'\n", + "cell_state_inp = np.zeros((num_hidden_cells, 1)).astype(np.float32)#'c_t-1'\n", + "scan_inp = np.empty([num_inputs,num_features,1],dtype=np.float32).reshape([num_inputs,num_features,1])\n", + "scan_inp.fill(0.8)\n", + "\n", + "# Assigning the defined input values to the input dictionary of the scan model\n", + "input_dict = {}\n", + "input_dict[\"scan_hidden_state\"] = hidden_state_inp\n", + "input_dict[\"scan_cell_state\"] = cell_state_inp\n", + "input_dict[\"scan_input\"] = scan_inp\n", + "\n", + "# We can now set up the inference session and execute the scan onnx model here. \n", + "# The execution session gives some warnings which can be ignored.\n", + "\n", + "sess = rt.InferenceSession(lstm_scan_node_model.SerializeToString())\n", + "scan_output = sess.run(None, input_dict)\n", + "print('Final Hidden State',scan_output[0])\n", + "print(\"------------------------\")\n", + "print('Final Cell State',scan_output[1])\n", + "print(\"------------------------\")\n", + "print('All Hidden States',scan_output[2])" + ] + }, + { + "cell_type": "markdown", + "id": "907d2ff9-f605-4aec-891e-0c77a1a92346", + "metadata": {}, + "source": [ + "# Functional Verification" + ] + }, + { + "cell_type": "markdown", + "id": "b6bb6c60", + "metadata": {}, + "source": [ + "In the final part of the notebook, we compare the output of the 8-bit quantized `(QCDQ)-LSTM` implementation with the `QuantLSTM` brevitas model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "2fe07395-6cf9-4c99-a0d3-a27aa6a326b5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Brevitas Output shape : (25, 1, 20)\n", + "SCAN-QCDQ-LSTM output shape : (25, 1, 20)\n", + "-----------------------------------\n", + "Brevitas Output = [[[ 0.1484375 -0.0078125 0.0390625 
0.140625 0.0078125 0.\n", + " 0.109375 -0.09375 0.0390625 -0.0625 0.015625 -0.1171875\n", + " 0.1015625 0.03125 0.1640625 -0.015625 -0.0234375 -0.015625\n", + " -0.046875 0.0078125]]\n", + "\n", + " [[ 0.2109375 -0.0234375 0.03125 0.2109375 0.0234375 -0.015625\n", + " 0.1875 -0.1484375 0.046875 -0.09375 0.0234375 -0.1640625\n", + " 0.1484375 0.0625 0.2578125 -0.015625 -0.03125 -0.0234375\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2421875 -0.0390625 0.015625 0.25 0.03125 -0.0234375\n", + " 0.234375 -0.1796875 0.0546875 -0.109375 0.015625 -0.1875\n", + " 0.1796875 0.09375 0.3125 0. -0.03125 -0.03125\n", + " -0.078125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.0390625 0.015625 0.265625 0.0390625 -0.03125\n", + " 0.265625 -0.1875 0.0546875 -0.125 0.015625 -0.1953125\n", + " 0.1953125 0.1171875 0.3359375 0.015625 -0.03125 -0.0390625\n", + " -0.078125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", + " 0.2890625 -0.1953125 0.0546875 -0.125 0.015625 -0.203125\n", + " 0.203125 0.125 0.359375 0.0234375 -0.03125 -0.046875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.2734375 0.046875 -0.0390625\n", + " 0.296875 -0.1953125 0.0546875 -0.1328125 0.015625 -0.203125\n", + " 0.2109375 0.1328125 0.3671875 0.03125 -0.0234375 -0.046875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.015625 0.28125 0.0546875 -0.046875\n", + " 0.3046875 -0.1953125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.2109375 0.140625 0.375 0.0390625 -0.0234375 -0.046875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.0546875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.2109375 0.140625 0.3828125 0.0390625 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.2109375 
0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + 
" [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", + " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", + " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", + " -0.0703125 0.015625 ]]]\n", + "-----------------------------------\n", + "SCAN-QCDQ-LSTM output [[[ 0.1484375 -0.0078125 0.0390625 
0.140625 0.015625 0.\n", + " 0.1015625 -0.1015625 0.0390625 -0.0625 0.015625 -0.125\n", + " 0.1015625 0.03125 0.1640625 -0.015625 -0.0234375 -0.015625\n", + " -0.046875 0.0078125]]\n", + "\n", + " [[ 0.203125 -0.0234375 0.03125 0.2109375 0.0234375 -0.015625\n", + " 0.1875 -0.1484375 0.046875 -0.09375 0.0234375 -0.1640625\n", + " 0.1484375 0.0703125 0.2578125 -0.015625 -0.03125 -0.0234375\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.2265625 -0.03125 0.015625 0.2421875 0.03125 -0.0234375\n", + " 0.234375 -0.1796875 0.0546875 -0.109375 0.0234375 -0.1875\n", + " 0.1796875 0.09375 0.2734375 0. -0.03125 -0.03125\n", + " -0.0703125 0.015625 ]]\n", + "\n", + " [[ 0.234375 -0.0390625 0.015625 0.2578125 0.0390625 -0.03125\n", + " 0.25 -0.1875 0.0546875 -0.125 0.015625 -0.1953125\n", + " 0.1953125 0.1171875 0.2734375 0.015625 -0.03125 -0.0390625\n", + " -0.078125 0.0078125]]\n", + "\n", + " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.0390625 -0.03125\n", + " 0.25 -0.1953125 0.0546875 -0.1328125 0.015625 -0.1953125\n", + " 0.203125 0.1328125 0.2734375 0.0234375 -0.03125 -0.046875\n", + " -0.078125 0.0078125]]\n", + "\n", + " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.1328125 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 
0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 
0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]\n", + "\n", + " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", + " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", + " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", + " -0.0703125 0.0078125]]]\n", + "-----------------------------------\n", + "[[[ 0. 0. 0. 0. 1. 0. -1. -1. 0. 0. 0. -1. 0. 0.\n", + " 0. 0. 0. 0. 0. 0.]]\n", + "\n", + " [[ -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n", + " 0. 0. 0. 0. 0. 
-1.]]\n", + "\n", + " [[ -2. 1. 0. -1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.\n", + " -5. 0. 0. 0. 1. 1.]]\n", + "\n", + " [[ -2. 0. 0. -1. 0. 0. -2. 0. 0. 0. 0. 0. 0. 0.\n", + " -8. 0. 0. 0. 0. -1.]]\n", + "\n", + " [[ -2. 0. 0. 0. -1. 1. -5. 0. 0. -1. 0. 1. 0. 1.\n", + " -11. 0. 0. 0. -1. -1.]]\n", + "\n", + " [[ -2. 0. 1. 0. 0. 0. -6. 0. 0. -1. 0. 0. -1. 0.\n", + " -12. 0. -1. 0. 0. -1.]]\n", + "\n", + " [[ -2. 0. 0. -1. -1. 1. -7. 0. 0. 0. 1. 0. -1. 0.\n", + " -13. -1. -1. 0. 0. -1.]]\n", + "\n", + " [[ -2. 1. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -1. 0.\n", + " -14. -1. -2. 1. 0. -1.]]\n", + "\n", + " [[ -2. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -1. -1.\n", + " -15. -1. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -1. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. 
-1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]\n", + "\n", + " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", + " -15. -2. -2. 1. 0. -1.]]]\n" + ] + } + ], + "source": [ + "# We first match the shape of both the outputs to perform the functional verification correctly\n", + "\n", + "print('Brevitas Output shape : ', brevitas_output.shape)\n", + "all_hidden_states = np.array(scan_output[2])\n", + "all_hidden_states = all_hidden_states.reshape([num_inputs,1,num_hidden_cells])\n", + "print('SCAN-QCDQ-LSTM output shape :', all_hidden_states.shape)\n", + "print('-----------------------------------')\n", + "print('Brevitas Output = ',brevitas_output)\n", + "print('-----------------------------------')\n", + "print('SCAN-QCDQ-LSTM output',all_hidden_states)\n", + "print('-----------------------------------')\n", + "\n", + "# Comparison between the 'Scan-LSTM output' and the brevitas 'QuantLSTM' ouptut\n", + "# Since the outputs from both models are floating-point, to get a better understanding of the differences we scale the outputs to INT8 precision and then compare their differences.\n", + "# The scale used to do that is the last scale of the LSTM graph.\n", + "\n", + "scale = inp_scale_val #The scale value is equal to the value of the inp_scale_val\n", + "all_hidden_states = np.array(scan_output[2])\n", + "all_hidden_states = all_hidden_states.reshape([num_inputs,1,num_hidden_cells])\n", + "all_hidden_state_diff = (all_hidden_states - brevitas_output)\n", + "print(all_hidden_state_diff/scale)" + ] + }, + { + "cell_type": "markdown", + "id": "7bcca933", + "metadata": {}, + "source": [ + "Note the difference in outputs increases as we progress with processing the inputs. 
The first two outputs are very close to one another, but as we get the outputs for more inputs we see that some values differ from the brevitas output by a considerable amount.\n", + "This behaviour can be attributed to some values being slightly different in the first few outputs (which are not visible) which eventually causes an increase in differences between both values as more inputs are processed." + ] + }, + { + "cell_type": "markdown", + "id": "81c6d531", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From e2e07371c5e1a03de0dc0dc68e9ee55df0270a95 Mon Sep 17 00:00:00 2001 From: shashwat1198 Date: Sun, 22 Oct 2023 10:29:44 +0100 Subject: [PATCH 03/83] Clean QuantLSTM --- notebooks/4_quant_lstm.ipynb | 2040 +--------------------------------- 1 file changed, 22 insertions(+), 2018 deletions(-) diff --git a/notebooks/4_quant_lstm.ipynb b/notebooks/4_quant_lstm.ipynb index 72cac7e9..186be984 100644 --- a/notebooks/4_quant_lstm.ipynb +++ b/notebooks/4_quant_lstm.ipynb @@ -89,18 +89,10 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "84d66548-365d-46a5-9eaa-bb767085f9aa", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'\n" - ] - } - ], + "outputs": [], "source": [ "# We import the required libraries to execute different functions in the notebook.\n", + "# The first four imports are required to build the QuantLSTM model in brevitas. 
\n", @@ -126,291 +118,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "23a7682c", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "quant_input_supplied to brevitas = tensor([[-1.0000, -0.5000, -1.0000, 0.5156, -1.0000, 0.9922, -0.8047, -1.0000,\n", - " 0.2188, 0.9922]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.7266, -0.9531, 0.9922, 0.9922, -1.0000, 0.9922, -0.7734, -1.0000,\n", - " -0.0859, 0.6250]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.6719, -1.0000, 0.0547, -0.5234, -0.0000, 0.1250, -1.0000, 0.3047,\n", - " -0.0312, -1.0000]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-1.0000, -0.1797, 0.3516, -0.1328, -1.0000, -1.0000, 0.8750, -0.2812,\n", - " 0.4844, -0.3203]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.6719, -0.1484, 0.5078, 0.5312, -0.2969, 0.1719, -1.0000, 0.4688,\n", - " -0.2500, 0.8672]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.3125, 0.9922, 0.8281, -0.4297, -1.0000, 0.9922, -1.0000, 0.9922,\n", - " -1.0000, 0.2578]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.3125, -1.0000, -0.4688, 0.2656, -1.0000, -1.0000, -1.0000, -0.7266,\n", - " 0.9922, 0.8984]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.5625, 0.8359, -1.0000, 0.1875, -1.0000, -1.0000, 0.1562, 0.3438,\n", - " 0.6172, -1.0000]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-1.0000, -0.0781, 0.3203, 0.1797, -1.0000, -0.1875, 0.9219, -0.4609,\n", - " -0.3125, 0.2031]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.8750, -1.0000, 0.6016, -1.0000, -0.7656, -0.1484, 0.9922, 0.6406,\n", - " -1.0000, 
0.9922]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.9922, -1.0000, 0.5078, -1.0000, -1.0000, 0.4453, -1.0000, 0.6719,\n", - " -1.0000, -1.0000]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.0703, -1.0000, -0.6797, -1.0000, -1.0000, -0.8750, -0.6797, 0.3672,\n", - " -0.5938, -0.2031]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.6641, 0.9922, 0.1641, 0.9922, 0.9922, -1.0000, -1.0000, 0.9922,\n", - " 0.3438, 0.4688]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.1875, 0.0000, -0.2812, -1.0000, -1.0000, -0.0391, 0.0781, 0.9922,\n", - " -0.2188, 0.9922]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.2578, 0.9922, -1.0000, 0.4297, -0.7500, 0.2891, -1.0000, -1.0000,\n", - " 0.6484, 0.3828]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.3594, -0.0000, -1.0000, 0.4688, -0.2734, -1.0000, -0.2969, 0.9922,\n", - " 0.9922, 0.9062]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.0938, -1.0000, 0.1016, -0.7109, -0.3203, 0.7578, 0.9922, 0.3359,\n", - " 0.1328, 0.4062]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.4141, -0.6328, -0.7422, 0.9609, -0.9062, -0.4297, 0.7031, 0.9922,\n", - " -1.0000, -0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.3203, -1.0000, -0.7109, 0.3281, 0.6016, -0.2031, -0.6172, 0.7031,\n", - " -0.5078, -1.0000]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-1.0000, -0.2500, -0.9766, -1.0000, 0.3984, -0.6484, -1.0000, 0.7188,\n", - " 0.9922, 0.9453]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[-0.5234, 0.9922, -0.3984, 0.1328, -0.0625, -0.8047, -0.1562, 
-0.1250,\n", - " -0.1172, 0.6328]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.0547, 0.0156, 0.0703, -0.8750, -1.0000, 0.5156, -0.0938, -0.2969,\n", - " -0.9922, 0.9922]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.9922, -1.0000, 0.3438, 0.9922, 0.1328, 0.2891, 0.0469, -0.3438,\n", - " -0.9531, -1.0000]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.2969, -1.0000, 0.1250, -1.0000, -0.5469, -1.0000, 0.5000, 0.7344,\n", - " -1.0000, 0.7109]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[ 0.4219, 0.4922, 0.7266, 0.0078, 0.0469, 0.9844, -0.5391, -0.0781,\n", - " 0.9922, -1.0000]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 
0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - 
"quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "quant_input_supplied to brevitas = tensor([[0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969, 0.7969,\n", - " 0.7969]])\n", - "----------------------------\n", - "[[[ 0.1484375 -0.0078125 0.0390625 0.140625 0.0078125 0.\n", - " 0.109375 -0.09375 0.0390625 -0.0625 0.015625 -0.1171875\n", - " 0.1015625 0.03125 0.1640625 -0.015625 -0.0234375 -0.015625\n", - " -0.046875 0.0078125]]\n", - "\n", - " [[ 0.2109375 -0.0234375 0.03125 0.2109375 0.0234375 -0.015625\n", - " 0.1875 -0.1484375 0.046875 -0.09375 0.0234375 -0.1640625\n", - " 0.1484375 0.0625 0.2578125 -0.015625 -0.03125 -0.0234375\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2421875 -0.0390625 0.015625 0.25 0.03125 -0.0234375\n", - " 0.234375 -0.1796875 0.0546875 -0.109375 0.015625 -0.1875\n", - " 0.1796875 0.09375 0.3125 0. 
-0.03125 -0.03125\n", - " -0.078125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.0390625 0.015625 0.265625 0.0390625 -0.03125\n", - " 0.265625 -0.1875 0.0546875 -0.125 0.015625 -0.1953125\n", - " 0.1953125 0.1171875 0.3359375 0.015625 -0.03125 -0.0390625\n", - " -0.078125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", - " 0.2890625 -0.1953125 0.0546875 -0.125 0.015625 -0.203125\n", - " 0.203125 0.125 0.359375 0.0234375 -0.03125 -0.046875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.2734375 0.046875 -0.0390625\n", - " 0.296875 -0.1953125 0.0546875 -0.1328125 0.015625 -0.203125\n", - " 0.2109375 0.1328125 0.3671875 0.03125 -0.0234375 -0.046875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.015625 0.28125 0.0546875 -0.046875\n", - " 0.3046875 -0.1953125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.2109375 0.140625 0.375 0.0390625 -0.0234375 -0.046875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.0546875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.2109375 0.140625 0.3828125 0.0390625 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.2109375 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 
0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 
-0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]]\n" - ] - } - ], + "outputs": [], "source": [ "# In this block of code we will create the QuantLSTM model using the brevitas layer\n", "torch.manual_seed(0) #Setting the manual seeds to 0 for consistency in outputs.\n", @@ -454,685 +165,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "0bfbf5a3-8556-4190-a28f-4fe9859c55a9", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.layers.0.0.input_gate_params.bias\n", - "(20,)\n", - "[-0.02587563 -0.18425222 -0.18189065 0.02914573 -0.21827428 0.0595416\n", - " -0.20598626 -0.15559138 -0.04639753 -0.2133838 0.18059207 0.18321364\n", - " -0.11679631 0.04684116 
0.11439164 0.07105622 -0.02995344 -0.21090843\n", - " 0.1625932 -0.19612479] , 0\n", - "-------------------------\n", - "0.layers.0.0.input_gate_params.input_weight.weight\n", - "(20, 10)\n", - "[[-4.14119214e-02 1.38706667e-02 -7.36431107e-02 -8.17852393e-02\n", - " -1.93256751e-01 1.23205660e-02 -2.53894478e-02 1.94940954e-01\n", - " -7.36160800e-02 1.72829047e-01]\n", - " [ 1.05855539e-02 -1.00462548e-01 -5.31778559e-02 -2.53751595e-02\n", - " 2.31616711e-03 -3.68398018e-02 6.63604736e-02 1.84143797e-01\n", - " 3.51473056e-02 8.09932351e-02]\n", - " [ 1.38081744e-01 4.81988601e-02 1.03076197e-01 1.17293097e-01\n", - " 2.09298924e-01 -2.04075590e-01 7.65163079e-02 -1.01319486e-02\n", - " -4.01576199e-02 -8.62098187e-02]\n", - " [ 1.34432539e-01 2.04552680e-01 -1.82483241e-01 1.20810278e-01\n", - " 1.54187992e-01 3.90806384e-02 2.63404008e-03 1.72071218e-01\n", - " 6.62961556e-03 -5.57729751e-02]\n", - " [-1.65121444e-02 7.17408881e-02 5.59775345e-02 -1.20642958e-02\n", - " 7.05851838e-02 6.02219440e-02 -1.81134686e-01 5.57176135e-02\n", - " 1.36812523e-01 2.56436393e-02]\n", - " [-2.04101056e-02 1.71289816e-01 -1.95361048e-01 -1.02062307e-01\n", - " -1.01068199e-01 1.93207934e-01 -2.16277346e-01 2.21768115e-02\n", - " -2.16605455e-01 -7.35303294e-03]\n", - " [ 8.33466202e-02 -5.22914641e-02 2.17063010e-01 7.11822009e-04\n", - " -1.14001475e-01 5.76605424e-02 1.16289847e-01 -4.44249017e-04\n", - " 1.91289768e-01 -1.41524345e-01]\n", - " [ 9.54081938e-02 1.26971915e-01 1.11063533e-01 -8.20205314e-05\n", - " 6.38317242e-02 -1.75422058e-01 -1.75476715e-01 -1.38986288e-02\n", - " -2.80253254e-02 1.66033790e-01]\n", - " [ 1.62366882e-01 1.51616067e-01 -1.02419287e-01 -1.75539613e-01\n", - " -2.09742919e-01 8.09257179e-02 -2.01488122e-01 -2.23217383e-01\n", - " -1.13006435e-01 -1.88792080e-01]\n", - " [-8.81207064e-02 -1.40770882e-01 -1.14718042e-01 2.12588429e-01\n", - " -4.21379767e-02 1.85490459e-01 4.96126944e-03 -2.87544206e-02\n", - " -6.54680878e-02 
-1.59840211e-01]\n", - " [-1.79656431e-01 1.54830217e-01 -6.89065754e-02 -2.18012080e-01\n", - " 2.05210581e-01 4.14780807e-03 -1.49626598e-01 -1.75766915e-01\n", - " -1.87781662e-01 -1.96070760e-01]\n", - " [ 2.02346548e-01 1.54175445e-01 1.82888191e-02 -1.90574318e-01\n", - " -5.84847443e-02 -2.10055038e-01 7.70593956e-02 -5.93719892e-02\n", - " -4.78506237e-02 -6.97683394e-02]\n", - " [ 1.04838371e-01 1.21036001e-01 4.89832126e-02 -2.80011501e-02\n", - " -2.20977236e-02 -3.90723767e-03 -1.66511953e-01 2.18188778e-01\n", - " -9.64377001e-02 1.30095944e-01]\n", - " [-1.25353500e-01 1.50110642e-03 7.65467212e-02 -2.05311388e-01\n", - " 1.02568395e-01 -1.71158642e-01 3.12034953e-02 -4.43410687e-02\n", - " 1.28176615e-01 2.17323676e-01]\n", - " [ 5.03933132e-02 -6.38488680e-03 -1.10784821e-01 8.33686888e-02\n", - " -1.07626989e-01 9.23645869e-02 -9.69173536e-02 1.51675642e-01\n", - " 1.71514452e-01 1.37112319e-01]\n", - " [ 2.23987759e-03 1.03696242e-01 -2.03757793e-01 1.81339085e-01\n", - " -5.80957830e-02 8.15173239e-02 -3.78652588e-02 -7.50842392e-02\n", - " -1.05006970e-01 1.44231498e-01]\n", - " [-1.21653110e-01 -3.94320451e-02 1.12798467e-01 2.25366149e-02\n", - " -1.88142627e-01 -2.22348958e-01 -1.08711593e-01 2.06236228e-01\n", - " -1.58990204e-01 1.23237595e-01]\n", - " [ 1.60061240e-01 -9.26844329e-02 -9.87462699e-02 -1.60870835e-01\n", - " 3.48785594e-02 -3.12594734e-02 1.08638955e-02 9.69918296e-02\n", - " 9.38790441e-02 -7.05472827e-02]\n", - " [ 1.53575651e-02 5.31169996e-02 4.75974986e-03 4.47460003e-02\n", - " -9.05808210e-02 1.83284596e-01 -2.29354147e-02 -2.86094397e-02\n", - " -2.00689927e-01 -1.62085444e-01]\n", - " [ 6.95567206e-03 -3.45815569e-02 -1.12424992e-01 1.17047116e-01\n", - " -2.00185552e-02 7.86398575e-02 1.88336477e-01 -1.02802545e-01\n", - " -1.10053055e-01 -4.49331515e-02]] , 1\n", - "-------------------------\n", - "0.layers.0.0.input_gate_params.hidden_weight.weight\n", - "(20, 20)\n", - "[[-1.89352538e-02 -1.11839756e-01 
-5.36844507e-02 -6.44523604e-03\n", - " 1.00301303e-01 2.06872717e-01 1.65582791e-01 2.36654170e-02\n", - " -1.40909785e-02 5.72774969e-02 -9.12800338e-03 -2.93454379e-02\n", - " 7.68917575e-02 -1.81926534e-01 -1.90163419e-01 9.05744440e-04\n", - " -6.77747875e-02 -1.10600702e-01 -2.08165124e-01 1.49785221e-01]\n", - " [-8.90937075e-03 -1.20138384e-01 -9.10849124e-02 5.87869175e-02\n", - " -1.62167445e-01 1.43613769e-02 -2.75748386e-03 7.61744976e-02\n", - " 8.87038633e-02 -1.46100059e-01 9.65513662e-02 1.68849513e-01\n", - " 1.43956831e-02 1.13917463e-01 -8.46547335e-02 4.44148518e-02\n", - " 6.53375536e-02 -1.03280008e-01 1.38058737e-01 -2.11419612e-01]\n", - " [-8.39947835e-02 -1.31567493e-01 -1.32741287e-01 -1.35494858e-01\n", - " -2.10702628e-01 3.83746810e-02 -4.42331657e-02 -1.88279316e-01\n", - " -9.19632221e-05 -3.72487307e-02 9.22437534e-02 -1.75148100e-01\n", - " -6.29062578e-02 4.60259691e-02 9.47839618e-02 1.69158224e-02\n", - " 6.05970472e-02 2.23524958e-01 -7.74600878e-02 1.52398065e-01]\n", - " [ 1.92612275e-01 -1.97806209e-01 5.40891960e-02 1.26661941e-01\n", - " -3.48797850e-02 1.23408221e-01 7.60573195e-03 1.70228094e-01\n", - " 4.81458148e-03 -1.43158093e-01 1.69815615e-01 6.65016174e-02\n", - " 1.90237820e-01 5.55088967e-02 1.18736811e-01 1.39421389e-01\n", - " 3.76524106e-02 -5.19809462e-02 4.61825170e-02 -1.55909836e-01]\n", - " [ 7.63913197e-03 -7.18704611e-02 1.41373863e-02 -1.77042618e-01\n", - " 1.36628836e-01 -2.06302434e-01 9.57576782e-02 1.47258580e-01\n", - " -2.04934716e-01 2.02031001e-01 -1.66225716e-01 -4.39088680e-02\n", - " 1.15872569e-01 -7.09063411e-02 1.99275032e-01 -9.86447409e-02\n", - " -2.99374424e-02 -1.46168455e-01 -1.03737742e-01 2.18205780e-01]\n", - " [ 1.68166518e-01 1.64642967e-02 1.83855016e-02 -1.89751670e-01\n", - " 1.68811426e-01 -3.35250199e-02 -9.32650268e-02 -1.77951321e-01\n", - " 1.83845311e-01 1.06031545e-01 1.34684831e-01 2.31534615e-02\n", - " -1.51732951e-01 9.15970504e-02 2.57883817e-02 
7.50367939e-02\n", - " -5.56799732e-02 -1.05523452e-01 1.83565930e-01 7.49567226e-02]\n", - " [-9.07528847e-02 1.99678559e-02 -4.86066155e-02 -1.91221125e-02\n", - " 1.25389591e-01 -1.77972749e-01 2.02371553e-01 1.50499865e-01\n", - " 1.92136504e-04 -9.14627835e-02 4.55915295e-02 -1.48007214e-01\n", - " 1.45243973e-01 -1.18256845e-01 4.27256078e-02 -2.19991282e-01\n", - " 1.07079633e-01 1.51370272e-01 1.67834863e-01 1.82519276e-02]\n", - " [ 1.32025823e-01 7.62412176e-02 1.49954304e-01 1.26183063e-01\n", - " -1.95639879e-01 2.35728398e-02 -7.62314126e-02 -1.06771380e-01\n", - " 1.56516239e-01 -3.20035741e-02 3.47357877e-02 1.40789405e-01\n", - " 1.50514722e-01 1.19332708e-01 -3.90392952e-02 -1.99321926e-01\n", - " -2.14659125e-01 7.02862144e-02 -2.65357876e-03 -1.41277447e-01]\n", - " [ 9.76564139e-02 2.02965632e-01 1.29328549e-01 -3.15438919e-02\n", - " 3.02148778e-02 -1.42630830e-01 1.05540812e-01 -1.73283800e-01\n", - " 1.54376432e-01 -1.02132224e-01 -8.86853859e-02 -1.87295631e-01\n", - " -5.40727489e-02 -2.16292981e-02 -1.03067294e-01 1.59174219e-01\n", - " 1.28328785e-01 -1.97347268e-01 -2.23675612e-02 7.51795396e-02]\n", - " [ 2.15735227e-01 -5.34672327e-02 1.37278914e-01 -1.25270970e-02\n", - " -8.57628211e-02 1.36838645e-01 -1.99253812e-01 1.87337860e-01\n", - " 2.23344907e-01 -6.10500947e-02 8.83295834e-02 2.22981662e-01\n", - " 6.74140528e-02 8.74451399e-02 8.21070075e-02 -9.14832279e-02\n", - " 5.45820408e-02 -1.19176529e-01 1.90940976e-01 -9.58186984e-02]\n", - " [ 5.11176400e-02 -6.47741258e-02 1.11825228e-01 3.68577940e-03\n", - " 1.22950912e-01 -6.05489872e-02 -1.31215081e-01 8.57292935e-02\n", - " -1.25841707e-01 -1.83588028e-01 8.63927826e-02 -1.34484172e-01\n", - " -8.40481222e-02 -5.58335669e-02 1.58777572e-02 -7.74438009e-02\n", - " -8.04765150e-02 -5.62009923e-02 1.56701818e-01 6.69540018e-02]\n", - " [-1.07652791e-01 -1.54563770e-01 5.18102152e-03 7.16358349e-02\n", - " -4.67919558e-03 1.30897254e-01 1.88077956e-01 6.55371249e-02\n", - " 
7.37451240e-02 1.29728526e-01 -7.66031295e-02 3.96637134e-02\n", - " 1.80782616e-01 -1.07077263e-01 1.74031202e-02 -8.74211192e-02\n", - " -1.71936572e-01 1.18438050e-01 1.78673968e-01 -1.20800309e-01]\n", - " [ 8.38049129e-02 6.85676187e-02 8.73105526e-02 1.23087496e-01\n", - " 2.08757341e-01 1.69717655e-01 -1.95658267e-01 -8.76599625e-02\n", - " 1.18758187e-01 -1.27650708e-01 4.39067073e-02 -9.58611295e-02\n", - " 4.44106422e-02 1.09106824e-01 7.02822655e-02 1.62435979e-01\n", - " -2.69077457e-02 1.21389672e-01 7.22895712e-02 -7.04701096e-02]\n", - " [-1.57925934e-01 2.04573229e-01 -6.66687265e-02 1.68426275e-01\n", - " 1.40947536e-01 -9.00426600e-03 -1.84701070e-01 1.80013608e-02\n", - " -1.08096078e-01 5.81858531e-02 -8.88810679e-02 1.72345534e-01\n", - " -2.01746121e-01 -6.01959564e-02 3.52624580e-02 2.13314164e-02\n", - " 1.83701098e-01 -7.06517771e-02 -1.78495154e-01 1.48046315e-01]\n", - " [ 6.24824539e-02 1.47299409e-01 -1.32342920e-01 -1.31334439e-01\n", - " -9.03252959e-02 1.58978552e-02 7.57712200e-02 -1.28496692e-01\n", - " -2.10528076e-02 -3.86467576e-02 2.04027027e-01 -8.06416422e-02\n", - " 2.16690734e-01 -1.37144789e-01 -9.21397135e-02 -1.68184295e-01\n", - " 1.64731190e-01 -1.53769597e-01 9.25582647e-02 -8.21671411e-02]\n", - " [ 2.22826257e-01 3.15412283e-02 -1.94183901e-01 3.84835452e-02\n", - " 2.71859560e-02 -2.16274336e-01 4.48757894e-02 2.13342309e-01\n", - " 6.43487200e-02 -1.18915108e-03 -4.63541821e-02 5.94213046e-02\n", - " -9.96202976e-02 2.20200241e-01 1.93298727e-01 1.04461670e-01\n", - " -8.32887441e-02 -2.09956676e-01 -1.28724366e-01 2.17411697e-01]\n", - " [-2.05243871e-01 -2.13502616e-01 -1.61161683e-02 7.11405650e-02\n", - " -2.22554103e-01 -2.07601383e-01 1.21570053e-03 -7.50053376e-02\n", - " 1.55782372e-01 6.41999543e-02 -1.94095746e-01 -2.01538876e-01\n", - " 1.53562352e-01 -3.96501981e-02 -9.78184044e-02 7.04318583e-02\n", - " -4.39465865e-02 1.06939368e-01 5.67044728e-02 -9.68158469e-02]\n", - " [-1.79218486e-01 
1.21047780e-01 -1.34345368e-01 -2.47318167e-02\n", - " 3.05733737e-02 -1.30131751e-01 1.21804118e-01 -1.57282248e-01\n", - " 5.49192652e-02 2.39149425e-02 8.20437744e-02 -2.19451547e-01\n", - " 1.29167549e-02 1.09009661e-01 -1.43156886e-01 5.53317666e-02\n", - " 8.76156322e-04 1.89696804e-01 -4.73480262e-02 1.52765575e-03]\n", - " [-9.72549468e-02 -5.51085509e-02 6.40134960e-02 -2.15656430e-01\n", - " 1.69629768e-01 1.60795882e-01 9.46965069e-02 1.67391464e-01\n", - " -6.96057901e-02 5.09320870e-02 1.13759311e-02 -1.54622883e-01\n", - " -8.59646648e-02 -7.93827102e-02 -5.52875437e-02 -1.98549107e-01\n", - " -1.57260388e-01 -2.12343093e-02 -3.40157561e-02 -2.02978238e-01]\n", - " [ 4.77774814e-02 1.21752672e-01 1.86222807e-01 1.88188314e-01\n", - " -1.56248853e-01 -7.16619864e-02 -1.06078379e-01 4.10118401e-02\n", - " 5.99195063e-02 4.97494638e-02 1.30669191e-01 1.17969945e-01\n", - " -1.20020248e-01 1.53502032e-01 1.50838137e-01 2.95910202e-02\n", - " -1.94543302e-01 -1.37143746e-01 6.23138808e-02 7.73103088e-02]] , 2\n", - "-------------------------\n", - "0.layers.0.0.forget_gate_params.bias\n", - "(20,)\n", - "[ 0.20850217 0.11380532 0.08104482 -0.00762655 0.15247074 -0.08138975\n", - " 0.0910454 -0.10650107 -0.00208706 0.13215044 0.10260209 -0.05017841\n", - " -0.00283135 -0.12413156 0.10357434 0.15046087 0.07697045 -0.21637587\n", - " -0.16006967 0.14969489] , 3\n", - "-------------------------\n", - "0.layers.0.0.forget_gate_params.input_weight.weight\n", - "(20, 10)\n", - "[[-0.03201701 0.13732338 0.16482215 -0.06550063 -0.13119501 -0.2103679\n", - " 0.08553377 0.11468438 -0.0387658 -0.21708311]\n", - " [-0.14402747 -0.01204806 0.10205487 -0.07492673 -0.14435105 -0.15566948\n", - " 0.2000676 0.08097311 -0.1815501 -0.13809344]\n", - " [-0.18981868 0.03235186 -0.09079897 -0.00075695 -0.0353742 -0.1957324\n", - " -0.19982079 -0.17343585 -0.09364887 0.03477862]\n", - " [-0.10515709 -0.00797041 -0.02678433 0.20449734 -0.10193561 0.21008612\n", - " -0.17165995 
-0.18656294 0.07271551 -0.13013807]\n", - " [ 0.11469334 -0.12370986 0.17608246 0.21651667 0.01431521 0.04778921\n", - " 0.20847315 0.13255776 -0.19520605 -0.00715788]\n", - " [-0.20184483 0.17081025 -0.04095714 -0.00155866 -0.13738167 -0.12158713\n", - " 0.02901981 0.18449156 -0.1123966 0.02112942]\n", - " [ 0.20241037 0.20039941 -0.04371644 0.20957804 0.08143061 0.20365277\n", - " 0.00663433 -0.1895056 -0.06086665 0.06706649]\n", - " [ 0.1192437 -0.22275887 0.17393245 -0.20059223 0.13101582 0.22062524\n", - " 0.05510434 -0.0422016 0.12311912 -0.06636703]\n", - " [-0.16563286 -0.15869099 0.10513588 0.1707739 0.00905446 -0.2168069\n", - " -0.21971782 -0.05049207 0.12070725 -0.1490105 ]\n", - " [ 0.06027115 -0.12221678 0.18192975 -0.05859193 -0.04659947 -0.19612114\n", - " -0.20028274 0.01511241 0.03615525 0.12080745]\n", - " [-0.19552828 0.03918052 -0.03230212 0.1311668 -0.1016731 0.06661848\n", - " 0.09010674 0.11232612 -0.07669472 0.07195909]\n", - " [-0.04382298 0.06021269 -0.13749652 -0.17768005 -0.18290731 -0.1405653\n", - " -0.09463658 0.03328432 -0.04891114 -0.12729394]\n", - " [ 0.00187842 -0.07061429 0.13783802 -0.18416376 -0.08253521 -0.1436971\n", - " 0.02759105 0.01219904 -0.0128632 0.22186181]\n", - " [-0.08530237 -0.03213883 0.05777045 0.18662488 0.16948868 0.02554451\n", - " -0.08459641 0.07345897 0.14069013 -0.00477207]\n", - " [ 0.12276765 0.18300453 -0.11980148 -0.04943415 -0.20131664 0.05132969\n", - " 0.15936238 -0.04342245 0.03568069 0.07144996]\n", - " [-0.00476937 0.17384104 0.0325843 -0.21979333 -0.18465139 -0.22154187\n", - " 0.00921626 0.12087465 -0.02950055 0.20104776]\n", - " [-0.04022751 0.04571649 0.20163535 0.11316557 -0.00713371 0.2153832\n", - " -0.1335971 0.08328808 0.14121595 -0.13845547]\n", - " [-0.21004361 0.07152335 -0.08483391 -0.1128413 0.04447659 -0.16221067\n", - " 0.2011128 -0.02007227 -0.07161061 0.18693109]\n", - " [ 0.06226142 0.04260208 -0.10691333 0.21311398 -0.06810362 0.18598051\n", - " -0.016437 0.11216957 
0.15722302 -0.1664758 ]\n", - " [-0.14903465 -0.22111452 0.16127922 0.19229865 -0.08172148 -0.10951796\n", - " 0.03742959 0.12038527 0.05519409 -0.04660187]] , 4\n", - "-------------------------\n", - "0.layers.0.0.forget_gate_params.hidden_weight.weight\n", - "(20, 20)\n", - "[[-0.14223064 0.19124371 -0.14481081 -0.21607104 -0.08928006 0.04458899\n", - " 0.0831126 0.08646142 -0.12953514 -0.08581803 -0.09943341 -0.10828371\n", - " -0.18833804 0.04577223 -0.06502874 -0.2152229 -0.13056786 -0.13428617\n", - " -0.09645564 -0.13816758]\n", - " [-0.03877772 0.08013236 -0.18096809 -0.01915519 -0.06435173 -0.11432081\n", - " -0.0496515 -0.09477154 0.00718846 -0.16141057 0.04240454 0.20530063\n", - " 0.18528308 -0.10025615 0.06892193 -0.21135406 0.18826427 -0.22283866\n", - " -0.19982089 -0.20071597]\n", - " [-0.20765333 0.03028304 -0.05912894 0.05351972 -0.01383548 -0.00480333\n", - " -0.08078498 -0.13266474 -0.18721604 0.11282834 -0.11529152 -0.04547688\n", - " 0.10860465 -0.05537887 -0.05637903 -0.14906646 -0.19131811 0.10732386\n", - " -0.05044974 0.14060505]\n", - " [ 0.01471702 -0.00028402 -0.20187245 0.0049368 -0.0505344 -0.12759772\n", - " -0.05175107 0.01168989 -0.16848378 0.03718214 0.15558895 0.04417289\n", - " 0.21344449 0.10434435 -0.17634727 -0.08801483 -0.05380939 0.06689031\n", - " -0.00637761 0.17993565]\n", - " [ 0.02597556 -0.14161254 -0.08197778 -0.18603216 -0.061655 0.10993782\n", - " 0.00215927 -0.21323241 -0.19348647 0.08106777 -0.19626026 -0.1783532\n", - " -0.1333177 0.21312374 -0.06358164 -0.09219337 -0.15098219 0.14304285\n", - " -0.03610551 0.04311918]\n", - " [ 0.05341741 0.06306308 0.14312816 0.01160373 0.02312934 -0.01452105\n", - " -0.17375752 -0.05117204 0.21281871 -0.15847513 -0.14112028 -0.22188812\n", - " 0.013559 -0.20914444 -0.11453009 0.20604049 0.09261008 0.11913135\n", - " 0.03828845 -0.19001652]\n", - " [-0.10404866 -0.18102278 -0.13826925 0.076148 -0.06201827 0.2185227\n", - " -0.16299975 -0.19082828 0.2207899 -0.19316407 
0.19027402 0.06021235\n", - " -0.20380671 0.1947569 -0.06087566 -0.09220145 -0.17443547 -0.1891369\n", - " 0.04978558 -0.21964009]\n", - " [ 0.09188584 -0.05525529 0.0784739 -0.05474811 0.07732737 -0.00610806\n", - " 0.06572182 -0.09097287 -0.15380703 0.02847747 -0.14272346 -0.13861606\n", - " -0.21501313 -0.07127416 -0.14941145 0.17413448 0.1611419 0.05305404\n", - " 0.18168166 0.10766982]\n", - " [-0.21064265 -0.022373 -0.03629636 -0.13576584 0.06368566 -0.06979065\n", - " -0.10692404 -0.00260666 -0.14866948 0.18506847 0.14149404 0.21166477\n", - " -0.03960523 0.07302888 -0.00899392 -0.18503006 0.10116354 -0.15618756\n", - " -0.08071785 -0.10013654]\n", - " [-0.21814388 0.00802042 0.03663212 -0.01662389 0.1644524 0.01072139\n", - " -0.0407296 -0.12196475 -0.13280123 -0.03179033 -0.1312358 -0.14750735\n", - " -0.02957479 -0.03948133 -0.13649467 0.13065115 0.18963577 -0.15246144\n", - " 0.09794185 -0.10375587]\n", - " [-0.02321799 0.20873794 0.02861272 -0.21320319 0.20555921 -0.00946067\n", - " -0.11196752 -0.11808899 0.19175017 0.00377388 0.12350584 0.14696068\n", - " -0.08678884 0.01897924 -0.14464125 0.18672368 -0.11824197 0.14852415\n", - " 0.05665502 0.1379358 ]\n", - " [-0.1575466 -0.00695391 0.11586404 -0.00892534 -0.0032084 0.10896464\n", - " -0.16712412 -0.04483069 0.10185106 0.10966767 0.20768207 -0.04423303\n", - " 0.05298113 -0.11002054 -0.03752897 -0.11225442 0.16570821 0.0013621\n", - " 0.09096613 0.12299404]\n", - " [ 0.04166875 0.02379598 -0.01636612 -0.1894117 0.03602695 -0.04953878\n", - " -0.18794785 0.20833082 -0.02383836 -0.11159918 -0.21768506 -0.20595226\n", - " 0.08515022 -0.1020775 -0.09659212 -0.12938367 0.18049696 -0.05375253\n", - " 0.14493793 0.17751718]\n", - " [-0.17336273 0.16682073 -0.04269946 0.21416363 0.11421449 -0.21660405\n", - " 0.04154139 0.07860353 -0.08111839 0.16956337 -0.1851744 -0.07095176\n", - " 0.2130592 0.21838497 0.11170101 -0.13348123 -0.19239157 -0.1818077\n", - " -0.05589887 0.12667239]\n", - " [ 0.07079396 
-0.02715501 0.20110089 0.17559125 -0.10450983 -0.09683432\n", - " -0.00262346 0.04640241 -0.00160075 0.08632647 0.15427703 -0.04031902\n", - " 0.10981148 0.03041176 0.08583194 0.09205452 -0.05976621 -0.09969731\n", - " 0.09557738 -0.14316456]\n", - " [ 0.1173941 -0.1434708 0.15340208 0.08971985 -0.05478028 0.12781222\n", - " -0.07363954 0.04763815 0.06583516 0.02283663 0.04490386 -0.00443905\n", - " -0.0645991 0.1247524 0.08819748 0.08340425 0.15096036 -0.11699554\n", - " -0.0519524 -0.00637345]\n", - " [ 0.18044722 -0.1780605 -0.12826072 -0.05326315 -0.19100511 -0.17666493\n", - " 0.15317535 0.01043098 -0.17988645 -0.03692174 -0.00735149 -0.07949581\n", - " -0.18703558 0.12169496 -0.02761802 0.21831468 -0.17125311 -0.12275734\n", - " -0.01161703 -0.15571442]\n", - " [ 0.16295849 0.17292082 0.2025731 -0.14115438 0.15909635 0.15525764\n", - " -0.08897205 0.02453648 0.10655329 0.16001071 -0.20884806 0.2226173\n", - " -0.05621968 0.09110746 -0.13887972 -0.17207511 -0.15143432 0.13178375\n", - " -0.11029776 0.12998497]\n", - " [ 0.0675995 0.08894558 -0.04973555 -0.07073203 -0.10462123 -0.12498911\n", - " 0.20617247 -0.01215215 -0.09589054 -0.20804486 0.0097276 -0.22196051\n", - " -0.00263305 0.14118703 -0.12879056 0.12285849 -0.07132839 -0.1719783\n", - " -0.22146888 0.11108326]\n", - " [-0.1710799 0.10918202 0.03201576 0.12152903 -0.16808327 0.19554281\n", - " -0.22271936 -0.16972543 0.13409424 0.00759949 -0.12556304 -0.04690479\n", - " -0.19899549 -0.194607 -0.04797396 0.17057896 0.06677905 0.04216573\n", - " -0.05926214 0.20352075]] , 5\n", - "-------------------------\n", - "0.layers.0.0.cell_gate_params.bias\n", - "(20,)\n", - "[ 0.00214154 0.07550146 0.00355405 0.03489293 0.07456551 0.17159154\n", - " 0.12870987 0.0286169 0.08939798 -0.06724557 0.15284362 0.06277069\n", - " 0.16875166 -0.03491265 -0.18256952 0.04417255 0.09094475 0.18067895\n", - " 0.08666804 0.08261736] , 6\n", - "-------------------------\n", - 
"0.layers.0.0.cell_gate_params.input_weight.weight\n", - "(20, 10)\n", - "[[ 0.17794745 -0.07684495 0.19742867 0.11464191 0.14933479 0.15947415\n", - " -0.18268393 0.11646748 0.20825341 -0.15708849]\n", - " [-0.01916463 -0.1364658 -0.05399449 0.03332363 0.11960924 -0.06491657\n", - " -0.21173826 0.12073942 0.12545025 -0.04053707]\n", - " [ 0.19142465 0.17237733 -0.04928424 0.00863487 0.03938841 -0.04381773\n", - " -0.05508858 -0.10093604 -0.12716216 0.11167222]\n", - " [-0.06639788 -0.10727276 0.19697405 0.03575112 0.16133724 0.2037714\n", - " -0.03149954 0.03335407 0.20731461 -0.15384933]\n", - " [-0.06704343 0.03181893 -0.01517017 0.05953267 0.11757869 -0.09199598\n", - " 0.01741112 0.20230028 -0.1265286 -0.15163381]\n", - " [-0.17148444 0.13366292 -0.20509928 -0.1087402 0.15102275 -0.13404797\n", - " 0.1818403 -0.10452814 0.03537463 0.02927051]\n", - " [-0.00548471 0.13927223 0.18991414 -0.13961166 0.12540615 0.0597448\n", - " -0.00416681 -0.15634763 0.06633033 0.1623022 ]\n", - " [-0.19193047 -0.20651296 -0.21982425 0.05166686 -0.06424998 -0.06945844\n", - " 0.20821334 -0.05703437 -0.14200093 0.02011372]\n", - " [-0.12272914 -0.06551553 0.11811562 0.05160707 -0.1534436 0.21288224\n", - " 0.15128401 -0.15242937 0.09739923 0.09188432]\n", - " [-0.16044928 -0.1571494 -0.18515183 0.09960561 0.03895786 0.09450045\n", - " -0.09821384 0.1681353 0.02855213 -0.17842196]\n", - " [-0.056282 0.11411482 0.04916727 -0.03420792 -0.15622441 -0.13909872\n", - " 0.19286813 -0.12808998 0.15845725 -0.07484471]\n", - " [ 0.00223508 -0.21774605 -0.07268656 0.18849593 -0.20075409 0.11251042\n", - " -0.188184 0.03261365 -0.20273004 -0.17701481]\n", - " [-0.18051723 -0.07753571 0.03044572 -0.16394225 0.05667006 0.13467607\n", - " 0.18228398 0.19799176 0.14722027 -0.06584404]\n", - " [-0.02060739 0.19784163 0.11123517 -0.05929887 0.16882291 -0.19541554\n", - " 0.1913779 0.12510933 -0.16400692 -0.18237662]\n", - " [ 0.17486629 0.22059093 0.01951262 -0.08737109 0.12732458 0.1008788\n", - 
" -0.0279066 0.17902343 0.14493623 0.05574536]\n", - " [ 0.11610299 -0.20945168 -0.10473937 0.02451142 0.06080827 -0.03056943\n", - " 0.08443112 0.06811719 -0.20665829 0.07052966]\n", - " [-0.01818041 -0.15387398 0.00754629 -0.05499369 -0.11874414 -0.20375879\n", - " 0.18706112 -0.13579562 0.0300329 0.17913137]\n", - " [-0.02817055 -0.14655502 -0.21633011 0.03715306 -0.11219743 0.01630673\n", - " 0.07142475 -0.06335549 0.1516163 -0.02909804]\n", - " [-0.08923855 -0.14784832 0.06784268 -0.13824603 0.04700406 -0.02822138\n", - " 0.1536749 -0.10962173 -0.11015368 -0.02889775]\n", - " [-0.13657494 0.08524874 -0.08190698 0.09174035 0.12977527 0.13057181\n", - " -0.04105001 0.12203032 -0.11840606 -0.22279048]] , 7\n", - "-------------------------\n", - "0.layers.0.0.cell_gate_params.hidden_weight.weight\n", - "(20, 20)\n", - "[[-2.12806370e-02 -1.62129834e-01 -1.73234463e-01 5.68399914e-02\n", - " 1.91077381e-01 -8.79967287e-02 -1.26489419e-02 -1.62001878e-01\n", - " 3.90813835e-02 6.37496263e-02 -3.43248062e-02 1.70126632e-01\n", - " -1.79964885e-01 -3.00010163e-02 -1.24117516e-01 1.96340203e-01\n", - " 1.89398184e-01 2.19951704e-01 2.05728129e-01 8.85609612e-02]\n", - " [-1.71218976e-01 -1.51676044e-01 5.36037646e-02 -1.99636862e-01\n", - " 1.41561761e-01 9.72114205e-02 5.33513576e-02 -1.95168942e-01\n", - " 1.62662312e-01 -2.36655492e-02 -9.38338637e-02 1.16747312e-01\n", - " 1.88960433e-02 -9.94693190e-02 5.23358434e-02 -1.49113968e-01\n", - " 2.07823291e-01 1.95990741e-01 1.03123404e-01 1.18294187e-01]\n", - " [-2.22277910e-01 -1.24300212e-01 -2.15169474e-01 -1.16545178e-01\n", - " -1.85386583e-01 1.64590582e-01 1.20638609e-01 1.31684974e-01\n", - " -9.92668644e-02 1.70430213e-01 -3.23111340e-02 -5.79339787e-02\n", - " 1.20397158e-01 1.48079455e-01 -1.60713032e-01 2.12880254e-01\n", - " -2.25685220e-02 5.95554635e-02 -2.22653463e-01 2.48931386e-02]\n", - " [-1.10666625e-01 -1.40009314e-01 -9.33616757e-02 -1.04158348e-03\n", - " -6.37013763e-02 -1.43241197e-01 
1.60099015e-01 6.65228367e-02\n", - " -2.08098441e-01 4.69054580e-02 5.49288094e-02 8.21655430e-03\n", - " 5.42974621e-02 -1.87213402e-02 9.77927893e-02 -1.57414630e-01\n", - " -9.53418463e-02 1.67505234e-01 -1.38533488e-01 1.09708525e-01]\n", - " [ 2.06897184e-01 -2.04468444e-01 -9.79631692e-02 1.90820277e-01\n", - " -1.35208331e-02 4.41430137e-02 3.18236202e-02 9.21481624e-02\n", - " -9.21330750e-02 2.90291384e-02 1.52316689e-01 -1.88640561e-02\n", - " -2.05149427e-01 7.72908777e-02 -5.70836812e-02 -4.71739881e-02\n", - " 1.16618834e-01 3.91878746e-02 -1.35271400e-01 -1.03187911e-01]\n", - " [-3.39903794e-02 -5.52454554e-02 -4.73374985e-02 -1.52837262e-01\n", - " 1.61986634e-01 1.15967356e-01 4.41279002e-02 5.06293550e-02\n", - " 2.61772387e-02 1.67198420e-01 5.05979806e-02 3.40624861e-02\n", - " -1.22919112e-01 7.45933205e-02 -2.09194586e-01 7.05230013e-02\n", - " -1.93819985e-01 -9.25445408e-02 1.18050657e-01 -1.33182898e-01]\n", - " [ 1.78052112e-01 -1.23547316e-01 2.11798310e-01 6.89183101e-02\n", - " -9.69009325e-02 1.36373073e-01 -1.98024541e-01 -1.41652852e-01\n", - " -1.40091866e-01 2.94355899e-02 2.19678022e-02 -1.92325816e-01\n", - " 2.15771765e-01 -2.13701205e-04 -1.19405292e-01 5.34111727e-03\n", - " -9.59839672e-02 6.16913289e-02 8.09477344e-02 -6.34285584e-02]\n", - " [ 1.30358534e-02 1.33047834e-01 -1.45440847e-01 -4.98616323e-02\n", - " -3.29875015e-02 -1.47941127e-01 1.82121564e-02 8.21812730e-03\n", - " -1.80613607e-01 4.58700024e-02 2.13425189e-01 1.18935056e-01\n", - " -1.21292830e-01 2.04682201e-01 -1.53705969e-01 -1.13691926e-01\n", - " 9.86314118e-02 1.77888468e-01 2.13384852e-01 1.92508563e-01]\n", - " [-1.23128124e-01 5.11671938e-02 -1.40405849e-01 4.93797194e-03\n", - " 1.85259327e-01 1.10102132e-01 -2.06472665e-01 -9.62342396e-02\n", - " -1.88666239e-01 1.05334759e-01 -2.83857696e-02 -1.63461700e-01\n", - " -7.14522004e-02 7.33797774e-02 2.07014289e-02 2.09811881e-01\n", - " -2.96870619e-03 7.03370497e-02 -6.77365363e-02 
2.66825557e-02]\n", - " [ 8.01036973e-03 1.92074046e-01 9.36935991e-02 -1.27431735e-01\n", - " -1.98687479e-01 -2.12748200e-01 -8.12046453e-02 2.89045740e-02\n", - " 2.10361689e-01 -2.19703875e-02 8.74281824e-02 1.13642633e-01\n", - " -1.71282887e-01 -1.84971020e-01 8.47281963e-02 1.04225203e-01\n", - " -1.04119189e-01 3.50410007e-02 -2.18935862e-01 2.81849946e-03]\n", - " [ 5.48111200e-02 2.11656699e-03 -3.54930870e-02 9.30717662e-02\n", - " -6.14620335e-02 1.66451484e-01 -1.92599118e-01 -1.27790585e-01\n", - " -1.86674312e-01 -2.02230543e-01 1.65771663e-01 -5.53366169e-02\n", - " -1.75649151e-01 4.63781990e-02 -1.69327542e-01 1.15589779e-02\n", - " 1.06298663e-01 -4.72831465e-02 1.14950888e-01 4.58941013e-02]\n", - " [-1.79431096e-01 4.40098420e-02 1.44146204e-01 -5.18364720e-02\n", - " 2.11329088e-02 2.85264328e-02 1.92284174e-02 5.81263304e-02\n", - " -2.14094386e-01 1.69653893e-01 9.75249708e-02 2.76133306e-02\n", - " 4.06875163e-02 -1.80331707e-01 -6.38444126e-02 -9.72616393e-03\n", - " 5.31534106e-02 -1.22661509e-01 2.37256587e-02 -6.93958476e-02]\n", - " [ 1.62758812e-01 -1.91935405e-01 2.33742520e-02 1.51492402e-01\n", - " -1.73671409e-01 -6.40887721e-03 1.03327051e-01 9.02309865e-02\n", - " 2.62962040e-02 9.03898776e-02 -1.55875593e-01 1.86238810e-01\n", - " 4.98715229e-03 1.44541100e-01 4.94662710e-02 -2.48756800e-02\n", - " 9.57791656e-02 2.12270051e-01 2.20569506e-01 -1.88220173e-01]\n", - " [ 1.35616167e-02 -1.60633817e-01 1.30284145e-01 1.60526067e-01\n", - " -1.57016143e-01 -1.29234986e-02 1.54731110e-01 1.47872686e-01\n", - " -1.68123141e-01 1.50136366e-01 -3.95872369e-02 -1.90171361e-01\n", - " 4.45422679e-02 1.04169942e-01 1.34101674e-01 -1.52035385e-01\n", - " -1.61954522e-01 -1.50239438e-01 1.26720712e-01 -1.95428118e-01]\n", - " [-1.88556593e-03 -6.57092705e-02 9.76277590e-02 4.39127870e-02\n", - " -1.12915963e-01 3.90566476e-02 2.05778107e-01 3.68154384e-02\n", - " -1.10807024e-01 7.48633966e-03 -2.05102757e-01 -1.43465236e-01\n", - " 
-4.15345095e-02 -1.39340952e-01 1.89353585e-01 4.34043780e-02\n", - " 1.73192978e-01 -5.09172641e-02 -3.10981516e-02 5.64037636e-02]\n", - " [-6.64871484e-02 -7.62214959e-02 -2.19352797e-01 1.68453470e-01\n", - " 2.02370644e-01 -2.21398085e-01 -7.39822015e-02 -1.69133484e-01\n", - " -9.07677040e-02 1.70234248e-01 1.19611956e-01 -1.73501018e-02\n", - " 9.55028459e-02 6.67780936e-02 1.22115597e-01 -1.79690495e-01\n", - " 6.91184700e-02 -2.11776465e-01 -1.47058472e-01 -8.33279863e-02]\n", - " [-2.17858739e-02 -2.11018786e-01 5.56494808e-03 3.57002839e-02\n", - " -8.87419507e-02 7.25275800e-02 1.95392817e-01 -3.81953120e-02\n", - " -1.19088188e-01 -1.98077247e-01 -1.63278311e-01 -1.23674117e-01\n", - " -1.65306747e-01 -8.79110843e-02 1.23181596e-01 6.99715093e-02\n", - " 2.01542184e-01 2.22007304e-01 -8.05223361e-02 -8.75686854e-02]\n", - " [ 3.05994693e-02 -1.78054109e-01 1.21623978e-01 -4.02442813e-02\n", - " -1.87232435e-01 -1.68819025e-01 -1.54080361e-01 6.14588112e-02\n", - " 1.71410367e-01 1.77153081e-01 -6.15712442e-02 -1.29883334e-01\n", - " -9.92444977e-02 -1.52750149e-01 -5.76506779e-02 -2.01948732e-01\n", - " 1.19517274e-01 -2.10457653e-01 -1.39095634e-01 1.50062576e-01]\n", - " [-1.67259946e-01 5.34564890e-02 1.67486787e-01 2.20412284e-01\n", - " 1.13142729e-01 -6.00084551e-02 1.27776846e-01 -7.37963570e-03\n", - " -6.89469650e-02 7.28242099e-04 5.01570366e-02 1.49932787e-01\n", - " 9.38621163e-02 1.06770106e-01 3.34510244e-02 -1.12544857e-02\n", - " 9.38917845e-02 5.37824407e-02 -2.13967159e-01 3.61516774e-02]\n", - " [-9.93019715e-02 -1.18578210e-01 8.64755288e-02 4.57250476e-02\n", - " 3.78663242e-02 -1.06075369e-01 1.03322893e-01 2.09839717e-01\n", - " 2.73554083e-02 9.19082835e-02 -1.96176514e-01 1.32933155e-01\n", - " 7.76783228e-02 1.00741126e-01 9.32467878e-02 -5.88140823e-02\n", - " -1.34220198e-02 2.16287613e-01 1.63621128e-01 -1.60278752e-01]] , 8\n", - "-------------------------\n", - "0.layers.0.0.output_gate_params.bias\n", - "(20,)\n", - "[ 
0.17741492 0.22254053 0.02940683 -0.17445402 0.04334408 -0.04515981\n", - " 0.16077036 -0.21483785 0.05722176 -0.00262266 0.01760296 0.15381731\n", - " 0.0040394 -0.18002152 -0.13043821 -0.08953302 0.02384774 0.08628984\n", - " -0.04173774 -0.08825271] , 9\n", - "-------------------------\n", - "0.layers.0.0.output_gate_params.input_weight.weight\n", - "(20, 10)\n", - "[[ 9.81200710e-02 -2.17414662e-01 1.56252235e-01 -2.59936582e-02\n", - " 1.55592158e-01 1.68960407e-01 2.38872208e-02 7.07329437e-02\n", - " -1.26473457e-01 1.60210714e-01]\n", - " [ 1.30875960e-01 -3.51194218e-02 8.71568248e-02 -1.25249382e-02\n", - " 1.74701765e-01 9.20466036e-02 1.63019851e-01 -2.03253865e-01\n", - " 2.17866078e-01 8.33117217e-02]\n", - " [ 1.08713590e-01 4.98261265e-02 1.46862045e-01 2.10508242e-01\n", - " -1.90491565e-02 -1.83473915e-01 2.05329910e-01 -4.71567698e-02\n", - " -1.07840233e-01 1.37649149e-01]\n", - " [ 1.24790154e-01 2.99369618e-02 -1.40363071e-02 -4.27761748e-02\n", - " 2.05027208e-01 1.36240214e-01 1.33165866e-01 1.42589167e-01\n", - " -1.17026694e-01 4.66880240e-02]\n", - " [-1.93439931e-01 1.29910931e-01 -2.21640781e-01 -2.23473564e-01\n", - " -2.21031293e-01 1.37891039e-01 2.32707467e-02 5.08490019e-04\n", - " 3.55657227e-02 -8.46242681e-02]\n", - " [-6.79011941e-02 -1.50619775e-01 -5.46085611e-02 -1.37593433e-01\n", - " 5.88322058e-03 1.75689265e-01 -1.84854001e-01 1.09963417e-01\n", - " -1.66318297e-01 -9.26456451e-02]\n", - " [ 4.37250473e-02 3.84753868e-02 1.83374569e-01 -8.36465479e-05\n", - " -8.51647705e-02 -9.24766734e-02 6.55569835e-03 -1.67666823e-01\n", - " -1.75320774e-01 -9.56731290e-02]\n", - " [ 5.74407633e-03 -1.51010871e-01 -1.27642184e-01 1.59654185e-01\n", - " 2.06639260e-01 -7.00415373e-02 -1.91840678e-01 -8.56086463e-02\n", - " 9.02482048e-02 7.25704432e-02]\n", - " [-6.93180412e-02 -1.96934849e-01 -6.72358871e-02 -4.99973148e-02\n", - " 1.28766835e-01 -1.10879898e-01 1.34200945e-01 3.10183968e-02\n", - " -3.74761075e-02 
-1.99273914e-01]\n", - " [ 2.20759660e-01 -3.98728549e-02 1.40693069e-01 -1.15664735e-01\n", - " -2.17755169e-01 -1.78237423e-01 -1.14595190e-01 -7.12116584e-02\n", - " -3.15762796e-02 1.86491266e-01]\n", - " [-2.06223264e-01 1.11605875e-01 1.88149154e-01 1.43918453e-03\n", - " -1.39450610e-01 7.15188682e-03 5.30482270e-02 9.89372358e-02\n", - " -6.79695681e-02 -7.67354444e-02]\n", - " [-1.05491146e-01 -2.16275647e-01 7.85326734e-02 -1.69050053e-01\n", - " -1.07421041e-01 -2.30107992e-03 1.72379389e-01 1.98816836e-01\n", - " -1.62642673e-01 1.93931282e-01]\n", - " [ 2.00302720e-01 1.80637628e-01 1.94676816e-02 1.79588884e-01\n", - " 1.08642928e-01 -1.60451204e-01 -1.17858045e-01 4.20530513e-03\n", - " -1.58465564e-01 -7.36296773e-02]\n", - " [ 1.80281103e-01 1.04106739e-01 1.94734529e-01 1.71422120e-03\n", - " -1.14017285e-01 1.47993699e-01 1.64847951e-02 3.76562215e-02\n", - " -9.47417393e-02 9.18511599e-02]\n", - " [-1.65143967e-01 1.78432971e-01 1.95620790e-01 8.06822702e-02\n", - " 1.74128443e-01 1.35722205e-01 -8.53993148e-02 -1.93941638e-01\n", - " 2.94244476e-02 1.40397370e-01]\n", - " [-2.28753053e-02 1.88145563e-02 1.65735826e-01 9.23255607e-02\n", - " 1.67166159e-01 3.28338295e-02 2.50651501e-02 -5.34861833e-02\n", - " -3.77333388e-02 -1.18839331e-01]\n", - " [ 1.49498299e-01 2.03940362e-01 8.29838291e-02 6.35351241e-03\n", - " -7.38137364e-02 -2.20774114e-01 -4.14042696e-02 -1.58739850e-01\n", - " -1.65080443e-01 -4.42778133e-02]\n", - " [-4.39881422e-02 4.51072417e-02 -1.62074581e-01 1.60696968e-01\n", - " -2.03583151e-01 -1.05898663e-01 -8.48927200e-02 1.37860607e-02\n", - " 9.24347416e-02 -5.89275286e-02]\n", - " [ 3.48980725e-02 -5.29355779e-02 -8.79468024e-02 -3.12774107e-02\n", - " 4.50214110e-02 -2.17200696e-01 -1.55640006e-01 1.74693078e-01\n", - " 1.01111621e-01 -5.97870257e-03]\n", - " [ 7.06157601e-03 3.08655780e-02 5.19711897e-02 -1.52664930e-01\n", - " -6.09524250e-02 -2.05220923e-01 -1.75796479e-01 -4.20728028e-02\n", - " -2.95243543e-02 
1.74893185e-01]] , 10\n", - "-------------------------\n", - "0.layers.0.0.output_gate_params.hidden_weight.weight\n", - "(20, 20)\n", - "[[ 0.03851524 -0.03625689 -0.00619491 0.12488268 -0.06773603 -0.0418019\n", - " -0.04485707 -0.18031046 -0.03125188 -0.20671144 -0.12019279 -0.14232881\n", - " 0.16657048 -0.20598304 0.21545227 0.08384079 -0.15111198 0.18525589\n", - " -0.0492739 -0.18939163]\n", - " [-0.03105276 0.11050874 -0.21741039 -0.01675669 0.09098183 -0.08714523\n", - " 0.02036562 -0.0876366 -0.15001732 0.17511557 -0.1587715 -0.00262151\n", - " 0.07447443 -0.12496222 0.10796666 -0.18569624 0.21355589 0.09958527\n", - " -0.03165689 -0.18600492]\n", - " [ 0.00689578 0.0793154 -0.12144296 -0.02816021 -0.22284126 -0.22354037\n", - " -0.02428471 0.187102 -0.01052416 0.07010341 -0.08937916 -0.07301357\n", - " -0.02457852 -0.11304034 0.13682817 0.13944101 -0.17383203 0.06858449\n", - " -0.09237309 -0.12858376]\n", - " [-0.02727968 -0.0693544 -0.12731954 0.03295429 0.12762886 -0.03450404\n", - " -0.01564156 0.01682661 -0.09610138 0.11838 0.2063172 -0.02043679\n", - " 0.01520035 0.18016809 0.18314716 -0.16634111 -0.10355289 -0.21934243\n", - " 0.13695723 0.17452586]\n", - " [-0.08138426 0.07172713 0.05416519 -0.19238184 0.0892937 0.10971964\n", - " 0.00491766 0.02293088 0.05196048 0.16108814 0.19757238 0.03213832\n", - " 0.09531388 -0.05850127 0.13331535 -0.08795608 -0.18431664 0.1049106\n", - " 0.08293276 0.0492176 ]\n", - " [ 0.09513766 0.02660845 0.0761021 0.09111597 -0.12062387 -0.01198089\n", - " 0.03369791 -0.03394864 -0.188005 0.02121117 0.13665509 -0.11958458\n", - " 0.21953909 0.0509951 0.09510146 -0.08634473 -0.18291326 -0.08321758\n", - " 0.00683159 -0.10189173]\n", - " [ 0.19913672 -0.14311586 -0.15060481 -0.0793146 0.20060927 -0.10224532\n", - " 0.20686573 0.10745841 -0.03397548 0.11565119 0.10630453 -0.11381406\n", - " -0.04603498 0.21659105 0.12819836 -0.10921414 -0.0601254 0.12532982\n", - " 0.11351746 0.01772486]\n", - " [-0.14387828 -0.16492477 
-0.04719649 0.08221286 -0.02383876 -0.18695372\n", - " -0.05480145 0.22319667 -0.18481532 -0.17354017 0.14056584 0.22249034\n", - " -0.21510145 -0.20223859 -0.06991865 0.22294378 -0.1269095 0.01911828\n", - " 0.18253623 -0.0791588 ]\n", - " [-0.06857247 -0.15009233 0.0085855 0.20870976 0.0914357 0.157171\n", - " -0.01481424 -0.03551737 -0.03994827 0.12753342 -0.02932107 -0.19100396\n", - " -0.07851914 0.08750965 0.21801063 -0.04065894 -0.19468635 -0.16464569\n", - " -0.1759353 0.09013668]\n", - " [ 0.16482699 0.06612828 0.07709847 0.14567545 0.15288451 0.13352284\n", - " 0.12504087 0.06050573 0.11541758 -0.1534312 -0.14473058 0.06013739\n", - " 0.03479816 -0.19657765 -0.16289718 -0.17800786 0.17759389 0.14619377\n", - " -0.11769552 0.033738 ]\n", - " [-0.05143119 0.19438726 -0.20252845 -0.16313015 -0.18616724 0.13013433\n", - " -0.11177826 0.13318242 0.07558636 -0.10929734 -0.06023749 -0.09048979\n", - " 0.09864956 -0.08967353 0.07588523 0.01597441 -0.17857382 -0.1405619\n", - " -0.1550431 0.1171688 ]\n", - " [ 0.0484514 -0.00562237 -0.1331447 -0.22155127 -0.07913139 -0.17113578\n", - " -0.22241357 -0.21326728 -0.14605871 -0.21737726 0.069704 0.08366753\n", - " 0.0901287 -0.22259942 0.13826938 0.04359518 0.11433873 -0.05495736\n", - " 0.10737925 -0.21207204]\n", - " [ 0.0761621 0.17731208 0.09399657 -0.21077465 -0.06277167 -0.02776839\n", - " 0.11715963 -0.08461329 0.03216063 -0.07849736 -0.03552182 -0.00445118\n", - " -0.1283987 -0.15520401 0.1845957 0.18787426 -0.00676964 0.19354711\n", - " 0.17230819 -0.14084579]\n", - " [-0.08885217 -0.15358365 0.07229424 0.00565505 -0.03066478 0.16602065\n", - " -0.08740129 -0.12237797 -0.15895672 -0.11375529 0.21551864 -0.10871551\n", - " -0.06152614 0.10078279 -0.17173737 -0.13572007 0.16457646 -0.08576282\n", - " -0.1160312 -0.02892987]\n", - " [-0.03186222 0.04086494 0.08197901 -0.17241116 0.2032053 -0.21259488\n", - " 0.07573222 -0.06309208 -0.09442816 0.20916638 -0.2154794 0.01527144\n", - " 0.1432838 0.19990316 
-0.18904059 0.02694101 0.22123207 -0.21902935\n", - " 0.0546164 -0.14010552]\n", - " [ 0.03629959 -0.20227122 0.11001531 -0.04960475 0.13363701 -0.0033625\n", - " -0.03187283 -0.05428797 -0.2047436 -0.09497944 0.00742607 -0.1729926\n", - " 0.19623755 -0.14542621 -0.08711543 -0.02990268 -0.1811355 -0.00176668\n", - " -0.10767633 -0.1871676 ]\n", - " [ 0.00548474 0.19795649 0.05506302 0.18442854 -0.0021867 -0.07804751\n", - " 0.1802177 -0.11907462 -0.20685978 0.0489392 0.11143997 -0.13366425\n", - " 0.07870162 -0.07933193 -0.02713096 -0.04951058 -0.04782786 -0.18194063\n", - " 0.05480235 -0.05881837]\n", - " [ 0.17097771 0.03732251 -0.18287036 -0.17010981 -0.11653572 0.10708019\n", - " -0.14437075 -0.10229405 0.04059571 -0.15502611 -0.11010965 0.20276332\n", - " -0.11821949 -0.07449946 0.1599237 0.05010674 0.17550889 -0.19699533\n", - " 0.11176885 -0.03420243]\n", - " [-0.14325288 -0.09576999 -0.21628909 0.15468563 -0.04290593 -0.2192564\n", - " 0.19123225 0.14483131 0.09245753 0.21885075 0.20192903 0.20897363\n", - " 0.2002456 0.18172018 0.05853782 -0.01872608 0.00850361 -0.09292599\n", - " 0.10506337 0.00647802]\n", - " [ 0.05275466 -0.14403579 -0.08419433 0.16763861 0.02174832 0.07716487\n", - " -0.1952104 -0.09575427 -0.00569092 -0.0234643 0.14273825 -0.06748112\n", - " 0.18662164 -0.04324729 0.08697162 -0.15742545 0.03795354 -0.21800253\n", - " -0.19185208 -0.14310952]] , 11\n", - "-------------------------\n", - "/0/layers.0.0/output_quant/export_handler/Constant_output_0\n", - "()\n", - "0.0078125 , 12\n", - "-------------------------\n", - "/0/layers.0.0/output_quant/export_handler/Constant_1_output_0\n", - "()\n", - "0 , 13\n", - "-------------------------\n", - "/0/layers.0.0/output_quant/export_handler/Constant_2_output_0\n", - "()\n", - "8.0 , 14\n", - "-------------------------\n", - "/0/layers.0.0/input_weight/weight_quant/export_handler/Constant_output_0\n", - "()\n", - "0.001760039 , 15\n", - "-------------------------\n", - 
"/0/layers.0.0/input_weight/weight_quant/export_handler/Constant_1_output_0\n", - "()\n", - "-127 , 16\n", - "-------------------------\n", - "/0/layers.0.0/input_weight/weight_quant/export_handler/Constant_2_output_0\n", - "()\n", - "127 , 17\n", - "-------------------------\n", - "/0/layers.0.0/input_weight/weight_quant/export_handler_1/Constant_output_0\n", - "()\n", - "0.0017542557 , 18\n", - "-------------------------\n", - "/0/layers.0.0/input_weight/weight_quant/export_handler_2/Constant_output_0\n", - "()\n", - "0.0017601603 , 19\n", - "-------------------------\n", - "/0/layers.0.0/input_weight/weight_quant/export_handler_3/Constant_output_0\n", - "()\n", - "0.0017546351 , 20\n", - "-------------------------\n", - "onnx.brevitas::QuantLSTMCell_48\n", - "(1, 20)\n", - "[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] , 21\n", - "-------------------------\n", - "/0/layers.0.0/export_handler/Constant_output_0\n", - "()\n", - "0.003921569 , 22\n", - "-------------------------\n", - "/0/layers.0.0/export_handler/Constant_1_output_0\n", - "()\n", - "0 , 23\n", - "-------------------------\n", - "/0/layers.0.0/Constant_output_0\n", - "(1,)\n", - "[0] , 24\n", - "-------------------------\n", - "/0/layers.0.0/Constant_1_output_0\n", - "(1,)\n", - "[0] , 25\n", - "-------------------------\n" - ] - } - ], + "outputs": [], "source": [ "# In this block of code we store all the parameters (weight matrices, recurrence matrices, biases, scales and zero-points) that we will need to import in the QCDQ implementation.\n", "# Importing the exported quantized model from brevitas\n", @@ -1190,7 +226,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "02fe4d94-af24-4d5e-a809-7d8c49e7fd90", "metadata": {}, "outputs": [], @@ -1239,7 +275,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "02761646-4c6d-440f-8e90-4935beebab56", "metadata": {}, "outputs": [], @@ -1257,7 +293,7 @@ }, { 
"cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "c08e5a23-ef2e-4bca-9293-c800350c2c62", "metadata": {}, "outputs": [], @@ -1384,7 +420,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "79839558-8752-4fc8-9b0e-8fed47c91701", "metadata": {}, "outputs": [], @@ -1604,28 +640,10 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "c6ec7b2a-456d-4452-97ec-df9a471d5391", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving './lstm_full_graph.onnx' at http://localhost:8080\n" - ] - }, - { - "data": { - "text/plain": [ - "('localhost', 8080)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "lstm_model = qonnx_make_model(lstm_body, producer_name=\"QuantizeLSTM_scan\")\n", "onnx.save(lstm_model, './lstm_full_graph.onnx')\n", @@ -1642,83 +660,10 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "db5892bc-ac8d-4972-afcf-20bf880f5e86", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[array([[ 0.1484375],\n", - " [-0.0078125],\n", - " [ 0.0390625],\n", - " [ 0.140625 ],\n", - " [ 0.015625 ],\n", - " [ 0. 
],\n", - " [ 0.1015625],\n", - " [-0.1015625],\n", - " [ 0.0390625],\n", - " [-0.0625 ],\n", - " [ 0.015625 ],\n", - " [-0.125 ],\n", - " [ 0.1015625],\n", - " [ 0.03125 ],\n", - " [ 0.1640625],\n", - " [-0.015625 ],\n", - " [-0.0234375],\n", - " [-0.015625 ],\n", - " [-0.046875 ],\n", - " [ 0.0078125]], dtype=float32), array([[ 0.2421875],\n", - " [-0.0078125],\n", - " [ 0.0625 ],\n", - " [ 0.2421875],\n", - " [ 0.03125 ],\n", - " [ 0.0078125],\n", - " [ 0.2265625],\n", - " [-0.234375 ],\n", - " [ 0.0859375],\n", - " [-0.1328125],\n", - " [ 0.0390625],\n", - " [-0.2421875],\n", - " [ 0.1875 ],\n", - " [ 0.0546875],\n", - " [ 0.296875 ],\n", - " [-0.03125 ],\n", - " [-0.0546875],\n", - " [-0.03125 ],\n", - " [-0.109375 ],\n", - " [ 0.0234375]], dtype=float32), array([[ 0.1484375],\n", - " [-0.0078125],\n", - " [ 0.0390625],\n", - " [ 0.140625 ],\n", - " [ 0.015625 ],\n", - " [ 0. ],\n", - " [ 0.1015625],\n", - " [-0.1015625],\n", - " [ 0.0390625],\n", - " [-0.0625 ],\n", - " [ 0.015625 ],\n", - " [-0.125 ],\n", - " [ 0.1015625],\n", - " [ 0.03125 ],\n", - " [ 0.1640625],\n", - " [-0.015625 ],\n", - " [-0.0234375],\n", - " [-0.015625 ],\n", - " [-0.046875 ],\n", - " [ 0.0078125]], dtype=float32)]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-20 11:07:46.350885612 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'scale_test'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 11:07:46.370978980 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'scale_test'. It is not used by any node and should be removed from the model.\n" - ] - } - ], + "outputs": [], "source": [ "# Before the model can be executed, it'd opset version needs to be set to a minimum of '14' to accomodate clip nodes with INT8 and UINT8 input. 
Otherwise ONNX cannot create an execution session and we get errors.\n", "lstm_model.opset_import[0].version = 14\n", @@ -1787,7 +732,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "700a93a8-f757-4fa1-88dd-47a3f2a7f171", "metadata": {}, "outputs": [], @@ -1814,7 +759,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "111fdce4-464f-40c1-ac4d-3022b05f153e", "metadata": {}, "outputs": [], @@ -1838,19 +783,10 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "4668cf2b-524e-4768-8dc8-9d619f6273da", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving './lstm_scan_node_model.onnx' at http://localhost:8081\n", - "[]\n" - ] - } - ], + "outputs": [], "source": [ "scan_lstm_node_graph = make_graph(\n", " nodes = [scan_node_lstm],\n", @@ -1882,608 +818,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "818d2a81-686f-4a4a-8e78-17dbf75d8451", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Final Hidden State [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "------------------------\n", - "Final Cell State [[ 0.421875 ]\n", - " [-0.078125 ]\n", - " [ 0.0234375]\n", - " [ 0.4921875]\n", - " [ 0.1484375]\n", - " [-0.09375 ]\n", - " [ 0.75 ]\n", - " [-0.59375 ]\n", - " [ 0.1171875]\n", - " [-0.3125 ]\n", - " [ 0.0390625]\n", - " [-0.421875 ]\n", - " [ 0.3984375]\n", - " [ 0.2578125]\n", - " [ 0.828125 ]\n", - " [ 0.0625 ]\n", - " [-0.0703125]\n", - " [-0.109375 ]\n", - " 
[-0.1484375]\n", - " [ 0.0234375]]\n", - "------------------------\n", - "All Hidden States [[[ 0.1484375]\n", - " [-0.0078125]\n", - " [ 0.0390625]\n", - " [ 0.140625 ]\n", - " [ 0.015625 ]\n", - " [ 0. ]\n", - " [ 0.1015625]\n", - " [-0.1015625]\n", - " [ 0.0390625]\n", - " [-0.0625 ]\n", - " [ 0.015625 ]\n", - " [-0.125 ]\n", - " [ 0.1015625]\n", - " [ 0.03125 ]\n", - " [ 0.1640625]\n", - " [-0.015625 ]\n", - " [-0.0234375]\n", - " [-0.015625 ]\n", - " [-0.046875 ]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.203125 ]\n", - " [-0.0234375]\n", - " [ 0.03125 ]\n", - " [ 0.2109375]\n", - " [ 0.0234375]\n", - " [-0.015625 ]\n", - " [ 0.1875 ]\n", - " [-0.1484375]\n", - " [ 0.046875 ]\n", - " [-0.09375 ]\n", - " [ 0.0234375]\n", - " [-0.1640625]\n", - " [ 0.1484375]\n", - " [ 0.0703125]\n", - " [ 0.2578125]\n", - " [-0.015625 ]\n", - " [-0.03125 ]\n", - " [-0.0234375]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.2265625]\n", - " [-0.03125 ]\n", - " [ 0.015625 ]\n", - " [ 0.2421875]\n", - " [ 0.03125 ]\n", - " [-0.0234375]\n", - " [ 0.234375 ]\n", - " [-0.1796875]\n", - " [ 0.0546875]\n", - " [-0.109375 ]\n", - " [ 0.0234375]\n", - " [-0.1875 ]\n", - " [ 0.1796875]\n", - " [ 0.09375 ]\n", - " [ 0.2734375]\n", - " [ 0. 
]\n", - " [-0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.0703125]\n", - " [ 0.015625 ]]\n", - "\n", - " [[ 0.234375 ]\n", - " [-0.0390625]\n", - " [ 0.015625 ]\n", - " [ 0.2578125]\n", - " [ 0.0390625]\n", - " [-0.03125 ]\n", - " [ 0.25 ]\n", - " [-0.1875 ]\n", - " [ 0.0546875]\n", - " [-0.125 ]\n", - " [ 0.015625 ]\n", - " [-0.1953125]\n", - " [ 0.1953125]\n", - " [ 0.1171875]\n", - " [ 0.2734375]\n", - " [ 0.015625 ]\n", - " [-0.03125 ]\n", - " [-0.0390625]\n", - " [-0.078125 ]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.2421875]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0390625]\n", - " [-0.03125 ]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.1328125]\n", - " [ 0.015625 ]\n", - " [-0.1953125]\n", - " [ 0.203125 ]\n", - " [ 0.1328125]\n", - " [ 0.2734375]\n", - " [ 0.0234375]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.078125 ]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.2421875]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.046875 ]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.1328125]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.2421875]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.046875 ]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.2421875]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " 
[ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.2421875]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", 
- " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - 
" [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 
]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]\n", - "\n", - " [[ 0.25 ]\n", - " [-0.046875 ]\n", - " [ 0.015625 ]\n", - " [ 0.2734375]\n", - " [ 0.0546875]\n", - " [-0.0390625]\n", - " [ 0.25 ]\n", - " [-0.1953125]\n", - " [ 0.0546875]\n", - " [-0.140625 ]\n", - " [ 0.015625 ]\n", - " [-0.203125 ]\n", - " [ 0.203125 ]\n", - " [ 0.140625 ]\n", - " [ 0.2734375]\n", - " [ 0.03125 ]\n", - " [-0.03125 ]\n", - " [-0.046875 ]\n", - " [-0.0703125]\n", - " [ 0.0078125]]]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2023-10-20 10:50:38.892379706 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'scale_test'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894726380 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_uo_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894741924 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wf_out'. 
It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894750521 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_ui_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894758793 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'max'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894767212 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_c'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894775093 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_c'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894782542 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_i'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894790413 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_uc_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894797986 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_i'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894805922 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wi_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894813725 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_o'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894821378 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_f'. 
It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894829187 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'W_o'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894837744 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_uf_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894845343 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wc_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894852862 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'U_f'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894861070 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'ql_wo_out'. It is not used by any node and should be removed from the model.\n", - "2023-10-20 10:50:38.894868719 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'min'. 
It is not used by any node and should be removed from the model.\n" - ] - } - ], + "outputs": [], "source": [ "# Defining the values of the varibales to test the execution of the scan model\n", "num_inputs = 25\n", @@ -2532,344 +870,10 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "2fe07395-6cf9-4c99-a0d3-a27aa6a326b5", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Brevitas Output shape : (25, 1, 20)\n", - "SCAN-QCDQ-LSTM output shape : (25, 1, 20)\n", - "-----------------------------------\n", - "Brevitas Output = [[[ 0.1484375 -0.0078125 0.0390625 0.140625 0.0078125 0.\n", - " 0.109375 -0.09375 0.0390625 -0.0625 0.015625 -0.1171875\n", - " 0.1015625 0.03125 0.1640625 -0.015625 -0.0234375 -0.015625\n", - " -0.046875 0.0078125]]\n", - "\n", - " [[ 0.2109375 -0.0234375 0.03125 0.2109375 0.0234375 -0.015625\n", - " 0.1875 -0.1484375 0.046875 -0.09375 0.0234375 -0.1640625\n", - " 0.1484375 0.0625 0.2578125 -0.015625 -0.03125 -0.0234375\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2421875 -0.0390625 0.015625 0.25 0.03125 -0.0234375\n", - " 0.234375 -0.1796875 0.0546875 -0.109375 0.015625 -0.1875\n", - " 0.1796875 0.09375 0.3125 0. 
-0.03125 -0.03125\n", - " -0.078125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.0390625 0.015625 0.265625 0.0390625 -0.03125\n", - " 0.265625 -0.1875 0.0546875 -0.125 0.015625 -0.1953125\n", - " 0.1953125 0.1171875 0.3359375 0.015625 -0.03125 -0.0390625\n", - " -0.078125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", - " 0.2890625 -0.1953125 0.0546875 -0.125 0.015625 -0.203125\n", - " 0.203125 0.125 0.359375 0.0234375 -0.03125 -0.046875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.2734375 0.046875 -0.0390625\n", - " 0.296875 -0.1953125 0.0546875 -0.1328125 0.015625 -0.203125\n", - " 0.2109375 0.1328125 0.3671875 0.03125 -0.0234375 -0.046875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.015625 0.28125 0.0546875 -0.046875\n", - " 0.3046875 -0.1953125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.2109375 0.140625 0.375 0.0390625 -0.0234375 -0.046875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.0546875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.2109375 0.140625 0.3828125 0.0390625 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.2109375 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.0390625 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 
0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 
-0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.2578125 -0.046875 0.0078125 0.28125 0.0546875 -0.046875\n", - " 0.3125 -0.203125 0.0546875 -0.140625 0.0078125 -0.203125\n", - " 0.21875 0.1484375 0.390625 0.046875 -0.015625 -0.0546875\n", - " -0.0703125 0.015625 ]]]\n", - "-----------------------------------\n", - "SCAN-QCDQ-LSTM output [[[ 0.1484375 -0.0078125 0.0390625 0.140625 0.015625 0.\n", - " 0.1015625 -0.1015625 0.0390625 -0.0625 0.015625 -0.125\n", - " 0.1015625 0.03125 0.1640625 -0.015625 -0.0234375 -0.015625\n", - " -0.046875 0.0078125]]\n", - "\n", - " [[ 0.203125 -0.0234375 0.03125 0.2109375 0.0234375 -0.015625\n", - " 0.1875 -0.1484375 0.046875 -0.09375 0.0234375 -0.1640625\n", - " 0.1484375 0.0703125 0.2578125 -0.015625 -0.03125 -0.0234375\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.2265625 -0.03125 0.015625 0.2421875 0.03125 -0.0234375\n", - " 0.234375 -0.1796875 0.0546875 -0.109375 0.0234375 -0.1875\n", - " 0.1796875 0.09375 
0.2734375 0. -0.03125 -0.03125\n", - " -0.0703125 0.015625 ]]\n", - "\n", - " [[ 0.234375 -0.0390625 0.015625 0.2578125 0.0390625 -0.03125\n", - " 0.25 -0.1875 0.0546875 -0.125 0.015625 -0.1953125\n", - " 0.1953125 0.1171875 0.2734375 0.015625 -0.03125 -0.0390625\n", - " -0.078125 0.0078125]]\n", - "\n", - " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.0390625 -0.03125\n", - " 0.25 -0.1953125 0.0546875 -0.1328125 0.015625 -0.1953125\n", - " 0.203125 0.1328125 0.2734375 0.0234375 -0.03125 -0.046875\n", - " -0.078125 0.0078125]]\n", - "\n", - " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.1328125 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.046875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.2421875 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 
-0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " 
-0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]\n", - "\n", - " [[ 0.25 -0.046875 0.015625 0.2734375 0.0546875 -0.0390625\n", - " 0.25 -0.1953125 0.0546875 -0.140625 0.015625 -0.203125\n", - " 0.203125 0.140625 0.2734375 0.03125 -0.03125 -0.046875\n", - " -0.0703125 0.0078125]]]\n", - "-----------------------------------\n", - "[[[ 0. 0. 0. 0. 1. 0. -1. -1. 0. 0. 0. -1. 0. 0.\n", - " 0. 0. 0. 0. 0. 0.]]\n", - "\n", - " [[ -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.\n", - " 0. 0. 0. 0. 0. -1.]]\n", - "\n", - " [[ -2. 1. 0. -1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.\n", - " -5. 0. 0. 0. 1. 1.]]\n", - "\n", - " [[ -2. 0. 0. -1. 0. 0. -2. 0. 0. 0. 0. 0. 0. 0.\n", - " -8. 0. 0. 0. 0. -1.]]\n", - "\n", - " [[ -2. 0. 0. 0. -1. 1. -5. 0. 0. -1. 0. 1. 0. 1.\n", - " -11. 0. 0. 0. -1. -1.]]\n", - "\n", - " [[ -2. 0. 1. 0. 0. 0. -6. 0. 0. -1. 0. 0. -1. 0.\n", - " -12. 0. -1. 0. 0. -1.]]\n", - "\n", - " [[ -2. 0. 0. -1. -1. 1. -7. 0. 0. 0. 1. 0. -1. 0.\n", - " -13. -1. -1. 0. 0. -1.]]\n", - "\n", - " [[ -2. 1. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -1. 0.\n", - " -14. -1. -2. 1. 0. -1.]]\n", - "\n", - " [[ -2. 0. 
1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -1. -1.\n", - " -15. -1. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -1. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. -1.]]\n", - "\n", - " [[ -1. 0. 1. -1. 0. 1. -8. 1. 0. 0. 1. 0. -2. -1.\n", - " -15. -2. -2. 1. 0. 
-1.]]]\n" - ] - } - ], + "outputs": [], "source": [ "# We first match the shape of both the outputs to perform the functional verification correctly\n", "\n", From 95279e4d98fb68b799e47e4f7d09186337154d62 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 24 Oct 2023 13:45:34 +0200 Subject: [PATCH 04/83] [Transform] introduce ExtractQuantScaleZeroPt and simple test --- .../extract_quant_scale_zeropt.py | 142 ++++++++++++++++++ .../test_extract_quant_scale_zeropt.py | 93 ++++++++++++ 2 files changed, 235 insertions(+) create mode 100644 src/qonnx/transformation/extract_quant_scale_zeropt.py create mode 100644 tests/transformation/test_extract_quant_scale_zeropt.py diff --git a/src/qonnx/transformation/extract_quant_scale_zeropt.py b/src/qonnx/transformation/extract_quant_scale_zeropt.py new file mode 100644 index 00000000..58863f08 --- /dev/null +++ b/src/qonnx/transformation/extract_quant_scale_zeropt.py @@ -0,0 +1,142 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of qonnx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np +from onnx import TensorProto, helper + +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.base import Transformation +from qonnx.transformation.general import GiveUniqueParameterTensors, SortGraph +from qonnx.transformation.remove import RemoveIdentityOps + + +class ExtractQuantScaleZeroPt(Transformation): + """Extract any non-identity scale and zero-point Quant inputs as + separate Div/Mul (for scale) and Add/Sub (for zeropoint" nodes, + preceding and following the Quant node.""" + + def apply(self, model: ModelWrapper): + graph = model.graph + for node in graph.node: + if node.op_type == "Quant": + quant_node = node + input_nm, scale_nm, zeropt_nm, _ = node.input + scale_t = model.get_initializer(scale_nm) + zeropt_t = model.get_initializer(zeropt_nm) + ishp = model.get_tensor_shape(input_nm) + extract_scale = False + extract_zeropt = False + if scale_t is not None and (scale_t != 1).any(): + extract_scale = True + if zeropt_t is not None and (zeropt_t != 0).any(): + extract_zeropt = True + if (not extract_scale) and (not extract_zeropt): + continue + running_input = input_nm + if extract_scale: + # create new Div node that divides the input + # by the scale + inp_scaled_nm = model.make_new_valueinfo_name() + inp_scaled = helper.make_tensor_value_info( + inp_scaled_nm, + TensorProto.FLOAT, + ishp, + ) + graph.value_info.append(inp_scaled) + inp_scale_node = helper.make_node("Div", 
[running_input, scale_nm], [inp_scaled_nm]) + graph.node.append(inp_scale_node) + # create new Mul node + # remove scale from Quant node + new_scale_nm = model.make_new_valueinfo_name() + model.set_initializer(new_scale_nm, np.asarray(1.0, dtype=np.float32)) + quant_node.input[1] = new_scale_nm + running_input = inp_scaled_nm + if extract_zeropt: + # create new Add node that adds the zeropoint to + # the scaled input + inp_zeropt_nm = model.make_new_valueinfo_name() + inp_zeropt = helper.make_tensor_value_info( + inp_zeropt_nm, + TensorProto.FLOAT, + ishp, + ) + graph.value_info.append(inp_zeropt) + inp_zeropt_node = helper.make_node("Add", [running_input, zeropt_nm], [inp_zeropt_nm]) + graph.node.append(inp_zeropt_node) + # remove zeropt from Quant node + new_zeropt_nm = model.make_new_valueinfo_name() + model.set_initializer(new_zeropt_nm, np.asarray(0.0, dtype=np.float32)) + quant_node.input[2] = new_zeropt_nm + running_input = inp_zeropt_nm + # rewire node input to any newly created Div/Add nodes + quant_node.input[0] = running_input + last_node = quant_node + final_output = quant_node.output[0] + if extract_zeropt: + # create new Sub node that subtracts the zeropoint from + # the output + out_zeropt_nm = model.make_new_valueinfo_name() + out_zeropt = helper.make_tensor_value_info( + out_zeropt_nm, + TensorProto.FLOAT, + ishp, + ) + graph.value_info.append(out_zeropt) + out_zeropt_node = helper.make_node("Sub", [out_zeropt_nm, zeropt_nm], [final_output]) + last_node.output[0] = out_zeropt_nm + graph.node.append(out_zeropt_node) + # important: when tracking a pointer to newly added nodes, + # ensure the item from the container is used, and not the + # make_node result -- those are different objects + # e.g. 
if we use last_node = out_zeropt_node below, + # this will point to the wrong object and cause bugs later + last_node = graph.node[-1] + if extract_scale: + # create new Mul node that applies the output scale + out_scale_nm = model.make_new_valueinfo_name() + out_scale = helper.make_tensor_value_info( + out_scale_nm, + TensorProto.FLOAT, + ishp, + ) + last_node.output[0] = out_scale_nm + graph.value_info.append(out_scale) + out_scale_node = helper.make_node("Mul", [out_scale_nm, scale_nm], [final_output]) + graph.node.append(out_scale_node) + + if extract_scale or extract_zeropt: + # since we used append() for new nodes, need to call + # SortGraph to ensure correct (topological) order + model = model.transform(SortGraph()) + # Remove potential unity multiplications from alpha and beta attributes + model = model.transform(RemoveIdentityOps()) + # Ensure unique parameter tensors + model = model.transform(GiveUniqueParameterTensors()) + return model, True + + return model, False diff --git a/tests/transformation/test_extract_quant_scale_zeropt.py b/tests/transformation/test_extract_quant_scale_zeropt.py new file mode 100644 index 00000000..4faecb58 --- /dev/null +++ b/tests/transformation/test_extract_quant_scale_zeropt.py @@ -0,0 +1,93 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# * Neither the name of qonnx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np +import onnx.parser as oprs + +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.core.onnx_exec import execute_onnx +from qonnx.transformation.extract_quant_scale_zeropt import ExtractQuantScaleZeroPt + + +def make_test_model(): + ishp = (1, 10) + ishp_str = str(list(ishp)) + channelwise = True + bitwidth = np.asarray(4.0, dtype=np.float32) + if channelwise: + q_attr_shp = ishp + else: + q_attr_shp = 1 + attrshp_str = str(list(q_attr_shp)) + np.random.seed(0) + scale = np.random.rand(*q_attr_shp).astype(np.float32) + zeropt = np.random.rand(*q_attr_shp).astype(np.float32) + signed = 1 + narrow = 1 + rounding_mode = "ROUND" + + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{ishp_str} in0) => (float{ishp_str} out0) + < + float{attrshp_str} scale_param, + float{attrshp_str} zeropt_param, + float bitwidth_param + > + {{ + out0 = qonnx.custom_op.general.Quant< + signed={str(signed)}, + narrow={str(narrow)}, + rounding_mode="{rounding_mode}" + >(in0, 
scale_param, zeropt_param, bitwidth_param) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) + model.set_initializer("scale_param", scale) + model.set_initializer("zeropt_param", zeropt) + model.set_initializer("bitwidth_param", bitwidth) + return model + + +def test_extract_quant_scale_zeropt(): + model = make_test_model() + ishp = model.get_tensor_shape("in0") + inp = np.random.rand(*ishp).astype(np.float32) + y_golden = execute_onnx(model, {"in0": inp})["out0"] + model_new = model.transform(ExtractQuantScaleZeroPt()) + y_ret = execute_onnx(model_new, {"in0": inp})["out0"] + assert np.allclose(y_golden, y_ret) + qnt_node = model_new.get_nodes_by_op_type("Quant")[0] + new_scale = model_new.get_initializer(qnt_node.input[1]) + assert new_scale == 1 + new_zeropt = model_new.get_initializer(qnt_node.input[2]) + assert new_zeropt == 0 From 9e0a49aaf7277de0106d1c126a3196e213bf1929 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 24 Oct 2023 14:27:41 +0200 Subject: [PATCH 05/83] [Test] add more testcases for ExtractQuantScaleZeroPt --- .../test_extract_quant_scale_zeropt.py | 38 +++++++++++++------ 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/tests/transformation/test_extract_quant_scale_zeropt.py b/tests/transformation/test_extract_quant_scale_zeropt.py index 4faecb58..540ec274 100644 --- a/tests/transformation/test_extract_quant_scale_zeropt.py +++ b/tests/transformation/test_extract_quant_scale_zeropt.py @@ -26,6 +26,8 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+import pytest + import numpy as np import onnx.parser as oprs @@ -34,19 +36,22 @@ from qonnx.transformation.extract_quant_scale_zeropt import ExtractQuantScaleZeroPt -def make_test_model(): - ishp = (1, 10) +def make_test_model(ishp, channelwise, bitwidth, need_extraction_scale, need_extraction_zeropt): ishp_str = str(list(ishp)) - channelwise = True - bitwidth = np.asarray(4.0, dtype=np.float32) if channelwise: q_attr_shp = ishp else: - q_attr_shp = 1 + q_attr_shp = (1,) attrshp_str = str(list(q_attr_shp)) np.random.seed(0) - scale = np.random.rand(*q_attr_shp).astype(np.float32) - zeropt = np.random.rand(*q_attr_shp).astype(np.float32) + if need_extraction_scale: + scale = np.random.rand(*q_attr_shp).astype(np.float32) + else: + scale = np.ones(q_attr_shp, dtype=np.float32) + if need_extraction_zeropt: + zeropt = np.random.rand(*q_attr_shp).astype(np.float32) + else: + zeropt = np.zeros(q_attr_shp, dtype=np.float32) signed = 1 narrow = 1 rounding_mode = "ROUND" @@ -78,8 +83,13 @@ def make_test_model(): return model -def test_extract_quant_scale_zeropt(): - model = make_test_model() +@pytest.mark.parametrize("need_extraction_scale", [True, False]) +@pytest.mark.parametrize("need_extraction_zeropt", [True, False]) +@pytest.mark.parametrize("channelwise", [True, False]) +def test_extract_quant_scale_zeropt(channelwise, need_extraction_scale, need_extraction_zeropt): + ishp = (1, 10) + bitwidth = np.asarray(4.0, dtype=np.float32) + model = make_test_model(ishp, channelwise, bitwidth, need_extraction_scale, need_extraction_zeropt) ishp = model.get_tensor_shape("in0") inp = np.random.rand(*ishp).astype(np.float32) y_golden = execute_onnx(model, {"in0": inp})["out0"] @@ -88,6 +98,12 @@ def test_extract_quant_scale_zeropt(): assert np.allclose(y_golden, y_ret) qnt_node = model_new.get_nodes_by_op_type("Quant")[0] new_scale = model_new.get_initializer(qnt_node.input[1]) - assert new_scale == 1 + assert (new_scale == 1).all() new_zeropt = 
model_new.get_initializer(qnt_node.input[2]) - assert new_zeropt == 0 + assert (new_zeropt == 0).all() + if need_extraction_scale: + assert len(model_new.get_nodes_by_op_type("Mul")) == 1 + assert len(model_new.get_nodes_by_op_type("Div")) == 1 + if need_extraction_zeropt: + assert len(model_new.get_nodes_by_op_type("Add")) == 1 + assert len(model_new.get_nodes_by_op_type("Sub")) == 1 From 51b671784c6b9bb0ea0a3572b3213ca0a7e22c47 Mon Sep 17 00:00:00 2001 From: Harish <62412574+Harsh9650@users.noreply.github.com> Date: Mon, 6 Nov 2023 16:14:29 +0000 Subject: [PATCH 06/83] Update 0_how_to_work_with_onnx.ipynb Change in the import and built-in functions. --- notebooks/0_how_to_work_with_onnx.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notebooks/0_how_to_work_with_onnx.ipynb b/notebooks/0_how_to_work_with_onnx.ipynb index 69e27546..db079f81 100644 --- a/notebooks/0_how_to_work_with_onnx.ipynb +++ b/notebooks/0_how_to_work_with_onnx.ipynb @@ -36,7 +36,7 @@ "outputs": [], "source": [ "import onnx\n", - "from qonnx.basic.util import qonnx_make_model\n", + "from qonnx.util.basic import qonnx_make_model\n", "\n", "Add1_node = onnx.helper.make_node(\n", " 'Add',\n", @@ -608,7 +608,7 @@ "metadata": {}, "outputs": [], "source": [ - "onnx_model1 = onnx.qonnx_make_model(graph, producer_name=\"simple-model1\")\n", + "onnx_model1 = qonnx.util.basic.qonnx_make_model(graph, producer_name=\"simple-model1\")\n", "onnx.save(onnx_model1, '/tmp/simple_model1.onnx')" ] }, From 94321b7d1710d2c1dd034137987c278f3f2157b5 Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Tue, 14 Nov 2023 10:46:12 +0100 Subject: [PATCH 07/83] Add option to find_upstream to keep nodes visited even if not found --- src/qonnx/core/modelwrapper.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/qonnx/core/modelwrapper.py b/src/qonnx/core/modelwrapper.py index f78e1334..580f0155 100644 --- a/src/qonnx/core/modelwrapper.py +++ 
b/src/qonnx/core/modelwrapper.py @@ -346,16 +346,19 @@ def find_producer(self, tensor_name): return x return None - def find_upstream(self, tensor_name, finder_fxn): + def find_upstream(self, tensor_name, finder_fxn, keep_if_not_found=False): """Follow the producer chain upstream, calling finder_fxn on each upstream node until it returns True or there are no nodes left. Returns the list - of nodes visited, or None if finder_fxn did not return True.""" + of nodes visited, or None if finder_fxn did not return True. If + keep_if_not_found is specified, returns the list of nodes visited, even + if finder_fxn never returned True, i.e., if the search terminated at an + input or initializer.""" visit_list = [] current_tensor = tensor_name while True: current_producer = self.find_producer(current_tensor) if current_producer is None: - return [] + return visit_list if keep_if_not_found else [] else: found = finder_fxn(current_producer) visit_list.append(current_producer) @@ -364,7 +367,7 @@ def find_upstream(self, tensor_name, finder_fxn): elif len(current_producer.input) > 0: current_tensor = current_producer.input[0] else: - return None + return visit_list if keep_if_not_found else None def find_consumer(self, tensor_name): """Finds and returns the node that consumes the tensor with given name. 
From 7719a3e3ec2a78148dd021dd6b47bfd7eff182ec Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Tue, 14 Nov 2023 14:21:13 +0100 Subject: [PATCH 08/83] Add cleanup transformation sorting inputs of commutative operations --- src/qonnx/core/modelwrapper.py | 8 +++- src/qonnx/transformation/general.py | 57 +++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/src/qonnx/core/modelwrapper.py b/src/qonnx/core/modelwrapper.py index f78e1334..ce621743 100644 --- a/src/qonnx/core/modelwrapper.py +++ b/src/qonnx/core/modelwrapper.py @@ -38,7 +38,12 @@ import qonnx.util.onnx as onnxutil from qonnx.core.datatype import DataType from qonnx.transformation.double_to_single_float import DoubleToSingleFloat -from qonnx.transformation.general import RemoveStaticGraphInputs, RemoveUnusedTensors, SortGraph +from qonnx.transformation.general import ( + RemoveStaticGraphInputs, + RemoveUnusedTensors, + SortGraph, + SortCommutativeInputsInitializerLast +) class ModelWrapper: @@ -149,6 +154,7 @@ def cleanup(self): RemoveUnusedTensors(), RemoveStaticGraphInputs(), SortGraph(), + SortCommutativeInputsInitializerLast(), ] for trn in cleanup_transforms: transformed_model = transformed_model.transform(trn, cleanup=False, make_deepcopy=False) diff --git a/src/qonnx/transformation/general.py b/src/qonnx/transformation/general.py index 5153e616..686bf17b 100644 --- a/src/qonnx/transformation/general.py +++ b/src/qonnx/transformation/general.py @@ -35,6 +35,9 @@ import qonnx.util.basic as util from qonnx.transformation.base import Transformation +# Protobuf onnx graph node type +from onnx import NodeProto # noqa + class MovePadAttributeToTensor(Transformation): "Move padding info from attribute into input tensor for Pad nodes." @@ -359,3 +362,57 @@ def apply(self, model): # one iteration is enough return (model, False) + + +# Groups inputs by categories, i.e., groups dynamic inputs first, followed by +# initializers. 
Keeps order of inputs in each category. +def group_inputs_by_category(node: NodeProto, model): # noqa + # Select all dynamic inputs, which are those without initializer tensor + dynamics = [i for i in node.input if model.get_initializer(i) is None] + # Select all input which are initializers, which, by exclusion, are all + # those not among the dynamic inputs + initializers = [i for i in node.input if i not in dynamics] + # Return lists of dynamic anc initializer inputs + return dynamics, initializers + + +# Tidy-Up transformation sorting the inputs to all commutative operations to +# have initializer inputs last +class SortCommutativeInputsInitializerLast(Transformation): + """ + Sorts inputs of nodes describing commutative operations to have initializer + inputs last. This order of inputs is assumed by many other transformations. + """ + + # Set of supported commutative operations + # TODO: There might be more valid operations + SUPPORTED_COMMUTATIVE_OPS = {"Add", "Mul", "And", "Or", "Xor", "Sum"} + + # Applies the transform to a whole model graph + def apply(self, model): # noqa + # Get the model graph out of the model wrapper object + graph = model.graph + # Keep track of whether the graph has been modified + graph_modified = False + # Iterate all nodes in the graph keeping track of the index + for index, node in enumerate(graph.node): + # Check whether this node is among the supported + if node.op_type in self.SUPPORTED_COMMUTATIVE_OPS: + # Group node inputs by category + dynamics, initializers = group_inputs_by_category(node, model) + # Flatten the grouped input list + inputs = [*dynamics, *initializers] + # Length of sorted and original input list must match + assert len(inputs) == len(node.input) + # Reassigned inputs from sorted categories + # Note: ONNX does not allow direct assignment to node.input + for i, name in enumerate(inputs): + # The graph has been modified if any input is reordered + if node.input[i] != name: + # Note: This is never reset back 
to False + graph_modified = True + # Reassign input name at the new index + node.input[i] = name + # Return the transformed model and indicate whether the graph actually + # has been transformed + return model, graph_modified From c0f5b4626118c275a8588a5a64393aa319044f5d Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Tue, 14 Nov 2023 14:26:29 +0100 Subject: [PATCH 09/83] Address some linting issues --- src/qonnx/core/modelwrapper.py | 2 +- src/qonnx/transformation/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/qonnx/core/modelwrapper.py b/src/qonnx/core/modelwrapper.py index ce621743..f7cf1d19 100644 --- a/src/qonnx/core/modelwrapper.py +++ b/src/qonnx/core/modelwrapper.py @@ -41,8 +41,8 @@ from qonnx.transformation.general import ( RemoveStaticGraphInputs, RemoveUnusedTensors, + SortCommutativeInputsInitializerLast, SortGraph, - SortCommutativeInputsInitializerLast ) diff --git a/src/qonnx/transformation/general.py b/src/qonnx/transformation/general.py index 686bf17b..b5ed0fca 100644 --- a/src/qonnx/transformation/general.py +++ b/src/qonnx/transformation/general.py @@ -29,15 +29,15 @@ import json import numpy as np import warnings + +# Protobuf onnx graph node type +from onnx import NodeProto # noqa from onnx import mapping from toposort import toposort_flatten import qonnx.util.basic as util from qonnx.transformation.base import Transformation -# Protobuf onnx graph node type -from onnx import NodeProto # noqa - class MovePadAttributeToTensor(Transformation): "Move padding info from attribute into input tensor for Pad nodes." 
From 8902694106de98c827e38e04ffbf3f0d8dfc9675 Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Wed, 15 Nov 2023 09:41:18 +0100 Subject: [PATCH 10/83] Fix RemoveIdentityOps not correctly handling ops following fork-nodes --- src/qonnx/transformation/remove.py | 45 ++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/src/qonnx/transformation/remove.py b/src/qonnx/transformation/remove.py index e745f0f0..2fc888cb 100644 --- a/src/qonnx/transformation/remove.py +++ b/src/qonnx/transformation/remove.py @@ -25,9 +25,8 @@ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - import numpy as np +import warnings from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.base import Transformation @@ -58,21 +57,45 @@ def apply(self, model: ModelWrapper): def remove_node_and_rewire(model, node): + # Currently cannot remove and rewire join-nodes, probably not necessary to + # support this + if model.is_join_node(node): + # Log this as a warning, so the user is aware of this, there might be + # somthing wrong or some checks missing at the caller site + warnings.warn( + "Tried to remove join-node operation: Currently not supported" + ) + # Exit the function here without doing anything + return + # We already know that node is not a join-node, thus to rewire, we only need + # to check the single producer producer = model.find_producer(node.input[0]) - if producer is not None: - # wire output tensor to - # output of producer node + # If there is a producer which is not a fork-node, rewiring is simple + if producer is not None and not model.is_fork_node(producer): + # Rewire by skipping the node, letting the producer directly feed the + # nodes output. + # TODO: Check whether this already covers fork-node identities? 
producer.output[0] = node.output[0] + # If there is no producer or the producer forks, rewiring is a bit more + # complicated else: - # node is first in graph + # Now it depends on the successor nodes to rewire their inputs successors = model.find_direct_successors(node) + # Singular node detached from the rest of the graph? assert successors is not None, "Whole graph is one node." - for succ in successors: - for i, s_inp in enumerate(succ.input): + # We need to rewire the input of each successor to not detach parts of + # the graph + for successor in successors: + # Find the inputs of the successor which are produced by the node to + # be removed + for i, s_inp in enumerate(successor.input): + # Note: This might happen multiple times? if s_inp == node.output[0]: - # rewire successor's input directly to graph input - succ.input[i] = node.input[0] - # remove node + # Rewire successor's input directly to nodes input + # Note: Node may not be a join-node, but there is probably + # no such thing as join-node identity anyway + successor.input[i] = node.input[0] + # Remove node model.graph.node.remove(node) From c7b359062dee8b979bc22741885ac812da8fe7ce Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Wed, 15 Nov 2023 09:50:20 +0100 Subject: [PATCH 11/83] Change error message to address some linting issue --- src/qonnx/transformation/remove.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/qonnx/transformation/remove.py b/src/qonnx/transformation/remove.py index 2fc888cb..980e80c1 100644 --- a/src/qonnx/transformation/remove.py +++ b/src/qonnx/transformation/remove.py @@ -62,9 +62,7 @@ def remove_node_and_rewire(model, node): if model.is_join_node(node): # Log this as a warning, so the user is aware of this, there might be # somthing wrong or some checks missing at the caller site - warnings.warn( - "Tried to remove join-node operation: Currently not supported" - ) + warnings.warn("Removing join-node operation is currently not supported") 
# Exit the function here without doing anything return # We already know that node is not a join-node, thus to rewire, we only need From ebd9193f12b37621962ba5770daf60d42c583dc1 Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Mon, 20 Nov 2023 16:10:18 +0100 Subject: [PATCH 12/83] Introduce 3D layout annotations defaulting to NWC This is required for supporting scaled dot-product attention, which has 3 dimensional tensors with batch/head, sequence and embedding dimension. --- src/qonnx/transformation/infer_data_layouts.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/qonnx/transformation/infer_data_layouts.py b/src/qonnx/transformation/infer_data_layouts.py index bbfc7404..81143e45 100644 --- a/src/qonnx/transformation/infer_data_layouts.py +++ b/src/qonnx/transformation/infer_data_layouts.py @@ -46,11 +46,19 @@ def _dims_to_layout(model, node, ndims): return DataLayout.NHWC elif layout == "NCHW" and ndims == 4: return DataLayout.NCHW + elif layout == "NWC" and ndims == 3: + return DataLayout.NWC + elif layout == "NC" and ndims == 2: + return DataLayout.NC else: return DataLayout.UNKNOWN else: if ndims == 4: return DataLayout.NHWC + elif ndims == 3: + return DataLayout.NWC + elif ndims == 2: + return DataLayout.NC else: return DataLayout.UNKNOWN else: @@ -119,6 +127,10 @@ def apply(self, model): warnings.warn("Assuming 4D input is NCHW") model.set_tensor_layout(inp_name, DataLayout.NCHW) graph_modified = True + elif len(inp_shape) == 3: + warnings.warn("Assuming 3D input is NWC") + model.set_tensor_layout(inp_name, DataLayout.NWC) + graph_modified = True elif len(inp_shape) == 2: graph_modified = True warnings.warn("Assuming 2D input is NC") From 6f8efa26d65bf9cdc375d427968a519a6f19c9b1 Mon Sep 17 00:00:00 2001 From: Harish Date: Tue, 21 Nov 2023 10:18:36 +0000 Subject: [PATCH 13/83] update in the matmul mac calculation --- src/qonnx/analysis/inference_cost.py | 2 +- src/qonnx/data/onnx/matmul_update/sdp.onnx | Bin 0 -> 2604 bytes 
tests/analysis/test_matmul_mac_cost.py | 45 +++++++++++++++++++++ 3 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 src/qonnx/data/onnx/matmul_update/sdp.onnx create mode 100644 tests/analysis/test_matmul_mac_cost.py diff --git a/src/qonnx/analysis/inference_cost.py b/src/qonnx/analysis/inference_cost.py index da5e1f5d..98e03428 100644 --- a/src/qonnx/analysis/inference_cost.py +++ b/src/qonnx/analysis/inference_cost.py @@ -134,7 +134,7 @@ def inference_cost_matmul(model, node, discount_sparsity): if tB is not None and tB.i == 1: w_shape = w_shape[::-1] # exclude common dim (last axis) from one side to avoid duplication - n_macs = np.prod(i_shape[:-1]) * np.prod(w_shape) + n_macs = i_shape[-1] * np.prod(o_shape) # deal with both dyn,param and dyn,dyn cases for weight memory inp0_is_const = model.get_initializer(node.input[0]) is not None inp1_is_const = model.get_initializer(node.input[1]) is not None diff --git a/src/qonnx/data/onnx/matmul_update/sdp.onnx b/src/qonnx/data/onnx/matmul_update/sdp.onnx new file mode 100644 index 0000000000000000000000000000000000000000..23375c80a4a9efa4cb73d9d95489ca6d97411b68 GIT binary patch literal 2604 zcmb_e%Wm3G5RHvfn`xuK4V9=DRpdn$UIc7jfo`ls>LOLtHhoB~6d@;a0}|L4n1*(h zf9MbJC;hbcTCqK2xV5SlOJF?r`kZ;3nTw_|HT)I@vzw8=Yad(3*5F4Ct{7{a1^zs8 z{b(30qHqxnn|e938Ctq(+OYf8oh&@_NK-26`JuD(a7oEsN$!WqeI+U2vRVc;w2TF9(B5Z$5HEZ`cGuVOX`1u4lEn225{OnY zN9-U?FcmPMu_*61>t(el2{r^FAreLsyNKi#&_#Tk5@A^g5fL&HwTg%~qOmCnQWFx) zqmCr)A`%)6zD0GY@@Cw7UR*w3Y^h_ z(8G;nF!xAn)zX2WRiyKH3Y^B%JqL0k1UV@J`5A+}q@_hqs1>2vUgJ%0hfGTRXq5OD zynv0cA-|iyoJ)!lKN=llP>NSb#d1Q$MM96)j!9SgB>7SVU FzX2;5;5PsO literal 0 HcmV?d00001 diff --git a/tests/analysis/test_matmul_mac_cost.py b/tests/analysis/test_matmul_mac_cost.py new file mode 100644 index 00000000..534618aa --- /dev/null +++ b/tests/analysis/test_matmul_mac_cost.py @@ -0,0 +1,45 @@ +# Copyright (c) 2023 Advanced Micro Devices, Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of Xilinx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +import pytest +import qonnx +from pkgutil import get_data +import qonnx.util.inference_cost as infc +from qonnx.util.cleanup import cleanup_model +from qonnx.core.modelwrapper import ModelWrapper + + +def test_matmul_mac_cost(): + raw_model = get_data("qonnx","data/onnx/matmul_update/sdp.onnx") + model = ModelWrapper(raw_model) + cleaned_model = cleanup_model(model) + # Two Matmul layers with shape (i_shape, w_shape, o_shape), L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) + inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False) + mac_cost = inf_cost_dict['op_mac_FLOAT32_FLOAT32'] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 + assert mac_cost == 1048576.0, "Error: discrepancy in mac cost." From cdc2747df3b54452d6efaaab842aaf44c546778c Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 21 Nov 2023 11:44:15 +0100 Subject: [PATCH 14/83] [Hotfix] round scaled-up internal result for QuantAvgPool2d --- src/qonnx/custom_op/general/quantavgpool2d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qonnx/custom_op/general/quantavgpool2d.py b/src/qonnx/custom_op/general/quantavgpool2d.py index 9c06a871..c0e24071 100644 --- a/src/qonnx/custom_op/general/quantavgpool2d.py +++ b/src/qonnx/custom_op/general/quantavgpool2d.py @@ -140,7 +140,7 @@ def execute_node(self, context, graph): sess = rt.InferenceSession(model_avgpool.SerializeToString()) result_temp = sess.run(None, idict) # remove scaling introduced by average - result_temp = result_temp[0] * (k * k) + result_temp = np.round(result_temp[0] * (k * k)) result = np.right_shift(result_temp.astype(int), self.get_shifts()) if self.get_nodeattr("data_layout") == "NHWC": result = result.transpose(0, 2, 3, 1) From e8de415eff1c7a6be57eb4518b3a7bda982fdf13 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 12 Jan 2024 15:57:56 +0100 Subject: [PATCH 15/83] Merge main into feature/tensor_stats + deprecate propagate_range 
--- src/qonnx/util/range_analysis.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/src/qonnx/util/range_analysis.py b/src/qonnx/util/range_analysis.py index fa72f50f..f2919558 100644 --- a/src/qonnx/util/range_analysis.py +++ b/src/qonnx/util/range_analysis.py @@ -60,13 +60,6 @@ def calculate_matvec_accumulator_extremum(matrix: np.ndarray, vec_min, vec_max): return (min_values, max_values) -def propagate_range(node, model, range_dict): - iname = node.input[0] - node_irange = range_dict[iname] - for oname in node.output: - range_dict[oname] = node_irange - - def calc_gemm_range(node, model, range_dict): alpha = get_by_name(node.attribute, "alpha").f beta = get_by_name(node.attribute, "beta").f @@ -240,11 +233,11 @@ def calc_range_outdtype(node, model, range_dict): optype_to_range_calc = { - "Transpose": propagate_range, + "Transpose": calc_monotonic_range, "MatMul": calc_matmul_range, "Conv": calc_conv_range, "QuantMaxNorm": calc_range_outdtype, - "Flatten": propagate_range, + "Flatten": calc_monotonic_range, "Reshape": calc_monotonic_range, "Quant": calc_monotonic_range, "BipolarQuant": calc_monotonic_range, @@ -254,7 +247,7 @@ def calc_range_outdtype(node, model, range_dict): "Add": calc_monotonic_range, "BatchNormalization": calc_monotonic_range, "Relu": calc_monotonic_range, - "Pad": propagate_range, + "Pad": calc_monotonic_range, "AveragePool": calc_monotonic_range, "Trunc": calc_range_outdtype, "MaxPool": calc_monotonic_range, From 538a935089f6c5926d0e73bf0d77cac61bfdf10a Mon Sep 17 00:00:00 2001 From: Harish Date: Sun, 14 Jan 2024 18:50:51 +0000 Subject: [PATCH 16/83] Transformation pass to introduce quantnodes --- .../transformation/introduce_quantnode.py | 263 ++++++++++++++++++ .../test_introduce_quantnode.py | 147 ++++++++++ 2 files changed, 410 insertions(+) create mode 100644 src/qonnx/transformation/introduce_quantnode.py create mode 100644 tests/transformation/test_introduce_quantnode.py diff --git 
a/src/qonnx/transformation/introduce_quantnode.py b/src/qonnx/transformation/introduce_quantnode.py new file mode 100644 index 00000000..f7e25edf --- /dev/null +++ b/src/qonnx/transformation/introduce_quantnode.py @@ -0,0 +1,263 @@ +# Copyright (c) 2024 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of qonnx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +import numpy as np +import onnx +from onnx import TensorProto + +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.base import Transformation +from qonnx.transformation.general import SortGraph +from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model +from qonnx.util.cleanup import cleanup_model + + +class graph_util: + def get_node_id(self, model): + node_index = {} + node_ind = 0 + for node in model.graph.node: + node_index[node.name] = node_ind + node_ind += 1 + return node_index + + def node_from_name(self, model, node_name): + for node in model.graph.node: + if node.name == node_name: + return node + + def identify_nodes(self, model, node_type): + node_list = [] + for node in model.graph.node: + if node.op_type == node_type: + node_list.append(node) + return node_list + + def create_node( + self, + model, + quantnode_input, + quantnode_output_shape, + node_count, + tensor_count, + scale_value, + zeropoint_value, + bitwidth_value, + narrow, + signed, + rounding_mode, + ): + quantnode_output_dtype = DataType["UINT8"] + quant_tensor = onnx.helper.make_tensor_value_info( + model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape + ) + model.graph.value_info.append(quant_tensor) + model.set_tensor_datatype(quant_tensor.name, quantnode_output_dtype) + + stationary_input_dtype = DataType["FLOAT32"] + scale_tensor = np.array(scale_value).astype(np.float32) + s_value = onnx.helper.make_tensor_value_info( + model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape + ) + model.graph.value_info.append(s_value) + model.set_tensor_datatype(s_value.name, stationary_input_dtype) + model.set_initializer(s_value.name, scale_tensor) + + zeropt_tensor = np.array(zeropoint_value).astype(np.float32) + z_value = onnx.helper.make_tensor_value_info( + model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape + ) + 
model.graph.value_info.append(z_value) + model.set_tensor_datatype(z_value.name, stationary_input_dtype) + model.set_initializer(z_value.name, zeropt_tensor) + + bitwidth_tensor = np.array(bitwidth_value).astype(np.float32) + b_value = onnx.helper.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.FLOAT, [1]) + model.graph.value_info.append(b_value) + model.set_tensor_datatype(b_value.name, stationary_input_dtype) + model.set_initializer(b_value.name, bitwidth_tensor) + + quant_node = onnx.helper.make_node( + "Quant", + inputs=[quantnode_input, s_value.name, z_value.name, b_value.name], + outputs=[quant_tensor.name], + name="Quant_node_" + str(node_count) + str(tensor_count), + narrow=narrow, + signed=signed, + rounding_mode=rounding_mode, + ) + + return quant_node, quant_tensor + + def adjust_graph(self, model, input_positions, node_in_focus, quantized_nodes, node_count): + tensor_count = 0 + for pos in input_positions: + node_details = (node_in_focus.name, pos[0]) + if ( + node_details not in quantized_nodes + ): # This is to ensure that we don't quantize the same node for the same input/output index. + if pos[0][0] == "input": + input_to_quantnode = node_in_focus.input[pos[0][1]] + consumer_node = node_in_focus + producer_node = model.find_producer(input_to_quantnode) + if producer_node is None or producer_node.op_type != "Quant": + quantization_to_perform = "yes" + else: + quantization_to_perform = "no" + else: + input_to_quantnode = node_in_focus.output[pos[0][1]] + consumer_node = model.find_consumer(input_to_quantnode) + producer_node = model.find_producer(input_to_quantnode) + if consumer_node is None or consumer_node.op_type != "Quant": + quantization_to_perform = "yes" + else: + quantization_to_perform = "no" + if quantization_to_perform == "yes": + node_indx = self.get_node_id(model) # Getting index of each node in the graph. 
+ quantnode_output_shape = model.get_tensor_shape(input_to_quantnode) # Step: 3 + + quant_node, quant_tensor = self.create_node( + model, + input_to_quantnode, + quantnode_output_shape, + node_count, + tensor_count, + scale_value=pos[1][0], + zeropoint_value=pos[1][1], + bitwidth_value=pos[1][2], + narrow=pos[1][3], + signed=pos[1][4], + rounding_mode=pos[1][5], + ) + + if consumer_node is not None: + node_pos = node_indx[consumer_node.name] + model.graph.node[node_pos].input[pos[0][1]] = quant_tensor.name + model.graph.node.append(quant_node) + else: + model.graph.value_info.remove(quant_tensor) + model.graph.node.append(quant_node) + model.graph.output.insert(0, quant_tensor) + model.graph.output.pop(1) + + model = model.transform(SortGraph()) + tensor_count += 1 + quantized_nodes.append(node_details) + else: + print(f"{pos[0][0]} index {pos[0][1]} of {node_in_focus.name} is already quantized.") + else: + print(f"{pos[0][0]} index {pos[0][1]} of {node_in_focus.name} is already quantized.") + continue + + return model + + +class IntroduceQuantnode(Transformation): + """This transformation can be used to introduce a Quant node for a specific type of node in the graph. + Users would be able to specify the location of the quant node by providing the input and output indexs + as the parameters. + + 1) Expectations: + a) Onnx model in the modelwraper format. + b) Model must be cleaned using cleanup_model qonnx.util.cleanup.cleanup_model() + c) Batchsize to be set. + + 2) Steps to transform are + Step1: Finding the input for the quant node. + Step2: Finding the consumer of the quant node output. + Step3: Finding the shape for the output tensor of quant node. + Note: The output tensor of the quant node must have the same shape as the + consumer of the input to the quant node. + + 3) Introduction to quantnodes will be done with precedence to "Name" in comparison to "op_type". 
+ + 4) Assert: + a) The input is a dictionary representing the node names as keys and a list of quant positions + as values. + b) The input dictionary must have atleast one mac node (Conv, gemm, matmul) for the transformation. + + 5) Return: + Returns a cleaned version of the model. + + """ + + def __init__(self, quant_node_inputs): + super().__init__() + self.quant_node_inputs = quant_node_inputs + self.graph_util = graph_util() + + def apply(self, model): + model = model.transform(InferShapes()) + if type(self.quant_node_inputs) == dict: + selection_type = self.quant_node_inputs.keys() + if set(selection_type) <= {"name", "op_type"}: + node_count = 0 + quantized_nodes = [] + if "name" in selection_type: + by_name = self.quant_node_inputs[ + "name" + ] # by_name is a dictionary with the unique node names as keys and the list of positions as values. + node_list_by_name = by_name.keys() # name of all the nodes specified by the user for an quant node. + for node_name in node_list_by_name: + node_in_focus = self.graph_util.node_from_name(model, node_name) + input_positions = by_name[ + node_name + ] # input positions specified by the user to introduce quant node. + model = self.graph_util.adjust_graph( + model, input_positions, node_in_focus, quantized_nodes, node_count + ) + node_count += 1 + if "op_type" in selection_type: + by_op_type = self.quant_node_inputs[ + "op_type" + ] # by_name is a dictionary with the unique node names as keys and the list of positions as values. + op_list = by_op_type.keys() + for op in op_list: + node_list = self.graph_util.identify_nodes( + model, op + ) # List of all nodes with the operation type "op". 
+ input_positions = by_op_type[op] + for node_in_focus in node_list: + model = self.graph_util.adjust_graph( + model, input_positions, node_in_focus, quantized_nodes, node_count + ) + node_count += 1 + model = qonnx_make_model(model.graph) + model = ModelWrapper(model) + model = cleanup_model(model) + else: + raise Exception("Unsupported selection type") + else: + raise TypeError("Input must be a dictionary.") + + graph_modified = False + + return (model, graph_modified) diff --git a/tests/transformation/test_introduce_quantnode.py b/tests/transformation/test_introduce_quantnode.py new file mode 100644 index 00000000..cc2e88ef --- /dev/null +++ b/tests/transformation/test_introduce_quantnode.py @@ -0,0 +1,147 @@ +# Copyright (c) 2024 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of qonnx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import pytest + +import os +import random +import urllib.request + +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.introduce_quantnode import IntroduceQuantnode, graph_util +from qonnx.util.cleanup import cleanup +from qonnx.util.inference_cost import inference_cost + +random.seed(42) + +graph_util = graph_util() + +a = "https://github.com/onnx/models/raw/main/validated/vision/" +b = "classification/resnet/model/resnet18-v1-7.onnx?download=" + +model_details = { + "resnet18-v1-7": { + "description": "Resnet18 Opset version 7.", + "url": (a + b), + "test_input": { + "name": { + "Conv_0": [ + (("input", 0), (1, 0, 8, 0, 1, "ROUND")), + (("input", 1), (1, 0, 8, 0, 1, "ROUND")), + (("output", 0), (1, 0, 8, 0, 1, "ROUND")), + ], + "Conv_1": [(("input", 0), (1, 0, 8, 0, 1, "ROUND"))], + "Conv_2": [(("input", 1), (1, 0, 8, 0, 1, "ROUND")), (("output", 0), (1, 0, 8, 0, 1, "ROUND"))], + }, + "op_type": { + "Gemm": [ + (("input", 0), (1, 0, 8, 0, 1, "ROUND")), + (("input", 1), (1, 0, 8, 0, 1, "ROUND")), + (("input", 2), (1, 0, 8, 0, 1, "ROUND")), + (("output", 0), (1, 0, 8, 0, 1, "ROUND")), + ] + }, + }, + }, +} + + +def download_model(test_model, do_cleanup=False, return_modelwrapper=False): + qonnx_url = model_details[test_model]["url"] + # download test data + dl_dir = "/tmp" + dl_file = dl_dir + f"/{test_model}.onnx" + ret = dl_file + if not os.path.isfile(dl_file): + urllib.request.urlretrieve(qonnx_url, dl_file) + 
if do_cleanup: + out_file = dl_dir + f"/{test_model}_clean.onnx" + cleanup(dl_file, out_file=out_file, override_batchsize=1) + ret = out_file + if return_modelwrapper: + ret = ModelWrapper(ret) + return ret + + +def to_verify(model, test_details): + by = random.choice(list(test_details.keys())) # by "name" or "op_type" + + if by == "name": + sample_node_name = random.choice(list(test_details["name"].keys())) + sample_node = graph_util.node_from_name(model, sample_node_name) + sample_pos = random.choice(test_details["name"][sample_node_name]) + if by == "op_type": + node_type = random.choice(list(test_details["op_type"].keys())) + sample_node = random.choice(graph_util.identify_nodes(model, node_type)) + sample_pos = random.choice(test_details["op_type"][node_type]) + + if sample_pos[0][0] == "input": + tensor_to_verify = sample_node.input[sample_pos[0][1]] + producer_node = model.find_producer(tensor_to_verify) + if producer_node.op_type == "Quant": + verification = "Success" + else: + verification = "Failure" + if sample_pos[0][0] == "output": + tensor_to_verify = sample_node.output[sample_pos[0][1]] + consumer_node = model.find_consumer(tensor_to_verify) + if consumer_node.op_type == "Quant": + verification = "Success" + else: + verification = "Failure" + + return verification + + +@pytest.mark.parametrize("test_model", model_details.keys()) +def test_introduce_quantnode(test_model): + test_details = model_details[test_model] + model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) + original_model_inf_cost = inference_cost(model, discount_sparsity=False) + nodes_pos = test_details["test_input"] + model = model.transform(IntroduceQuantnode(nodes_pos)) + quantnodes_added = len(model.get_nodes_by_op_type("Quant")) + assert quantnodes_added == 10 # 10 positions are specified. 
+ verification = to_verify(model, nodes_pos) + assert verification == "Success" + inf_cost = inference_cost(model, discount_sparsity=False) + assert ( + inf_cost["total_macs"] == original_model_inf_cost["total_macs"] + ) # "1814073344.0" must be same as the original model. + assert ( + inf_cost["total_mem_w_elems"] == original_model_inf_cost["total_mem_w_elems"] + ) # "11678912.0" must be same as the original model. + assert ( + inf_cost["total_mem_o_bits"] == original_model_inf_cost["total_mem_o_bits"] + ) # "79510784.0" must be same as the original model. + assert ( + inf_cost["total_mem_o_elems"] == original_model_inf_cost["total_mem_o_elems"] + ) # "2484712.0" must be same as the original model. + assert inf_cost["total_bops"] == 1566256136192.0 + assert inf_cost["total_mem_w_bits"] == 360326656.0 + assert inf_cost["op_mac_INT8_INT8"] == 118525952.0 From c752dfbfbb14938cef37d807767dde3dc0ff6b1f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Jan 2024 19:15:45 +0100 Subject: [PATCH 17/83] [Range] support multi-output ops, add Split --- src/qonnx/util/range_analysis.py | 33 ++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/src/qonnx/util/range_analysis.py b/src/qonnx/util/range_analysis.py index f2919558..d9ba086d 100644 --- a/src/qonnx/util/range_analysis.py +++ b/src/qonnx/util/range_analysis.py @@ -204,11 +204,14 @@ def calc_monotonic_range(node, model, range_dict, i_channel_axis=1): inp_vi = model.get_tensor_valueinfo(inp) proto_vectors.append(get_minmax_prototype_tensors(irange, ishp, inp_vi, i_channel_axis)) # process all combinations of prototype vectors for dynamic inputs - running_min = None - running_max = None + running_min = [None for i in range(len(node.output))] + running_max = [None for i in range(len(node.output))] # create context for single-node execution ctx = {x: model.get_initializer(x) for x in node.input} - ctx[oname] = valueinfo_to_tensor(model.get_tensor_valueinfo(oname)) + for 
oname in node.output: + ctx[oname] = valueinfo_to_tensor(model.get_tensor_valueinfo(oname)) + # assume all outputs are homogenous wrt data layout (e.g. channel axis + # always lives in the same position) axes_to_min = [i for i in range(ctx[oname].ndim)] axes_to_min.remove(i_channel_axis) axes_to_min = tuple(axes_to_min) @@ -216,13 +219,19 @@ def calc_monotonic_range(node, model, range_dict, i_channel_axis=1): for i in range(n_dyn_inp): ctx[dyn_inps[i]] = inps[i] execute_node(node, ctx, model.graph, opset_version=opset_version) - # grab new output and update running min/max - out = ctx[oname] - chanwise_min = out.min(axis=axes_to_min).flatten() - chanwise_max = out.max(axis=axes_to_min).flatten() - running_min = np.minimum(chanwise_min, running_min).flatten() if running_min is not None else chanwise_min - running_max = np.maximum(chanwise_max, running_max).flatten() if running_max is not None else chanwise_max - range_dict[oname] = (running_min, running_max) + for oind, oname in enumerate(node.output): + # grab new output and update running min/max + out = ctx[oname] + chanwise_min = out.min(axis=axes_to_min).flatten() + chanwise_max = out.max(axis=axes_to_min).flatten() + running_min[oind] = ( + np.minimum(chanwise_min, running_min[oind]).flatten() if running_min[oind] is not None else chanwise_min + ) + running_max[oind] = ( + np.maximum(chanwise_max, running_max[oind]).flatten() if running_max[oind] is not None else chanwise_max + ) + for oind, oname in enumerate(node.output): + range_dict[oname] = (running_min[oind], running_max[oind]) def calc_range_outdtype(node, model, range_dict): @@ -260,6 +269,7 @@ def calc_range_outdtype(node, model, range_dict): "Clip": calc_monotonic_range, "Sigmoid": calc_monotonic_range, "Concat": calc_monotonic_range, + "Split": calc_monotonic_range, } @@ -343,9 +353,8 @@ def range_analysis( for node in model.graph.node: dyn_inputs = [x for x in node.input if is_dyn_input(x, model)] inprange_ok = all([x in range_dict.keys() for x in 
dyn_inputs]) - outcount_ok = len(node.output) == 1 op_ok = node.op_type in optype_to_range_calc.keys() - if inprange_ok and op_ok and outcount_ok: + if inprange_ok and op_ok: range_calc_fxn = optype_to_range_calc[node.op_type] range_calc_fxn(node, model, range_dict) out_range = range_dict[node.output[0]] From 251030b6ad4cf4370e251e011825100b53296307 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Jan 2024 19:37:35 +0100 Subject: [PATCH 18/83] [Extract] support extracting ConvTranspose bias --- src/qonnx/transformation/extract_conv_bias.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/qonnx/transformation/extract_conv_bias.py b/src/qonnx/transformation/extract_conv_bias.py index 2a50f725..9c70a215 100644 --- a/src/qonnx/transformation/extract_conv_bias.py +++ b/src/qonnx/transformation/extract_conv_bias.py @@ -34,8 +34,8 @@ class ExtractBiasFromConv(Transformation): """ - Extracts the (optional) Bias from a Conv node and inserts it behind the - Conv node as an Add node. + Extracts the (optional) Bias from a Conv(Transpose) node and inserts it behind the + Conv(Transpose) node as an Add node. 
""" def apply(self, model): @@ -43,13 +43,13 @@ def apply(self, model): node_ind = 0 for n in graph.node: node_ind += 1 - if n.op_type == "Conv": + if n.op_type in ["Conv", "ConvTranspose"]: # Check if the node has a bias input if len(n.input) > 2: # Extract bias bias = model.get_initializer(n.input[2]) if bias is None: - warnings.warn(f"Could not extract bias from Conv node {n}") + warnings.warn(f"Could not extract bias from node {n}") continue # Insert bias as Add node behind the Conv node From f5f17f33b0ab5b9842adefb9f9a5afcebdf8f1c3 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Jan 2024 19:38:05 +0100 Subject: [PATCH 19/83] [Range] support dense ConvTranspose for analysis --- src/qonnx/util/range_analysis.py | 40 ++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/src/qonnx/util/range_analysis.py b/src/qonnx/util/range_analysis.py index d9ba086d..359048b1 100644 --- a/src/qonnx/util/range_analysis.py +++ b/src/qonnx/util/range_analysis.py @@ -165,6 +165,45 @@ def calc_conv_range(node, model, range_dict): range_dict[oname] = ret +def calc_convtranspose_range(node, model, range_dict): + iname = node.input[0] + wname = node.input[1] + assert len(node.input) == 2, "Found unsupported ConvTranspose with bias" + oname = node.output[0] + irange = range_dict[iname] + imin, imax = irange + weights = model.get_initializer(wname) + assert weights is not None, "Uninitialized ConvTranspose weights" + groups = get_by_name(node.attribute, "group") + if groups is None: + # default to dense convs + groups = 1 + else: + groups = groups.i + assert groups == 1, "Only dense (non-grouped) ConvTranspose is supported" + # do weight reshaping to treat Conv similar to MatMul + # (mh, mw) = (ofm, (ifm x k0 x k1 x ...)) + conv_ofm = weights.shape[1] + conv_ifm = weights.shape[0] + weights = weights.transpose(1, 0, 2, 3).reshape(conv_ofm, -1) + k_total = weights.shape[1] // conv_ifm + if type(imin) is np.ndarray: + imin_rep = np.repeat(imin, k_total) 
+ imax_rep = np.repeat(imax, k_total) + else: + imin_rep = imin + imax_rep = imax + dw_ret_min = [] + dw_ret_max = [] + for i in range(conv_ofm): + w_slice = weights[i, :].reshape(1, -1) + dw_ret = calculate_matvec_accumulator_extremum(w_slice, imin_rep, imax_rep) + dw_ret_min.append(dw_ret[0].item()) + dw_ret_max.append(dw_ret[1].item()) + ret = (np.asarray(dw_ret_min), np.asarray(dw_ret_max)) + range_dict[oname] = ret + + def get_minmax_prototype_tensors(irange, ishp, inp_vi, i_channel_axis=1): proto_min = valueinfo_to_tensor(inp_vi) proto_max = valueinfo_to_tensor(inp_vi) @@ -245,6 +284,7 @@ def calc_range_outdtype(node, model, range_dict): "Transpose": calc_monotonic_range, "MatMul": calc_matmul_range, "Conv": calc_conv_range, + "ConvTranspose": calc_convtranspose_range, "QuantMaxNorm": calc_range_outdtype, "Flatten": calc_monotonic_range, "Reshape": calc_monotonic_range, From f209c9dda96d29b851aa6ec67682eb721505a1a4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Jan 2024 20:02:49 +0100 Subject: [PATCH 20/83] [Range] support Python notation for input ranges ([ch0_min, ch1_min,..],[ch0_max, ch1_max,..]) --- src/qonnx/util/range_analysis.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/qonnx/util/range_analysis.py b/src/qonnx/util/range_analysis.py index 359048b1..86a2d87c 100644 --- a/src/qonnx/util/range_analysis.py +++ b/src/qonnx/util/range_analysis.py @@ -363,8 +363,12 @@ def range_analysis( range_min = None range_max = None else: - irange = irange.split(",") - range_min, range_max = float(irange[0]), float(irange[1]) + irange = eval(irange) + range_min, range_max = irange + if isinstance(range_min, list): + range_min = np.asarray(range_min, dtype=np.float32) + if isinstance(range_max, list): + range_max = np.asarray(range_max, dtype=np.float32) elif isinstance(irange, tuple): range_min, range_max = irange else: From 96906cbb7d91507e7aade1b83e4fd0ebd4c7aa40 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 
30 Jan 2024 20:39:43 +0100 Subject: [PATCH 21/83] [Cleanup] allow overriding full input tensor size --- src/qonnx/util/cleanup.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/qonnx/util/cleanup.py b/src/qonnx/util/cleanup.py index 46dda2a0..dc377a17 100644 --- a/src/qonnx/util/cleanup.py +++ b/src/qonnx/util/cleanup.py @@ -43,7 +43,7 @@ from qonnx.transformation.quant_constant_folding import FoldTransposeIntoQuantInit -def cleanup_model(model, preserve_qnt_ops=True, override_batchsize=None, extract_conv_bias=False): +def cleanup_model(model, preserve_qnt_ops=True, override_inpsize=None, extract_conv_bias=False): """Execute the transformations for the cleanup function on a model level. This allows the reuse of the cleanup transformations, without needing to read/write the model from/to disk. @@ -61,6 +61,19 @@ def cleanup_model(model, preserve_qnt_ops=True, override_batchsize=None, extract preserve_qnt_optypes = ["Quant", "BipolarQuant", "QuantizeLinear", "DequantizeLinear"] else: preserve_qnt_optypes = [] + + if override_inpsize is not None: + inpsize = eval(override_inpsize) + if type(inpsize) is int: + override_batchsize = inpsize + model = model.transform(ChangeBatchSize(override_batchsize)) + elif type(inpsize) is tuple: + override_batchsize = inpsize[0] + model = model.transform(ChangeBatchSize(override_batchsize)) + iname = model.graph.input[0].name + model.set_tensor_shape(iname, inpsize) + model.save("dbg.onnx") + cleanup_transformations = [ InferShapes(), GiveUniqueParameterTensors(), @@ -80,27 +93,24 @@ def cleanup_model(model, preserve_qnt_ops=True, override_batchsize=None, extract model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) - if override_batchsize is not None: - model = model.transform(ChangeBatchSize(override_batchsize)) - model = model.transform(InferShapes()) - return model -def cleanup(in_file, *, out_file=None, preserve_qnt_ops=True, 
override_batchsize: int = None, extract_conv_bias=False): +def cleanup(in_file, *, out_file=None, preserve_qnt_ops=True, override_inpsize: str = None, extract_conv_bias=False): """Execute a set of graph transformations to clean-up the given ONNX file. :param in_file: Filename for the input ONNX model :param preserve_qnt_ops: Preserve weight quantization operators :param out_file: If set, filename for the output ONNX model. Set to in_file with _clean suffix otherwise. - :param override_batchsize: If specified, override the batch size for the ONNX graph + :param override_inpsize: If specified, override the input size (e.g. "(1,3,224,224)" to set all or + just 1 to set batchsize to 1) for the ONNX graph :param extract_conv_bias: If specified, separate Conv bias into its own Add node """ model = ModelWrapper(in_file) model = cleanup_model( - model, preserve_qnt_ops=preserve_qnt_ops, override_batchsize=override_batchsize, extract_conv_bias=extract_conv_bias + model, preserve_qnt_ops=preserve_qnt_ops, override_inpsize=override_inpsize, extract_conv_bias=extract_conv_bias ) if out_file is None: out_file = in_file.replace(".onnx", "_clean.onnx") From 11850c1e426b0902dd84a3b5cebd1321fd6b0cf7 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Jan 2024 11:11:10 +0100 Subject: [PATCH 22/83] [Extract] use derived output dtype for conv bias extraction --- src/qonnx/transformation/extract_conv_bias.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qonnx/transformation/extract_conv_bias.py b/src/qonnx/transformation/extract_conv_bias.py index 9c70a215..bf2cf8b4 100644 --- a/src/qonnx/transformation/extract_conv_bias.py +++ b/src/qonnx/transformation/extract_conv_bias.py @@ -27,7 +27,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import warnings -from onnx import TensorProto, helper +from onnx import helper from qonnx.transformation.base import Transformation @@ -65,7 +65,7 @@ def apply(self, model): act_add_tensor = helper.make_tensor_value_info( model.make_new_valueinfo_name(), - TensorProto.FLOAT, + model.get_tensor_valueinfo(n.output[0]).type.tensor_type.elem_type, out_shape, ) graph.value_info.append(act_add_tensor) From 184bfd7780bb10923e5ac7385f1526b9aef1aedb Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Jan 2024 11:11:50 +0100 Subject: [PATCH 23/83] [Range] also work with np.float16 --- src/qonnx/util/range_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qonnx/util/range_analysis.py b/src/qonnx/util/range_analysis.py index 86a2d87c..353232e5 100644 --- a/src/qonnx/util/range_analysis.py +++ b/src/qonnx/util/range_analysis.py @@ -207,7 +207,7 @@ def calc_convtranspose_range(node, model, range_dict): def get_minmax_prototype_tensors(irange, ishp, inp_vi, i_channel_axis=1): proto_min = valueinfo_to_tensor(inp_vi) proto_max = valueinfo_to_tensor(inp_vi) - if type(irange[0]) in [float, int, np.float32, np.float64, np.uint8, np.int8]: + if type(irange[0]) in [float, int, np.float16, np.float32, np.float64, np.uint8, np.int8]: imin, imax = irange proto_min[...] = imin proto_max[...] 
= imax From 2d6322b28ec7c2fb2002353af45bd28a931d5da4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Jan 2024 11:12:57 +0100 Subject: [PATCH 24/83] [Cleanup] support both str and tuple format for input shape override --- src/qonnx/util/cleanup.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/qonnx/util/cleanup.py b/src/qonnx/util/cleanup.py index dc377a17..933f729d 100644 --- a/src/qonnx/util/cleanup.py +++ b/src/qonnx/util/cleanup.py @@ -63,16 +63,16 @@ def cleanup_model(model, preserve_qnt_ops=True, override_inpsize=None, extract_c preserve_qnt_optypes = [] if override_inpsize is not None: - inpsize = eval(override_inpsize) - if type(inpsize) is int: - override_batchsize = inpsize + if type(override_inpsize) is str: + override_inpsize = eval(override_inpsize) + if type(override_inpsize) is int: + override_batchsize = override_inpsize model = model.transform(ChangeBatchSize(override_batchsize)) - elif type(inpsize) is tuple: - override_batchsize = inpsize[0] + elif type(override_inpsize) is tuple: + override_batchsize = override_inpsize[0] model = model.transform(ChangeBatchSize(override_batchsize)) iname = model.graph.input[0].name - model.set_tensor_shape(iname, inpsize) - model.save("dbg.onnx") + model.set_tensor_shape(iname, override_inpsize) cleanup_transformations = [ InferShapes(), From 1de24c59f9ee753e4c171b0aafb80588e65033ca Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 5 Feb 2024 11:26:43 +0100 Subject: [PATCH 25/83] use newer tf2onnx --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index fe89cbde..602d6ada 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,7 +69,7 @@ exclude = # Note: pyparsing is actually needed by QKeras, but missing as dependency qkeras = pyparsing - tf2onnx>=1.12.1 + tf2onnx>=1.16.1 tensorflow==2.9.0 QKeras==0.9.0 From fd3b319610adf69c13e2ab55f4e042871c372e74 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 5 Feb 
2024 13:33:01 +0100 Subject: [PATCH 26/83] [Test] add validated/ to ONNX model zoo URLs --- tests/transformation/test_batchnorm_to_affine.py | 2 +- tests/transformation/test_qcdq_to_qonnx.py | 4 +++- tests/transformation/test_renaming.py | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/transformation/test_batchnorm_to_affine.py b/tests/transformation/test_batchnorm_to_affine.py index 622f0d9c..705a31c1 100644 --- a/tests/transformation/test_batchnorm_to_affine.py +++ b/tests/transformation/test_batchnorm_to_affine.py @@ -41,7 +41,7 @@ from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model -download_url = "https://github.com/onnx/models/raw/main/vision/classification" +download_url = "https://github.com/onnx/models/raw/main/validated/vision/classification" download_url += "/shufflenet/model/shufflenet-9.onnx" export_onnx_path = download_url.split("/")[-1] diff --git a/tests/transformation/test_qcdq_to_qonnx.py b/tests/transformation/test_qcdq_to_qonnx.py index 4532530c..44d10524 100644 --- a/tests/transformation/test_qcdq_to_qonnx.py +++ b/tests/transformation/test_qcdq_to_qonnx.py @@ -39,7 +39,9 @@ model_details = { "MobileNetv2-w8a8": { - "url": ("https://github.com/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-12-qdq.onnx"), + "url": ( + "https://github.com/onnx/models/raw/main/validated/vision/classification/mobilenet/model/mobilenetv2-12-qdq.onnx" + ), "input_shape": (1, 3, 224, 224), "input_range": (-1, +1), "exp_q_nodes": 171, diff --git a/tests/transformation/test_renaming.py b/tests/transformation/test_renaming.py index a318a2dd..2a4f765a 100644 --- a/tests/transformation/test_renaming.py +++ b/tests/transformation/test_renaming.py @@ -77,7 +77,7 @@ def test_renaming(): def test_rename_multi_io_tinyyolov3(): - download_url = "https://github.com/onnx/models/raw/main/vision/object_detection_segmentation" + download_url = 
"https://github.com/onnx/models/raw/main/validated/vision/object_detection_segmentation" download_url += "/tiny-yolov3/model/tiny-yolov3-11.onnx" export_onnx_path = download_url.split("/")[-1] ureq.urlretrieve(download_url, export_onnx_path) From 3b0f11eafbc43eac493b60a35b611ba96901e4bd Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 5 Feb 2024 16:23:50 +0100 Subject: [PATCH 27/83] fix typo in notebooks README --- notebooks/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/notebooks/README.md b/notebooks/README.md index f25b99dd..f852fb09 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -8,5 +8,8 @@ git clone https://github.com/fastmachinelearning/qonnx cd qonnx virtualenv -p python3.8 venv source venv/bin/activate -pip install -e .[testing, docs, notebooks] +pip install -e .[testing,docs,notebooks] +cd notebooks +jupyter notebook . +# follow the link printed in the console to bring up Jupyter ``` From baff9219b75320537c63b8963081319cedf68250 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 5 Feb 2024 16:29:15 +0100 Subject: [PATCH 28/83] [Notebook] more fixes to 0_how_to_work_with_onnx.ipynb --- notebooks/0_how_to_work_with_onnx.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/notebooks/0_how_to_work_with_onnx.ipynb b/notebooks/0_how_to_work_with_onnx.ipynb index db079f81..60340f14 100644 --- a/notebooks/0_how_to_work_with_onnx.ipynb +++ b/notebooks/0_how_to_work_with_onnx.ipynb @@ -656,7 +656,7 @@ ], "metadata": { "kernelspec": { - "display_name": "venv", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -670,7 +670,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.16" + "version": "3.8.18" }, "vscode": { "interpreter": { @@ -679,5 +679,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } From 2feab8455a240faa421ac2aa6748b7e6950a6348 Mon Sep 17 00:00:00 2001 From: 
Yaman Umuroglu Date: Mon, 5 Feb 2024 16:46:34 +0100 Subject: [PATCH 29/83] [Test] override_batchsize -> override_inpsize for cleanup --- tests/transformation/test_introduce_quantnode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/transformation/test_introduce_quantnode.py b/tests/transformation/test_introduce_quantnode.py index cc2e88ef..f53dbf63 100644 --- a/tests/transformation/test_introduce_quantnode.py +++ b/tests/transformation/test_introduce_quantnode.py @@ -81,7 +81,7 @@ def download_model(test_model, do_cleanup=False, return_modelwrapper=False): urllib.request.urlretrieve(qonnx_url, dl_file) if do_cleanup: out_file = dl_dir + f"/{test_model}_clean.onnx" - cleanup(dl_file, out_file=out_file, override_batchsize=1) + cleanup(dl_file, out_file=out_file, override_inpsize=1) ret = out_file if return_modelwrapper: ret = ModelWrapper(ret) From 3e132fe2dd892032a75013ff0c6956ab9be172a5 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 5 Feb 2024 17:01:10 +0100 Subject: [PATCH 30/83] [GraphQnt] some cleanup and renaming --- ...troduce_quantnode.py => quantize_graph.py} | 42 +++++++++---------- ...ce_quantnode.py => test_quantize_graph.py} | 10 ++--- 2 files changed, 25 insertions(+), 27 deletions(-) rename src/qonnx/transformation/{introduce_quantnode.py => quantize_graph.py} (89%) rename tests/transformation/{test_introduce_quantnode.py => test_quantize_graph.py} (95%) diff --git a/src/qonnx/transformation/introduce_quantnode.py b/src/qonnx/transformation/quantize_graph.py similarity index 89% rename from src/qonnx/transformation/introduce_quantnode.py rename to src/qonnx/transformation/quantize_graph.py index f7e25edf..af290730 100644 --- a/src/qonnx/transformation/introduce_quantnode.py +++ b/src/qonnx/transformation/quantize_graph.py @@ -180,32 +180,30 @@ def adjust_graph(self, model, input_positions, node_in_focus, quantized_nodes, n return model -class IntroduceQuantnode(Transformation): - """This transformation can be used to 
introduce a Quant node for a specific type of node in the graph. - Users would be able to specify the location of the quant node by providing the input and output indexs - as the parameters. +class QuantizeGraph(Transformation): + """This transformation can be used to introduce a Quant node for particular nodes in the graph, + determined based on either op_type or node name. + For the particular nodes identified, users can specify the location of the Quant nodes by providing + the input and output indices where Quant nodes are to be inserted. + Assumes the input model is cleaned-up with all intermediate shapes specified and nodes given + unique names already. - 1) Expectations: - a) Onnx model in the modelwraper format. - b) Model must be cleaned using cleanup_model qonnx.util.cleanup.cleanup_model() - c) Batchsize to be set. + 2) Steps to transform are + Step1: Finding the input for the quant node. + Step2: Finding the consumer of the quant node output. + Step3: Finding the shape for the output tensor of quant node. + Note: The output tensor of the quant node must have the same shape as the + consumer of the input to the quant node. - 2) Steps to transform are - Step1: Finding the input for the quant node. - Step2: Finding the consumer of the quant node output. - Step3: Finding the shape for the output tensor of quant node. - Note: The output tensor of the quant node must have the same shape as the - consumer of the input to the quant node. + 3) Introduction to quantnodes will be done with precedence to "Name" in comparison to "op_type". - 3) Introduction to quantnodes will be done with precedence to "Name" in comparison to "op_type". + 4) Assert: + a) The input is a dictionary representing the node names as keys and a list of quant positions + as values. + b) The input dictionary must have atleast one mac node (Conv, gemm, matmul) for the transformation. 
- 4) Assert: - a) The input is a dictionary representing the node names as keys and a list of quant positions - as values. - b) The input dictionary must have atleast one mac node (Conv, gemm, matmul) for the transformation. - - 5) Return: - Returns a cleaned version of the model. + 5) Return: + Returns a cleaned version of the model. """ diff --git a/tests/transformation/test_introduce_quantnode.py b/tests/transformation/test_quantize_graph.py similarity index 95% rename from tests/transformation/test_introduce_quantnode.py rename to tests/transformation/test_quantize_graph.py index f53dbf63..c0ceb456 100644 --- a/tests/transformation/test_introduce_quantnode.py +++ b/tests/transformation/test_quantize_graph.py @@ -33,7 +33,7 @@ import urllib.request from qonnx.core.modelwrapper import ModelWrapper -from qonnx.transformation.introduce_quantnode import IntroduceQuantnode, graph_util +from qonnx.transformation.quantize_graph import QuantizeGraph, graph_util from qonnx.util.cleanup import cleanup from qonnx.util.inference_cost import inference_cost @@ -41,13 +41,13 @@ graph_util = graph_util() -a = "https://github.com/onnx/models/raw/main/validated/vision/" -b = "classification/resnet/model/resnet18-v1-7.onnx?download=" +download_url = "https://github.com/onnx/models/raw/main/validated/vision/" +download_url += "classification/resnet/model/resnet18-v1-7.onnx?download=" model_details = { "resnet18-v1-7": { "description": "Resnet18 Opset version 7.", - "url": (a + b), + "url": download_url, "test_input": { "name": { "Conv_0": [ @@ -124,7 +124,7 @@ def test_introduce_quantnode(test_model): model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) original_model_inf_cost = inference_cost(model, discount_sparsity=False) nodes_pos = test_details["test_input"] - model = model.transform(IntroduceQuantnode(nodes_pos)) + model = model.transform(QuantizeGraph(nodes_pos)) quantnodes_added = len(model.get_nodes_by_op_type("Quant")) assert quantnodes_added == 
10 # 10 positions are specified. verification = to_verify(model, nodes_pos) From 99841c108aa47ec509dc0d1d729654949d3f76d2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 6 Feb 2024 16:14:45 +0100 Subject: [PATCH 31/83] [DataType] add preliminary support for FLOAT16 --- src/qonnx/core/datatype.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/qonnx/core/datatype.py b/src/qonnx/core/datatype.py index 40584a4b..84365289 100644 --- a/src/qonnx/core/datatype.py +++ b/src/qonnx/core/datatype.py @@ -144,6 +144,36 @@ def to_numpy_dt(self): def get_canonical_name(self): return "FLOAT32" +class Float16Type(BaseDataType): + def bitwidth(self): + return 16 + + def min(self): + return np.finfo(np.float16).min + + def max(self): + return np.finfo(np.float16).max + + def allowed(self, value): + return True + + def get_num_possible_values(self): + raise Exception("Undefined for Float16Type") + + def is_integer(self): + return False + + def is_fixed_point(self): + return False + + def get_hls_datatype_str(self): + return "float" + + def to_numpy_dt(self): + return np.float16 + + def get_canonical_name(self): + return "FLOAT16" class IntType(BaseDataType): def __init__(self, bitwidth, signed): @@ -349,6 +379,7 @@ def resolve_datatype(name): "BIPOLAR": BipolarType(), "TERNARY": TernaryType(), "FLOAT32": FloatType(), + "FLOAT16": Float16Type(), } if name in _special_types.keys(): return _special_types[name] From 42df7a016584cef8023191ff18e74792f102f3c9 Mon Sep 17 00:00:00 2001 From: Harish Date: Thu, 8 Feb 2024 14:22:24 +0000 Subject: [PATCH 32/83] Revised version for QuantizeGraph --- src/qonnx/core/modelwrapper.py | 9 + src/qonnx/transformation/quantize_graph.py | 347 +++++++++----------- tests/transformation/test_quantize_graph.py | 10 +- 3 files changed, 175 insertions(+), 191 deletions(-) diff --git a/src/qonnx/core/modelwrapper.py b/src/qonnx/core/modelwrapper.py index f78e1334..f21efdab 100644 --- a/src/qonnx/core/modelwrapper.py 
+++ b/src/qonnx/core/modelwrapper.py @@ -542,6 +542,15 @@ def get_node_index(self, node): except ValueError: return None + def get_node_from_name(self, node_name): + """Returns the node with the specified name.""" + try: + for node in self.graph.node: + if node.name == node_name: + return node + except ValueError: + return None + def get_tensor_layout(self, tensor_name): """Returns the data layout annotation of tensor with given name. The data layout is expressed as a list of strings with as many diff --git a/src/qonnx/transformation/quantize_graph.py b/src/qonnx/transformation/quantize_graph.py index af290730..20feb049 100644 --- a/src/qonnx/transformation/quantize_graph.py +++ b/src/qonnx/transformation/quantize_graph.py @@ -31,225 +31,202 @@ import onnx from onnx import TensorProto -from qonnx.core.datatype import DataType -from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.base import Transformation from qonnx.transformation.general import SortGraph from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import qonnx_make_model from qonnx.util.cleanup import cleanup_model -class graph_util: - def get_node_id(self, model): - node_index = {} - node_ind = 0 - for node in model.graph.node: - node_index[node.name] = node_ind - node_ind += 1 - return node_index - - def node_from_name(self, model, node_name): - for node in model.graph.node: - if node.name == node_name: - return node - - def identify_nodes(self, model, node_type): - node_list = [] - for node in model.graph.node: - if node.op_type == node_type: - node_list.append(node) - return node_list - - def create_node( - self, - model, - quantnode_input, - quantnode_output_shape, - node_count, - tensor_count, - scale_value, - zeropoint_value, - bitwidth_value, - narrow, - signed, - rounding_mode, - ): - quantnode_output_dtype = DataType["UINT8"] - quant_tensor = onnx.helper.make_tensor_value_info( - model.make_new_valueinfo_name(), TensorProto.FLOAT, 
quantnode_output_shape - ) - model.graph.value_info.append(quant_tensor) - model.set_tensor_datatype(quant_tensor.name, quantnode_output_dtype) - - stationary_input_dtype = DataType["FLOAT32"] - scale_tensor = np.array(scale_value).astype(np.float32) - s_value = onnx.helper.make_tensor_value_info( - model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape - ) - model.graph.value_info.append(s_value) - model.set_tensor_datatype(s_value.name, stationary_input_dtype) - model.set_initializer(s_value.name, scale_tensor) - - zeropt_tensor = np.array(zeropoint_value).astype(np.float32) - z_value = onnx.helper.make_tensor_value_info( - model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape - ) - model.graph.value_info.append(z_value) - model.set_tensor_datatype(z_value.name, stationary_input_dtype) - model.set_initializer(z_value.name, zeropt_tensor) - - bitwidth_tensor = np.array(bitwidth_value).astype(np.float32) - b_value = onnx.helper.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.FLOAT, [1]) - model.graph.value_info.append(b_value) - model.set_tensor_datatype(b_value.name, stationary_input_dtype) - model.set_initializer(b_value.name, bitwidth_tensor) - - quant_node = onnx.helper.make_node( - "Quant", - inputs=[quantnode_input, s_value.name, z_value.name, b_value.name], - outputs=[quant_tensor.name], - name="Quant_node_" + str(node_count) + str(tensor_count), - narrow=narrow, - signed=signed, - rounding_mode=rounding_mode, - ) - - return quant_node, quant_tensor - - def adjust_graph(self, model, input_positions, node_in_focus, quantized_nodes, node_count): - tensor_count = 0 - for pos in input_positions: - node_details = (node_in_focus.name, pos[0]) - if ( - node_details not in quantized_nodes - ): # This is to ensure that we don't quantize the same node for the same input/output index. 
- if pos[0][0] == "input": - input_to_quantnode = node_in_focus.input[pos[0][1]] - consumer_node = node_in_focus - producer_node = model.find_producer(input_to_quantnode) - if producer_node is None or producer_node.op_type != "Quant": - quantization_to_perform = "yes" - else: - quantization_to_perform = "no" +def create_quantnode( + model, + quantnode_input, + quantnode_output_shape, + scale_value, + zeropoint_value, + bitwidth_value, + narrow, + signed, + rounding_mode, +): + quant_tensor = onnx.helper.make_tensor_value_info( + model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape + ) + model.graph.value_info.append(quant_tensor) + + scale_tensor = np.array(scale_value).astype(np.float32) + s_value = onnx.helper.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape) + model.graph.value_info.append(s_value) + model.set_initializer(s_value.name, scale_tensor) + + zeropt_tensor = np.array(zeropoint_value).astype(np.float32) + z_value = onnx.helper.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.FLOAT, quantnode_output_shape) + model.graph.value_info.append(z_value) + model.set_initializer(z_value.name, zeropt_tensor) + + bitwidth_tensor = np.array(bitwidth_value).astype(np.float32) + b_value = onnx.helper.make_tensor_value_info(model.make_new_valueinfo_name(), TensorProto.FLOAT, [1]) + model.graph.value_info.append(b_value) + model.set_initializer(b_value.name, bitwidth_tensor) + + quantnode = onnx.helper.make_node( + "Quant", + inputs=[quantnode_input, s_value.name, z_value.name, b_value.name], + outputs=[quant_tensor.name], + name="Quant_" + quantnode_input, + narrow=narrow, + signed=signed, + rounding_mode=rounding_mode, + ) + + return quantnode, quant_tensor + + +def adjust_graph(model, input_positions, node_name, quantized_nodes): + for pos in input_positions: + node_details = (node_name, pos[0]) + if node_details not in quantized_nodes: # not quantizing for same node_inp/out 
index. + node_in_focus = model.get_node_from_name(node_name) + + if pos[0][0] == "input": + quantnode_input = node_in_focus.input[pos[0][1]] + consumer_node = node_in_focus + producer_node = model.find_producer(quantnode_input) + if producer_node is None or producer_node.op_type != "Quant": + quantization_to_perform = True else: - input_to_quantnode = node_in_focus.output[pos[0][1]] - consumer_node = model.find_consumer(input_to_quantnode) - producer_node = model.find_producer(input_to_quantnode) - if consumer_node is None or consumer_node.op_type != "Quant": - quantization_to_perform = "yes" - else: - quantization_to_perform = "no" - if quantization_to_perform == "yes": - node_indx = self.get_node_id(model) # Getting index of each node in the graph. - quantnode_output_shape = model.get_tensor_shape(input_to_quantnode) # Step: 3 - - quant_node, quant_tensor = self.create_node( - model, - input_to_quantnode, - quantnode_output_shape, - node_count, - tensor_count, - scale_value=pos[1][0], - zeropoint_value=pos[1][1], - bitwidth_value=pos[1][2], - narrow=pos[1][3], - signed=pos[1][4], - rounding_mode=pos[1][5], - ) - - if consumer_node is not None: - node_pos = node_indx[consumer_node.name] - model.graph.node[node_pos].input[pos[0][1]] = quant_tensor.name - model.graph.node.append(quant_node) - else: - model.graph.value_info.remove(quant_tensor) - model.graph.node.append(quant_node) - model.graph.output.insert(0, quant_tensor) - model.graph.output.pop(1) - - model = model.transform(SortGraph()) - tensor_count += 1 - quantized_nodes.append(node_details) + quantization_to_perform = False + else: + quantnode_input = node_in_focus.output[pos[0][1]] + consumer_node = model.find_consumer(quantnode_input) + producer_node = model.find_producer(quantnode_input) + if consumer_node is None or consumer_node.op_type != "Quant": + quantization_to_perform = True + else: + quantization_to_perform = False + if quantization_to_perform is True: + quantnode_output_shape = 
model.get_tensor_shape(quantnode_input) # Step: 3 + quantnode, quant_tensor = create_quantnode( + model, + quantnode_input, + quantnode_output_shape, + scale_value=pos[1][0], + zeropoint_value=pos[1][1], + bitwidth_value=pos[1][2], + narrow=pos[1][3], + signed=pos[1][4], + rounding_mode=pos[1][5], + ) + + if consumer_node is not None: + node_pos = model.get_node_index(consumer_node) + model.graph.node[node_pos].input[pos[0][1]] = quant_tensor.name + model.graph.node.append(quantnode) else: - print(f"{pos[0][0]} index {pos[0][1]} of {node_in_focus.name} is already quantized.") + model.graph.value_info.remove(quant_tensor) + model.graph.node.append(quantnode) + model.graph.output.insert(0, quant_tensor) + model.graph.output.pop(1) + + model = model.transform(SortGraph()) + quantized_nodes.append(node_details) else: - print(f"{pos[0][0]} index {pos[0][1]} of {node_in_focus.name} is already quantized.") - continue + print(f"{pos[0][0]} index {pos[0][1]} of {node_name} is already quantized.") + else: + print(f"{pos[0][0]} index {pos[0][1]} of {node_name} is already quantized.") + continue - return model + return model class QuantizeGraph(Transformation): - """This transformation can be used to introduce a Quant node for particular nodes in the graph, - determined based on either op_type or node name. - For the particular nodes identified, users can specify the location of the Quant nodes by providing - the input and output indices where Quant nodes are to be inserted. - Assumes the input model is cleaned-up with all intermediate shapes specified and nodes given - unique names already. - - 2) Steps to transform are + """This transformation can be used to introduce a Quant node for a specific type of node in the graph. + Users would be able to specify the location of the quant node by providing the input and output indexs + as the parameters. + + 1) Expectations: + a) Onnx model in the modelwraper format. 
+ b) Model must be cleaned using cleanup_model qonnx.util.cleanup.cleanup_model() + c) Batchsize to be set. + + 2) S.teps to transform are: Step1: Finding the input for the quant node. Step2: Finding the consumer of the quant node output. Step3: Finding the shape for the output tensor of quant node. - Note: The output tensor of the quant node must have the same shape as the - consumer of the input to the quant node. - - 3) Introduction to quantnodes will be done with precedence to "Name" in comparison to "op_type". + Note: The output tensor of the quant node must have the same shape as the consumer of the input + to the quant node. + + 3) Input: + A dict "quantnode_map" specifying the criterion, positions, and input parameters like + scale, bitwidth, zeropoint, and others for the particular quantnode. + + Criterion: + a) name: This will allow users to add quant nodes for specific node like "Conv_0" and "Gemm_0". + Note: using this users can have quant nodes with different parameters. Ex: quantizing + "Conv_0" and "Conv_1" with bitwidth of 4 and 6, respectively. + b) op_type: This will allow users to add quant nodes for all nodes of a particular op_type such + as, "Conv", "Gemm", and others. + Note: All quant nodes created using op_type criterion will have the same input + parameters (scale, zeropoint, bitwidth, and others.) + c) name and op_type: In this case, quant nodes will be added with precedence to "Name" + in comparison to "op_type". + + Positions: ("input", index) or ("output", index) + a) "input": specifies that the user want to quantize the input of the selected node. + b) "output": specifies that the user want to quantize the input of the selected node. + c) index: specifies which input/output to quantize (as a node can have multiple inputs and outputs) + + Parameters (to quant node) are provided as (scale, zeropoint, bitwidth, narrow, signed, rounding_mode) + + a) Inputs: scale, zeropoint, bitwidth. + b) Attributes: narrow, signed, rounding_mode. 
4) Assert: - a) The input is a dictionary representing the node names as keys and a list of quant positions - as values. - b) The input dictionary must have atleast one mac node (Conv, gemm, matmul) for the transformation. + a) The input is a dictionary representing the node names as keys and a list of quant positions + as values. + b) The input dictionary must have atleast one mac node (Conv, gemm, matmul) for the transformation. 5) Return: - Returns a cleaned version of the model. - + Returns a model with new quant nodes created at the positions specified using the "quantnode_map". + + 6) Example: + quantnode_map = {"name": {"Conv_0": [(("input", 0), (1, 0, 8, 0, 1, "ROUND")), + (("input", 1), (1, 0, 8, 0, 1, "ROUND")), + (("output", 0), (1, 0, 8, 0, 1, "ROUND"))], + "Conv_1": [(("input", 0), (1, 0, 8, 0, 1, "ROUND"))], + "Conv_2": [(("input", 1), (1, 0, 8, 0, 1, "ROUND")), + (("output", 0), (1, 0, 8, 0, 1, "ROUND"))]}, + + "op_type": {"Gemm": [(("input", 0), (1, 0, 8, 0, 1, "ROUND")), + (("input", 1), (1, 0, 8, 0, 1, "ROUND")), + (("input", 2), (1, 0, 8, 0, 1, "ROUND")), + (("output", 0), (1, 0, 8, 0, 1, "ROUND"))]}} """ - def __init__(self, quant_node_inputs): + def __init__(self, quantnode_map): super().__init__() - self.quant_node_inputs = quant_node_inputs - self.graph_util = graph_util() + self.quantnode_map = quantnode_map def apply(self, model): model = model.transform(InferShapes()) - if type(self.quant_node_inputs) == dict: - selection_type = self.quant_node_inputs.keys() + if type(self.quantnode_map) == dict: + selection_type = self.quantnode_map.keys() if set(selection_type) <= {"name", "op_type"}: - node_count = 0 quantized_nodes = [] if "name" in selection_type: - by_name = self.quant_node_inputs[ - "name" - ] # by_name is a dictionary with the unique node names as keys and the list of positions as values. - node_list_by_name = by_name.keys() # name of all the nodes specified by the user for an quant node. 
+ by_name = self.quantnode_map["name"] # dict with unique names and list of positions. + node_list_by_name = by_name.keys() # node names specified by the user for quant nodes. for node_name in node_list_by_name: - node_in_focus = self.graph_util.node_from_name(model, node_name) - input_positions = by_name[ - node_name - ] # input positions specified by the user to introduce quant node. - model = self.graph_util.adjust_graph( - model, input_positions, node_in_focus, quantized_nodes, node_count - ) - node_count += 1 + input_positions = by_name[node_name] # input positions to introduce quant nodes. + model = adjust_graph(model, input_positions, node_name, quantized_nodes) if "op_type" in selection_type: - by_op_type = self.quant_node_inputs[ - "op_type" - ] # by_name is a dictionary with the unique node names as keys and the list of positions as values. + by_op_type = self.quantnode_map["op_type"] # dict with the unique names and list of positions. op_list = by_op_type.keys() for op in op_list: - node_list = self.graph_util.identify_nodes( - model, op - ) # List of all nodes with the operation type "op". + node_list = model.get_nodes_by_op_type(op) # List of all nodes with the operation type "op". 
input_positions = by_op_type[op] - for node_in_focus in node_list: - model = self.graph_util.adjust_graph( - model, input_positions, node_in_focus, quantized_nodes, node_count - ) - node_count += 1 - model = qonnx_make_model(model.graph) - model = ModelWrapper(model) + for node in node_list: + node_name = node.name + model = adjust_graph(model, input_positions, node_name, quantized_nodes) model = cleanup_model(model) else: raise Exception("Unsupported selection type") diff --git a/tests/transformation/test_quantize_graph.py b/tests/transformation/test_quantize_graph.py index c0ceb456..e613bd17 100644 --- a/tests/transformation/test_quantize_graph.py +++ b/tests/transformation/test_quantize_graph.py @@ -33,14 +33,12 @@ import urllib.request from qonnx.core.modelwrapper import ModelWrapper -from qonnx.transformation.quantize_graph import QuantizeGraph, graph_util +from qonnx.transformation.quantize_graph import QuantizeGraph from qonnx.util.cleanup import cleanup from qonnx.util.inference_cost import inference_cost random.seed(42) -graph_util = graph_util() - download_url = "https://github.com/onnx/models/raw/main/validated/vision/" download_url += "classification/resnet/model/resnet18-v1-7.onnx?download=" @@ -93,11 +91,11 @@ def to_verify(model, test_details): if by == "name": sample_node_name = random.choice(list(test_details["name"].keys())) - sample_node = graph_util.node_from_name(model, sample_node_name) + sample_node = model.node_from_name(model, sample_node_name) sample_pos = random.choice(test_details["name"][sample_node_name]) if by == "op_type": node_type = random.choice(list(test_details["op_type"].keys())) - sample_node = random.choice(graph_util.identify_nodes(model, node_type)) + sample_node = random.choice(model.get_nodes_by_op_type(node_type)) sample_pos = random.choice(test_details["op_type"][node_type]) if sample_pos[0][0] == "input": @@ -119,7 +117,7 @@ def to_verify(model, test_details): @pytest.mark.parametrize("test_model", 
model_details.keys()) -def test_introduce_quantnode(test_model): +def test_quantize_graph(test_model): test_details = model_details[test_model] model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) original_model_inf_cost = inference_cost(model, discount_sparsity=False) From 7608e7c90df931a8daeababf5239f24003a9dec5 Mon Sep 17 00:00:00 2001 From: Harish Date: Thu, 15 Feb 2024 11:29:36 +0000 Subject: [PATCH 33/83] inference cost breakdown --- src/qonnx/analysis/inference_cost.py | 25 ++++-- src/qonnx/util/inference_cost.py | 76 +++++++++++----- .../analysis/test_inference_cost_breakdown.py | 88 +++++++++++++++++++ 3 files changed, 160 insertions(+), 29 deletions(-) create mode 100644 tests/analysis/test_inference_cost_breakdown.py diff --git a/src/qonnx/analysis/inference_cost.py b/src/qonnx/analysis/inference_cost.py index 98e03428..847058b7 100644 --- a/src/qonnx/analysis/inference_cost.py +++ b/src/qonnx/analysis/inference_cost.py @@ -201,10 +201,10 @@ def inference_cost_upsample(model, node, discount_sparsity): return ret -def inference_cost(model, discount_sparsity=True): +def inference_cost(model, discount_sparsity=True, cost_breakdown=False): "Ensure all nodes have unique names prior to calling this analysis pass." 
- node_costs = {} + ret, node_costs, nodes_per_optype = {}, {}, {} zero_cost_ops = [ "MaxPool", "AveragePool", @@ -240,13 +240,24 @@ def inference_cost(model, discount_sparsity=True): if node.op_type in inference_cost_fxn_map.keys(): node_cost = inference_cost_fxn_map[node.op_type](model, node, discount_sparsity) node_costs[node.name] = node_cost + if node.op_type not in nodes_per_optype.keys(): + new_optype = {} + new_optype[node.name] = node_cost + nodes_per_optype[node.op_type] = new_optype + else: + nodes_per_optype[node.op_type][node.name] = node_cost elif node.op_type in zero_cost_ops: continue else: unsupported_ops.add(node.op_type) - - ret = aggregate_dict_keys(node_costs) - ret["unsupported"] = unsupported_ops - ret["discount_sparsity"] = discount_sparsity - + total = aggregate_dict_keys(node_costs) + total["unsupported"] = unsupported_ops + total["discount_sparsity"] = discount_sparsity + ret["total_cost"] = total + if cost_breakdown: + optype_cost = {} + for optype, resources in nodes_per_optype.items(): + optype_cost[optype] = aggregate_dict_keys(resources) + ret["optype_cost"] = optype_cost + ret["node_cost"] = node_costs return ret diff --git a/src/qonnx/util/inference_cost.py b/src/qonnx/util/inference_cost.py index 86428c76..22ee140d 100644 --- a/src/qonnx/util/inference_cost.py +++ b/src/qonnx/util/inference_cost.py @@ -71,7 +71,13 @@ def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"): def inference_cost( - model_filename_or_wrapper, *, output_json=None, output_onnx=None, preprocess=True, discount_sparsity=True + model_filename_or_wrapper, + *, + output_json=None, + output_onnx=None, + preprocess=True, + discount_sparsity=True, + cost_breakdown=False ): """Return the inference cost estimate metric for given ONNX model. Supports the Quant op for weight/activation quantization. @@ -83,8 +89,8 @@ def inference_cost( :param preprocess: If set, run preprocessing steps such as shape inference, datatype inference and constant folding. 
Strongly recommended. :param discount_sparsity: If set, will discount op cost of MAC ops with a - constant zero weight, and the mem cost of constant zero weights. - """ + constant zero weight, and the mem cost of constant zero weights.""" + combined_results = {} if isinstance(model_filename_or_wrapper, ModelWrapper): model = model_filename_or_wrapper else: @@ -104,25 +110,51 @@ def inference_cost( model = model.transform(GiveReadableTensorNames()) if output_onnx is not None: model.save(output_onnx) - ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity)) - bops, macs = compute_bops_and_macs(ret) - mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(ret, "mem_w") - mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(ret, "mem_o") - ret["total_bops"] = bops - ret["total_macs"] = macs - ret["total_mem_w_bits"] = mem_w_bits - ret["total_mem_w_elems"] = mem_w_elems - ret["total_mem_o_bits"] = mem_o_bits - ret["total_mem_o_elems"] = mem_o_elems - - if "unsupported" in ret: - ret["unsupported"] = str(ret["unsupported"]) - - if output_json is not None: - with open(output_json, "w") as f: - json.dump(ret, f, sort_keys=True, indent=2) - - return ret + ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, cost_breakdown)) + for i, res in ret.items(): + if i == "total_cost": + bops, macs = compute_bops_and_macs(res) + mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(res, "mem_w") + mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(res, "mem_o") + res["total_bops"] = bops + res["total_macs"] = macs + res["total_mem_w_bits"] = mem_w_bits + res["total_mem_w_elems"] = mem_w_elems + res["total_mem_o_bits"] = mem_o_bits + res["total_mem_o_elems"] = mem_o_elems + if "unsupported" in res: + res["unsupported"] = str(res["unsupported"]) + if output_json is not None: + with open(output_json, "w") as f: + json.dump(res, f, sort_keys=True, indent=2) + combined_results[i] = res + elif i == "optype_cost": + per_optype_breakdown = {} + for 
optype, op_res in res.items(): + bops, macs = compute_bops_and_macs(op_res) + mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(op_res, "mem_w") + mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(op_res, "mem_o") + op_res["total_bops"] = bops + op_res["total_macs"] = macs + op_res["total_mem_w_bits"] = mem_w_bits + op_res["total_mem_w_elems"] = mem_w_elems + op_res["total_mem_o_bits"] = mem_o_bits + op_res["total_mem_o_elems"] = mem_o_elems + per_optype_breakdown[optype] = op_res + combined_results[i] = per_optype_breakdown + else: + per_node_breakdown = {} + for node_name in res.keys(): + node_cost = res[node_name] + mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(node_cost, "mem_w") + mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(node_cost, "mem_o") + node_cost["total_mem_w_bits"] = mem_w_bits + node_cost["total_mem_w_elems"] = mem_w_elems + node_cost["total_mem_o_bits"] = mem_o_bits + node_cost["total_mem_o_elems"] = mem_o_elems + per_node_breakdown[node_name] = node_cost + combined_results[i] = per_node_breakdown + return combined_results def main(): diff --git a/tests/analysis/test_inference_cost_breakdown.py b/tests/analysis/test_inference_cost_breakdown.py new file mode 100644 index 00000000..b798eaf0 --- /dev/null +++ b/tests/analysis/test_inference_cost_breakdown.py @@ -0,0 +1,88 @@ +# Copyright (c) 2024 Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# * Neither the name of qonnx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import pytest + +import os +import urllib.request + +from qonnx.analysis.inference_cost import aggregate_dict_keys +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.util.cleanup import cleanup +from qonnx.util.inference_cost import inference_cost as infca + +download_url = "https://github.com/onnx/models/raw/main/validated/vision/" +download_url += "classification/resnet/model/resnet18-v1-7.onnx?download=" + +model_details = { + "resnet18-v1-7": { + "description": "Resnet18 Opset version 7.", + "url": download_url, + "enc": { + "a": "op_mac_FLOAT32_FLOAT32", + "b": "total_mem_w_bits", + "c": "total_mem_w_elems", + "d": "total_mem_o_bits", + "e": "total_mem_o_elems", + }, + }, +} + + +def download_model(test_model, do_cleanup=False, return_modelwrapper=False): + qonnx_url = model_details[test_model]["url"] + # download test data + dl_dir = "/tmp" + dl_file = dl_dir + f"/{test_model}.onnx" + ret = dl_file + if not os.path.isfile(dl_file): + urllib.request.urlretrieve(qonnx_url, dl_file) + if 
do_cleanup: + out_file = dl_dir + f"/{test_model}_clean.onnx" + cleanup(dl_file, out_file=out_file, override_inpsize=1) + ret = out_file + if return_modelwrapper: + ret = ModelWrapper(ret) + return ret + + +@pytest.mark.parametrize("test_model", model_details.keys()) +def test_inference_cost_breakdown(test_model): + test_details = model_details[test_model] + model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) + inf_cost = infca(model, discount_sparsity=False, cost_breakdown=True) + print(inf_cost.keys()) + t_cost = inf_cost["total_cost"] # total cost + op_cost = aggregate_dict_keys(inf_cost["optype_cost"]) # cost per optype + n_cost = aggregate_dict_keys(inf_cost["node_cost"]) # cost per node. + enc = test_details["enc"] + assert t_cost[enc["a"]] == op_cost[enc["a"]] == n_cost[enc["a"]], "inf discrepancy" + assert t_cost[enc["b"]] == op_cost[enc["b"]] == n_cost[enc["b"]], "inf discrepancy" + assert t_cost[enc["c"]] == op_cost[enc["c"]] == n_cost[enc["c"]], "inf discrepancy" + assert t_cost[enc["d"]] == op_cost[enc["d"]] == n_cost[enc["d"]], "inf discrepancy" + assert t_cost[enc["e"]] == op_cost[enc["e"]] == n_cost[enc["e"]], "inf discrepancy" From d1207422626d70084e18c4a526dd43e440ac2825 Mon Sep 17 00:00:00 2001 From: Harish <62412574+Harsh9650@users.noreply.github.com> Date: Thu, 15 Feb 2024 12:06:35 +0000 Subject: [PATCH 34/83] Update inference_cost.py --- src/qonnx/util/inference_cost.py | 34 ++++++++++++-------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/src/qonnx/util/inference_cost.py b/src/qonnx/util/inference_cost.py index 22ee140d..7a212321 100644 --- a/src/qonnx/util/inference_cost.py +++ b/src/qonnx/util/inference_cost.py @@ -69,6 +69,14 @@ def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"): total_mem_elems += v return total_mem_bits, total_mem_elems +def assign_mem_bits_and_elems(res_dict): + mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(res_dict, "mem_w") + mem_o_bits, 
mem_o_elems = compute_mem_bits_and_elems(res_dict, "mem_o") + res_dict["total_mem_w_bits"] = mem_w_bits + res_dict["total_mem_w_elems"] = mem_w_elems + res_dict["total_mem_o_bits"] = mem_o_bits + res_dict["total_mem_o_elems"] = mem_o_elems + return res_dict def inference_cost( model_filename_or_wrapper, @@ -114,14 +122,9 @@ def inference_cost( for i, res in ret.items(): if i == "total_cost": bops, macs = compute_bops_and_macs(res) - mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(res, "mem_w") - mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(res, "mem_o") + res = assign_mem_bits_and_elems(res) res["total_bops"] = bops res["total_macs"] = macs - res["total_mem_w_bits"] = mem_w_bits - res["total_mem_w_elems"] = mem_w_elems - res["total_mem_o_bits"] = mem_o_bits - res["total_mem_o_elems"] = mem_o_elems if "unsupported" in res: res["unsupported"] = str(res["unsupported"]) if output_json is not None: @@ -132,31 +135,20 @@ def inference_cost( per_optype_breakdown = {} for optype, op_res in res.items(): bops, macs = compute_bops_and_macs(op_res) - mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(op_res, "mem_w") - mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(op_res, "mem_o") + op_res = assign_mem_bits_and_elems(op_res) op_res["total_bops"] = bops op_res["total_macs"] = macs - op_res["total_mem_w_bits"] = mem_w_bits - op_res["total_mem_w_elems"] = mem_w_elems - op_res["total_mem_o_bits"] = mem_o_bits - op_res["total_mem_o_elems"] = mem_o_elems per_optype_breakdown[optype] = op_res combined_results[i] = per_optype_breakdown else: per_node_breakdown = {} for node_name in res.keys(): - node_cost = res[node_name] - mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(node_cost, "mem_w") - mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(node_cost, "mem_o") - node_cost["total_mem_w_bits"] = mem_w_bits - node_cost["total_mem_w_elems"] = mem_w_elems - node_cost["total_mem_o_bits"] = mem_o_bits - node_cost["total_mem_o_elems"] = mem_o_elems - 
per_node_breakdown[node_name] = node_cost + node_res = res[node_name] + node_res = assign_mem_bits_and_elems(node_res) + per_node_breakdown[node_name] = node_res combined_results[i] = per_node_breakdown return combined_results - def main(): clize.run(inference_cost) From 04619a397670dd3c76001a30ddf6c82bab5356be Mon Sep 17 00:00:00 2001 From: Harish Date: Thu, 15 Feb 2024 15:04:31 +0000 Subject: [PATCH 35/83] revised version of quantize_graph --- src/qonnx/transformation/quantize_graph.py | 14 +++++++------- tests/transformation/test_quantize_graph.py | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/qonnx/transformation/quantize_graph.py b/src/qonnx/transformation/quantize_graph.py index 20feb049..230650bd 100644 --- a/src/qonnx/transformation/quantize_graph.py +++ b/src/qonnx/transformation/quantize_graph.py @@ -140,15 +140,15 @@ def adjust_graph(model, input_positions, node_name, quantized_nodes): class QuantizeGraph(Transformation): """This transformation can be used to introduce a Quant node for a specific type of node in the graph. - Users would be able to specify the location of the quant node by providing the input and output indexs + Users would be able to specify the location of the quant node by providing the input and output index as the parameters. 1) Expectations: a) Onnx model in the modelwraper format. - b) Model must be cleaned using cleanup_model qonnx.util.cleanup.cleanup_model() + b) Model must be cleaned using qonnx.util.cleanup.cleanup_model() c) Batchsize to be set. - 2) S.teps to transform are: + 2) Steps to transform are: Step1: Finding the input for the quant node. Step2: Finding the consumer of the quant node output. Step3: Finding the shape for the output tensor of quant node. @@ -157,7 +157,7 @@ class QuantizeGraph(Transformation): 3) Input: A dict "quantnode_map" specifying the criterion, positions, and input parameters like - scale, bitwidth, zeropoint, and others for the particular quantnode. 
+ scale, bitwidth, zeropoint, and others for a specific quantnode. Criterion: a) name: This will allow users to add quant nodes for specific node like "Conv_0" and "Gemm_0". @@ -171,9 +171,9 @@ class QuantizeGraph(Transformation): in comparison to "op_type". Positions: ("input", index) or ("output", index) - a) "input": specifies that the user want to quantize the input of the selected node. - b) "output": specifies that the user want to quantize the input of the selected node. - c) index: specifies which input/output to quantize (as a node can have multiple inputs and outputs) + a) "input": indicates that the user want to quantize the input of the selected node. + b) "output": indicates that the user want to quantize the output of the selected node. + c) index: refers to the input/output index to quantize (a node can have multiple inputs and outputs) Parameters (to quant node) are provided as (scale, zeropoint, bitwidth, narrow, signed, rounding_mode) diff --git a/tests/transformation/test_quantize_graph.py b/tests/transformation/test_quantize_graph.py index e613bd17..867f9b34 100644 --- a/tests/transformation/test_quantize_graph.py +++ b/tests/transformation/test_quantize_graph.py @@ -91,7 +91,7 @@ def to_verify(model, test_details): if by == "name": sample_node_name = random.choice(list(test_details["name"].keys())) - sample_node = model.node_from_name(model, sample_node_name) + sample_node = model.get_node_from_name(sample_node_name) sample_pos = random.choice(test_details["name"][sample_node_name]) if by == "op_type": node_type = random.choice(list(test_details["op_type"].keys())) From be9a9f838ab7bd9a107ab937d453f64b22da22f4 Mon Sep 17 00:00:00 2001 From: Harish <62412574+Harsh9650@users.noreply.github.com> Date: Wed, 21 Feb 2024 13:07:52 +0000 Subject: [PATCH 36/83] Update inference_cost.py --- src/qonnx/util/inference_cost.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/qonnx/util/inference_cost.py 
b/src/qonnx/util/inference_cost.py index 7a212321..30ac677d 100644 --- a/src/qonnx/util/inference_cost.py +++ b/src/qonnx/util/inference_cost.py @@ -44,7 +44,6 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes - def compute_bops_and_macs(inf_cost_dict): total_bops = 0.0 total_macs = 0.0 @@ -57,7 +56,6 @@ def compute_bops_and_macs(inf_cost_dict): total_macs += v return total_bops, total_macs - def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"): total_mem_bits = 0.0 total_mem_elems = 0.0 @@ -98,6 +96,7 @@ def inference_cost( datatype inference and constant folding. Strongly recommended. :param discount_sparsity: If set, will discount op cost of MAC ops with a constant zero weight, and the mem cost of constant zero weights.""" + combined_results = {} if isinstance(model_filename_or_wrapper, ModelWrapper): model = model_filename_or_wrapper @@ -118,7 +117,8 @@ def inference_cost( model = model.transform(GiveReadableTensorNames()) if output_onnx is not None: model.save(output_onnx) - ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, cost_breakdown)) + ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, + cost_breakdown)) for i, res in ret.items(): if i == "total_cost": bops, macs = compute_bops_and_macs(res) @@ -148,10 +148,9 @@ def inference_cost( per_node_breakdown[node_name] = node_res combined_results[i] = per_node_breakdown return combined_results - + def main(): clize.run(inference_cost) - if __name__ == "__main__": main() From fad667fa976ad05205c131a09ffd46b81e6ccb85 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 23 Feb 2024 11:27:44 +0100 Subject: [PATCH 37/83] fix linting problems --- src/qonnx/core/datatype.py | 2 ++ tests/analysis/test_matmul_mac_cost.py | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/qonnx/core/datatype.py b/src/qonnx/core/datatype.py index 84365289..f37d4eea 
100644 --- a/src/qonnx/core/datatype.py +++ b/src/qonnx/core/datatype.py @@ -144,6 +144,7 @@ def to_numpy_dt(self): def get_canonical_name(self): return "FLOAT32" + class Float16Type(BaseDataType): def bitwidth(self): return 16 @@ -175,6 +176,7 @@ def to_numpy_dt(self): def get_canonical_name(self): return "FLOAT16" + class IntType(BaseDataType): def __init__(self, bitwidth, signed): super().__init__() diff --git a/tests/analysis/test_matmul_mac_cost.py b/tests/analysis/test_matmul_mac_cost.py index 534618aa..f46af7a9 100644 --- a/tests/analysis/test_matmul_mac_cost.py +++ b/tests/analysis/test_matmul_mac_cost.py @@ -27,19 +27,19 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pytest -import qonnx from pkgutil import get_data + import qonnx.util.inference_cost as infc -from qonnx.util.cleanup import cleanup_model from qonnx.core.modelwrapper import ModelWrapper +from qonnx.util.cleanup import cleanup_model def test_matmul_mac_cost(): - raw_model = get_data("qonnx","data/onnx/matmul_update/sdp.onnx") + raw_model = get_data("qonnx", "data/onnx/matmul_update/sdp.onnx") model = ModelWrapper(raw_model) cleaned_model = cleanup_model(model) - # Two Matmul layers with shape (i_shape, w_shape, o_shape), L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) + # Two Matmul layers with shape (i_shape, w_shape, o_shape), L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) + # and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False) - mac_cost = inf_cost_dict['op_mac_FLOAT32_FLOAT32'] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 + mac_cost = inf_cost_dict["op_mac_FLOAT32_FLOAT32"] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 assert mac_cost == 1048576.0, "Error: discrepancy in mac cost." 
From 5e2d0b808a3333157be6b6b397b9983f88ab7ec9 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 23 Feb 2024 14:47:43 +0100 Subject: [PATCH 38/83] [Wrapper] explicitly return None for name/index finder functions --- src/qonnx/core/modelwrapper.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/qonnx/core/modelwrapper.py b/src/qonnx/core/modelwrapper.py index f21efdab..2abf9d9d 100644 --- a/src/qonnx/core/modelwrapper.py +++ b/src/qonnx/core/modelwrapper.py @@ -532,7 +532,7 @@ def get_non_finn_nodes(self): return list(filter(lambda x: not util.is_finn_op(x.domain), self.graph.node)) def get_node_index(self, node): - """Returns current index of given node.""" + """Returns current index of given node, or None if not found.""" n_ind = 0 try: for n in self.graph.node: @@ -541,15 +541,17 @@ def get_node_index(self, node): n_ind += 1 except ValueError: return None + return None def get_node_from_name(self, node_name): - """Returns the node with the specified name.""" + """Returns the node with the specified name, or None if not found.""" try: for node in self.graph.node: if node.name == node_name: return node except ValueError: return None + return None def get_tensor_layout(self, tensor_name): """Returns the data layout annotation of tensor with given name. 
From 565d97845d47e13703801a4250a26692449f702a Mon Sep 17 00:00:00 2001 From: i-colbert Date: Mon, 19 Feb 2024 19:37:51 -0800 Subject: [PATCH 39/83] Feat (transformation): adding ResizeConvolutionToDeconvolution transformation --- .../transformation/resize_conv_to_deconv.py | 262 ++++++++++++++++++ 1 file changed, 262 insertions(+) create mode 100644 src/qonnx/transformation/resize_conv_to_deconv.py diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py new file mode 100644 index 00000000..885103bf --- /dev/null +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -0,0 +1,262 @@ +# Copyright (c) 2024, Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of QONNX nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np +import warnings +from onnx import helper + +from qonnx.core.datatype import DataType +from qonnx.custom_op.general.quant import quant +from qonnx.transformation.base import Transformation +from qonnx.util.basic import get_by_name + + +def _weight_convolution(cnv_weights: np.ndarray, scale: int) -> np.ndarray: + """Adaptation of the weight convolution algorithm as proposed in Colbert et al. (2021) - `An + Energy-Efficient Edge Computing Paradigm for Convolution-Based Image Upsampling`""" + ofm_ch = cnv_weights.shape[0] + ifm_ch = cnv_weights.shape[1] + kh_size = cnv_weights.shape[2] + kw_size = cnv_weights.shape[3] + assert kh_size == kw_size, "Only square channels supported currently." 
+ # NOTE - this is different than the convolution kernels, which are OC x IC x KH x KW + # rather than IC x OC x KH x KW + dcnv_weights = np.zeros((ifm_ch, ofm_ch, kh_size + scale - 1, kw_size + scale - 1)) + for oc in range(ofm_ch): + for ic in range(ifm_ch): + for i in range(scale): + for j in range(scale): + dcnv_weights[ic, oc, i : i + kh_size, j : j + kw_size] += np.rot90(cnv_weights[oc, ic], 2, [0, 1]) + return dcnv_weights + + +def _auto_pad_to_explicit_padding(autopad_str, idim_h, idim_w, k_h, k_w, stride_h, stride_w, n_dims): + pad_total_h = (stride_h - 1) * idim_h - stride_h + k_h + pad_total_w = (stride_w - 1) * idim_w - stride_w + k_w + pad_half_small_h = int((pad_total_h / 2)) + pad_half_small_w = int((pad_total_w / 2)) + pad_half_large_h = pad_total_h - pad_half_small_h + pad_half_large_w = pad_total_w - pad_half_small_w + if autopad_str == "VALID": + return [0 for i in range(2 * n_dims)] + elif autopad_str == "SAME_UPPER": + return [pad_half_small_h, pad_half_small_w, pad_half_large_h, pad_half_large_w] + elif autopad_str == "SAME_LOWER": + return [pad_half_large_h, pad_half_large_w, pad_half_small_h, pad_half_small_w] + else: + raise Exception("Unsupported auto_pad: " + autopad_str) + + +class ResizeConvolutionToDeconvolution(Transformation): + """Replaces resize convolution layers (e.g., nearest neighbor upsample + same-padded convolution) + with deconvolution layers using the weight convolution algorithm. 
Currently does not support + resize convolutions that use bilinear or bicubic upsampling""" + + def __init__(self, maintain_bit_width: bool = False): + super().__init__() + self.maintain_bit_width = maintain_bit_width + + def apply(self, model): + graph = model.graph + node_ind = 0 + graph_modified = False + for n in graph.node: + node_ind += 1 + if n.op_type == "Resize": + resize_input = n.input[0] + resize_output = n.output[0] + consumers = model.find_consumers(resize_output) + + if len(consumers) == 0: + continue + + if len(consumers) > 1 and any([c.op_type == "Conv" for c in consumers]): + warnings.warn("Skipping resize conv that has resize with multiple consumers. Not yet supported.") + continue + + conv = consumers[0] + if conv is not None and conv.op_type == "Conv": + # TODO: extend support to other resize convolutions + resize_mode = get_by_name(n.attribute, "mode").s.decode() + if resize_mode != "nearest": + warnings.warn(f"Skipping resize conv with resize_mode={resize_mode}. Not yet supported.") + continue + + group = get_by_name(conv.attribute, "group").i + if group != 1: + warnings.warn("Skipping resize conv with group > 1. Not yet supported.") + continue + + # The weights of the convolution can be generated by another input op if the model is + # quantized. 
Preliminary support for quantization focuses on QONNX ops (i.e., Quant) + weight_name = conv.input[1] + weight_prod = model.find_producer(weight_name) + + # If the producer is None, then it is initialized by the Conv node + if weight_prod is None: + W_conv = model.get_initializer(weight_name) # (OC, IC, KH, KW) + + # If the convolution weights are not initialized by the convolution, then we need to + # find the node is producing the weights + else: + if weight_prod.op_type == "Quant": + [q_w_name, q_s_name, q_zp_name, q_bw_name] = weight_prod.input + W_conv = model.get_initializer(q_w_name) + W_scale = model.get_initializer(q_s_name) + W_scale = np.moveaxis(W_scale, 0, 1) + W_zeropt = model.get_initializer(q_zp_name) + W_bitwidth = model.get_initializer(q_bw_name) + W_signed = get_by_name(weight_prod.attribute, "signed").i + W_narrow = get_by_name(weight_prod.attribute, "narrow").i + W_rounding_mode = get_by_name(weight_prod.attribute, "rounding_mode").s.decode() + else: + warnings.warn( + f"Weight producer is {weight_prod.op_type}, not a QONNX Quant node. Not yet supported." 
+ ) + continue + + kshape = get_by_name(conv.attribute, "kernel_shape").ints + ifm_ch = model.get_tensor_shape(conv.input[0])[1] # assume NCHW + ofm_ch = model.get_tensor_shape(conv.output[0])[1] # assume NCHW + ifm_dim_h = model.get_tensor_shape(conv.input[0])[2] # assume NCHW + ifm_dim_w = model.get_tensor_shape(conv.input[0])[3] # assume NCHW + ofm_dim_h = model.get_tensor_shape(conv.output[0])[2] # assume NCHW + ofm_dim_w = model.get_tensor_shape(conv.output[0])[3] + if (ifm_dim_h != ofm_dim_h) or (ifm_dim_w != ofm_dim_w): + warnings.warn("Skipping resize conv, only same-padded convs supported.") + continue + dilation_attr = get_by_name(conv.attribute, "dilations") + if dilation_attr is not None: + dilation = dilation_attr.ints + else: + dilation = [1, 1] # default value + if dilation != [1, 1]: + warnings.warn("Skipping resize conv, only supporting dilation=[1,1].") + continue + # get resize scaling attribute + resize_scales = model.get_initializer(n.input[2]) # assume NCHW + if not (resize_scales[0] == resize_scales[1] == 1): + warnings.warn("Skipping resize conv, scaling along batch or channel dimension not supported.") + continue + if resize_scales[2] != resize_scales[3]: + warnings.warn("Skipping resize conv, non-square scaling not yet supported.") + continue + resize_scale = int(resize_scales[2]) # TODO: extend to vector once non-square scaling supported + + W_deconv = _weight_convolution(W_conv, resize_scale).astype(np.float32) + kh_size_deconv = kshape[0] + resize_scale - 1 + kw_size_deconv = kshape[1] + resize_scale - 1 + assert W_deconv.shape == ( + ifm_ch, + ofm_ch, + kh_size_deconv, + kw_size_deconv, + ), "The resulting deconvolution weight shape is incorrect." 
+ + stride_h = get_by_name(conv.attribute, "strides").ints[0] + stride_w = get_by_name(conv.attribute, "strides").ints[1] + # handle both auto_pad and explicit padding + auto_pad = get_by_name(conv.attribute, "auto_pad") + if auto_pad is not None: + # find equivalent specified padding + auto_pad = auto_pad.s.decode("utf-8") + if auto_pad == "NOTSET": + # use specified padding + pad = get_by_name(conv.attribute, "pads").ints + else: + pad = _auto_pad_to_explicit_padding( + auto_pad, + ifm_dim_h, + ifm_dim_w, + kshape[0], + kshape[1], + stride_h, + stride_w, + len(model.get_tensor_shape(n.input[0])) - 2, + ) + else: + # use specified padding + pad = get_by_name(conv.attribute, "pads").ints + + # if `maintain_bit_width`, then we use the quant parameters to + # re-quantize the weights after the weight convolution + if self.maintain_bit_width and (weight_prod is not None): + W_deconv_quant = quant(W_deconv, W_scale, W_zeropt, W_bitwidth, W_signed, W_narrow, W_rounding_mode) + if not np.allclose(W_deconv, W_deconv_quant): + warnings.warn("Clipping error introduced, consider `maintain_bit_width=False`.") + + # if not `maintain_bit_width`, then we adjust the bit width to + # account for the clipping errors. + elif weight_prod is not None: + W_int = (W_deconv / W_scale) + W_zeropt + W_int = W_int.round() # handling rounding errors + if W_int.min() < 0: + if np.abs(W_int).min() > W_int.max(): + tdt = DataType.get_smallest_possible(W_int.min()) + else: + tdt = DataType.get_smallest_possible(-W_int.max() - 1) + else: + tdt = DataType.get_smallest_possible(W_int.max()) + assert np.vectorize(tdt.allowed)(W_int).all(), "Error: issue finding data type to support." + if W_bitwidth != tdt.bitwidth(): + W_bitwidth = np.array(tdt.bitwidth(), dtype=np.float32) + assert tdt.signed() == W_signed, "Error: should maintain sign of the weights." 
+ + deconv_inps = [resize_input, weight_name] + # Make sure to keep the biases from the convolution + if len(conv.input) == 3: + bias_name = conv.input[2] + B_conv = model.get_initializer(bias_name) # (OC,) + deconv_inps.append(bias_name) # add to the inputs + model.set_initializer(bias_name, B_conv) + deconv_outs = conv.output + deconv_pad = pad + deconv_node = helper.make_node( + "ConvTranspose", + deconv_inps, + deconv_outs, + kernel_shape=[kh_size_deconv, kw_size_deconv], + strides=[resize_scale, resize_scale], + pads=deconv_pad, + group=group, + dilations=dilation, + ) + W_deconv_init = weight_name + if weight_prod is not None: + W_deconv_init = q_w_name + model.set_initializer(q_s_name, W_scale) + model.set_initializer(q_bw_name, W_bitwidth) + model.set_initializer(W_deconv_init, W_deconv) + model.set_tensor_shape(weight_name, list(W_deconv.shape)) + graph.node.insert(node_ind, deconv_node) + # remove old nodes + graph.node.remove(n) + graph.node.remove(conv) + graph_modified = True + + return (model, graph_modified) From cfc8dda9c417f0d8b1c91d7bbf441541f9abb897 Mon Sep 17 00:00:00 2001 From: i-colbert Date: Mon, 19 Feb 2024 19:38:47 -0800 Subject: [PATCH 40/83] Feat (transformation): restructure onnx data for upsampling tests --- .../bsd300x3-espcn/nn_resize/float_model.onnx | Bin 0 -> 246487 bytes .../bsd300x3-espcn/nn_resize/quant_model.onnx | Bin 0 -> 254227 bytes .../{ => subpixel}/float_model.onnx | Bin .../{ => subpixel}/quant_model.onnx | Bin .../transformation/test_subpixel_to_deconv.py | 4 ++-- 5 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/float_model.onnx create mode 100644 src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/quant_model.onnx rename src/qonnx/data/onnx/bsd300x3-espcn/{ => subpixel}/float_model.onnx (100%) rename src/qonnx/data/onnx/bsd300x3-espcn/{ => subpixel}/quant_model.onnx (100%) diff --git a/src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/float_model.onnx 
b/src/qonnx/data/onnx/bsd300x3-espcn/nn_resize/float_model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..5fe61c18236968b072393f4c676717b080432f76 GIT binary patch literal 246487 zcmce-d0b9k_cyLNl~hzpX;4ZiA=GvDI%Q1AP(m6tYtUqhd`k16G>S$k87fj~xXxY& zAqgRpF+?)YbM)(;p5OPrpZod!@%!U>U9W4{Ypt`-ey=^8wcpoX(vnJ2!P~+&hI;s_ zOH4DKW<1qG7&lU8l8oHOprEZ57Aw61Hn~n!k~jJLkBytcgExh{PF0uqds$cfZ&I0h z5t;wc%Z&KP)pIunZFZeu`tQYQN~29Yh7_hIe^>vnI%WTdR9F0OIwK{S;hug0?%{qL zgTgFDEJaF0q>SWbBz!_QZVDbVR1!5(mXY)K3JvlKa1HZy5B9Q@u$1_lk)n)vu)AlN zrRYCie-kOlNQH%m`gwZ&t0oyD^IsdHG|D7&2-bffmid2$|AgHvBJVbbq|F3%BKN|8c4@~>-9{7j&pB~sF`Y#Xshqa8(-^cO{6ThHv zuh8JXK@AIc4+?iRb^X_Hm(lt^nEcB?N%P+gR(gf`MR@(M>(&1;cK_Ec{-M^D{KvPE ziOe{UjT=Kf{es-Xyao$w+uPM$dDt*ZzC=W1Xby`1-Ciol%RSU9Ec|cAV`Sw1S^7s^65atrHyhH( zw1~+3f45sk@o#f~hVkD%jkkFD`S^w_sr?h_-(>$WEEN7z^?zk-OqKaRH%Uo!y1Dvr zlc8?kyAHY z-+U~VXWm7PCZ6A?G9rJi=BB*&;I<~rq~cSQXu$$aGV+23eg9IO`EH{@ykn|qp>hMU z`5ww_x7aCkUcnOWfDN=aehX=cujE|as_8l1>D(kHlo%}D!hpIMYM3X{-BqXPkA3yr zijudS$o<<)@*`s!sMbTrTBOlsf2k0o+_?hV++$>?r#Gh`@QGWkH;Sa58$o*|4w~1! 
zo<^%yNpYo4+ek}?9r7E3ND=#gQXp9Q(q^T@bQ`^dxFNkR;M%UtchEI9b;F>x?#WV)@(nbiZT-c&+|zC|))A&Obe03HUM$NvE+0wj$9pgrz7^1p#j}Wn zu`X$-=;RtlJr}O{qeQCD#E`vg2^o7mgi1vmCU3W;nP00*Bms?1oQTW+;Wkg4OV2!M zqc@zV(I*EvrfJRrGP!$vQ|1YGu5>&v*!FP*v6*T?mAquh#r#!V$9V~oxosS&=nrGE zqjZ@sZ>>n!=v72>c3E?QeioOMIgUIgLe5kF9?fsIA?msZIfo0m z^q^cU{r#5+31Wlj+#jon45LWG6|*_h?`xRDCZ$BT>MpnHoH2K8jwFR?cZJL69ia+F z3iN!*aQfVPjd0QdmUKLHC#U7b$--sIbYKm`NmiOr$ z6;0RAzD_qK93`tW%t`s{_0+1vm^(e>+mRaY8Qr)IRC<4pP??pZz1}An!()^jurH@g z+6M(KOXbP5h2x2qj5nFPHHr?F8F9DGo?3hfHKscQN7Fu+Gs4hyx47Ty4axQ6se%x5 zD{j|WZJIc71^KdZBJFUHB%8e{efg)58W_i^hW95E=wAKux=atl%aoW;kOrC%Y zZ$8Mq>Ag$mN%RRsM(q_$cI@JwU7Shko&1@0abvFi_F18dNd~DCJmRueWiygYvM|d0 zK6m}H7ZcUr#l&fPlKk&?guVsSsQ0R6|Aqn?~CNvC(bmeGk54Qbpw8R`t%8TGrrgn>B|ncyj>>9w@q+_h`_ z1d>L75xZx*1BoOXH1gj_yA2Sw~@!@W(U>WC?^XqRX7-EVO%FQ$_n zj(eGrPu6o@`FDg4i>A>iha%4QiZqc-yu;-jE+e($%9;APGX!&JFQ<4Tj(L4smzlqH zC-ePjEMsXmi8~{6S6HX6NVl(jEUaHTq3N9O9TKYaSm>ykM2(i!(8YI)xa-a%Xwk3j zxs8?gECKk5we_*n~KMFHz}!%haB05Kg`LiFuK{f%G-y5s}Gt zoUFtLrdxRzxvIB?(dcgI=jv> zr54hH*bRL`&n@4%GYtv!nV~v8s5^szc^sMZ@U!cNyGymvy8$N|{^y zsf#HVW-`8}CXfdu%)(I*nfdm+IbP={vvu8MPPaxt+xE&3Es1<`CPbUMcEvDLLfxtI zXKOlIWCt;s-pD-9olBzv{mJe$T@v~yid5re?$uaA9SpKbQtUU*<(>?Ui?blwCY$Ju zF>{G^BoQi$e`WT*@1qLuw~)BZRL0!9i>p@8Vw9cNkpOKi%AFfzQZ3ZUmXa~t6aQMS zBkc-zXv9s%(nX5ixRXOtJulF~JKwmj1rD??vVkzgQ|Zyjt>lqQ1vmJ_k!E=MQMEJ~ z5*(5x2-9+7aD5)9@TraZU6Uog@!kS|WjmUA^%HYO)(3^HHoXz>C1K0dwBDr{1 zjsz6xkvaD3iR9`mq1)jsvZkbfQ;hyi=e_FZ96jC%b|w8_Tqd^&9xqDcx=TXo>7(}4 z_)iyOlA=U|JR8W2-&C;d@^ErJDU|kz2UEk?iGpRnB&na0J<%`sC86(fg>%L9>4SD1 zdj5ofxHOI@PGQ#cO@+2VGx?ez_mJO5 z8XUV0vk<9`WX5+#V=*t^1aGT7FSJU;>{R8 zyUEOxiw;C0ay|W&vw}8>>>>W;QcU*a45nJXnC9WUnxgODtY5 z?0b8ZdwO9sEk0twq!yRZXNROPZR1G7-nhps`Iyi7{{BXHMfM2a7%pez1M0bsPG@@K z-Zzf1J3xAaehOzU>Jk$8CQwM~5u{dMW7I;Wh;hMVZndjDQQ?*8V1*}lp<0wy9={~0 z8B`_7-0SAX&=jHX#2a+UlPWScndjo^E@6dvH`6zH2OX|_opBZ%A!b!^WZ{Bt0{WMj z8ruuZw=@lEQK`%+rHrD(JZ^EDhw2OV8^^4Amqcd=X9(S;Wtr3Ci@4r99fF48lAOYu zIb=gt8kaP04582On17x1oE(3AkE^V`D_HZD%}mZ5&IDx#kn*%g%+81~PNMEO 
z`RcDk^d`Szj66+<<&Hvbk5YrsbH4|1Ocr6>EhZ6j!EVxRD&&m5v@-D@imA@lkHQP+H<5XSQ67iP`88ct71jD#6Y zp>}QAMDx~g(vav#4J9D{0BJ zlO)mmK9j9@mvMT0i5~2^&F#GSjoJCJfg7#1gZPx)=FS(}H%kXC5xlrqDA20=V!m>7 zG}V!NCybHvqHj|a=t| zD&^GU2VsKwUgllTJMO%C6E`p6Hlvl-!i}67Pc67G=FQe&0wl*7mFST)q#%R5UVel2 zE-+`-KSpML-O%%V%26u%qM3_`^(D8gkI|q_$jojgTzOa=r@DIq&D_pXW?}@n^y~*S zcA6oH5Pc}zw{aZPov?(N!8qS!Kt>bAbItRsHxsYuFO)OFyqpg?FRw{%%wnT z{_X<3>ek8~@}j~Wk3Vu#RbuIxX#pgnN{h=jdPu(9kRvZMhZDnL8e~{b8OeW`OBW

F!ih(mp)rlshqI$6Nat*gh zSq!e0rjg?CUF6D_cck)A0ljK137cncq>l#eDHpMvX5^~T_UKaXL0Kwwo-&^va+gNk zK@GZbVmwuPXhD0Hx6m+@XQprNp*v58(raH`=_enCURS(LTlQUJ6h$s_6NKw&-~JO+ zHt43nY-$mw=6XnQjM&iHCuylSUp<>&pK?NSEP`W zPtn6~JxVBiVTxW;lyN~85oui>aH|wgR{f6Uo!?1Mf`Kgfd z!U!m->j$^@Rv^XB!KmFoXo2Er{`&+4D9m(%ds)XI@@OQsh}n==&k0d3T*}LiI*Xgt zCt#KSZ&F>dlxi;-1B~2hVk=w?MfXiXZPZBeboo>&_Nay?HoPMVV>fW&)xG3+&Q;Vi zD;CM$re6~XTZmR1)Nno4+qA?l94)Uu+IGe z3AR+?%Z?6%AXy`5R?|kYZ*TEeVLNYs!iHbBAqF3-Y~!wkn&P0|BYab8iOUtM!1vA) zcsYqugTW}gV5I>0!kZ9w6G7{9C5((rr=#{6Kx5?`Jkc?RIBrkn^445{j{_IOvCLz7GcEZSc|pI+}FKKnijk1E`!#EC_{xlS~< z(-#BwPJ@?`i|Lo~jwDv1j_F<1$PE;QVb;+V7!;T$OrP(7=VoS-ZF8p~d(M%`{PLSF z@@vD=FB_Rtq8o6up%#AJD2Y?{k3x0#Wq3L52DjFE6m|VN6P@Rd;LkdG;-AVTRC8x4 zx(?m%apX0|yQN5AZKp!_lys6EpQF&aWjNY7oxu6O5~vbC52xif3$3j4`CnRnboloW zShf5WM2sca_Jsqn7X`e;-ArgQos3dv`Y@2QWXE=n;A7-Q@k+bY;9*ENtU9us*J+mE zueapl4>c9oWuc0_g<|ZZ;#*NO5YeQfz}Cl*6X{6s8Z7V~fJ%!ks5;cTkJ2-a@nLVmDCjC|IBHK|CqHA>M8!bPm)*nOtB5G-^p8?lp5dahRWRjbE`_NK} zfzp}tLDOIdq*>(?@yl_TH_%VnK_TP)mnRw23CCq#3UK$-R$}b$j*Ge`V|!K|5t|i8 zQ~dgf=;TE1(JUuAuJxjz`PONAKW!s3XN&^wI(3guyyJy`U6bN1YfRAK^hwh9(N>Tv zD~)LAj~_3Mp>}pZ>5TI)skfVetobtnbzHrfW`#>6wZDf(@A=Lg&)rFrQ!J@-B+_kd zRrK?0X|APJ3L|1)(e;&$LpB0*F}_ECD?USoCvo_xwVl>HDI^_dMq}76PYnJkPo^wQ zp{tzBxsTrxn1-eaL@`~2tW`Zs-1VjL&bF0&$!LZ(w3TI@2TI|IK`s`#-D1Lh)gflE zo!l?DNB3Bc;=?R5aaW=VOuW7uRL1GCADowwEgi|U+-g7<*+pQ)zBD>8hC1-@X7 zdC9@A@FFt@uB(;Nug%+$8({^Ja_{MbVk15^qkxW?TMH$5&6t$G1j{F%M%yZN*k&Qd z##{b@9JgMOShk2hkK2S!hd2l~b43y3z3kWY`*2z%o?JX-zy}*C6BS`B+jOM~#=gGI zRC$-eski>b%6$|+>ApA^?Q+NF6FwxoEd@_SUE^jh+<@C>gwU5gU0lo4L2~SMG^*|I zq#N&*kO#w$qnmUVwKi5}4{X)N(N3xK+oM5xt+)}bt85{!K#_M?>4#ee=R!)CExvqE z1+!RlJhWF8>rzr_^WY;K6}1}rG}wdwXT*W& zi>FI($iY&#VeEs}Y%Deq(2p`46UXMjkH^WlH2F4;Irt8jS871C*f-*m{FqUDf1fU$ zmq&YB`cN!fk;K%^=NhZu65*(?WUZVcKkM2oaQRw{r|p_>ia``iZCKBg^n@_yEIrYA zasv5XcMx93EAV5ADJc6Lr>_{~t3=e#yh!}9`E9d`ov%zQzYh3NBN&*`zB zF@XpJrEr!-F=h%i*|NtEX@;W~pD=bRc4bV0#>gaQ)cYbTzfKmDc4*x0ScVD6VuD4pXm84u3T=M9GMg{6Kg>G)~mNz1(~8YDfNbEuGI{nd}bqj5-vrM~B` 
zJ#UBIM#EVQ8}fffBp5$D0B;xQk{%^RT&EEOJ6!YW`}C8jEZB=uB~I|Ut_@$GTaL5t zDB`u*KTzb;0fbAhP^MiC?^`moHRdN#xj7G`{z?GH@Ribzd!AMGMd zup&zbod42;n`h=@?AT83X>J)tH5WOHAGk(CN(#$AYxky z+5f7S91hk7{Y720u&#}?J$g#iVkB_Iq5W{cFPJ#J{KdSn-U!l*yXe|02dJaEEPVNR zmD7?1ICChL*o3$etDFfq`f>*Y1hcrT1YCn1YT9cH1bYTR-Ekw~ujO>xUNVWZkp`q{~rB27N$l@mk#hu2@ zO6SNzT!vSL)_lssRzkjHfx;C52}<7pN8j2(>QqDa&WGWw`SBOjdhI9Zbj_hxQli;u z%Tw7qGriCOv+=|FYxoN^@R#%une|(O*DJXOaUqYWw8n2D{xTA}JI}+Fk71bXDbH)! zG;k6s7tw+k!NIPlWb672er0D2t_t17-!TH1Qm%~Tq5?m6&J(nAjptL}ro*(vDe!WH zI;@O*N)2*v!V5zy7<;)DqynR$($@%e>q?lb&W&4WK9F3sjf<>2p$i%_;aALce5qkaQv)OgzuJh3bp z9_>)U5%+*Q?iNE0P6gxcu4w%2twXw-UC9KQg=EdF$1r)&j@vOrGcCN@4gE=V;4d%3 zevLaq_TM-`EcqxHXq*hcBTJxxEhIMfVZyPi=Yit+Aeh;p3E8fTiT2DLWMNJj9(Rew zmCf_e>7E^ymvssbv}%(-FJEyxX4?Qb=HnHmWYFq(MA(FhV3pwnD?i83yJmA?bV?Bk z!X2dYW-E8kCkn3R&}EKS$@KI0$(1oykgQ?`NjK7g9*_a02~nhCdK8?Ba)Q(39>%&{&YPuo@y_$ge(-~aU76lPcWa;4DBixc-QaD#?Hf&8y z!Zz!Nj9cUv`p)4D%@Xw{10uEf=lB5XsJ4*DyaN9IQUZT|z9Em+jsUNiCPDJqbbPg3 z7tfBI4lA7|!mIqzxV2uCbR>vk@rz@0^T2W{J=jbOI*ToQTN6=rqB%)eS%`}F#mMEg zZ_ue$niE-C(G>qA8ZCPMkU~3AjFc~-;jf%v!ZKw+g3T)IvhT(}26E(j#3^|7ECh#5 zRI_N@vU_NcCk6o_`@vx9HMq5_lSU_tW5>WbcI=0teS=>URUAJJH!gdEF4rYlW~B%! 
z(+a$!sK~E%mPYfaDP(Q78XmGK2fu~kSUcf6{p-*hvbysWNRF$4%b8Dc{{(Z?yD7=8 z`IN&}9U9J;T{;L7=5838cb3Mk9w0?Aj{JmeBjB=XJo208LTH&SGvd7+UlOFp-}7`q z?R#ycqy8J?rRm3a_$NWvzMJq&!jBo6Y|z}Ak9XGg!_5t`SmxA%eNScCB>^J*s6Yis z?=-`RC{QQ*w1{)4 z+>7$PP2`l`D>8P88}!8g#2NOZ@Y(te=%Oz!IBS!GJ`&fsPr?)`XiugI_D3*Gr2x;I zJj2~zYKHnc`FJt89>=pKs9S#z4U8sZt@&7xty+M~7cRj1)=qNh$_%Vcbb*4rY_xiR z22?FY;HPFeQ#Y~>!=fKS>>UyE%IF!RyxRthFYQ4q`JtH1VLjQ~BF8^49|e+Wh4^hk z0jiJGg`L?!cx=QboNr2L;;Vkh(mz1%UN*!nfo{0Br3Y)Dog^bo&d>(sd^#^u6W_cK z0`uufy5(S9U+a6gIjtM8$kS_V`ad*PI=uzk<6on{{9@jvZ!4zF%;sec;z8KljVJHkfv9vdzFc-EM$fN<0~hRg zJgvvdOKY(Anqr8SQw%p|1ch&HQ&DQ-Y*uLCi^Llj{?O(2GvLz3Zc@BG6|!Ov5(qBj@_tBxqO_B^Wb7qZFWH&JSw*JQfXKTA;(UV7QQSli7UxExfROO1G>t1?d7sFr!_9^p3T_ zs^(IaM~l!neheF0 zmljJp;dn268ypFr=ieu6>N{@o;Xtrl)(>ANN^vDJwoRFD6tGk24vh^wO5QA~!$64( zH1tCZvhg*f?~(`It-@jZcOTp{V;Da>>@xnURK^t*8<@gM!gt6U!pfJnaAk!U>A-Wu z=(95Z9DWds)Qg#%yJBGH9Z#iS7Yl!%+=RQ$eDT|KMfmIfTxxtaA4g4+$8xU|=vJ)- ztAoE&I8jT5Ehq4Pbu@^EoFVqNo`Ij*0KR%2%1Ya7vu*OC;8(C2N3PjN+duJO1kHfY;FIxH05#LX%BwaCz%*u*TDD|Id5h6Syn556U3kE zCZ>&pG*?9mcO{gW&zq&nwjZn|GSlbcnARE8%>~11sdoNh(mtw^9N6EF4YTE;$(aFH`8c{VX%@y<&!f>|8@T-E;^gr34060X zk-jJ@C4L^>*lLiB7BO4MP04KBW)MsBRh~mjr7pJKiz8F_8DPM%Rp^}lf(FRez(u!w zLUq%a^J0yd5t;{q8bVMpQ3azv&eX8@Fx}_fhBt>eDCMU$WQyMiU@Ju-y)6)W`%R$X z=_zu#{s8p6SEH4>6Hxm26f!1z1E}tJMWWeua`=HXKK~sLmLwWxz4-#)do0N0kVi!8 z>sk0QAj-hxRWWM3a`86wz6 z_yy6fvSm2pN(TE2@AC2M_CWXevFzmK-E`yXQEblORMz3(EdIR6On&%N7uJ={WzA}g zS+&_B@Nn}LSYuVnyQbfQl&j+W{-7j&e^wXnKW@O@yrWD$9vTgiV`BOKfPHvz^)BdZ ze@6{0)M&8d2m1SG14ec&;B-t{H5qn@@eu{U=kHTIxLxbPB7pfFI~Y(FU-X&??l+0 z-(OS3x7}!PixTJ5YTmUs8qQu*pVr1n8Nzjz*llg(Cx$!Q@LY=2Eu zX8eZKZ%@Ey!6)0bcQ&9 zZ~N$FGdGyMViTr!Mlo;KP2`6O`@rRb9-AJT2-gi|h(+WR2zQEPdsk({r#5R4ndO95 z_X=TB_c(Tb-zFGQwuxq%-ol9&6Crt7AK{)vfWeFlxRsj;?_NdV(R&0sZXbn4m!IfX zoB?j@3NS(M7%tU`r{3CcNug~p_7zEjLxly7soMkZoz3xKjSqboe_hxYa~2b$LeU`E z0^Bv~;rl}k-l|-b_j8}fRNehRyClvKwW?g4-YCsh=${p4->!rMH|pU_c`crilmLml zW7x|RZd2LHY+|1=0BrPVdbxHV)pk6DmLnsn9CHxX&%H=9ZjYhotR>ipE&6=%%cFF% 
zPzwT&B#@8NaU`?mF0qsDfSEm)xJ$lMHIB`pQjL72L zGP3)VBi^rC4;nJV+5R_*VEkq-vt49}Q*1Sb-?vY}R5=k|ZnrExE0mQ zltf37CbGKtYUzdKBZcBJ#UQ{O#dq?JD04lgJEb4_q=%|b}n zss~#7Nf5dB6l7iy5sdnIg^S%f8n&5TCDD)8!;^~w6rH6A;`sy`Q)=MH;^WM&bsaRk zY!tn{Xe#y2&n43(l^6wCTNK9cV)ldpu2Uxl@Br_L~B>z(c7 zOq?moet$+z_*4-ICw-`@&*Z1CH^Z~dNkn3LB3CizA(6Kd!varf-iNtHZ-tJ;gq2Q^ ztQKkRe6yN_ro2SE!S%34QXO>jvOscPKWLSR;h&H;h*E7Jor zVm-+1G~i1O!r@?DF;%#{6dzolK&19aF*1)UVVY(?$yjiQbZirV>Y5bJzcCxHtu!IW z^~d1Hn~6-)z-maH7fm*-%EX2DmV?aR4cH-em9#&Mqlwyp^CcMiaeOZQ_9Bg!d0$9g zMOk9s;#63&axcFBwg6?| z5GSDbE({&-KE=AXqM-WL8B3n#;{IRjNuq5IG+#YMRFAAiqugrrj9o;J?DWIeo)-Ag zGLfGr^#`j(LqXkRE!}!>AL@r6Lt)npfkMX>5V^5~Udd1)bJ#-EDxZhy-^!WSvLC49 zNFmelSO!aFHxti_mFRrhg#^zN1)~`k5A!4cLqVMz<@QX zje!$UN~m^fvZG3yw=@Kl(V$?`nWdo=gRdfqH-hmGL%2JG=U{*TBAOnXL#FNbr&Z6q@ltgasHaUC>K|<~ zzN;7WcS-O+SJab{hm`m?zwhFM=UZ^W$ro_@ktDmIbQ#$mc}cK;X$JSn`vG>m9giyy zJ)xR21-#OyOjv#37O;akaJxU7RC|f=8{c)K`T1_T$XE%L%a7r{e2&qIH-?%yj##^; z0XD4rOt*S1;nOZoz;`a=*tKE`tZdaRwxUs=1(jkbVx`%8dkm=3^j+jV>f$>kX}&4h zhBkV#&@G$6b**ft^6A$Y_aa3&XS0k=dl1FvS52h{Qm!z;`=+ze?<`sAi?RHr;z|4+ zxm2_sH=KW%+f2_r-48_%eAtgO^x3)PrjUJ7m4D)M5ge~AhNHQAaKvJ55Vt>v?w9NF z`uSRro~^^0-#%mE+)A0H%oKjawOyclG!ey;95FT1fju*Llb@1N%Qp^~@~tOhQCOqE zev{Zf6vAt<2^onX-7mp*pOb}+J)dw#!v*~MWh1N}Xh6|oN%nw@F~8pB0KR$IfI0`a zk@R=kuzZa)f8KHtEEbhyWnU}t)eY)wr`JY)jhHU>=ttvC_BJOwIG5d+?+E*+E@!P0 zy0CtqHVLiEfGw8`$>-6w?DDHU_-E!mnEG0Qjc(CIKOcsjZJY}c)po3kEC%`Xd)byP|;X|zk78v_zaDkvrAS%;OY&q zEVl&tH)(v9mnnvOr{mUZKk&zzRQwjL#@$&5<71kS{WwewJCDJPOd&|F&VZ7&@pwl( z9xqp)LT&x`^q`6=+;JgHlJhN6`?wmtc2Jz%HG++=Is@Ui1$>3lF8IO6@XNH+apsZ( z^iZ593il77IXbdi+q~H~aoX(GwX)no}EB@V4}5-<)KxHhj5R6go4lfG}@)Gn=utHouoC8V6J3Ecyq zh7XXIj~k#bb}WADkb~#Zh2-MUp=E_l33U0rBqqpwHhxVXNlf|IkZaw@;uv_`#k;XaEWw~da7PAk6w(wg1Mii z&~N1~8lbfaI?q4Cm+cJdfB8e)UzwryZ9Dpc-G$oMFXN>3mvR0O7m_~hDSbNs9F_Jg zM4e7RJ+(zRv!;Qb*-?oDhj*g2djY&#&0>@EGfs86fZts)jpeNwwybtOD4RW_slz*= zs^&6IwB81W7ss*34!yt&uNvW0(h1huO^Bs4g4xD#6b9sG@|Tnj^4-0LZ2Gl6Y?|3b zYrz&hwtN>X*R3Ig>2smWsfVbv4+xWry|^wD8{X)&0Qz?r;tU5RwmI-4INW}TyN~R` 
zhg;=@Wuvp{y4WGkLEVtw7O;RnK4S-OGW`!c%6Q1nG8@a^_w#_{X?f(K?_!Bpk8P-|5wln3bI ztao{Iq1tpt{LRpR=SSt)W0MTnfOSD6*P#_}eCdN+vtg`<>0W*rA3d~(sv$Nup+sJj zXP*}su}=I_*s$vWK2H5a5;bGV787HfX`F}c!=h2TI~3%Sqxlf6EIe9z8y45Zpwse; z7Ivc+usQRN;nVS1G{;w)t?G8;V>c zr;~@EtbC9fRs(b9Dq>_-J-xp7J2kG6gJK_Pm=-slCRaV?cD?YWF5w}lonVW)Ekk_j z;5$av(i~fdl9sfty`~H9KVtNKMalB_`)GEwK6&;gl>Cf)OT+v$;NArWGJh6Qi92Vw z%Y_+afzBLEIv+!)Mkvzi?;2FjQx+!*v~kPX1WIbo(zQoxnfb#GQS=s~vU3xrL|#L< z{f(|l$+Vb1WdSmZu~@66hH>@dxB$y5T(+||lWA*;3%w`dj}O^2>5(eT%%8@FTBt+n z<1l#E5sFs(eQ}jXJ058GjXm!x@cckAynijq20yAtz5BCpwDlJ(FRH+8?yqRD#0PRI z@e+Ui^Jw_9Pzsa!zmYF`r9@+|0-pA3fvdNIh)wDkIQK&o?7TGK{9z%OnaJQalZRZn z+X%FolY>*qIXeA7Im&K2#67-&@Sv%Jbc&@A(ila!(`qR4-Glvfc0K=S=Lh(tu#&v8 ztEA&@ZYGiqS+F&t99=@&$YL2?40o7D24uHmgUuLruu%#MW7?tXBk%(C6cp1vjANRU zVAy~-E3(jvceyo=|0M{;akCD>s;|SS$=fY(?(`8}*M1Mo8a*G1g6}h#va)!xErZ;= z^pnh*v4>7FiUzN`4|K!@K*>mNh}<{<%zikcrl~JH%`)N#G%gW(e7SJn8yWERTnx*% zWMc11X?{wfHNT)j5+2X{i(YX%0phdsarOjR-eIQ>e}2?slKN~EJ6riOi4FQiMBguf zSfx0633VvVG)l3L^6Xlpy2k7&{5S2!u-gra@uVgCs2 zLe@}vtc+^?%E0yBBG6eMOZN7>gPF4R%;(AZEq+0I_!AS%QV2F z4;~EUk=3T<%+J6iy6|8CdiqQA3x?$pJ6C@wEZIT#s^)=Z`9i)otDKI{DI`5U)@)wu zTUx`7fWorTs4VLMZ4oYH>LLnt^4s|1?=xtHz7(sKCW5AiHlyWBZ z$k^33Ait>^Y)F2+Ic}B9nT;mdxuF{{A0H@uXjnRF%)Ko$meU6R9 z!fY*MA4y`qjw2mZa-yvEeH8k8LV}VP-VQtprDo${K}iJ6IC+TZDlH`S_oHAaIR>&6 zK4ak3vGBI-0g(+J0}8Pt_+Ir1yvwBV?7~q}yia8uefoX{>ALj;3ii6-UVBgIFmvSc z|14z#7{EQriH8GpfS$`h8FKsNF zd5Z(1f}b=rkjKpO?_ltSLC>rD?4x*Fa-wrAgsoyAgUNxA&*Ne3-I2KSug6sVmkmts z?GO$zzpzO41YWswo4kl?gt$g+I8p2c6W(V*9G3tl8xPaSr{kDgoD}==N)INvJVwP~ zqxkZ7y1aDoZU~W|j9rN`7-K(3O6qQcWRM~1+P@x3&>P^seDnr>i~>Du@)Tv3NbvJ* zN76sq>KL+J!2Z!uW&KxMz|-h_IAWMVw)NPt1(sV`tKJIglio)!%jMJ0rX3Lf^DU$C zH64s+X~1;k&)096!xCtq~^asCbldo=b#q5lIeFs~mcY&t~^cGN-UD{(%3P?lad zR0pwnA{-aA7cy-0_#&57T=cjW*2(N*ymlkK_2xd#*L;I~_8S@;e1=X^=oG|ON5KO( zQ|jGMQOWKW${d{vz2j@?+5KU7@0&PCR+;0XvMOpd)(kG2pCL0+Iam~si~HukCmIP- ze4xoI2(Ek!M?dpm>5&f=LkU7H339MkN}f$wdKjNb1jCbJW9s2iL2Sh*KyS_){I$dq 
zuO=$dZS8T`I60X6)bvvDR96R*#e1NBViR$(+C~@nD!@}IQCN1MmFYG;LS^oV;1;8L zB60Qtxju^_^A&tx)%;>2(Lc!e?X@MHAuML?Z>Q&C_tG~(TXBZjDxA7U0KVT&(kt8z z8u_Au8E$igJ|E(PsxF;lrd0USMprpFHk2`m>tG=5LE(ULCI> z65-=uO?otO@6-l`ajY}Y2vvnZLzl>Vi0q{-G*&-@{$DGQU*X1F zZxJUk<269_h6fHO|wMwM`qgeNn~HV02>vCaQ-mEtM<>RmGCE57NY=% zTs6>KHxkBXlJoHZtM2O`X?Sl&MFc!RWiv%@0D@vLRThXb~_YTl{5EK_mYiwX2YjgU)W(S0(OUaQr)3L{~xZtG!V+~{o7g; zg|aW9B8iB?+~=H;REQR&XfKh{B9%5}&z6)d5rrfaEhuwe=SC0=9-t?p7O@*2+=zF5 zB|H2|f~nt}NIt|rrpgyepu6q{zT)LX+Jb+RjeoPz*X!fxQ>7U4SwX^8FLYuixRF}c z)UYexqsWlFVDShN^|0}YTzeKZR7L$$7ce3_HD~Rmb9eCHMtEArX3I1I+k&QPs6c4HTL>}nh zCF27X$;KDqdy8CXPH$iRuBm)L(&YwsT~M zD|i}rB$uu6Nu!55-_ZA_TIk_MH@03QlFfT&fxgCx$Wp0QEFxZ=IG<4`YVnHV1^MYB zAFl;W`A$8X-(*W<+DDR~4Gu1x^Aj)sctEo_~i&b)GZ?}3_L~}m#$|&%Us#q-CM|=%0XuJ zS(-$8ekEt*(&>W-8|a%feThlTRNSPPPDXZrLN}r(kvW-+{a#r{V*8@e_vg!n?1-%} z{)`^p>*Fo1FkMB=`l7{Cre)wM=cAw9fc4qJ#X(y$C! z4Bde#Alcy#B&U5TJCfQ+{12&esYmUF(?yJItO;d%>tktG=2BuaX|5Rc5n*z@ zDznY<5icCZv)3zbksPmG+*5(F(7Rt&xZ$`KiHQGA_EcxEq-Dmez-9`0t}gkny^5f> ztM;JLS470@-U;%T9mhipT5#d*40b+Z1sNAUifI@d^B2+%k>+(#+D53C##a-39)RXPY=_RQDx@zGg+k>N*d-26(9Im z$i~|DF&)#JWczC&v3rL}>E@?I;b$mYFsq0uHxIDNE02h>c_eW-4@6gf9IK8RB-!Ov z$oTDNVtVErtGQ$*{JFJ(mM-YT`YBHELV_PX2#;iAw?4ocNMdGld`#^%4oEQAU$}CY zq4-``C2MaPC4OXRCiY(Sml$;wvkhivg$lhAeSyabx?!Fd+a41kEc~D)9N}vql=-ul zndi3AAIIkj)0$3_!sTg1fAj!LsQgD43^uZWmq*yE^>^^V3KLd;VL3l?;yfr?-H87T z@kVlfFWB@q2bjkMWAW$AD52TT-6UtHJhO5VGWh{DQp+Kc$NH(nCCg3NGh`0&Rz1&h zzV_2LB>mtW@1P(zKkUcJCe|B9~JsSEsBM%v1Fc8WYJAC9;_bR!y;)vTD2pE z&Mz;*hVoVD#jnTI3)0bBB}(rnOL$GhaOQ`*NHrxB89mTud!ohkpf001pX+GJ^ENv9 zeJy=$auCZnB9aHA*bRXU;XXgaL-^l#=emm|q9hxC>pP9lg&e}Jul31wy8`S#$%-%b?nv5#cF!A0>~u!pQDf)OmkX_dUhe_-X*2QQ zmqS>~pas?+zK^Zi)Yz=7FIaA84p^Vxhd(GJ6YcwTczPG6W|DpA(l7(k{LmC1k2r># z-Rx;(wGuJU-$FK5KgJ&=npWNQc6j3ZX=s<%3A{)BHxgYlp|6yFz^9Z#SnE2PzKWep zKULc@+ItP{_6f$@R>_bXf{%FX!bZ56lg}vyh_UdzCOIN?4Zm6#0g1sjSo8O9Tw7~E zoQsZQ&4dP;IojQB%u;)N1I6O$2OjY^Hn>pt=!N+8hVjJU=})w7*?WYR2jEWOWC^BI z!g-bQ*!xflPR=pmGt+<*zM(6`u)vL5;ORg9Ncq}NbK&yU|nh9 
z_(oS&n&?f^4{~HhYcrWTpiN}wyR)G>mAL2WZak~&1u-_BM#gF%B)OtSlKY?s=jl%( z7c_G5Ax|Ze6CX&qbH|xp-c~kx`#Mz0&nM4DZKty;B|$6qG)S=U1Svlji%yNnqz<)f zP>#|pB9rl4G&gfQ*S2miZo4T*l0Tl|b>_LE;Z_c0M5+mWutkDjZ<~nk*dJu6yB+Wr zB~7OC-#u1H)-!lhPvv}tV$F>$ELI^4?>+LCb39?mvNoPTubNNMfAwcr+*}_*+JLSt zmlkVZn?|#*D@$&Nk$CdXa2!eZvfaJk=xB@E%qss8OWRS$@*1T{&nQ{3rMn)PG8iR( zytspG+V+T@o+cuq+HP#2y9n+l-vs!+sXq3kz0hk^8Z| zl>KaH4~GW|Yxh`sJ)O7M$6Op(kLclt>Y)j7QI z$LCT~{MbhrsO%{$+ovHc+VKrTpA(Ch_;ybHXH9DN>4*=vUL%Int`N5L71{krC_HKv z#(Yc{2&KJq@zuTe*sW@1aoEpvCV&1I`!r{!Sgl>e8a?ya^`5; zv~0Jy^GhL%*?N}o_B+VcEQZsKhLP(x$CKyIfF*<`QQ5PA^g0w0)g(n`QQUxghP#qg zP9uoVohg!F4mINU?l~E_=Zo)+@+C`b(n)A%C~3KI8h32jP9|ra1lP?eWMdxVJnZ|q zp4IZCV^}(sejZC5$BiYy)@SjG<_9ok-)Oc`CmIi=9Uv{5&aD3Ic6L2Hj4CauV41JO z>DFlhT%Xeq_!&8mnJ8~&Q|jlUe<8N4acojVp^6bsbTXY*8boD>zOx=C03mS zH~&Odl0TcRF>qlIe#GKn{|`)RWe8TD70z7jIaYSd9rCmj7w+pJR9>rNSQe?l63Uv(cQ`)QM>nJ=hx;s7=M(e0poax&Q; zTZE;QJCL-$vBRXqBR+zQ{7ls;K|$2#Kdq%q(7=Vft0G#GhL;vHaK>*tAT-56xM@ z^d($qmDC{oU6sg8qDQbXSN^hs3K9FH7tErr6*Gr@Hq5d|$P5=n;rS}V=_1i+y1n-* zQE+@khGmQ*^WO##*_YNt)gTI-ri~OD&W;t?b-W~Z6I#gPPYv{zO%!STdIH})9YrSw zC<@oT4wM9QDU!)8dc^C65sTa7K-MigPu4xyP5vF$A>31Aye4rH^%`-Q)m7c5-I3R5 z=#oF!JMkmd`w_)z{ebQiONk}09Gl%HVuh~~*b(y-+>xh`zuv#dLuMttD4azi@0GF@ zrD@F1&y5LX@)`P4kGJY>CRg4IiKlHb7A&96^5OUwRtP7W8BiW!X{8D&`B#Tz7*epH0G-lBZHj#SK_jJ{Dg*6N6Unol4(Y zhLBq}l905oBS=fWKQl{POuu{0U@cYS@mu~F>p!YPx_0jZRn<+{qUbChrci}X7$}h; z9hBv*Rg8~DkSt;3tDs_4M%P`LO!fL!S36rQWJMMvU7F{uB-MX zHqB)$BxDrvwY`NgnI`_UV>ioPpG21Xhma3{V_ANhtXOTPg?Ofa717_LM}0mKa#pyO z1e+bey5$nS$a*ZEtrEk0p7fB3TeVqwTLIawJC0m4mS%hGdx@OCI=ytl0&f;R#u2dh>zHQsLze55!3Ji|Wpf%K6ay?GhOtaT;}V~*?7?^h2o1^p0V{;0`ZkV+~O|2>JFwT#K;6FNjbBZay< zFUF6Ty(VRsr;CNzoOl{1MU)%th{xLya_*Wxc|5-fYwivw-#3eK(U5WEr^W)DCw-b+ zo>xIOQ&%*fw`VyXmSp17$AoCEp;@X5!fcl(^xw}YsM%6NGB+v-MVFG;-1B8*_!k#4 zZo(NnHEka{>lsVlhuRj zG~K(-hUG2pqHMJj7G3#)jWy=bh*VFu2TDIYDNT0=C6s^u~w+T?*X*W2L0 z(^{psEVRovuWbP)eK|}3HEB|*R2ep4~mNDdSyx%OMRrZVdb<@Wd{W>A1eMf zNIy@EV#m3q)XVZXj0-)$47-jq0rK1NT>e~u1X 
z52t#{axCzD74?o5;tF#w8qiUV?gl@jL1lZX(!nlK82ZKa2ivgR1c~kYDDRWKWA8)A*!|J9B^Hohv=br|UOp-Uc0H zho9lP;bBWz?fz;GH*sZ8g!gH13UD*aiX11YZ=T*m>Y`vJvx)o$g z=RcZfZogFxO0OJQ%sE!@3~)1&gQGq*v)grgC>Q- zu84=k{&2k5XUsgINn5Dcy{4GtB<~PDUD(T7bG*efEpD)QGhR5gI9WX5#2`N0FiYH{ zbdP;ss>IB0nhR~ttrl-tK98F8#jz_pvdQB9Bjk7E3HojJ3-P9SYq4zRH}1s4*+T!g zp^mmc9K}`4OsEm8M2=Y_uJUHe$p1boQe3)<&U%FR@uT0uT`vQ+Lv?+p(K2n8# zZ63|o)-skD9|xwozU-c8IvFO@j_-wqvkP@2n2P2{3Fm$dKkOFc;R$O=WU39_;ugs6 zl;&W!xzm_x?|#}f`UqQDu?%aKTas6ai7af0CCit1>&)MGvpqhN?0}jn?1N-}xnG*e z1}|M@<>!Xb=;!wAYuzQbaBVA-8lp&>`YovWmUn1W{s?yOk1Y#nabfz4XR%VVi6rcc zCws4*Nqr92;ER#_$oQGb#CDoG$y;tsf=`LqyoQfh)$=@EF>)p;Inh801Fz$WA3kBf z?Y-pE-V3aKS|RCl%_hR7``MLG1z00DlB7pSvMY@Ck;Pk!u*-^Jj*8w>g)?`YBBrYk zh*Qsrg*Vi*#S!;Ui{t2)t0xn(#CCQUh0VrUWIU&E^^w9GVPkKkSnE!QSi7N7ct$=M zKmHpnR+=+hNPeCb9zFd+s2?_$6;Jv|BF2vtdeoE>)$$|auajj^Mo_Gw);B&8VbZ&~+zgjG$@j91_RL>xqT_+>tAzU$)Er0b^LHBoDiB`GT_isGZ8jJ3(v!CTJ4LssJfH^C7vKZ4XTylHt!$*u8_*u{ z9IqTcoMijlq)$R*g;g2?Qe38u6>MtBdCC8)X2mn=UcHJmY_?(Mw=>CB<7i4R%aR)@ zm#AJ(8hvc2LFVY5W{U^!!`d%L@zLMU>AeI)+TFZ|=3kCOR?nkJ{l1$xo{wVtg?Gre zMdwJBLFd zO?!$rS39%RP7P-BeGFOfE{VO=G=yz`4atkYUSvz@BvP(#Nb1$E;gpYU_`LTw;;uPG zc)uWt^lUJu4|5XPFoSgbb3-(XgkQ2zGLxW`H-O8!j^sfj~+Cy zf@2Ssu%4U2#O&1>oP8vK;_B(lRyLWf_wqvf9AD$2$5)v7Kq2cmBGDG=F}C8=El_JY zz}ViMY=y5XJA)M2zN3=Nfq`T6<+kx`?sYwO_V{*48)t*dH0AK&-IiohPdE;4(j{ci zXR6mGMe06ekUP!BxHY~Hztc3tw_9xK@}Wt1Z-N|?Y0F|E_ao>ei8ethBN@lPXvJO< zP9ms)()HSI?NIGq#f8Z(Ak&ttmSv z^@c9Ycuc#RHe%_iKFoCMNvw6znO?cRksWVv5=|&wL#zh~RjFRczPM?y#SKTe9w@H88Qlv|9>*p6t@VOd-0!%Z)u3o?8P}^Bg`jLnobMl^V`{C zlQg0D!e`Q879-BQ9w%(pY7l=2ju-#<{(-m!#RwN~_r)s$YQ;G%rotEV&Wfj7_=!iK zBEq$@zQVTg2gL=%RD2=!vUtvQ7ok~}vG8)Eqj2uZrNSx`E8#D}0y1L5336zMlrSeP zN<7KVO(^aw6gn1{3HMz+D(+5j6QVWc;;W0p#QXZn#2f^ReU5Dvng{GL$!>q@# z&z0B3c|%tS?^it(w?-`yXS8XuTfPdyPcw3b;j<43H=E@Mo3k{9GCWU`8*+q2st?6Z zOHQ$UYh#3Yt8S5ZEL&_actJQZH$qX89QMELu>Yky|3ArLwr&v-ivK-)%KOTk;$vo{ z@b=GQxvZ@-kgDxA!3)^KS3TGdmYg0~kB{b___!hKopbovEgdK;*a%P4i$Sy3Y=-sY 
z4{&>;4?((mBOlqRLycBEfS2yN5PD=S+{$?mCx6YL4_Af3?ziuN-MkKQuYFM8l!Fjv zz674Bj^_(htRU=)33L}uN9W3)amyYoK#j|@(cK+NXx^@+=ziK$sXYudfUbClsNL2~N}^_$)Xl>tnWEpN?o4PiM=V<2Q_G;`R(I;fB0F#?hx% zV6}O*NM}?yYTq>+4vMJgwbCno*!ftv8hF!G~BZMP`vQ z$ms8U)OPR+k{RI%XY#vIi0>~!UVb81m~>BYCa4Zwi%>wSJHyeIBRV3B=*LL5?ku-K zoF-BeNFl9}TD*)Y2fTp;9JU?~0nXN-+zT` zGpOYEgumsxdZIv0^C*m=8)#5{0^M|664M;MmTJiyr9V=Vs7dce`mJaQRZrSXH7rh` zzr*$MW{rL5RaYo=%RdE;|8${YXe`QHk;XsZU2)z~0sQ$l0sZ`y3X`uT@+<1ka{2CA zFwUeJb*$=xCH}o4fyPq!Rh5A37v4j$m#)EsH*>gu!3c#aE5MKrX;kS|$_2gAqMd#+ z4hjLqTxp*P)SZ^18NKFk=dCe}j?{<8wv(aaix~=Pa>gZf$IvD7Ph9MXpMoO33k4J! z;y+1cNaw^$lsI!FElwZgzV^j(QxEL`S7lY)_YQc)-KA*UPAR<3XgW#{w-c=@PXZXK z$G=)Nmdgy2gGBGABIDOaaAdbE=XvR(gTsw>Zj6EoI@hO6^)3uSJ76;k)zsjpAHy)j zE1R>}CCfD~uLAAwKCrv(DOB-ez~b-+^d%&aOPBJ72H9wE4E3Z+<)dlrx*^D8e>Z%d z_nMbq`4K6nJ>t&msnGTrmT+SGW7ufY$Ol}G*ap zv}VCf{-|v?|4}mta$X#QyQd@gNtaK9w^|MS*&l|+n@_<0uUa|3{U_mX=p344Jqi|W zorX@^o8#X>0JYv-sJ?PLy;Kh{Ds2d4T{_QAo;MDc-f)0T4yx!rUJD^kpP;;}lb3Qn z%ylWh19_h<=!&Kswd(QX^xZGO-;x|Iu|E~5X9U3GUR^ZVvz@!~V;ksgsD-yv#Qew# zFDx?E#wmXfA&cxdeERQ6bkt!syxmv`xzmoLh#nCf7-NFf#wpMzt$tM3$^{L*zXQ)m z{Ds!;JPI;@C(w=&pTbRj~AtQ^$DD`f- z!1bg#f5%t}={jh`t_&|^f4zfGjNeGr209SUnE}9WLKqJ5(-f%0!z z8f7Mtb!@1IK%+*Gl3tIWSj@sPt%~&E?Xv)+DD9B|p?#`Tnt%fV2k=>JV@v7g*ILQXf zuKtE>Wu@_jv9joeF27;+0VPLo&beK6o;!8U;j5~#@f2iX% zRc)8YTNR;CsePzzdY5DlYT&tD+UV52WVEZo2VZtH#-DO!>0`M}F2|u3-N{UVi%y5& z^rR7V*+Xl5PWmsZI@izts#3;(+U_9o;wnn;8H*3^e+)~HRX{*1;jSxh2uk;VekD~NPW#};iodLOER zdo~Wo)^C5K#xK37a`|ZNeQLEN)k_(7h1{07qu=x6jvM2;eYZef?F{wGPk`xrC9`zL zbUJKpG@ts|2YMbngZlNZkhSQx#09GcG7Ij)pqvpa|v6s}qK@ z1hbLa5EDqbI31T~`18{(Hc^kZ+ek$-6g8)3OXmF`|8?9kbl-WEVCC0du&wC_-APjP zLtPtG#H-RS#c$xcb1$D2mE z78Li-M0c94`0jN|upuCe_fpsnX1JQ~kjX-OEoX4010}rW>QkbkeGa_Fr2;N4ZKp#w zX%kFr9OQply@JH)e;|8(K7ZdS8*HC#<-ecC5OE-h?})n1C+>QHR9?CuKRqG({JD&q z@ek0TedmXJa>O(p!!coPN0{*M~By_ho7bOPz@hvaD@EKpfa#iUE(0y|S)M|DD zX(-sjkc0+)E5C<}j)~y%lU(^{14Bis%C}IG%S~wgTI(QNG|0IaUqrbhoseA&s@=3>of4B%#WYf%X9Oi 
zcB8wy8d2wON5s$h%Ks^y50N)~kzjofce!piTK}q@+y31hv~pA6Pv>31ESrgR#o_VP zx^x(=KfqBF?V0pH-$Izm4s+LK%TQ>b2;D6Hfz;psMbBg^xeF&EP;RF&zMj1hbni_; z-}f0oc9{cAtt{jJ1t#%H{r0f`kq)15D--7XJ3*+eHh9gq=eo}w5R`jQK{;cl3a&nA z;&o=kK*`8+Tx~VsTi!ci^EV1;dec5M+w}-qe)cif9u@2 zwhoe1>-GVhEmn1GNvk zhh`?EaIM?Ck$37%6r?CZ^Hqo8;RsKZ~onid~Q*BEU$8?5)IC?0cW}z^iG~bowfN$IN~kJo_hqf`0;`}r#ADO8j{hQ zljk7tN)x}|dn~f5u0TEY%hBS0&QPaxNbojN#LK)%f-7!1qH(F_{O2(aysUaU$Ko#V zo!AB~i12{(ic4VAH4ko&`3>;f#UMOf9TjD4<}3z%QGDVi?zV!6QyAOCk8xLo#~b5e zj?R7l-R>jY_Vo%#_xL%%?+M8QJguFViWvh)S{aR5!NK72CVu1kQ{417$Iwa#8&ua9 z!#ltJhK9`Kx!E&I5ER#;CRt15y89qnvv?PGI+?FAZWus6HKUQO=>oLvZ!%hBd4U@u zZG!f1oQh6eHs(~0i@9j0k+|ld5q6IWKy9l_(6mvn1|WPF62OsT&$0 zgnI{b=;lpq1iyU`Gqpq}N zHEO%HKvN{*wl?g5jHh+{sY|(BYgQvyzE&OW(|yZVI4p+#<4@45^hvn>TQ^GkxLcCA z=!I8$T?D}xNp%kFL>jN7MW3Wf1vNiji(;bFp|#>8cTB-VFtIHLf~9vN8^y7l(XuQ) z?^PsJ|Ez}BYBI3$--jB@hndj!c{@yh!%03*044o$*mLs`A2jU=f7`(sYNwjR!$YQ2 z$<+;ZPhM^x93BNqt4Bh?&;(8+>?Wt}dP_7->I$0B6EAv}{D)5)IRpY2L4$=ixo$0W zc%roe+B0k*vNagIXFY|*=L_LWL<^tQQ4Pv3t>OH}1n%44J4jKf-+{RJqB?q%EBV~a zC(15D_LsJBt=?PUZr@`VnVtnPci!-BS@qyI^I$qKN|!u_{g#dJ zc=1yxAO9Wpom8M9H8NnBV1!E2)6fC+RG6Fnk_+2(QncZ+2nOEdfEvln)J7@<>Kv&g<QtyS8JO3o@Gv%yZZHZK}^-E)W11~rtobtX6B_E_}9r4$Y{-iNH)wlMuqB;5E_ z0iMUQ!QFfkZ$v+ca((K!&n_1^qrtOa6_C#@%$rcN_N61&Dw6<%Ywq%jIVw~&up6i( zZhb{rBj}h{!l4^OXqog;I1{H%!vsZ~`u#G&qHleIyH*zj>xN5l=H9v7nrZ#~M42T> zx&6Ihr(~C2I)QOdTb+1cvut#)|0fdGZ$|f$ogqK>6W_Fppb;w*(1t-k<0hCPZM97F z$2bDrQcpz>#=D?LZ`SZ)c7mT@;>yq3Z-uUmbm0;XeHATx9f8a;-iT_hgo!4M(SYZj zJp7B8E~#L$M{=sOP)pbdxN{^7gx|I~%&NEI4HS#G{?XEW^3j8wtlequTWt_J*pYsv zAiwZFv&L`39%KmYWHDD>=pe8xaSurmRJBzMIgx^NQ$w^)R$X z^48a-`5|wIh3H?GE6Qp9E|Tq8kNz32LPEX!yh>M>Xu6qz?+(8XmhaTiZ(Vm7<(@1u zo&BCa{bx0dP+SRX%hI6iSsRE7^TEey6ullQP4zT-k#2i8XKs6!zkA~jw}ENk3G<&VfoQa~cIpqSgz}Ul@U6`?I+CUIoG4tR{Qup9Z{EOg)zp+9nt;&q2>V zEeMGG!2jqS1FM#Lf~`mh?@vj?_WMPmVx`NX#J2+U>tqnO=G8xL@X-;$ot8_SvDR>u zSgyn!d$)%>J@AQB9diU~@0^x6-Q+>$!+tpU;ST3#Ylhq>Mf2~QMuC)nG-r}OnlJJ8 zhZhM}pwq7f!c;5x`p_L>*9_rv3k!Igmkpw+B|k;{2t)Ml_8Ttv{V1^1R0Ea1y@JJ# 
z`|P@A4cad?E8{we21p*^f%lg^dk|2GQGD4BCt29>Fdb_SSDe8}rK_Ce0#a_F7b%g0>20Cr2t;o+kt zVDcggR+^VW(?BaI?l+?{r7fUi{Rfaa_5{7C2d&0t>!tL$0y`S08Bt*E$!%$O~Sa>HgEuU#0+lMM?boms4T1 zW-fp6?iqf{;bmyh>@Ro1RtseYec(JwZlT-jjnPCaGc@Jh7F5}Hj{6?97c><@Ve#5h z(d*%fFx7k`u>U@Aj?Hnf-#r+T)+)fD@C)CV*};D`7y|bjQ}{2(bimmTiTv7%`SD_9 z_$7J6)*UZ_NVzD8`%(x62Kl_w(h0ENXg3tz%jRoY4%|rg;EJ<{A*-jR{ExR+k+Z@T z^nJrG)UzWHxyoEXt8*!02PD5)E|bqb9>zbp8G!bOE#u>#8Nj(}7?A_XIcp zt^#sz{mO5OoexW0F2Lxw`k-b%1If)TuJma&(AE@hC@HdxTl4KHDDM{5`;(T587KYRKXz zT!`UKlGh20=CtzvY5o(Oez%q1&=$bo3MW_2KTCs;tLK2Zg(p-{0zP|FVdIzC@Hr?1 zy8GkcbhsJ}yp&YyIAuVU(iG@Eo(SB!Ac$Wi4-S7;!MpMw{J;1s{H)k8{^aUk=+DPn zNIYf|8tR*lejJNHyXId)U5{p=Cf*&sa z^5FgBKk&5B(%wqX9$A%Ha-09kpv)(pDCE34iqDQl{fW;J)6_w}Q%{0Rfj$~XszsZX zWZ_ZEIB1`12`b(4@XoCWM&y)3KDFhx7i$T2m(IB|d9fP`CHHvMtWGX(qYS=XBaa?F zEE28kmCh$b8N@CGv8+=Bgu=&`&6&l5}mw~85H8OQVMU5wz( zP7UxWYUam1lf22n^}K(w9khIwfv575;og)Lu&nbekbiY>C_EOp*#wp@2!Up$e*SP{ z6aRgk2ETLDZqCOu3ASrYp-)o@cuzBc_BG?-^G_ZAc>#eXT7PSp*+H(OF%Zf(&w61e_zJzu4105`r42#U5KB;BLPT~gS}dHPR9!NXn%mY*ar za9tIOPFjN7L3Jo-zsC>D)&s*tLzpbVDTRg^{2Yg+g3t^zFduoJE0<~#we3)YZ68ax zJKGy;3{lT&_ve;BjNxpowUEttW2DunA-WTJmpi(02wHP(2xl%>#WhUIgMq z`2Thq!>)(j96IxvpVKMLpIN&drMYl0|K?^$QHq6i<8ydJ?NnZ4K|6o-xEEZi35TPS z)(4^z0hf%Mpz>!tgtk9GwRO)0etXVd8FI3lV|T*1-ASRmNlh`VNlM~N76R9GZ3$YB zPw^R}jtex`405$jHYmILAo?d?1V7`~!RVz8{IkhZq5J+G2+4K_vlD^vMn4jKtL0$! zzZhg~_gEq_^g+6NQ_Y$@;QG3BNNy0JK-;0z`Qjkbk2;LQ zc535cm2NauO;y5&orQ$#YaG~iamkAb9j@<*niXAeV2BxtnR=Y2RuQy+-W`}EP@vrJ9*ZyLDb)l;yE zXB2Fm`yQINOR5Sd-9^2Exm=^tV>B?k1Zo(LgXg_jIPR6YV(&2mW zdh~nT4dhgP5oXCPqYHYU!_5wWfTl;hlIT8MJm$&i{_R5sPEWAfx+h3*{j%t}9z|U* zr=rGDCaCnY9{v-fgs&hcG=2Lb{BLkPsxHmO!$lFu;!ZgjngQ%|3$S}btx-t11x+l~ zp-Y9fe9W8^yzYN{=+=8*;j6|7SPF@d`0b~tx5yAx>Fq-I6z`zwm$eWWHsG*FW(J-4 zycW_jJ?OuY&(I>dgK+9>5`7rb3eHc$KwJMN%s5)ltE|2%*`=R}CN7qx|2Uhg7`#8Mq(;maW))OH zUd?Z~A~ZzTK5xcuPj%=F|KWH`@@udP*T)t8Ptp4?o8Zf#)#%$RJ?LBe0Veu9gyLJ8 zR7zw@+w1Sa5@l2Rt?3^c#qkA|O}PnT{PPD>gcsqqCH{5*CzHVAZ~LkFsnhGbSP+E4`5-o>!! 
zrGWDB3-H|FY&>J=Ec%~pE4AD5kk>DoMNL|C>6WM0;J={Nw5@n79=pdEFYwH!2EXj6 z^!!8Up7kwIIr{>yemNI?-ZvjvT#cmL>f4%rgS@$McUKG^qjL$vVi%Y_%A)QCZkjBBA zklZf~g{~uM$D9a!!i7OH29)YJi*~*?#QUccx_rSPBIk9`=GCEx zVoeB@Gw>6buG)qa&5cAm>umAVIo~*I?@pLlK~Ug_+i;+3H8grV@;gR+=T_SLaL$7@ zaPO`LSf6$QhwL0~X_^oj3=ZKm4RTOG$p}2}*<;YClC)asDPVk3gP%Vuj^CkZgLK_e z;q9Y%38sDr9@c(DWdrBHfdzrseVfC6Iu&(jeSle$`@z+!6s=sGf)rjIfiY?MC@|id z3!K5BWBb&x?bi}~At4jW7e>Odp+}IfbS~ad8;@-Nrl1vv>)@l{C471|4lGW&zyS}* z9NS!ojWSQJ@q5_@BSxKvmv^$zGrj_CTe%hccw9g$ z%p?_eX_oY#)frel<+rHQx`97Yv>7&)PJz~pRK9eyKCMWf2Hvje4&O_A_&?#(Krl9g zH?oceLuU;-Br6AeYW@38x+e@);@mctwU5?oJ&~V?q|< z^uz0Er0_IkEFMd*P6_8^Dvp8(>0;9P2l<$du^(trr>W7HdfUyXGs-Z7)n%-XO|0j+1+1*8p*16Es z3#M>uARd{=Re=0kNgL7cJoMT2x?tHTDQSi#{jr#vkg& zqCcJHxX9@$aw|KCW~7fnr`)M%U)^Gdj$E=$z}!}b{urlBRVGj#mu=>hba_5JNJ!I?+-b{b zM|ym+G}c+Ajit)>!=F)GXxx`0Xm0aTx*1&;#LoD|m-fj+&W)XdmSe4=(^`)lT%%NY zmDiHW_C?Fkg^nR;!t+bqVzA)8R(GLcvj3qO1|3|Q@>woOe*{{%MVp)AIueo}sq?Sg zGr92mDWWU&=BPTVm@}NNi7Fq9IEx(}T=$G`4hOgSaLGo6$j|W`@4IRV|9hArBoA%m z^s}-BfgZp4-7_mWxO)igy78FPyEK8*i6}<){$<>!`jbfi?FR0ybOqNl|0y@odL#ce zHI)}A9^q#W?&1%hZsnhtXFytxKYw#X99Q*vF1o9b!(TEPj%q~9YZA(~p)I3VBj2*Q z9NwUbE^jwSzA2aaW8q9tw(LBgVSI`2Ix$CZ@uLb_wrLT19ooiy^SZ-TMaA)<1<{}; zoCg7~qhaLooe&%w0afG}zwM9!Hgp<7^95^xTC_BJ{_+q1b@~tforWWdyZ;~O{Y;&E zxA>IkceoOFYIPZm+ZPK9Q?A2)4=q@@dOEm%uYoStCw%g;09cH5VfR0C_;=bF1oF;ypjLajxUrMeZ~1@=BNVIi_sM ziwbRcjiYL4uwW-2=QPOQtoq1Bm=m~h#zi!*#Th*)utU0OmS|?ja8&R%nA;mEnUjJ? 
zg5`!OBAS`VcYIJqR*&^TeR%-i@G2L&pCrS5m3+8%G98{5M1Y%c92CFY0aGQ8h}%^M z{*R&SaLB2R;%N^hG|^H*Lkg+)IhR5z`VdW_6d^JjBBh;5rKQrQEi^Rlb1o{W%%mkd zn~)hAzV|DD+qaf1XXCpA9DAQh@?$rWs6| zZ^!Zb?v4hb=j~M5W-1-qyaY!=PNDU{Thqj8G1wDvp3JZkfhhhgH{H&KR$dW=@BS6k z*KIwSkfIGsJ6x#9rEqdxxVph@$^yDHxRMQ9`jnSbw;wNF6UHOI&QR^i0chG^OndbN zP`%a^FF#D?tu`E^(dSKQ{&*-Ybew``?i%rGZ|b1(9DdSb(`|Y-OcFo3oW{xfEHU&_ zC~g$ez$pIi%qi$FzVP$M@MdR(sw`{_-HY?)$)l3e9P%h!i*#xm)8q^3@M_G8&b;J6 z6ARbE;EXUbS=tg!jOT#<5-0wwcblF+?FF-yr<%U5+DX;^%E2nzO6s#o0i{ob@@tPj z>IE{O^J_CqDLzK_Sn-I|2{oGkx0u!+GDhFFOgi_&Gqy_QFEO}XMI5)hAPQ@@ldMNI zJfT@t^sm$e@ac>LI>u5qdLmvd^~76iO>lv|4Z7)Pk?F06*oCJYv8#FsD&PCZ{%7@^ z{j%jAS*99Ij7%;!^fU{>vmZP-m9Gz}Tg+j??>rvw_A7P{PX(?9OTk#vOw&2Oovcta zfzW$u(B82XMz89?z@ro70sTr^uP(r}-}@F4b!`yc|Lr3+z2<|bpKj#$#lhIv$Hz-+C!$(o z6)t%bhlLkhFzuic-V~9b*ylw1SG(cl?_a5mkrHjSSdItB?(kx!NYKFCP|Tb096NkY zqo@aqVQWfoP0$~H-w=k)O$j*fq%;1GddjZ*CBY_V93dVfYUHo8Ett#a5`n=v^q@=} z)fNaNlJoZydFoA`Mz&Dx_Ed5xC5W8ZbQq^g=;Bq0+4QDeDl7SJIcy7afubD=P`h(7 z(P&B`cOB0_n{6!L4_^o>{}ZE~Yg&nh#vYK|Ee;aB{djrLdDHQb#V|T1jB&^Hz|2|= zjyH6gt`;63pGYAc*WT2Scy*L+u?!}zC;|udlWD?>2UJez03L3O#~5xG!s~48QZL16 z{H^G>1?RE9+YZmJaK(&yJIJx0^T4s~5AUX$G(>vydp46ncF7MhVx*x4Rfp^7>4Hw| zzkd=JKTp6NayfWwBmu=Vqxg8xCHy+m7k7SGh%|o=wU7})!=y4^bB77;h@OPz8T}-A zz9h<2OTbqVMG*Co#ZKQWeqD$mmyWGz2x@)^+RdkE*>pMf?a5ta-hplS#@?KW5(|)9 z9Zi~SFOkesM*xMrKwe`K@S>VX&*9Ui|1MPUrhPw7+7i#PH`l4c2gkGUpOGhg`uMdr z{n7%c_DUs_CJj)Zibas`x`g~YB@Afi4bB^zXwZ-mykhT?_^u>!rQ!!~Vem>)rnQcq zv)M_qu6B_ReYsS}d!Sx0CX$F`0yR_$ps^pNApX-SD!!FJOZ6=!qdaMoc;9^7p=bq> zTRyOH+lt}0VI2_~=3`Z}=76(GIuT6xN!!L0srTeaGTawL%5`RfePRW965>i;@b8{) z`GMfKcm*vU8|M9fbB*v5a)`&yQZ%@A2DfR1;ii~KoN+D#7v(I+6v4&VzE}~-3wd02 zKmkDiK5bB$Oz#|+PNZWtlc0i^^tz26zWTU?ynB;Ga00s9ONc!(I^lJ+IZR^($_yFF0o zdoZ@A#i7*kDhvwlK<3T>zJ0MDb2V(yTq+K4vNkk5P!0OOi-YB>MezGKMr`^Iytx+i1mNQP?h4DZ^;~7zjSP5NwJ`p85cX-l#kw`ml1P#+t z*yS)6uq6Y!mgmFNky)_4Qy&6CC@f4)u6MXP&K{MRLNz+pk`%9SazT7QSrzL_2X(V~ zJ}--@h2VXnG&T`>>rIJ6y97vnmV#348Uo>#u(G(8P`i)B|9t>lDX51w*%Wy7{47}0 
zOOTX%9agz5f`N2TNIJ$(irTjlLv*EhzSBRjZTzI5y`BCvns<`=gjmsC<~?k*^cX$4 zQwNsLj)K);o8iELy-+I~56k|9!A>Pxc)o8H9MC=vQ>I^`K^LQ_P*gatW}t}ueZ7+> z)-FK9)FzRhiJoNJm3RsR$9U0^7wD^T1u8JNm|mZtO$U~5VAl)1rmA**)Md#Y`f&%p z$5LaU#dkdn6-I*8<49=q^@Q7-H9#XO0(Sdvf=h~mxOiR)Rcjug6TMA&jV8nF-Y1qO zSA?tRelRwj^YIoPsjX!BNk67v@~i+P0kpc}->)Q8_YsO)v@fP#}E| z1!;HiyM_&EvE;CB0I`Y{A?+lAxodicP7vdFQLj{dqu zT}}I`%tbN$?^h&!JJl5B#inBVy7g?%ztD!Gq2<(5)`Yk0xgyzKmqGVS*wCNv8B$|B z#?F{Jljb}15p&LoR?90>*9sLh0Y*Mbl)`GeegK@+Q1fd#)hf>Up@32;_rR(MDYMSN+(PZz^`vz za7yCf;{;|!#^Ad_3#P2|hIb#Hm`;Bom1FdAYtKT;~@{gn07NZ{>QEsd|n zXTa(;{WR)b6Lt2OfL=%SF>OvGwT?YW4{gn-HT%y{zeI5swy4A5#}05MbS@n7FX5%_ zm<|O~g5mVNS)h1(V8_pjR8>y|<7ySU_UDG+_1w z6?*JeGS6pBmR)o$lg$~6t4sQ)NZb{B*htt+e$01oczn=_$o)D(!@TY3E75V&+U4SP z%XifB4%}N!eVUXY<=`U{dGRG}4Qrr}(z0o~L<&8Z@Q`(~en(uqR}sDTZdys7(~~VM zyE7w?{B;eXw})r)zRt=qEr^a}SMx%tkj5dJ=F~xF^z9^WM`nQE5`9`b-pA8;Til=< zEWul0nn*``D3Wi6$E1>&gJ9zThw9_Nz@m(|v|i^g%q&ukrwm8@@m@ z6uMb)CnIusu{Cda@+T_d@R?2Z{=w5{cY>#%KD0GD|er!%7pSZeyyT2ru4Ffk-hZwt2v}rw41JA;@J$be%jP2ij{W{lh?DX zslb5>dUm>7gSB=LT@W%KU;Ce6~|p7l{D}|6T4pbKFw)jsdB?44CM1k8>^p^#|!$%GoAy? zn7SBnH~$>lNI;g_3v$_B1ZKvRQ}^a|bc1UP-L86q_Rzz;p~G2}SGkvdCe7@1ZEAW* z_b)v+?n##JY@|kuuhI!gNmMN;hn@?OqHC8OG+8=(gst3fz`nF!PM?fUr5B6F8|Gpn z-FhgQI#d}_?;kdF-?byAYjaK!Db}7c8=HwRF5sQ_od88ou9AN>ZKe;(uF`J(JG3Bq zHh5%S1?O8wAw2OoC}!5f_fuCvuzx$OnQRX4*YRBo2jf9KvJQgp7ed;hLtuOK9Qf?w zb6>BRf%=D)kd z9RHS2v%iRtfunKYbEgs{H2q<@MiTfQehwFe%OE`}4ju{QgJZ)fu>AX%9zI+}LsbiC zj-@&l7y46$rWSf(Su`!dF4}MOjkaItCWhw&`Ejsn@BI=PAP2ZWdu} zdTXK>q|Y>kfQ|WJ7GeQfJzmiHG#k_tP2eGfz>Cf#cye|I@6Nm{FzuoO=^0NX-yAm+ z_Z`u6EFyq5jm#t;T~j&W!G? 
zWI6FokfN;x^&J*qd?yS3-N=Bx_21V{_ z%+XKNn1X|XOrFsJ5O~7pye$=BR`l;Z%k#4R^Z&L{j^_Y0>hN#fv>|#5V}%J zti#n&qjQ33nkOI6JZ*;ue>|Ym=QqQo>R8@~BfHVN|1`Pveu&sl>V?9UesJe`I*FP2 zf+U(uVy=3hhmBsR0F=(ak3eC@DZ~;^JbMV?N5q-xN&LBPq=M{N^_n+{GbbzVJ_nms zFX`BXMW7KY#k@6WM9nl;9P7A1=Is_?URTXPQG+hJpYOQ!?%*Lb4NibyRdu+sIuu5n z!g+fK6!2)87N?ujiLS*rpd_~rFSZ)vX~Amf`mzM_+v>n*ZX_g}tyw(z!d~dfx<*fV z$l>~uy}-WuL7hGx#J=nPbQb>`(3&I&WuF%^f)-Br$x<0g8e>7TTbk^ukmVMeb512|Hl= z&Ox`yu75$VA`Co+@ z8+I}?AyS12u=itvpG=1h25#8q(11jHCieLJBJ=lY!TIJ-_{S*=)F#(aRew2HeD@U9 zJJL?9%lp^?#kH_v&jVQXZZQ-L>H_y?6Jgt%Oq`;KMq2|F#RZyBk5D#dJm}O_q3v-6Xej z+(~=SQP?Z<1XoswaECgops;-aiZ_?RAYyL_8Ob}qSbhz`inMxmL-ctJnY4;$<1Pq~ z?k1smSufdoGluUt{E7XyrvUB+PGR0JY9p$O6PZ!fo51q<5N};JFwg!*FdHZ9GU;P) zK!oq|KOPv(@XF2be)1=hcf*vC9oB?|PX2jYUjcTxcc9dx5gaF{f~Tnhe4R51M4m-5 zrmbn<|5Keg^5`8jKAnoGu9x8Gt@V)HejeWJy8}npiQxN*LY!}?7*36B!zI-|*mhw( zeoAY`Z#}>9SYsR(4OZf|+ClX3F2}M_A#Sz&I21&V5#h=n&@s*g+0%}&ssAI{+*n7} zv1_UJx@`EfT7tB4;Qvtf)?`TU02;mqKZFh+Na5Mz2xlgV1OhKX*n#|bhG^r=-a zR5fpc;6sndHP7SFxHFr42wZ`7#oZ)HC5yLZuQ22LO4p=Z`Wf$Y-3HR$`k1)(WRVve z!ZB)&Am~``rA<$(XhxnttC_VQlSUp>i}Qx4X%&J(iP?M})kb_joCYIHI>@x2tLO&j z{sxPpX4Xz4hA4F`$LG-^7|fq-%@hmByV-FNpJq%B+X>Q+|K5;EHZH74)C6>S{(~$& zvk33XcJTV`DWtuu1J6LdW?C#6augFtvt{#naaExOV<&T9}^-KO^$tmH|KgBJ~4X9`*7PggeN`eHm!gRo~*emtSz*`*J2K!7`_=-UvOdW?tE82@cTSAz00MM z+P~_Z9~h&EXEG>z?L(z6i-}=)HLY}UL}iC|dUo*u`!dcIxsD}xpeBr-{G0~6mSlla zfG4=adnh`88F*hW!qaQ~UgOmuG;S|~7R`7Z)QG2xMYHLRZ;y%M;zW3L;S`PCeg${p z94z>dfC^8hFu^UKV8wbN=G>f>%-tPY%s;R1V6FHMv{!6r0?l=pD;XhRB_IR=9-qiB zEn{A4N*g(5ZwGrCwL$E?Ein>(K-Rh_>Au=Dq76Ydj9YvN66sOe&jh*nFF4 zb{k<~>|}h;=h|P`_=36>uEaohhEBKFMi+0*RGa9#N?V*AQ9uqJ zoW)BB)aQLJ-OJv7m(Hu-(oBWET_VmeHerO<7=2roPt{XClMs(QVoOUPWLpc2T)Id7 z^j+cd?1Su2r)|Hn?U6xC5^{cL6fUpc@vu7x`@%!KjV+$k5s8hhJ?6^;`r@N zrsscdL)U9|SUlCAcSp#Oujxd5z~5Pg3q~}^MBOJ5Qr!g9j?vR|H^ZLN1+=nQgDCBi zg_{?xLG_L|Pp{gDUTSHf_vIMet>%w^j|gLH&~f(7`ngEgY@>IlPl1Tvkwnf|g`HYJh`3)8Nq2$0We+IMnH_gh!qNxZ=WQ zdgW0+dAge)LYcLRtoAuhUtKJN?W_5C!QCQyyy!S61g-!vkrnvEXcm+NJ_JvngYY;c 
z7MaeQ{A7UFnENLNS;-T4w6Ow{7iXZ~i&Va=cq_(69Yj}SAGAMx9JSs~#J1`EWPs}? zp~*3*YFI={CY&QmHim(%r;BfqucRT8fCYVH~qgfLhhJh z>*U$sX?O^z=vFe@lp{{;3Wz?QOT0!$*}C@uxNlnq{t663Ri`KPM`S0pda8)49?D~N zo;JqWD&p7zE3Dt=iL&=+puCPVx!T=Et`8~Ux~RqcFidku{@O$Gj%e_))hL|oQG@bR z3#q5EEEi|96pKUSNv{2Ecwlyew|8ManbRbXDbI4z`oSO>UXy}z zF})eE95xiyfZ2W)ZFh|f+K;f_2kp=(d?#DmX+csG1Msyt#T2`p7;b$TBQ8Wi zEcpZ5hQ(ppr!J7-rz+3A{{k8y8hRoxLC*7R$W)0Uu6HEC?T`Z8ioeLmZWKwS^9i@?-B28x*TyiH-gF!YBZLx$PB0ky02%WyRP6kvpjdQW)Y)tm6B zss*Z#13rwJhVl&rA2Q3Rq+kj@I+Tvh|6+(;eLAdoNGji_qkRDxEpOr}#hjzd zx`t@xcL5Zr$XR^mfC-n=ehrgWi*gxX4LJF8^SQ96EvRlZk?ZBV-3=|D&1D&$MbFGf z^qBS*YO74hxid4#cz_ZVUDLr;(+{9hY8cKwGmDAnH)DQm(PNJ4s4;o_eHqQ;Qp``w z4)C}q&J2~6!`|{i8vUu1EM7TEz8B2lm!e!~Z!m?+@0Nh$-x~Ty#sH(je}lgLb+Eha z2O)=AVg3O*SZbRB$zrlhZ$}&S(tY6mL>6ykO{b<3((vZ-dJI*K{v~6ofF{pohrz^=HAfaXABxn=Ab$#!ga5a=1N6?*=r2Kln35Si&!Sabd)vNIoG zb?-5n?ypZwn;%iH?)&8JvsL(b>220xXc}?R^uxWqQ()`W8)REy4jcIV5?!3626|`q z6F1@2v?nJMXRlGfYa13rDKm*CBd2Jfo{ zqe6NT@#AggIYr&Ti~b@w^RPDiE6f3(91W#&oW;<3sR7RWc?}=(jL_%eULrIohS|QE z{NIsiYBos`=eR_Gjzuv~din$|;dxF&U&uo$q_hinZS!E|4$0xGhTEhzRTxZ*i|OVk zR`k4mGZ9gW0oOW9$mwu_4!s(PzPp`1{dtq#e3wLK@VSc-zIxbs#|W3sj=bbQA6y)0$gFR`mrni|e?<{1pLpRx!Hqb1mo^s7GHmcX`w!1gvq0^0y0~UR z3En)ZhVAi_vH43p)<}BNcF{-prEC%BJ!JxSuuhfxWU`!7@sZ;!M|3&qw_`Z{qZ)VU zupsArP94MLucD<&CPp5!hgZI(s(cr7{Gm$zWE_ zHF&eR7B&n{qG?kw5ntdpbWioZ4uP>8fi{`Aju+Sn`|IW94w(M`Jv8;uD#K zDtKU?CwU}vgybHOAore0^OHALLCx-~Cd*S2=}kC?RZ^eu?v)}uxBfn=7fEux`bckxUG}jR zCJx`A<7d}4n8-v!%)uKxz4jC$o{&d29d#qwYVQeip`S)Nr;taDreNt+jYm#T=Z@=k z;O{lIsAPT;PaN>Y<&Eoc8K0;Bx@QZ{Ox#9QPAFocb4A1V%g@p4&olh@yN&8U6Xg5d z$Z*GYIAFkG6XKGv*0it546bgSNmaiZ@=862O}l3JKuUNWyGZsQO>FK$@t`D>Z12XV z%{8bbt~bjLrOgd@WkT#c;s;nT{~$$ma2GRfOmvRwCj1C<+1`jt4dJ1 z#}OOMH#RJmp2)cfzazU{_#BA)e{rZn4-Jj?K;Yz9G+y`w-~G6SAHVRqshLx_e8E11 z^XBB3-)D?ew#SodakS^N2L66KkN)Dt;xyR~JXw-%!W?@-dS{DrAtlSuW5Wl$#lO!F zj&H-?b57xkjcypk$24_x-q0R{_p~Rw8rp65P`{KjIH@cc&+f>@UB+9`ub9C}Q@@~n z<0Aa>*@wFQhZwgn9Azd;a|-*fQs;hsbiX@+oA%Hd72~>T8{fYw=Sw_W!Gz(KAQ|j@ 
z&c}y5EQrXC0AJ%(go|!9ljD!hV1echo`Eupfy*-3DiJkSXh#ZC&01cBkrutO^aK4K z&&R=jOOl3jQ_#d;1$nC&aI4=AEiaT|Npn4o=KDgOS9*d{9~MHo;A5Wbn4`&)plCdE zu#vp9u_D_o_31&!|8Sn32=zJILr)*L&B_&*6V+4`yxr7-E5}^%K^_mw_Zx7h^@TW_ z(%1OfZzh)_%Hz5U{mC?QZ20)bjCZxyoZ1zK;nFjkKv4cTery#%X(N5Q&H6p|y3OFm z%3JW>w>~_3QJXth`wWK~`C5>(f6*gLinIQ!jyIEDQHM?Z^_g8#ea@X~JZwPu{PYOw zBxZ(t54^^})(|42R8f0q={l%xj$>a7ZKh%h+n__egKQK{rt(W`prUOBM)Kd|AG71s zu=x~5`@3Mp3`;BzzfXVl=h2B%HsJVqb@IGUn7!UOL~5E>HQ3k8!kVwK^uR?!Ji2x( z&1J=Cj(;RJx#i#{@mqLzW)tc;oWZkJQCLb!aqUPCuCSBmh6SQ%`o}bkyuOTt&-9{K zhO$sQYATE(cYmSljUCEk>=}E*LFUN-J3(1`7KoOqbqfB^ulWU zy>vugk=3`5LF0e6DCsm4ADi<%{DvG*XK_eroL(}W zfzPLu($uILdOM+zo_nOm*=y=^0taSr$%a$72m>R|`N>A^~UsE*1S~>#xa%ja>tQ|`;$Vb{H8p>m_Ovx zuJ`QG!BH~2MUIC5GN3NSE~NYQ2(|m$h?uYr&khotxqlM(s-%~+-7Dgm_3^M-!XJ%$ z1?gNT1GGD4OZ*bGa6{kw20bp9I&G~YCuGt|ZdWu`Z;CWcaXgLBmG$ANv?iQcu?lS# z&EZM5G*SD%)o9>8i?J&2gp0D@*s248TxUTJuG=cWCGF0lh1w;YV(c?~v72xf6C}7| zk1PxvU&JjRR^gk!TX15#rJ2MZJy>3MlB6AYi;I8rJqMKPapLJ2jK9bxMk`yB3D--4 zhtpOvXSZH}Si@VqOA(R)1J9_^>8kRCHx3*C(+AgQj1GjrCeg+tPz@?8sZt$=Sw~GmpRn4in?mmucI>Ptd$L zmDztT39<%)7{NKQWgT94Um=(bn8{i=*7$pAOs~O93uMYY>HJd2rED z5714k1m}8aaUnTPsB|a*J~v8mZ(d4rX-9>azsqv*NTD`Zb|xaXT%C!Fn#~n;O~9S@ zE4fqqZ(#NbMAhdpcsIre<~!cT&T$FunB{jmzVk9(&}#wdL#vsx03K8NEs61YmqoWc zh^8a`Q4Qsg2CH`$V7>oS48AH3w(@(hWw z0VZgLCX*?AjJYU33Ja!NGtO^}7(Ig{xLwZ!9bP$bPsO$2hvsM4-=GEoGd5w}oY_qO z3SI8|#rg1Uo<7b_o)7i{;y|J#pk~P+`|Xu6eyuFR`qkCwzp;|m-e@N4u8yOPhrH=^ z*-6|F%R#K&GJ~E?I>VJ0nt`Oz4vr`E7zuw5;TAp(f2Tz->CL4eXT5__ommU3eXg5+ zx}wL8-F%Acq!*&Hg&t%+x`AR%KS`C~BdqogL;SW0U(YLm<4QwN-Vn~~3{qqQzPbV3 zyouSjCk!4atKj;zjj+8*94Gjua7*^Q#|`|)5@?#k6>FE^ysX=}>46VdW0`>u9*c2S z?#ej+q@$s#%V&IW- z8kkuO3VqXWbawpV7q$}&UH`1 zJ4PXBrMw63&z)jTvcH&xQ%SGJWjK@6$*#6b=u_R;&3=I z?G4Fp3WdS;qnNmSGuTdEgbHIiu-hXYUf)w?db@h@-JI{xJ6;Eim)wHo8Lz?fzBT_n z)YI#yFT=VdQATpv4fw@;BL)1KMch!7Tk=JitJ2oQPSi|MR0@VBjl%S5H`DSy9j04jH;!GO#~u4;!Yp{Hg`3h0F*~jnn)r7_Q+hJ9 z}K0k)xX#HEj_lcHuVW3!md0-+c{oKUpyz31v81y^1!j?Sl9} zYNXXvgo7v#=J>BhT(ZuJ(UUc2)CQk}b%G9aY34Kd`u+%WYdnXtbaJz 
zo5ZB9oy4h}kl>tCY(RaV0O#%!&RG5H$2Gwo9PgSCH?8t77V`D4AAi5bA+=as1&=ZH z+hc4$okpw{`aoZaB0E2VZCImb3$Ii^ncgdppeH6KVQ+{6quhLy$@x~tFkR=Fd)7CY zyo$?AFI&bOp`}dG{1_%~QYjO3Armw8$}rL)m(IMeh)v7+J?G1G*fn6vT`ekxB!dFX zeqhPfP}tiCS{4m12-|RV2G_u)WL^c!Z^#^J8rgOV|aN@bgSR0s+E5g@c@8B-n z(Akc)G3j_>)-(2>M;z9hRgLAV(Ud$(A znI*7SOPkT#CC8l1SP1^7)__Oz3Ru(K#C!0<3r=+ufX1jYJf1oSG>dpXz3ra? zOLAMt!(WP|E?0)xo4bPY$DJ5l3zEj1!(cx;XrjxH$0 zq}(q2z@*~JX%Dc=Fb)$1=iIbSM?GZ_uvJPpDar zix+x{B?kA=N0)$btOR!LRKU?3LvB~ec22?Bk&B>z=(%BIZWyL=QNr@*F0_=~^w4c~mjK-(@2TtAar8bM3rvsd9ns(yk6GNR4xFtbBdyf{duQ&2308BtvN#qm^Y>`(xSxAI zI*Q}Nso4Blhap6XnL0z2`|oW5Ia2nV{j42^2i7+cy`Hmdu8|*PZ284{w}|jHh)S5L z^Nx4JB8uHHek!9qNd4upm7FBBx)Yp(~Ey#)1-9VW7qV8Apw| z&&V2~xzN7D4vzZm0sFw8#GqV`apw2S9>1(0skoDMZ(a@~%O;T8^}9f7b`2E$Si&4x zJqd!ojIn0h1-ZuVCk>Bp`_sw0*5F$OC2n=mUQRAApBt40E@{y+ZqcvHxrZk;(f47R7uTfcf>;!W=bviRXpM%suapunMgY-(`9XwU0 z!u*nm2Y)YTjE&8O%U?Xn%#2X7Hkq#%9xNao37Odcw3y``y@Ayo?=f*mi+z5?iCy{M zDn{hmPyGFPH!EtC3t1u_m}<0$)8)jO4dbGamp_m3_MeL)x>+zm!x@|++&P^*=o0^hEYYfh@>5aV;MyIWUH5!$wsAglljU&QX?vz7OoXxDpA1UF zEnLLy{WRi86+Ey@;}uI5gZa`&WSU1BFRIQAE1-q+ZVRV(Zd`!;7^6wT<8OLG+&}^Ez%bV}9&5 z7-w2T^BE)VOVC3+n3sheynpn5X&j#Yw2aI3(MOt4h|Y4Z+|Kd^+&!_G+;!90r1SG3 zp6{O^kaenrN53VQhS4W*|6l6+?JNA5BYZV`+4vTm zdm@4383iyl{U2CPcn$7rjhPi4ov`0A5lSk@!R$dGjJDi^Dcui=#u_>1o%B_5ZtF|( zz5X0Gq&ksvn^?kic_8;aBW^yMjmZL1 zjKi%k)Hv~-nj1SZXS<@9gZ*xx{vm}~`AZUxI1SQI|Gi|dG(QF1TL-y)W(?smW~~^L7dI8_0ybgi&x6FcREEfh?Lt{`7tprM z#2Y_5!PfRS{j__KRJQhF?ZVBdwpj&#otVKH9yes-9DZ?|6$OZk+I}W&Rxl~h6KC9a z87&E}6yR2k&Y|rw`_Q>efU)RG#$$OYs9Spj3s2m~?WO)W^U(@yR`Fos>JB#)-u?^Y zcSO0h*XDBA%h%>bH-P7x3jPdQNV`4iafGJOHuV(K&?aemOgb4v6Zn3={5$&D>Sq`? 
zr4%10juH7i`An+tcc_!g#DSG2%b4NyLzoTmT# zLodxX#gjFZj!iG6V&4Q&KW{B%V~uIXix?{U$Qgwo51X$3hr*X;@%i#w*nvOQG}bU4 z>)-Hw81-Uloi!gQ@EW9@50B%E7ouqWG!MTmc0=jwl6bGJfx4K4;_=+cczC8Tow(K- z=WRTSag)R-S8Id6ZH(|@cqxAU8H=%z2{>6f0&f{6VnNF$EIF5gD?i)d-(@bO|Bf=b zx&H@w`_qR0*|Lt`YfggfW6?Sc&Uq z&&GK5P}FSA;yyxEAj5azvHRZLB|eL9#CpjJ;*Y z+7IlcI~Q7$o@4iDe8*4I+j$XW$)OUwUYURuVtjwPd8ZLw7vcESIQ*e?0)4IbVWW2* zIz($C7wd`lQZ7>Vzr#2=HH@6u+(OQ(pCM+t=jiP#%i;4Zq_xU5ypHYB^!=g9Sb6g& zFKgp#nksx4_2Na5IyIBtf3ta`&KIbqNfzE~=%jlOH&LzdFl@g!MvvYdqE;vOp+fFC z%=?spT+m}=`rctiVKRCLUPf8X`}jL>7%LrKqgy~ck?iy*Go7E3iJMNaXB7DV-E=PR z^w%aba$|(=BW(m#9y`gsiQf1)ZZ4)|)MA+_%O>xL;z@~=(5UQGl>Fn3Kl66sMzvcF z9*Nj?G9~p?u|{D zP4Cg#;f0tV70**Oen`&GnuJ$l^}t-4B{8$z@y^jjkn2q7G})O*x-@ZWdN{8lVhaB9 zeb37i&Z8UfExRoC0j(JOMLs60zyYH!diI_Kipsg5aFG%E`)Z>^{sFA~rHE5ie6a4L z32NS$4W(~SHZVRc(S4`(A+FT$XOTX0+JTKtu|9$jnH(5AkXmLJ`W;%j};dA9=={mtb2Yvc@Y10M4{EEGOW#CgAoh1;d0jk6r5pzmmJ-3&#`oDcxQ;wJqozZAQ7j|aivQ# zTBsKu!)q6|QlV4x@NL3Ap5M?v>Unh`)t@*-e1~>nZi_AJNEeX&(p7lg)0vj*%7DYg zrF{O=GWsZg35w4+Nh`~eY0l7AzD{!*X@6HsZ2y}@>ftV1rz#7Q>(p_eDu5Rlok)Z4 z9-~+9wo>_%zv!uZg1G*{L|o@0PxYJDVEg)d8lW{7%C#bRvjzvLW|<-mZ#_f@cAZ6U z<@xBS+eQz}i(r@C2_>By(s-^Rvb+if6F7Wa9h)ruLBa2ThR(#Di!KbqB1i(*CO@Jf8cV>?=0_m zp8H-6d-|3@(=0#wu+$AV*j%QqvL@J;<%6jLuR=yb8pq!~h+`o1oqjf!(o zIUP5Xrj%|YrAil= zXoEL&!;&G(pNIB9sNf%Qan*pvb9z`MvYM>xodV;A>S@*SIQp5DhEA<+@ZJ6> zHi)dpH{aLbh-L@u71r6s9lxlZT>@Ttw;5->3P#rg8Jwj2mhhW;$V#dCw0}qplh12_ zO5F`|O5cSvFSmi^Dz^AiJQbVDG;t%;kfae4QNcnT<;CXFEUO?Y^YA+zCHkE>KV3tO zWTdb|KMr>78AGOv*U|ec4Iyy8JZyJbRBQPuvPNy;2CNyH1m9|NDd{!9!lZU`b8QS& z^|Oa(at$QrXacG}4#aCq_hEleIyyg0Mq8;o%zkzVZzr6_>w(UQrD3>(`f%sQWYH17 zE}`$Xxp?I~piTcHA{(a8MOKI5yP)j=nJ0*WvxcSga%C*(8G)wB&LBDNDD0Mtgondt z$%Wk(xMAl7^0{n2;}Im}3=A((@eg5C{bw$|Mp=vk8Ei~hj0TcMbcg3NnjEK3UtRx3 z4^C9ZfKO`JBj<@<(wE}!j|Dhd)(D89lobaa__twA^itYBpQ((23Ki z_&f(M-PnwU8}1{ovj?x(SK^#Cr|_}z0%Q&JQF6H|zLyKftM!q1!z}?rV&ie=s9fB= zBN6{>*pCM1PvF8>CHxZVM=i!m(nr%?bH(%Sa@`<9r{rEG`)_2?k1HjSEvFdx;t0B# 
zp1@Jd^!aa-29cM^L$SrS!gllkH98;S*g7FIGAM=_eEECPRgLf0y_w7uGHtEe2~lZ6G-FDOW3IKw}PyGfL_%z`p#!uwDy6<#`kouG~$* z{)qR1&80A!M_oEam3Rwm@|t5-kpcV^bN$=#=~&+Sr84sz`55K!_Jw`AhL2F@vjGR zg%O20FTOGBrYz(Po+lIAFZ;=!=Q)HKYT~5lRAYsE2D-oW#4)kLoiFMF?ruAPZ@vBS zR^5KQ>;h<9rURFj9T4(*bD^pG4%xbHBSbeYgM#i{_}5Ya)gE1hd-aoQ6C)fbaYWBu z)A78?7}WaVhK}Rb(S0wz(X7__2*($YgBzBELhNtLj>A)l^w~wUO8E};+N(>>7@uRb zVj0-=?-l5c)DrHieZ=eG5Z$Y-S=%{H3U^-<7`84c^!a}SRJ~LN&W&ADEA`|SUB zA4T5K;omM$E_6j-JTQXt2OivLm0+}6uowRsZNVeDpC zl)AMH$J@Bz7DY>ZbzBm^>l#thce}7-nLaLmT0n1AxMD)UYkFp-1g*+DO6T-FAS-h7 zsBpBQ3vE=$_A8Opyn7NYzZWfZYtPX9bt$uEJeCA@@qEy8+YPY^I>7hWgT}4VtW$C( zY_(WQ!Uqq+iSj78GP;|FHEjSLQ)x!WT^ueC-iEXuar*AU5RGoBC)bT7;J^KM$%3$N zL_$d&Mi?p4?@u?^F7NZTD6#v^jk`Y?lx#kdKXW%iX@)fo4K^Z2Crm=UsB&gg*C}dZ zXiiuErCeW{A+eW!Obfofq?Q|35}t79W{vU$$(j40H8a)x=AJcUl^ z5s<}rsBbg`*|ma`s7xEO`55?GEdffi)Nw($DW(p@V*ax6crxZ3ZPfltyK*w|>LOd* z#q7r|X%S4My&=sWBSPm|&WGQl$KoQx1!QYdD^b?;BI8Di(e$7XG_>-Eg-YHjy5F;v zPMO|HH#2&4R?BR<(flU$Hpt_gb{bHHapUnb=Rv>eXINgidfc*q(mBRRsgnC9< z=;O`m9k|5F3Rk=xpil4ZL9w=zDF1pHR@Ci5O$!Tbo>WHbj_3%CjyAf!uaLB%C)f18 zl$knf39Nh$#KG+mS^eu#jnoJU%(5AetF~m&*#8JV-dsdIsyCqWGX~es;b~3Vd)j1{vgF4sXW=e|73dPU%8uUaNv zp@Y8Ix`8RP>SZ=WmC@9?7x=8A7>9D)v6&R&r*eUB{An|eRH(sa<1%qv$U=1Z>w~(T z0t+;QF3}=&J|J4F7zRFm@}9esG<{=*SAJ$TWs*NW+ExZ75e^ zh!OsOaoN6uxFppGO((Zuhv7A}Z9jlrC#DllTu1PV93s9sqe08cj%=;C2p3gjz*n@K zByD{_q$`ipz9@HeZga=N{mR&oU4c8-VthR=Q@xYpID2m=^4#<( zz1uYvqV|a3(&fugEqV+*AOkdOzz)+SCUZ_xa)d52Pu>e}0&Z(4Szk;Ve9lHS=Vc_?ETJ3tW1wM8+>CTdwkwF_Rz;sY@lf!+^N#V zX&X+^h1v_r>Wn{RSNCB$YX2F=#aaa)TV|4Hi>v4=A=4GR+7?%CMdnhV1}>=Ypyw|7 zFlMbAIfgGHGH-to<$&3=>bwp4oj;D|*lL1BmKGe;sOAiklDO$FX43k-y0{`O6b+46 z!;-P1m_PD;WbKwflJ##48JjEz56n79z~zgKgKIg}TriUD9H!v-t();Odupj8{{Xx* zoxx|9(2E`GkC&I6CR#TSaQTIY;O-+a*wQiwtS+R%&8P}Ez2YNd92Npo&&YxOIVY+i z&5-3EjNxak2^slH6V?X!!C!ejrrp|toTb`OqiF{#*R(7vQ|vLg;9;grDu& zFjp;K;2p#o;hG^BNew{KXs#IfJtWev1M=5a#;z)HuVAWR!$~mgfcdqQF7j zsl8w_E0olURztw0T3FbBmN+f?NxDZD!CR4NxUu~gnfS|(*#B(j8m5FYxlI;uw^g57 
zZxn;`J3n&^hOUsgjw8A06U(8j>^t=Q_Z7P5h_KURHpAq>RA`9Y567PrfI?$AIJNzN zSt^n6vMd|SrFB?Nt~LO z%HZ;>GVZF^K6sk@A1PTL$apC z<#`Y5AgSa4>L{d%Bb4HRUwIyWMi#sqg>KPFg@rDDZUqirR+u5Gd*^suJ!q)j8VN2b0 zGF@#fd*nzTd|DGEM_uW<(X{w@p(SvLAjL?x9Qituf*? z1GgGSf#y#J-1e#CYMOyg&l#*WIz{A{>o8*bq%rTNu#d3kVem)`x%cuG>33@*Win)D@FH_6~S?} zxnnx%)c$suUzZ1$&#J?zwkX(sB zMX?|4c2lACor^InJQG)2bJ$#U1_Y=Y{N0=mJMMmCBG>dl;H7lX%sBvHtOq69c@U|# zsW!T=hHO5pjJ}PhK{LMv=1?=iw>uS_3?-r9&pfC$mt?=+RASF7$APuQdssoL0T%U> z-;;x&`0^c?Un^u&X2yZ>wpM6fW5@jHuY*MiGVEFZJi&n~u;f)@pibZiB#gZW50CL6 zvvDF^trBI^Bc73mT8`jdz7}pzeN4>P2o5{9KC<`pJEHpEWJsGn7XDN^LU-Rm2$@)g zD!JXX&`ypfiTwxRbO0;`!}z`K8Zd613xRc$!1K~_kUH54=gZT3)5< z6Dr7w^ERM#U?df_6g(~R617Wb#lz++PZ-uQlDunj1-B_yMBg`;`o=|rv7^xKevu23 zEuPqPUlVt!i9(L*dVx19%u3FlWzvqFB}(_}s5uu&f`6?dRiOKJiq;^b*;2S;&x_X|lD5;2;iB4w2g6l!KSsP-_I4}&7V2wq3 zAtNOLZe5aRY)7iboNKaPdNVk3a4EZ#e6en}oZ+O*J@0QOs^#Z^mA(vT)=9yZ zl#TS{S2*|aKWK*gQ+2v9F5a!eeO)!=HD60!c??!;R#CLdZzY!!wWY}-! z6|ncv7?w?tW-UPyqbgBz6Ye@3(RQ-D^XgE1ULLt5_x zQxOUxLf_*0k|tu^Xb&GQ#6#`Pk!;KH_b^(;5t;=yy^q&;wzBaAT=+?b4O)&Aj7YC7IJai@)QxIp@=j?|s8&bjI)C9I_yer&w*Jk-1CHA|$BA96Gg?Jwho{m01WZzwY zSBqm{z|dUaG6z~Vy)R+hzt4oB$&PSyjs}!9^l$_2A#}(tl#Wj^fXrJaup@8^Bv1NA ziZ*T_#>u+S6}gU1KYNWlD|tg@o@O!mX4A>qU+dwrQYKmHHl6#TzKU)YTv}4xW;n6e z9$r2XG8f9)0H)VDudhk9mlqs^)=NW7nZ!0Y(`Es#R)sM9vKAi2?SzgK5n$YQ9zxDF zgMIf}P`B1YeMaamJut!5!QL1kxY5ozPQu=Y#b{rnh(X>mIHKY{Y29=W#wU!#wR`&L z5?@cSs%!uoVLn&!_%OL%&`-wrNs!$#-e6)=4AXvu(bt3JWM=V5Se5#lK66TdZ$IOy ztgivT<5W9Z=#-*Ma|iaU+K#%KXHaaRE&h^B#HvHXLU)L#&C4?(XVEDxU6}h`UZDiF zJ_RItzdY=eTnb*zPpR9(I3}@l1X(lRg?h%C)jEl;gw4yYaAsSwiI!oTrDdx+ZWA1P zu^vfa`#~P$m!5>?bpjhN+M4Y234tw_qKKcq5nLU+6e3rNlljrFNytx4h|IPo7yU=T zWa>@AXDuU9cXZ%~zQB8avmaf@DWXIpr7d5kF|TqRNzc~-8Y;4%Za=<+(%d&p$O%7AoCD|B9Zohcmbd)9<1Kxz9YcPFbdtd*&*|d5jdXr*GYLyx zf=YMGIh|~Es3@BPqZwx?l{rmZgznjpTo+xk@B|&`76-R9j`&}nMvdjtvFM>Meq6AP z9BR>GE)w+CvM>{fGzM)dEBtQH7d7LlZ zgZV%8cusx`eOLdFI{SWOZopM~IXVeFq=TSxQ5I=((!_ z`R|LkkS)@I&(bUD`6(0N?|Ctj>Gpy2^^S#7Pg4?qzJ#e8D<;fZ@`&YMDSRB^PS%VU 
zg>Hq{)S+n!wlC5W?rhg+)1EYeS(=HT55?ocbU8E_ZG;OOl7)QnNn&JP&jeS^qvwX* z=(~6yrpH_jM)rHdK)@)DEb8ZeN2`JJS5NZ%MG{rs%aJ#4#7KYEw-h>EU}<8(+rb%q$dPI`OZZIUkB7am&WP;oOP(#;`6?N>X^ zEt*Gs1`eQ!q7U3o$V21T>jY+-Jl31PBVi?*&|{qmU9fr=wPEBjY_mP;o7a=jL4Uk^ zPGC_UziZj7@R?b2*`ao5?=)(qJ)2He)nO8a?uLHb8gfGN0WBCYij4NEg-Kcx&~@^t zWmt(Q(;eY5lo&M2#<#?pz(A$`7-e} zd0nMXjx@EB6IDuJsQ-xUj`4<-lFqOx*beT_$cOnO3Lsih7minIfOy#mlAPyD_WAq3 zU$Yf(FR_{Cd`tkfqidlva1ImoTnu$)3;%2X+}c$!?aY6o6NPh51O2jD6s~v7v*212 zxFn$y6n@qO&R&{`<6KYE%at=Nb1YM-`t)}4a(xQfYhaC&^o0c7N!v)(K z$j$Fv;Hy>)QcEpi&#O$3`FWD8KVT1);hOML+@E~!9mcES*Kz8>Hk_1^gof=;vB3X6 z#upyJcvTLWQRQgdIhl7^uf*%_ev4v122iiumd~$H;m5iv@ft(v{*dRkI)B9(vM-AUj4{;Qs`_G9V{Lje*{+!ab;UFL-XqfxLk~M7_lT<_f(Y zk!ROo_UURNE9XaC_NGF8W-)LMi!7hXUSYcWOUU^666&=o4SdCBK&9_#uIWYrm|nUF z&yJrV&H*8oHNx63(OVt#-Iaxm(05{PluV6!6v@Aee{|&AotPLMg=^lr(zv+oWRcY^ zF0J(%^M$O&cIg)S@NXoQOqIhYb@j{^&v>R-C6yj_d`rJAZxFgsF+z4~4fbZK;uT#f zeBeD3ZNeS#Ha7_umL%a1p|@{&<38CmY8<%RR+B3Mt3b(Y5pk%_ptB8s)sCKG$Be%l zjm!JfK)zcEl*iv^WV8e>_@;yKYsXBo<@Ny>^Cpc@k66qt+<{EdIx>{027@^Zu+4TH z$#MvyKA#4eT44sXrEfKJYR*4mXFLyhXLJ0xR1b_Yo)c%+Sy1Mj&8&LuK~%18BR_%x zgbOgJ^An+U3c!-nJ@9ah(CyzJ4+$3cAXKddVxC1|Smr76ev1NzeR|KdMo1Aaz0;(1 z>}>oxC7vu&5#BQt712mh8-nZFIMex$$Yj;AFmK&6hI+iBr|0D{ftRi@@{+@}LrR}+ z?wNr{CYO>0!db3%;4>BdIFgur)kAfX%l*_TB%M#=AS=v;`>;|R_C{@k?W$5RX5Kis z=jRK*_*;T6_Aq%7bCj7gI1T5LyB4O;%MnmXCu=WK>FEIVH*<7(MR(xDPYKhBJ$6NazReWDLIpHwjv%Rhr|M=DV{ zAOg?E^GK#%#97v*=oT7+*`>nlNvzT`Zy*v>C#)fAcGB4VRv!d88-$3v!SYm=%1pDN zZ@b=6m$4>rOy~O$;876dYSu>6@%-Cf}=_CQ5cNC1GY%W| zS5m|$+KchUR$FYneHn)<6|i)?1s?qEK-(9%5{2Pf^5^|6!Tr}rq9*NvkC#iR!3S9o zZ1Z%`Kn_;7ionrP=gEiryJ4F17zB@_WW>Dv@Yc_cj`j}5(9!lVEx-$&Z~{lJ_&rq{ z8BC?O=#ouY<5^W*P1sg7p1n}43=4jIhHvjDu-DrUfrY?;xREh|ozef8y52p8Q~E3E z(ux>ztz|BKqti)0zFJ6U=T60uYkpDGF$ZaZsjzJMF`W714RbwC@VrGNyEj+A&K>ir}hrK22eqWjy)MQ3Gc>_>kK_SAb#VM!M>| z1GSO3M3i6D;@iodbX~13PD$Je6ZNvmO`Att9-M|(owLB8OUts&;~TT6N)bls7@$g9 z6ImX7ktPdoQ~EMG5Rr8nuASLKWNmxMJl`jG9oH&s}#Zn{JF*5~wPaR@o(GaDtCshAScPe(?Z 
zG6l(R=$`H3sC4dC?fKnEwW%V0>)D9h+y?5cy2Wzq{9H~gXft{*T2He+m2xfz^TBbZ z61du?!>4gEaDvEjxq<0mChP-S+g?I8nmgjSiEIctND^&2sr~v$nm5_8_KDOQ z;8lez-)CbQ?xu($%9c2}O9c8RFNW1`qUerm`n0#YnkZW4WAlv%q&sZ|)tlErHEhh` zPPYt9{w(l?EOW@i`FFWlLqe9%bTY<|oJT#L-5|#%-lNT8+b?%j_t6(>VO(jY;O|Hq z&FEjOCre{>@!~kZa~3VsiP0o#>OY~mQyEsBj)dv|N=U)TAM|B?0<7zP$!SzM!6*wTK zI>dXyVyjN3w)YZQ8fcGNIstg|RRfqt6cgoy3sAky0=L#=pp;Ak(Zov_rgaF44x8Y* z!ECy*;|Yw_k)7VKt@V!nxrLxQgfoe*3B(?=*mI6kzLZ8gQuxiQ2yMHef2 z_u<^n|c!nM7GOy^~vbT)v&@RlMluezNZ^Rtw z1ROeRkKG6QEZ>{QQ|^Zf42}?Z7I(GC*US^J@6ZF>*L?zmtxkbV#%!<~{K&mBw5;OI&ui?HcPYC1G5F7xHTJIqt8ywPk;&y!IgA!BHQSrz>Yb2 zpDZl@!yMhO%d8Jk#>#bixM0g)njNi7SLul`jl*^HQ-(Ch_L;(TwgiL5&%`T!#as$l zah_+dL*}Mi3O!m{e+B+@zaHo6IK_HuizY1Skaaw~8icC{VH$G1xfYvw`UOg~02 zxrRQz=7WPBiQLMXRdDl>2Qip2kuGkzK_->#p|K*rnS80MOs(Bk+|%TSRsSmJr=TQ! z+#iNR?jzu&?q=*0W6?4rQ+OnZp);?`(5x#g28CXvPp->wPXc^#T%s=Sy(b03C*|m_ zlN<4H@DVO6(1?6HEO1`^Bk6^Y1;lQ7EIMp-LFYHJbpO&c`aS;%$t*a+bS?NrZR%^$ zcDNqpUIbbyd^6;J_fKXlY)0U*ieoggU0_y!>&3iB7Wh4}oAVE))OFifyqGzPHfFAe z^`qy|jE(M;_0OYT<4jRyDo{6x1n`!+Mk2gI!7zLR>Mg!QYZ{W!@~tS`nxaV$FjtwN zl%LSj;sHs^Pk~$F1$bAu0(KmJ0t>wAAjdeoOIWWs-J}Lbc4Z{lcsK4_U{ppiVkIE$C&unKfHd%>Y?Qf}X zmo4{F{5f44@E=n?As5S=J_t^$HN?hcA?;b5Nk2%W;@X;(zBzU>t`_VqAb zd+G)lm>dLO-3oYWnE~HJCD@-r$0sL#3_Jdq97=^IP`xkK_*6p`{WND=PMfBVso!?f z-Q&#h-LE`4J~@$2D5|Gqg(xZgP)|>ZPs4XMUuig&(7>VRH2ZiN-P9rU0UvwfzC3;W zIx~xE>^n}qOz+d9buJi?6G<};qze8dJuF=2L-X9$5uM0UIHNz1dm~;iWFl=)J1G)v zTy&{+WdxmQFdtSwctZ-(3eaWcDlGAqz!9nXSSPy{>w|4EMAaT|SY+d|%p_Aa7=!o9V$4~|ANa}})Xd*t3 zp6=1X;GK)m(0ng-6gx_v93O+`oe6Z&sJC2Xo3IDWoJ;Ctk1&oUDqc#CRi@pQ1gmU;*7qci4<(4!_Lq@k*zMvI9O*7|f*7-*wa z3sNzExCzhKhM`Y)8A{Em!m}P}ICoobO@iJRye9Ler;@0(+vqpBL{e3V^lE7}+3Yj}W8_k~c$ZA(`gD#iwRwn7 zS1F>Rop8SWbJ)@*aTjV#xXy$klHv9!QeX3lj$HMJo;qN_Z8}^_m-_WF+r_fU*uE~B z(pg2LW%}ssfA=YCT15R)&(NJl1vJedk9ydyq{qJ$*S;1RNlp74;B?P?GXHioZe^OO zbwCiP`oxnV^T{}(>NrR|4S}GFK`5(u&P9G|Ap3ewgZL9ks9L=qoL@cW3@^`vjsxys zoKONUKAnV})`#fNmU6~GJreGV?Sx)09k}>i5!Fw=qA^#kuwwj4nh{&@%=cHG69%5y?b 
z;yO)ov&G)QSQP6Fr{6qgVUv9ps?}W=yhoX6#98D0h$(pRx!0>~`^iq76rSX@iG;_`wdOBQ(inY7qDakDuU01+7)xJR`U2o6~ zy#mhlrxYG;KTE`x`xB$jzI5%h+0^EA8Fi9Y2C?!aF7~M$$ETGsaXJjyeM6kAP12%4 z+fJfWY%pF>pMyUy3*K8eh=(1b@lZ@U`o(79+m~_pDPERwl(VVTl}mt^mK|gT?kB~G z_hDeQ7c^Wkf?35Oz`JaM}$;4Eq zAtoVT=vA+owDIdy!krd!fdhANf#ASfD64`QnR#@7-61mh_Y88x*4e?bd8ROzhA zK6IUpB&w(n(02=6>8Obu-MCE!Q!dM(Z+sSgbbUW{*G#7wLu<(H)w*PD$6m6``~X$j zCqgH=&ZaQ3gf8@dX_f2p2I}Q*lb$7gav5LMr-&cF(=v*!?O9i_5XvQQV8|>3qa#p97TDYnUS&e=& zd0rV!+OH2A!i6l!`89OhA4S{{I15R$3Xb#B!q&-7IGw8C`F-ZtaeW4sL@DD=g|%>S z+!dmFA%)n+ZlE4Lf9dV`B)s_20dJ0XC%So|DE2`U2a`UK*h?ySQ%;F~`6EdK)z$FW zic@GZDEvO$;!w5nKjNIKhB~FA(5&u?W!^mtuD3Rd1RYd?+^c69hplc<><3i-Akvjr zJLm`Pg>=UWWvo5*oo2Sh3w&b2aeU;0(NFdIV$s?MJ(d%P=E$ zJ)U)+i*ecUoZ7n@s$W`0)w&YNSW`**G<-STQpoL#ANHTX|H` z4#RA5H}qcYjS)XLqKW+~bZvIVr}d`X(QB8vHTh!jz)cjM=`@nr#c!GU3&w$SkuHR( zNy5LOXj=Nx9XE=7p{?dFw6{NnHtEITe`b~>Gt~}Np6o%z^aeWgZ-_2m=7J;REb-o- z4IFk@xN20(~QJdR>+c9jI&7EF9cFSNd|_1rTW+f#*3=Gvds=kFzA5_gu_8|Y0`wZlQ=<~us! zNDA?hmxAQA^2BW0f0oR18T|U=9=&pZ5o&jgq~A;xFn*&Cntk#_8#y_=`NRjye&%7<5Axe{Q@l`|A^n6$A-k6S-3SsQEU%$X{ zttZfRHSpDXB^ua-pnF;@>nSVB{@Q+uDD3%wuYS#dvZGmi*@$dd>6(u>l=ks+?+eV6 z@)U@BG@E~B^#S~~C=FYe!Ve#L?>94t4;fCbXi6Ja8~8xGJbq;KCg7IgYwfq zK-JX(HlehNojrUEUBnhs-@{92YC3;d_H;=uZ=ByjjGJ$P(C zCRwDjS10(h2cJq{dYT36oVAQ)7EWgON#?L}Q(GbVUM;Sr-)T^~2d_IV2N!(o#jBYb z{NedEfFQ#kycfue9*gJyZn=mMq{@0r*ub6Tne4@hN$mU`e*D+*f55&v4`+@oK2 z(4oJ&w%1}j%n6^t&l7CP&!s%@(*9NyDZPNBq@TjTgoAkg#!=Mo9mD#*eZv;3PGNfw zo3nH5l8E+{8LmP-ACk>|-bH*M*2T8my~A0$b&r%Uahg;&cUfzIuVSmX8`)GDXQ!Z*8Sqp398rkHVs_b(Gaq_OT80X5zLX1&3JIQGRf7Cid z@LL|^O^4R-0qewR%adyAZ#ajSS`x#5Rs}ZhNESqGPhgf!v=MwqO(10<#XpXzfr`1C z@YV|%cIx6yXk=>5)@WS8A8*F9yY>x{Fa>?Y&`O}8i=pDbe-v|KVbPBi(x9JD93_{q z!)`ik;h8jcXu2vd8=1-G$`M3Fy5a8YryxQ%l?BxZc4h8P;D4L( zadYDM+h1Csu;~_CQ8FD{;%oUoL7UizXROgCHwC;m^@2lc0jnpo9l7Wti0(cFr6u!u zgN9-l>{4J4zZ=1K>zrT<$7b+5=H>8pvw}c9x{z0JRpjl5FLE0vd_(sonf%b)FA%;o zolTh$iIP2Ycr{xqwy{NnU-NkqTXq`QQ#*7Zah3%y(L0V0?$txK;u8ciChVuYI;!J4 
zk^Fgint!k~7vdwN*s9(}c#*OYl>PMCP%C?s%U;SqKYJMr8>R|(ww)wVU8A z!+FjAR#crK%Bo$jVXj?OVprsRtc`qZ&il1nz}XXd@U1!n0-c<=!!3#A)FN}frfepd zY`6i1sr6VSb^>d@PGyVzi{ZwmFp!ob5Y!nCUJc~`9L=9!Wdp|XIuJBcpO3>y>}H2f zR&P@(yYHGBoVb?)?=wBoJUfMz_Kd(me=+*FPXdxErP(EE$ok}D^OpqvGBqE=E<5DM zKDAkbje*i^Y=$ErzcPXy7F*1FZ*Ro@KeJ()Ns{_}gzT|H9 zRNqNfZJh=ydviBCL#CE(y*HNqDukW2d@b0}`j1d;;7D&Da%6w$r=xoGPBv+j6696p_lB-s}$`}l~ zFUn5HHRAVfe}rG56x@E9*1FI9insI+K#*D)J7sDldt%mWIF)F`+AX+7IQuBp<^3={ zGf#_OakGR=%c+Ctdn=&DL2wF5dazeY`|#Iq9f3u-k(U(lhhMG)qqsnR-rQ2KXj#V| z17&tk#3NcUwg~tjJ@!E23J}Qg>>HP*?1{NWIQG?fC_Fim{n@9$vg205kp@wI{*y!0 zD4u~4T~_Qbh2^ZycNN}a-cDxyFagC7mu05&(^FI_Jobp)#D#zCVr+jMb4tu z^S69gLmJfPtYPo_vuw%B9(Z9lk9FgAB7I>d%mr29jo&6nbrBdLYp39`Nj)THiv=wo zRZYL=QBKzC3vIqHiyvHjFx%oVzh_4eUYv2ZcJu5Uyzt}&x@^iqkD}RRw+(FHb7!SS^v=~!aVL_nBu!f&Br<9Z3bq}jww}HbJMb`9; z5?@qp&kuashfZ>{*k6|p;_aJ8>>r5$wkv)kJM!;q)-JXWiCPZ7edz-@An}%evQ)@% zUI6x7bS7%`2Xnn8W3gKMA#T6-kx6iE#DA-f;mQkFNz~*Zws7D)HeFuDI(O8wNio)} z?L=*CzSv7;bk5-&m3&D2u#*q}5zREM^2MTdan^f+0^Q?uhy2o#VcUdLsm?7q*!?&W z8&qA`hZ}F;8mn{gsp%)EAK$`jCdG0zRgwMryOuTATu;lqKEQvu%2uyNk7t|K?qVaB zM6%lZqA<(L1Jq2`^8XbJtodYXT+h591%ZqB6g}i>B-2ne2iW5#583o(4V?YgDt_d9 zf1-OUn6LjYl<%t9&3eAS$SZBy%_tPSBYNW}v01;6msK=o8%45FE%F`bYvsi@*xf{l z@Eq`v&}V(+*YoRYiCz~HQH<#B46#kxA%FEop&X*QH z=Cv&mrN+z!gLD4m&RlJ_%~FqT5|~qk@2Ak=>$|XPMI|3-GM3%*(w5C!A&X_&J$%|n zQG6$D$n239x3azW8f$j!=at=-@g+fS>=fxcu#$F5tU)htN0MfWO(P$fmv%XTNu5uy0RUuugK%`Q)BV zokmr%07iybW& z#OAc<@K$@m$X>ND{@8;7mbntj+CQAYO0?eQ$1V^;?E{}MRNaS_(%r@yzqraTPqSw? 
zd&l!*evRhUjV`n0dxB6_Whp8j@#LF|Z{g6RiO8vF;oY)Zlz-j{E6#nPqeW)$d$Jx= z>G!VaWAanzV;o@n&UeEd-v#XJ@ISD6?M*z~e-OvI1d`>;7xFqclThT=UFPW&M>eCX zh}~-U2-bgyYB6PbT@&_f^fH)z?+ok_ z9f@1Twu0l~54gSIOYKo5Pcp^c3!c}Tv62Eyz)n`+9PMsoaZfcL@aL+~*RKO5;lAk6 z{t(-J)Nove9_%%bV87=$pk8q=#u>%3=Ngmwz=h>3FSVIfniFg3c5f%E@?$h>HuRjm zzfR!j=&xWGKUfE6dLDwVS1pdK+RAD_U&%7ASD>2nVpk1VqTki!(5mT&+ZIHE!*)6H z@7Z)Tw2A~{rEJLiGM}%WCj2%V+aW1=f_C!&zHpUel{g+gibE|$k31-!CHf7b`Y!bWti;Vu5C4!zQs>5$-MiiJM^nq zDgGd3_-%|8+u^;GKbEkLeG_^Rj>Z*o)w@Ia)qmwjUvOyuA1ruj!XI?Kf;j=HsHQCg zBMg(#{c<5J>pj3qG#;i8%jMYq2ea6jbEH5@U)W znXbr>{;SBxhfQMNT*+ji)|g-MKSO82h(#NPVIi{1S|OpNLaE3*&zX`IB2rOGQYne3 z)K@~QeHYn_6e)^CmU+%gB1EM~l4zGy+BX&7`~!@cXXc#yzOFt);hrgNRMg-l)t3lC z87*`6u~;!V?S2DaG^hx#Du2Rrt;bQ$wFyaV3E{XS?5~WyutB2>sFoLm`Knhvzyt+_WgLokKl?8gl&z_64Ee+%{qRH{a&|1=+t?WXNNShPTvwio%um3 zt*zLqMY6*65#?a1)dOXlMTLL%x^5+_%L3+4RYvU`r- z=Ut*5!jo(DSQy)aJ5DIEhpTr9!@cwI-raa~l1vr)NzMIGGIH6sR{HMPethkrg{#$zDYJh&p4lBkMQ{Hh zV}HmBR18Y_evdfXH_pe9(dk^ps*BXgLKg~)?WpQy7yQp&h5nNb#3QxAVzG7uUYvOWlTD&g zYT87;w>_Gj^6@&n{;UCc%gRAbZaXZT?GEETTcP9qemLH}3!mA4hy8NTp(!dA`X@)j zwa^CGFQN%zpPRunAQvu%jDdw|4P-%qEGVR@k;A_~lV*|oWT!Hv(_g)({n{VtS`~dX zEB#cJyh4TA3ddpI4sG}u^NE~mR)y~PPb8yPmMAV;Mx6ZiLik=Cc(c-qOaG_=G15Gh zcGPMUXR71hu^{Tm?*oW*N}a^^Cx__pUo<+l4lP$48Yh!U!YTN0&Am^ z!W@+0vu?{8=s7Jn$n)l1xB?|XKO;waum7WW%ID%!e&_vex*=oVE{^qLeVoKEacDIk zr2YkwWQ@iy!c7RHRrzvQT=J71xDiEgn4j0R71C?A74+4wS~BtcM94mC4hhjmIPJ_v z(xM~{H$F0u=*;&3L(Mw|81`)A+Mf4SGuN-7Lx=e%N?B2GbIt1VF84}1A~7=0-FiZQyp2rKOO#4y^Im&5k zz>(C^c>Kh5JlR@+NnjkoWwaqpu`Fu&2F<+?g#6bLG@=k@jTnm7fUJ zyZVzGJx3JoK3AmM#u}2vx@P3?PgRWF5X=O7@N@FLJX@-$i+=bpA2faqkpo{Y@Spz8 z%xRv7WN0uNtd}McJ2_9fNO=;UoeIS^ri+Tbnt~HPr_;G%CRo-tokpul2tTs3u+*3( zFMb&FyQPh^NY@Rp7XXHS^tgB@Ud;2!xaok^8$)Klkw#mG#EuG*6Pg$Yw% zk7|*Iv^;$Q4mKY^`NM7WlItdVsO&iJ!aE9+o5D%7SrT&1+sQjNn(>y8!#(ozIeK6} zvGb0BFtgKwalPxQNy|CzVQM~(?yex)-rg3Nmz*bCj}dCNu$!J|ZE0=8K9bg4SG6th z8;!B!X<<3nNOaFdkgIzvaM|YrYr`9e_kK5&uP-BacHM+kTf#v3k07Z8TKZp&uaZR1^NoWZp@s{jyc{3(McOv_mpYu!AfQJ^pG`#DL<`Pci0MN 
z&k!SfY#(z!EG1#rk}=E#*KPFV&ZYQ6ycXxib2y}a0IhdD!u9+tFT$t|?GzQU=(#U$ zHM)dKGgYzOVmC&5@a&}C7W6q*hauHI7!#7hxXQSpf&DUiPvjxqi%v$do=zNh>=9n! zxh`*vT`@{e4=b{E;V^$)#BFwRxja|nqIM^Ntj`TiEM*F)QY`g2Gsx4MlfXMai3r-0 zA?&$61nl3;8Nbx6(r+%LqEXSb_fbEWu-}HdM_XX}gd1EL@8HwUuOQinX5lw`d12Y; zYTP_SMEGp^WmJ_|j+vFFn9#Nef0za01s&dje5{6sHJv9R6ZL7#zqxdFV*tILHVyVg zi_kOtkG}3~b>(N{777nvFjK#jW4>)J@$$$5&wP%|6h8&AgPBxwM94YxT&CeS6{*~7 zU#eu~4=lF}KKrRb?Z?HS712kQcAg~81`8moGM*lLG9BgByP?YIDD?3h#NAo(GA(fBPi?x+=cC&7_@2kz4%+n-c_>l?bun$M(UHbC717ozO%&y4w{ z#V#Rkpy+**{1FJCShNciB({_LOaBnX^E1%q+B-UN&Ud11beUL%rx2ZKr@;4c7=th0G`cpxn$KK(WlqH$CTFswaNCzUGF$0Au@Cw|=J01lkEuPp-Wv!SvS;bh zSvN@EwZ(LocrZTarlG_Ad9bBFiUj_3gS1s+nWC6tGHycwGf|`jp6m$)t<*K-g8D5e zhbA&S<_wJ6lLcu%qrp)BHbwj2v~2%gqTvOQ6EsYvH?PK_JD16@E0Vas+nJ=Nv*;U- zYjlOg82aVFRt#GffzMVGtT-?o7leGF)A1;c2#zLQ$a9Dm@mVB=0frY?X z)HU=lFq3}|^C`!hDsfo5*B1kZhUuvMbUZhn?>NlShilI9G|Ec@j81N(_BJM9uNeoe z-z8zFEF4-gQ(^NP7qI){1{TSs@XE&)+Ejwc*E3g{%~AH)Fjb942#blQ!);=`se!4# zl1m!9&7o73|1nJ1mIN1Ps0)oxrJ%Rb8`8W|QFvRW9KW6TL4~jq zRZ9oS!kSY+YJZWZ2_di(&cOm^9PD2r21%g_&>K1j?k@}j-CX47kCEtgrV#U1nb6r! zwxYsxORBWM9rmoz6z(eq=#Uu?pR~##TfG2uCSQS)jhbwX_7t{$Nix_8qG9Xjm+<3D z4dnWkf}6!E@G5>oE;1G{G~pKYsh&p^8eS2RZWoN{ZY5V&@m<8jCag!bAzW`zW{=5! zfy?7I!g#w&@JlNeg5sK?L;M5gIr#8StUNAt+j`*%IFAW7@A$Jm9{-eI#h;c6Lg$K5 zm||c^IsP5p_*6f6lVOLWo9=NI{Bxl1lb%qVv7{-lN2(nD($_b#4S|r$V-QBdy-5J{}8c}Xi z4o>;L68o0C#2e$@;rx_j`p{}U?_3c_14k=}nqmk?5{n@~eG=>BHkSRisS$>IpF;Az z$?TgTTp zXGct34WMlI6vkUWhdgpU4a6*s>fiPyqpDTmL@1vls>p%K@}}^@q=(8Ugux7jbCBy_ z&S{z^z=}`GpuhDUr&_lFMA}btdDD7P_fP_#S-A<1%o)MS)Tzu!eI>PdyMxOzxk7iw zT!WN)N&Kzo1M^~A$mF!8^sq}BJkGVpT~CqsDZZv1N5e4q{1Tk%KATKD$Gh&GFHpCn z^)z@&9L;FWrN$$on5^W(6`YAf2Y*Ra{pH9x?6tr^H+5Y6It^0h6_Ne(FOszm*+j}! 
z5-UF%k&BYIA#lrhm^k7FDMi+Bf#-zF{jy}AnAw5bs~xoG;3JaU6G?Wvs^G5|`c&gb z391$E!J+4l*fM4euK9Hu*IGN_nG62JE?OP82MkfQtU@?hxG2f zP12JpAZN!}5^8gem~L;STtaT8#*rtSu)2U*;F>`jk7qF(U&cdqt_%D*x)lb57l_>M zcjScka^S@NLk<6nP~eusSYPhKirI_!`(-&cZ@bByEL?|m%f<1q&DMR0Yl4io-q zA;14>A*pKhq$lSb?BKIu1=eYtve5|P9_@oh>2N~kS-|SRKKgbfpSxX=N&bC{p|_%E zVdjB)Sj|7957^#dru!ue1c4L5#Y7hD&dmg0&r4jn^C!^Q5=w_=yMTGvRD3l!1=>%X z!8y^^0t11naQJpSD*krI{$pG5m5sFUkBPJ}Js}hob*H25tWT&C69ZR+!idsWYgqPs zDh^>Zk-wG&4!8CK+4c_pRGsDjce~)kbq5-gERCn_GC}B61#@ktlZv~B?7iK75O60J z)JM)km*IHKsI0?fw?klaD$gQX^NX(jZvzYz^X$!mNc{ctD$4$Nz%|Io@DA;pxN(6C zF6nOqR7%DlJIkiSK>4Jst}yZM5Oh)Ctht zF&SQ8NB~pAgWxI>#va_7!m3D^vnG13>`Co4?Bp98SgvmwyF$Xy#?&UcCeJu&t!*fkPR8`qFrqj1m@6v{F5gg~4i8@yd!8cVF;}4q9 zhL;kc^U0fR%~8PP*A?KboC~` zL?(I>FzuEw|Mws>X<{|={c8wFK33$E%0~0g?p6}_&ZNq3k~OJ#u>(x>j*!{FtD~m# z*=3n$^m^me2OQoJhA<{|Dka4~V2kE%mqE zMoLSH;J>ZUp#P)^xc~VCD<{N(YC;-(wp1irw}iqX)(<*_FEQA=5dEI6#$9@OBu&o& z$42t~Exvs+eeY__>vf=?s#l>w^B(f6G?=@*R16O)yrhn-H5yijRwZZMt8#7}AqG*= zs6J2xwzm=I);{5{gEY>r2?4z~mXNUh1L%lpfnV_|^J%Up>GWk%Sk`X|jUHiqZzTw< zy9SAsS`jE|CqjYbO=A0WCTSo4iOY*=B(&o(Rod>4Pn=JH%rye0En((!)IkNCB`Rna~MOhK}Y0Y%pq&<(MKp?J~q>m0NpYWjSIGlZ% z@8}*=3dcY z=WkMHbsYq3Zm%NSJIaxrJ7qeXyJjZKW*hL?CM9@!Oa`|-xqw-{qRerfb@)#0I6d@Q z1WR<9sEvXQGf90mOw5ynsSl@u*4r0EEch;os8u8OlcZqb;WQGvX)UDhiA3i^6R5kB zO4UI#Gw>WGMx=xR;3gdc*V;ny@Jj(6^LYcu^|jcX&JI{5-Vd6Os~{lFf>pXXmYwV< z%?9oqgu(k4$oUPonX8ePh~i}lc(&1(?vh!?Sgq55za?hq;j040M{D3o*APtZiG;o} zJO{6>9me&`fVzk*+vjY_dc}3XT4NzD@PA1EzVe`s+rM$l4s&!G^$Go|p5v<{>*%Dn z)pU;TU2OU*3wy&$`99oen6hRQtT74al7=+Mco9Qle`W%jFPnoOq9^10Po8*A>af>1Qb;o6> z7E{1(aE@?YmZk9Vu2DjxHzB@4Uz5+OAgVLnyf}4Bq zK%}D^^s@~Rty|6aY45`Cd5%=(kr<^4yqDfO0%k1>eY3$Q$+#ih5e-wXYJAE`9^d(I)ftLA+$WPSs=dXEji~>MRK#2lC?SW zaa~C+d3N#!i4&^QjQM~z%a2m+<5#e8YXJm4V_9pdcxX@i4_3`jz|jW+QT&!CQ>xR< zxP?z5qOk|)0gIK?e!&mwe=2q>y^H_mbRa25`-_m&^M3h@^((L(Ss5 zl{@!0qw=_TbZq&DjDao<1~!IK@zqd*6%$215FNBpILfEd;L_pTRW!wLtm9c(n83eP83+@psC4cx-i) zMD6n@x(!BL-yH#5IwAw(j&3ELKTSV-@e@pIk18{v7;QSRP2-m@T| 
zK^Gi<%a|1&fucXW2i|)-PS_wqcRDvhX?P14SLX(LO{a03$P=2@?v1m5EX4=g?LjWc zk4)LJ0~AC~;-ZT=bZZ5JqV@-9we%7ie&!<0jWP!Qn8GsAlpZX%A^wg%)J^?Ixs1~ZIaInLjo$CsPqSuO5S{d;68uJ;i#Pb8*-$q74%Z{5gqRcFVGkBR2v7T_T~OzDLNBKO}e;$hWB=3l7+ z*?JLB9&lsE<&7d1FJ$24f-h7yVVIUL4TF^SJh)Q!fxM9#;@Np4RJ+Il3i)il-Q^1) zGx-=yu}dWjSC3S+u9Jc}mv(YxCu3oHnK#_s*$#hF2Y|B?z(1uRNJyl__cXO&N%ULDqUV8i4vPnqF?@6TyQa#Zsi?$I)bIt zspKl07?(+(Zaao#l^iuxK98Xfd9T%qUfSWm6vdlUnBcn_c$Q}sIKJqlFDCnwU%Ja_ zQwW01GZ9#S8(_!iAFxs>hCGv!Cz~Zs!M~bHNZeTrexJlC$?72udS@VFVK`yuE#~=R zl~vK}Ueer4dvW*ja&*@@V->wH+4dL8#P^X8KUZSxY;ED>ZPU>$LW2FR zaS&b1*1+l3bn>lYAx?CfM*1&z66bq*cr8>7cf93!PM^)`^Pmo#CvFeVvX$wX=7aFY z)EO98U$o1$fsu4~)YBBA>3@6yEQt@L^Y^@>vF=%TfM+nh(HJ9C-BW}iVkNlNL{s?V@hG8edN=Qg3`3PA zb8+-sacmRi{S}HLm@VMl(migd#cf8eKN-`DYj8iG*ST(e;`J^Vs| zUcnp4yN_;Y1}6|ET%_@f(=cColF&;bg61fUrRly;u-#8Y=sUg>TMwPV&De>5H|h!* znJUa$E5vJSK!_TzQOlX&0%tvRj=Vr)r<|jz%f#pgu9T{~Nz=tavP^2zc=FFTfVj5ES91wT~heuv24xVuaT(de31Z`dkc$ zk-2#|K=N^)ju1B(M}d8#AsOX;hTite!(HRv;@9y#DBe3>*!NCZsGxTPo!J?}v-3rS zj|7z2u-Fc)GsVcRso_}gc( z_jUvJe%*{FyjxEDql)mr&@%tL#QnbL{$qf#acm;n-WP(*-cO`oJ)Hz7-XNp9L+Rb^Uuo;S{d8Z1f-q}C1zFZJ znPsNOlC=r5P++6Pd>>wcx;gyy3Y$ds5glRi!c;=s+}Wa++CkQI%mbLU`p^lSXnq=A-w47%ye6mEN~hFv8eNQc4#{5?t$ zJb3Qs@kT#*%SPhH#>JSwX)(DLl*No6Q%`G>BY9VvD|V+mz^^~oqf(VF9?fWnu=?Hb zZ*wJgao`ZR@9uy-g<;&vfmC?&$`ZIUS3yK<19UywO`Vi(aTUWu@WAIZqsO~x(wr8- zl+?9QyPo0JvoEo$;SSafq>}b&jVS4T7w3((feBO8Y2>;wFt2flR@g}6%ELDJ`#0bH za&af7s}EsCw3XGGXv%vP7N^uejH3xX!*wXer`=Np^`;MKv71WQWPG!DtxJb4R(c$*^$uG~6t$CQ$u)gcc3ms=9xQ z?+cCj==9My(ckVCS@AFohPSEU3(a=IFf?OJ9`jM`58iVVc zO|V+AnA&AaV7{aReoacmC>dqsPDkTxKG!9z?8l!)hp0!_kaEP><9VZvBs4&{U zkHLocZ`{yld6=uG4l@>ABqt@-kx^fHR_~7axKTxb@Al1wBNb_&F!BL1Wdb05YB!ww zHl6+FJ^<(M=(1bxE@B(><=Bkp>*3~JC209*2`UMOm{KlB>|VT)K1?rT?12h@mm>BpZ#57w15A?P45Tvmcc9ABAgAddSKt z{-o;ELXs#YiFf^ZhT-LDw8W;C?wj_BK2JMKi}%UG7rt*KHiLz$tLwqE>n9l8mS#yi|NfaPKMH_~4bnwQTeblz+0G(dCmQK2P zldfIBlJVpD-}kQ+*;F(i{(Un+tEl5NsZgJ+oe+x4H=0s4_6~V*-x!-_nxN=kQAV9{ z$4{^L^M6`0UC|%G+?P)0)+*c~Q^^PoP6i|f#PZF68skPHO5G!wn8A)uz- 
zPK4jjk_@ZEq(J{9x8P4UvqdQr^^zS?VCaXZQv7LV`*|AfSO;^e=aG2@_o%nRLGn82 z1a5Fyk45iv@s_hYJvHGz^(hkIpN3dW;pd2%e#ST;+>8l+vABDp0UnB+i)w}A_}OO^ z`7WU$&{Q6%vb;8zZWC8v{M+ov!}dR+3Y~p(z`-d* zaCzS%y6VCMDm!{IM#co9x!N?`7d`4N=*2%Rz0bxF@vIzH>a92DB-&bmQmUq#cmSM+h@eG6LN?BGnUt5BcH zeCqQ>3Xgj{rZ0!GXl|n{u6x`se=15ZzYHhe7phX(uwwq2_`;G?aaHd`qab#> z2&cAenc&~08_=J36v~8}kTIgc_It>)#yJvf>9$>P{;d)l=g|kgHJ_-9b_iZkJqp_b z8W}x-J}#4;j6)wT5O1j%X8#8>6jh$U`8Ai(TtNz=z=8Hp_(X*s?zGeE3Y{*QL3N(i z(A}d=NSEhJCc=9LTvF?%I{PGPt=J~UYl#^CrtaJ-Z#T}DXJ`!jeM6Orvcg=IWf;1@ z6eHG`p^Rl5dYc^MJ>5MRrt^oitZ-pgPLM#w&^L6d+W@^$Krr9r1+F}sgsk#SI%UuZ zgYV73R?%Fd1vBvJMZU-OtCxU<20E(CNw#D&*^Bo#&sObrQ*DUce>+4F3_Tz)W3}- zRU4B@oJj%<{#*uIx37e;W#!-_p9Q{lI z<$Nan1y%I)q;c?~V;M?L8jEu2UflEYV|esMAoc4=p|-hk^smEzkd}Q4rRi$A2iIXn zlpL<=nTYq~jA?F<8Fn~~#e~)Av{i2s{y1!e3!YJ`_T(r!x5i+Pu_&Jd*oOl&stUH$o+9l{PB6ja_nltth+$oo}34c9^a@+ zjWK3QXfswG1N6-|KI?FM9cSj+MN9nJs8Y-X6sM!`&dz_-!D$9oJDC#OL2X>NZHT%( z?jYYU`@{2ZiZD@Q1}wdz244ejlj5N_u*@U~^ot5W#{Luia=o8ENwB7^hUR$es0m*F zqmPRXrr?h00_a=~zJOd3p&5QjKZ7i>Hf zh&vRfk#TYkkpD>yj1+P~Ee+|t%RD-f z*G4|7D%*C8wB6Fc#^(!gn^gzBIHgJucCfI@nEzh5Q#A$~Jg!hrmUl~;4N<`|Q5>AP z4*xrG2p_KeL;oc*w7GmgqZ=Us{rb14ZG;P{ZYrRo&CBQwR|}|p{?hz!%mkt~A&;&O zipEn`F}Sqx8qMm=L6MhYR84C%G{w5}%nW{Z`LvU$%t=D;2M6$4x(_xB~rb+ zDrDuNO4xli4rPmZXVW0h5#-*{31NmX-9n5F{#eQ9)%Jt<y}vT|B2BiUNR$!C0|-kAxoYuY-&!I$3DIN~HD z!SfIb4mM+B#8c=R$^)$=WzObz9ekd7f_FK%K(m(%v0cXd;%BGO?X#cLxkrTbptvEr zPrpwsF1IqT_phOjZvyeIU@o>k??(x>i&(vUCSKp!hRv;Ka7)27EIL+z>It(^R4|GZ zzo+;%M@U09E@K|kA3X*%WF6DWVX6Exk|!6T1Yh>nPXrTnb^yrMry zM9;yW0Xz?NTqJzRYNzS3<7rX%OFk=l9IU5*;C}Q!-~>5`;ijCBu1`!SwmzHT_V8;m zkKf~sH`&LpA9Yb#`9E?|Y9k01sAJKcYf!mtG8^f24W91&0S6o7KxfWhc>bdlLdVp> z`{Y|N?UX4zYT1aTug5~|_W3w2@EDZ~-cDznYN1_MGBI(*M8OW#JaoMz06+OE^6G9C zR4&qmklC?J*77Uxc+F?}XL&w-YRNmIYjnwop(5JXjG|&g+Vp@?w&2-IZFteYi+r)t z#C12l1$z{~2=azXi0vaY&=nD9A6_00hJF+1UQ;!Y4)TEyh5wj6p2HY__&aq?uAs;B zY^bC{89v@+f;$(^#!1h&;=vK#oe59Dvor{dGkv)B*gjHswS;%-+(4@yX&m)Pk(xgG ziVr4u!ueSbnY-KT;G5`I(9lc-%az&C#m_8~e#!Gi9FC@_06L^c>)r3*7p 
zK@IB#b+`n+EwqJi5ovJrlmslajUw8AACQc$VH%&k3Dg%F30!PXLVQfZr7CUFpl#L8-OL!S=Pqu=~aoX4{Yed<~kgYiI`C4Vb{r4Qs{WO9hZ#WQs0@2(uE6=mF7I=J0GG zS}KplGw(VWJKeRYJVhQ)Cyc?!x0?w!@jtpJ>j3&_n&XON7cs@XmfD4lCTj22VgGpk zZ1m*!*ou^Pj#mQRWEGUpP}nCUCiv8NiS_OV9t5%hnG__{9+ z#<^`4v}TXPy+NHM;HeRuvpb2eHxNv@HWuX{tiyn~Qan5ujK)%Rydzx!7P_kvy_N`A zUY-MgZapV@_bp+I;5`_NjAEzB@%f^ekzki0&q|+o3vwN8@uXG^m~TgBOOz8~%WPXtyT;=t8OnKe+Xf#MWN_Q=-+Y(1+deBLA>lvI=u zwqEYQs1HkoB{w?IVTqN{Ag!F|-euEMFN`t8C6tETJVEZwki{o;r%BO&_lcx?11Ibe zA%{B#P}U_MUyS^Rw-Wzg;K!HfBDjhr-^GPLkN&{f{5A+l+{}7r zj|UJN!~V!R%n7Sp;MY!T7?xI~SL*e^xHB7mbh*Mu@6DKcQ3-1U{PDE^Vf^uK5c}jL zaRWGE@HKvBy1W-h1)inf6x>j4;RI;*Ud%b4YsS}7vE1l?WmK%|Iyt+IK>0DAb<8@_ zg(3++x8@Rc>+VX;ibiTYR102Fmzaw|?Skf$4`6lW5_Z#=RqPYBC(x6=g6(%%#!4%_ z2NN4}_Jq0`d#Kr(KHQf>690aJ%8dI}5)IG5`EmesHf$p%f=6&Cc^8>590D>&et_43 z(d^p2GuUZfgD~8_m{pO{Wo0=#*8I0Fd*g==w=r@l8Si}z%`Qvh@$v?gd7_6;u7qLX z(OC3SOXfM)F{rzsk^|mRwdJ81su-p+bBlS7z>;@#<=^j2?Xe8VwH1NYa%X6#!6?>E z<2CsZIva{2X26%|Zd$uXj~ue$nOOBPm}^eK!BA0fh~MM&E_WB0ue(J(ltW4X)pLT^ z-UhJ3VlMbzTSsdiK4ivgYtozW6uq7J{e)HkR$V%X`|p}#Q&T<6suE*^TK(W#+BmlQ zK{Bzg+XP$qoo$T$0vNC##a66v28FkPosTcl5wWi%^n*V2tltJLv4;FCy&TM6dfZAd2_?7t={K1b=pF734)Y`-y*#$c`lT_Jx<})Q z0)4F7RfP5{#|YQQ6j9~cTXfiW6}i4|bLARo5sWcDM%$y5asQ*Oc%BL{JLCavvJIr` zlI5W|T#QXwYX~#Ge!ygqspJl{nCu6E{_uPlPF*X7H7Nucd zSr6~;NX9>}PvETa@mQr;jKbO)+_NYLw`5+Rt&KXBi*shutHztq(@B+V4RGT22PXjA zR1OY%juF|K3|K3c4BKu5VZQoaaOuflBm;FZRCXS=`C8IPM*3CWVhjoA-w7r%#n6;C z9gcoUBt>S%#Ljj+cn3DXzow_aeh-JhFb)2mYKH?Iyc2V*AsBj6^1&bx^%X8+nBhLU zR^$xNj7`Or3mR~+RTp*i)A8?FJ(Nw21!o;Gwp3~g-|zEw9?ixYU}Kmg>dmr71LXm_a|}T2yCbklT?#HWGWY|8 z+&D1-iM-)Y&qb?Z=w&Oap6d@DYN2>wRRI`{+ClcOQ^eiAQdCPKj0R|5#zLQ2_$2We z$o|<+G}vn1R-O%=TSl`Y691s`K{L=BDTMkblfSQI=q52M!I4voVQ)zlmtD6@Ft@sh zX6;BLEv*kp#7tMx(Rgsw7BaU}YsiE6?Z(Oj$2~AfOV@;-naO)@q zVY1J298i6S7e~Z|2AW-{pkIb#n+q_r@HRU8)sVA4{PBGEAgaHaPRrkH0<+3u479T5 z{(U?SruLbF!4C^@8FQAOU3s9&gIfZ@ei3$C{x&kV-Jb^OX~D&;GO%p? 
zMkd>sF$=BFfb~@ys8>v-V@pS%#<-0$SRe)Mqdubeqp2t@6 zi*izbBn`g#1(OW(M9>Ky&3@FF#)j`{gw;~U?47AbU^uV`C-Z;D*7ZW{4+zCP$ypeh zbQpKY$KXZfaTvZi16OOUz~F6p=)`AAGwx^O)|*kpOLY{Dy=;x2gA<9--znT*liApl zok4fUvBle)USj+#miNIspjS^UYRVv$ZH+|{ zj~Wb_qJ~yORb@PJ2eH`GN~Okp<)+$0c^+gCiR-bjXe) z*Aww|N;%o*8HhK$E?`Ml5DcAC2ozUL((oSgb5RMSW-1l8mCOAp7+&tsi3xOvfacADoGQ zR`RTxH7yv|pN(T@MqrNi4oup#8I8L#aFpg|toybZfBcfid(WcjVej*J=2joI&(K3F zo~e+@XJ93I3~=VyK}_f0xh-qo;30Ki=7}{wyEC$dn_gkOZ%hQ2J4%yLTUZE~T|{3g zOe8vMLl`dW9Nbr(LRUMl#7|pAz;Kxl?B1A8*8LDNnHtxb|E}!d-E)Otv}h{tq>=#R zJz-|c_S?hceRJp}{@mjx&LOS6*37E1qdfC(kjxLcfqRP6(0~b`!d+`XT0t9b&Uwcu zE;5CSWlur6ECF_J|3cn-tcRhAGeGIK4f!U2h2;1}W3IynwA75G=hSSlj;*D)&&)tO zeqIm~{~cR?i{YujbL6>x3M`Vy#&vF5=r`^hZ8A*5rJhqkC##QYz6++gwo$ZhV*;)S zKUcN=5AS9z+(Dh}KXLx+A~C4vJ_^M1@J?kdI`-a0uhQ-K*o%M1$~cKC1_h|?vI&2` zkir@9$I1Qk{F$`z64e`}MWeO5>B+@K_~&^zmUh0Q1||kvj-nd-U(;7IWHKM3lBC&G zYf)Ce&JXfk`6x!zAqda3!6zF6VE&bP_>K2itSHd|g{QjU71&D^hI}E_AQ=~wZG{gr z=HSC8XXwee_1Jy$4t=XW2iJP`VCJfH#GN(x`%5J(bde|9TsK1K%_Po3=_uVN?gh1< zL!jfah`{mSDNJ>UHJjc>@0Z7r)SFc#@#8nru>AscZ_#IL`ESX8lba~f zIfmC(l#;={Cs1s3VYJssY>cBKJ!e7h-@aEbjya>te-@^ z9^Qr*rv#X_a~3x-R}TgktS8BP7lN2)IB_v+6R^%wXncAP&(se>-`oE|ZTnw(=gxBS z>X$4o>2PGWu+gY`C5Pw-xsq?sx@n+m8o}oWVW`prKF92Ujr9yva)05(ax+d@G@Hc8 zltNpS44E$x3%MfR^q{sAUVLal=8ieRzpLaj+v`TibQ?gGh#I068_v1L?IE6I9V~jX z8qP?#g91?p$(|%o-0uX<%U02neLO>77zQ3`oz%77pQzrJqqAR4Cu)JxC{pm2D_UMa z#&_?-Llj&TogiLBghQ3R$LtU0F9ErqwJnxR9f@{-+{RB z$;5{^>Ovp8K@4h@_O3B)%w=RoO~QTRTZ9wMD{vKfZ0t2NKjsmgNaV` zv`~$%4gP0Rus)F7d)-@SxkUo?vVG{3#rgDndn#px{%NUC4(({l<2s~&(Qh|~nWL7{ z)cTwk9sAGJUR;mz2V2pXe~MZ~(daw$6eo>%jMv_#;ns|Ar1_={cv-d6 zkcj{#$#C4*E7Bvvn30 zRJ+iX-Xic&7D(;5mFTPO1*(eAsLMby4qL>MZ<>3l{1!#b;+&Y#Yj=@ry}^i`z9fVY zOnKi#UH0UW+jku>#w>tFO_jjW@9%LWb{sZl3rvKSN2q7XTy*wUApboz#f^re$mVTx zal>*q$_YOAhx@M!`HS`Fdf61+#y-Wg!34B32}1oWHQ;ydr=tp!$qT(}CMkaaUpil+ zRBRJ@I#L-oN3X$;J}2?pznf_BBpVe@ccNGQ3rrYl7WmB4ywS1Iyvp~Fn7=f{Wa9dL zq*mq+_pcKm;?xZASh|KTUvY_a>a-eP>KAyZZ>q>lIV;c^=Ku~R)5)w6He|Q4EB*XO 
z2hJW312KPqAww`~NzosrMXbrsmCTAS%K}JW3L&T>k#Q%jESFI}sFOrf$a&|Qs3%^@7^&lP6u?7k4bM)QVd8nH@ zz{TIpV0>OL!klN%Nt&48>2r#qZ_fN+xKEJ0~)0ki9ta+Fm-oJ-R|0M?t?p5KT*#ZtDFSS_uVZZ{qP(K-%^ALB?0tL{%<;4 zcM4Xo%0 zw=nl?ES}C6{OEVg(LZW7`b)-gI^n5kD6tX~ul3UVUmbA6)sZ;%&T@|9udO_7$E_UsI51}sC3<>ff0tQ9|;JdFwVi%~jlH~J-cV3Mae>9f4g zEV>jA*~0TzcLz@c#s|W$qbupw@h*&5MmS{5tRR=Gia35$Cpq=9m~35OAY>?&;Y`i~ zD7Ll2#qO%O_VWa&FcUb!J6njh_ZB#%Gyyu-OF~RwE9YHOPBtzxCTH8@!H~BB?bR8O zF)R)W`9YxgxC3NW0wJiIfqma+0qdDxr;bw@-{YCo)=dMOuK3bNdnaL4yHlNxvN`D$ zvTmM6hD23AhitignRKb7;@rm7_^$OFeHQc<--X}81D3@oY1GR67uZBboT*~=`u5Q6 z2fvX+AvHu>{Q^BTr;w}s9*ldY7T||<<(MszgVk@|VrE(kN*^gg=ZbE8nR5ky|1HIz zqn}_(Z8iS$sz>vJ-zcef3H3IfMrXDUH!LZ_$p@~ZPLLg()sF_Fmq_V+BbaX82O51X z+}v^D;8l79-f@|9{G6pUeGW$#e~u-LzYz@W`9&Ho^_f&Qj)M*%|MDl$2%Pq%(&P+7 zhz|<@W1+9#_*5GPI!40f_8!J=(kLkO6@m6C>C6lJSLERVj;8j=;FQM(=ut5i#Q%!J z)AbSLYUF3`!PW*ke5IYdO;v!>&toB}`Uw17D@KB=7gO1cuO$1nB23wAP2CfWG3oIH z?4F@bvj(H6XGa6=cu_%m%AXVG@^&(P=WS-?oCD~(Q5QGwFb1(%Qz5~6h)TY%B~szj zLFclsNyIN{a-~ob)mQv5 zLY$w&i}2ga#d*CG;=Gf^4-`xIi<@NfNS5v``qfDnAE$fJxCOyPIUtON3|*t;D_zNt zb#J%DDFtXq1~Ht9=S%F7i3XI-I_YX z!-}9>p@%)Xy;M~tjQJIkf!F5@ldrdnNQw0WvdM2A?$cZtPBI0rS;^^2r=TDSwk-5%0pC|66bPVaP!S*Bi8dniRE`w;%cG{TEW|h!z&}Q zG+zp;nvQb!w{N2rT@vK48Q>M0)r_P^AagW$4`+8*1S7q#;`trtsgMzxUsFC09XP(hB2L}Fxh8ZzhlFt_Ri|FpCnw8f60eclqB*nk|y z2am)Zqb+EmM?4L=A47uOS~1AwDel^ufl^a9;)%x3^zj{0IC(CT8JFc=w{F}la2g7t z_wK18cEpntBQ~Pd(il_=@y3Z$+equhmvxmLjpWEz!O5X4hXtOc^~YLQn6zv^O(!O~ z)2$CYanzQT*laQaXC6x?%gS{iU${R#ljd^M_8p@))w8%w29p>+(;+hNSPaZ_dqSJ) zdQAKV}@mWn*vr7PR#}i>5wFDCu(uweH5@ za{FbtX6YPUGNKgyCdT92J>ht6g%q?GXQR{9C>-}}07A~_;}m&OY*o!7QDN_}F4G(( zq&RNxVsU)&+aCuSjOn7TXnGEv;m8xAYtcUyZv}5g-EVGm-7z7r@hTm~L_Nr()(g~m zK`V9EG2y%w!pW!cz1)*SeHiO|6C;Z{aeP%Rssxt`j*g?~e7Fj851q%ITg$L3@PROA zQ^&QTLWadwnQAaRS?2G@RP1}se0LMh?Qgs5f2VB0lgH2ErNt_!KfMxhFOTk5_Twuq z9Mz*w<3Cp`RC85;yI(@V%`buBeoQ3eqU_;g^LyG@R|StGV&Pq^0vunPP6Nv{FgRR} ziwIZ_!B27^r!};`VMaThapxtut0MUB|7KCHKnXTjy5W5N+f34zSWr3A&ut&y 
zPgA9v$@_9ARC?M=&zL_m@hvGpnfAw6zoP@Kg!%rf##SuYdks$?C_~`@j7u(E!zm)S zaA49ZtW6t_XPW2G_rd3Anb`k(xhsUsn-wj*H=BGuXb20}eMUVC54wBM5w^d~7kJ%; z^t5=V@P3HFs;DF)mOLLT=O3=`Qa6K;9shCCHRf2DB%BSbH{t1@HmIU@oZkK*iklWi zQKc=JFvv^bKRq#YjEex92S&Is!$fd0L_p7GSF9Orz|Gy^OxMWnV)A%T>MNc}677D& z<$gK7JgyWg_mL9iaft^;k{Dq| zwQg4&xp&|siFmx6lPlgyzegBQ$)S_X7kgoM|7t3U&X1#cw;$1MtHyxVp^0$T)sJkR z5Q@8(@4{z3&3IzgFufr*9b?v7;%)EI_|@<$9o{03CDP%TR~JLWrX&g;o^1MPQw6zO zm__6Lc&75;g8Ij=u5#i7MP#<#G1~HK2HX!Z$4frZ@GV;p?OjxX{V|eFULxd1BXnqz z?_n-_-3VOMyaN{P9ZgnELmC@t4*z*>ht-+4iAcLU&0r6cs1_IW>s*RS{q^|Qst|A4 z>!P=nuW)bf!d0(EV?u%=-g*Cl#xx$IoA<LiV`v+$Qz3f>$MfJ#0km@!Srjs!*G(oH#dRJjCaDn{ahAH}$`B@E?A3$rFN)sO^$I#{%Y4nc1J@&qAp{{$(G1KfY-jTS;?XYU2zjXlf&PEdY zp^VhJtpu6v(_zueu}~Nm2u=#NLMPY)W>)Axt)>i}nqO{mXrT%@Qu|T(|7bHw_r?Q} zPo~AMT@o?PKLz1egCq+VDRL4&l$D{)!Az8kJ%uKIXE1H_ zQH%}O$I4^6_-*zVy2UUSN3V{=>gv~2S;&hx3!MDP_B+U0FI611yP51=V@I-hZ@in} zP5sj=kb z$AyBw+XF_bK8N{&Lwlcn9E2=62#qSOq;}C<_**7MK)#%sU0iKqA7x{FaWGUzX_Mu>WnL}g0u z(v=s#at&S_-5|y?8CJ6(>xvHiC|?N^rB4#8DIKKxTO~2qN}wlK){qfeX&4}O1UKiL zK&~hQ59S}k=<_kS{$4dkNu0p4d3hL6kpXHOZc_86b)@QoFVK6@7~SN^X-xW$X#SGM z4@%kO-Rn{^%O?o2mhBAuGA1d+v~1xiWOnzn)lh`;hMk$Kt_x#aJ`Ggq-~` z9V@+UurffOba=d`=j{gSS1c?revsmZUrV)c?}fd%yUGLq4F%)P6Pr4lo{Pe9qo;KF zxli1Y1alfVG7@G^bHyvEhP3~WgU~_zN|VCPsLQz7_%-N19A}%2-@fx${c#!EAM&F+ zB@d#@$_qHv1F)g3ALF)7L2$6cO*eameL_2Pp*xR^ot#SsE^NYkp};mLQf!5Ta22Y>hR&) z2CO{z0O!kGM{V~bG+IVrtqBgU_{b=fAND3ilV#DvFbZAvXn~n+JgMA0z?ArCm?WjS z;-uzIFniJtM!y$Oe8zKs*NkVmW5HyRK|ib?9}WsH6TrAs;JH7G68P7Wxb~8hC)l@Hf-{wBt%$}zJ_+%B-SFQQw2|KvUnA0uu3(lEy|4&H+;mK2GD+x>aa zyJszj6Z@Gw=gAOrcsvgFiNLP025hkKjTUcG3yV7a;d`eu*gk)b_v7SwpX5PoYuCZP z;T(LnNrHc{DIG;(v(Ye9g4aD&%#HHtp^J+OxlvEEn3UB2px#6TBcet#eZQ8{qp212 zy+b!GZ(Tr*XKx|wsS`x)Oar;}H=U#|TSzT_DMC?e5HVjf36m52!RFKya1mcYjP3|| zm<{R>)-xZhRtLZ%fom1H;|H_hcmkKE`Iu2y@{-DTj-!mHJ)PiQ%ZKJ#r>?^VbGKuLi!V9hlt#7Z_u;%t!Keg>z}}<>y#o-_ zQZ9qtqCC)QmjrFe>5%QvNt;B2k!fDeP*v)m_>^nPPJ=7H zQf!Ht41}1BfmsJ9!gld66NAtO;-`HI4BwAJe+6q|8dSkGl5o2APYBHw_7#dP^Qb|P 
z7>@BU!IeF=P?Iy3YA0lwtlsTSRtY^oS*0;h95t0l)S2KOArEJtc>o)4bkqGxv*>Sr zJ_a?W5`D2lg4f_Wa=*-Q$I%_wI5deX6HS6dvIX3aY@!=qo+K|m9;Sse7GQZ+35lOs zPU;?{kZ)VZLZjUcrsS$OTAg$TqYim6?>j>;ZMZeS$OZj>nyvWwgRD z64!pXgQw-sQlqXET3(w*_MUh^oz$D~)?`@|>CPi)FZ}%U_1CGbn9!wV67ZD3xj9mv z$6d%R$5+R#Ddo-)6L~ZIyI1I$>>T{SPhl^@MH92O|e3|a}yfZq!1iDIRWNfxl(FVh;Qmjsx*o;xMrGDwTI1i5ohm(Sz6AaMf~2 zG%WPSx~qq2!4w~?uBolxU1dzt{;q&u7LVZ6=``3?6a)U0fT6M;Tv${B$33<~rPEH} z<0U!nUjT`(G>0HMYj`-hD6tq;y}JfAsvAID-vx?gEf~g5(7D1>S?Yfu)emEF+R`gLw3#tg|{b(%`{QA{qxLvAG04c%KJIh zy(Nj3g3CB~mIvmzC*x3eBc{nm;f_u3>EQ)({Eqz+{7Va6-b7QG-|0J%Upjph|8}P~ zpU^auueQS3!-~$WAhI`+36jYWdK?i% z`Yb@n&=pS1SP{~LmSd-u1%$mehh@JODa$=07tYuYX- zdN>ys2Fjz?k&84dp_4B7sLT9zy@1@EdHCF^n$BWsI1|0a=yP8O6lo#-yJ;CCzD$$) zr{+Lpasixa&WC}Ysj&S|20U5*kd&+3B@bE(NJLDMz(Oj4SxZ#;s>Oxu-y53z@(dQv ziap@Q{P_qww2tsoMIu?H_ec2nTY>!Fo@Cta?!dZvPidI9NS<$1&BuS5!~908(4f#KEjhxA}%+(JZ^IlfV98D!<%Zho7MF z4-Ut_M3xyt4v&kWr&m8?6EbcoOY$6hs^H7N&D|;`i_>xl*S0N*6F} z=vp*xRNzzllhCXD1DrC7WRJdT;Efj4vKcoD*yHlDrjPqa^W(~WVKTYOrmn7FBfBJc zrmBp+Q6SDvS&{V8OpGcKpO;e13i-eL5{0NQnY#{Avn6{4+%001>{( z<0Lk9a%^b42EWGh2rr(}hiphVzmavN&W*CXzUvJBT8S5X{oF2SzWWzi%#yKvZ#w%j zDIN{A#7s8~9pdra1Ag$yVRrhwR;&zu&C4DC#2O~PLEoU8tU`7)f7oyzt1(il;S_(K zbvu(q^R_%?Gb6)!%VLhr`f;9|X_GQtp!5R8Z#-c2)g1WSE}m>#W(yuruYmY{k#Jg{ z@<)p%m~K<;VahyWS+SA7$WfP0DqgUQhZnA_XFpzH z56*hSA6JRwzr1?Gnoo{_uBb%Vdyw)Y^iN}`VIq9FkO(*IM97*y$h*(9;hUuNdCyN% z8?N7rg1a84F;#Mosn=i^d!c=RHTrUctuyT4A1-!p*rnruS8k>7<3{STBmGxUz4ZgE z)38ZHMQ@_Oev0J(R=sAgzLUV%qSX4Z>Yc3bH!r@~w1q$Bs0R1O-{;e^bNTo&I_%vW z6ZjqTbl5Xk#mm(8uusKLQ$w8H{N4M|Ner}qZyuA zJBP0yBgOvJ_2olQinV+EQ}Ccn;y2IC=l6VG2boH9@nQ<{l}Dcm98Ed?MREiCAbmge z_@QYkaoD6mB{~Z#t}W&__h_-2j&W@LZ!6OZ+cu0`Udn#=d&Jv$R6*`M?fO`7|8WmSJRf8YW2D(1 zap!Q&zFIPH`#dI=-NArr$A;vOU%AOGKBlXcv`njBH?yImWEx&*Pvz%COvLEmv1pm; z&YC{9A+9weg?__c_L4}yuvhnGr-jC{&)PS$)Ax$7k9t>Q;{8ClKYS9drijBng)qK3 zyqLYY?+P{9U&jx~*_w*?O>daETZAe!@Nf1OyQZl z)O4m)Eqh;TH-56b0A(@zahQ$Z_pAP3H~F^X*2pEi--eA~J~)Z?KMmzk{4V?!JAh&VhRrfp*+)(0 
z4dj*)i+Y=R+p!w7;rwg<`hYdRDXET4U69R(p8rlG&!6Tqdd2wIpNVYkc_UM1_F6u< zMbxzA?p$7T1c$bhYv6&*O7_sIX#VxH)%?)Z0MHP6tb=mEXZXzHj|ZNC8%>q`({?MJceHP3q0o`>u?+u`JeK6b2JGq$YO;N7c6vP$XlY%c4=FUyVL zwL|XmvHn5)dh;}X&Nf3Fc5KVx6pk)JZmFmp6ZQNiIC>FJQB36IrWuLI(AZ zI(V*M$+!KmY8^R-8C9U_{W)AolWDH-1p=Q9a3>a zp9Z9Hv)R2l7N&&(yV#t9HZb1)07VirAU7lyzAU`KhFfl6&nc>NQmhX3cn7R__z;VfJ!!3bqM*w6P1<@S?xM*pcyx?3`}mSua03`U!FxCk7qsBiN=wKg;KBIHjTd;MmEv#sB04wvw5;w}-U_ERM z*wsVT>?Vawe$kxyeAK-FKF(j%^sYmDeeR_#L_@BCSJAQtetkb|$-j+tFQoAPoCfak zYGrng(*pL}bOqCSE$`T`K6xm*>n0m`T@Dlrg7}vKhrnRp9A4qY6jM#@N6_zbmvvYv z#h!T>$}f83Vfx~0Gru=wCN48C;`^M6+03`2K{9Cq-}%LpcXmI>-~XAzf4;YgT{K(B z{}-jRn&MG>x1s|Zr1X!Ph24gWCc^V5OQsWvZr|urAWk}1e;QV_{&D6LGh=8 z@O->UClpum?VFde29dIS*{XWp=f)c$$D7FSFaC|&ZnUzV%$SD2Zf&kRGM3*nM$Gi| z{uy|wA&LD|{ghX#zs%ZhnAkAqSOmLrl0JW7K%12*{KKzu-NGw8?1RydPP5bAZ{t-Z zmt+5ZXTDm(KN4kuVgvi92IZ0jp^Ub8R>FQ|+(oeDJ{Cj{P~p`nTP)LA2eH{dtuK(H}?ojsG6-r_ND+!$A>VYRxJ>bfGmbBX3q#oTi!c!KXCI8@c$%>L2LOV@ESY=uP1c@cgT?~5Z>Qz{>At+YD|MwS2r#-3}-_F zUQoi0WoHCVVk2b@&~TF>`(jQtu5-QydHXZ)MzRgxdF>vw%nasle0j{EY$<*9cpP>Q zIk2T8rZIMPB~ZFzh?wu!;-jV>!X@o$e3FP6e^JO&@OL$Nvu8T|A-8UvXg`e~y|5Q= zZJWwN#~9XVqp&@x6#*&6AD$f;%`1hdqmB1>Shz0$REH#4yM#8nWr`1^PjGA{v~j!iSafY3|uf>fLaa z+`O#M9(dUV{H89@9;wCVT&e6t7D8UOiOlI%eEH`p!go}c=)2FjD)Mv4eI78OK#)b_T6Avl1y--~Gmi#>+%C6hz2rCEd z*+o}W*(yVI_Q0?in|oH+xoXtFU{)7w>dJ>m(~I=mwmfVS7&7~Ij^>MpC$rtgr4Xxf zk_n&^tXEb)gnPU2-3q#JPT~@KXwq*yG4UDNT!q(kYc+oS7xcUJb=8ur2mFZOsf!+uTh zWDBF?**?82_FzjTt5v_B{jDp(el{{?nH(p6yYeXZd(l309c<=HKS_b(=tOq+x9R+N z|BtxJ{5f85|AR*VUSadtWDNW~g?H5ILutXIo~dojTY99ULG3VFu6u;7ftB2C{|uCN zkicacyYWn!BkCqZfM#DTTpsFyqrti09KMA0*WbX7ZI5OL_Ah0vN2{{GyC<+6!$r8| z_cc@=*?QVksweJ@;o>_zQGb{1aSU(iG zcN0hP(#%YUn?nDplUAs&;g|e>l+S{LQz^~Lv=$P3GGr}^+@d0Pp>6r&7gzUNG zn*YExdN!wPBn!qBFL20ZJJv6h<6}k_(7MTwi29M^(3I)RIvwJU@8i{AxyZUPYX z9sH{as?e-=h1_a*KrHKQP<`fDOy%7mrdy1ivcU|0dPGtEJL25FNiO(6J)F$lI}yHa zkE07~7BCGBvNU4fbP&tUhtj)K$m_+$%+uDjFu%-deJihzU*oz8iAvjPdm;FzPl}Sl$0Hkjf14r>m1#j><{bC89>?;Q<%@$ 
zk>}Iq!LmjtDBYC~Cz4Zfklluj-^SC?(ZU|-;b(y>F&oz{dBb>(IESB&HPOUF7Bn3X z5k=Q?)PK@;(2GljSuj9WuiMXvC#ZqzOP1^}zro~a%EPv?@wAIACN+_h;K|t4jD3X* zT+Tf}KC9oQfsOM?1TW0DwIhTK+I8Z*EfE&rM<`oA0$IsgCSq18yiSrp>)&NK{T|{d z`${a2tw0BpB9t``$4IMWYCAfePF&VN<|wM+2eD$V%H5iiag@T{j#>2HZ96pReoH5d zpP}FH^4x;6*>Gcr1{B+PLF(UL&@ih79m{0s&8h*(i6xNxcQM$Q+rUWuL0U>I>8H&h zr1$L;VMUrouV>Y8b#AmMnUbAS zOziDkCaL-tiAyS@D@h?$7xowd$`N4u_90ms^jz41>%+B1ak#8INIKf*p;?qBDLaxt z?Gzm0fRiMhSMY<(ntGJp+w_=NyZ6=?@B9w}6|T~c(-$(ue%F{YL&2o^eJKXbo(Lfq zr_(yeB%IhP$>uB!V4K!iu`l<(f_K($K|Oae>-y~rOcDA>ZAVW+_(x0B()J<0dsfi> zCbGoQP>1tTAi#0k+(Y|AIy~;aZ%&>XI=qoa?8hcI3WV@)w+1E6F z?n)R)S;oz(a-}gM`mATy3wY^cLR7rnAt81XFcCjY)Y|7#M>_ zyoRM;cfwAE063p;8Vycu26f?HRJ>Nl#Drv`zOxaE)#s5ST~{(w#shMaHbB+S`=n#j zfbq!G8;sk*1CXcKL2QSg(R7;{Vo@zBxUZ18ymcg$NPQz$TL00sn;B44VgO=Br^x)K zB_#W!1I$g@0Ux)_MXhvEIN|r4wn$zl=XOp;-8~7o?z}53v=F05W(a5M!SBGFTnhCV z3~h`g`^x(|*{RmYIX<@~!>So1udRhvdk@ese^IR4?ehQF9x;A@le@QV4AGHXO$^*F za6i)usA|nynCWH=eXIAFeX{JQ4j4}K1EcoQU{bS) zy{dCx_y+mHt=;_^zHaP;tbGRX72VBSC@hw)*fjlizoS$+u$)aImtM8Hy4iLF;7k zSgzpsS?kJt%NU0>A4cKC_E5a3rcT%xf}Q)1VdCvt=Ee_kc&K?AuiZG$RlPn>RYz2r zn7EE0&zWYhnt6wg@RXt-%QFNw;yaMnsD^iABEhY>1F|RPLZP-9gd05~GnUPPyh--x zXc~o=bYJ3`d;4&6(h)RV$KjT1BB(lGiLduWpqJHQ^69KJh9)dQJI#0;R~m&^tM_2z zX)SzJ^BU(Z+J{#c-lcBhNATFRTr6&1f;LvRc(HFhx7+2$?SP8SyV`xC_N!xS+=;$uz~w#9zFD=5~nUf^7@uR~>eszf!u%JGvR9r>qlZ zre}znme71(*hf}smeS8(V;Co&?=ldYP0&?L*O# zX7u37!*tVs%|Zvkr|NsU~HEQOU8Mv$@LKZvhz zCd$8ntiL}*8vP6*^n)JR7%K2suJ@RDcjcIblnai*y&dSjU?R$NT*hSw&8YO%GB8S! 
zWmQAE;C<2+n8CdRmBJ}(OWXvujEb;XT9u$ZEL^V^2INA~b{h1llo));gJVy{;n|i5 z$oAexHd^JGlxhBB+Cr|~`$DefsDj#*RB*pv z0LQs&FuFSttlu92zdl{iRQ^k@+~{Pqdx!AjfgwWIu7zgXx%i*g26!5M3SXa%Ao}W= zlv66Crdro=`8hG#Su4ZeeeezU*^c0|Ka|jtLLGjG=L5XF+s*jp`#X$FrZ8)^4*~a* z+rs&&k}T0!fU70uz)bClkahSe7)(yW#r{tCVL>7kJn4WBhw_Mpng}biSAn-kkL#gb15&pvm8UB0c z5Bj_*1G~FyiPb4jc+(_!aO>*mo|lvP+(XZ(*3LG%LPMV3o-_`(7+j|9a)q>CKLm%~ zj^~Fq_~NcVHTbDQmKoi?lFIry1LI*qU0U?m)jA*1p75Hnh3&-A^ z$rU~=!&B91C}And7xwnk5M6nA@gyEC4i!OZmjRf3eTWlhufpe_n&{YmKN4QN3f#AR zgVtW*9Ae+ZonQWd+!@mVCrxJYQU-7F=Z+XO+L(qbBGt*#0f7;lA;kx!Tt&s+v)H!H zk=zWDB40Y+z?`lz}pY;b~+&V#FQ6!z5mI{#)AE+mL9Q_^6 zqr{7eu)|j9k{M3mbB_pF<(N|_+qeqC&UVsi+eP{7Q8W0plf?PD0yvn5}o;8;FlNuLA86jC_d^al?FY&n~dU3m9LVRTagPcjhce4=N5yo!XRR-68w-; zBX`mTr}5)HI$B{XZd{g+8QBH+=ZrG=v`j|qpHCjWHG_grN!%|3Bchut4XI(bm={;? z&|$$N_9({DB!AGC%;P9w_U4n-cgtZ~=&^bWIl5Zw20t1h%&Y6UTt5 z%(?_eT+2Gc#r660%N>1Kr(2H2-5rn=qz(r{u7g-j2#OjGa2sYi;D2j8Flgy9XxW=e zGADl^V|OYE49cfur1N&Fuv7+ruA4{a6bWwK^VaY;G@iTDhL0&c%lg z(|AQ=y60Uu?$&%x4;6eTD{~YfCI2?DQR85Rz^BPi6lGs{iSrX8c--oH4ExR_PH2~3xW;Sx zaEjo99es)RZvIT9E9de_-`w#5pHBbN>mnj^n`!rk*K~gANPdgvaTu{E3P)*)@PE97 zIlzX`^(*^E@fGG1d86A>wC(3Ce!u-#ULo%R-hJUtkBR%@-o1Xn-=(nqpd?r&7Lg>6 z7!SsR?Dhy;G}8$`jW0xc zguu^LD5A9)x17u6!q0k>!z#1jqUAa88ZjH8G@80~H`5B;cAW1&1G8W+T>CN?=AV6w zZ;OL*^VB);pD=HoGN1_4EY8$3i$!tvaS1qf&z8DRDg~34i?r#b2Hw3r!Pvj$J}h67 z4TGT3lVygnru+F?h`($~-Ng-`(v|CCPyQWMzTx zUuUBAFL~Z(Mj*K>B?7WGB7AP`UYrOH{Nq{={@6^xQ=W7ZLoDCpLx{$@b(WZ(*MrD!n(;odDXnT}Q5?VazzNBCQI6<;Cy|4cTnvsnSVvW`H$ zkiQ*zwv>b~N(c3Z>yR=&k?}uNLl=G2WY@(S()8);>i;^*kiARQc+-{97cB0A?j;AhWBTt@Eb2|qwVI=xc}074B6|5mt6SGlI+%so?h z(yPKJjy2+~&gIZ%Unf4Wd<@?`b2~k>PYV+QBWOcl1yz}G3SP`erc3&#gV{PM47=$9 ztXC!;7Z`$KsRh_3wT=%zFT>9iEkh*@2Y%>=H-9fml@GG|k39RfgxjT}gK}Hy@cX}6 zkol`0lddq3ZXJQ|KV{-Bl{j4Flz?m7m2lS2M*QK{iiyXr;oET+na(+g)81Xh$Z#>9 z137-H`cho6CIa@C)Z?26Edn2XBw8B2r)k0E2uoB!>1RHj>J&qy{XMDP$$yLoHKlJx zf2WC_rx?u^Es`#Eh2{nfQc;t7s#v}d?T6mdQH#gn`nCX+_c27DTZeHV#ScXN1gG`c zBVao+nyC8f(Wke>V7-1Q8TY-7K8vhkyq^!#;ZR-ZQ>iAl%1iNww!k6{6@_g!PBcod 
zl=H=xm9kYIMe{oI#ibKn)uxgQw)6bAQgFeU^1bIwM6kPb+_XQl^K z;Tfk&sx{a`$_{Cw_>}h;VU$TP-%-K3*CnVhHx@)@_>%*p4$v|W2XG#nhA%#rVfxM^ zsMhlXFR;y6KO+afku03yRES|N3HX?sia~zW7+2TNn5+oF9X=~Dd`<^Zve3gfFUI3T zVdgMhH;THvD}c9C!^xs6S&Yc|lMuDL3RbSrf!xJk`BWT(xS2oT<<%;e`loO*>e+tL3kmoyF*;C1~bdL6!V|8`7f(w>UJe*$OCI%p~3 z-KRtBND*>nNQFkM{6~%AqR9Ijo+dWNO1RnJ2UQ$-o4X`Gna)$$Meiyuqp}ZA&<_XO zspHOKI3ID2EQ>aPromz8K9mQ$7pTIR7=gLL|Kp~SNX+OtCd& zj{Y$7>y)rdxjvcHjoMycV=xz6*ZttmHc6p`-~&n6eG7l+D50Cc_f)%njCk}UQ+;un z`m52-^jmzPN#>V}knpIQ+1L_DY!)ifh*5&S#Y!5VJ(tHw_k;L9hR(y0t1gV=$V|%~ z84)QNE#aQ$CPfP`3Rxwqlo4qfWko_owuq!;lt^*U^HT~*X;6{0(W1SV_x=mUJ?A{n z_xt%M?3Bl(U8|`YP2`@gQ^kZGI#}wZ3zuK{(qTzEGWqTk+V2xb&!oFyj{Y3}emNbB zm1m+?BhUMIwGfr;v@j{0f7{zWqu<7QQA@r{Jv3bc7x?i^h7U6M=XVc%JH?7;rz>K{ zODSkvGZS>;bBND_6zK4cgNRx7WPR}tTB>OPip2(4WFQMsOcogzb(?mUbdZSZ4OF~9 znA-ifsd4YXZZcRKLvJ{j5WoJEhEF?HY3zZ?q+!1`I(4YAzC19Rwx z?;Fuo?jkQfU72C8(bq4d(C0!tH`uYNl6%xV`|-&JIMK=wux0Er}kZi!nI8 z7G>N$FuIWUWBGr?*-DL6d}u4iW!b^a$sY-M+X(NQl;Qm5XXZhj84j5F8jjNvD2$Nvid)5mn)A*nW07 zr00Gh(P1x{G2<6Q)ZXcI*Xw?|u0vGN^|A;THF@H)4~MboUooD!?2b+@b@*ej1Me?Z zBV(5b;yITTX3)?8e|YfwvkD0WXLG#0P!V@d4uQusP$0=NvGC;KXsNrJ^Z z2yWv$r+b%BwGSQirQR$I=AAK?x1%w2Y!Sw#CE$*xt@vZEC}z7v;j3T1c;4g~^PP9- zD(rr5?se-Ry%l(id;V_)ZG5zhTBRA%G167^Ptkmw@YxSzBj%!o&SpGepojALCb;UB z1NyjF;W6Tj54FG0@U2d${z`@(I`)V^Cufp+n>>0jXey_iaoSuit$}Ec_@PwiK00SC z<;03_qW^FcxpPtyfsT{&_+8f(^Np!-~XtJ9-p&Z8G@Fw4rr5^fZdP% z@R3;%%cG%aO3Chr$1dKCud6`JEDs-PpDy8>vk*>ZlXV2=AwC*E(W(uLMst9 zJZnD(Vw=;MKG8!&RK5c3#-70Z>!Cc~WX3R7-$jkBu}q`2^7l91nLE z?1dO|ok$!iqRnq3@P5rvW>@S+PBJ%)tdy68gFL^r)?x)47rGw;_?~}Qyd2c{iooe9 zsU-A~F0Age0i#SK5I^Sw!69}q>~q#Ee1R7#eD9}YYE9_aeTNCZlfjiQjL@Zj7M*P> ziSxY`Kzwctcy8NEwhMm%qvtQ6*!eyradyz(wHdOGc5_}AtzqranIvWOELqXK5YBc+ zfP8lVk<0Xf`HJFTw$unDCJupX^EmcOQXX9VsL3V=Jclne*^qpm@7#o0@=WRi$V~AD zxpbZ%oVkJQQrZAlr4-?>?s80;uMd6x_L$jdiGDuEX+)tSW20V8-u>i#an;$V^1g=D zJd;ApAvH)jcLYLZHKAkj4w%%V!aU@CGG}#pNBF%=>kSQ3`PUXd0$P|Dmsg$AIl^d)RDuhZapl z^76+vn(@(@6kV++Po}Fwey$If!+UphIuaNa6=D1!CBjY+r(lt11um19vV&F%tl@SU 
zn3!6|oVX{{8~2eu%OW&TonRhBk;0cZB6rUeS#FUecU%0c?T>VC1Mb zTs^Uqlo-E-CX9yu&u!p!?H28R6V1E~Jx^wOyd*D*8_5P(0HyJ=;34eBJiKy^IV?02 z{|;WDZ)UyVyNKSjT2+G20yWXVCIj+(;$zY)xr3;D`$0sj5{aSt6=Hw!3-NwzMAuhO z0x@-M#_P@kSd`vCX0)lmE^iT-?354lgoK$L{M7$}{Y&aD8ck!eV#)l{3VMdOS0|dz z$KHp0Uaod8VGh`!kK#vC4dY z5TTX7=#twfs0&z-_pc_?ocq#rpTRNW_Q;i=V@twegEOSF?HNg!7E9P!_2l91@ep)% zJd~#?kZJQguy9+w*_iSdMAS_YG|p6!8K31~k@0f=dwG`r$h%1mH#U%hv1ueG%?Ous z$#~H|1y?xTrZ@j?<^8Ff8W(+*V5F~40D;(UMvx)I`9CeBk5_h*uzF4S)3K72o+*H# zF)lo3+>-7xP=eSdMfkG6l|J3ef<>k$|9kQP;-29oz?(PIr<{={~aG{45#WF-k()?ZC89 z0}R}O~&3b~?h>K^M4HkO=jY&l8QsLZF&-8*V(@3dNhALQcqf{_S)cHjjw0 z?VAgEcbN-$+9D1b!v=6LWxKf&d5*?yos53BJ~gbH$Fmw|Qo}8$NL}Pjs@iG_%EB(t zf3J=Nh-~FrP0auXyvVz34jyCSwq8Da-R)O^LL^^M97#(l%F29|wq-~-WY@L4w-j7eiym{K#ZGL;-? zUlLh9M`r);6s-!IN_D-%AZ^GR7}ko*Jz)bv1BXFrhbd@zB=UUtOh|ON3JvOi$g!wc zNRbMF#62YttF;P~kLO`i>H)M6&%rwunfO!O5XtQB%_EyLkGT07Vew-n#y%Nyk7Y_>a zbm)wO*)aCyS?;;S6Y?xEo{kY2A*Bk5bmW>c7;nmBJpTETtvfS`%Lxk@cFhE00Z4Cjnt(#!kvSg^u{|6@Q zE7$}d;miDg@Ne)hoD^*U_m*+&{mp9Z??F-4?NJym_1bN2%d@V-6lUYkW96h#gM-Qy zzsOoy)5bHq+_?z#gY2!w!|a>wN$m2otJ%j};@JKj=4`l`Kij|7nN9SKW!EcYanEj> z6605S{QmtIK0B;MudUO?(RK@*_xc6C%$3GzYMIam26*XwQSV%B??opicJQLT?@Wlj)8~De(3SHuO?swyN`q=mZE&n&2 zTN+c!Wu9=ywA691Z|Ecbj*=nayS~##yDlmemeDwhMI@l}2wi@5JpS}Pi>hL&^v2M0 zX3KYub~d;{K7Zb~w6(%v_Y!CtJq4Hi`^^0=3qg=bBowM$gcbTiVCZ=k?p4KOSgswF z4Nr#C-R(@f`3g`nTL6K1FX8X_Iy!bsDy}}emUcfph+au|acs(0s+g)uXN4%kn)$oY zHX;)P7JtY8YQ1o>hMM5mpKKcT^F6j%O%#l5`+x_RAYC|kh;BM%N1L9@!J&RRdMa-{ z`BdYDWD zxRQ%X{h45qtO=@9i@5Xer$E5FIXpk<54jcW#(Nr*@ln$X$~p4xlwWhuZlDskYl(4N za?0q)IzIcC?uaqhuW*Oli;3c%k%mIQ#h9zZdvg~=;`yNryvEO93KU~d=20{kEt858 z4w|sW*A&~tMoC`EUVc{dk9=;@CjZ5UVat>0WDXMqrkCeIgiZ>)H#-XU&$MCOsin+} zyYn%9V-gJIo+c`#U$OL26VB*0zy}(AxJRLfT0I}8id!dRoYxW9b!|83Wh^G{RzalE zzJsir|C2<`^x-s1x0)+m*TwAPa@dyb$D~;WpsDgX8d~8EVm61d{M;IvHe82#d#>^f z6(x+|S+^~3^l)3g6P9r4q+DAXw!Y|NHjdVl50z)>?sNBH^ISuCZC8dCkG(KPRUS2T z7UOa6INZ{ii9trk%mtpi@zsuS`0#2znzoKo&uypa4Lo42aa14A?-s=tZEre|`_x?Hvl50}$f7cme5UTA9v;YAhK<*hQO4*JjHrZj)!HmP zxIwF 
zt9vYQw!n#qDJI~cMih>@=*&LrO<;s!1R@*TM zCYPSU&_V;eCx42BzjVe?O?^~)kOpspPLszMrLkCv@5IjgNmnf4y;gaiDEVX%nLn!d zEoVQHQcBG^2RgTqBR;On8yZp)kxn&usFuC5nOjJXf5@2FG9@cnqm z<~V%4`GGhzIbE=MtAxREqL}YENKU46Fz?MJGS@+ml1^Fld~(NJ#Og6u;XWSDuRITp zfqn4nT{8(^IR#TRL(${6B`NcN$Ayjztv z)h@%g?~QOkD;+xIk2BIm-^l&44e-2s1C6@B8lR+@lc{TL@z2C0+(1{Ne_1VEe6x+K z_&FIT=1s)Hh_UeF$1FS_eHd>jPlp$C+Z)5W+;LTAEfrRe;&Y&8@bBS8*gZj&nfr7X z$*3p>{jvn)*r()r`!D+T!w-@_V#|J#4rC>|q}iS^8f=cc8vCJX9NV~f2FyISo|QPA z2tHT*!REsaYIt=$X7^AOc2dJ7VRP~CTs1uOSA|?~P{LC@^J{UFE4(w%Zy0S~4@GNP z?um6W+P_JI9T_o1)vc9D2`PpxpZAj)lV{-6CSy`|yNFcQn=Inhd+uI72?yASE*0Hd2GqQh{to&5C@lImdbUstSF}jvXcaB z1)DHg`660-q!Pz~b+|oURB+MZFtcQE6*tKH-6W@6qeBeevGudY+7+p+V8uCB_IW)U zzP*C&2|mjzUn*hk?mM%}UOQPMQ9m}?_z_VbUqogKcy9UkF7oS24Q%$*!2XUVCSqO( zc{nl(4~&n%floGE`TSi(SGWvv^q&Hw^$cb$)Mp!h{Ub(d((D-ZPVU1^ZQLg-hez9e zFyo^wxZB3joLy(=p#C3Pq8<*860bO&tf%x{loY4dppVJyRXl$3G+j62AU$*W3ZwI^ z7U~_N1^h-#u;kAw!Hhpz0f!YMSWZC5eaF zBK#cr6CeIf$E^H#+)!_gW+H}I^jry(f<5s@_#4vRc@3{_bi@1?br=w%j};No=r=|f zy_|Xe$mLx6sdp1^ zN1554HM4Qe>s*vr?gP5oeKEZCC8sZg9?bb_|_gZA^CORM6%l2OBLV%IUIul4!%vysn+} zrzXM4bfttLk>6kp`98nsWbC7%i)FyRH=Be%T>%rE?{md1dzqQW8$hE^md_Hp-;Z26?adbR;*j6ex)N~Uf*v}oYv4BCP2-EbnMG0NaF> zXz*7a)K7VW*M~7QdF*_6`|c5u-LMGqg7YEew;g0nJw{r>(|N~mD(Me-M4Cdx$<_y6 za6!%z_E%jZF4NSY)1{2=+*t^+N1mFGEO<(+V^g6=^+t9kzuR9THc zam@twqjfu6%1?t$(;P{ibUEw@sv!I4y(7EzuaI?R>E>Vf@Bb&|B$%>eEP43C-)!xN zhh)(303B3(LHGVuM(^v9jMT!FwADfvVs@=(?(Z7rq$b~FDqcefEF{ zWGOteKL92ztHAWoEX>m{g0-`C39U6G6>BK$xVixn{>9L^+8hXiyMO=FLXci$LK#&_^|+fA4#COla#2;vIVg4hc3+WEFdpWG{9Zy zO#a+`MK;LI;rUiuz@P8^K|~njl^lhXn3s_Dw+bd2@Q&Z71n!Tco4sNiaf6_F>B#+=Oe+@F+c5H@N5D>d5*PQz76grA6yX$cg6lFs#}RUncYZU(@OE zSM)4-ugm+#vdy4S+?0Ggmq$i_HqvdG52(sVo)uRoMvBdsQ{Q!qXkTw*gU3OnRcEDf zY>PGI`2FGT&e+KKo;e1F+11RY8Zq!3oevQKrF4MrPPY1{aE*eGBsU}p`p1aF)%D*= z*7tqnMB6S{x;={Q@hl_ZuJZ7$-~|kq?}abJ`#?dh1wN%CWT=Rcxq0>=!A3*sQzRh^ z*FnGOco=S21N&aIG-{8=lVLp|0Y=GiC-p2b=;iMZFHCr*rxBFJ|AxZ&WO&cagO=D< zuvO55TPcO``MED#OgqXv|5pohdvf8BodG)9T(V? 
z)5&{|67g$u(XYpXSy7`*-q_Bi2G*SPwM!XcffoM+RA68x2KawXBEJtE0=VCJ48BK!ocet zLLAcv{lA_;d+&3Y7CB0arxlTN)xS88LPxxL&JZOgPa-eFga}`92N$9P3rD8HgS%qv zuFh~6?yrW$dmezt#dhfV)dDkGC0S+J^B^2}0lqDFhT_eam~Y)C_}@!oQg>v4eC3(6 zOSY+EiCX~tJbs71=lg;;v79bi8BWLSjUic6$K(F(wRGhrp3k(#xAC(<4_RHUM6Gg6 zY3i49PSU=Fu(#)v@X&mk>Mn{O3*G1iODS$tc$AZLGUrOh=+Wylg5ebZ{Xexnjwrir zhsYC(V5E}(%@fB#Ue#OB<@ugd?>B*b^fnOE{0y1R!reFFRF(5cyJPY8ZQT%^DRj6gLNQ0uZH%7Mxs+m9X`9RhgWbfPRrelbHXfeI5Px= zNFGLWd#UUDz2@Pojze#iDRn(*40Co)0_D>Yz{oDA8IyN|i~f0T>JVHIh%}I268^B( zMH3XrTYBu61F$2OwBt${88^!nR9~JW%gcShGV?iMPiBFH+yro)6bXXUv9Ro<5D1-; zLNPgS3^m+~MMF`X&V}DRXVCzkYA$Eqthr5oo$#h|n|9D;ek@)3I@Ww@QexvAk#r{i z-$8oiLnB#{^_3eJaguXbc#9TF$%FaPE_&&U2s8XG5$r?V(WIr6Zq?gM;?+y3qWE*- zG*=G>ej7v4xCmG&TmdPvmq0M95Y}#}1)tS>;Q4k(c-y%OY!?2jbaX0Eo+F>zK7|Dc(=i+aGWA>1-E#`;M%WS(L`ni-uo1fU&{`m@RKbx+gXwL zxE{noX%#4H;m>z<8(MqfEcsCsN?nb@XxHEG7+cbd^At1%=XdsF!r$qFo^D29Q27W` z_T9ohwKam(yR@Nr=@$I*$%Ql8mJHIJze$FV5KJly1C5Sy2)lj=)~@7Tvi4f%y(44c znpQtNxQ~F>o`XyT->LsR70&SevZk%gw4y1BhMP)oH<@~J|0(YTvAsoS#gsrt8Q;IE zzkr`CDEwPB7rIm2VZ!et@Ux49m8ETPAYvzUg-cLF?M)!x)k5^z#^Cq2hiJb2DN;1} zho&6wXzchAO+5boBeial#CmlZ{43l95f7fi@}-~Q(#;0=^lA-83C{qVz~yjajvixX zy`T4IZU8^M5Mt1&1xACL;C;s%__I5c%*a0k@bNyJx?Kn@ctLTye9vtM=^utn2D*s{X|(2%Uh7V$pld7Dl^-;uj~K1GtP7$e6jeE36i zq7y*5$)9I|mf|AoSs-O_1m48x;AfS3LjAo#Vd5HmyuE^`i#TIIM>7~V>v3j&I=M>YrwY4 z!w_kj0>96wL)$T1kZBwXb2_XcVov}m5<$3M+5|seq=Re36H@U?5<)=>Zgj;#?Xnhd zHj4*|d6rOlen03(u1C8LQMR_`Cd@BU!=F#y!nkwUux|7c{n2klw+C$lk$vuLV3q znFh}@f5KDQ*U*q#2mU7sZWh{4u_R|_3^4e{7o_<-`lJ3} zXxmo;^LTch4gWURx%dK}Kfeh=j10F#?*n}59U_rqZje322XH9K1sZD}koMR7HD5Zhl5-<2F&u61ly@A!A2aWb)*fJd*2zy%z^?kkI zV=@3=J6=Pu(HmI4=@509t_6kuYOI@=F*8g2F>NMJ7M=>3+oZ6SpF=&Z z(S&D8yQuS2W%iq|EnQ)h0IE?=@MhOxFy3$#U-zg9*5&U&`vpS8+O`}V`8_~{e*tQW z1j7|iRk*z;7KJBmhY)uau(8t?IIR*DbX!aksCP^iT$waU@GQ_)aH+8Y+nc8dE_{5? 
zpC4&7nEc`va_V^MGRt$V_n>59HA(UQLHlLfv1Qy+9QBCcs)qQRd5t#Az4{2Ea}I-7 z$WEBOYa-;mcfxP)4>H%4y3w&I8~rwl3u?sOc(zgq&RlnZcTmL8NiWAyEBifs)-(sw z^vA%jx7{Rml^J{F_BYUbHv~NQkE|_UPCm?=f>{Ya!6rhFsvBCu*X!WsMsIk1OdT^4c;>;B zEUK?_r7>Db1xC$|(2hF<3sR~YHT3wr{n30VtIGnn)y1S%pCboj?a@xM&#dm|Nf-?` zfolhJnDb+w!jAHMaFbGDo6b9f8tE_(K5T@kPZq(CD>iUTy9T~56=!whl-LW4G`ETod;9+#xPGb@2U^ z9gdcy!nHaxSTt=5D7Ku0?v4mhUveG1&z6!`eG|#1@>cT1Yd1>7m*H25D4d(=3@g;^ z%&dH)@I!+yne;rHq_!)Q;%_Q=`w-vOdnAZUX7z&#>Ds3Jm|o!EE_j*ilwQPn@lU zj}IF`W8yY)7oLOB>Uv_aB#-(Hw9(7ckAUhye<)T-<$dHUu<49ECiF=Wuj$1^YvdyJ zu4G|FW+JG}N`()SyjObPZSen^2*bCe;J*X%>~@^M`n9!y@9q@1O%+(@0Y3L@ID@r~ z>4)m_x$K0+CsFk1M8S8FC4jQIcz(ty{7)r-e$FZ4**Yx~70+hqhguJVFtso8=9nv(?!=jaNizBYk@Ojo8csE%>Tc7A8+!R3P4vuk z!1KJP`)M7#<^6b3`n-oK=MjW{d=2O3NwJmI)%1~-G%K@m2)uQCsmBjJXquD<76wDG ztG*5F#bV&XmN>ZCdzTDEDZ%@~LOj{|9rrWGam=ifXmK|eD@tCX?N9~E^NhpT>~VsS znaYAmed#zocrOiF8I3YQWAJ^bDGITdF=Or^ZeCM?qObTJ;erc9<5?R>T*-pg^YZg>)O)6C6n=tEu7Ug+=7r#9z3%=@V^upASh>Br)u0CIpOWhsW#s>5#r3*vRLC zNj3lT)RSR%XTFEBmpLGA_lAT&N{9Ql6oP%cw-$mvrOY<(pum^M#RaKlSkudjoMK+MFay@ZO2)gA~1B3Kh|6c=llOk zIOkv>h6P&U)ZBiWws;{rd>W5>{>j8{xjH5_8dF!@MDo33Je;nprqMTy;G$ng!%EzwbM9c^OhEtbe)2v^q0y^IofF?(L$p70wB`>( zneK$fhi-|e!uVkPNF^@nG-0$3XPAE?GnuUg7npmKN~p}vQbuW97_%|>Vx#c2JlZj$ z$*pq##MNzRq8$QBX3FemoVk!UO-wyR(=7N|we$Al!REu5$>+GFJhCv|s2*qE`42xU zC(@m1;>6dQ$eNb zAC&J9Vdw6cz}Eja3xp@Sk}ExTn9R6xJ_EmnIQsu$x~Aj7Qy}_6LD?tR3@w;laAZr3Y}+d;pg>CPWYji z`P%7LoVIu}dAX&EO!FM1>-#U0+PNEWW=1xiS^kTLei3lRhE*h_Dv$4Vo~EBV6>u{4GY7-Pq(m{2IvE_GTUWlIO?v|A!`wSFWAFMJ@1UUSIX zqy}QE*F%1&`_lWa5hI6F*KW)gs#*>x!ha2^&rh_+^piDp!&M*%~ z8T}!8t~eemY#T5%KLdw!<~Gh24d#kJY0@B<9J1Bd3Z|S}M|&rDfa-e>uBMOw_QWWX zzm>CqS}%jPb}KToQ;6o7Tq60CfhCde=q1`2-bUG7dueXoJx<(j8it5 zjuds;xR-pR-gqGGDitb~0nIb%RR3QjM(>rvJ8ciCp<*z1F^YFT4g~T}`#_>;w}zWO ze1y(C1OpTcDkZxoUM6s+o^IF&e6QRP&d6+qZIi9QWY`9V214P^rEES^ zoQg4R0k~F--+%8NkIE|5nESI4FQ=X4oq0((A)pWg<*s0%U=RK^p1~y_E21uXZFG#r zM*NdG#=M1Z8?x_CVJg47JMc^jHVhwQ4%XxuY<}{V?e2D z0m&Vj29FQQQNv4n8P85prZ+>Hw7WUd(=l=&vrHD)m+>UQaDdoYbdxnvCCrBhG34CS 
zdNPuAh-5dMphV^oU6gvBz`+u3YvnW)x%MB=n^UHwYk)Y&KV>qbtMH*iEv{ZSg8$Z> z!WgRtEOUH~qW5Z039B*r(+PA4i6;Kq0y;~ri3*n=r)#av@p6GKb|p=wzFV);&8tse z_|dH>nEFyl;J;E@5V2~U;P`EZ!~)vgbaNb6wZT z>)WaX<69U9>(4}aND&VF$tVAu^l6zDKL?5JhUKa^vd7xFKP|zW#_nN8*e;ndARWxi;YwP+sf+t|=_JX0R>1E|0zsxl zhR=3M5!oZQpe^~3?wCK8YUL%8rPCZp^4h)hb0>tirXWmdZbFal!OvdSPFmu)w_&rUO_q}&w=#ihigDC?REV06; z&z8ZQN5`0h&T^RE`GX61Owx~-zJ+3j%Q(I2AvDjZg4Hv)Im z5q1y6LH)sHFss=Vqm$L3W!Zk})isYwepi4E1|PWVPZMa?z^vDr*HVF*Zr@47@&bCkNgw=XRuOvR3?1Gv z83wK@z>XO)r1_{Z%6=5Z{%c2>vc2WlKewAU8LENe%OLb}jix75Gx&bp6xz`@o{4T> z&LwS|g6gHmY1KzxIzClE15%Gr(PT*Kj zN4suwG-!KI;|6Bp-2Z~<$1Ii<3cct0$Ewrfr2CDsM}E+MjZ@I3^e3@40K(N7kp02! zbaeS*+{ypl*Hj&0rW9oosl~EzwQxGDYF_}qOB~^@k1R?$A7IA&q%=C^o5ApwUeZx{ z7%u-O4A(CvlNK2(xX~j|80Rl^_K^be&vF_G7ym_Wygdzl&u0*WIX{Sr_x#3s>nDs@ zw+24ku7?SF;~Nh~+Tz>tA?leDPCS1t!;GGXw6V6F$m$>9>U2cu_^7qu*_%y=XFsGn zW4++Op>uTUV^@02+7ODl*F@`RJuI7ffSCRGkL2a7gV{IxxpC2Av@t3HOn$n6_$)qi z)D;02Hkv~Gx#@U!iZ?}xHtI7elbY|pNy0W(k-qj{7A|BBPTm~tOQ>HO5__=i7Ju-5njd1UJ30K@lbXO&E>YkSx=Nj|wSN>=G28Cha zmJ6i6Csr94yl0FIe+0DrAf#PWpcIL~~ zB6$a~J+55ymQIkFkDKNOpmdWO?H8Gd)08B!q}Y*;54%M3eRwiKgA*8T8EF)rW=|UU z@21pzDLfsig|8b4XrI|k=RdK;mj$zUhVXZKBPAZ!9*sfyJJ0Ew98>i8WRA|l9hkDY zfr)vSN6*OBG9!P-;`$$fbgA!U2+l}=RfKM}0E zLLs5W2P#$hp6%ifbh^1a}J4J7Y@Fb9AfqAyvPnp<qc}Nq(6Nt1afkl_&3sy4ELBwaNlY&7xtohAuyI z9Sf~vG~lXa7fBb9hI{oac`#9m{27zSsLd}hZ!|EYlYO?4oMs<5dAOGx{^*FpLJQ2? zE%JzB(J&bfn8R(&0P^g19V8?ellqRipm)^-EzV|=PxcCplJm5AZp=lPJi`RL`HuCW zH_hZqn*=#9dYk^x*8@dkKNvOLNRGOgz?T<(;IFcWY+IEJ+dq$iEw;%JRN2O@J2=2? 
zt)I&14@Oe++EIE@=nU;Wl*l}GVnb6$Fy=H~|arExhQl=?H9Sh%P(0L7Q^oV9U z-NaruzuHk{?zgCk+1@rn9Tx3n{;QXzk>AhIN4wuJ%-E>LU$y(_y>ca@`Jn-Q-JW5k z+(~@UdIID5+zwk`hA%JX;H-(wyszvMF4&|;C)#M@^e@|qqmeZq&$~iHmfj*!4qf~k zq=mXl&xK7-OR$RG#mGmMct;S0kC#6|eU z9^$0ojN=ES$!=E%Xiu;qAKxkB^OdjY(9}QF$-A8QscylX&420g)E#t>cQfa_Q-?mPXEft|R6=a)-?E z)o}ToFpd|V53|#2AmXzJ3T@j)rmvI*J>^b%>gIkr+-d^V=CUx)eU$7M@`6c)WuPgq zOlpgSX-KC#-B;5|*oao*yIz^k3cMh~+4G6-jdGH&HiH$YtFk4!3)yCod93Rk8P<2Y zCcC|oVYA%=Sl3=-)^E@pwDfzRl68WTX8t_tyABW4^60xr2J4r|ko zU+l*2xE07=_0D3SO`OU4mV~kKLIG^#>lpTPei`xPGn!AUAF$+HTucPMr>tR+M19d_hVe8!r-d}ZwoO<^X z1D8C-x7(JX!pZ}<=u##GO4-nh&*IH5E%L?hJfCE{Yb*Ub`y%PmW=}3?z8x(+?>Wy37uclB>V)h|)4LWHA>vMA$_+z1_Z%D=JcyxF z4pEtu0yNh?hTTpBRIpowB#ECPmqas|_=NkMsYV)AJy1x^6KVms_E?_$u1G#~n2{#G*le9&##$_`o3>{oUW7 z(L*-km*86Nl zrQeS+YlAaUHZ8;}-v3Z2_Mdr{<2@`$KS%dl+T&=IGyb!zpcAy>=u_8ZMv>3oPplAv z{ANS$?5a?%>GE7WWR!w`eEX@f@(!x2E-hP5u-P6FF0yR9*sKn<|HqnWE9!L9W9g06qL5mMFkyakTx{qI}t&$=7 zet1h5?eT)))>~xJqG)X4oeuN<4&!b1J*5h#@XjnR3^%lIoY~xi!ryGz-q1a4*_&PH)eE;w zm$9<8bJ#`Mzrp|TeP}+Q#TxkafIr`vs9snD^U}?Tii!d}y(JA33p^Wy0!_f}*=!Kg z=!W18LTtc~1gIa(q&~NPqIAd+Zu!-3^!WXg=y}7PmhLpemCcsuJ@_9^*!GLd>ix}x zHvJ<8hI?t*;!_y%SOmw^7~og&4bc70jD7F)5;BG^!B^!GkUDf7RAfDfclLBMCyA{f zFBp%NXU{>d;3N{U8fu=k8_l;jU_;0bJk|e=9<$nyG2|D~9kZ3JZ}!FN5^J3Ctl zvyfO^K8Bxochf>n0)zC9LecVCdbQsflrppFXBTNGI4z4~at+e2@Jm!e+YI(kM=IO&Jc)H# z=g!7&P9jO>22_75&r5F_qWzz|NI*{jeOK}mV_sdL8KJJ^M@bv}F_mXyf1V%<@3n(! 
zL=Q|Z>Vhk}MI@(x2QfW#7nZF2$Gn{3gU5Tv6Ze{(_~MBJ*2Qn9O5)ttA=-ryrP5ZZDB4N+o!>w3@VuVqxzBZ9pU>z0A?q3< z$)y@O__%u>?{r9n?_FidzirUwFNl4|T3uOQd-fW>Jc{KP&tZ7Gj~_25`iRlkP9HpN zRDcI>Y{u^rSIMJ4H!!276D?bRP|d?bH2aDKf0|bX)oK0o_3WAWsPQrm4y&Vdd>V2a zE}=Z53#xV^U^M;`ypIll8g1)2l2CO z5WbqOi2q=S#x^^V+NBflLirLrcwh((1`V*fwUriam*Y2{T}j85o~F}gHqh&Oew>l$ zeuiosU>=KZU@Qf{J(0Obn!bpWx8`T5Qtop))P0MLbI1Zm*Ap=RVkdLQc`;0yD}r%f z?-8RP=kU+FA+qj73XKtOquMXUFg|3O(7$Yf=0o#vO@J{Tel!7R70kuY>&D^Hv~{TM zDR{iqKhZ}8O6Vf$g@^o{(NXsiwGGwAtl4(BT|h;Qy4r*5O=Nf*v%eU-(}>@)Oqu^0 z-XnBt58>UtBl+3pEnMm}B@pYY7x<{D+!412kaRbR+=$FUC!aj<9h4_N)0((%6)Nbo zH-&O`?`Z2pW4v+VCT&}Gmi{eGrgM|u(ES=~QAPMIWL`O1lNS(1lyAl}uQEPz&V@69 z&Xohnqm_8_q#OCYZaPF6#uDYY5;8>@;HQ_j(0{ZFeq4xzgqvew(E1-&PqK)n%0p^x zT+3{U`$BG2Cesq5kx0icr?&h3aokRQ+$d!Fg&+%vcB#XbM=3D5^%ZH}=LO4uIfCdF z9&{#3VwPwq6`fH}XN-ttBqIaJ{AO(|E-l~^mMfA0t#|Z_GRIAwluDjC9-#kCOn|Vz z(X{&1G;o-`2CmrtA>!*M;2t>z7@_C@&#$KmEav&JsevIi0>^S{rxld0n+uZ`_`tM9 z9oQ+COSK*y7qV00e4Upz?l<$Ebu{o4E?;w-bjF=v@J=mR`MiLXI`gFLYbcE}wS#n* zZj!u7O5lmN5w5+NJasO|0jYFaG*ZwGL_aZ8gCCIDHIrabf;OJt-A$UL^U2NtU%?YF znB^|+E9@82 z4s%-H8fKGgG2j`jMw8p#(<19(IxkC_8T0c3xt!qx0q@6R@}p`xoTAJ|LQZ z%?GTrj=^q0D}Sym6b_x43t8_ap#IBhuq;0h*kA1%DX?q z-}^6;-oiOJW5;J~d~T1P<*zV(qCQ4$5O`>vlgVq>%h2|?2rO=hl5z)uITtY#oz=Q& z&9YU3=9tG_F%`$F;xkCqL{oYo{WooE{7u8l2I=7a3Y40)88rhXd87M%__14x-yZl5 zKgYbpv0r&q3E)xT*C3I5=0u)X3{d?gHgxQeD`RNbJp1vi#bmH*fJ-$Kw2k(ikbk%w zWOhlhgQqGmr1y+&_=)FHqW}e^5 zRjp~ESz3y)x_KR0l@tS$eiAshx|bY2{eo0}Xttan?CMJs9>S4Ns<3{Y7p$A7&PksP zAUFOd!^=||Xx|@41G{B#N*7Ph1)HI2aX+0lE{&qE>edh?YSgTr-O`%x{JqO%)rNqf2#vh`nh!dKRETeCU)zWGu|2M81$)u zsQ-6>Jp54w%b%QvvTYJ@v9^(n(u*NFDr4xmi2^6_k1YHv>ITojli>I=vqpaYD0Ygf zA*&xeg3V8Q0CxTvpf0NeFSW9sOVjdj!N+h=J}O4aoZ~3p&~1a7R-(i_VY5 zr@F~Ft=$w~4%RS-nzhj6_#`CdO2mHIPNJSKf}PIZ+peIoj)U}0!FyG) zA8dC{geY$hymn?DR&<`Hfxkwhh{rOtTyKnhsq2M(k_y;H8p9@AD{4MlmY_s2qquW` zXh|J~zS=0b?$bk5?Y5H>^%7t**hwDL%Frc057M}qZb;Iq&^}$5VeTx0Hx<9R-(fu@ z^wJ}uQy_*P3_Q{A@MJt$lFrDzc7~t1EI3Esq1-}CA%}FHR1L(CxI<1*HqruEqdVl_ 
zfG*Uv%!d}UJ7j(R6k&%I2`1s4ShD*GPBBkG-Q1H{%n3dAk8JU~OaYFzxQ&mV_@kW1 zID!9Niq)q3F;O8D2aJt*j{|=&v=Zo{qO!fDB~9!I44#$7-k1;t_p3yB9S7c&U+& z$qU85DQ8h`NsyDCyO~Y*Ut>+w z7Lb_{2aBH_q$x$MFnaV?dc(hrOa5vMWueb&y4s)M(f+-dFld6?_!C$rbqFut-bPk- zm{pewda_bQ8WxSL#y^RXcu~k5zduldllOk22A2bIW$i^waZ08?r6ZDtD}G zIeBRC0GE37L$};^IGXer*Y-L=-JCk2{1*6Ag&eFrV1)V+w=pB(I=wR84<#FC;@d6- zkSf{-hZoL;iqU>#RGQ&_lX)G9nPJc!k>T>X5AZaWY` z7;mBPRaB3U(GQ?rK3#M?7e~tsj$n_NJTHt*u-2~>68m1zjsIOhZ#95XPx=@X7;nub z)pUVT8mg%_;f78Jo-*V3*KfzeZZTv2QJD;$45%cl4z@$7a7I?o9nFC6FC1P{K_+JD z@Fyc4VOmlyw)XyqHob+^%c_Ih8emDkNNR)hqcrZr?L-XyUQ4a%AMTdGI?0@*!;Z0< z1N-iX^Py9uS=Sp2Ktyi_j5qjzCm!B{%}V_!8_`XxdBVvNd)Vij#M#(PRo*K{iVyhp zf*aox2ab&j@U6oiD|Vd3zI|eRydI~=V3g^i7GQOrXD zXZb(Ffcod?l3Ga38h*oT;}&vOWfQ25@23Y}xiG4+t0B|Y8dsHw@(w%95HpqdEPg4Z z$-N`aHzaXX@j=wwc>od{N^ptMA*f6r#b31>#h=>Lh^^3qL+O9eqW>Oh>viLTP51Eb z$v2q(cM2Tudyk(FRb#aNdCSR4PjJFdY5tSqM8Rir3lF}j$HKv4dVc$1?wNQdQyIPp zcPzh+nwGg3KTV0+&bp0-F%=lu0U$R*nr%FC5N=w=fw{O3Jo6O#psHVDWTf!?tv91> z>4$Mo&K2BaT!yW2t1#=dIG=OA9V-JSV8iB1*s-q-?ey=%mgzw63~mzgG%x8~du<$& zkb>H`ajC}Y1vl*D!C${y&)IdjP|0`*tw{5%omP#AEcuu zC!(_Uew1#|z)9;TKw`@v)@M)UkL}DyvAN&DOwJ5{jj_djpY&m8pBp^e8;ceWcI3*T zFZe`Bnk(#{&L6&W2OWQD@NC zjbiNLlfuqjJqTbVM){PYN6& zcl>hI8uWa91r}93z4fz@B+F@&CjI5`$;z6P)>Y!K91~_$$P+HfIf%Rn{{_j{=D=Tp zFdgt;EWM@k65C_*u;|8jdR;ymYZvF^2I+FTvhNMOzE!wa^qt^b0$Eh*d0}~9at^fpX3ie0Ij&X@pQsVh&+&kVmSU{?z-#VmxG~Tz zrg;BG1BUHghWl08s7$mfotpEGY5$Z!dxo>M28}x{3!!zQy@$OB5BXq$N&*p@Y)5(XRDBvih&i+Lq<7v%= zhC$kGJ`8hr4OzZ#e~h(PTw!H~Hp}>`@XWkXpgU1?M5Z|G&da$=ze@9 z?9ru9E~GohIuiNdCgHwXfX)KjaMZygG$i*K-E)eCqN(Hg{=|d)$Kec!5Fd|Ei56S+ zFo#IZzW~E;HORH!`NYDk6)xF&;RbOT5Er<8NpItzC#{XP*RG~au`m1#RmR#4wIun@ z0k~Q3j(xAE1B7jZ_6K|L@D4+~;?+Sfr;6i(Kx3*U`j-Av$fuLcSPYT8V0nFdBeO#P zCsle7iE16{_(bD0jedCxeQW$Mc*RnT4(gj7F7ldWPnJUO2y>hkB?EQ0kCVdYGRs+? 
zo(R6^F7l{!AMp`;!+gG-imHO*3@@ zO-QS52F;1UzE@RYjbu8g%gS=PUr8A?ete}-ZujZ^SL-dErZ&?P#tJm|l?93yyVLuT z)pWa6HQnWsO!Zcq;(?Z3crtthK9S?;2VEfwu<{B~xMmLq6Wh6yj4KS86OeD-0UitL z!DZoT7;&>4bnY`KyW|_SxaE!~?R@Fid+*72h40+4+8cC&T_Cq6E1D)U1#lnEL6P@w z$ZUTLb;XatulX$;-+m9a+p!mDkBy#6`h zKb|Grze2ndc^G5Mt7wDP87yyiz_a{kMqDC>JJN8J+u<`C%3>5iZul%+ssDuQ6jLGd zm9^=p=iN+?pC~9T{6Xaz76!ItL6u}895!eLo?M4ykKe$YS7uGGSdej#yy16NC@hVY z19xFQ%}em~>G1?%J(M^_(pl$Ek;`u+V0`W%arN3k9e3$K(A$-;-Q5$u8wFYV zD;)&8q!Ex<{)w186=vGbbfPO?Pxd{Y2+u;FP^*MjLRYCVs$}GmIc)>fbj55&vi=?Q zk4Qn~mrn5cxFkOKbBMP10rNvAhLR_DXfgAKs-KuepC5k4&1^DZ#uyudSVRo0yzl^y zSctLLzrKY@TpmP6DzO(HHG^Y>8QgSn!wKple3OO*UfitAU#kehq95O>T<9#63->_p zX-05s+;8fT_nu0iI~g|20GG|3R9#1w{8aJEDKnL&I?Lu)hZ=atFxx% zPjX>|%Um!S69M0+yOYS3hbd@%A#8pY+!VTM{>uGiYi%V(Sp6zB<@Be z(RS@*uC3Wc_Wozhtm~WvEz`QFbio`l_d6vK0_SNd(+XD#N}%KSWw4B?;7q)=Y3A~A z@ZQ-3+|{39-X)%f?RZ5DH)Z0x2ST=MQ#3tdua07;>*<*6H1z1QL4Lt~5?5}HO{b!X zl(i`;jGo1fFdd{8(-P3`&O%IelE<$j6frmFG-ZFDu*_RMK#va8EmJ=gq4 zcir896t^KAZ%>XuN%HkDlGfpqGo z_JIEG9Hx^CB+;NG8I5{V;llD5Fe}X>XCx=FRTs6{TV{_zHg7DvIit?5;}am{)cqRi zc?sl&RXA<4n~X=a?o$y>Hz+jU0cuaqGa9zYo6#Fq5V;ik;+o0e{u6V^vtIEd7ZyKmB`-Q6Rn~ar>8d&rs2dh-} zF&`FwpuQTl=$APUW(-e&xvqi-_3$d9*V|1mI?tz@WPGttXEsy(NFJZ>y+KoF3ckG! 
zYf$916YjM-N1Hr(MpvCB!4b!ZH$Erg9n!@0#X%@Ci?4b5DFY<=2jE%LPVQFDfb-_Z z;MXhz681HTEN*BgUxa;4zvp3Y_ncxJjBm%JVI|zsQGn6$H))G|GkN#V4^7<1a~GfH zbFZcxpkEvnNtl-`D9a5|6OBXUtI?fVtAZ@3%D8S~D=FN6)iiO()?j){lBb3eK6vi+ z2-v=HDy`onhAO@)On{&rova^??Z*Xe^z(N7-iIIso>pNYVvk+q!bnxnM({z1w# zU!5J`mP9XANRx~!HOx_`%f!s06s{=>T~C)qxaDHWux-0ItN!f_Bl|@ge(4B3-#eZ0 z$L)XgjF{lt(RRW^o@F)t(w5ANZ)*iPK@qEs2&U#sqUes1G)Q_Zu#*>q$I20Of=V8p z@>`!|_kJU;38A=5@b}%^-ody9tKh0tabRmzRb%G#hbcb%oiudKCO$_E$h_?7pm+T^ z`Dgr$*H5LGIW@VO|;_+kcONd?pe~ET_{%r3`_KD|BBEX3@RZM^f2qNz^_- z@Ud^1ft+v$dDQ%-M)&mra*}Q4)|MW}@i{ATLUS~Z&Wl73i!9uKw+cS{JN?{!Tx*p=xAwFAJQvG0Q9qbYIh5R?F$Dk&^gY zCmORi5TV<>6gv(EU_`1H*4Db<{yt$gbLObsg|+x6pcszHjDf)KF(6mg2Np-pfmv%3 zWd9it>#lRKKy1DdTe(f{AD*d{kWGjNk-T7Da1kJ z7Cnp@ZGtNEv*dU*+%lMpdX%JhUUi!VS@q$I20VGeIg z4l+As9>QntBlG)*EH1beOVX4Am@4}j_~d*h^m@mEzRWDv{);0UmbL~?M2%&Y%15&o z_Xup$jbEw5fg=zZb{dW)&BCmXT59YP0-oBVfQ%HL15J;yDS0bs#moap?jTO@5@7b^ zv(WcNg8V4`X4%p6jgi(%$Dmavxa6rHePk|=wr|eE-?y1?&Y!@5vrbLyH$z;wcPi|D zqJ_EhlR$Ozbh5${xLC73BC_`v{ZJYN$s117 zae5_~M-7N_erAv3lMs(-A7iu=FGi-t^?oVy*0{*7b@ z#iM9>fEF}buYgLOW6;(u!-{H_lEZr<;o3uCj#~JWx)-}xR=t%%tACoL;#L+e|1O5! 
zmS@O^ODAx8vCuzrLj?|nh_c~2_8{ZXLwEldMIJph=alybLb#PWIj};EiRj3KCo^_K zWfkGxK0O30e`yH0cT{ z)Y`&@o{io>9UZcvxV8ZN!KZz0^f0LyFG@cKHInhmY$0rwHNAau3YB^s%Z2{F zf{XV_0ZDDANr?~8-}NwAkzYucJ2m6@Yp2NqpX0<@Q4(#1P5@48FZ7C}a`X3IM|(Rf z)VlbBHYF{_nzOo4sTKz&M`Ee%ly^+fZ&zluD?_TyRiNtJCUSMDDzCIbk-w@jgWn2D z{OQT+e66Aue6CPx1B7(fAJqf{u?*}l45oEaN`2fxut=O6|ogMnCC&{NI8(- zzKAZ}s!p#de}?_LjYw|RPH;_o47I5V@bq~jY<{5%bD1&Z>aRAGx9Y=zC-oTa-GqyO z-oxD2x6$=xEB=t_!A1L}c*8bx$lLye$;%XhbXOBtKK2TAPM<_tx5bf#xqq0;h1;oF zdo0(^3Eu9-6Ul}~Q-SrM2nXJX0%vUo+w8Z)1!HTFv$DgDVXHvTAceWXj)a!NG+Mnm z6>F(9U39M!`?3;YW^oF-hScGo{#M**Q-v)*R%6CM22Oi^09RKB;F|j5cxj^rrt5s9 zi&QpYxXlIndFn=*vAKypQ!a;#ldeD#4mhiLm>b1w2c<#>u95;E9B{aBTAk z_PtjZw{><2;W#zy6L^C@Gq>Q5ozHOh?$30#m;_(3_84>TqXV75S>TR{bFeib3#Bw& zuuJVOH}=&Kb0fW%mb{!q`3TBI%KoGI{TeWMJzLQJwsUJuMA=xeP!L-s3E!;E@qw7& zH%pOc;(L~(;hiyf%4`E_>Z%ZhF_E+-xCK=rvKepP2E_4E=$Lv6mzm1(AMG`;C^`Txy3&?OAk=`77@FkRy)wzJ;J=id% z?1e%H(7;scf&1PZ;qxqC;GV6 zMZS15S`)TUaHnsU&E=;(^Wdeu9CLJw)OZQ{6LT)yu zzzZ6k%*gp4@${#!2Q?T{f=K155a8t`usKc=mEj_0<(pb^&Q}3sEtb)g3k$J~5!iTb zSIPW2aW$`xOd!|B3w?z5MCpanFyj2Tg5C<$fZ0w_^j=;(XF4>C)3+Xtl0z(J%Qjc% zuh)cw+#*Ktr4*)$=ws`HBw{uugYtpTYL;aS-MwnrF6l;yZa}EU8dnW`rV9@5!x! zpLi|OAV*^x;ZflkFkiC;9xS*9q5T@{?Y>K-L4S~>ogWE(A<2++eFEkOPse`-c{pu@ zID+34OWWd7D>-h1nObU? 
zRYLKYb2)XbGNd#5Pr#O`4ni-2EcRS8VlVxShxk?-VDC+Vd5LYbYujWPK6@C|$L_=g zx7+l@{zxnjj=+GdS=eh7Lv=c~q28Yp7-sp6PCA!??$2^CXZ8?Yn_rBn=4ROSP+*jK zUBcj%!fObA^SAv#-n3RR#Xq)jj?o>Yp;rX| z)5}$I;1G3$q+gYX8>ZrLSDum|M+IKPxSKVqZ8?y;Arkip-1+&+Z^_R#2UI)W#EEVS z0!58>T%djnBmE?JwU!rn+ER~K_15Aqjz~kzp&|Sba0QhQDDtxQb+k$SA!%-WKpG9* z*bIj+plTdR-M9zfV5CPoYi`s01Lm;RY(0EC5DZgHkCBB1f|pzA4Y?9wO57qPv0=~( zo+k=f$Yp-8B~go(Mb^TD$Xzhz$1YO$p8=SqN^^^{Iyl*dbzI2mB;4|}6UVFY7#SqW z?;EbiJv(1u{Wb~yP4_VQJ2($k?AL=5#Vk@b&IRuIO$UW|%6w5hL^6LRlXk6jsJCk@ zc@UmUHWrxS!*@MICzKPiA`7vkA&IoJd1Uj<3pnJd2wdh)`t8O{jC%HoT-*18+Z`B* zQx+~Fb5GHl9d}hp(}~6OQ+N^W-kgB(F_c>NTVeMjCG6f~2w(P@p^o7~m|T4T0=%Oz zOnxu?kUtJz=2ao=@B)mjf+ZV9z}k!*wD=bAeoQ$A7WUz>Ol`>5I6@~S2Gb(H8EmVO z7=N^H6mG0NPiIv7aZ5@JAfj_U6sYVcA(h79L#?P|djeG%Q;ws*y5mx~jVG!f2tBVc zI7UGqmhMPHjk%rlz|~T6_SZZ(6)pl3g)>2^B@nEa*MWPnEG^KRfZie6a7?WeQe)kubL9AKadp5BJAwz!;Z3(BUcEao&W1 z=Ja4Lb&NmA`X)hqpaWK%$t4jf!kIAFnM^A^45}){koI;29$m1B*=4X4#lx?XNyDp| zp1;|2`9UXm-+Tq$W<8{(vyev5I0lWMg!yWCHqAC%fqnMI_;=A8IU(gK&N zUPaH(yD@cOEp<&5x__d4(0wvb^;*v`T5cO5U+`#u)~_L>Cd$BtfV1Rtg$Q;RY^X7- zK3}tG=r(@&a3AHn%J7cg73>#n!pVW>@WDU|x^zcl%fAEovEvuYOUUt`M$7TF*2Adz zS%RMuuEK}xo5XMS7{$-a5$B^T_Rw#CwaH4?j~vlqYJTg-(Z(zS7jL|-*2?7GAx*m0Inl z)NY{;jM^7U3VtlckG+y8@4tYoshcItwzKiU%#qNW^MIsTH8ZLAPLeS}@0r!&$yC`} z1k|3SbCWZQsrJ4ey7t!v+7)YoiFf*G$DTd7J=I9)2F;`U>k84Bt>Ah}RMExc9eJd1 zf*U+;M+#T1#T`+@c;!MO_3f!ey}(hhj@wFyJGHwRT4OYqhQEBasSYdTl> z9j^KKmHTkr3VvtrN3*jZXo%)Za$hnWQ!byxlMk-r(mjb-F5845zglp*mnQ!?xQCv( zc>_0$vahM1GPCB{8+rKhI)ns|YbCzdH*&I53W$+wIT7D3aGgrq$wQ+&ns>y8>)qK$ zmIoU{)^k6yXG|OxEdh`aOE6PmL3QS~PZ|GZkX7opxzIM`ddxW6#DB!N2lBn8k0zS7*(NN|yCTzMX{5I>*;CCnWO;n_MuRORV zMR^#aa1Fmd5J5IwmqrBKL?4^6sNsGEPP(aM!e4<+no$lHe4TL4WDDH9M(Aj0RHhR5 zBG6~%HBym&TP{W#Tr!0^DkiZ`_j18@VmueN_88mM^^)`) z&!ZR2-@&4_LlhI*xfIU?SaHf9qWqem)czP5n)8}?zA|J*g61$oFFB?J_odyloc_0(o{QiOJl>k~KdV&_J{-$LD|5qeNjy@?3Bmg>M`h0ISU<^Tq7>;PtnY?Lf_To zHcqS92M_t~!|3(*X=&49*5A4SGF^&T$Li7Sn0I#U-K7I?Ue^r_i*0a(@>YDDF_qU5 zkA~A}iR^39MZDTJ3TiTA&^bg5;-*wHDOQn0vCSE!^xu&Q5B0dXS)tfgGm_2GY94dVec 
z=V`|-iQdCLXunA-#pe^}htEJoPl0`YQgn`Y%_w$${U2Pk={dWhK!#sf707PTPNE7s zFX7_@llb6yQvAFAuk3N>cS0uTFW3o0=aS^*;5aD`WG-3sG4$j(u-4mR*0S7j^xE zgx(rc>at`#iZ52dFk)&sxMV8nyb;8_y7P~YzLSbo(F6EXGY97NeZfpmbGATo1Uwz& zXrZVaZ0qzV&GRDhe(8N?ubj}c8KI3ISqJ>w>RV$WaOhI{<4E^}CT7p>7P@5V1aNVf zLQM~bkq={K_&uKL?3wEe@#t!G=>KsG-#>oAF7Q$SvA{t1H|_-L@Abh=7su1BwaG&M zubC5>aGdUy)x;v_tthoI6x3hXqmgAAyF^Y4R=QrKQ5{D7vo9a9ILMG!m3Rhgr}SZf zX(-al5zrnN%>FVm!h!yYyw=8J?0=U2Z2V1IeB=8c@&ZqeTM~_XlE?8@F@K1MhY3-# zbmdJi{UAnp4s2D>OO#(^h>54X!0qi^{AL`2D`b}O2Zy_H*Pea+yl+FOl(e6bSSQC< z3YpXFFU#@tngOmlPDWq`7jf#JgR3vjY?ifu8F5ppAU!Fx@LX2s8|PMkG7&kfCd^|nTjoaPpR{aU6|d&u$AGF zY*Plyzqq1|{bT#^bFDr<9J>cK9vJc6uZD4Oa|<%8247H9g<__OP-FEFTx^4Jck3%y z|5%w#uup>h+qz(}_YiLMI}CIF8S}Fa1FtvnB(8IiCvzm!xQ_oSFyv>(8fq|gLm}9jX0XyiZuyg!3TvN`juEl$>?K(x}_NyUtEZINROF4%7(IzEQt+zWqH2NoNQBgN@bG%!_isB z*k)&oa^_QUQQK*h{BnjldhaEf!W_r1<9+DwQx~|YqVaeqZ3DW8ErN65*5v5o%h)(u z9QwKr(SN?qoK239MKv10r9b_s`pl(t#Zh0>Y-z$qOL=lcewf)^)P&Yg{nf9 zWq-+{z-SDKNkoGLC-%ej6R@P|D7XKZ8ar&W4lE`-g~InaaO}B|U9-%`?Xy~;#_%#z zIHQMt^Lr2PR8L~V7)#dZ$|CmTIs)ZV41a&WD!*Vrkzc}U@vg2%@oT6CKlDBqC+nH; zmQ_tCHPxEbSvQcrVR_8W90}`|?O?MmDez||9p#HHi}=5X4zkCGr@*$d1^iAUS;`uh zqO<5aIBBs6SJ&1FI=MJJ3o&K0*ALJ3Dtve>(ctO87L*lRfy#5M-V#BopsxuuC765;k=V@2ey* zhBIw&iLx4x?~QO;r50+B%Eqsi0zWYDB5hrt$wVEQ1b2txL2>aue9#$2NAEWvijI|B z{?ALCiR3TNw=N4Os=lUSGvpz4awc7T*n)Qd_UA71S71j^JK-BRV!k$+$VU#rFM)Y< z?0Y6;+d8oAm-=DC@pR7mj2p}ue}~?vNCCBHXUIACMEw17AEWk8<4g9vJotnxU2wt&Cn7wE<{+-iiY2Yv#rrUL5 z=qq(}Zmt3Mt5v<144?S+p;#~{Xs1ILfDZ0BB2>JjyiS*~OVn|)+?=^ZJkk-P$= zcoDwjVldrz_X@<9MbWhbajfR7C^pZ1Eg{kJ0?+Ci_-Y>lb@xCduSBW%s(AcTD~;Du zVwn3?mE?|e44JGv6*G+W*re!Q2%J0*9C!9W-1M33uXp3&VyY|9{(~^;xFh5k$3bap z6tVGoRZ|t7#)L2@>7|1Ny0D5GyLmw0D+Bm&OG{uHNec}4RLovEfm!M%jb0gZ(Di*A zrmej}P5#ayd(AcQ&-;t;ykZHi$o>L(cCJi+kf@MR84KiPOk)GEo5U}(v zm8_ZoGH>FcM@14$HcVp_yHh!f%1^{maWkIjy#_lrrxB?|`$&)ac=lxm2VI`yS*`Cm zpuV>R4+%cI#&?-0qpHi9ejLSj+D7w^>%Qau1w1CV%tN$~gOT#m7%N^uWY$?zX;U$r zgBg^$+rl+JH-jgAQ4BvfR>&b`Q}Mv5B(CEUj{7FdRBs6+&m#ZQdto+s^+mSjoU#lo 
z{j>tqy1&D%(i?ChG6cety~xM32+ZBuMZZ~ICmt^bsdbDd`}{w9wq$r7d%6F#poM;< zp~pEW)6}IGKmB49`h-rZ{qx!0hxedtn!u0mHh^lYUZ%V_4*FBZF{APmvH$F8P};JZ zUszzvt87}0lHow7#yPRmJl_He38K0j&3{zanXMuhg$H>#v5d>jU%yjET(Vrvl-Xie`K`q`7Mh-0N-a+!D)YO==Q3FeJjnW zq|qP_|7eH1dy=3zZy!8#wqnyHSHc@R!B@UopA1cz11bUw>radXJJZCN5A7_4cD!kOv`#Q8}SNpSI}(aw3`cd3jlI+H`!Hk^XdhRxLPK__f^E)G(9 z))@1umNv`z)2Tyg)qB^D;Ddan_(uWSe0HB1`(=Rwn>W@Pr4#2;-DqvtT4RD$ndP`| zhd)(`HH2{WG2j;!&xsgT;QmD$uw~B;vLt*xtTpSvyGm}@HT?xjY_O!J%BAou=^UnC zz5?y$W7t-^!!`FMy}|Kq3JH%EC!6Y?(TRda^}SV-ezlUpF0m1m5!j30RL0^`sk`Ka z={m3}@k6b|L!gp98*_Fm3pUsNFhar(|4!3lKcD>$)=obl^oxnGmy>06UuZz`+jVd# zU7O9Hl@1=7{;*Ks29FYU`Q1aONqpFS`s=7CbliUl?@Sgmdu4NKR^F}!S$Ys!l_g-- z?ntN*IYw-q0C%W-fNf!y$O`pha`H+knHIQ@tXN?V6FA{SNJzmAkG=7X?Gy}ru?COb z)x?))f&_*_2-Z!J!@B8LQMtAfwH3-S(|I|S(2K;u#Yb^KK?=*>exj>X@6g_7;rQ>u zLrzPtnd#S_iBzqZZW8*V{MFs?cKs84>V6qVX2jsb$;u-iX(O&$9VH(EWDP1 zv4TfgT{#u)qJ{mDpA@e@@i@L|(BdKmMvP|p0H|K>h9$+BPEUZ@H$DB9J zud}i+y5<(=(jdu>lzm!L7(b5h)H7v+#(NRJmlTh$EyvOvOU$o6jNVoEx$W^0G^ZyS z@+;?p<>);4K5Z$!-a?odpZQcp-mB)?w`yv2I*C?ioy3xi-{>%BKXz@k#aPJ`Xmi0E zr`3g#jl+sC(JK>dr|QDmulcaz145^PH$byJybzeupWkVqxo!?K{L~Us1>QhlNG|+a z-T^y(WniwjZO!2cLe^6CGX1Z~f-agU4RW)hAT~OH%vmc2_9CX(Z&5*B8y1k0BOj6g ziA9iarN=EeImdO%|XOnyI+DcP0)slwk0cBl}ZpK9#h ztBRnieGWS5ST?JE9d?~viBZm}$oOqz7I})|p06fYs&a%>|8=70Gm#68yhZ}IroisW zJK>i4X!d1@HQU>&z}jm67Ocp2_Plg-W3gFiBj9hOnqdL>4LY{u9aSYo27h8wVjP z0j?epICbmJLfMEwa&A-#HMx8i)E|uC?1NIc7e~gCmWBRU88!ygZP(&LiH)$>^iA{3fv*%+We68QVv#=q0wDY4_et zF6WMLojS9rwyp`eIah=1yAlVl{S@GAeF8LkvAC80GiuXa_&e%q+LN{n@)DEa)8xZ2 zW_dJNYGi@TW(Sa&uTIRvtD&Ji4VDEE0c$tZ2NA8sEi>ql}C@4*d8=J`Lq@(`p<^$MI)$S?U?` z+WZ_GRcy5E;`+#+4_P!|J^%M#rb}E68X;0;jiloh`)Rma)l2-Zv1sx$#Z!> zt{cNPB4^D>#S{3Qv$ z;tDO23b0)GJvmeTiF_?;;UcCFfaw8s2v0l&R`rH(RIHRcVk63)4|u|;7s$}0Bd)Oe znJ?+fkb^(VWx(%QGx;zf1tj>=gqF@Ye(v+86TjY~%FoKlUQb7Q2>b{e+ezYP+2O@& z&QSWMg4q?@Bz>VKQ2Jfw;-j%yyjadbAJ}H@J_iluu4>Iul<5{SAe3#z$ zW#QPew{|ob;Mgeo$ijQVLS`ucfF;8 z{TFV1ZXr=A4+iy46}aA)K;m>CkbSlZup(hSbpKVQ!5R0sQ&hB 
zbQH?m4cNNkW$bsuPcZP3_cSb6#Ciyq!)K@Itg*>oVCRp*)64fDbox}*s`EU&`IE*p zY1{xmXJPhkx&k|9*J9Q(S%WQII*}Egr@@|{ro|>ZBg{@TU`y?HK*oo12pH%hu|n3M z|1Jl#7o|d#&KG9&Mi#yt&V|DorMP-@3a4@G7IS2YG#-{ZhPfwYuzK}prb)4rJ9%gt z*OBuKymxfKtkXlFzy1g4Z&PAj+BH~@&lf>spcJ?iW%jbU1GPxgB;m8d;ZM~^y5sd7 z=I{F|`n(|p<#vfe&Z(mqzc~|qKn8bRv%m>43@keN1O`OxXyc(~Mk_Cb$X4)9wcBgJ zYwi(9ba$kzvN*hXlm^zRU7!)20%UE(a7+s0u)Zz=9?Y{F5^ESxS( zanFY|p1*tuSF*y`A-^7#o;TBb%c8K~M2s72UVu3z6xEix;IwI-kk@vJUTsx`o?3H~ z@Hh%uAD0vJktZPYv;u`n$HL}^7Od%q9Qb}jn{_qTVb`r3!|qgB&8pkh!5)>lV5X7E z-g@Us@8_T3z0Jo^S^YdJq~)QW{vo_q?t*exBQa#|E99<(!j!S8sJ~qoA8*_$xW7bN z(7t;EcKNJFk?LB!>;E1vb%vn9-V<j#{wu^@Z8wmrr=!g^}Q#-wFEmlQq-kb%>aU zY+!VpCo-Y0@6oQ;FL36S9F&=!go!I2Drc~*3B1@YKB0&m3Y;E-|wBz0f{*2&2*TPnn`XP*AoOU>xQCWKU8&z6MXG{1Lt1zeCs)Sf*0P3EFOA{>z2FG zh=D|sV;V-%#Gl~7H8*Nr@LhM!H@8W(btF8wvzXt@sG+>Y8|cwF34U3dF~ww}VCQ0f z*DG9&5oLU5ck4gqzdgbDQhp7(Oy-4@Vg*YIDql=NVX~IT)G19 z8J>YoA1w&5T?}RlC&~Wg%Ov8~9`ezUXCDU6CDy~s;0@pXSyK~(?!gmqaM&Mr_?Q);bxVo^!?p#R9to+zo>5%C|C$+b2-oGE2@G{IfhkBoWh>2 zT?Px*uf$M8X9)eW5@k!pQ0t=!x~HBc@sHJL!B8=lrwLFcG8nx}jAUY7eHweLk6l=POcM>3+f<*bmy47#XzG0P*NS-!Hz zek%{4p;yJ(s)esW$Tyq>ROFNMoFU6e-vfp6k1(lr78y2p$Lvx9oOxo9aqUe+pBF4# z@GhXvrSq`iMIb0xlxWN((Bl+~M`EpUs-7ySsVyMeLQX;D76o?65Dv#%#kUMz2ZO*-OMJZxz1t zlfj^ZFna6!GknxEUZCQp00AN@0@qV4)_>sdw{J)YQf(S|7kC^#2&%H0wj&wyRnLOC z{tBL#r%R^eYSjByKsvKb@vq_&yg${1yBJ`OchyX&UUm^yp5I31Uv(2{uzB2ghG)@SO*h{r_mY+c^3x8(HFVt#a|ddt1#Md(-U63nvXi} zbLq&fCR)SalPpLr!1RZkm>-AE^O>--cwC^2YB#5{8~X;KRH_|P=iY$2yGiVdjT_iH zZT@-Ot4_gA<@Nmg#I>p=%b#|1Z4>>8>go40$;%b%vrAg9c<=n@p7jOcvCD_1! 
z=c9@2lpWx(N<#3`=PJ2mpe`t{(-wf!YIN~G2I2=~F=hK%fxOdsZh7)PwD`4(%@b^d z?oUri>ei#o>=?f5_j3?NoA;q_d>EF^nglb;2XV-29CeHQL-lklpr=s_XZUSIhdNE3 zkJwMKy`IR8uVY@iU&Ax<(J))e0fu+!;KCS5=xAOBnd(3S`k(*!1WV=l$0lc9z%CZ+C^5GkZE|*oi7Izfed-Qo7Ipk$m}{ zO)Mkzuw!O3jJsee_PHcxS8pfb4pw;v@X%5YSEBNhDU zqVu*Dk&g1yxC58NdO2_6F{H`H9=C$3bVnT9m5VhSx#|@jlFkId_ zxE>=bDF5liBZ`k>W5RFRVwp_#yB?w!y>jv2-D=EvuL&0|oWM|24s0&0!rSu{`EJNU z)NJAzF~t%X`c@osldqwFaUao~EQ_+U4nxfRG7!C`!(Nq>W*3%JLHSaBR?7PmIE1Zc zTlBrzyJRD>@uDE5)5TSMx`S7X1&q|&1Ej0u0cC{Q_Mcnm&+Yuzbty&t zmMcuwiV4`?vYx)Oi^aLJhj3V41rjGjkjJf4;9o#CY})+_w8o!=(=zS&RP_>fJuVSX zMs!(NBr~So-b^w_f`Ln6##l@H1twv}S;)E2^@B zGrgoZ%N6o=mH~;$#2^|@{r3iwew%WR+?WFGx26gv7noqsw=rZrYeF{caAKGJet@IV zR@fetNgqyoj6uqQ_+M2t-qxw57Rz|Q=-exGW1E0#$7V2Ieecovr6S)w;QL7yW}H)0 zD0y4aNA{(uvAyxRs8HU52WpIwn zCY~^_#3p;bQ`pB}pOqxh-&5S7_4*NuZ&Sy@^9ApSQprfhIa$kZ=n>(ztz>D{M=~e5wx+{92|t=#p_4*>k)*(%^r(&vrh0$l z?&yWUx2aJy{iGet_&LliniE7k6T?BiX@!-@krc3ezr-pz$P|(+B4Od>7E&YYNbP@p zsJfJ!4p;AIQc%6jO?q;igg$sia`qNO(AO}Kcy9{#r{uxd^6iY=V{6JIGwk50`vA#su)tB|tm53_Nzctof5kA-wyGje+w16&p>%H9T?Le~T!G%X_ps$blr`ke}>skE z3Z0|22a;r_(cAN*;8gu@x^+(y(UwRdA2oOmkfjf)IPrv0ZMsV2-LK$_G;fsuHU;k` za5!)vpPxgssm{dZXr20i&idxSyTQMZ`~Cf7`lmU3R=bHj6lo=LzV76y+f|AcVR-G> zCwktef!m_95U$R+Lu$g7k>I5=+^V1i^sjBC^#iXtmt5ZYVlfkFYC7>0wm^w>Z;7^8 z61Dww6}5{l!56tWd^BQ+N1kmWohiw%?O!L%IF&~-xN9|uf18Ma}gwm(SvmuZA_-CGDuz_CLfoP=mbLr3!3X0rdY%1Jl7O zQe*D{8fqKi%QOlvPZ>e&3tMDd0a`ttK;_NXMB~f=nKVHOr|(H4Hy>3)X`ULIfH7>p z7-*%r{3y4kAp*R-gJJUHBjkA3ZLlr)2%=LOsA<_un0ToaoV6{%_Q5X@(NSPqT|bj$ zYT0n=H-9dwDksl2KA=(R4X9c87M|WKfTnfQG%`pSM_PO7{A_z%85M!!Wv^iKiW4Zt zIYRNK6gq8|gy6s`PrTW14qNdw4&8`{e_DZLz5Fzihoh9c#o~)Gv*A?8O1yDfmhD=; z2QC}_q9LoRxd*ZRG;NFw?|a&S?=~XI3j9+uari1k?oFg`)z(6&>2U~c`$;le5+JeN z7p^ytgXn@~mgl?N$jGAKB*T@@#?1-iN;2cg-i6C>8yk&}{0#-^Dqr!^?nh{JXD4P^ zEPyF3gCtwqfKDA}$*pz^qk^lS8E%m*sxs-6J+FlhBZ>6;Q(JPPRf@aUx&pF%lc}bz z1WKvI(_T4KytwTay+kq~%S4s++@gf)C9h#Ddlx2}UWWIxOF;MCIoNmm7)agCfIItN zk;>*aVkC7JG=5|g<&9Z%)sh>`jtTVSmV-D{_os8qR{-}|kwFcHW?IU^e 
z+4LpPx{GiQsQLLYir5-GB-6(3r1deEsNItcPQ%Wg+b4X6D2m*sr#Ds7pfh$g*LLVq zx}cglA=JS=G!#dbx>539pdD(I-JmX7HTY|r10H$4ocufQfg8syMeo)TZg!LhiqW$) zbe}(?RkIOerw`Mq2Mf5{F_+28zu&3UxJbBnN{H3Ia|^6)-GIUFIQTcm3})Y)4q=Zf zpuJrcG*92c&$_R1_UcEdP41!Fj0%j-M|>9NjmJGZFl}l+{=PB>U)gx!=XCxo>RpBF zWy;X^NDMKGWkEL25T{(4Pg`O*`if`5K4}g|^KUcpt&AAXf1`l*H?P!O-XV=vPlN?x zE3IL-@jcq|WFa^I%4y=`-9hy}&BQ&ec2u-lj1(t!(ZZRVz^S+ig5~(VY0Fbs`)~-% zYDL-I`Q`A|`YS9b*#>62W$4DIN?<*-jGnpoAI14qmSCva##l@er;B5+g6gfuA4!4nJpx1CczD%Byiek0QI>y;91sN@*qtUbo~|B&CAtU z{TVaasqPclYeg&A_pK&uVJq*N9p3}q{LXsB<`@(VNV7W8W7*;K=dkc!FTAiBhCb~E z*fy%qUX)Q{SM?5pmYoF~c<2p`*)Wr5OwVE6WAxaYDH`nVaCLTI<5c$hv86l=W*lpx z#ei-4P52e;3-7cm;ppB-IDFC+NQEgmBUB0<6#zK$3Mu5ejrY(V_h- zIi+k&U3w$oO~)N-IH!VBDpmr!wN+qcoC+Il$ARztop8Uaf-&DZn}*EYKtpez<@w|$ z#QNL;_zDg5>;z#LP_l-hcp*@oz6O}Rb>#kADJ)6KA)+QZ+;N2q#Q1XvLyA2}h>|Q! zuhWOofXg61Tn#n~12E0?7)0BtK+()waERaYj=9;uG`H`;O@GqJhO2k!X?+iBVw_2v z8*1p4DQVo2;sW?_tJ`WZ&qmC-JORc>R?&k^_h>!e861lJ$CMqIMYg!|9?+vM+(YAb z_%Hc2e6L{(mLr>}y+UYBeyTnH-0R@`qw;Xq zKND51b+i zWj*le*%Q+3V?jJKS zawL#>Jw^iesqRN_2Mz36*GjW4Iguwt;!s_fi4kjLQQ_?nuXTz+Han3@Z`)3s7EPjG zBcC$AM4vMXo_hGC#uB(JJ!;T70kV8{lIe#vp+x&Onr;@we6j2FO2lKV`#upJTcwD? z=s6HZ$$p=X7 znl>(bAOs&w{|mdPD8kdXnGmz?Ib6N<6au#Nk%jKl$(F(6#CD1&{yy7Jm0G`Yr})o6 z&q|TV+H1hTG5$^~LJY2b?xlv&+NfS_K+{IVQ0c7)+QtXM>Xvs*jpS|M-sR%Kla6#w zNloJlcPcO0k>iu0n{r;UNRI z(jqj$`W9U|yb2WBFAx!j*`#%?H%$3(4-WFo-7Swj$l3+R;7`+XqOx8HdjrHFbk7|U zb)O*?S-VLX&&hH8U=8CeLtskyIi5ki7kj!kK;c1Qu%9(koM`L*UO$PrvR76q^2;=XA0Dt`I!Rtc^(JOX^xIIB|ZKo%9P~dO?XDNLU&uu+LMm1nSro#c>!Z96i_8n31Js30d(2930ZbPy#4BgHZV8#&{oUZbP zzPxb)z4i~%YZj;jIkVgwOnA8+PMP9GO zzqh=In1=zHt#>2i7mlf(?`ev+r~APyzDsp#+*oeSn0VS}YtD!`O2dW=+sKmo2x9%8 zBn(!*rtddbkgTp_G)}LZzUY#|V{Z0X=T<^Pe(ypda+~(3SCU0b66xws0%{l(N1~42 zBQI0i|by6R<5?Q*%&kT>s1mcU36eg^eQYlkaj5^l9jC9-5&b8{mPhl{? z`*9uqOCn31VksvoM%Ykeh?`tV_a`fJQuvBIetDT;-(TeXLe6pp8@EGa&^zv$QyJ0! 
z_Jx?Vz9#35%isZ@fy=xv3#V_#LGNr2DEvXl8;<7^NQy&8usS4ev?YVu#sz!`-Lp4B;&1Xys|*-jzf?A6AE;NXMGcfL?wcdW4m&Cu#znyzpb# z5DocH0&h5*V%CdsJfp9OnuH#}Gja)NxZ4+RJs%+x)T8M6G0Jq;*HF?^EWlQ|Hf~Vb z8LVdWEaU}?AtCFSRmr#6&~>nhIP9r}eU&kwvSBmXe&9Q)7kS8?3$LVq)lr+y2p73EbZ_UAd!^iQG7T-x| zRl%liOVnN*4#!4AV9Zb@d8m5|4(*x*71jc35*|gIieJ!tcU`VE?;?Fj?cS?_lg=CAy(KNQ(?A{V6ov5dgxA#OMgTI+_UN@P zn2Apo77V)n#9cQP1b1eM3c9MlqAFg+x@1{_^GBWqa-$sw&8i^pY&~4u`hz%0&g2Z3 z8$?X&FoX>c(ZE6u;>T{l{h}PD27jsa!SD3nSUKES|C;mk2*AQODtKkv9u$2ghQ{$R zSfyoGb9t{m>P}lgbw53L9hP_ z=7}$bF;-g8H_d^mYqEtC2O0Pmmq0Gv>LpQ68z3fdA@@z&0s5r;;h{+tophGK?oIc} znxeC0C{T%PUbF!cL|B+oZAJ>dy#X^t5w`JkCo^m85M7)6l*aZ{S;VL|FdEmqOH;R_gpHmNy|qqhDD_bF7Ux8mS!(W zBkm)ch+2R>%%nTXSbIt^>j<$q5e;d=Z@EgsGckYXlIepp@w%HhH)D+n3O62vf!-X- zSR0cy?WuG@c@Hfv_(k_V@FV{MW#E4G&YCTALQu3~4PH4`fpNkIX|Q7^vu}kW{`G%N zUrjj)Kb3y+{n3fg*4;(KJVimULV?WMTgb4yOBUnPX}ex8Ehs+?%RY;vuBacmc<>vw zz9dd;*eLGk7ZZ>?Cd~KslQ8`-!u@|DY;fZ(qH*vJEKj)sLZx9KeSAL%Z5Ltd=n!v%m0b<=(d8ppDi(k%mh-IM!A;P* zs~LX3RfX)Li{K6a!8hJt_-4|Vn&vs1VXi(yY+k9-(I?}m)W31COYb6KZ-vt9A5)3q zJW6&t=EB+et6{bXe+JJOzP4)fHDa6fmq>{8lF{I0+~qTcM2er!j*HHO#$-J(oi)gb zB^JSE?{yG7@PX{vQwCS`HbKY0&6>Tg{M@-g6oo{kt7Dy(P(vLpW(AwZ{o*s>cY@Cl zyPFHar|b;+9gjxV>NS5iD$1YRv#`C~6t~=oqmP$oqO-$Ca_r6nvip82IF4(8 zrhAv#**)j{+SOpY-Zm)Q<4i^48~8m13m)8m^g~oA27S)vuRlTwdv=68%TfaO8^XBg z@l^U{<{S*R)Wpi83doa?xCT>gTy|d%_t8%Z}BDgs`TYI(aG$Ou439Z(ofpt*C2EBzwvas zVl}r+=LMAvK1`})RzSn;$D}~V7(|Z-leMO;T&4YI%C`?-m)~RBy8SI3&{gEz|0`f_ z??{2Xp2d(~mcerzUE%HBB*-Yzg!hu$ZqD&*aXJ7xR9H>!5XKe7BPW=zZexAU98_Mho8a7?0i{$>RdFOZ->X!MS>fA9fDdYtXxxT|CZhvsSrHDX&Xat@9{=wQI zX@POdAKbU?4Z80)rAGh$kRa9T+}9x^QfR!K$#}=JiJd#EV}A}Y+m6nqWruC(YnO4{ zbl&S{m)FWk{%$AEF>1{8Pu5`JIRzd#4>HR}yy3dODQsTuPGUA|f>f9ZWJepp&a7Vy zS7`{c^~rR#ODR(#Y(g&dJJY18$4TKfjyyVHMn3xU-Bgcp;5UDWSs=a&ob5x&i$iZ= z`js=_5-b5Krv;$Z`T*`cN`;a0Rp9?{549{GC5h7|$!X@Ud>IOUQeh_6z;mx!&z^8QzMfqjVpz2 z*61la6YKskCNxX}0)zA6_Ut9(Lst#6`LZw(Pnkq(9wm^&R3CpOexTwVI<&~wnA-gM 
zL}qAJVRG^(`bs4PbzjcH^L;)rI(ZJ9WHy2IcQ;5oFHe{6h_sw_v4SqT|$tq?Si%Cgv&MZB&rBtC*LJTGSnM@LO*)8Q2mcUytYjS(YzkCrg4 zc2caN=``3C_?P_f5n)d&26KC)MA+uE-Ry3h#|{r22L|QYFMX2isiG)s&RI_DP8zZ! zZJW?WZw5SEqroa~$;GyEdCGda!kOzi&BslDnUrT`Iv|KY0mY8VXw?2SN)yqlvx}A*q-7Q!*b_k;vTf$G*Kjg*1h3My; z3N@OgAg5GRE%SUCb9eVA2$cB@@o(0$dD;J9jCUR^xjdaclKGFs-<{0%-2O!zwGZJ6 zXE~C%P@XN7y+M_{XTYp>e!p+Ho%}8e#8X#uXz`;DoQ=9HiSE~e4s%%?h&%)b6h-0L z9}$6u+9kAVa0Rg#L)y6L6J#qqq^|2z__M%2#!~we^?rE*p9ZEtP?;z$Z?mM?i;jcs zy`yku`3J(K3$X^H;oz{>jfh7Yu_6atpsXYd=C=fff9j%W_vu4S1Y{Q&Bcz)#^#u{D2LX|1Rd#^vw@4ZHf8m6#L z1BZz21Oc>#3$f8o56HH3D^5CVBV|>tF&}DOz)g(j?@tH;;~S#v+ZQK^OUP{+uUk$Y zt!u-IlcsFY*D0)P%w#siYAnpxyv}?M$s=wTN1@591XlVhvEQv1qlcIz49$rk@ja1z zHz5R%i6v4OOGgZv$T##|zS9z|DNN&BA+q70E{0hTan5Qv5NkaIkK7KUf>kLIzx@^F zb;%0!|1D)B2A{#kAWN#UHL*9)GCoP?zCxG8_$jOSVEu)edm#|4m#UdBP`OVet6PGZ`mt#>QXn>}qDc zV78(o=6O5=sc#=K>Q)ar%!tD)>XKN~*vthUkHb}a=Hs<%GPL8}Rqn`HzUNZtf#3Rn zklf#@0&Tm!WICtJ9+HJMwQ` z51OBChp&&Mp!mru>^@k>{XOl1(YdMA*J2F(H26tQnIjaMynxLWU68u(0pk^61FJK> zkTWhloU+F|s$I%^JVf-s$H4(M1>YmK`!);Cr>zjE|H#JX-qp}{G!^9buLnnY7JPj+ z;NN+YbTcV~xP^{28x#0l%iK)*=kODNp|tvQdnibk=>;@ zk5x%4At^J)a?cyTkr%!51^*rv!Id-&cT_FwlU_VpQGbpP)+(NNH3Grqnd zvwcJ1LjHRq)H;#<%KI&Dv<<;wO*imSsyC9qt3nAk50<@e(g+37PDL2c>;cD2JP@JY0X$Lq78 zVvxW8al1@H_lCoK`^B*AM=SBE9}h1(_F#ulI;tHgCf^!A5dV$(f{(ekXoaQ_E#v!U zKG%D&a&Z`D>6PKqBx5$)Xd2yr?-a?H^A`SI9)ver@4*Z&IaYR#D&AjG4q;2}**=4r zY*fudC`3nA_pBK!Rk#XQk6Q;5E)+weTnFvem1BL5-UP$tuSjoqFSyLTNX+>=m4RvU5}R^+@PNm z|I*iQjQLnYHfY{_4^Hz|ub`$$dR=@LCQCYh58q=ZlEtv@#}I zV2I|%SGYYk4%NtK0j4&5hfmShc>eHPrg)Yh_v{i*X7wHVDPx$9tbRlSeRH8DxD~f_OOUQyDI)amGD=oX z!Bcxyp#SzfOWoKVOk~p-Fu$}Kj$GJ9gl-DKBH2C?+#W}E`Fp@$KJVI_U|GVJR`uuUk@^6SHP;SQgVFRG_W}$!@X}? 
zQF9c2GYuBIL1+T+H(az97S0l8GhS&?SD}8AIF;u)XH9{P)*HYj`44QBdj#)HqoDXg z1xRXT!ARd8m|YVC5#zs-38x=%V#3RaLm`8mGu-K~FYOlklNXbUbHRX9;z32tjqKi< z3*kq8!}92t@KiP##)zu34q3g>C%ytEZMqG!WuJrGA0^Dmnakz4X3(n_onbgL0MtG* z@HO;2S=h4`jyV2D45sOmYrKyk|G|rzhiU7;b!sroY}4Y#7f)uQqBF3;at!?5ZV1A^ z^1#!}4TfJ-k!!1>!1hK0U$Byfn3GH4fu}lrGVg*v&V5jzc?oVk9Ri^}Tc9bh18Vd1 zV8Q2IU@e~yI|g^af~7o{Evk)3Kb!`~#ukBNFawPu_2isEIXn^0hqzD8Ovth^aOsUD z*}TewO1_eV`EExbc+Lf~`mZhD1*j!Wa-pz%Fd6Edo{)j&Lol4P5BwJ9gVoM37!xxY za=vRr+Gicc-L0F9c`^~c|L`U|U?-U)aR{;}Z?{qp;D0}23poB3$mnxynT5bCN-{vO)4eC^5`XEx^OZ{ zl)TOttsARjuh|lI>;;fKbQtCwH-Uy|dsuz5n@k)ICAMdLsBmi1HI8P@l1f>9e66 z(sOZ$RLMUkFG@^cZ?_sKUpxzekFDs3#7?5;oChffRs)In0$SIn!dK@kSS2$Pq;?zz z--=yexvUht-|vM_X5Dmc!65hdxLHkyWjUVivEupOIk5VOEPCXt(9gUFSvT)35z9zo zez&KCYET9lTwVdiFPaIwlY(!hp77*z8u{az#L0N(K=HKspt14-?Ap=@xm6-;pu}uo z7hVO<f~#c~GccKDo_~4I=ckTV?BCMP7m&~YB~;p>iLG_jIrvL4eSW>HCWuFXSc++=6+@|Y(Kez!q(3n9}?4qA+2!Ci6@9D1z{ zJ)7<_Kk`n)Ju5kwFro`$Ey}Rf=Z{rjt_hs#62dnpRru`oO)8fwi)(6CpgS?01}#*x z+PJC#s&m)F?k$?Ia=bu~6GX?qGJBg>HK7Td|g(fFW&i3RBQkSO* z4Rd#b-q?P^>f)DUQT5X>WzSRZzpWG6Ja0b48zFSJ8gNf$yr-~I8G`2jN?wt|o9Yv}uUAEs_H zVc+onfOWlRV9tvXFpN`ToA`|PGFg2nI&qH}eQ}0corbV|@EDA|iiChA0d&Or3wasf zhkrgDp$4`S(c!Zq_gg9oBD$B-`+TQq=I#tqZhMOa39W_jsO_-ih78oNH3KjHetYTF z2yndELN*Rr!{e&u(DIyRGS4=WmpbBf==g7Lht6>*aI}Qt1F|3|8cWC72a&dQE@0Rd z0?+*P!Pg-i8cKs;p?x;^&>Tp)QVrS5CIj>C0`zyuvJf?yWv6@t%Q@p&^I7Vw_TK`i zNKj!#bpAnLb`fW+*h^ClcH+m!?QpQqjk=|crHZp3lVgz2SgEZb2K8=&rAMa;+@c^N~H0umz-IOX2sHxdhsqaF$vP zS=1(uD;^nPvzscbMDAke4jpQ2a0%ZB8&SF`H)fvrlpfL*O9kgW|D zyz84GFgJWdm-fwLFO{aU&n`A$jafc6ZXPRmx+Z|V@|NeU9Ztt{j+TOzkbmuawzc57 z1^@2tTO=5nDlaGu8OMK4sStBzlA!6KGT2wAQonHu*x6-FjVDxKmrgFtlZ%8|P212+ zyMtFODF~cvTm*9~WCU-27x43&FRJwJVMklM@VnrQps(~Qc3TePwu}mal!T37*Tm0w z{p=t(>L=jmRDnQZsT}6@$+2~&>ji&5&SQ6dz0Gd1`%Vv@GlttS%^=b0gF7CcwlY@X zd6344J-w<8+Rym1mv5Z{=3cjeKQYiZr77^vSDQQI=_n|%Wm!AT z+jzZPNg%s<9JC&az_tmK1qG8kF}=M6LN80BvW6w3*yal6X8y*VAJgH=9SK3jmpeFN zt2Vo1(NuQvb7@v$o~|G?)0B>eii6ZXOTp9yiv{DnWq^)f3NB+ea1YFKQGH%Au3NfQ 
zVC!att|9ZF5>A3$${X^%dAi_qaRm7$;U##vE=o|jw;CGX&15TH4ho+88?hVecCu5B zOS9XfbOkYP*SVsy2UPGbo1G`4Fn5!^jG#AUlAztGMX*hOHoGK!Hd(ML0^J0@Xdami z6Y^%jj)Dw48%60MjdASMIk7~|`T$q`pbh5z?4sQ^=~!yHOrVh03HyFWpzEYMnCx(g zh)UmKE_&+nJKYZ`KV=f`-z3U5zh1<@SL?VrUWM@M)Gf5>kP>W4EJEp)?O0?gENF4` zVSjA4=lj%Qf|_$HsNSFye<$&dI_Y;ne0~$R=++WJiQOuJ+Ji5+f+({FBegL8(mSyJ z_m*TWh!o6zrF6YZtXE*9RVWa9Y|HvjO<;SzTw-S_B?>k)M+%&FA7|Z_E(@BB^8~A0 zZVLX4QJ;HRLvrrVS7%w*tE<>I5$&+*kJwzj?+N5?##VUg*eB?53J~~wzFt!*QU&>A zjF38a@~V7}>N+Iznpxm5)AWvv%X?37@S$Zx~BISpjqkP@r9Ll$gb zPl82B-|5G?nOOg+0=GTVB)1O2E=3rKTpT;0zXxttB4$q+6}aVY=X*&- zkY#z96*;L4p3Ol5_4BD9oZ<$P&dp&vPOHwXPul{gg2e=4!rQ56aU$Qj$^zABQ|L7; zqcW0?z}!^ELq|jKTU3~!H;@r*S$~w>WHp_=d9Rgy=J^F~izo|nAM6v%w5rD&HkpE$ zC*uX`UtfUs;>~nx#sygWGz<vd;K4i71sg(Za2!Okue@~GnHgDZ&9lh@v5YUM zJvJ633i8!eWp$4W_mw0*r5T744)erGHN zBizU8=!vF$cKIFa^}7>p{aS~K_m{G-tlS0JT?Qm$_$V2nO9U%3yRj4g$IzLuQ`JUc zn9PJsNzs5rq(Wl9>xc%TK^mk%1EqnA$`B0{Au^N@DRZVuGM)XdBOw)%PeK$56_p0f zk#GM1*R{{SoU`A(*7MwV4;Y^jV@J2|;8j_4avsG}EIBB`n?2Bp&#%r9Tpp;$-4Ri! zE_MLBmWv854TYmdDDo z7r2!B6Q2IZ*YL=}Qc$_iQ;?BR!;`cANEx4lcwF!i)!GWMF^c2%Oa|WAOCf0ISfT9; ztfA|bJC8FM3-Xdy3uJ=xNYQx(p0_!-&l;A(&nG#CL8m7B<8wY-GOS}gr8gnFUyON6 zxH?Zp0 zdmun4IOxi-AIR#))x@Ek7p!|kEvzK{hr+&Sn~4^#8y7n63fgZ`(RjJ3WCFvK~W&igLM zT&auU1$zhy+&(T9nAllz^T10$WOD@ZN*4s09v1}%k|)v9#&H%db|Jhi=1RPCS1v%! 
zNlD&pcQHJqbquYZ>I-Jy-VH}?rPCYd;)wdJy@FG^U8vD_hTQ(f@Cu%$!#8q4pq2T6 zUa#8B>)WHm^9oXhjm`h?*Lfp!ysm*CZ*7BdcYDamhLgzit$~Qep}65-Ax@51B>3dK zTp(_E2F+GY5K!2F70`k^7HA4QV&=oUfy;Do+dG;g7^M6Wj)Ql73VvKWMysn#=&|5< zdUHe!>p#y&pVC`2=}s?IKmL?+rKn)mK0W+bIS&hTLg>+PpQvg_a(h2yK?ru$?cg>`5R`d$$swj)`f5kDrGo6-kv&LsdvbbbP zCv`h95$pf$N4b_5WJfNcWw$A670f^d>%F*@%iVh0$W#j-h{(9=1;ZO*0>L5P6wG{8D9zGuj1w@1a-ZUa$r%Ich@2ooK{*yAyPN_7ohet}=fmC9rwi0$ldV zlrA+{N;Ydmv0rW^(~n0!(RZKc;+KZ0SZAhzv&O`6x~34?yHCZJYei9J{2@5OjSo%4=?1v?KJzIoZvV<|HL<8f-^XW^I zT53|wvCTG4#!eYys`APnhvtN!<%KSGBWsC)d;@gfb%|PJCo;1d*APdi2)bFD%e-Hg zX9o@@Q7y4B@~rL`NgA~vVFhJ$N!L4OQRa2JY2hh!@tcVIUnkR-CtpyFp&aItdw1r(wajL;RX}2G!HLsNy6YbQriw{YDmI z!I4Z-zeoi(O%KI}yQj&347STeRpVC-+V=Brp;`sZxDq5IC(W39>c;w7qT6US^tR6K-l{->s z*J6uev#s#KOc|68wZuP7D==J53+FwSgSO2Ccm+3!OzUj&?f6!rzjzbriIu=NuN6>l z(N{7@Xc|tG6+@niFgke>?3Z4QcVlK?#lS*TT*jc^R|B+miKfesydm$D8%Y2CWz_hs z2CHIbN9JrlNv)G2!J=0L0`Hl!^$P^-z6veKj%y-g&zw-pzmRqZoF>yI|1kH8xJ(1K z>VtanGV*2Kcu=?_%=N<-lKTxsMEs8w#}00wuQsg%|D!`>X3h&D2>wlqXNQ8rjIZR9 z>0>hI+ZpJP`bzvy3CIIKKa>>Kr!$pAu}^sihsX$Ha@V=ypSTrR#r15On|$c6K`+h$ zK`=hx6Rk=+%k|<%`Ta@jFi-t4R#d-YFD`wAM$KGq-RvdWaOaEp8~W(kq)T|l=`PnV z?4Z#XyQxaU6uh({7_sOMN~T4kUyB;9UGIgz{vJkEqs6qD+p~#Y4W+v}?7_Mxg`7?r z54uC*be5kn-8M6j{!-3jgL7m+)+Zh@mOYDPv_Qu zpn|5G)FU>}{NR2oaNly7>V;Rc2hF$9oOvtApVVU3Ww$GpNJ*p(n?Ezz)!O*H&YZ5$ zh{K!X!jS0b;t9F4I4_04t1tdg{?JOg>rF3PbUm4V+i{!GmKtD=X^fI1R0Q9OonvY= zgdwLb7(@47L-E9mI6WZ@Ki^KmW|eI87rBkrH~i7^a}pkN(ZHFVhA4Sj7n@eRr3+eq z&}m!TxDL4%PM*9H&p-b~eR{WHsJR_B&$Pnmg`xN)?I6Zu0PZz7fC;i*`2CkB=GbM> z#y#6`?q5;7yTA{Pa0+hSybM*YX`#^3Kx{B}LEV{asrctX`uXE(IzDw8eVkZ8&x%*l zj0N+l>M53aBsIYRVjAbt3*bB8DD1lBj)VHqpPj4*!-)m!}^qH@BH=kJoZ1qtbu5 zWPV5&GqQ4=fc<=cH0T_mqLH2C<&ORQG>y5q-q@8|b;wb6k^GgMMVvo zN`dC+ct8h(C;XtAZ>;gM<5CnJ(*jAKXT)=f6ZyWkj-*Q5H=p`jg4y)~y@iuC5|1)ky>2^dAsDFiQ4pxIjkR0?l2n?xGt4&%wlp8PFIT2tO?{iHjPN ze|sardQ}%Zf2#xoZkA+sJ3wq-At^a;LazKf!DXOD@Ig4G?#BO-^V_CTy<4lX>x~F# zExQjpMjT<={2}sc$tl>^90?i8o0tUqBvvClfPCln*$*y@W4_~I)KSynI7%t#$}vl+Xk>1L)e3MXpygPb_JOfEE 
z(GO>Q+fUOUB$$wQ3B*z&hpbK%BMDO7U9+2dt3;`D|f$!@WxI5nqo_BD3BmPe4G0lVGS2vj6G6cyZi$T|> zl*r2ml4O&q#E((KlW~gFXg?2q5AMSo`hfrGW>Ssid>R#E#3sMCWiEC`na|b?A?3eW zruV%S_ZQo)3d}3K_dA$>^qPkN1mt_)lpwu5_A<$BT6Eopup( zIp#U3byx--V%!X%SP3*uw}RfCN|-l39d>QJ3cnJ{h{;ukj4R^!0`fJqQ}h+vZheO= z*Ly@khO$;wY1G*JGK87&VKR9HBFkp-)@MxNSuVK?f4Cs|X`5VdS$Y#(=PUE`n}&(9 z-&AOSb{=BCPNX59ufQwc>GX|j3SBqlF&R8Yh+UBZIM~!O-`0yVk9y?DxvQ$??NZn2 zj}@0mxZQa=mgEVarj3vwO>HnOk1;Ddwu*UW><-6dL&?izNvdLTot>81%T#@F!hX&h zs8czOIKK?T7DY#tGE;ZuOo}*aV}2J|Hw++YotH28~%Bi!8Ml; zWQpw~=oXG3`-^ML8|{+dOwEw!gI=^1#@c^}mMY9XPbZuDkd17SG7srk}8dd%}3^>T5?jA=sj{55CLm|jT= zcXIRav1qE$5<`|uk->J~a4-mdLy})ufsoHE^bYmHR+-bR!KztAQ$`%y^v;2F&Uu0x zXT!y>!SvK}VJhwLjb1x;iJms}psGa=iM?zunOE=({#z6VJUc1)^(7LL`OeTWe-LP- zJbC%R2s+F7AbHCl!Ub;dH0n0Izq^P%2xBEYp?BudF{$|Xj|(!cdvI9Y1FTMD!Ty*K zC^>W-R_y->mpOle%ldt+`QMk!?CCRM)4Ei0I_w&}8DB<3Ms|Z^-zM0Ts08h>8dRNr z5W}#AB>8DLc-*T3rJiV*q40`yR^0%#xGms*KZh2`elkDgBt+Mrj)gZy9bkR}NrkHq zI0rn28C>?kulEvgQ)zhecN{ja{0f`SOY<(Pj^lxv5O2o)8}LA&#IyK)7gpU}2W3Nw zTo!)^$JUMj!`^dHX~%<)Q^Ux@PkQiajS`Kw{>~=ceM}Oqzd=Fj6yEIX0x-7y4<5JF zLh7c~@X@6UUT+cOMeJ{fC2vRB>;9=YllehIgl^I!bDi+@ENQ4W`a~Drw8o?-lc|HK zB5L<567Mw%j9QHm4b`h5K@|re5z<*#dwb|E(S?UM@y1jAKAXCUMz*oft;!nFl;LTn*D_2f`fp z5SXri6g0|NxSh^D$6Dgx_Q+?Lb?qOOZuKI1G3UsmjpAUSmdBn+;xe?4q^b4QT=NA> z8mX2@AhUh#IxzlW4Y5_l@MS^}sCshuV01sI$rr6!h9$y)PUeUlc-69GgMD_30|s^aQ?4E9DFeMg!^Il$+$Q@NV{`|1irXJG8-G=g-I4zPq+?E z*3ZGseHWD9w1ZB~aEKjrg@wnBpe5!E{GHMWLk=ZyZFB&n^jB9k)io92e)l#;cp#s#*NlSmZNlK9*bS>rs^O4x8mKiKB^xI{VN4qqt<@2kU;l5txr1}W^r7V){T{E33`deY_IDc^Hl;N#u?0}fobSOPUc>DeHIftD% zZ)MgOu!3+1Js(GuRzHQs#&P7n*c>E^<9L#DeDU7gcGz9yRM^X?uqoqUMGB_ z=ZKrbBFrbYAL9$lo<>IeIJ)Sc#wmX>Y5G7@_vL9GAmG9ZeKx2u2n?#C(|yl#pnphBwBU zx7e>kucRQHD&<0R7aqlV>QR{gR~l!IRpE~8RH`)fB6`1_fY~!+VE3}w=1HziWS)~7 zhRk)Ok7p(0rl&{fLB2K?$h$)2Vq=7#;-DHh9cuSpW)>G{v5J+Jtj2vw8f551I``<( zfXl;-<0ftD&@&IRf~#qR(=v1s7sK1O&7@7Kg{sC+f}^W?8Q+Ek?DF6;#HmD3bg>(~ znLhmf#97c8(}KJCmI9VJif>*Gs;a#eY^O6tj@SL&$BW`WHb&J!OUjoxSe(OPn!kqVlE 
z9$$`deE=?J*DE17Jog|t8YeSub2(*RQ7mbtnqVs)153Jcp)YX>&!u1^kNwgD54es} z$;M2MYbyn_UV6gdr2BMeW*lVPe@xm+{ov{CG9<_P>8FD;@VI9@d%?$uXlz{#UFJ#j z^6FtanDv#6X>(_%f=KS3M?$anVVLyO0Hmk*&@iXjzkKoKnR*(v(H@OYGMxAQ0)5l=g$R@isD+ra`C4H) zyqaZ(Cyyy$wZ?ubymcXbKFOzxo@Jx6ts8aU_m&JV`$jV+=-}48VHzIpfch)N1?m0z z0v(YB+%~)vm(9*Y&AflO-a=I{xi20iqqpLL&ofb^`UOt8Q-X^6MgsYjZ&CKtZxr|K zMto^4Q11{D{Kyl9_NiK6Jt{_nxHH=(nM+{vB>`-I=hC;8a*V5pI>-J`1fAX%D9y8l zJ=^wy=5;L?&Y1_>-}Dmq@uOtlo-7iz!xfZAi@`oN52nt$$>kxZ^K8qi*}w76(O*2h z4BKCmw=a^w{8|s0FEd1f5^SME=nZ%cNz<>vxq$BmSy^#2$UL9J%#1z*TYkjCf8Ix6 z$$x!p_xodXoNGB<(;vt87ntI`^{1)HpC~#_eLgMRwHKCEF9h?TEzmZrhs;`KiNRk# zndj}2f5|5u|DKZ}j&WfAFp?xX%7YDa1t%}ROaCly zge`}MKy6hde66X6&3?ALm1GdMA0Gp^@ee?F{0C@pGKAeDf4H8oB8E+J0ksW{EZ?Jo zuAIl9Bi56Qt<%Yie`CzEH~Caf_&S~~G9waSZt+LRD`Mi=&&0T=kn*M}^e5Xy`WCd3 z;+?AGDaTN_=x&N{Pv+z5v{E`QXUOcTQxN{#z~Fk}7V;)34DHfa;hh!raNv~>oOLR} z+TM5Ur9BqJ>2f-kBMCu?c~@|b%oRLp+=(im((upLotXH%5l<{!$2lhtV^8I5RISz$ zq;dSl3^xm0d0Q4&FZ8CawfE@q)NukIg^9TIN+gIri=?lL+UXJ3>0tJC3nu^J`1Yyq zi1hjq^GJV+HWr?QH$EN{0%bsCjVSM6QvjC>9HZdTM;1;PPn9-t4(LVusmHWgxXJVs zT`tu~I)Y!)DI%SuX2}k+#af@=xg`;1mu>QD{ja2{sU^;M4a+Lgs&gD-em7)nE9uZ>~2hfnC?~J)S?KM zA<7tQ@QzG8JRkNL8W1T<7tYsphM9GH31loufbnmSkX*}UDDfv0u20-egtW!rhuvdh z9qdkh9KMom_S&?I+sR0|7SX*Aduhd|K?r+%f^Jk=jbSTiL+%_2;_vT5$H&G(lFdS{ zZyXCke*z)J=>`l;7=_Bg4A>p&3#G}Apy5|Oec@q`C-?L$6wP=@9kSbLR-Zl_+bKk6 zN6SFm;*E@MHs`N#%7N=gx?zLbc^Kc41}NSSvJKvlBM}a4&NDa>AB{!2Zun^0OH@qU zfyN#ISlqK1*_qy$GwC4q`HaI+X(?(Gq(^s3%Ryf9S^TK|o`z-LX6H|G!bOMo(#s`& z7=2BS{PooV^#fDc_6_vlH;`&OYo;dXC*{9`LVtkU3MT1a0D?V5wOQB}ciA zrNvITxIzaOJMU&@_XoqICPirFc<=0J4^gUkOfu&>F^e-aAmNuf9CCJq^=1dm++Xj7 zl#Ai;QMZ^MQ6Np?u62;8d%wwq2bx4Xm~)yJ)pPve2dH$TjPnh(;I3=!==MAYzkU3U z1&&ELs+@wJ`vQqSy$)2SnreS}%MQr+L+gP(uy&^h2)(*PH<^c!jrXlEZ&?y4?BZm! 
zGLm@s`z$K9A)5I5FTx$6xA2}wG~FvT4qiG1;QJS^%~uN_HMf|MNYf8qC$s$*mpQNh zNLO{)RxOTywC~<74jtjeBfwDGS*>(X8%+C0fPqD zK>p!ua7r!$hsq$BOt~pWY7(8fJd9@V|54sHV=a13JIj#O(Gb2jly!@8z}n0Zcu{A;nnUu9Z2Yv3)F zI{X)%txE9eG)GjAxkLZBCy={;HL2A3bW&h_7#IJJ!dAnt{MvW}^0clIxb!36=d(Xm zn4OQSyjPGtE4gcES&DB`*E3}f<59~&3{(y-CwpyQksaYeq++`s88VRo@s@5T&g>28 zc(|X|J@n^aYc}`+kL3aeaw2V5g*! zfVUT5^X_~|-keGXrewmF-J6K*e`{$~EsuWto=?+$a=f9ygK*N1z4~ zha{8ens@0ixoI2}-hK+t3`)UgDv}N}Ls-P|9+&K-U{>D?)0NC%ao{0XGbYKCb9v4! zKy$%Zy#vHk7D350XOOQM0HM?RkliUx?%N)M=g$*RM)ohWu6Z}CDp#VtD-}ttf;DCu z{9dSHgQHN#c|Cg;-ve2z6j=G66^K4Q3SYhaaE8t}flX~c z%74kk@#cBBF3bac%Dr*gK>?kfaRqB;YI5C3J&fw}!GSr^cyD(H=BEr%&sBzaRyGW4 zd$KU};$`eJH^gA|hg5c5AGVIg;o29X$&U)E@eDeiGd zq_ra=)byniR^D2Ud1tIK+$;)pd<$^s+dk}Sjz+^>@!0s?4!2ae;86o*TsBu5!m>=s z(vuD3So1Gpl@S5IEziJvUvK#8xRKPS+LP(4)OjQAN<60=ecsSg8Q$IPro8loZ9FTk zD;K%Wj(633G0$!~*L4|t1x7CLJf-1S(5z!EEjf_iOTh z@Zio9pliQFo9+ORJ{ew@aV;qPF`?;BVPN@A6c_*f&CGo%PfwqXBp$!Azz1KOD&KFT zeCca=Bch9*3AH8V&0CpkdL6Vmr-P=Ru|f8c9oGZpGL;>V;l*eS@P{ef9O~wt2~8Yh zxCmtL2=SiUaNUrU&v5tBZSx0@bHG*Q0KAUe3;#kAVD>jr$TkdTS}%?R!RJbPCOd>N z-K~x?VWN1Oijcs%Rs!2&p>*1L>S-!XT_Or`gH{2|kV=3826(Q6L!Tajerf=e_q-vq_(wofN()*)tOtqtnQ&fv3g?=C3cu9&Fry}n7z#qT zGhHEstH|)u?Qg@9+H0`uLLO|+$>vxW5o;O|#2GaC@gLQ+k>vK+C#XGh29osDP&9c7iR^16NxwD0`{fNd*jQn{bdCx{k0gME zggeYp*N5|CSuo$X7zVZf5c_lSuj_vd<=ORqqHIv;u#euC!q-LSSi z25udU0{^&Yu%s;nl0@h8`e#Q#Nklp*o*sbci<5YNA8PQ(@jy8H_$QG&gxjkfu7F=unWznPj3X!CM}?^Escq-`y~?In9kZw*-HZpgxJ2dKbZk( z6>5E48MgGR!9Wei_&I!?m}YY)k=sohVODt{oIBx6ThvVOXRieozMBYJaD-0R8;2oEALvej7oKTS#pM=HVP|9l z+|fVBdD|!QCfe!pCX0N44K+S6!R94AnOFd!>5rN5Yz;HPVmihNiy4L3;JLEL&Xf{g3A+AaJ_a23U)@oz>X|fV-o@Mo|nLwPds?l zb6(4<*Wt_pu8Vm}o?conAY?2G?aaB{>QX6u+#iG&#TfEHLXiB>A-tv;?n( z+|JYNkKl4vF<6ywsSZ5E&1=$5slxM~5u#we3~aZ#5QkP7SX@^Mb3QKy|0i{%`k56m z^t(glB)Dt6y_0XDH4_Y~ddfQW!{`Q=@fc-MMB^?3=$leS%?>G8iluM z4KUBR3U()nqE3|_iQ-$4b8lB78)_)s!vkQZA z*(xPfc&3>R&t%I<3%3VWS6mGBex?v9YzoaAwvjzoYauSel_rw8aH>>@Njn$F1bc|{ z#N+Z|_j6ZJH4KGmJqhr%B@ynO_klT=Z-9B@LGazV19h#=qJdK`o{fya>irWi)h-B& 
z3<7YEOA30xa@@LBM4%rmA&BZpMxih9#K?aFhDvy$a^Ve3-y*<&u|c^1d@QQ?8X}>; z%_ZmFp-rWKX!uj4QQ6{H<+z5e+`oiqcpt%%!FIa&vKkCcSq3MsWs^ps86a_K z7S)`bL<@|L!L2Ka5YS!*j%}aG2ECI&fdMC-6|m5}4L;O2K!DpmDk)n|nIjB}@6;s` z@(NrU?;%VODkV=Q_A%233+VT&d#L(A4pEv}NjGfBCr{(gv5EFW{6`{u=AV`hFfGbp z9Z*Xny!~;BtTWD6;$wsyCwa^%Ltk$8Qjyb2*EVvDnkBKY;FTGiXZM1$bSC+{a|7Kn zE1skpnZUQNA~4J`H-%YIUX||@-dodYybE>1(DtDXB=1V_j)}?fWF!{wocFKb1-!om z!M#r4lA8yz9|(+`I}U?Mb+9?)8tiD4hw`6gz{Y6v0uMmQEC&XeD42C+|q zVAD_(c!ns^WqfneswfOK*7u3+rySyQBp6->{)A2kd0x_)Ur=)R0lazo6nBtR?;pQIe3M}aqm9sSv?Sc7=RJsT6k<8vKL;|QPNVB@(%>>i{cgcBT4HY3DnZdnzZA z?ibv?gkQlnS(}1m>v(YaqK4UG6QE(uc`(Unxk1w z3+7O#0=e}503G*8o*ufL&V>HdMzuwInC)Wvn0H(SOY@y+(dB(O^Fs!sIcEWGO*ue= z+= z=2oynR1Q@;lIckEbvEZ;H~q|Y^L*#1v3J&~XCJkW-hD()nq;xr%s1`=QhD6 z)AKMC$mHa>sy+qJRa?}^trxL6Fu6gh!<0O*b}cWFyHEe=&!qtY#O(H3Gc}xBZu$PO6zg> zt}}@)`1h1eaGK9NTzQhYv0MRn-JVU=X)k;DdJunU$^zEspA_HYl>sCaHxSv%$#8Gj z0+(@YyL=qv4=xY7j?TIvxNS)@ z@_S;jB5NNiOe?{ejvvZYUWJr#Z(Zg>RG-9?QgXyM1iqa-OR{#P5VHf3+zRI`56noJ2(9HZ1#MK6 zT-zT)RsIH15rg&kl6MiSH>KdwNIzUKKNb6*72^Bc{kVu+!t-sV7;HxPdTp^}2)N8@ zd>u(FGN<3R{ANX!3Q5!DKq^sUOb*U)AkRlq$;tP-iS)ih)K{PDX?%agB#kwYRHa5* zG$D~(EPP4l-0P)!FKf%`O(d&dX`tj3Zr+!$g)9qL!*)e`;qgviVk&10F?yf4&vcwL zwnu=xK?GEjOrjzCfH~#(n)IA5Bu+Pl>5MJqBag=oQZf^40bQNxpsn zZdkRFmH8tiroV=`Wg0=5%@mMtw}Y)T02X#;!+e`yxb<@~XzE%*q|P*WwrV-sUw4P7 zva3k&A8pn@{tdCXJ4Vi#sWKmTnW!qBq)P_-qH(7f@m z%58w8XvjdY5n;8j?8J*|-gtH5W46Cp5B2sP!g0I9h?Mj_vNFe%8tuz~`2)u|*PR|D zc&ozPT5HJcwt}F#1Ekl8^VO`sNX82$;At0id~-z;gW@!)apyLiy4(_T75>t!nTxQf zZW*2p-boU#UnluEiBVnoA8VuchQ_6pkR3vzV1BfK&JoEa%I=pisa7jg7Zhk_2 zittmCH}2GqMcpMgQGanRN~QIplCqc}#*|~tma`Zx+l*zpa)PIY@`9cu5XJsu{L;dNEV0)vSJnR>v#sU$@@|*+8@|h&WI)GxRq&okuN1o1@PI<@x`2#p=io1Xo;`j=A}~sH9(zOA>K~dN=#=@>2S0ToU@+HpAQc_i(B-!#wIcNWM$VB8qd=Q7P;RDff+` z>%xktr^jYCGk%y(zZyxe%a)S*1N)d6)79y$cq8)ZE;2zc*0VETj#N;{F*IvDWz*d#2F276(;ZmcMcfLC;tYb zsDhORFS zy#=^&-dWf?n2)X|4QPL5BJEtm_3-j3x8q&M798vZmBTxsF#7;EXShSBt}aGj$9GKO zlO7CS!w3#s6NkLK<9MKZg}~MOI-Ps)6E?qf6li3GU~YId)_q!lyXVWn1YHxFva}yn 
zp1y~?R!s~~DZ+x>Y-UBI2ro4AJ*0nnhR-G}Ma#NQQeifZeCw`8znHTyE_DpQn>gW% zfSu&$OvFl=B0R4h#B5J3L9>@9P=vXNp`X-o&iXdI^=%IZ92Np4S#9Q!&QdtuYQZj^ z=>#gf4PalJkih-QHb`)-Kt}_4L49=|=yjaL`xnCSRpL$BvhN$-ERz$ot#gNl^hrFS z!flw9HU`ViX!DfU`(TQrBCNcWMcUdp5|?Q@ew{dOyPCceWL5;YN2=LPG}qw6XX~fV!z53~*24zV0LfT$324UgZ_~X-Ma{s##q!{SJ%8C1-FjSs5=in3AW}XZ-rw>u- zak_#Aj;oXPs2soSh{2l7MB&+hijsYs=Yfy$J0db72N7MtG`40szVT*>^v4gd)n@@uTdN&*gB@>c<7wzID4{-X zM!e`*4!r3)RybYnAxYa~BzULOM$A9PLbKXr`Yu70_aT>{`<*tdSfj~#L-J_TglKB_ z@B_-(pC`NH?^FL_9w;o@58StijYTV%G-`+!63xhIsZcn)*@%i7oxtiT+i{+AH`{Wq z8}}r}Bj4NvmtTF2yM!bKiB&h)SYH{zZWI%gePz-3x~bsX4F!SqePKb>U5Yofmk{rl zr^zP$3Y@lzPd3`Mv+FmH;+BOMAUkRid^|6O3RW@H=+;&EaH|gTUuf`t`hI4eS|{Sp zGF4v3SQ9b{Q4pc5kHxdAK>q+2%J%zR-r1BT_DbQD; zGT5^(9cpSUq4w`T2u*DS+N_Hzzw~%5%lM>e+9xnblH%3xJ}!6{F@kERr<2phtDr3Y zG01z5g6FOgT;i!F80(c5JPFUosJvOkXP}R=&BpLaj^OFzTQI7uk3=sjrPCAY(K>9Q zAkD9qC_1VO9=dU?i)&HXKabl5RXF19%p{`ZFht3RiSYi}{qm;6al~=%1fp0kLJU8h zMknEo_&jSD>v2zpH?zK%N(bA)!6{Y(jd2IT^I0f$O+Ld5*H48nlaJw;-c>kiw;o4C zlR1}pIDYHRB2vq*;^)CsY(6$saD2r*-1?uRdC9_aYyszebjT_sJJuQrwykc1`nL7N zSJ)2cKJ0@{-V(f{m6|;HX{WLMgEepc4Tkr#A{GQ+=ED7(o=`oh8@fNPWc?Q=qC#f{ zx=;K;??`3QJWpqch(1Cj3QVxh%90li3D`L52M6xV;3dxAgx{M|u<8DFOo>q!eCfW6 z|3;P3`u7(u!@d`5oBq;nOKEB)CqnjHB;$g<@q$y;IoMF_g%y9Sc=6m!rL4>uHk?rr zEEwg1d}18b*CxZ*{&nE^PJ+rF*~CMwc$juagI1q-gL`jC@WfB$!&1FC{yPaZn0RoS z;E#(vJgQ-Ed~6!7`W%ki_dkVc>^xqJn>qya9fNq8acI@O-mGlc6$ht(C7#8J7`8_n z>#9Id@bWZdTKvO#MG>Ik(@(6L)9G+}9t~3SWO6D-=)lqKY}~jDbgj20_SP5B1(U+* zg{)Cl(Y*}sWX40W&<*-segl5m7D5G(gU09V$<^~W&{5w8hea}R_NPRcKuk1C{tAM+ME*dUzq-)y)aJYAB6g&1cp}f1eAZP7Pthf}7Peqq9M?nw$j*DUu$2@FM7Q>&( z*;rXO37

3)s*Z0+nU!1vSdE1qA_n1$$mA2<{z~7bweY6Fjic6l4!SXP^GBeTLJ^delcARijaRbh;H?^z`UT#Wbp49T;?AGCo{j(vp=M8q&^$XKVQT( z=~?)(SOp!8T=A=75Z*ODgl6GMuu;t%mnv+*3_mWrSaB9^jLZ~>Z(9iEH>%0!McnMB zeLM~ay70C;=HMwmRoAGaS9~!j*FJ~!^Af=P=?R$p{U<#A zJBc@E^ae?4zXPV;gE00pnr;f3&5KT`A-g;qxc$;*!G-Dbcva~W1wqRK@zj-W+K})Y z=6O!WNP|%DlWh8*t26(n>izn#nUpbPs!%kUGM|0l2T6sBM5Tz5l4zjPq%w~gGDS!w zL`jo!_F4y(r07$TN;8sF8qgpO-~Igh{0HY{+vnW(TJQID;m;#d!hamAV^2#i5ouCD z5lI8=4}Xi2p9gS&my9NA6NUQ66@&#@CvduvyioF!zc6z5c%e|0p^09fNR0m&I-6r{ zowMS)U$BCDtK24kpLo*K*G9=)qddBPOcRqXDh3;SBXOm9Dqgc~#e*eN$m92uL37Oo zH0D^GC+w~>DP#}LQ1hUM?z*t9<21|D45w!r9+1_-aB8`$1eWSd6nZ#1a=sW5x>h6` z6MpR_7o^fcA?8&0xjX#OPCMj~a-3p64kCCYnLW1N4+n;-bOpF?j zb}Kp8-dB#HE$K$fd)D!;r%l6y`fV`or#++{uCvMBCuCl?carKqLzJ0W!rb`ih$D;C zseSPmGR27VhA-Siv*+Eys%#PA$AH5)(0Lg0Uq``*7y}!jpMpz zo`dhlvmn(|lpp)*AAQ=COqU)jqq#l$beA`htWHrp?=c45{@+%~O-ol2v`Sg(ob?wL+AdiBw6sV=>G zTLG`#or(X(8k3>@2jS;%KgZ*qhzCW);nvMvgh+W{`H@C=89EMy%Mx%xb_#}jor3>X zo+VGa&hbkt9dlZIEtx*?Z6u0ZH|$vGWV|Il1ot04huf<}X+x_%t{<+Y#Qqj^l-pC`f^6oh zmm~ep`a0`yZZQ|y_(2YR`#~au!|CvX4C?r(pK8`C@w1)JQn#?DbguDV@^rcY*pg(r z@7h;txyOrKe=&zHcthv`Cp{9$OW?Y9$@D}o=Y!|Y9#Y>&iA^Hq>AVewZ;9LvC1fMH zXxGg+hXpgntZ5fS}-hmya^U?oL9SNOej3ss!AEo7Dkf^-I}Cs!+cWmYXbhQ zZ^Io~H%L})2q|(oK&tP=T7?=~VEPztm%G502D#6nvy|LJ!1idCvOt^Qor@8b3yws^(JK|9qWEqV`WmMd4V!L9H@BXs;i2y0L&L&aI;xmrkShCpdOUBX`Fr$)JIk zmC^r~DESl;OZ)bgQ7<+h4z1<-eIpta`U*ibaS5xr^e&OfI7^$4rGRbpQ~0n`K%6^y z*p^sGGHg7l-+2+L(jY-kPmiIk-KXgYEuha6-H8b|;~CR#L>0YzNQB5t>eiM;RNo~M z9Vc13&M6g)&P_#G?#y0P z{g1)+@*lF=%cmVYpi=6ho>@Sgmw$V(5X~ZIPgnG*j%c>Z@96MDo?&c z$LO|lS)|}w+kcdLJ0#P07p-WlW-+?Wx`YaXyO{fA2e5x{t#3$gyEz`68p!4Rg79qVDyBlk6XG48lfeH*aS?9C!RBR{nk&Fm z%~l+|(}9iLebwkv9ga)W!MxIa#31)4xm0|FT_=16pM8IjEQ_gdN~4|8{rZ7ykG?~U z&Of57s$}`8db0fQV}faznNhV1$$w5P?MrPzG zPqep+T~;O_WLqXNg&W`C#h=#**UIIIlk0tzH2TyK?Qkpu=Y<<-TS5$0nwImr%{$@7>J${sJ`d^(?4jz{cL?Bf znboTMSXPjMrMX-d)MzeRKC48}|N7wN!m)gX`QIpKRp9s92k-+HkLPdWN%Co(3jYkF z!WZ+nLjwb>Xj#n~ob@CFcE}A1zuqr*56{sx1^(m+%nuL7Q}f1 
zB#6lDEjXe-iVlwuMq7HIfw`|$XFmtzV3HP(Gbcqz}j6P&*|02Y8Nu-#ak8-)~({Kyhs$q?K zVbq;^h`YiMD7a>4=y$zt$Ej0wpOOyQN#-TkewgvE-fB;}Fz$_3eM7Rm-QxS9^_!x* z>m&zr>XsR4)(iJp*PEx#u=6ooP6w!UePw96t)Y{o-ToXC!x7J^&SYWGX+91@}rBJu$t3#dcGFv;@$69rB z-&Wh+>pieAtPt0S zzsIKIJz_!*cus~_m1&F`x5WzF&OsC74}+YV1rAl8g%_(0aj*O^gnuoCDGeQPlkCNZ zs_u9qOap&zo{3Fddw65$T*~i12f>LfIaB0F^pa=bPv26e^OeEXcIGfSs~S%F?@H6I z@%Pwm*DM%6ht2rrLI#daeFeM1+u7yymb9JtbC!9AUe`;ZZnq=p&!p?rZo503O9N== zs(0+ZoAc;lhe`DJ@^2{4{^mFhZh?(s|HgF{W zgGy5w$)P4Fx%i#=qiamBRDB}LP4mc_Mfu>a>PcKxI|;Afn1pq-z(KP&>}`WBXucu| z8dLRPeq=J&-bWJDbd5RjHI6D`0p0UHpXPkMOm0j(Mi$q%kS}K}Xx!LpviYF|wf5YM zHbQZ-;%+Kwd5}SzvKPbUg5_Yl$P}??3j|+?V;NTt30HcL9T(aPhlgxX8_tu_34l^Y z4RDojj-ino(KuWHCTE{8o{1~rf;~rVTC74A^l0D|*DrALWdXD`8SySl{UItJC(;OM z4Pq^21zF~g$nGF@y7lNcHYU`T3Wp}p4>4BQ)zk_%^!`AY=}Opo*BLMEA7W8X9e;}} zkeIoSxJ0i42_CT}N& zF`jkSD6%CD?M`H3{b`O+C`iHX--|HWzz3x?eQ^IYTU_ND$dTC|lKWrf=@bn$dgSbR znEK)uV|w2V18z#w%i48Zo7kMb4UJ_-0{1hcVcQ{Bt^z7L578Mn5elBpzzyql$g0{H ze80FEHn=8|fdkH@hQZ*NriZenT59Dk0Iu_0;;y?;fYrX zjDw*#=5QMVy_;7-aRYZV+b)5(1-`sJ+ih^5M-L2|m7#6(5Tx20KuqHlY@K_Si4NUB zr!CSZe*`9Umdt)C&Jhe=Z~H{l&r9H{m?`w)Eq`L1@B|?3GE=28fduFW!mWdMP)bjl zHJ)2WADibvtwT9!Um6JpuOi99=R8#6-M*@x9Sm-6tzf0H8|wpy;Mm$O;ukfMBT5v~ zi5vIOwX4Hu__g=sl79-^nkt2s9Cc)rv-_<}kbrl;Z;;z<(R9G1h8lhzA|^9ksneD$ zl9TDqn%_JN-sAy${AM?Cmdga?dJ7Ctnn-uWEv3 zGH&QQTb%4kt>@mPOJ~lfuU5)a-P$dJ-A*+mq2oGLUh$0Bx0I2WIY=VT&PJUrH_3&! 
zMeOe!d8T!>Bki7`hoAh$(e$~6?4HV1B=}x z1obQ<(YMJNzmJg?+UN3!!c-}th`*xn%`0`G@Pnu@a*mR4x2U2p;-I|n(rsPg$t-Ol z^WiKz*=75o|j5Nib=L16ldi8)M?y8CjiH z;^r4k?oDnc7k+9|-QjO=)87G8BLk5<%EJh*pS5Q3MEqrtfvOuARQRlbkG1NtbV#>4i^VleIj#s~wBmO_E_< z@=54WW?{*Od*<0H!ElJY&pu*hOfl1f=klvI7|c6(`nmB0r@OBgC1jS z(08v3rDq38k^4Akyaw!>qG|Nz@o{)j?;MEx$xsFURr0So4S2=BdA*BxP^>kc?tiz3 z$odU{t7jRUdgz5)_H&6nb#s);?}z*Y22jPCaKs=>QgUP`F=>>i%e#&dp0gr3U%@pB zn*^}Ktd0cO#F66vGFaWVVmO3`^!vObTJUWwSI`_dlidwYB8cp9W%Ous9i-q{WU+FYDmtaZCPJw(y|2l zS5cb`I=MhV`YmGJ?@jdrhe^_fT7a80UH8nG3SUGB{^X~_pK+Wm<)bTo zv-^)=`MYdr{`-VX*{zM&u#^4JmqpTKm$Antl|$U*eT0s4prsq;F@ldq8kCt zd0{U7HRlKoXRXP5qD9?cb{t22t38)r(1^MNB|Ot1;2sUO3$e=DHY zJ|9OfSka9k@if;bA9il&6{ro$!+<->#@m+Rxwaf)`>28zo&5pTea7T*?I~t@vkH=d zT2ymp8J&(u9MCxqRv+9c@8wDqHJyW;Wf|jF)p9i3P3T`a{Y%mf79}K zDv>3|{G->{Qol>QTP;K|Pm!A*b8V2c?WuTrk)|L>s-DE=OT(vt6?6lufYv*_QRL1v zs^mXj*mfK?^xO-u%@X`<&e!vAzQPRsM#cLTX06ekzcOh z0#{o)FYqN4Na_hMO>3nIry}6U@DujA%sbvrT_-ZfrjDAtEXT3SS?V@_H~;C{Yv{ZN zF(UpfHE8Sy?@$F+t=S!o%4_iWU=NJ?T*Ty|bFBHDnW%bggmvD0kL@Ub$J(c=poaS) zo?P%d^5A5EV1G#%-P1<#ZG8j2dg_6DuG}Hv$%GbL9mkn5DKsuU9ZDmYP+D-G>>H5~ z7V}n9{&B+U6>$l00cTsWjAB>roB*Q}KGAdaVdzRi(0qp*75TdfRY!K>^~Ov_rZ^Qg z{C&^#@2UqI7kPBQqX=*Q?@U(`inlHtLgDwN_@?m$`lt`1QJf6O1(o7yuGP|X^#Ja_ z{g9_ruo9;;3n0+Zfg=G7a~q0o5?JyV$}0WnWp6^iRA0r-kzZi{i;MUpDHtc*)yAB6 zudt{!2cMVE#OB^lXq3Z)a*r-1^u|xJ=6pSqKUt6d-Efai`nwa}!a`UZOX+!eSKfeM zI#fIowV8KX0h81XNaTzWp8FP26mu?OY!B4i7?35nF@Q&JNpW|Di_^)@f^0f*a6FDa z+gbZKRG%NeOrC!~TAd%bKoep(dXlfcKOMZ7k7s_%^Y>0OB0IvC!|w%lTvF{hcDsxd z9-o|uqvaw({^{w$O=|_1p3r~`8$aTYpPhJf`xM@W_1l@Z_eJG=L1 z80N^wps(O4mloj+a*rm{30Yn2{2fb3{ScRIx0#7%?(1>DwwRHwj{;pTU7BRDjSSEF zM*5XPv2XaP;L(_;JjL$&ust!78Dnirs)<7N{qF6)rf-P}zHN)aT|JDmP!3TpS-kOPgk*nu{v0X7vbjyJ8~F`%pxxYD1|( z+(Nu;R7`q)aleiA4^X1~7-T4{C!e41C7I%!RdGNO=~}|sat_dtJq#0h!ygpyH!_dc zMsw4aovh|^6FPTID^`4yV&eY&gchgwM5Xr%_Q@LKiKRT|F*nVMK5LG<@2z78Qzzob zkR@pH;V3N&UJ9S`?vtRXvG}kc6N@G%Ge5o+Q-kzD=2ibQSmsknHvXH4uS!{V+PFqC 
z&t(!8QWK%-1Q+D=)iAh8N9YwEfoFOala9ntkVeE}$vNTD8x@&bnz0NRHTOZQ%>@*m?k9gWt!eoaU&z1Q zNUYR`nD^QOa?v;%ftx(elQP2YJQ*yFYbLrP5hQ0{e5B zlsbo^JJRsT+7vwQmw=<4i!hYowmS1FvE^_Mov|_;R~)#4XZT6ItS-a==7Cfha{@BYBQj;QQuJpqg$*B>y$C``jJKo>gbzw%v8!d5275;J*|D`#QkG zXEidT(%7wii52|)1F^ju@n)0SR!)sl~t5 z8;M%?6db4xL}B_89C5!%eSQRjiq%QFK~asQw}euytVs~bQOr*U@`!ikQE=UAPiK7Z zWd=lq=(g%NyxbQDvjdd5JDd^LsVt?LBi6V}ZaU^(`N2%~70`{+PI&5lI5)_Op`TQf zF?4Pq4Jni6S8smDw2IB3a(8CZ+f|%7OhC>dZi2=^ zhe=4)MGVQ1DSaMPtK)E+M<6l%x``$BTjI$uL5 z>0vDNSI)-28$;;Etu`dBG6e1?4bj2pGjPX#8QM2pK%<>(+3uzs;>L85%Sw%43Ll6@ zP$|8ebq&KO+veGmsw}YM5@?KaYfl4*cDko{yi8kU?Sqkqwf2( zu4yId*)owvN!!qx{x!5`;cYtRc{aS+lSvAG%Fx@6z3iPSKFsCuB6L-R30XHs1?OC> zBE@_sM*V;h$@Sw%MUO7S;HL=e`7KV>diQ~(r3$)ElO}gz8Qoj91zykcV#{OB()T$V z7=eBbANgaZLNhDPso~Qx)V5ljepz{$R=qyKE!XEbqgnicG)>`=I2D*|nMFUnOrd!zOEFQ;7h^Gxj+4BM z+6uo&$B%=w^;( zyEzDlK>Heb6=>f?Xcnl~1rzJ)TaI5)y<=S!h~{T`BPu7lgB z-Gx&Abh5`>34D(h!7`@-*n3fg7*#spDyO&LzWY5m2WQ|bfgFC|FJ<39Schv2^r8Ns zsnC1)5P$k-1KQpgfU<_u;CSEz_IEo=T$C2kzb+f70bFA8IZCpcrWRVJ0v zn!&oQ_+ESD_$4^YodYwR=F`ph9Ep6$Fn3VA zKlFs{PVr>^CviHUSD z&o%xcERjyy#yb$hrKl_bdy+)pS^rUVarVbnjzpv*Ax9=$NG7UWk~?&C3Njj{q^hn2 znuey~tV}m}t-lL?hMmCKbSIH|+RARZEiXLvjKO^O3a}SHhT&UlSx;{XGV4kvj@MVk z?TKnE-WKA_{!EN5%pw(=;_=l-CzR^2C0EQI&;YS)?(2Db<}lLqsmLT&5f@XIPTFP?{@qk31#DP)_4yR z`aO!Ol2z=vFW=zhs#vb7YRj%QEQBQ?akOY~L=axbBYSEq;X&nQ++5j64o@qkJ@u-z zQYV`v>MWrO63^+P-@#-`nK_l1dxz%yScUpq?vZ%8m(=H#E7@%ojH$&+^e>%AMQZBN z>BsE_bHEL%`ZfbgDSFgr!kX=(=+4^=G-DkdKgFJYn=4Hv z24~@WnnWH8`{2BOER~i#0m>4`F+NF=Z0h+=`#hJxA7&o5e3a$(RatP~XFYto*hl2P zKVo%egwbN3U}mS)34AXujwfAmsn|MwzT?FL(sSJaSNR{L?O%|bdY=R4H^u0JtA%9N z`pe9eGR|PKxfl%G^|3#ohm1GUfV|?_SeutZ3&ggt55Jx#Vj61nkfs%$uiOtq9h97e z5jbXio~1UE`10w?ar~|{w&$TCzr=tu(l`6Tz|a&DU$TQ$ZaEBop#dCgFqf{o` zj!WpIFjZ=ERgUbcXp)$pj970ST-5 zOq`Dy6BVOd?DdZOr2c0%9O^D*zTNxBK3nI{?6}FmUVafP61WhqOGLs%%|=qR$`~f| zcan~}1R$;H*7p6!;bG-vHdCSSr%g$inm9@QbcU2BpJ67x zI0DKCqhR>Q0wQO}hr3H>!l!jcB2KPG<|=A|{Kap|ikQMDqvXjT+wp~6vpyddojjCa@J 
zOgh{q`cWuaIoYB-mj?M1-3iRD2?ST|g*SfPy#FeU;HKIpFfQvOe=n_p4b^{Hv7uR{ zM1>0)XPhNdLw>P|+5T)bCX$PKhHO}FB%308hTUJK$tt^_f$6(4$qVk>+2dLQva6KI zEL&-q8XgbPi~q2{!=u<9k8yP5%m_@dZza2)STZZZD&YP_9k`TjM>Irzxu#4a7kr|w-k2Gf8W{l%~H(oyilT>Q$WPmRDj#gJMiwc2s8i5 z2{y}FjVuW(W+uwiv5C1)Z7#^4hBsCrP;64jEZwKaTDlq0mrr*vY4D) zsTgwVB}ZjFJ|EKNz5v$yBAGaT3A8d6tiE^vST9LoMq|uLv1t%v#H9o)9rxhTD^J1W zhb}(CYR`FT(J|<%rY3)Gk2NZ zf*4qx9SN%ny`U=E6|yfUSn*&`N_aR1;}CSf>+Ieo$a-u2ePf>!WAp$!eS)%j4l{h2&Ai4!C#ak-zct`x?EOCl{VVz)B<+ zrQ?a%!fucIO%c1gkr2mXaFWI&8yU-H--tBh7*ZCR1uVpa0>Wv4Gu6Zb04aeeCU5(M0`2 zH9I4)jWxgQ4aZf1k!f-v*?5)c_LULdlg;Fs*&6av?9u&WIesH-%svZ2f+#q-t`e$EbC|S6jjYJtaAu>h znbo-;$)&b_Gc7gMuwm?Ojwam3Om{dzme$LlxYG)tv$m4w6J~RJ^gOnF`4YI&BLe{j zZR~JnwqW*Jb@1O`29lvGKt4F197k(#u&-qj=3ipPbk?wn+e9!WAeP-@SVUTSEO48j z7zzy9*?jk-pm5ZOi4X$bckaZtoEE%(djNli4rAZJ$2c5PjN{(}noKE1jO$|0bnX)H zJm#>S{Wl?c6=k{b_Z3X{H;Q)S)ztz z2V!vHtTp(c+@I*z7c$2ed66H*W#ACPhwYN_%+bOCs4lRCmJKr@AdN?U|960S_=~$g z%li`d_B&ADyoyvlL?-^HESIa=MA(|sHhJli5Fj@jcOH2{{#&CAwUNwyO$ViIUJ=OGWt7YP!&yrzs3O)`K=G(!z`d96?V>l|-GL2C@@plX$wE z>~`aRr@CJgi^Vs2w0Vd*&VbK_TS&0HB6vkj0(TQp zu>Wxx_H43-hcn9|L17j~eAGboT_!jHGmkRbW1h2&eFw&8J;k?|pJTk)AJi=o6>h!QjibTbJuavb7c6hZvY5MQ z+%F;YdifrgMTiL-%tvsWub6Ou#3M8nyueviw{c)j0(@1O%ZBK^X6I{Lv(G=CWUKV; z$g#Ppg4_9)Fk@L2vwp6N;J-K%kaz253PplINkk6UDxM+I#^#W&&l1sJ_gS0I-1lU% z9lpM;g6*}JK;pv!eAc)YrPfbH;i?7b`NapxS2^aJ$Wt;fM~B{i-O2n8ngQPaf#i~p zADLmrk`D%zY_qXHCdpreFN=~uU=qPw`9_Nd{WYb}YS++nph**C*OGC*8SuJuC(4ej zfs_p?f{t282#<2YEf1_P_|a=;S*(YtSS@2VdlslmD9i;WF7T@J(EVAH?lAVg&bnw5LsA**eHJJ%0puWo!uB7sINI zUIt$4YSe8^#wAOaV`r5V9>^Vo=7Y{8+OQ6ITk6RgjuKn`{tIE%1Z0nhIJtLe3Op?e zVV4)ifaQ;+(BNIm9NmPZNO>GlKe3-Fb&VzNo%dNz=SNqMlcDVcZ%EjCS(I`7%S487 z#A@D53?5z!o8No_UVSo-J9h$`V>#24g(A_}@&Z)eSCOu_QWOU|pw_O2?R}<+!8^;C z2DfzL5tj$`^6K=KXgcBdm{O^auUS3IB;JF|HB74cRkk)xj|Q14;J8g|$m`kH*!dlL zU}2<su=Mfwil8n0M=kdnqQ_NMZ#i@fAF?4A(e$mgs2ie!r zRksqHLl8B}w_%HS3A$cbjb!;woU45kJ=+gstzsIqN97SUr&3tm<%DY^ix}dJlYhp{25&JaUH2P(1hXkt(bXT z0d+hUv-584Rq7wn3U67#E`cz)Ii?l#L2ARK-P;w`oki_Uat 
zO1Vs45o`9#V@slu+YdWyt9S>jW$3H*E>z32pIq0UM8(h4(eEE4i8pEz|G#HRSi?#- zlRq2wHz~1y{}sW=xrg8;Hxc)}RYS=)D{V=%pNw1CjE#3kTT5UJjvdL{h5bxQFKG|UV za!<7CegV8QH=xA$2?T7&5WKRLqY77ZiB60Q9N(f$%S9U?TJugo=Kh3;y^}GfdJ1+? zQxrWg#1wxM!6p8B+>_)QWosSLrr;S!2TBp;8KWST5LxqTNQ%8=d&4G?^(BJk{lw;D z1MxcYkkD^m7zO>kv`6I`_&;bT{R6$sAMuy$h}8@WF2Tj_lrFdo5B=O=f898|{No(n;|w{z9-OHmxR3N5NGH}2%; zU3}j1<0{j5ldauR$z}2sFsiQ&%I?HbyUwZ9UUxk08?~mBRL0Q-h78XCZVBt7rjQTU zHHm!7Sz_jqM_PIMUTUL?b&i<;)Bm{uEd+xA2fl%#%x$ya*j1E zRmZ(w>p-%+41RZKl5;J6Z0LF|oR%AmmW`IU@~9r()XIZ6D?TP$KV`$mhCpcOb@=d7 z9eEXQxQ=(pW?s#5v~5XVB?t^JuhAxi1%ks0->A>q^HfCuI znc+I0<|@rVxhF;7zyis>s)cf4oCRm{eh}_mMzfzx!TpU>iRs7R?A!wfQR1gJIBpt4 zo(<716YFVj)p73<7UqCwcI?f>rx-i{!`wE8?Y$3r=W| zDFI3_PYap)fNLDV=^|5@B}H|7_t83-M-NG@qr=7=-Pf>&`s^vD8<;7WGF1$|F*(4` zDj+`GOdi*{@NOh=X4V4@L@Qksw741hkKQ3r)BDQCZp1&kp!KXCvwk;53dTu=O8J#GZXN~ zTkzI7LM-SLQaZbYtdn7&V*Xs3xu~2jdlp7F9NteAYDMt7gCs^-zn~9tSzw#8Xq9j) zv$bqH8Jcs5WQ>(0WkH-_ylMqbNYSKDr?N<{ffk+B+RQAz=1Ye~Yp8J|*KAtPqt@cF z^i`x0RextmcEoHY>%zbB7OHK49eM@4d%6Z}a8*A?x;g?11*@0?e|7NCzsIod@MgN< z{V0LNfAG=9hT&jWWPmd?#@zVID&E^f#yqWI7KD_tCfqK!F_}j%6ilKghPCNWg($8m zB}EGwFM`(Ay>$H!RhT`hMNahuT2DD9LJC*9(>po)V2gMznf`hhY*mV3v!f0u`tWGP zi)|#hKZJQZY)6kQ)1_y3_kf|)9HM*j0y9l!9QGN?lfSE%lk^vo*j?2Qw*#7(9FJFm zLKu(XZ`MFvd;)`N2hcXTitQXyLL>LhICZlJFcH!?-O&q)em?8_J^=-Ns>1GTmH5`D z9o>7Sh4T;8;Ue!Ae5O2z`71hc$J}T#)~k>t6)P~a-f`64WsN{Lm~eDHF*+Nm={5DnkSjFN)qR*7sAyu1I$S2Os4U> z2#j!Z!gEn>WF)B**g$xbFgTeO6xP5GeQ#|3&l(SYslxxboTfS4@W9H+CTr0lrk!NtVQY^<#ay-03fw z*G#JZKXRwZn`-~_qi3#olD%^3^nubSNptx?)>>>P)?Dsb=ItZ)S?p%s$?BVoR%{O= z&3$jnHEf5dciPxuDUIgi7D9$p0GcnZMC#p-a}R#NwIy><+#wLZW~<_CE?X>lTaCy> zX21Yv=-93wNR)+Uu*@Q!4!@Nq;fV~l^Xb5~({r%uRT>u89zvaI$8fRNQCvpSFwd+R z>lbOE`=e~m>gkIA=qXsIzZ;!aUc~6ka{SWhN_6JCqjZOgpzC^FtzU^U=;Y6)YuqKU z%SxW49!av!X>np+%N=2&^SK84_9E7yAewi!PY%DW@Bp=!TiL$senFbwER@Uq22N_b zncL^NnYqUZ`MIB)huyyfeqm?XxijuEYrh?1)@(=u>+0$7Z&xCOr5WIwNsXZMW)e=h zQx2bcHo)5TQQY~|3?;+Hk;Zz;9{y$l$_qnS%@vbr&~GV9?+ckO-h1e15+Qz?-qbu} 
zIhnoM0<1#%Nal+|ShP9^HgA{+ZMVkL?UJRS$Pvoac1?!HU&m27WjtQ0c>?m^$Kp8c zZRovbHO6eQK|RgYkiJkJe=S=J2`P@WX1fwivj5J`7+O!wg@4F@V`OkdrII=PVK%t# zxdKPZRx#G=4B^2y6L84v%!IN83`0{!gD9-5=?AK8upL9hSJ&x9sQQ82p z7Z}V+9*g?k5yUM`p4M{~B_9`U;%WLCgung}i5y?r;TuYi`fs4lKRG*Dj46IxS;DsF zS#Y+H947R>1~vyB2SLdQTPn7gwRZR`c<(%!3e1e@-YO$nUlT;#GPG&^oHDoEyrSRxmp&Xi8@qx~0tGCBV};|%Hm`2%pZZ}nI>YWR!c zYz{Q@Nfx=b`y)J0zeb);+)j*rH&f}oEVw1KlJ5l(B*HuBdipP06MAm@!ZnoF@C2^aT62Z;x0m!?j!P4$#xbAY1JZ`LJau*f|E-b7e zBf}@SyzViyo|lAf8ok{AyO^-|)E&H}o`>Vb3vm9%KyLS_4NDqSuH zwC^Px^XY)mXe$!Jy`P5$%1O}!eU9=fPE$Ip=``UI!nBW&(~K+~jCQ8Gv+St+8Dq*X zZ)Rm1?*>5Xnl-^G0rbKr zXKc28%cKlXgCL6{M&(BvtMN^Pg+na@wPan|wBQd@{Kg2aP21o@wGficzk`G3C!+Q- zm|l241`OYgLWR{iXxtgjcIw_Hvd_%Ow@>|`V^F~EcD#N&2mYgvC|UhXQge_bfF|Y)G(L+6YZwQww@;{0x#r*2%KTA zkj*ujjNP9n;P0+fT(IXnRC{Exsd^Ih*Ul(9>(6tDew9PtJ#^$hR`%gt#Wr^F4n?-M zUK%W8vS?|G0ljqJohDLu`gGYB(i?k$dJJT-OzTSe<=G@INAQ`6*b3Ft|`2k3O)jEqXqklL${|kjd z&@nN9`SPk%w={^r}Xrlu0t@cT@Fwg&#vT#j`32zJ=VV-VM`6V@Ms zFZP^0xNSFvdHQ5&szkmz^BIVciaRV^;-Ye}W-m&>I{=Qy?kO2khJg@bEnW z>*WS)qOA*W4jqdQ`G>J>K?J^87>d#{Nx1cGJH}_MK$UT|D3Du(vu8TegTs;VHemwj z7CNvFQ8i$G@e$n2Zi0>~abefvQDkvLIbAn%C8P{$pjvALPLMlCFZ?xz;@wAKr?Ikd z`t?&xS}WgGemq2Fh%N-BECSar(?MtBDI#6Cmvwvomd!KMBx{~l;6SAXo;jS3KP4jY zq(T;65i?|*3xjA}urx*t2Vg&b#5(cSD0|-=J7d37w(TZcrlEqPZKvR61vz-W_M%84 zLkD`N$^+wM!_2u?M&9OZhbgb)*ovx1n(}ThsLX#$pRx1s_-zKO8VQ=K<}}Y3Ugq*CG0(Y zAM8IofuozsL4P4{xC(ZJM2Cy;qj887gpY#aRrQcmJq;v{Y~bt(o~|oD6}lFelO68U z$ch>&l32hSVMU8UahDoRI&hNay-N0ZB;nxq5xD&E2uSk2!`OSMVfdM;VD{`Z zcAk^qT@9wJh%PMsL}vSnp?O-h`M32U^nUUl z`l}%UvNj4~arMGeo}>q12J$gx(A*wvxmbs3t_eH1$g-20$BSN!+zBq z81HU{$0nuY(4p7(>}MouAMwX$y4x|>=R5`|bfSb(9NYBz0$xl?pzfEvN$*(^yx(3$ z9vDW`aq4fu*=0UxZu11cQ6oTh`bav=67W3J$@VYRrmw`Wfm+}@;-|6@y7wI>Q6(CT z-PSmg)jkzc9rVcXT{Sv)$}~EA*;6`8?lKho+Xu07=`cqn8+3MDphiQ6Fg553^)6Bs z=3C^GrM&6-8a5IK&q<<=q9;U)r(@^a^o&K|6VH1uc>2B{e_Bha)Vk?)o! 
zV}WHdp4(DLWfyqUjq}gr+8k{v^76vPvis`=X@Lzt9-Vt}g-6!)ogKdLBJ>fD<(S@T5T!YcT#( zH+?vb?;a}afy2{hqQEVR?9}Q@P+*q>xr$35q9rR8Ob`Udhc{Vs~ zk0;HJFUXZXU(lZD09KwGndmc#I4hOn^q!TND5b#$$1lYrih5{iq>0OhqPlE_}pvxa1zEa4lDwA*$L zTr4(&5vajbrsOg|y=Fu0DG$-xHI*X$X&FTHdjYI6+z#5k;=Czw6i{aa(925`_{A=O zLxq#!)Tu3W${k&}uk#QLdq>i5UeDNFFZ#&_k1Vof=XP**JOLdZFNvR>GkBcX#P5C* z!91lNZjDHVyvSfO@Hq(#(@o&(V;1GCGjX@J2PS>czz>th#_4C7g6v1kx9B64LqDdUYG>m{hk;U{@y(&tmMZ$D}2l?>w zJ9RoKiu{v%C_qG`SBh+&69eT^#5Yzn<8+))ScRHE~3`^RB+?f z9_Dgr3pE%nr4b`*sCjcfz3^%}iOB4sL0!{HXxe1j1bgvwf&|)j>fp$K>*>Q6NV7LY z^BKb`lre~>y1Bf0_4!Ns;lf#z_1}*+pS^LCfi%}8sl~l~ABj7L=i$af)!1&W#;JLW z@fNka%;Dctq47gI8Qkv+XHU9|v_3_`oF!hcm+S-YXJa|@(08c5zZ~xzSLa^J%-|A6 z4`PquR4&sn6PXt*J`q=isN3S;o@@b4%d$i(W*Wn*2Se1%TZ0)d*vOVVZe?oDP#Wtt z3fH~2;%D+2rp9+7B=MB~5kJNtjJwNcKADu&T})afE|JNJIpn3X7_O8(OMGj4$)g83 zWOV!zh@ysg(PI&pmSN8A70u%!s_nScSx30K&ix!aLZADjbCz@R*~}eUBF-%8G=Oi{ zT8YGes{BpaYM8P09C6RfBxZqQpfn_tdEnSbl$#XU6S*q{rEV)=zfhdYy1ix#+VaWf ze!lDUW;9V33j)*(hNV*(m~nVMJ?q;+-fx}=h0SLnq{V@W&uW8533?!{vWnZuS>(9Y(3)b zeV+u{s?+;VCc|0--=ckO9}t%Zib$oDmb^B?-gH(h19WEU`FLhsz$csuBFW|Q$L2TDvyH$ zwxPJ?SO`6!b{8pWL zeTuPH$AXE6m&CS^D3k9*WojY)bmK7b z+inAuGbX}}fGm0>`2?OYHiaDdO#04fI*Psp@Wk9YbJg?R^z=Uo{JlFD?B_3F$LME} zjm&qptXLlB1rD>`OcU9>@Sb36>0fHUpp9-y_r<2yez<2%2u~*t#24f2@J<7tXFI21 zkZ=}mbA3;HB2J)d(k6U2(GKlSU8Wr#mb93sV_QWeVOHP_)R;1jd9vb-Kw+B(?V~=V zu3v%wJ2AN9cqhGc_y}s;^abT-hS-srf+U<$Hu8Zh?&vjTiVhXlC zo&v;HPH=zvYSA?BSeTM%OMYa;!oAX)q8aI@*}Qd4>$Eg$~xCLjvculm5RZXKpVM~+Q+xDd=;J;|xb)0vr1Ou+Ee zJh=MlE>C-srmFk%P~Ynuyt)$!Hx6uu!Zv@ptz#67b>eAB0}&)bzl4|^;%B^3*#c#r zUZpnS8uMBrjj&PTQ2OvGJ7Uc{I@4+rFvo_Ncd0?7-&&9E=XbJGf+d)@_tt`22ZcPh zP_iLkoJfs33cE6Fsj1RZbUUg^#jT6!&%CwhI@b$Dq5AlHRv6CIn2BvyEAYc|KQ!%} zhF|Wk!|xAt*q=YQGZRzk7PH47k8K(44g`I;F$nAxW%;AVokiHO3&M#^f9Y8a54;UdbS=~VnURT4gb%s=N z#BW;l@*S1={U06fxQdd^SFrs3F;pzrh(=e^@IXL3T6H!Puclpe-Q81Ac$miuoIgkJ zKItM}BUjMRx_{`sMp?3Zc?UBp!J9~$o`f^pSrTt}ll{FZkiHGFg)JuhKIhqfV&9qp zD|FwJkks>_z}p&9Qgdjj=U%eO-yiCI)-m^Vr@&bGBWR*#%<3nae6+$QY~k^3SNyw)m%uP0q0&}$3pc4Gsv 
z{U{FyF3g5Tll8g;+9eu+c1H?nZwi3jdUeECWVUBn~3eNZ7_4OIPB-Pkml=Z za9{lgpSi9VjWW2xsDB-a@u#C{d&@MK@-&t%tDKLIt0sfLhXkFw_6J>Y%L0~M-9yq} z4wBTAY+_whK<4cr#LYs9_BTtxo56FWFm^1AYth7M(hYP<$s2*&?ss(8pfuJSOogTi zHtb)?5#(oqH`&s3leF{ZmiK3t!mql~q~mB5-$hlYlb$$}%l0PZ&%{Th`*0#r8f+y~ ze;H8!&3Wvs)qzac#VYf}If_8e_b@HIbzr!Q(ixg{=1xjMWXjU9I4o(2pY}Xp9xY5n zi#a?m=6gCDoX+nTm5z{OuNITwGpCrd>n@VbATC-~?@b3&f?3tp8PtEchYgp^U~BtE z(ZlK^aeGY<&CvfwEuDGW#Nh?7UO^Uq*=zxC-k6#5V~8A`=RvA=d}SJi%c%l$o_IJm zQcvG<`njct{P-D4zVw?yReKKz&$W@2q6pS_(2$Ny-T|*=L*ZKcN0R;MJx!GSPHn=|2ksE95rsHK@Z&B!XQg1x;` z8fKo}LB`5OSNKk`pubBaVP9YXesFbXqPub`MyECiDxdM4PuY6<;>sEN>dHo%qO4B_ z+S(b5xJ!_6Dp{0$;|&cMybA>ZYT$3I1B*_*AyW6>(8>T!Jb3FSnZ`+y_Is`v_6abAJuxs6BTJ=2h>zrDOr0B!J3KA${l^Tvm|B8U zD*aL0+7UO;JdZ29SE2irO!C04g2_=7!*_daVXH7vP`!ONNj$)t>Fp9oOt3uc4X4Di zbS%H)N})DeYXmNPLP%A=7M32~O(y;*pht=V>D-zRv9#6y!WtbMTtL&8hBZ*SscUITC z2t}s8aCqiCdRbjesHwk(TNiYMY|vDKOLQ`9G!!SA9t+{0j0C1{eaph{IPAM+LIPX_ zL~5@CIle-JEN%y!^ivZ}lJ3$O8awe`={+o(eFMd0$KmR^p6Gbx48O{?rK9chsDsf< zIvkQm70ztM;jIeja!(TXTg}0#DnV>hc_}#?G!ct6w8-)kE#%bi+hnss1P#4D9dgFS zlN7ruGH+oZ#`4rFo7ip4!^~Xjcjh_$_P_+4R$H=N|9xbR+8&}av#jVbi4syY)|Nba zlfgXQy?_|i<XH_xI#MD+dyf-e5(E8I!R(f=?aJch(%96bHv!64purby>gv2rKOTt z{F5QN{Qu(5*@x(-z=>>Os1X?wUq?5M*doZNY^-P=VXr25-<+VMEK{i>Fp56>VoEn}GQfXCF?RRylCvxZl{DxiGN3sNrjhtxdwB#|%A z6JbL#Y`Y*%>qMI1zhn^&(l2M!`kR~Q!9leM>#?9?knurmDUY9!|Iq|6fM4qr9KUo(|wGhJ)syiY8y&&2hdaP09JVwFk5UQL=k1x zR8_N>+)!p|(yS2npLZH%#V63q^*W@+=K`Lbw-KYF1M%`CirM47;`*LWlwDSgbAw-C z)vSl8pBYBBg&!vCiWak*(|)mGt)E5atPwpqU5-k1)=)+JQg-uLD>BLRAh^tyVHR9@ z$#(jT4@F*JMHNEPp6JdkQzC z$wADMDB4>SP9M6{=NycX~hYq+Uhgq4!?Qhfd+4j zHN}@c#u$G*i(M>!5;a4HXp*J@JG*ivaXJ2+b@5rpR=+H!qtm9-Sp6V!_K_-#ziJNc zde6x+FK=SCqMPJR{6}uQO2d;=d(e3U-@7a*M-S~*%z4&_tAk@O{Q3i&y7Mivx!3Sq z$`zdHRf)X*5uj3GVIjJPf1p#CDvy%!$Wa==r{D7RYu>f7|At#SECEO4`9T%YN+|J4+d^LCQBAygn3K0fl*}(MDGy@gXZ2M z&AS%B^@urWd14YR8@x>>yA9I9;~wJ0y@k|%)fzf7J`VqjZi2fTSeklg0;V;Xf$4$0 zm@)Aa{ZMz4&b&GY|Kn};A(m=vcatw3GvL1?H5b5|utn&doyB(*ym5E^0Pa61;LO^l 
zbF%X-IIZJB9Mo!Y<9}RXKMrr8OQM_5zdW2+C8c9vfGlTeB8SIEoIw6tNVM5l1-CvZ zCpr;Zar^mB%qjYhj%c7@o%!-9)~lRGxRCq5UyBisjGRXga;;&&vk`zAWAKZ=dY7wGTsceL$i z0`{&6#wmy6;X}O+_K5oEh3&e+#&x6N6z7djE4-Q0PG!W`J_J7&zK2YU49q!Hhm!K1 zY#iUO(s5CP2@mV3ip3a=tQds8(7*6)uK_j;D3C=PBdE>a!>H2PK})}X$8Lutc4=}2 zee|;h9|Z>CVp}EdrKSaho0*`E|4~f87zh`$z0qu%5A0m@1-$1cg6YGXnBp)&c%phI zSPS@k^h6ot{AOWswj^&B{tT?K4>0eCAb@G6rjg=gZToTBenNn^h8EG*$N#V_M3qzS zoQ^w+Ze!xH+o*Y8oa_6t7k0Rff+O#mq2=^x=(G9=3OgI%)#KOD6-tCpOs5EA{ybrB zPr8UJ_nk6#9etm;{aJvm88uW#Mu9Y@Si$=C^R$JxZAN5fkczL$Y;nE|YJT5Cd|h=> zU2hV69;ph6rSg~~+>6!fuh4JS0PYLrZ~B#b(emtW?prF)Rv2J0_oN^Cnf=8N-rAhT z`VO=)lIA`NHgGR9bhz2A;@r@gQC#eao!s4rhlm5;;SspRu!To=LOstQYyEl$hI~CR z`C&XQ)oH}hQO)cQ>u1!+N)2b~b>e?!F+vTcb(~~q0KKbm3AVjD%6ALj@G!3!hphp*76XWbP4N&v4G@if9({oQ8#$=~Axa-R*OpNTL@8&7f zyZ3#G0r^N4`K&=7x07t9+s7B#$K zr%9_4SFUJ|eM4MT;6gD~}20f-x&hRF$X!msxx(b^#7|r>?U?Y)Ks&@)B;)ACV}-Nra+!4wy%=<6FAUzEL7k>$ z0!r=?3dH}Cj8iK~wennCWnYH&YeV^-!d1Mmoi_oe>Jv-n8nmKQTy{rS7&eH|#*kboD}2I-yX80vgNmu>G-#FD3byAzk)mOj^uVe zcEn=kC#YE2MtZ3Tok}m^D!vPn-`Pl0W>1F689vP7v8k9obP&llX?9}Y6|!XJa!lF( z58Qft2-y=OXm>s$+`T4N=zizAu+%73xYI61C_5rcXnDp%xJJ@n=oHo}%$)g(p7Xs4 zx7$vzZ~t|XfBU}Ch*m4?rWJH@RXqKC^BW16uf-i-CqgUBN~|<0M$cR^?o!$z6fKwF zl4s~*pIs`^PzC5N42HltO(ZyIBDdghBf0iiTd1;Im+4-lE9|PXg*Q1>RHEhsjo00W z*P>hKagjH0WvO`L@l|+XjbygeeX6|u3XXe_fq9RkacT(SqO>{;k_^I!U3zH8&wvA$ z-0Z>1@9tPBgm)hTi70Lj>GiOIf=4%?BwLg8ZI7d20}9;4`V)AhAcMG@sBmQ+ z1NbDv4P(6ElPja4qIl3j9Po~)U9Un{MRk2eD4+6TF5hBHN0qi{yx#OGkx^cqp{@AtFidB z-WQFzLUfKxqX`$3VEI`Mm^<$tO?z8|R=-}*nZgC6ejAXuJO4=0^(`Q)+6tR$HH9rC z7^ZhmhQ5SZ!WPq~uq9?K2;^0TTf8-BjeH=SbLSX^(_?6Z_K^yw{kPbKpESuSYYS9t zd_%U*SEl)Y3b8lk1-WQ49#T#DZb0%mh#WYARVHhhxpq6~>LS1`Jh#qhpavZpe^JJ! 
zlv;@z=~aWj^aK+O8&?gIVzpQ{_}XHUYM3wDic?D>qVL?6P_?&*X6_fGgIh|z=lx(~T`rcQmn=lo9>@9KQ$!oYE z#~P-G)ad2jTbKd(qM?8PtZ)Vo!iLZVCy+jaFqC z^_Eie^;Xz3W(-csnMw0+Cs28}t+=IBQK;NMhE`o$Agoyy#pzr?vLaRm&j%+#e8DsN zq+lOt@>bz4se+2gvEJnRI}vSu?u?pee$ZGCFVK`d$SxgUA&O|VX3Dl*AYrLeXf0)c zTAwB1$@P2Gy8i-m`Hl&gUrmJC+3tY5#>1PwbQq!joN|IBurKIhVqab3Gt^M}X^ad; z{AW&XG_+9@`Kz=}JsaO{TPyM$ypAX7CTxp%3ST^}(Dqj=Ib@Vd=Q-Eny74=3?AX0H z_0)X2$fJgKJ?@|;>)+B_U+h5s`wW_ebT>vdE7aWqbkOO}7B&mtV;cZHUZ{}giznK`F!=UTI1N?Kb9gX#U zX@iVCvTDI}wA?{5c$POkSO<|uCH}rI`%7r zk>YM#vBecnY)Ph*o=T(f6jkys(o9|01)-NX4c%HO@uaUW>Q4JNVn?yACDbbQU zf62u=tLUy1xwLXaAUt>Ug8s}1EKkoOa(S9~e}+2hTLjU%R}~=lY!(^SvPf|F?|8hY z97M;LJf$Xk3&1ju=ccVzLGu_};u^CabE`t=Z#jZl&LK2kX(Dv{o8h1TPO;JoD{!Us zWnw(H9URLPF_-7DyPvW}o4Q5N{xKZ~Y`#)E!)PLxmVpa;C*qW*S9GcdD_R@9n^>M! z#C!+sxkr~IqVtS!Y!;Wol77I?g9U5_KN~tayd$r>jOdniA!MMa0OOu&^7n3Y@bac8 zlqJj1{B|GpC_PJN8k7s>rD)(n)FhL;@|miYE@bbrWmMw!c+@|a$#ciMXbI|r;rRsU zDj7>MZAZedxKQkJDJlqcy_Nn7Tf)z6LRN~G_IJei+)63uiQie{3GD^L2Edh z(N68#chigVpXpz518n1Y&>0Gkao5CUxU6jl*Wwe#JzqA0+i+_P=VZ5zyJgLx>DdcJ z>Wd_|ee4)Gw@Vz)JB`6iqY3lm7qrms2^O%Y<|=)tyB%Ik9z!oqGQu5`uhRuHrhwYK z8s>g^JRBQ$3aQ=AWLmt!d)lw(8`F#&Y-_2w%Xn2v6J)7u+ zNnz~WxH+ZWO3gDlu=K^(q4!pdln)!>%fScqiJ~p8n=6CI*7Pz(8%)W~=_hI4 znhNVX^k(=iy4N<8S!p;(d>)37rD+?f z&Yj(YmHE=b^Ea-6zcgUm3KzI}#TneJ#myh^GYl6F_)Mn(D{e;NBTs!yVV_fPVn=1G zrsB9u?Nls7nr-u|A{n;+bgh*G5qBM=EN(G()Hp(|o>i0A^?NbJ;}qqdsGw``QEb0x zj_%W?albe}uNp6*L2p=Uyd)TgvJ&95V5C(%qJ`j=H zL4>cuz@YglDRA;ZrR(vSkhCASv?gHE=rSyP@DjaEvvDZk6UP32hG@#qp`s#sW~Yeg zq|U{fetynzkiZ{#@0i%I9J;8ki`1TOp|3Xt;k8OB=7hp_CZ}&BUOqV%O@(n({jWOm z4ITR7>~^Z4g!E!z6-lkRO;_F+ptQM$98wRWoBeO_jFTmx&23@J{>u?;us(>(tY^d0 zO)ME1-%W3PRYq6s<4nZ%`BY2kC)wd#46|&~hzoBRcf%jrb8z$KNZenXglq!e(f{{_N{{u#RC#OcUp^0uuIHf}&%jXA-2>aE zDS~0PB8IzW(a}7YJ;KBSGtP7{HM?R#^nDa>`20i9+B;y0)=sFebA^`PJg z{Ft`@H2L?Go%B`kZ8=T)pGU*+sgJ~5!=C+NJeGXF_molK`L_{85uz=t-Pr!rZ|p$D z9x`sRC$Yp zllwwwp1v4{3DmH7$O5g7&&O!CiRwo(`0%O<2FUUQsPrrh%@*NOvluKqCcsO>%^25G zi%Z6y!lv#F%otB`wi3@kN*P7(EAm|`{Tdqg9}4EI`br-6=aW6BUb1<=z4?7{HV*1v 
zz_qjZ8%N7L>^ge^3zIM5tg09U{ZveNzYP6PXVbGLE6}h$7Q>_4m|Yv!q4UuvG~X!@ z+Z(Q+pVfc#Mc@g17v+Rf*T2%_+edK3vrXtI^8kmBdZ7Nt=d8`m65Mml5nOgJracFj z!ZlTXCK?e;V%I5yO!OlX89<4YH2?f{$-ri{aI!vK5`tzk;FEQWC?BmN5%a$gfA$-5 zoA2uN>F810E;9^M9E}sjTg|N+TWt!yDl$KxpOo+?mWR3x&f#}`neEP_24$cvnp}u__ zmQ_XLA4`ACHH=5cFQ4dlQ+M*B%!Z5#ltY8=cG7Dn3A*w@^o^$kc1+14+QO+c##0+g zPVb`fp3|Yx;v~=Z$cAxSO5o7$%fx%RJWRam4EZ{_WRv|yHh0%;k*{|;{bQv~&y1*~ z@87jRaEF$Ft!btHtSrf2af-Zmx~1tz7(o%ltrCU`x7+)^3IB-@8l zD#)cwr5`Py$PtY-^+avR1;48L6E)pV61n~&b(kAM6K*9^X-NwtIho7}JV;$~)9KF{ z=B#CU3wyh)mmHd?MBi(V6NQV6Mc3cOuz?qPiKR~p(_2?djDs4PyxHyKXHhu}${i&a zYcG(uB^yBb?l4&`Q%PN8mB6MknpN)`CjQYp1$S*I^GaI+nkW4vrza~xNrnO#u1uxb z|IVTK))S=3dNrxf%O#3xSBZCdI+JZ8fM&678uDp1`8SPsz}Q}7UiGiRtZ&n)ZP;Pb z{bmCF+dBp$YM+sIagNFt#?qJ%l^D4~j{B4pgJg#mH{hqwHA{42L-%j=7mVa8#`fZd zF(K3?c0Wq1#?$T+UravXhMjI1bo_{7dVYcwElc-dx|$rIl{b$zIr9v^Sy!n_&`82N zfl0^V$aymwBALA(cTx9tA28feLg}z3-rXk-Yp%zEX@Dez==hUIY&UbdHUh#50By$> z(dqG-?E1UXG<~`aE_^)`C#(*mYMbORa*Za2{L@68S360s+AOF(s0oa7KKcGmNbYRd zN=qkc5y#*LMqZ4doLFOp&o)`mHqeDNy@yF*v^Myei0HOYkW- z4H`_G;OOy>f&-@uX~nh$^wx&CVz+1@b`(W0$94g5!A9jVPn)O+WBvnJ#Uu<#&UZFXCbM8x=!>baa~#iz@}+ z=-eg=F3s+SXn|ro`|+YLn{{t3P7IW$*@efc!~EGWUNMe%eQBVZMyf-D^hxO7-$k6F z{xR3yTi}csbs(jE;H#nntBT~|`}|E*ud<%Mp%W8I#<$V9p)+u6@j`G*(12N+cuwg% zfWYs@;I1PnbeKC4mk$Mz1Uiks6_UWp2q}DaCyoY}@5T1MAnZzb#v8(;C?g?`E;m(OVN{45mr@OgA)C2j&8pz5B<~#Uj40v z%9M+&*C7Uz45I0!_P@07RU9}hdT%c97=?YSU&3t5eE9ve7)>pBu3Ys*{PgJo`L9j} zkKW8@AI_jyp<{!$97n+7BfLw-_AmOzrl3b}2)F;F7gsJ3%z3?a<8CL1azidFIbUC6 zuFYl@7xmSUYgn8NY3n#TF#i-d)GcJrZ}Epa$tm<|%VThin2uSS#&H4RQ6Ltvh_2~f z3s)={@KU`2IxRQo472y3uyQlpD3ZhAm1~Gvo)Rfhcc$0VchljM0qmbK2QXi{M$of3 z7_FLJG3`Yf-R7G|43}GBmuec*f49D3;VM%wHcDjQ{FMRiqh4fxaRiR}wi2T6@Wx}F zw_yJ^0MhsH{KLyZ^u|Ck^RrBjey((Y?nrM&cDE*L_3JiM}RPg)o`K`Ioy8jl`%X|RM*;mMK;~x;EdkMLsnP{4#hyHJ>apR6> zr2O#-rsw{7@b#C4^|xw)t>}h{V_ZQ^<~*clNeed|JrAG1r2rm^hAfv7vO!G-LW)Ij zv_cE4g>UG?h5=G$qQz}mr@&=tN^`!Iy4<5>EohU_gp#B5xF7H0kXTq^#0nek22JHw zcLZ<&J${_VxM1$bk9Az}6IJg1UKMWLO2Vn0Rpze0H{cE_$#atrDRSDg-k{v{CajX0 
zNds&?U`ONvoK+*vPf1xAb6uSKv~w)CD{maaDPz;6Ie7FA8PEv)qFqm?|T^>dvFP^36n*aFl~7L)SZGJ&yCqyN7NU^vKKW5 zNqxyAh+Fak4#vzAUdjsG3sbD%3ZpI zkLNVtoZxBv+J6+*o@m8wop-Rs&WA26rdV{@0^+}3f|@PH@P4O0$jFQ*$@^OA@8{=8 z)CLvU?i>y&;tRmfDIF>|JcCbiw_(je0sK_mN$!7A0jZ0s#4o`Fdm>bYvd+fjih?B^ zjZ}o_84NZ{jloWLb!?RP!a$(|aY&}rJ^eMxElWciW*a_U#lNME?vdHAKH*5McW~>` z4#>2#2UnH7pk6hD$ka5_;i??M2swDHi&_`(9)*A;(eccwRKZvq_8EJC_I)>|y+oW^85lxYz$l`&Y8uH@K7;w< zSK;%yPI`WuKaF!_30ubhB=>ofuk&wX6z}sG`*tlf6&<6+V^)x~{2OFNp*5EE#R{rg zp3q~im*~FY>riLx4T~yGgnRo{$bIo=Fz|CUaNHUCHO~(VUS?6dyLxc?aT)pis01yh z{b46A84r(tS^zVMAFZ#V>*?R%)NTY1pU2|bFmoJN9f2dA zMA*)AiVbXeM(&#!)K)LXd4?RGdODSZgR{6xzeVUgArd!V-^K56|B)9h!6f--1WfW( zfg3H$!3Nbq_1H3oSj-ga4+VhDzA~^cQ5VW6yds|0v_YK837@Ja5Ld|r`g`MJ>fU;o zE$oftJ!ATu--8LHcI-6VYNSfzr<(JHN)?otdxKdbzXO*vXy5nn!m{-H!L(EdZjGCyHv$h!+mHGPlV5$Y=v_+sR%dRwifPAs)E^LBZc$cY!Kdb zo-ACPWgxU0GY(VRPSE$2)woP76mDp;M3IQ`Y$h*kDykM$9jK+GKgGP(V>7gFO2?OTC@Rv`3*4(z1wM~0D&9j4w3_2X#36r z0#g@b(U_CiG)@!Vn+-y0+z6p*m$7K+JtKV1Y{7xA{U|tm1Fuv(B%LLRG(>A1&rx(n zr*{TuAuC6_!qYLTZVI}Xq%%E6vJk1SNJF2PFmH6E@$UC;jB@f#I)9oTRks$R&xi?F zH?^OZsns((b6ZJ&xHnXIOoKc@1M{QNl3A=fiI__)#F6>GF;qdC<5}h?;T1|A8bwgC zojw@V-^4z+QIBn2MqI>(Io#A!Ke2dj6)Kv|;qoL7aPK_cqF1;PH}k9oH$Nm1V-I(u zqIChzebj(Uocb_lXfu)v*s7#BuIRZWDHX?={@ ze|}Jwq$rg6SW0TP>hsCrB+z&efP2kD(c%1V`njQw{@Jw~+Z(O%sI@Z66f8t;;wBt@ za}_pxzDciaQxgPE@+DzA6zD(QbWG&$3#u&^LBHuL>ML`TB<-1x{WCL>%}>G6Q{Az- z#})rvE5j$<-gxi81+1xv!CEMVaPbZpMP0yWcr%P0(ub(vvG6nhIqX#21-{4j!{wL) z{`M?~t$k~X_ms+*t&ScT|Jnz~F7u@=e{5;Nx$|^Kei9bsq~om(Ik;YZEv_!>!PfUJ zXjqXba!*yjmq_OjGw68Wu^^aumx=703wQ79(OA2D zI?~vbUQbnp#<(d&EAz4_J82qi&&i{IE~zjPk#j*U;Vv0x(mC(g-Do;@75}c!A4`*S zmcqhYOF=_wEp?tZ0+*b8jT^^jW9iX2%wPQvHP!mDrus7VJ6?r0bUW5W`Cvss8!eqx z#~$)Oi6I}y<5mkj)IYP3on&T!!xIjZBDqBBsvM6>nRD?+*DcoNuqUFtKguZkphDkA z+R>-=4bXEyACGDe zfvrO`EFF%8^E;9Ho*fG>#g1d|CJ$=yIfK<*lY+-Geb7beB%0KfQX{YRc;|K@>3T4l zmZ)pe*Ye@8OtF_Jw41@?8W|YMsG?i>9j8L)7&tJZlQtjZZy-Hqy`-`$=i#r_#$45JO)hkmJl99Hyj;%jyELn)k>)g%X-Psi 
z?Vo}~@f-Bpn!mLF@^jX&<_RgcC?=nNhJ*ApIaro-LXT~M{~WOj(A(2iqabZX;I8ng2v-M&HzcaEF{{(KIwda)-=@Cz2r z8<@jPi8{zUN#y9)$?oLXff@K>MLv1$96>s)*055$iv@ERYNFdk1DqZtMz=|n;+Zvk z*6nMEDU&Dh;hie}n7bL{W~@NRvL`tD=KzV=xdKiUO@LA3=73GX3V4(w1zSCNZF^%2 z^~zAizc0e@=yrK5TbYEt{JY4oOdP)y@4|5_SMeEr7=JD4g1 zDxozEmR=nL#!3?GmfS5M_vk!3wCf=mm*36Ky|IUtbiPOw^{T0KcLu3mU`Xr+A2RoC z(@9$$pX<4e#M+E;So3~}&dSWE9nGU~ndwI=vWulIyXLTk`kxq1?--i;57U$#jZDQ? z8+5PTjX!D>;C~FAhd)+-6vtE9$x22_QIbeYxZh)IAxe@;X= zLBo7T%^U1%UQ7}e@0jdycW4Y#Vn=vQW{|3cTfMu{J*toYVjMwck4>n1>jb)zTgz{M z@C^H|IZ&yZGF^W06W6}Kf+f|eEOoIFOX`Tn>Z2N@Y}JUr4(73c&41Cw@Gw_;S|9!? z^|4EV9;kFzKBnX))v(~Tq)3(zET26LLxf*p0v zOw`cEd+e#=56K!(`rKgP*#ma0=NAi|sc%`|5 z5&uytl0kzX^|?jx7qeSMi(dEQB$f=mga2`%>@qvhS^)9eFF?V~Bk*Xe6%-De1@hxI zfN8GKt9`JCG*-6p%L@Ex>i#r-NQ46$K0u!tS1h2b_Q9+$&V-qEZKnAR-|^+o-EhUz z2!zENw1u8!R z!y5kn7+Fza_BdSjD}(o&JCe@7K8NQX_w#zSo2dR(D5Y=oC&SL?*fa48HunggpqZbA z-lT!p&-5P^hiZw_`lQ6yUq(=$O$mKpT1T%_Gw70kDp$PY8tv+qq47S$#c?l}ko(^t zEZ_72GcPt{YU2vXJGKIcUzMPJiO;$9x7-nPhErv3FP}e44~G<_vk5N+CXw4-{1rNi z_65!Z^-Tq={j93RT1~-~SEt6Eh?8SCgEZ+=Y(3res-RnaTc|lXf)p1Xp^rc0m|gT0 z%DoUys+W#22a`L@vvoWbD9(a;8-$$c6CcB=<=-sEHj3nRctR<)LLku<*fQ zjIwDLT{oV~S^Bi%6yN<^!Bi>!fbU~IFG3O~y1r+J_Nw6c?>>Co&}te~XG{ulE;wYB zGfny}$NJ_GCdVq`-mBx`gw!v~u*%Lzzrf}+MYazE$ z6S7qGSoV(|F1f`VzPN{Bp!)zWx=;pG8^CrrgO13bt5VJ;^^*i!ky zc#@X``=%%C)yC88i?_LGiSW6*5weEe&l?La=a)j{$?H(^TZ3)Lyutad`^ns-ZOrYB z%(zoag?rQfy{z9~;2Ui#V)p_iX|VQcFdjC5e(HylP8;C#^QUO9zzyH>(S*E5tC8E{ z0vdNPkwsYLfVs0CB>Lt+g1ReIS&V?(+9;6ds)I*q_n>UWBUtM#a9#J9(_+&=;m(pq zyUbh6w?GYz3s_D4mqwU3>>9zYg@0vhAH9OHJ|!^r_hA?{DF&VvCBnR>3TEWVW6k|J zV3KD~!8Wx7*9Oqvxt}p|br7vRA4exo>R^z(7|ZE7e<#lf=RbUfdTmiuylW)6Ogqm% zo+dchr+d@8Ph;5)F41D7Vgd{*DTl{O-!nbz^i*DTq)adx!m@`8L;DRD(!0VqSPgpI3duA zoRYe@Cy!mAt#cYF_IKkpH(i)M-yJ@sedEPvk7MU{bI$Np4eJZF#v|S1VaCH@sQP;Z z*%lDH$>lTSL&fmR`x`qk+K_%N_TrrMKA`{3SCAet2n)unhR++vK=!#etmDH#u#^dh zm7+>kX!-$4qD^S$bRl!$6U}G;vnPX=?G*G;;7^_lBF&FaFnH-i8mP9BUar~2WGe?z 
zd_@p0UYo<44KTq)WnY>4d29OGs(}$<>dU?_C#Q-;K4HMx{WbkEvYiPk&ShG#$w*pnN1sGCZ08am$>(~wYc=# z67lhGuHv1OJjDNeT*VsR8^v3jJj4@!l=ChJ?5T$o>2iZAd$m9r)+~(14cVu$Hr5gv zoo0fA?rUlk*^<(m!!%;xHM+X+Em;aPK|O!rJ>T0(Q3aXQTsnewpAbBUy7xpT7lEu^ zEhqdDMuGL=6!k-yY6FB!;ZYa%;gSU0Sk%o9zvyP(Rod_{U7c3FIxFH%FM#^9!EF9j zS#s@tPD-n+u}yHUP7?pZz-dzA^wqmzjmds`XCr3n@oQLA;ClGoHANToBi60O^rU9?E%*@A zbxWW@F9O*9%E>tK>;bYB&7yqwG}5b9A+LE}*!Zj* zQzrND?s2-*?THKJ^l5I>Xu6WUjSbK;#nwiF=iYaPshoX|zG1WA%hdnizZcQCs=*VY z=k#QRtEo6hwxS(*JBQ!j;n5d#8A&PzB_#pEuNfAdD5Hd@b=d@Cl%o8 zR|k5L=ZAI%p5*l1nyxK4$M*D3qM~83bZmGprus}~p{83=d$S3A;?`KWm@E?9Xk*~# z6;053HJXbnJdWuZ_qncsAlx&oALs6kz++eJD7E4dX7-GyuXZmiRIRPyM9N7Rbv~RE zPJs}3ejubv=irZ#huOU_Q~tqRb-43=4E(iuWnrms1j<7*A^EQ*Y0o&evsl zp<+C=}~=yC^%*$%=)i z)1q0~zc}tz_f^zzKSBEhm!ft+G%BbS;q2ieF7kN@t`%4)X`@3ZTWu}9y?Kmxu2;jp zA%Ah`3|Y$C^Bg}f{EeSY%y9ZOO&q$ipYZ&9T69}W9KK|X_`|LV;>h>2#CS$YykLNm z_>SFh@sk&s7GM7zL+gLjX`H}G4|5MIjh6f8>!!#;^bKL!v?R?rZp|dcq zf|WFl=XyRIrHsXI+4`}*ob*hg6Q&eIcTyagt-m(4Oabzel%(4xr!akTG&*exWiIaN z{K5Q3Om6xTyxnvS@7KIzUk3Z*_r-=VktK^3&AH8j){LUJuhY#X3y$LUVh67S`S;yyP_H&l-YQ}rr_O*! 
z^VDd=PX}6PeU!aEa?D~`lqMbaPedtxELC`l1q$tHmNa=C?NdlapNERnbG9Fwx8Jb% z0Iq$2rJHim|(08692xsUmPJY6gGArm8S*nW`oPl0K8awTS)lG6P-H!}#D=IXkQR zjp-!LhcAbI2;cJ-czDK@73YajFGtv4h|a=FZ+~iEvBdImZ7QT5K2Etp-ox>Xh>rO; zQRKEOWYe!KzTSVILLxG#I_VRw#aaR)TCk$umxF*g zYs{;*q+8+BaOjZ(Y~SaBwAW-fwzX*jT^%` z8vHVpdo;=iZwTFU1?#`~?VU3lXfzcXiu<{B6Bpv!?G2=P>I0wZHwb=scaiJP2WWfd zAj|72#@jZ<_|2mQbvGoDPuOI-`t=w~Q%j-lglcjy7G^Db{OMwEJbk>mKyVip3VW&w ze0({Wg71k)Hrkq244Vd;Wv(C^c$nGFa;EuTR#R!xAT~_Wh;pATqL%r0c%9Xn^mC1v zTXVLK?H6XXJGOgaNSiKnY@SYeYzCDV>=)(s?V<}}M^ai6PYXs1F7JyT+>RfSq@SZG z=GJEmj+uG1;9)4#R=1+x!>zdbI8jvY3*l}$T6{nD0C>d6g6qLtu-%|Tf9}Ll$#-8| z|ErlvDNcj;sgf|o;}ILk-R0F|qp)`DZ0>s1PVkG#fLNCl7HVb#Hl~hnLNyZZX3N9d zU>SimCI=@^2@K$lVDsrCM$qhuRf5-%P;s7ccYlOr67EX>+*k-wX|o&PSAgQjxol(&5do%8*Gqb#43T4x?jT62yH;VdP* zd(UtG?*cO$G8`IObYQi)6t8MoOb#E^D89{43>wY?`rIlQ6cMS zIn2TWTha3NY&z~F+;Mk&AoHQZ9VN1a=_@?sx@Hc5oLHhMI_cPTGK_Xxy=VK<2%`HX zSifZ@rub~5kt2m(c;Pyjxxa(WH2BDjXO5#axsNeHUIj7&2Sa{Fp1=q_PFDq2)zv$; zIPTm+iq$_r*=yHuSDTko%HZ3aYWjJ0e>ITO)0u4XHDz%{*ckFJw&NuVWU(+K1-}nn zhP_rxEi`p|*^;4o0t43)-sDPA;gbluvqlM4C+PCqoc=P`Sr_o@^HC(RD-+vuqG^cY zTYA3IlRE!XrqT8HDBCBK4&E!q$5&6#&CCFDydO*lyQK*Jn?oz7J)?w*=j1fsk5>Q8 z7re+Sn6Ftx*53+fd5?_vMCfiB6*yXCr@fw<-|CaW(KJr}Zzf*ZITed#AK)gd9L~l@ ziTc;c@!k(@Nu&NUsXoyo&$<6ldcjRvQ8tUbg&ouTHA84oSrC?ZCNuAUhuM}eN9um& zM(%#j*q=DHp z@kqgI+BR{3rS;g2pjYw_HvMQJrw$bs6p+Xk7~Vy-eh#K=KMUveJlWCDt;;#nDOXcD1ju3KwQbFZYDrq&glFhzkIuqSY+ndvHLGpMO zyWfjOZse)bA&aJriX&wUB@%_#Qv1=V)X{PV&k65`!*|cHuWD+%%?BHJ^F0LuNC-B5 zmV)}Fi^x1@14!k(WVc?XLF7#i)>S*B?HvVZ220Fvli=*4uVbFlEY@{07PlC1{N3Ds zyruIT*LRtSyygkr>9Z%;p{Y68e#488pRPhd<970u7P;6cQs<|iZ(|Ohvf0?5!cKW% z2|XzPkKgE!h|coy+}d7CR_L?@GRq2Z5k7KJ=2e6}>qnJs);9VVIPaF3- z;k?0xSRi{E?hFoKiSFNV-{bQrwM&|fm>WbLl_4xY`vT=oSWaf0m$kIRgCF5W+`i zaNUMF+SafFyxXdoQq?7td2t9E{3qbPHVfMFOOd*+Pv%T#GhAdj0H+MBhAox@SfW%e zeya+kF$FT*_b($z!f-JKD@k(=orn#S&r^HXF>-L;VbMOL0!+7_fO~0MLECLTtk_K4 z^E{rt+atvYDS1 ze2OU?UBNv4nwUfFXSTIx5X`PASa!0M_77Hj^O(=GqO^r;&=b8X;Wab* 
zF&OOi9`Fz5tfpe$i`=AqYr3!T9o>ptDZkQ|A~&SsOoJkRrSo*Oy(sL{-zZb3QYdS@ z=mH~uwXmX@JXvN&(S*LwRPgv7DOV2^Yhe;){P%;N?>F>0XfS41@w`T(9Zkv-^ZH30 z7GJ}Dv#mOR!HA83Hy$SZ=`Dp6I;jIS>MAQgciX^`)3Fe`lu=2my!b!MTU6{Os4oP%oO?$CaVs((*d`6wwcJo*oNvy$=4UTML>rHnO>>sZzNi8y=O zd;EPfj-pQcRIYViOAj;6$RPJ99gP>fOjd`Hd!K1Bwqgt0VQtUC1n=YhNs4rEh7~-^ ze1w~a2@H*^8~B1N7Az}9%zB0^u(gr`FE7cALZ>gGPfxYU&F4K+d4Gzu&X1sDhf77b zgzU$78A-}3UP0^X7n6q83k(*0;Qjs!p!E6-3Vl!lTa$EA^HL_)A|dq49TdU*r!}<9 zpA8P1-{Z!2vnblUgNKHT9tY|e$P2V3l$Dg zdfZ=>jUIymg?8|={yp0zX^e*2gXvj)BRUieB#F93up&YlR&2?F`;o@v5fw)bZBuE* zcpq;3XB~Q?F<)d_b^`Y-Hl~I!;>O79kh=OFl-!^TmD#rJN3;o-Wjq*v$1Y;eJ{i&F zvQl(jGlu@VrGTGLl(5q;viNx;2f>9&hBS6)BKx~T*jd{Rq12V(IMs0rqpAqHComYt zEZxl2wx6W?Ap_vP0i!RO3GCIIb-2=?lwyuaQN#ml+P=A#0%!g~>+-pzXPZa071r#b z>S&m{ZWC0QH1d2QanMfRc5ja9kC zvzm(+7$;ZDoimMKpVvg7#HavfnHj>S2UQC$^JB2Z3sCXme;9dQjglH}F&AfhIPqf# zD?OINq^fqaU>kFqY_3O-w^p$B4fXin?A_eb8F#RtB7_x9Jc_TvT3C191iBqnkADg{ zlDhc|Z{M+{z z_yWpz$Y9m#b$kbW#EwHRS(mvk_MM&tuUgi#{a=pJMprEwb0d<{suoghF^_+zJrs@e zJ3~(I;#usTP`o(17T=CL1UnSp;J&v;R6Mf`gI#*@?|+S4WJ;JQ+fx&6>Kn8FhU-wr zJ)xT=^M!X=G@6FzUSd(Ymr$gc%%(^m6^U=k(Z%jw(d-|xa6#)APQDguk#72owOd^0 zy)I8<$FsNNX-gH-j0r-g^1Cc>gbV5a+m6S}7LaMqA_}@biW6*7I%w z9$TG-a)BS1KE}N`iVveMv**6&AU_}!CwQcTShALX@pnDQochKRkN4y3 z=mg9jqeBMP4HPP$Pa37wj2TCgX1E9ongw@NxW3RyR)m~Ax#(p!5b|~m!TNpUaQCTM zu&rK!90d2>rITma*!L++fQ&PBt;Og$s|Yu_IFR4e)2wKy3dWtDAnsGQ5dWt$SA1I6 zQe3~@M=aN8DW1__CXO{U62E;iMXa^$E54j$Lsf&D@Ws3*xbER>HabsuC$8B@gWA4e z#fqc2J1QSXN4ZksT!9C7DTYbB3&rVj?@%$)mdVd7W&sK!wqQgQ1iIgY$(wJ1;_OIR z-y~`IDZUL>2VLQEen-I{gJbZfc065up#~%OJ>>Lrg7|r|=}gCtqlwyvFu^+m1(6@_ zZUV6kZzlfs62x4_L~)*hkGN^eVzFcSd~s5bqqy(kXt9~L4_jKk4qo-HChMt3z+32S zSk3I_$GikKL&=pH+@1q78craWz646t%OEJg#(eJ6J+P>#l!@h1s32Yhf%ZR`bBzg0 zTd;)U@7ZI0ovL6=cc4qR9tvIRGwAuphmHGDjGEbga8pGDs*OERe0(rmHn|ViMk!m$ z&3OwMcUwVTYKWy~>~oR!#$dXZv78M2OereXpR{M?k;TN-e0}^l8mYr*=)XjW*tM9B z&Qzz;2UpmeS67(9Iu(*n`H87wY3^y7E_9Dcv4m-QD$6`V90Fvcg zS=pnROq5xGeQ8C^c#}5eeZ7KbP6uK5IX|{@kuMCIu!9*onZT;{CfxtwG%IZSh(pwR 
zxZLl?9i+|(!7)FwfmWf8cy>IN60y_`8^$&khGjf}mL7m0Eb zMC-rbW#9FBEZ#T^?;aM4o4;>iD|Zc}LnqQW-CV(a^K}%Q3CqWPjUn*q{um*#q{r>J zo6j^Rr(l(tE1&;y9UQNl3l0i)P^M=j+zCD5fl(NIZt3De_N1|tuaOo~4-MH9b_(-G zN>a}+H&W0r#EJj@a?8uLnCgGdfXb=Brt5%mNdVYu90&ESQ4qpsGMU~>a5JkJ#-}f1 zCCStAn9(-gDtQ@N#6QAw6ZG-a5=|QV*APzZxxjQz7>I`{*o*Vts*3~97>Tdcfw=vs zx!85_DDk~_GGcc_NwMe53oLDr8hAg_gwV)ZkOrInHjz0$B+U!#NHb-F84cUMR zA&*(#_eoGNZ5Pyz@`Ky+oB1lcKF;!xC(AFq!PKq{pr=Wtv@PF*#^@o<8COp~@`O2j zTNxcy4yUKO}fUwId%Ir{R+lgz=te+N#{9RfA|Stv3L z0T=y~EG{^Xd*tc6!xEoOe@ou;YN&p$x=01i zKu&ll8l3HB|8?JDnz{vS?+R7ndy>h;T@9kiiw?qty>FmkNg$jxIKu7N83ey)?*`Yv z7|v<;Pq=wfhC7yf4-?-7aM$KvU`xWfIlf>xDzn$H<@|b@@}DFY*@n``Xa(W^p9K=Z zOJVrNiC4yyu*!ixth8ww`x-2CidN@i<$D=0PV$8VAC2juFz4#>71N^hr`T7q ziI(agqGw;qxlN0E@lgLtI?cJmbB_z;`C3U}2X`=19*JL0b+fJ2^5Ar(mQO74riF#m znb$ujvP?gQ#}eiF<-baCNU5U?yU?|I?@GJwn6S3fw`p4ZQ}kwb z?8C`ePIK^7f!V)<3>Na_|5M1wv}eKP5+&ewJz^vJzOgk=T%aQ}8b1rTfVRa8X^_6A zz}$|-9i=iT?e0RuUs|Hxg=AX1+=wl*-2k6`)5vw=a8S2Wg(}BFcHJtH+nPEaWjp>- zPuhJpX2MbmG5AfbgQs#b%BSJV^kGcnS04dl(0&*1b~rOZcH%vAgXAY%Uu80z&xc%OV_!JB)T{;v^i zTXG&;czgpZBda0WiHC_Z(%?^j1#@k@Mq32GsQP$&5L#I{Off={HmTlg}!Rp{kaQo8|hEGnh1oxR-mAO0kTssUJPkl-GvJOn_$RaIq zH?P;eR(QvSv&&`8qEDfj)b#rkA9XNK;L06=eGBel_?`&vL+>Rz$Gailk&EG-saO{f zOk)Za7*~3h-x%ge(?Ui;V!rU6)c(pG-^$a^--q~ZK6QBX`d%idm5TF6&!OONJd-V* zAlhSP1Rf#z+^(`h%sR0JP6yBBK5q4;Q=?7krD!;0DsN&Bxbf`ZnI!g3J&S+i_6yA} zaad~A%2lqvO!~WqW8tkeG;sVHj9b#q4k;SLvp_k>s*>Q#BC7f0KYm-xx%3t@PRhWC zDi0R)UK5p;jAD0VJJ_Z-LRL6@n5FfxH%u;176VT`BL`(q=3o3zg!6u|Q$KyFE$t5c zoIJqtd&W&d9}RJDXO@tIT*5L2M$u5a4(c>k5HC8FNHTj{=-IMcRHojK6GlGAxO>a# zfo%~x`#glY8v9v~i!)vq_?*r{r%u;H9WR(1C#M(Z==nN%cCY#!=VK~jExysr%TEv1 z+I7*wO9yeoiigbpa~!ML7z5ZZFV^8kf%X|OEA6ajoShh!oQ>u7GB=hUUl8caU zY1oR6f74-6x&lk&)yX=M#}nMaRn9zar{ z5jf7*0nhvcpenN-oScvH@{$%H(YzGIrNYd=-x1bjnLz)#E!>pMR@OA3hE;8O#2Ws2 zL+6`#SlAy7<7~dLfUhdxIV^}tgpLLM%20MR%bIDe%;H=9g}%sM;n^UvLB(JPziaB-fNig?;KA6WfD#!_?XD~rta zPuXUNT2Odt0g4URQRZ+L?k-AXiP9A2Ep$dNujDf$-m*E;$~)E 
z_7z%FtSe5KXDD{^drl#qsbL2UX~mlq3Jyi0FQEZ3xGf%)jT-r4Sot-POS3xyNB7l;YA5ZWUvA6k zZ*npm_C3!6m+P=fKWlEA%2wPXnL<~iY)EP{cWr_+m|>ojp%3>habAze6)F~UqZ{B<}f%-%{blLF{~ zkm0GtA@*+#V?%Ehu7?Lwp0Kl$Bz#}c zz3c7dUu#00$rRe<2?I9LsDU zO5n4nrT9cYAOBnk0%$(Pq{mjW7k$StFm4??JE)7h^+6G|&fkZQ>y1LjR|7hNx?uA5 z>!1*>1$s*xS=A{&(D-Ud3-#C0SBtqcE7_b_^aknro7Tqg8pfK2wg<|2hj}%~RQh2ivKAS1?+XjwR{*CY*Bn z7S62wjx&eMpt(;Au`nx-+jzSb>kWcf_$YnGC(Co4Hf=aMU?^L*XE#;_EQfDt#xOnQ zDxcnQgXNycW{weyV6$`%ES_=^%%A6g+CC2&AhVP&bvNJ~>Vx5TS_TYVmW2=I>Cu_@ z6KINXepC@Uonv)6QR~x47!>LV>}wb_52>UjFT(N8cy|gBcs5%rS8=X0{&63NxS{%% z58To(HMl!z9gXYtU`ri>=aTRF=B}m18H;u3%wDnd3>*Q?k*rLSVb?_Qo9*z1TfK zLaf*2L4}!;RPOkP)+YE+$huo77S0-j^{%stn{!cpfCI^SX8_+F&$?YF<2s{cc6Z|! z8WMe0@Myju8q`UPmZ^weFCx-D_kyC%N{W{XY^>L!t+dyF8U0X-qTLkSc0>CZfc{b7{1DJ|%72OGl~_NSYIda_T~!Lw+IoZu^6ad>gUr=2W`) z>m?Q$E}{v^)2MUfZ90BFfy^$H(6X8Fbk9T{eoWB^=ZGOJebq0tJw1%B+P)Lcl-ta{ zk%`5>w^i6~bp^BbNK?2)C+OzI(B3#7%ja6|;?q+lsbl$93(bspcHcQ~EtRyHP{uO)K$FXq9Ao}6wPjx?PNnv>!Ig|+evj-(KuDFG2VsB7V&ogTO z^OUBP6;i70X6&)Li+hFtN%c!fsygaU(*7&R(^5=x)+&+JjtPAtunrFMS(2Y0v%le7jaldM|a)=yD$cSh4OG5FJ*J?WAc8Ymzc*K-9*KwZi z>Ui0u1=sScSeWZzezqNBZ-xu+a&to#cw#NHAC$!xFV^Pc@9btlijH_r!xFdb8v>y- zir5^V6qb=I1vU-$AHypC(F>kFq z5*_^u@al^h++3Os+Iv>f<2<1&qq2_0w0EFMY$5LSs=%Meq^Y$*mP|~i)2EM>O!><< z&c$Rbt(xe98j&NxUe|=!^z zf$vvkz~j;wG>fUBEsF89WC6lj;vo6(EA2M znz1yDekE-oiNOkF7imh#b1lhZ$rN(98G!`{4%4zDAvB*7=~{sqO$$gQ6T9=cc)KZ7 z%QfI%-4W1wIEbzO7r?jJKIS*Qnh1~jlv#($YBaR5qaP|jOO_pieGPN?f6mHqR&_R$e_pYvA7p6Y)4_E3(P;MHkTBl3MHjv$S-^oOc63s# z2JTO4`HiNtpjz9Fo!YAaI_u@}W!*LY$>ulg;4lwNvQB5CR@GqefNjjE(+Ldk4u`0< z!$9V+EcbqCA~UYv1lva(VU4Mm=)2!xx>SCTu6@p?_~C=ZXUaWEqwEez%DtzAk&!e( z%alfM9VYb2C-Wh}due^@QhsvrNjBqqA!Qhp(Pq`SkX>9S?W~d)ZYJo;3pdlDC*|Dzb12SC4k_nk6AsVG0WZDlLC}ZKZk#M+W4(qX|N`(8%^$cQ(V_3TwvXSBOZj(sD)kVDDd=7Nidpi{*gto z5!7|h1J=bWaX(7tvo&KBLGFt#lnc2Svol_FJo+DVyxL^`l>KG@aXv8FJ(;Ax-LLGr zZ$O-REA##&4`cmRz-HNAG~ILp&jur}mLd4tEE#9B?K9$oJJ7#JpIS1X@gD6r*w5|3 zEZAuR{+#lJWz_6|ho5F4W?y15`=nrzp9Pd;j)SpdJYidf8fXQ`F_B9rOL%WYI~5Kx 
zlhM|2Pc4xpl_AK}yUC-H!mZJHGPFwVa4<*I`@&E^z+A@*~+ZGKmp#E+o5*e1`6@&K`DWW{Kha5+O+bp#XSIbEL#Ax&r85g z-xDl8J_zG~BwIYNlLeE&dYF4vc-1MN#apR8AipISXt?D|in?4&i5=0jwN69>+cQaOiUo$EW5IT-~Kl9_( zs&GaDew65oOg|t2zu{2|>|8}n8K>wd#8c5pXTh_mO%{^`CQPIPDW`^zj2ko7!0Um|meqm0(dB6hx-xLWLdC8EM zkQ;|bZkj;T3lAI_myW3~9^r)Q96YyHj7d0L^th?0;MlHiO;UgrkR>^Nhv=M zW4CXj%JI?YeXatpP!3n*t3tMqO(v$4&-)6hisSHjO1=1XIG1kQ=~ zGhY7w9`3hP2YOq_Q0}rbob8s%)D~~!Z&U~w!m;`E!t^Y@5qUEEom((H zMiM=yzrg6XF%&sN$Rib|ps&Pv98qJ=VzT9E^FM8Y)2z>~h3TOD)XTCEl|8_gr&nN$Oc)H^u?%MSJi>_&ElJ8{Iqj4@ z!mIM7^i)P0Yn`8ChIazz{Wg<*?`;>cMYmbCLKA27sUEvDoiT2w75n{mA@yqbnX}Xd zRML`-TH3Po&~64x&nSW&-~MvtmvULz^v&HEtH$}9!+^r2nCfWB zEi>0(mTxj}qkkIjA#@IUt!lXH{SjQ_JG`Vc>XA#@gf*FWd*?iz~=;|nmLI~XN&9N71!2mIf7ebzC#hP(Gs zo71{>)7;;?8b`&h7y9R?=(A%Qjh*93v-$CtEFhZMS1!YUE|%=5;Gr54 z@wDr)GrM7>W_}$mQ^JZImYOo2ukW3K8q?;&=sjw%x^4*_NI4Aq=f7b3&PTaqTX!mX zGz>TFFoMwRbnt440}cHzc+Jv`bTneo_0d4!a&9mm^G`Uve;_VcoX&oYl46fvAK|l| zIZ!y^!oTnGV*I6h?fvQX)mVi8+G(>tUa0CD5b}iWjIA9jyZQ7BiC8?4Gs9PuElBSqZZm}5eBIRi|uU1v+8OlaPB2D$3~V3}ISx<8!Y zZ+NVLQ_arYmD|xwd-fOe_-~`>iIWPNPf~}*=0tX}t&CZ0m8Gr&)+l+=Nu=p6LD8l_ zf1k{yF+Uga0vZ%xdA-HCqF}aKc`;ON-2(9i5~4MCkFdn(F!pn-4J@de2xTK1Kt|vO zK2P_@eocFzL_?Oc_CEjgVGg^aB4$D75}A9B1FQE<!ffm8b}?##He~IjiU6Gzq)o0n7I@}R#YEzoHB*|F1E$EOi9=_u$Sd+ISZ@z zg`)ZaOR?~$iH#N*h<7fSAbv2=Na#T*i?uq{#5?>Y#l}8g>86hIPvZOJ~5}2V)jnBH!5%443={vVC?5swr0LF)S6dA{7Ipg zB)triH)vAzEG>GnG)?d3RJrzmH|G?Z>bSfN-5*G7lK=P9bNxcCS% z_uWwN#(@o~Rup_C`$1$TgPPOh`RJi~B)55(;KE2@n^~o3Q)mEd9dJYx@o5+|T=~N` zw;W*c`&L2FoU?34q$wQx@(gy~ddHGuPjW?PEWs+hfJSN_C$op?Q45ovfW$a;KDwN+cM%#=EoT`VMS51KE&glT$5|~#J`)U<)?l7wCl$}5Z zN1|xY9Kko9C-|uf6tHgG6Q;jvKm8cI3$=$Fg4xdR+3SJ6{3t!aQL@s>e3_>$c`oP{ z<ej72c+6u+lVN zD7Y;N!rzM}M?SGd(l2=bxqn&X>2h{vz8Y$`83^B@B^K9jmgCO(BS`9K6@4*Z!_HY9 zW;z~Ev3h_3WWV~xT-RJ+hIdY}iQkne*gy$h3r~>C(YCBYzJkr#b(BSQxS-m-J{;vx zg>PhCNo)65+%GW1W^Xx*X+Ki9k8<#y};p~P(w1Q zesoT3$gS<(hx)^{@bj%oT<9PPf6biPPt$ZvT;IlSbhcUeHYbzII8~B9*N3lOq#@fQ z_(+yZlGo38bS@C)qH(h=xU;)SYRC|>k=#HrHQ{7=upW)Cyv65XN@O0|gI?F=$$3u{ 
zE8gvicdjjCt7Q{mmRRt7XvARa)Cq9+XCYtqR0evsj28IJZ}Hg6P_kL?LvO7UDP2;B zoZ?1MWM>!Z6zJos(n7XDc&@)q4uf&lf>+3M5bHAxhm*>|EbLVlbI*?hWr2r#_iGx> ziW8hr)%Q@|Y%?D1d5%LJ195-ISA0Ff18>Y)#qRbzWjHtzwrhJZo2m?!AaY_3H#V_d z)lQgm!(8~iI%&7Olz7C(Kh$@$g<2P?iZ^v#r_{1q+8!ED(`TQdo~ErVd&yo@S$+!3 z{lr{Hpb~VsF2fI!lhLqbF`jAK%$B_zO$nGs1}1Ome6A56M;~a}*g(=gcn4QCpQbKL zVK*$T2dAvpL78n64x5xCnz&9KV)Q2QC)Rhed%vYYqU|;tAY>?;^8(1G>?a<7+=>!S zrksRuK1@A#5}&W0%4@2B7U^FaM@i2b`Hk=1VpL8(%YU7K@%LVmqslmTV4*XZu}&DQ z?g`5Gd_Xj9Cgh&n096MvAa0Y0tvp~0U*9Cbj~W9=OP7P-8=oPvQNwby*)|v^T87UO z8*tU;hy2r3v2bzZWZ12I0&dKi%y%`_FwZZxsObAqWI6DY#hIxbzXOXX{fq|vlb(d3 z$^+s1!Oxt(rZ9glt>6bZXIp4qG=TBX7xPsMfASS4v??bks-aC`4^G_p4HRx`Kxly$ zi+{QT%#O~4rccY+hT~GK?c8IWC*RFt=X7IP;s!P|s|@eDPGYh%XTVaYDex`-A9pCm z8H{8eu)a0j?B1+c{@|Kl+!MtwxN*EIyIa`6EHqDIr>-Hn%(_gR;5wCywIDb96{Mgt zo)XS)BU=_v`)g*hQ7a8VYqTL`MXzPAoy+(l%c<0mXAY0QezTaDH3Oh-I|MYvu;xj^ z-uGBAynlBHRPX5gPtke!Q~ADeJX>~Fl93Qe62f_|>!c|qQBqncEp2J1BAF?L%F2jn z2@N^VeVtN7ku)?EY40@fZHRu)??3Q5&U2pozOK*b{l<`{8!V^4IR-`_( zlT>YpA)n(@*v#jJwD;p?_Eu#yes!#*k1LF+)wz(iFW03FaY7$*au1(%Y&kb;SP40{ zy@qA?Uh;b$?^YmfV2_ibzN&^p0~ z2Hc{rwV#;J!VgS2KNAde)9GY<4s06d&;HCRW6Cl2cyEV66x7_zbe#R^=G#CFJ)+4c z=_g{J@~h0|#1PCGu!3!VqrqaW1P=4d2z2IZP_=zHy6-Rr96uLZldgkBn=YP683vao zrE|L!62Q8m1kZim%ujHP$CAI6(ljqS@vx9ae4up9SgE>^aBM^`L-LvMdBo% z?YkK*quY4r^J?O6ua4n*x0CqQpo6@(OF_Q(GG3c&jM3HOD00Oxrc*YORF21l}OzVBI!YBM(5w88PMGo{ySUb3W3Zj@^_R5~?oK9%;JiayKIXou>4d~qPs zwRY_mtcz7(O|N9dwaryfX0sKSkGqNk4=ITQUbtXr$wXXOvzem0eRz%LBUn8+3M(%s zb0#zNY5o>j3>LX#SX&=f*?AQFyY{1Q^Fqi`PjEFkeiR>Eio#TbJn&7Q3Py+B$l+2m zx!u;nHL6OqY@q>tNZXAsZw%#o_l*`D=jJ$fN-%j3zf9v*<;W{^GHo3-Tv~s#nC6_j zPmx>O>CC6SwC=V#Z*>Z3gJl|yRQ|`##}u(C`&QG$oO`%={WZ*QH~{f46meUm58k&EI)X(nsL?o6;O4zrqCH)Lj#3=_$TQD<~1gqO;Zs`H(ghjR@U{9nZsCV!cJM%sQZo@ z1`Lv3>ujOv>aT=6l#{ftMgxpqKaBbuy@VG%Gx;xeximxmFJ9d2h?=oK`3{Gl*kU-6 z=DZk!`*J!c#O6F2k2^%TCYN>=q~Va|yJ_Q-XY}OxEUrxB8fCt4fXY?ZXwt8R)G|qM z9_V%Q5qAqH!Qcbid^QEY)x2N^4vsYTQ8ha04a70erL5;?KXBJNfXOe_nOyiA@ak(U 
zZnWA%(?bq&wy)1arkSGjtVsr$MyAt-BRr5Ykp;zytO`nz0@!v~WEGW|+!~IzJz7(Bnl`yo~4q?$CloJ?VXIJcn-$vT_>v4hT zgWv(O(TwM}n=Y?d$igt#${4oE=t+kx^@O_#d(riV4%d7p4HMiA;Thd#PUaWTaIQ8s z)U4q<7xlwsM|E-ER)h(H^RRozXnLJ_n#xNbaql|)gwN(XseL<$3UPWk=l5m)uU9%N zICB!@`^8dMuA;Q$!<7JzNR7kQ(DO~ywA`rVLxL3-dq|l$|RCJph%-Aa!yzv-MYG* zWy)zleyzWYH(goajL~kZrebzpK%+;Yrdqr zE}As?>ucII*NW^WMx*DOM%ur}h3+R5AGq0%tw!BRpSnkn~jw@Q;ZKD_&tCHE?C3?{eDMbL>2M{2K-8pQgb3uu1{e)5cQ! zOEB3VT*6s+UT~h(mqnqd{t4f^#+ed4ogpcHJ38H;0O}jJ zVZHfkRxiUdqp~oJ^WBKts}F4FW3jj}s0f2L=wtMAW3jhNI_ntLh92^U(n;R-6uRdX z$|TB2XJ6le?-pFbCv)zQOZQnQNr+{$p0&aA_fdkIN>*WN;VjymQU&+N9s!Bcn@nF366`PlAPrc=RrMo5%h3xWi ztZdv;SkSl~AN8JLRX=_}ca$}zbrivb;Iop)uYbW!O*>q0+Fz0|;TrqAL6zxUeFg1f zgYkahQe0}k9;NU1LH`+%)HirIq^waD2i_0C#4~-w+xsqqs(c?B{(UCR_0huZq1)Nw zh-bL_Z#;i&cmbpstRj=}zOZG;00{v)(`bBTMa)WR$` zXVaur5!4{$-UoFi<09EYu$y?A*+`;b_hx0`nLfdq)#917(1>EA;)(O@Cmp&}N>uiq zOt!A2z_tUlMt`+PK|x*c0gjT4=*^=+rqOh$Dv_C;e}j%}pb!=g!N1|!l%$b|!{ZY0 zcz8DMt4pEmoJ3gi@(0`B8iS&5x%6G6Dt@uB9$MY>AYyDj?g>c3(dV~g$Nh;2E}|iP9Ni9 zDAvP;tY(E#b#fuo{ZETxdK?AjjV-eoE=!N@+p(0IW|lX+fz58XBH3Z`konDA!R3^_ zgH+E7QETQQxOu1v9<&alIM0{-z@)eQu%9{jB(VuR+iwVegEE}jpN2nYoq>~6HF#Np z1sXlCoW)gIl6gchU8;7YNfn)JM8*(SyLb;dZIq#To)al$P7AxG>d3YxU4#!4w)5}D z55v9w>JXKk%C){wWS1`q`$mIZY~U9|VOC)X(XTC$?ev7rHYY(=_@5OC~lyn~+gbYbDlen8T;+%VzJ!Am8&(4K>qS1TaGVNPp&|SQmTbdzwql%2l?~1^zm9Jx()uV9Ne^c3qv9361 zwl?Q-tIcUk-3lC<(*TL<_cL#q{oKqHBldcN30o9p#Vmb)@uw2J+0(HDNiralA1LH{ zR8+D!W6iZR_2+K3@sT+_o;Qng95@=X*FS*&`di_bFS;1CeF|qdshww`0h|S?oTk&)6ohoGCo<&998raq50E)XDOwSGsqT9kdCpaaZ zULM{bbz^;grRsTbc;#Eehdt+b2%L zZ3B9Q?PbNCD>)-|ecmt9kPQxn50txN#Bjmz1V<^m8kIkQOfi+oA+XuQ;Lom=;* zKRlT1h~|0DsM3%Rm0{-Cv*{`54N8R55*b__{*Nr@o% z^@6tfgW1Z)Quc72EUA?qfYg|WBK>tO%zhn#xoazXWZ*!#(%E!Re*v8@JWQG+^=M#( zHq|mcI{3h!Bzt#K>v<84syjf-Tok2Wf5uUh(>r>3q>-vO#!$+kV)C7NhdU5`fOqea z5%_>L+|}T0*5Y-G|2W%%DxDkHsI4lrW3V@kZiu0&{ddrz@+JW+v5mZZh0MqL5b}K< zPgcY8=-uHanjLhE-tNty^uo*Z!@Y{6cPnY%$KNE^&S-0F6SWt|!03i*)+hKCpO-uU zU#Y$X8_WBg;f-@JX`L!A3s6Dz-ubkw>L4xSLdiq$Km?AlqMDd3i7IB-M=+ 
zEaXx*oe5>{8g8*#wE$f4W-+EDsNzsS4b)Q zYGy{eCBm$v&nDuQE+$SI&3+a|!Kj42T$|$;rYCTLF5R&r&(cd0a4v^YpXI?*c<4WbcY-X*G4fv+D7nnft=06WW=4Yr43(Hy{se3tQTp7bmo<@uM{~3hdBb=Dz$yw%0 zVlusZf-RRuz>w37bxiMV&+8(G;lA(#My-X{&i!Gb{ zguw{mEM%`w8-!g)hdOLUCTiwHqrOZVI4;|PtmOw6 zt9F)8XzT|E6>M2*tf{DDt{MNhLIceoX0XV1C%*TN9$!`xA@aIg9QE2=x?flUyF^H$jj0(wu0ZdYZ#94PvadPGX4s*9fjL~x+@p2W8Smmur7bgrN z39PD-Q-pmmC|L?u`Y6EUPQ+Q2{xEK3 zD!a9M7$|&N3X7i>x<32e4lWiNIC$VUXnonuA8k6vzYMqs;cJ`t<0GqS?ea&^^k_Q# zjM>k!)6!U=T|6YkJ!QWHu3u}m5p|10@!{)_e4o*p@cmCd^E0>MzV+^9ZA+BsciaSl zpYVV^X}SY3BZjl#b0^?Q$YiGnJ5ZQPEwmKh<=ve`oOFl=7U_hMeuq0*-n}i_Hnx>r z_-aefcnN<};F2C)_lAFaEsyysRl`>VFj;Yqp&x$d*O-J zEzwv$GLiNdMbeS0m-#;*GimKWCmOsdiJsOSq%$%a(ypaXF=l`!H+Jo1yi)uGb6ZMi zcEx;B2-zbr^N+IWYR|F2If}(ppJNNIctBvc8x^1Qfq?%S(Yjf1e((JVtNdoLwMzsx zn%-iz=ksv!*xVUtI#NdJIi&&*PQ3`ZI@WYp%@;r3nMN@I=eeWSk69A-qba7hSkp`c zVGdBqHR;ZSrK=4o{!lo)HhBc`Au=>K--)`{*1^nKDb!M*z=9(0Fu7CKRCDMczDTHK z_KAitE-4!-zxgm9)Ag|MtOEYCa>H6hG1_*TWBr|C;W@HF*ODMOEPg0-uIkaYHlGFk znuTLB82!qP5SiZWsJN`L1;4Kn=8|$TLwwf`gaoZ3Up=Vy}Tfrp%uXBwz4SwY#` z{y@<2DDuhbVdj1B@;RqA;nS_3@qWW?-f>I=M!oT&h};G!FbKr^=`+P1FAAXP$41&; zG7pwm-vqxH53R!3)wTl$|5(U$D5affMX|M7bGS{C*4j0!*r-f-t7d0vJM%UWOhBtvZ?yph9a1y2- zzDDUO1x!!fLmV(LgN3^tW{yd#VBD1Jv}E)L3K4~q=bSB&e=b*|m3|azvXfc;fDJTe zkI>&-d7Ac&E5T@q&<$GkktsJn7F-JQ($N=GXtIhS-ue-P{;slA^GcOYzS>A{#+=2s zaqe__m;x4wU1?~)WndTYMsFjh6%DV1~PtJOAF9!GHt1JeK&xhvYFw;nyUlWLi z{oQDNhCJJ2as}G{nSw2KVNT6EY|+?2FRwjj>nfje^W$9T9m?Pe*;cILZ{S|xU9YvR zm^Fk8j*|<0r0FsjVEnH;c)I&Cj10O#fo?t+P<({)miv>|m=Fq`oI}p1D+oIM*n^gH z_~rQ@t~FGb4jph3_B0vf=zo+vpD2=d&QQ=!aN)G4<&)cDEos@@y}~&(MB2Axy0kkz z7M5Pgr8>`#f=^>N{d(NOO${5vBy-g9`i7GN@8uL7RuR6#ue~Vfw%}&{vItHMl*hX% zjd=M*03M7S$Dev~7#8hcgxvcWdaQ7V^&e=8<{h={-7YEE3^8CQH_vAV3#Ze(hw4(v z*=y7rzn@inyUY?Sx8a<$b!_^HIGPmL%O8o2#@|liq9IqmW7UU$_#;CR`+4Qzs_XUq zV8_eg*laJ=^lRnwOnP8RXe7J-SsB%?Jp$EDxukq@IQ=|gBDR+od_-qt#PP$Hpv3zg zTQRtR>lV73rgoaxGbIac!dqF?y;BskHh^63+lwZjc4WzZ*4THI3|R+%Wrx;hg2t*O za>xwE8n1DvpSF@kM2}_-_o}cx>KJ(ao{N^peel_VJ@l;WH(xK+7a!{=rt3W-C=19( 
znO1r1x6FW^nbgqs^m5^@?7)7%@1oD|#?fKR8t`?Fp|tU8Xw~eDlT?My;>mbWF1W(Z zwe+A$Y9_yNj2nVt4E{Yk0hWiy;Ids)n3WfZ`xifkbL(Qb;?ifl>{DHmt#6~iu0h}x zH;C%i-{A9RXyA_P@uXHALi*R2@`Zl!Y{b?PEOkjTGml$HXXH=NKl!b+N2QP0Iq(k- z*)fs#h^s)q)J3o}F`Kl1^LWW~8`=37N`rg1a@Wnh&{gPfBzlfV?Jya(VDVmV(z-af zG=^cfvNjDjuckH6rVGridS+yBfQ^2tAn-`fqLXhZDrHB**3rU_$m=?0ZQm?j)-aTt zv`9|yY~?b=%^T2U*(B7vF(ACX z{3yDdPef7AAGomTC+S#t@Dm;_V|L9|9QW)ddm{J)#w3)|&X|$dV{(-C-(5#G%gt$5 zl_pJF5zG=U{-K!@Q)#W2z$#pGo=onQb4B$>KXpJED(U-}y#k_)z0|c3nC~|Wy1l55+U(_NXvRu3yWI)Hlow*8OdDchi(~sfo$$n#UZ;YlR?v2o>>_cy+uVOMDm&vT(4e0mCR(y5! zcpTy^FJ9?20Y65yz}^W(OlFh3czRJf)qm5bs98ckxqT@9luly;1YX!zo`t7z+El0! zAewykF+9q;!djX)Nw0@5ry!-NOh)L2eDjo|dtL*dsOJT~f+MHkz)Wz}P~_k1EP$Rv zeaSs38n^h#an=eixSx$;(VwSU?EaQ6@LUrO8A}DH_2$RWJ+T2=m(9SZ9&1Q@g)-fJ zaF9k8o8$YV&JZjx>qd?=rgi4JH1Ne(>V9~TC7V}N{IRGI%^seFjgJ1}10M$9g1lAy zkCOw%rB-X$(-|W~>V{4*_25O=oiGdwA|*I7(G7X^^H6yq8=H#f;FjuVSa<(0e$}3d z0gwBO8zRr5s#+N8FE|S`u4>_9^Wk{?k|qAv-d~brXG-HI%)}E12eXPwCs5H1$I5yB zT*P<eH60B`i0oIY!Cjq7lO|xufUt#W*Bfk3Fai-g7;=B%tbhN%yoGP zbWO;~SYCslCp$r(>x0jAI^a>$7f7;F!pv2GX3;!oC-oQls*IjytkP;<+=N9p55!wKiIT zX{D6=QXPt~n||>|SynWDiaJ;1JeMtWG{pX!>LGbfEUY~uhl2x(!0U}ID!<=B24}DH ziy!B*dHE+f|C3Q*sV;+)k4>kEFY}q~lU9JGA(*yf&BvG+8oVug&o5^2`tSTiG#NYzKLUbn6M&*8`?4nPwUQ=yfK-GR_^*J zHa5rSrEl!Qc=o_eHltkddxs_C!971g(M6Ed#K1L95&>W*eydf@NE-cbN#`@a^g0bl|d@)4}-Ca|lM$ri$484NO zhR1Q|2k(@GR4wGx95vw0NdtHj@fl`5suG-hH~CvjM)D&~1RldAWP|_OlQw-6IdqS} zy)qg!ze>dCe%0cdrxdWx&8^(b)`?{CB}?>XfjY|WO5sRT9c;FD(mgQ z`-cNz<+mGH`u7$rRF8tX^(yS!`3<;Rt&CIn$DxB?1->3`4zu+Z!D7FeaOmCyTwfQ1 zfoa>(|6ec2{;S{|xkk>rH5p#kgk#!m2ki4o2^X!`$DBYjJoLStpKdmUPMF%jC;bZ` zIwEAEz6}7?{JD^Lxe}y9lJM%(1lDt}FE||R<&U?^P^IJ&wA$EXy|8z$7G}1;N4#WK zE8-yPvEa2IqKFAC*L{bqiiGFX;)ikr3d zGaKiui9fA7;os`rG)OpKowIG@^W$`A>7=2!#I~L-G`TLAl z1MuzhL`lV(ADo7=JYQHv@Un`-(ervGdL^~U?bkvl>s+*)!=q8j73??W63)!j$E7+G zank%_cq88pt4vkd_P$CqLtBOWYg!E1fnFq|??#TD2T^W>DIQTx#nFK?&|jew9Na{B z)J9p{Asdb(&+A~J?ip@M++v{~62&I`t%r_e6LPsygt`LTDNOMp{AgHFv2VQ!8n0Gl 
z_w3*CO}gH!?$ri%aDz6aUULIg!(g<+=PQv|i`DmWqz@~gO zz(x=COFxDzgs$(wY~pNZ*k!OE$~K46zjFgv?$5h0y|59__(bD?&j;|1BpN4|W}w6A z#pqNr4FeRK!7r&F_MQsEo)7PEyv}aadXkJ2bB@FOm#*m3G!v^*BXGOrA{;i$7B8-k z;}jkDW81m|(3>8ISqF}w+0OIO^>+lzYL%hTge|VucK(9sfl9FBR5FTkhu~6m4>r{D zDDTrP@VA~UhLsJz=z0Ao9Qvh?W@l;y?yd&@s`SHVmtW|3NlxJ1_ThpXHE3?NKEL?W zT*-L3a{k7`RPa7zkH0&PLV{8kJTAP<_G#^dmbzQ4e8dZ;awe58KXr$Dp5@J2>@)bI zdL}SskJb=cOWseu${6kY);ojmS$hiqJ-Ixs=Ew}*6YJgj2z0c zU;ZJ*tdG>9-AeK`GE(o0I?^d+=P7aSCEE3^fof~sQO@u&__d{!QxS56@oWL&I>&kx^`B&J4yo^)QR_J8WG0Bu>fpDz{KElmF)3#`i>}0T;gyvK19z z_S%6^^_#=TqXdSCC>P!YahQKx0Xt6(<@W!I!GV^&q9;FZvZ?!A$lp1fl_iEtcCXeZ z|EaPRdio%HxzJqT9sPhg=3XqwG7-*9(H0m?4iq)vESLmnv49bd+>5YCFmKOiE(r!) zpO;@HtM;#9ffo)i<4j}jpw|uPRD8&P4tvJ;jd2%tzz3LgzLa$@9!+UV11M+jPbMx& zWPiq-XZL%ISf|H0*0t|5*XOe=qo-QfRn!MZwHv^dY6}b_8=EKu} zI;a;ukU9izj@rsStoL{}Bntj&qgmE8;OBlwy{W+(B8Sjphav20VKH=!(iJN02l?X# zu{b6^2C_Ua!X*38?2eI$^)BpX<0>u4I77%e7M+Dkos@E`E=BaoJ9nP@O|TlT?8`}*^{@DgYDa1^f8{l+4%C}Y6$ zNVf2UJ)A=&!RtMrMqNqbZgt<{G7^jUol72p$7*$q{K~BX|Ri4XE>LM%fWJ(0oCPOq2<(K2>2SyVnPKTO3c&>l^J1N%Rs@)`?i)fkLcp( z*aecKV>0V|B8iJi(}AYR=csV*4rUI)*28R!l^jiV7G>5R4+P;wa zwHgeyBcZM{5e$<4gNyUuz`0{1Ku*X~A6~GC<17;3=6fL@&rgRs=U(tFZ-jO3gK=4p z37UK;1CJ0joci;o>$HdUtfJfnrnx9eGNi_=MOm3YrY8^D6Vh02v_9P&c@l!dZ}9sE zFbJ~T#eVvyLG0p5^dd_ae|5F++yBmH=3+H`|1lOrEQ2s`@o=Pv*0|rehSZ)IL(W<| znyZ`6C#}0HbW0{OjrKv3Lr+TBry)V0J?|8rc~S!x6WY19#9v_jT9chHHm84oY-r1- zK6HCe3#)OTO-D2pnC1>GdM$JS-@0qUr$8m%bnZ>I>T!SkIkbygVl9tjhDAcQrV1PM zg@LTvHTXxGl8#MJ;LF!JY-Pj~R&>FONpB9o&PQ_iD&x1X(s;sEdH-N$vy%CFnvbDk zm<8pu5Zlr!4?fMi*frUqsQV*@4gK4nw5qH4??K<$FrOEEud^y!*?X2nJUGTp<5sh_ z<^;5ekwN{h{kZpu{m@3)2CLt`Wc_BofQJ(vz;w+`tfj*Px;*A%#ea(=!z?54*1OMa zOI)Jpzz|)in^w)~4X|N0LDjS`>=Ki^5JpOezp|V)LN++kldd>Vq6NGv)e5_*S9%{dk?*>`I*81xh7(Xlr~nPG3(TW&I&v{{D2>?g6!n+G%Xt12w`jXh2M9mk%0 zZ-Ml`a&$x50;-`QaAW;ue&*Cr? 
zgP{Se=ui?Yf6>f#b(>NiB=B=1r;=r=3uTWz#dfr+QFZJyrs}#Lg0e4j2hF9>b7>39 zUGkQnw!Q-XJR3}r9nsXOrN;2nY?yF;C40MF3(u}KV!Ey)$Y6E`U2+&rJ_GLX?TZJ| zkgEl}LA)}J(A`9a^0S#*b`gJY=Mb8<`VF{*DpAm)7PwF)W~;A;v0pxUaKPa_e{-mi z1CU!xD`l?G_De#i!ezeTUW%s4PC%V4NlY=ff+Y$ps%%j;L@a#Crz}-REvt#Vyw)mI zs2fZs1A;{Jb^r1!_C_-o{RpO?-@qUK9tS`E2wrm697t_k&1P=Zreh7QBCqgA^spkC zIvmO=r7fST=3Sxr^G?wpd`6QN6p?pkDP^f!;X(@cq>N%3ENlgwVgNP-;+#qLM%Mr0nTI#p4!J z!J`2}PG`4}Gl?P94+4kja0PALv7i2S?4kaGTVNzRPEi{c(*6W78~NK42Tx7~`Cm>r z{MIY(;s6g9y6Iyqw&0z4rFR}f#~>m zp1(AkY=@X zBsWr}2W!%3h0Q7YP?tk4n^)0=&U2J)F=taT)~^+Q=d%Ja z{lQawV`_!{*9T#n`c!-uzYh=ElAFJ86HQr=Bb_uKa z-3k?zI^_1BHKhuS-?F9a@IYw{-7z0Vca_JGyS2dV{?W-kTrnb@{^$9lOq-JTakSQL z4NIC*Kt>CObCW*^`~Dkc0N#s8$3T%;**syswgt>leE{iRwM4&jGW^!aGrYoOPo^|} zF0}kfVV_6oq4S^vob!Ob_|c#iI+Yh;>%LFy;psWJT69Wcx$+`6U$cYTIJ^|i%s$Y} zojOuSMOmpvO&870)Rj7#$I!jeM$)yH6{NfLe$&up=F;b{Wu>jdb)^%tWu!{C<)jj? zL!`R1P2fNNrV(HJN+*xp3mG5x^8*xSup1kl@ORDv*7(n!JbF|yrJ&UHbxkzO9U<)0 zew0$e)ePc<=V@QRWi(vKPK2tQqAzhsX~CmNvMK@sB~r-i^`pwVmmuS&DE=_*lw_Lb z9Y`N(k6$*Ha@Tp{&N&o<+S5kP@xoy?Z^)x)_y zr{Kv8Ceibp%%U#r7Fk{U%y*7iB{`P$gKZ5D5SIlz7Cu~%e@mt zt0uVe!8bp#hMI}=@=iM&MLN`G=s{AyV-zkYa3VtRAGj$QKDL#Zq5Xy$++H1zHPdN^bU*&Sl8 zsy(rEDl?y7dh`k_SgA~(b3cgAdIpFmvX%;VFc#__OUhdQh1opo&%VEu^+wstiU=n|FY8BY4AgBAh%Q!kE8Ry!bo3%e{$7>S$f5y zL?IU{txNHoi5p5rjlrM4G|)e4AG$wE!NhDQ+^ijgM>W-MPdou3-k&9K;n_!(! 
z9e6cWbE$R~cz^k5+}1n<`C|j{?Hxn-J=usOiw=qA>L2{PVb_`7*h2c$l*?XOifFAJ zlgv%{!Z!CmLIopxn8J)hqLF@#jsJ5Do~%2^r~RkRe0#hgD&8E=A6fvfqJO{=>0kIW zEDv_otKr42*(j(%;N)!!RDPm)Gg`f1W&Fu-i-xQ5bPlf#7NnIQvp8?5l zIc{@{F}q-%3dMDO;oI41bmv|pC>}d1*_)*g7r!0{nQ!uZ`4D5YjXQy=6RIKPzXDvX z`2zg%rsKb_OE5Ca0k0?CL*L0Nc-l=*9AWYccL=%gs*QUwx4jEJ`xoI0m+S1#!$*91 z_XALpPQh~RxhyE^nMh;sHFjd(8}@Y5Z+K+l%O8^Xq2um0?&(}3tlzB)vjglnnShVn z+O#dOVM7MsQajFDnCYeZUV*juhv9eA9GDtB2HW496os9?z^|*)1HF~mOrvK24T_q_ z!fJb2{XP}GM)1=I9&TpO|4NypgQE{o_7wh0Om||8C_`@nE68%87qvgxgG(=1Xl;M$ zRecAq1r}GAn;Z18xXh~So8YhE4hocSVCyd1b9IsWfF95WU-(9aS%-o zJ|}to?kH;z&R0XkJb(4S15CM67Ir*3$v-z%ByQGX8W??x$*Un0wf!S|mNk!t z9p6ZyoD*#=nMWn(cGH^l207 zX5AB#4695&^QH^+_A9{|cUIzGoeaF0aSw`?50FlXHHGyt^5RRvUa)v`B%XH;q`FNh z)UQ5MbkF}FOsr4lbLD`Q41d7CAO0J}ud89|uQYflbV8C(zv0iGm#5bk0-({?i*1#w zfO|QkvBts$pH(m6^M@9?{_HpbS?=#ae%E2iQGGkuwn2mZf6b?yMca5+$2?Yfawt9g zcaSX})5FDt_i#(Mr89$v3vl$Ga9mSA5>@)&WbN>Xu&tfs54~se%k{Wu_j|C`;R3)+fHH=Fk$G9Hw(JvQmyRa1If0D(2%l5VgM%mBK=N=#n?G4p6I4cz-|t`^`nPD! zKOO14C`gQmyj0XjrANmo-JyJwk?|`LuSHP6- zX0ZJ3Xe!ow$F_|Ua@#vMLh()&$QU(-0xr}u%lWzNk&vakI(;%NO-!bKk0(%}@>IBd z`y9(mSP89@IDYb=B`{4hme2d{gp#VuF#Ymiy07pI_lCv_x0*Mlu>CKLdByN{sOP&@&>pvl1TOiA)-x`BqzFGW0Vd(8T zOpg2SQ4WjfX@WN{I%NA!fy%bUa;3}TN&9Om{ZSu7j+@8Oe;)SO>O<6jiA%;WvBE$u#8Jn^GdU30L) zYb~x0FNYsqE6I7#VRm%iaXH2dmUNGXUTt3ks^VZA%}Y#4wpKM(T< z-;bp|YGo|=rv^?)Ol6p%f${rgP|02q&lh~BU3i+@qsqCX!PCg+%q*<3{>H(?P%d!E zM>rv^>X+YV9gocU+3KMX5c^T^+GVnKfn~Dl*%pkeZmT%?r2&c;>Y{=}v~d5}fn%Sk z@$KKNF>Y%cjI!5@r%{0J9yEFV-2zgG046Yl-t_lzWVT(gO<&h3IvE1q(YZy|Wd46AqznvaGn|$9FI+k42TE zsx&!e3j6RvmK}Z-#r{SOpoFLmZ24CWzT;)E$RS>zl$_tO?DDBpRh~iS+rM(DvgLdM z2s8Udd$^-dhQiN4L+G%(3+uy;+0=+fEGlmx{-^PZ>F%CO6+(uwbE`g68UK%6JiZQ+ z(`%Tm!a69@-j16dYx7-&3uvv{SvEQKAIl4xM1}QYXn0Z!b7$tm>ratXz4Rw(Dm1df ztu>&&Zy0{;Gm#D063a=Mx3O7 z@fm!3{XM=S@e}twU?%G9u3 z)EU^E{TJkyj=}x2nj!j(8}`5Up4+!csC@i8fp6{=u;q=m6u9mY8@x-x0(_T%*z6kg zGpOQnWM8s0&0MO=QNUz9E%5Q$0dx1pGi{LyCDzDb=WcM}Y@ZNdr4=6G z@J>g-e~&<4#ErVkmF2 z4`uiSkoDA6Qf*y%8*uJaK8;MbN8QgPx(y@N`cQ9)9A8>-0=9 
zqet)^?36-ls*oR>-2n0rmt*5<;^!SZfTrD3u~y3;(~RBF_i{7_I}7L9U>!DN&=Xep z{4A4RxWlHJ`LVF`^Vpr^ifpizz_FW`3vuEuZn9(wEK>_ah4i;@Tr>i3YaiCy#|eIX zUB;!zw6Gm3tNF&#k8J7t=;OnOhxH zcZ`J|_u+i6#dS!$tBVdVLSTAKBb2U+fiabi=o;S5Z9jYiW;`>d+jkXF)in>^VhE@x z^})X#i@`pp0+Rk_Gee1x_f4P4)gM$q$9G~Fvc88~w!a?M1}Nj#jbC85Uq9@hb%Ot2 zUuPaq)%WjzLSzV;DiNAUp$TWN_d!xhL>fhuXp}~kMsrCBB`K*SL!oGJ_WOM_id0mD zq*7AplO~lk=-GAz4u!0*X#M3I!+I$cpJ(t7{E+6 zd>7UwgP`h74BzBw2{!9Z`Il{ie!yf8=jz_cDztR~2Ycd@R0F-^q>JByed0>$n~RbI{P(#<$Z{sMr?5-M9bCcC8)F^fV=`P2mm; z{q~z}sWd`1Q$6YY<@dY-0ha1lHPp30$PtyJrFB&VHjcN|vA{*0e z(x1Pb&iCA=s*E^Va7mGQ#*Gm&682)DMKCLJ{?6w0Pvp8Z0$87*b*x@a9rq~3@K4Rk z`1Ly#nc}WG&c?(C#d7^#ERbZXK#L&;d}9YUgxY2+oorN&g#jMttGnryBQ&+5R`?X zjqZZqjf0f>ud)8CKpR<&IDxRc^6X@}VEYk0tLOiTCnCI2YVigQTs&(mwnR(syC^ zU>9t;EHLelE1>p)0R*yfxbEv*(yd;|9vtd}R(DUZ$rEq0Cdm|T5$t3KYaG!fO{o2J z`0@#9qv=iPU)=dS4!;i4LUFhm22KrPYjU(zB6Ljv11-!a^eP)Xbpv~g@k4o zmZIs#M&=wJgF(9eIWL!g*qOT~(EGg#r+Gh?GvIY#+O2&Iiv@r4nN94ilOyaoU&zGf z>)_*-j7QJg;;3ua@m7p0P1Np%`j3`DSnmop$v^=<%T0$+Un8i$H3L|w zC5#j}88OGdGW~#=5E*zAy<7M4Te%e2>97J_8ly1PM*$L=YfHD(q%p;vm$BvK4K}w{ zj{S-054)C!vL=nCn99vy_hw%~osl|pb5|&Ie~lFW&yfrjJDB^cSU3<=#s;?X zg=mj0FuzYJD_=JPwq2hAr>_dvA@geXpgtLP{UZzNnnp0Vw>m_A&;jQiLf%!jfFXI* z#FXuai>xYsVe#>q`0P~>rfrGFOe;5*|6nZp(`Pd;s#AdGGCTTu<05y~E{W;%JtKHv z?sB_LPSY$m!B;TwJgM1Ur$Z}`kjK4pGH@y}Oite-$}%d%#Kg^P>PA`it}_Y8jg&{f8;6uZi`@tT&wug>P~FlRZ~e#u^g@n z8pRwN3)%1&LjB0}Dk&9J)BO95B)xZyw6=9q_=7@Hoc5Ls+J8}V;}vSq{D-Qo8!6$$ zRhpsFOL{C!RvP}|0cCzKqh8ThDd5dmI5_$zo6$A~Qm>w3dM4g5(OVs?7ng8d$BWtD zxdM|fMxE}=Xt#x)Bdm7F7tYY>Ieu6%5&!6P^5);{Ny}}5z^>M&VP%Kt_4@-uQ~?LChtZx%n{7T%JVLx`FI(YZ`mq>jGA-OyfLj3c&xK9~dv00H(gkCelSX z+!9ZlKlOp44jJv>$u$v`<`qOo6o%k@7Jxrq3 zgvB(r#+ptE9#O|a1DyT44f*QTbk6b!4V<_f-F{5Pc)i{9*vlH18;YpG?h9tU^s>FW zLV@m8%n=o2hj2a7%VDs`J#g-K3F<$!LT|nIOu7F@7#9@@y=yZ#OTp*zkBNrV;N~Op zA7&@;zVhktE#bRawu>dZR!C;I^<|m|j^Ku+*C~9D3aL07WAKtR+VZX++5EMkPrJ{s z|K}_s2^GcT&V>;?f(&j%H8o==n4wI z@ru+^Z?M$kr`f#nEcEFaz`vZCPcE_TR9Ean37d_X)9^r{X4g)YA7{~W+#_!{?oQphL)B47sk2X-$10+oFLfiBGbBiZf7a(l~)FHxHwK 
zlrXJmeW6~xhbB&2Ky*w=8uYr3Oqa?_D{nQ>=o?2VMLwA7n%7b4bQfw^R6@?@WyPoR zenUheLa1CQJaEf|+GUS{i!X%Sx$&@Tj{`VeYb1kHCG@z+$X2yS`otp17Pu zhY3;8!ZpIE1B0YjM)=bsog5Z5Dh(|@HA6TT3ef^%=#uf zgH@S!q&#jPeuC4==@RsdW7w%fQZDn+MnUIY=VnIzGYR$TBRZ^wa@Cwaqd{xpm>H5haHo-@gg;w0bZ zO-AQ_Z|T6DyO>_IoouYfX4Y|0=Bz_>=E7YZ z8*IlH-uEJxIR;dd{+!v$$kW^pi+Pp1fpqj(AX4ORx-{qt1)Q8n$a|4$n7|@8IVUjj z8zn6sGOYV`8aUs)%@xVZL+9!!IC4*yZQL7;At%Q}w95zn>ZIr7G5ICUm|-a06;w^f zJw(#-@NV+XXd|PF7g!slAf5Sq1pCvW0cxYY$asMtySH--G>zSgPPGxJRX?8|tZnBa zL<#iJwi&F;^5LFOACR@qfWqpfpt;HvuC@!==d07;c+?Y7(+m%o)*{sJUVptTw#fu5 z=VC}GpGr~r%OU*4d&qkHoXb8K0Vc!Gf$IAGV3U0I;(mc+TVk$4eTQFy;;9%a9IH_qHkY{i_GHx(}tx&z4gBw<36Z!1@%}-PIcdKMaPp@QYmDUpc9nl{Zd&N%To09iJJlfs!UB*;_Uc ze_YJP8mI&xi8D=E+d@3%6AMYC-zEpC{+%lwyj?~ccfF&C@6)MI_+d7`cRZ_(p3Iyt ze4^)0!JwY+Pxp)5@bK&mTEGnOOt>2ciM7F7FPHRhZiA?knQT#=9(9~~&%CCo@;bZ9 z$YZ|;%^T}V#(yWGTT>|79?uem#>KJxHzG)F7)nWQkI}zzKS(@xG2OaWzRLbQ-+vNg z5gP|lY;zoL{kfTK%7`bnQiqp}8H7G%4rH*R4;-@^OM{;YTtW9hjK3vE{hsPTqx}Fp z>+eCEQcg1YLl4-L(!U4b8Ey~qh4cB}{aJ{AwwywGk52Fpmy?aX~ zd!Ek&gQ_af7npSKD#p?LFWzqB%2lxav22Rfmjv1o!doAb$;pO#d_ z#k3zr&*$2hbbTBc?-~U|Mm}UK=8a_i^}L~H>wVVluMR!Ow}IuvgK)GT^1~`Gu;K;_ zFh8Bn68z6HN2Os@urC9Am!-p)?c+e9t%{s{m89R9l=Kx2&~pAGozz`LCr+Qo?GBx|Zx`*zq~mic;n3PvqC2Cf(F=kFrXZNbBZ2BEO<@v?y+rRKB*4 zwBO00Q+6Ebx-MBBu3$fJ_Mv6f@Ay<#LueJ&S;MM? 
z+G6NPd|Sn6`1W!>Df9=vFn%McSf&dTf~73(SgJ(TE0LXvQjkniYh*Tlu4q><0BTCh zn98z7?&qJKoQAI){1p5-7Zn#nx4R6KEGfi$qlCN^w<_j!@f&1!3)51dCT_{JWT}x+ zhzZ5q>fJ|C{n|7hom|;$p9(g5pgU7M@s?S}oMj7C{F#-&b2FJV9Q4CGSzS{qhRxam z%X`M)^b3mcUVS{OEPIZFy<~Bsc^7AweFp>HJwt<+di37bmzF;9BnyH2d!}CjOKp40 zu1Dy>sS#7iO*r>{Nlf8%J7p=k-2uAi156HfU_bH`Kw*TGldp|tey1v6V&y3~@HqhH z_-KI1C}phc`N8EZoP~u$pW=>-`*48SA?m&;#i>q(_<89e8mF*_fNo^ z6;{HzD3~%uG2D~!!fznV0KxRN*BIOXRmnq{m>Hp4XWN^~s_c^YBszAzW} zJ<+09ez}q<&ug%~jZiP4Ozq^fu(W$_x(=n#-jbGc;5k^bqHQr z`wNoZSrL%k`4K8to@Hql_fpIDVyYOQ18Z~IxN#+i>9j=yKhUWG53O5+r}HwIsm(a> zYft3uCuDJWUEnkdJU|zJHQ|3#N#2GEQh7TGl{czNr+?@myRw_Kp*EXJI$x8+^+bw4 zT}dxO`beKtCDPpU(NzEI3*Ek?i7guz!3poR(0*ebB*s*5hdi2b>lbTCjnjas`AIBe zfx7tI9|fYXmL#v5gPSM%(QvaMa#9dcN6{1ne1zq6jfBjLqoG&B z0U=jP5A3Jb;j=GYyjxZ}aO0({WI{Y0A25pc&(na9mmYEAH8Sv_piQI_N^tgrH*7ss z4jIkI;8bif+j1fsUd_#eW_eY}k9`8af-k|6rX}!Ws1evK&SD{F`@oh_dtgIQD4F-k zXE}2`>63#Zl}>hoE^&yJ%*N48O9{tj||=s|J&WGP?BYo2JULt%N*7_gPm z^jDGKT$_SABdXzJm7s5%kd2!rd(+YJ5%B8Nbh`IxGR65QaZQ8c$vMfHE*trx@%2m! z(T}8nXm8vR@tJYzE)*oY2MgyN!uKQXX_3H_(XE^dJt3uRjCwdTH8h4f-JbmZm5ic} zA7j`(3#^2B`?mQeYs=5$_dZdD9alY=j$|h5)p&=!@A%1?&p*MgUmeWH+^gfc=>?LJ zXo7+7b8vy4ExBejVEcP#>QX=&Tm6jg#qPsT<}p+_#g~q}i)N>pA&I#mV12OxM;l(} z7y9J0M=c6mkim0-{XJD+JSEfL$>~(9GK5}UJxZJUyrzUrk7>x^4K(!e1={sTsN-B1 z$)<%qtZ`Zfz7O-jk$p=! 
z@6`beUf$(KeD4(9Jv54p)rVp7++hCZszJ=H;iqWW_XyF3`dQSoD9N^^#RjkaHDdN5 zm$}`=jeO}3M32qsc=6{b?A@)01qW{O_f0La;>mHmo7;yRw_L(6o-(r4DO^x0v=nP}1*(`Y;sGn%ZMGemd$KfoO>j5gPrvrwa0 z_M=c(+wG1Z>!1jeOB|BPF^pQ5McU+&LJMK~&BN=Mh|!_7`(44IMkjD z;sN#S{Hk}Nkgu~K+bR>%Z)U>Sr)eM&@&cL+KCxb|X8>==lH2lO#0y%upQ_EmUcHu} z_qf6>JMfyltpAFMM}_Kt-`!C3G7oNVd&V&KuOxi@YszOYJ+ax z9qiZlG<4axgL0j4D=JwG_e6>?_lp#K{@Dz-%jN0gm`HTzwZO1w1-N|31d|Ca5T=&R zgKro$WgkLI{Xklr5`gQ}!kOH?Re~4e4epYA%N<`Q=&z4F<_Ao9#VU*MlhZT>>4)`c z)VDxM+TJNAt>`E4%Ob1j-nBoZTGmL0y0PT+>Jgfnu4HpwB(pIFf==4G1`EcED0+sI+CH zGQvUGxRGu6A?#x;NW`~We4*r>6SFApX49LS@Q;wk!3xqbz4JB>ny1ZXoo4uAa}`%` zCYrqt3}ahQ6N8Ilud+u{=G4q)^oLiG{S>(~$%sloElf>WjhDo00e7)eu z-m`WzE)w(`u6>AdGMBK3+orgxO~`oiGJ>-@4d|fX%_f>D<1{)))tmN_Zb=R%-&jZI z1r%6QBH|raD^knjASSCHBm22j@#rHOF7hX{6(u&=h3hGD>27NCJBh2iwS~-(NRo^< zXES+Os8_XQgA=!arsgI{UNxMIbh_E5jgC;+@LDLM_GYh^?gl%ZrPRms5L1i!!gcw2 zayHY>pxfHMaL^)3^rmzlYW`!(ue8)8jk&!*Q!4-#pL@YHn>FFL>?;;6TnoN@%)^NV zLoqU^jW3@;m^0fOz&;j(QVh84X96#9TnMt_w9A{$uAzGgUHkxND~#R}2l`>(peW@$ z1SIT%7Dp+hZ#~YA>`?~OTYaG*!IuhoS9+hlot)Im*b=WAmbcr5X{{h;-TMkNb5H@P zPpgOvu%`<1L9}d#8Xf*ELrXMsDR=7|Y|hQX>PI=$78}Nwh!=|A)b|p*+FfLsiz39z z0~N&?g=*BJ)?aKLs|%^^`^Y`SkSx9$P)1uO$wwGdN%A2I%3ev+V{0&P`C)8tF(NAq zd)lijwNjSOu8^lu9Tm7SCzRVKtWAQqYLVam zzFefS3aMJhu%Pe7+&;L>m;f`t@c%X@m-JB<*uZXsCD#GdpXQmF@qi0AC8Hx z)vWbq5-kn1p~dzInCmp2^9xO)uz3%;@E@mHWY$%d<{HYaovO}09UH(Z$H%drTZTf$ z;1qUQO9?KFl*5kgAK=9mOLA&Ar6a@RA^&+2U3XbdtxIIYbEP2i>J_uTN{L1Al2!+4DtJrGW zzucBIW69<}HjLY5486LdIn~VFWS3h+-lCl(S&>5fVxLmRvlp~Xi>F@RLcQKBfxJzw z*dB=-%S(@5!(a9lEOz2k{wF6xgXUK9dF9{m-X2YslV6D^@_Ui;_<6KovXs|TD<`ynMX+J;QNloBVJK~!> zFV5L5mv^cjMFYQF$J^uUc+cD2Sm$vK9k4&T6fdXB3AeeB!AV?L_Ga3jH4O*Hm$Ii? 
zQPgqb4o-jXOAmSnfXxIkbT%z#`!X{iT&n=4N6Y{Nftz41z%m?Q$br_|$8}7&tkjAQmRONay=@nT>8#a!Xe%Y=p{Ze8n9a5(t?Ydhsi7_fBw=8a)^w9>#aU*r^KQ^o|Z_sc(k6 zrb6yZ${ZLbFg-pFs)vQ=9Y{|lft-7q@aml%WX!*%YQz1ssi!}A_FqPwZbkfyKW~}b z>RBMuZ#A2BRvTydw!(%prrF&6Sv zOs!}zN^s7z3N&>;MH-)7C@RsK&Tihedh}I`R$L6H#EQLi*C!8q{%(Ov{RkL!P8t4WorAS*4QxfV5@@#Whk%zO z;h@SKsBBGPMZv9L(SB3%_>T_sJXi*g-vxtT4`Ts3af03@oKE$f1{2oK;!a5_AuDkb z%rv@KjHu^o$?58*R^qfOP=FCL%~at=Lzn9sxg zn{vAWH%>0%MOMdI^I>i3w7UnJhvu=Zt*6lJ=MD%h2!}-~eJOIP6K-#Pgjx-)e3Z2o zBo8%*mOF1)=So|y@S+VnO8&gk5IZ99LqG;2_Xz05FA#Twe2(W5o+?BVCA2J6b z!&+GSNDtWhTLTm&Z+M;2s^UJWF;Jrw3HtNY#38+#nM;}&_4giy^+N~Zk`rI>Yl15+ za+m~1hp)hU4lgh*UI}N;Y+>OIYw1$%6`XAm#V-l(&t6N9VB(fMP!xDb^KIACS@)s* z5S61$-s=;8YW)s4F|Lt6SC=jE4Rgi8zPVhpXFXT0wHr6gaS-|MHbB3x3z*`sbFe1! z9K2q=m$IPUmIYZdJS9-87G$XWI{yySx6`w zBV?YXK;bm}Z;ggMWz+ zyb|_9P36~v88;YyU0sQXj5HueEsMYQCyDKdwxQ)mIVe6Kfdks|@ta9Li{CO>q_eeI zlsT+`WMmRx-u$`f;V>N9epIvO)Z1V>w*x`ek-si_$c|QL@Tq^V^7D>nvSDv#(V)gL zP>^IuYs!A|2~tDeIY5_2rphWAX~;UNtqBg^!f}(<1aI;iZ|f2!dvw%s>F3i&?Oy4- zm)-SHwS&8l#m^GoiQkucUwJ*bkR+q6$^w24vS0k}ubqJ7p!loW_U}ug#io59+nF?5 z*nf&uvEQTdKfpp>6FwCwn1S-Z3$ z`GQ@t%I@mLdab|{4&LauCS>Er*aviv|b^e8X9>F_^$8UH_e`9D3${m)(|{Qr8f`7gcv jYry|&FSh@s7bAZ|J^7>IyfAYm7Fpx78-E1bRN!N%^u z_S%8%we@~;ZuQ1<-*=4v_&?wH#xvHl)|zwvVs6$sXP@xe<#n`;7#%${vd;kLytTY) zd3)6|+}rP9U(vq6(D3k421EPM!NUVyyGv2n5H*?xjO2kmX5`UDRSwYMJ= z8aXsDVrW=+w0*viu;{>%VSS?qI9iT!wrw{&I6OMAwu_Cv@GNMbH#|5pa_GpC7Acl_ zJWJUZj2t>Vyl+@||G*(b`-ZyMv~Sa?b;A@(E3S+R>mMH4SFbb_WJg{hk)b2PqJyLC zXIt7^jq>ubx77{MqQGAkXqW#_6J9=!1%~M!Muvv<9}xY^QNX_)S^j@^#FhW)Xs)Hb z1sqr zhV>2o#bm$C(*B36y?sb{Ew3L}z}}A6?8&|`uOf$shwIlrG&tPe;qQ?V!I6%(x@|8X z=i>E)qx%eKJv4Gi@Zhj9y5qoFwOkzk9VTzKp%GDGgNKF}-*VWkPKu=^^=UU`=#bFx z=;1?(_v_csNHP0b_P@v-o&NFHH5h)J8eeZ`Tm974^8R`3sGYooGAD@34`|nQbk{=Ht-~TwObU65_)aS=h{q+~|{drb>=)(U$IIHw${Zp9_tIyvK z>knsqSbhF=SpSOr{jdi7kB8OA_y5je4ftOUYaPFzht(e!otKx6eOL z2W|iMJm&4==@z$htWRK{p`rcy1qK(lu&A5$tlJ*6D?tb3e^czfK zl(DDhWA;?%&kJ1VpBI>3!~S}q@UG+N@Q)V?dY=3FN>RJczc28A_xPhTu3s(t|CF?M 
z;0IuBFaH03JPPT`{9?f?JeQCQ*EgOCm$-`vpa=8v0%+5taL^zzGf!zYZK27VSANk6qoumi7O3SJ*rz&%B)Y zfI-P;GpAK}Wm=c$Z0I$wnBu3b%$#jb4_Ihf&=B`N+thiwr{dGUr@>{=!;HC}WtDGU zKBiQ0*R(IVzF}%rXKi4-(Qtpu?K&Agmki<4zZ%?~TN|zg9Wo?*TkU_O@lnH=-;DvC z8|^XeIud8d@ZK2E_Wc0Um{UhGzj^o?+RxsSG1(?J)8#<(%v&+xhS;Pk0S7u)G#IBZ zF~uBp$y}At&(tigp(!Q*Q`3bZZiW-qc}y2QX9RfP$jtO;9+J6cWLoAze>GtA;1`A~ zwku8d8&xzkwfU48+%caaCEpQ4?q%OhuYom`y$>C<{naXD7V3X7W6_h40ROaHQ}Jh4 zP5-zmKVI|cs}{pY zZV_%Mw6|@>*fEX$8Xs(x*=XP53~P5AQ%$p7=Fy?U{VRV8%=8ZJ8Q@y6bH)RUy8h#O zJx3vZ})5%u%{Af3%BR*ZE)2qbFCay$1wk3=5yCm{zdA(4@j%#7Ep9+TEN+USNzg1*k?{1{MtVw z&Nri?pKr$Ey6rRXg<0<1;8M@ubDpJLgz-rL*N+hv^$dC#{}?>2H(KUF-V`h_Q}u^$BFT-u<%o3KD_`DMFWc;0i> zb$Ubfh%t}qd%2j=vUgGACBsp*Pvbsn9sgnj%tV+Rx(f>pKw!b z4LGNsE#hJfn;)hgxVT8|`fh-_ZOBSB^N%`e-sB1D{)tu8-q#u$SB)eOZ6E z3aL-j${$~s^yz4I&7~ahocrU!|L2=8x+dPE zlAuJU6)rzD&RD(V2JLP6dFpzx7Q!YjkyEBGHTUodP{%}hs#(2*j7k36t4D=g-RVL-ha-WcCUe|CgoXl!ze3TXg3-??{*K^a&A=Qpxv7)Qw@9T4*@~*r% z&}yLC!Ei}*k6aC-Pc_f--!sjeH0h>!*}(g_)c8hz?dD+Y{3V~UK-vX!+?+1PlVR!R zJlBKp!00pP?$c^yom<((IJ(9-bNO){jm6U|sta-pnw!ODzfigE_GOMwkUc}Jzeaq@Lclan*8f+ zC|#R8#cSZH^1HNYn$U^0goVHd&r0Stla`okS(Z`V=On4Sulbo~RkSjH zUT$TqZ`sg1a%L1n?YN4q9;BJAvO8kk9SgJ$w$(KM_;9#byrVg~dNveqR$R^A@1a}^ zT5Gy_Pm0X&$?B;mDT;GYhMauBPt9I-QT;w5SUvLC60(eyL}-O`%1)oW0X;t5H&?6? 
zVR(uWc(hDItXI;<932s3&fjEvK&Om6<~#G9O?AUx22?(r2F07-h3?}fz-sF)<_QJ2 zW9s;CP<6SAIp>d>lT(c9@~X#0_bdC=7ZqokTMlu9D^){OpLBP9D ztznt}bSUjVRy^^s66W|)qUdV}uq?GnAZv+rWVh$WW7!*`;Huh9}*7hM!4w*|@t%Wa0XO(z?Cv6`W6^X~BG zun1Ti8l1k_7@G8`3=21Ahz6g=h%U#MiN?cyAo|T`Lr=RS;%`qd zzuqs>RT&iIWL!DG!T7qIRNJ4NuP%B!LNzC^R6VxjF&RT* zf2(e_tgD(I`=}H8+Zt2un$-&>3mCQ2_0(bG($q8A&(v*=H>o{ZS1}$A`KEsH3RibF z>Z(3YO;tDFE@Z6usEx5eo%+V97F$)1xhvEh*QM&)9ntFE5+BsN-AAdx$BG&kRL@eg zT$0q=^K#XWwWp{nJJ1tcf%oddb%|=9?nZTX!?|i+zv{-gqt3>|b1tcQ9)DNe)@@S@ zE#IpC?Fr`3SJB!ZHkj`gu4e9i&I3-3u4Hb%Xg3(VAD~C!^_X(oBJ1L4lUB%UN7k2H z0ahk7hG$5`~o%+vi)?4E=z9IKlxt4H93E)C6# zR`fJ4o$ja}s%bFZy3+xkT%V`*Db>N;?QRKm%I1Z* zYO~D)rnbr&VAzMXUX;zcFtY(fZ0ds3pGH8EUkCH6@B!wM9p{>7zC4Y_w` zd(1Ka?Fr_uPYj08=UjANLfyAQxBKrfuv?bddEt9pX_z8Tr`J%2WN#5ChyM<)J~hmD zHmz49U-_wpEA2;Xr_$!+ny<8dWk;DkMoh+NZK!$U>U4Juul#oqSn>RBeWOWjm8d~7QiJ+mid)&=S3>I0vt<`PaE5O2XHmdEWjd1&$L2Z2f5+qHlrf$ezTs<{vDD+9) zp)Rj~Rv2zLiwjn^YS}~e;P@5`(R1?&wd1$rknQpcns4d=q5BSkm%1LV)>sBFlIB6| zs;Z%m&s}nj`9)r>ZUb9|dT-Uc?OhL)mUSPJL5;hB_d>k2);TNxhyE z1+~{4r6-u5H(!6f$ETaGj!tEad+yXTJ__z+oaY#?CQof@Eb*wM@zxE+xO<_yvD^6| zV}zlo(dJblHBW}0dZ5KQHD_F7V@Th(YO76+jNewZHah3)VRXtLug)s#Z5;2JsOHU3 zjCV$MG5%JrwK`#Eka1VLbakmuUG>9Bdt=`vRn^+#2N>;}6gF-uRov(~_lGOGgZT4? 
z@NBy>X19H<;KkT}>fDblV9JqZ>I}tN`Z)QTm1k}!zm5S>HwovJ?xEKAudHr=YG;16 z+Rc3P*&vbc=zNHC4S`<0n&a43GvLt+8*{MJBKWL4#5!vWs445ufyD(!^QW*UV$Qrk zwT8o0?3u?^O+W0S*6qDb)~r&N9$-H_{p)iNx?Y*(I-55;`M`?S+q42(%a|u@*e>?; zEoQ!L&I=7v>xq-Gh*hUm7luYtutm2p^Xq2^@x{zDP+_hcGYhH?H($7OPO2l@-ZKsR90+{cQjWn=xw&PtATR*7hKVHHh3=n z;f-EF|M3Y7Sqp3ss5bSPt{R8t!r76o@bq9ub^FfwYJ%-KwfLznYCm-~d^+TyCVH(A zt-f6sZH^37>lM?~THzXe?TTuHN~_c+eP*b)-NPWDK!W;l<6t=EcUCppzk$NX!eHy> z@oLX9in_~ntZ2V~F?i5XVreHl!2MYf9=B2rH-Iy4~vls>b_8p8~X5m?!qJRG)@seU_z~R6#7}dx|6h9^PI2nv-`h2jqqhmB4kEsihh9s!>Jq=nFE5>!W zqvJctTG8cti+bOyV^FC8bh+mu-*4Zp-P=+CTNRtnIUHPLC2*Yhk?V*@pI5eNBiT!I zxm)^tW#M`}I5!rrEUZG9@`NuYR}6~NYm+)9BiFBNb4JT^bP+PnJn$MaCwb&rLH#ff zZjVUiTAhn?bS;psgU-Wzj8)#6biZRt`N~oI+@V531z5U17Ei8TE9|U3!G=Cra51eJ zPO97;BX-A08n}AeBMO`JJSvY-`%)o(d_VMlH$o;}j)ZZM^^m!T z_b-Eyg$hIU`nQ-v8Qy=npzCTu81tA?yT#?amnHGYldCI>(_d=hts-BLbHpXw=QNVU zC&oqYhqmr}fpy6z>7%%w@j|v3v4Xv^N78;M%T=2%S*u*NV&upP0m?!q@PfM?1)CwL|o8&=eX?O2G32W+^|h z>h$09sk9f&k?UJQo*z(1yW94aJQ{aPMAR8g`0l_sCAkyrKPkj5)*mR1#6w&*_-bng zb4${a%WpnI_T#g0EpjbkByj+FgS%}baC8|D2yEj7e2({Q8jt&CS7$GRcz``I2lEnE zqh91=ZL+RC2=7pAG_XhVYYuF)NcXzBX&j7SxE~p}xX%URD5MaUdO0b{H?n5MvkicC zYMu*&Go$mF>$wNR2t!_cKFeM%y6%e`T*HB8pgm?zT`Lw{-z~mBh?OfX&Lh>UsorVY z^I6lB@?#>!2GiXOwR_^^v%NW5gk@dv;_^mGY8n zX<$9~-$-{i8Vbkbyf(IG&NJr3on@j>r7vRE#%s)>CdQi?qcN8GZ`3J@<;T2HQoXBl zT(BSVD+wnHRT@jY!GgR=_P~8GC#`{VU0rQd?uju!`!{nR3r~+g@=bj9$l9zmyn42H zHM*mmvS^BIdGDkQD(?qV7TJmUg?qx47q7s24+@{IPvE|VCA7TP5-FyS)Rv9Jw8;M0 z&N~}|%J0EzAqDX4p+#Ef*m|`0Q_dc8;yrTddiy3APyx3?$a4S0qW)7Z;>i&$iC_E^npj`-}< z43AW)1f(VXH{4?_S!?_>jJfzm;~L)2-v1kER1u`BJ-mJ!z4E=p%7ezR9`oxyNDp{V zHImoFiE&M5kzek|2?yt^4!dr?K-Uu!MW^1`&~n3h&9?ANj_IS{>|M}pSp!M4LV27x za-pET;CbT65^~b6Bn&%Q6OB86gQ1N*px^A;g4Uwhz|Ppd;tAqwFZub2YH_Mpek2|d zSX|1;xOJjhqmLr-^(^?_bur;~$-(*3aIf<-eRyX@DD_|U!r7@-x<8JSrsDf`FnVXb z66~1=)tP8-AT7dIhep-Ab4+1=@(D8rP1gHpbN{s72V;t4$KvPG%i+}FvlKfh**Ez> ze|l{rZ`_C8r|XG71mQ(woU7ozo_9Nsw?a2!z}ZUTL%B3edU-{-&)tPVa0QuB&4{ihZD+blCs`tV3stpCC)wX%??5ZD@ZGIbWhd0H@FYWcU 
ztUew*Rz;%wF?V?1H3o7IU+1_!`erQEYZi!d;9DTr(5s$O#0{f8n91= zd_%uEeX;P{6VlYlhkKC`mGbGm#y7kOW$#YK9`BOy-r(KhaizH;r{p@wkMG5bHD{3q zjCplXScW}B9?<~GJ08Zb-G#*X=g> z8X*pL?gh-tdQMfni2JcdHG*=w2Cq{;+~IqZ819XA$sZKFl8p5hEYy3eHEolw!Cpy2 z=?r_3`>J}OHT%ImC&f?dyc9>PgCXoiiCk*}!s&75yPJ?CD6a2& zb!pC61NoH9$z#fEPe}C|E2tJv`LCvYpr9V)@QbZ5SzU(Ihxi^Mu(@G~WS*4pbV(jq zOUKP_buI&Jq!}q9KN@I7I7GBIiXW z{$4~tY=RG1$6CYZR%g(&Od9+#^)9N}Q$%dSPI=SwloC{B1@BE9+tFdJ460HNZhE$o zO^jQ4)|j7WdOqrhe@qRbT06rFq+ko-+^#YqibE<(V+-#+ij~JZqQqv88HvY z2gT+}8b~}cs>C25e&Tv4X|J%)rGr;X;*spN!jqA}GxtLnF{wG+n;7HC`14*QLKCTM7G`BT8E zT^3rkGeYB;7hr6}4%k<0GmK1m0wwNNl#Y&9f$B{CMh-HKwredtqO5^tI5p&sUc+^p zOW&6~Qw^B691$%K$KVaGF(Soto|v&@n?`Xx-y_i(iIcUMNAmg{c;IZy1zeBJ@ha@uEVd7uvify|C-j9l&d+yyuZe zyZy(2W_{GSlqG(~|BOYZbrAI(M!}&vnR<-Z6oY1ugJjudlaa9>dX4>s!^pG?8s!cB zTgJ-@mBQuf7Bzs^!Fx}GI(e=j9wjz#op?~`;4ck){itx`hU1rF7Gmp#HGuMvy}2na8RQPB zRd))Ssh%FMca}ll6O*7u31=V;X|>-EmUe!iS#OCF_v=oQdkV2B&Q<3s(F0=sl7bK|GRlCD%B{?w8{(pW$Ah^ZQ+RWM6)) zSns6HNg5jW#{A@mI3H-22X$Z_^bfwF@A(=qKjkFX5tpoK*+ndFR2#Q76)2k5)o!3jB`{xF(-n%HY>;6w61KvEBau7w~R`xI2eaf-!b zbWln0;7KtooG}}%x_*+62EGv#lhx1eMma189~v*v=zw!vEYPa@HGMRHd>xeS8@XOv z8GlZXbvQ!LoUF%G{buBVX@k<#bGkcUDHdX&$%!A zOSVJ5i zKS-2+xu5oNg8X24=U^1J1CU}Gaj+le=iZTh@F0ww;}U~!k556;1@b1M_gog#gJ9oE zqP>_|e>zUFX@z57Zx_qf{0=!4901&o;et{Fd4MmUdo`9N#seC)6zJg~TPpQo17P$RlTbq2~<1HltQc=A|0^ zc>X+W7`PuF)ZQe)tr-$V=OYY+rR0c8eQ9nqie(Dxr7PHi?X%bG*V3#!q%V|#BEPx4P7A_- zYLPhP+Mw|Y&DykMooHVU$J&`?mt~29G$mtsX5M-)vUN<)#6oaw&NN)G@_`77_yAn{ z(a}KO#%o+p{J@y+-|E5OChNteb$O6{D_qAKb{Eef^@P{v*y}#*F1CY}!+)1FxEH;5 z_CcAC7t~ zfp+U2nrN@l9y!K?YracMu|%_W_Zr#oZxl; zl#ayJPjUR|v>)oV7KJtl|;r2Rceom);&z2U2&t7Lo!w>dKM`)q*-{M-lzK}T|pX>`EkY!T_bhw3dT^+ zTO{}Nc>g@S*=GkPE!+m)ESulr`|oSoTd^^6{C5l6xVKVt6}GjFVUJocC{7SVdWG<3MCj%+KRU^<^j= zli8bMANc}P4?RZaNuRTqdh8%5E`puCFwceAva0J)<#k*u%<&GAiFMn_^`^P_X-frI zG1m;r$OTZ<^`(+n_nh)us_j(;?~nEkn(!m{*npr^w_DdbG+}!Pm8W$cKI1V zJbIkC#d=c_SU6x1kOmJ)9I*G)1IV-Gkv`7~TPu88*B5r%1av9JOzZeP1mX zkK+>sYn}C30v%Vp7p&DY`VOGamP^rjW 
z*!No`FphkY@8UXV#}y-hd?EKo814tN-e&;waIE9eJn%R^uV5|Gl~1kLL*RxkaCzzx z;lDjgEbST!%W^WHRjwV-Uh&^<6kCf6+E0EX9%x;e>M9fe6i6?6U7IAH$Y| zccbfwWE@qh3t-s}C_1f_;Z3VT_KWYPC$GL@_KOJypW;2>`XAv~+{Y-m?no~uEb#U- z*C;z%ZjkLhycbcQ-T~L|9XypWQgkYhZTroJgVtmifxVFnjJswx+ny6Iv zj=twulCOoiey4)4X8s0TTk|o*Jvt`y_{R!smk`JkSVUN*)si&pbyjKHMV`R9k}zuA zzD&K&>S8@0PRW{-L&7SPTI6Fa#x3zc>t>NuXQ?ze4q)BvjieaR9>^yUhSsqfdnNBm zgEiH49OVV)nTO(n{l)DIfJ5y^z`>i1Al`Q%uy5u$c=Iqc?DriS8pB~y@;1I?0R@GvL5B&M=phWxFrf+Nz(gcKkoMo zq_+w0@OA-)k=xwC2w~ zV>svX+!eDonIz2`=_%G%s^NqhbK(4_*P>PX&Oly};v#kae!;ptNK-)eRbXD)laDq) zdC7i>3-b=2&3M}PGSSd#UdsZlhZT}v@nD~i{O}|Rww%oimwoHNYllIS^d#dS?As}s zk1)vl-%Qd1j^d^*#s8;WIW0>_hu}nsx3eus$fjYM8UM zUb8{QdROy9yRI<4vq3h%4y7D zQ#??H#y^1jM@|66BzsRd|4|+(^9>u`uc2QT=04QMjr>hDP)@K0d+e8%0K=pD!1}31 zd2C`EDK^X&U1w%Oh5jRf*0_FZmLfJd$i4B|;^x*^2=DU^y+bC-#PC7zcxffb9e!JE zS+Wj~O*}(b2l;sE8dw|_1;ho6VQ%k`KQ!WzWoIZ@b&IWzCk|!)ti6(SFfh41UK?{o zev5b{>rQHc75aa`scRPKb;A~Kkh#MHF){oZZj1hn@RP}tQ8G3#d8K3=`=L0m1>qt8 zTp#u>i9_$hVem^q96&m{f9ioeO*A>KS^|WFVT;{h*@|S&W9!vUK=nF5b8;HDh5qpQ>ddEd#V9sXby6CVN2uUeNT)UO0@v{Z- zAY*7R+trLmn#FoYCq2#@thZ-UEgj4IDzo^VY5yb3P1 zJORJIJqh2cb%!esC1H;8MRK0%;e{znjGog$=ixf$n4^r9gcT2CcZiS&Rgq)7Ki+m2 z3@n$a3@jG_w8kD=$|;03yc$14uSxoz4aTv(fc#MX3uVFn`M|LbQAJooV;=TN92#q0 zaab(0Tb&sT1y)AmPfy6Tq{~`5@0rW?%*S~0zgX{ueenL*pc=X!pvw%IO9AF_rD z6CdjFMwgE>k3w;ccsQm%H;uNbB`W^zs_&2Ar_VaFKg;h$Es^s)UK}>+y1XVFGB3p> zdBBCcYa{7mqsz@HUP~dbXj*s`$)mhAasuwp9>W*|Vc}eHq5Ec@D~)?e_bN?*^aSt8 zKd+Ek>Whd=FVSnLW}Q`W9f;*MfXe_by!q;*?a38bem!YZ#%~#;nlkS z*X6LN^Z2R$BD8YNkbRe}hh+zR@IdKlusZ#iIR9jys2;rp&OfP(G>CtJq@M{O*EOj; zj4^`nlIqxx`tDp_weYulvc!|okaQ;rXFau5S)aZ3-dY$uC(I&j6{yqMgtddVD(Q*8 zD}}%H5nugXfO!{OE-QGvZ&^TN-m8~74V4`!RwasH2~^Ye;sH2Ewl1NwfkAJ;xh-x)A^k266u9oW=+><7T?mJ!|ajvY61FP zz=4=&vW~Yc(%zgF5G^NJlod3nHotILI0}B^jiu3b?7rv9a3L{`Klm_v+LNmH^P!i$*6 zAMoR`>u@{sDUwgt1oDj$1@`HK@{;PoYgiQ|9?64h*gvYdHzoJT{v5B-y==B34{!X0 z#6=uakorZ!i%-35%y|MAa7;eP)&zOedvfn{NvOHKfOy1%0@Y>RMsJj_-OZw-V+Gh7 zF-@u4C{j3XY$GgQ%)wgpv;8f{Eb&2sGzAXL!Y#*Q;mG0uF8jB{){# 
zn7A6cmf~H3c=2jzW#Iczj%{ro3YUB2GZPk=+v7u!oY|=bkST2XIX zvpIer+UCiY!~v{Bwd_L;%yb9V^!RW`4h*UVaHtSaPAvY~0bAyNmh7J}V(Nj9NPS4I zKj1WvYoKAyA>e)-H=fZxWp(+mJMk38rq^db zBHi^Q_*u^cvn9A`-DglAoTxeJ@iZj9l7jrC`3xJs%6Imo$`VO~#4-7B7{E$@@5m8+;Mv z@@kS}RL^*QkS=zw(2g|))#|*X3OVZU%lxl40J(gNoH^W5kMrMyyg;MOV99#p`}bx} zLH%ey`GB}J?u+#~#=SloIGy>)XL-tv6NEw1A}>QFb8$T3#a8ZBp%^Uki-tynQ|Kqo~`?3?we^qew5-l+w@+RY+b^h1^Xjz)fy8`!!1(i8YDvX)zz?<-&&ySp^(0y zS?1g8EIX;w<&B($*h$R~Z?YGoYsF@w-t-UB^lA`1I{gwnm;VlNj{<&Q~< z;nvSd$g|&qa`@=qXEYj{eh-0kvdK`+d=cd0gck%lMw z?iW~NlO#Pby?UlUR<~#u!Kd>bk#|&2a1XMDGW~BM@#z{|JEvV0U7yzCOKhAVeZh5H zL*6+ii={=ehQc#x$P<0*>Ans>nNR*#(|$kIe=rdj`1Z$$VDA+!pk(A>o^UAEZgGj>RqtMtW?~u z&eg|V(><(HeGOiI8La0x9~W{7=ci22F+7JnoP?{}`rz@(cW`6sObFi|4^JG;_-f!{ zNN6$zh!=Qmxv`#}qk0%;KbH3UMT3uPMP%hI;@mis;Mndc2OaaoQH2c$y94>vYk9r8 z?M|JOW8@un&bq-p==rQ}w{)J}QS?56c#EAZOIp^%H(S)a)Krjf!8JW*x$7EbcjSr} z2@gcokcv>s*hu%tKH0Z#(+DWD!2@hB&xW(Lp2Gcnxw3JyUUHCaE#$oxbQZ#j$71S~ z)7rp7OQ3Z6T<{)t9XG~4#M`bjq*r25XwaoP_HZ7Fr;R3*-3H*C!`6~`G+M)ihfBFe zPC7mY^Bv8SwwE1P!({58A!t@uqio84@JgJghI6>ra6b>1$$$D+6(Fv2ib0y ztV!N6_S{{4kY7Rl$@$w(B5QD*Yf`7Q7N;7!@Idmb$D)q52cIFMp=U~}7T+&YUk_26 z;^_7_v2wB7*mlM|o`J^@^(aob@kqX#kbq||-7(QHtWKDrF-K;a-_KZ(>D>ey98oX_ zxMsjEpg9PLN0!hUia(N^fd;n7lMjArOt{gXut5gm5oH>kLHS10HHj0LyJw2KAWy*l zroH+qNmCBkh0><1E0oX^5WDU}k96z#sr0%CkVqP=l2(wAK)$BF!eru`k z$efDXQ1V!cbrixVotwH4PfPY?j*Dyv7H;-Dj zy_K0f>HU&EEVVZSIG zuznV%RZ76Y%a5Tu+{eT2C3%)qM`I8s&U}ppf;LEsi?&&GAGvK8eDGG0xt5+jEZw26 zUc20M4P9o=krQ4|kwc^3YLpXUhsxuG*LFZL92y-Zi3_-nu=0bqSx`*7Q)(Y@ojUN8 z=&;C7;~E}HQYA>MWCQGB>FGu??9fWF^Fmi$qy6Sp>M`^C1 zChB7`{$_JYFc0+tj)lJn2mf}!8bvBU)HT>A^W=|C!fjvDb^Tk_H{+uBrE%5AGB9-E zOZmQ5AAGbn370FbV*T!7IJ9meJpTLzyf(asZ8fYhX}_;5mQo7`Uj4u^t^>m2wi@x0 zVn`U7H>^1lmsm6*O-6U<&p3@Ruv}Ru7{57kh8TEtB*o)Rp+^?t=Ewr*)AxyvozkSS zD10vfqB}g&CYjX7UJMUv!6Z&J~gPxj#Xk1!E?hx-8i< z=a_H(?)f5jZxqHx*o)D0zrDS?Dpt|G_JPzk`o4^vjaoJ{6|EALN$11{u27PTUOM^TA&(*dNB|mP><9{q$p1B zL7qPQEPBv;62fR4XYNx^=i?UZ$#UwkDv&(!v{>f(724;l&}UxPNLFD?!x!>Nl?h7s 
z-gn^n+0{7U-eSh!z29fa51aQ0!lCtz<+#K5yZ|F$aGEGQP>S=Pv_Tq+a)09j@EreeBVvvU$Q6S!2=GlIQ1BP zMnd_0rgTAT))UT;N`N_(e$keGRUBNEESEiP3!W zwz)ayaAB2Qlsky)B6C61{7rbLYGQ6kMYO4YUO8g50bZA}#qNQQ5LjuQ^iEZD4D+7I z&IH10ft6edqrfJ=iGuc``h9m;_Vfr62h2F(2hPukYC#6fVi4_X@@F;{ZcCnE zcE2E`81TBufk(*mKwE4fk8TyMdSSRyXv9VBH?8ne6Vv3R9-xZ_VMD-g@s6bkhg`AeAdMHFmYzq;XynhuApTELo3mSh?ChBz~=MdRbXAVKD4e74p^IL~F?!?0Y!<){HRXW$GtU zI6YZ0Oe>5RI!rb(7Y;>y?EPh)7 z`YgC3ax)evO=b;Max;qaT03JI2fH`<@X#hKKPPr?+Nmw?&=&%IlLghu#cxx<M}0skmp-ANm+V#j(K&Pq4O(MdBvUi{ahk zx{PH_>3%*Bb8;<|8h8NCW}B(M#e(^|kGrjF@H~A;x?ta=r`J+mq3eOCCbrc`3iB7P#%5Q@9yv79$KH#IOq4?#7 zJA_p1MfElD;+Yl5HN&j4d9RP&sTT77jww*O^&hyTNIN<6YhB{CK=p1lENXiUI7S%x z#-wTo(!%Af9mBEY?1squappAesX7Q~4^CD3>Uid5{`Q0BNxyyt zM7umbz`Uh`ZCD=fy=A3MJVU%ga#9(~cG6o{BC|MfZNJlhVxe)ESl7G~UVfink5LU6i&Vv_MeR6;Wrp2@ zs1+t78KloPB;yvh&$Zpd>L&5vj3S1Wm1?h>p^d->w66y#G$vVAC%Z2JLmFprt) zfG{v+`UU*KR}Jd-wbV7LJT!qzrGc<-d^?!vP!F%r@43k{UlVQE*_*^&S?_^36w-!) z*MCDRlc&g;xLhnv=!t2KW{Jh_wh*nE=yRAmdOl}j8S!FyC48}b4e#MtjVo|4WPzkw zh4CEg=9LX=LVb~D&C0Y%85C_B$=p-iPw~)jG^D>sgwHvQRTBAa;_llj^ut;6@ z!ak+>O)4JB8p!?7z7lYbba^kguD)kl6S|wQ!vuNLwyy4@`^}srfL{yACu@j9Sx@!X zVCLlbXpbJ^^5G|9WepfK&y-t(n?`|#z51F zanSsF2PmUOrOf=l);m0OChmQ;@y&ehcOG;8BF%?8q7ydXW< zvu|H``t%8$x_JveoSz97&TSL>qCDWB*LA8pr`HzBJt7G!LW>T7rg?7)-rtO=O}yJA z=`fGR9(_|UQQbB<+inr_z`_Y*#Q!1fU7&S4t1`{4HI$1$VbMkmqzRnj8kM5TDscbn z2UOHl1q?wzPXq;_R1ga^)mbIq9>wl zxReC(egToL(eLxVbFQ`aKKty@qnBguZ+`Qe^PTUU-&|`aAvs?D=}-3lXYT#3?Gyfd z+jy;{9j*!XIyc_@}>U|JEM z`$o3mJBP>aul||`Z|?KXTi-1ER>bxR?)}w2u$Rw3ez<(y z|NJf6OWt^ycz({7xL50+b$sHcJMN3$R{W-)8{apH+wg_od6)h5m;KQ8_a5_O;`G+( zvd?~ncs$xzpSbDytXnvW+wNE2|CQV8FT36TZ=U~ETk#mggC6LgjXq=d!TWy$wX4<# zY>{Vw^r^%5z3Huk*4_B8{&@SE!x!$}_P^g@`?;U_1$%Vl#ozIByKno!7wzx(lfSzE z@Pj_xKKMWTqqpon_cPzP|NUoNJK-1GQ{Q~$^Ztc>c+b#3{GcDy`JK0l-#+=@{P*3* ze(!gt_k7(~$>+|)Q{MW9EqS+Zd;9eLzj4>S);;@{?|ajiPUrsohxQjg_OZ6z@h2ZT zDAzzvY%l(f2ksOz-1F*}Q^WpM_xtGd)PMJ%jRD^`{>jbzuY2VGx&Ln$J!k*eyS!w7 z?d#ra+gs0^+pF&VmCySn=WP3}uX*METR-LV@ELym*r_2Ax 
zdv71{?ROrw&%eX=Yrgu?!?Rv}&GuQZ{+sO=KmN1!vTq;%%4zu8FL?Z3zWZlC?;rNh zc-uG2@9mTH?PG8Fg6$VS`NezfhsVEi*kAdcU)swipFf?RGyWt0Jo(_u{>3GGoo$JI z%6-0iE3psp!WzE({y)9_#+$!!`|tkAYxn=*MIUvJuif6X9e(Nu%;g(C@-*>#;r{FO zXM}w5s;2yXzWn7sIuS45>Aqk8OM5V1|D5OUw_kVf{cqm)ONKlB`$z2gZwCM9K5v{9zyII< z_yJq#8*v(b^S*Bx-td5%HXps$YqsBV_WQT;+X`=c_}bqdz!Z1>hlMeNV|L}+79P$Ui=no$`)E};Ks6RO3 zP=DaUA^#!1{sZ+x{RiKN{Kw4t4`>hdAEX}YKTJIQ@#~&^`0FqFPluoLS>Jp3>Zg47 zA^(Am{)3G}{f8HaZ@Amn9P%%q>tDP+)W0BnsDHurkbgN-|6<~y{zbY&{fl9T{7X@P z|L7k%)W5WIsDB~jQ2%nlq5jA6L;a7!hx#8+5A{C?9)92s&pPCP0D9xk{_vsxq57fz zA@U*rkW>Fq@bH>{^_)Zf1ENFyLj?XN`jEeI)!*bC>MzRm7pC8S$X`tAFBT5p^VVk^ z@?qBp>QFE7AupOP{O~~+sUJuFzpI@5$BR7_^Z3XIedf|WnSS>^AKadYVAq3#*q1-( z{2>o1i2pp^L3hEQQO2J(tUr%jC*ybr#+&DdAOF(fDfhUMe_S?7&tFR1;|kV;hrRFr z8IY&k;|dEt^4+asaFdPm8+Ua!{qW=O&R-7A*!bfIe*CV`ni%0bm-dSDxmV)ly8#QS zbMt{yanO2qzc%r8E#x>)$kTSXfWayoyl1>cH$VSl#ACU--XS%lH;NP2^b)S@CBjZG z&(CF_<{;nT;D-e3isI$7i9S%r-~Q@D_9qD##I?w0i(sc-Y9kH|`ON)*=Nd1(Jrkm0 zKE2Ynj-BrUZ0-Re2W#b{x8;Au)Uo95T69jI+~?M_u50h!dPdxL2vxmZ2fg+g^@kJp z(f8wf@O^hpmCwJT@B=g4;OBP%lOOi-uUf+SDBtsJ@+bF=#)lI+LhXgep4yoy-h-Yk z$7Fw_;yf!=-;M4gXWSWBId`4zt1xR#P54UuKEeZ?2OS3IF%^RdRBVQx#zaSrS)YMrtaYA9(>s(SIium z4{M)=N8U^9&8yZaxbA`OqrYSR*85C%;CCCm<=pRLerK4KoG&v$VBw)j~deXRN-*)g7n@dGD2Le!tc_xQz`~6Kz@7t@e$@;>n3s~Hu^B?++0}|HeYwV6a$Uc1F#1rgr$$rVBz4E0_{K{W--(ycbsp%fnBChvPeX&PR4a!L!%};gB zIPj_(uXMes#kJyZZWrqR+78FU9Tnz2g1zYP+|f(%+LD76>i@#Ieh2q zK9Yyrv1P4$Q~&j!+c|IUy|F5X@R$>y8M`o;g}N*EYRn$&?3bLbi{6n7JadtM_n^5u zR=Mz9PCU~5WnOPDS^TVfgc%UoB z^RBZ&qWUvi3$EY8ig#~ZGv`YWu=mX8J%CH`!F5C$n{}-RCq1PmYGan<)117|**&Pa z@5b|)T$xXJD2^HP9W{34YYtxVE4v_Pek&i(R^7F0mw)i_U&TmWo~|40;#Twc&KOo*uB+ipIZCT;Aif∓!P_9^G~ zlkd?QiP2+DS@XM^7!J7q(nrSC zH?f`B{>JMbZeRSPUpM^Ght5h8i@h-SvQG%P%4gF#8;7`#M~^SIa|1q{(br`?x6_&D zQ>%36CdXdb%I7me{Pdo6C14l_KGL0+0PwH{?~H0VH;Km{X;)#>=0~c)ctND-Go+{*_e+coCn&+O~WfpoSdZoUW z#HOEMDzAAoUe)25>iAyi8#wIY3)T{U)^k^T*Q|5u10TM+H&*wzdnk;B3+MRr z@|nnu=!m>i?&sXhAA3vT;73l2&ctKMwZ1#-)z|g>&fk{A&9&;zT?h3FGkWt|?%kN? 
zyUvwaGw+f3e#n6yyg66DyT;M6YVu5~f4={I_q%T+7Y+%C53e1F3C94KyeN0}4wIEO>^44>ZXJL2ClyvO3p zHrFT<@g>ueXF}Mj(nKY?&k)b9Qc%9 zYM7`-2dQ)J7>gqo;rgQ^XZzUdnWKBU-@+HxRC>YuXS#Wll10J z&Wdq8?8DPDGjqVFM9ZRw^D-;i4>j1)6Y7~ z`@$J`#Fl^e%zeeq{c%QY`MRgz6(?pNY;vJHU;7H<)Ti=_hrctF+08Ra3s5t6HiK$H7T{c~9Z9@~gAV7Ix0@uX%)Tt3Fy^(Fi&Xaocp0(a<^8%AR!G4gw-cQZuew^b;duk(&wexG` zK5E`GA8jl9NK6a1!f{=)D}Q*lE+6{mIm5Qn;oNp~=EpTKFLE8rAD;aFe9a{`^ERLM>2rM7BD?F6jb}vu4WRhD(tfo_Fm_n zi32%|BfiqzJL0gp#_IWeFI~fmQ|FoS!KDv8jUUY9LKm)Qq;fGAzB}@@=&UfC|6Eh$ zG~OzIW<%VVJ>|#@b=|_7XMyw1z2Znd*~yo=jGozcLCcTou{pS>%Tea1u)sS-&gsl` zsqQm=XQ+SO4|p>#;Nx3oUfY%&d0+6U`%xY4VfUKedOqk;_lel*m-7cdJqN!Zcp%!V zcGpesv9Zp)vd`bgNhcq?sY&m#Yjn-YIe#m#`F@z6%qG2A?t#9vU`KCEa-xS>iz+hyh3bpkV)G1mAuNi(XW}^3CF0Htw>-eV1SNKQq<2Q}bEAO|GN%t_AGs zU)4?@`n%OQd~ec+%E`UZPi91S(Eac4kiSnm-hD)P4iC^<8-1&YL&+oo^e{&bX zJg;7QT=S58T_YIeAs;mo-&}gdN9VkMb8hw}uBUvvx8&l!MnpP2@}1(B_k6kI*fQ(r z_#=fW2;UiY_q6-M+T0i2?_Ss9J0xx`&50Z(aP_0TVl92v?}GQ`=9+H!#JSB4pUAfi zd-m8Y?#t)yAHIF@cc0m^%6HJ&j+hsJ_jOa&GkfeG-)82pA%Yv_EB~`Y$KyjBSdx9o zR(9+7CjQ(`e%6yW-{n3pu5d7Z<*JzEEMItuUpQyw{M@Ga!f^wh9_et;w*x&jCYJN! 
zqWaIdUVPgJ0CfDq>zuHRXMFjB?SA9eSj53KIPnK}g@b-jeC>-rb&-eO_Ics^jQHeV zFp0yL2VX7np$6{o9B1rR_Zi)F`h>5Zksq8%s2^#)R_CF9`JsE?zN$NMaF&bn!iT!q zmuNXzSA6sByu*^iccuNgP5S7ydYV~=FW9Ul;uMZO3nb?$j;JF)z@*>s#aHpJq0aF+ zQMhA-n7(hV^xAzvS1f(?eb-9=sBI(L#|u_Fe2icfJr3n%*QKH<-d(#PQW zJ;K-L$?sVj5|{4}nCrTd+xNc-mU{ut6x?8xpX*-o&NJm1h!3^(`(u5hJpwPSnak=A zIp{aD;~B$8`>wUP&oeCluK&U-@ySIk#Gc_Bi>~?ijHut?j;(UnJU4gur1Kf4`omlv z&_giLsX_W~$?HAumd?(3p0fHIg0Fa(BR!Fvb)9g-Z`r3WeaBV9rslwX!dGdk9>>pp z7MA&O=jixG`aaToIDjGXdtI1p;SX2m$lhFq=e+jO5Ar)N25O7VeIRf5z*wmxT=3wFhju@_%z zb)MqF9xih~vA_14ZW+GmmOE`(zxyr^+@fdyRNKG%E&Lv>&qG z;+5QWeo%3a?YQzKUVcw?J@^Obse{a8@;C=r^ku!u7k|e%o|v+=Z{fpliC^?^23Pg& zU$ptsz0M}pyzd%Wvwxs$5A-=UeA#n`bX^?>4|K5W8Cg@`E%cC{61x@Fwy^0DwO7v` zINt?6@#79+rw;N||G=($asPlZj(mK7^)Clo+e%QGB1i%?Vbnaf~$LEO!1u%+2^~>-Z2~M5e?O1 zT;~oi&&*i*JWKdu(?g7NoX$735P)B;=2Ej$`JXy4SGOhPZH!tA^GN-k%dVT+!}Y0i zFA}@1F=vfQKj;mT-ed+^PtEkKYwG?bZaKFP`CYT{-M6fL&*32-_meXT&v;xaKX`q2 z_A4HJCq4^aa-`SmIYD3M8obVh&bvtr@8&Wi`8HK;=0I|u>J$9c6JzrF#UvcsyIp9}1`}i;SZ;qND z-mUaO<*46|b=nLnfRCQhKV4(K=M}S~j*|plZNIJem~(O=&82ztn*QbPWK%zm zOL|LA_;zmjd49W3tgCvMOKP0omFV<(eg{0evip(Ubrp`zmL9LqIuAIAg2xu^&7Xbi z?E|0E!NyPV@WI})uDxxU9bv-P7}X2EkDZHkCFD3Vr#d^h>|^iST=ucA9~NQ%zRjhF zKR??F1K(Zz-)Ec^_Ne$NVxL`~wSTR|?}Rv@N1^s*h z=V;w)KIv6@f0_=4QjEwthxR-pQuWwxtuMXk`~Ld6uddM?=etv^?!x!MOqTAw^!K%p z$4~gQf8Ik5io>My%J;tWbnwN`n2E`EMEZz;oi$vg3paJ3>%nL4O-zn0by?Dfz6;mv zd0hIF9L2lqGycNExv9hFC-~M3%v{N7At!=Hc$2~oS!7}RbO)_y@v8qm$(`e+|=5A$o=O$cSNmK zZ{yMv-(6Pcqwk&rt=4N_-|(f*RgCd!KZ%EXm_7#oXiqz5c;R0;xG%?_)Q|3CpY^v( zvGccV{Y@*~chGmjdR%Y57h2)oIZs;^-*-UYEZi4*t?pCb0V{SrV+lGBGwD0XcJ}kO zu=|c!xf|u4-zDoSj8QHYZ0Yf`>a01Ex4C!zaow|?>%NPg;lk+p*t!Pr*y!oKeZm6{ zN9AL8xXZR=|6jl7Zzkk*xBm9@yvJYL>rUtU_F=8_neX!+e;@pJti)^e?T3`NNVc3!omJsi+L6o@Zd7S zC0t$G0DgR~-IQJHHMV!|3$0+07wNj9U%TN<;@)9f^A87ll%8<0T^AhjMbO(0H{sB$ zefJ!l^JDh%NuBXoVyniEfxG={W&cTuI1IwF6i?~b)Qns^sJ1~hSkNzotFxLB&l~Z& z|I9&ZH+S{d82)FWW)jC+5_1(g?ggNtkYwxFh8~Z|Q;u0&Jd+fT=olk(C zH85pk9xNmRmz6kr)@Q7B&&-SLu9G^}vOTufJb-b`#&8+iT6C{W*g(g5G;p?zC;UwZaul 
z`bXWB8=L2cx|&=29_LLAd3ue1dZ!p>hCc4dflvB-t!=WiTE`9!zP)k>5znvB@DKUz1Llk)8A}3D#q`EV+%{ObRWmA zYrzJ;PeR~*M8Ijj)jQuU`HQ#TNwDFAEq2#2>*1RiaW&`8)!f;WtL@UA8xGi5sipR= zM?T8G{C;U&>i}QLfnx2up4GOkSNIbHztpgNZwlu~Zz}c(XZ-Z|eS#YsI2E?f_(3e^ zsfTx;-g#zi!;Z)mEc01&MSuHS{$lvU-}`CFy@p%f`x#rwp7sBF$8UN6_P0FS_29>u ze0TVBt{7v3UvZAr`G&ZPJxI*GCiduX2P3O7w7-|nh~+H%drfdDj^y=wPOiqnH~PJd zSN07);z&H4_+2=2RE+KL?>gi|?Q2o3_nPGE{==1rJ@)LDpI|EYY)8koco6Fz&$!f8 z=c9aG8~Czmf3NP}h)>nEJH~O%ch>R8kJ{?qe8wJoajQH0Y1`Vr@zZnPnd-$-6^FUc*m_?%<}2inw&nK}dI{eG$MxFrU=dF(9V;L99Vwz7+lxhVdxg=ya87}%Gu^O7Htojh>#j7cXKXItXA zgXpgUZ>|NseJhXO--3IyP(Yz$k&EIadsErwTbv z@!7+veW+ultDf|WoGT98199$Uo#{CJBNz2zZ=u%Xe%Od1U-hDA#4G;fSj`UoqKC7{ zXPZ0!Ht^{KbKzWY=p2fnuFB`R(X7A9?~5pm@TiBo0^hSCn`>p|Je=-5kW)Sgz83PX zb@m8xT7B<2cYn1SAA2xgwF#zsGPb?WcJ!cX_v}%d&V27(C%J^HyEG>6q4(I}K<=u| zbJ=;psWZo-bC!JWh5HE}_LVq4@&8{%*siv=`rzOCT4zb z4vytC2j%dCzr?ddT=S_M%IA4>pA|47$&+uf&dmiozRfFfd5^#&zVC*E*}t)Vut?CLTWYk(G0F?C7i2SY>OyeT;b4cyVoP0WdHIT}|o;7Fk5jXq75ll&Nw4#%9 zU2pMmjQu$Wr*K)FzgC|WZ*uJLO)uU%NiRN~)BP`u)Q3NDg-=hn^sMaeE4hTZ=PZ7% z6RtaQV=oTQPv1G~+WU-Ha8i4?Dz@nB(WhU%{ zoftWf-OK3dC4O~o?&Xh7!u@8(uos4NX+Lq^xLO_0szIZK} zFXP9Ic5HI2`-49T@UIy5y4TzXv2`b!3Fk5{HgJONIj6SHPmf$r<(9sDKg$MB*Q7Yk zwSSUUxS1(=iJHsEd!(OfZny; z^I!Mp-zBZ%lf3Dv^y;H)Q9rwuUgvt{*Y7re4KVkdnj3=}JY(3ZH}kiIyn66;40j-2 zdwLMxCk^ndIM;)Za_QW^&)nDMk>2IocF%AB%ju%0T)&YF7d_>Xh<(w?dDqK-Y{KVm zkAJ`(9Vx#-_CfaTT`#}uR`JXO&)9a^z;;aHuHRI=F%$R5LADm#6LZ(S)(!aNhL`+n z{W0gx{`#H#;IXe-$k%wPhq|uU{;uZ98qTSU7&yTj^5m2V^O4kP!47`sLto|F1Y_o= znCp!foa=WT2d|2yU+y!x=@&lXPo4DG{T!8gl$6JcM}}<#az~Sy}suQ+Z4&{as~#HI+yseQzJ9ZeSmdAuFKgYC-@3a zTQ`g4->jIa_<9CuD!^nRa%S!cet z>YHbx_`u72I~TLoy6~*CR&URnPf_p;!>6(EKUsIK0W7#m%o&X6?vZ;yd|`V=bk4U! 
zInpO;A*XDv9o~6QxSQ*Fhr#x{+%dv*Z-v`^_j`z6IC<{eQ+$hq_K$21J2=1#3~<9E z@0#|+P#@fym-XJq$92Q+JSlAO)f3m^IUyG!JMVMdVcu_SdiW5S0u=gdI9_r##H>KXabwZT_sRbzigvUVLh zZ~lDSJQrY;?tYM$I`Jiry1|t=FZu9EZu68+{MRe7E1Advj%qsZeFU%Zg{xj>9-@1G zT{rkG_(!PQwbN_P!E(ObS$-E_YkT1;j=9O4qf?U}Ah#hAK5*^zk6gm`N6`!>eGGX9>^m*wNK=QU;C0TGsKyHkK#MEl%Mal zVu>G~bkQq@`(5+yc|||EziK`M<9^q$zhql+8~K7cawglP{f_gG{mG~LcNBI#qvFh! zczoC+;HY|pM z`P{Pv#)TyPdIfzI|Es#cJNDFCm?L~!R(Y8N&nowXfEUc@Ydcu-mzdjHui2%Z3v*qa z5zF0pmRN;3$VPn6pLKF^Ub#C@bD+018}$Dw?oV^%KKMR^$NQ9fWTiLwD%SHsUge+{ zwyiMf4R&%g-bQ;#-m5xy58IBvgqbD}y8XeGziZW*`kn6#{wL|!nm2gxZXTQ!mSbx# zuI{6IQQSONt8;T`9;~_J3V8Z}$!!!kl}*%HRsMVI8-=upxSPG#=xr>?yui1^?rHhsweitj(^+8(Hym!KQ*E24DmaL zUx{;6|B1!NTm{>_)z8dm^})U9esG3w;}Kst>s-#CUgaAYpZ-oS^HzASKfF8}%~v=_ z>tP?sdzk!rZ-}ouS53~*chq=V*Z7N0O-o(2(+BnPlKy?Dnw$Te+df^VxXd#otRoNo zDg!v;=R0n`#4+QZxs@+{=~{gE#L>&{x%TRZdkp5RldJfm&fhTlg8|OMAZCrH_;tV2 z>m`@#htsN`dH3MnSgegh?zW?opSeo@{@%gkstFtC@Y6H$13U5T7as54z3tZjHhuc; zO_GNVuet8tdqn#UYn^S_Kk7%GKOp#lAvw_bfW-c$o&1og_VRtxX6+-)t!$3r3>dh;sWtu8*??4h_Z1v?J73PiA$;Dn%NghCmwf1pXIZN-t&iZK=lo${x<9P4 zbFYefm~q&12kcjOVGiu!LA~TgN8r*Hjn7sxqGpNUrT(_r{g*~ z;4Qm*1|Rsbue)(B@jr;)n|j8%2Xn^{?ulLIqwVs;m)?LOeYsO&z(JbtZLNy&I|I+~ zcWrQ?0K&X4;zKk;4fO81?{o?a@~puCvS~*F5M&<;o27 z9>3d?aLfCoIKS7)lYY`8dcX?*s(I#7@wLAr?=k%zz#=y>=-9#~bD4ejH19HXs($wY zoaFWI0%l}_7Z_c8VJbK8RB|Uj`T84mgfqVIVV2+~NzcfQjTL*&Q-}8EUwv>)bE6-n z+aBNGa+VyqpVWW!PWoJH_m%iQQ!^YmJr;o=#GGq}Z-K6=LJb=QG!`DyKWChlRwt3Uix(;qzSDvAFd z6_`=F?-0Arhv&WJQ@65h5cW9r^Xd=Za~t`oLH?YL_>`aZiUsokY|dC)Z>;!TWlqY; zd(-jwi=%n&;B)V!+qJ6@~($yvVCPd~h8)!sFs z!k?R(W;x#%gir?<)F zzyDBw+!Nm=y7-np|2@b7eeW4SS3d9C&RY0gi)wSec@J)CrQSt4U$2}6KmMu5yqU|o zr|^`oXO&!4C)l0^;u71l=sM|n=bt%NZu&I|OOm-IzImOtR-Tz(aMt%b&m{e>8uI=r zcKwXn%u-z7<+%b^y*}^Wxqo_4ys43yt@mo(8+`1CFMbyJG9$LN&f0S&KXF)Qvd`eR z=Ck&JYriAGoa=?xR2-Gpz4krIPyLzS(_Ni4uc`;mu4C>aJix5@K3ClQKKgEo&rZJX zXZn`AQ7-bB7kbxDePDN;$#GSG3;0g)9T3;#0=orXRl7K?XJG!Okn@w2p84DpIU1AP z5%*?lK3RMI^Il+U&Z>#`*8cF+q-<{0!_g6i=r~m%D@WA07#O2-YZ}E~n 
zy&<^pHsr22_p5z6SNZwd8;;>A{Jv-6$O*?8qx+KF^w$0IHw7H-k-QTBE?HstyFvVM zX$Krr9j=bbazk}5{ z$JfTJcf)as2P4>ZZ|1qw-MwArayh$d=sdz$>5lOofLRYSXKeQPuW-zVJwB_oYRh}n z@maa=Nc^uFKI%u zsRx^$v1MIfpKGPJ_6{|sp7ZwUmDu)E9?fL#G&%cQ-=Td%j!6=KW1y;FrEu-Hlzdn^?ykqe4y3*-?;n5F&2h?-t-GQ4qc$wppXa0^6>s;6s%U!uga3HRwI8Cf; z2@hf}_Fr#-Lp|h|-w+_iZ2uHQ&y(11>{IKDNEs{E@EA{jpdNuwMwMvu_xQNZ z9uW?lTo>5k;(pRUaW!vNzq85LdFHj>kLImdaE)CUU4O921t$5>>!B9=7q>yH?_9p> zU%0|I5-e~Qas1}S-+cJ~w&X3hyLr0l2d12hQxV%V+nN+g&~3+Z;KM zeK{+ev7%pX|H?6F9fY^-Rrp)^$$s@Ll@IJw#9Vy$@)Ng{WWikHAI-P$iBIuQXTtL*zt9cJpkMGX4)t;Vl z-|J`824BzE>HvE`V=29QTlJEQhyIyg^3L~lA)d2cb&B)NaO>Xpz2MXHiO;~jX6<>; zoK(&^4np#S&xjO`Z0<3$<$Cd{So48P`rftNY|OHmOXCI0eG~q2PkjznVGHAo--F;s zmmg2X&RpS0tvgA_Pts>$Hd=ZfsiUxnE!`Nd3(TB*Zd?bLRTtRG=ic-gad1G~FV4Ye zS)JQQc5!zblAwL-H z$Es&U%8t%6R!-k3dvLOL55upxANf`!)_tBTKdU5t(~3TnjlQnLe!cIw`5UX5I5J~B zb6}EpmAmeE!K1MCqZsv+{&Y_l9ly3q9g)TVcu$vaz48xhDSUWQZ{=FzV{>m0`n@kd z`ztTc=%3->x_ZwFHhmF4<<5J@zU|4~^WDAn{lSNOHAesbIBmtZ`y!rI*ZeN3pYEmZ zujgZ#7h?6$N4_`R5BG!vVp(}~ZfyKc68V}tSomi@zq|CZez&Z@ZQ)edQ^k(NU;Hhf zw*9Lc4=q1<&r7#Ye(n>7AH3%)hsE~F;mOba-Hq&%>=Uc@rT^|uIs*R@JG#U%;P+Xb zbGA{=eQ=&2KjSq9A>)$g2?PA(+`&Mv{5t>LiCR*Rd5McT9muciqc^JecQ@FgQ{Qqg$#-epSA6Q6 z996%2(YbfR;qR5z*GsnxE`8`b%^fP9n!wpLC;01WuJo_(dez(X6ud>+*0uJ`Jb^xR zZ|bh+{N|sO%kLucBi#pjfDbruWv?@1!kc~7LOs>L?hQ3wN}THqm+Yw>0h4@Msr3Ln z`Ef6WH|MB+&)6sI)04vMp5Zs=s66zTx>(HtpW>u?VxtFzg%5XJ{!f^e`MlI~*8BBJ z4)QIza@UOy9z82gu04BtY`a(b!M;Ut^7XszzYj<6ynSySk1vmWJfqpoKgU^<(mWJP zy}l209gXZI?qkh@@z%AK5B1oOICyD~%{b&syn9SNTqsr^B5u0HlV?RbCFxtpUsS$fnx1QTrV$i3E^FLjYiSbqQ5qr^o$9E^&pmh`7d&OL|FC3qNPwY{7 zo4Ne`{U5wn4*a+``MqsNPOY1=Z%tyzU1+Gbi0u?8T|hE}!R%dtc9kcyPat zBL)#~udT!Nwj}w&v+FKg-D7G8qiWb_J*XP#r}vA^`QYSO;?WU&lFReT9lIWE=q;5? 
zIb54!Gatn38IhcI{prENDSZ{5XXf|{2s0FJZxhe?n{Z6`{4wXMbA3toQPTncKCMK?mEfQ^`cXMad0oCSI_3(EABtOP*2QqpQ>3H%U=U@$!m}f7^-abLvhX+)LL4zv>Hj>3&o_bN|7Ra8K>4x^j1^ z`L^p?)zJN_{Kl>s3QlsRkK!_jmj!$E0WK1J?1%1|@0H&1#BZMQ7}LwbSm!*d%bA|< zB&y%J=!5oi-htk7ezbR$%(xlGL|9@2PvWoIjBqet4gSSGazEghRiN@HvpJ zxmjm_nrME6d+C-nc=`6C!(W_y#`$R?|Ek0J&|Nz#LJ_?C=>hkZ@1e}2&gr@PjE*mP z(TkUQn4Tuy{hssT-w!;%lK5`dYwPWsyzU=;Bu>40A~pBCl9#iR_h^kzV{rz@x*yjX zpS-8a<@#7Fuh(Fn&rzJ*b1>Gr_O_>Q^zNvL!~D%X>7L-%Ji#JA_^j|EU)Fi%%$pv? zFEd>Jn(_2Fbt}qGpE94X2hjy@ZHUGnq2em!Ma1Z zrq+s`*cU%)_587KS=U&8#`?6MxNN(-+ugRTE$rLI3ePJqzF{hH4dm5+Q!;4XU3u}N zd=L2ZXg|IG(!ZO8v9H*Hy^hA+-Cpdo*6~dYn2!R_cZseT?Qi4td-M#T^B3+&6M)pBA@w;J1zTz%h>g>GK$vJys zdapS3r|UCLaa(##40s*a*x*(C(0MMj%Fmqe7f0?A-pcJ-j2-T?z5LlHH~CX<_A3m} z$@;u--Iw$A+@X5l86huvaU0nq+AIFZ9C_B7w|x9`&D~%2=m>pAz!5)7<2EnsvPr`0 z^!0|qa9_}=eQco)Y{ICXz@z(i!_a5s=vb{?mu>O!Y!$B?>O3_Rw)*3{^Y@0D+>E7n z%oWmh-Fe@A&#ik)pYapt$Qq|~#ZqtPQ>*zt2*O~4Bf5!~sXFaR$hdU}x`F&6^6Ig|z znlel21HJC=W$fg;kf>KaqsA6;q9^yF2S0rl-rQ^Qb8dyC%ltmWm3}nmxhJymj0pZM>peY?-TCMfd|9!#P-DlQzvf=$BaC1b z{-E_B*s)6o$2GUEn3|^>m?zIM@x*dQ&ByiheEEiiU%p}T9r6G0pIx$l;frqCNj4w; zvsX;4CAQ&H`-$_~@6cOb`tpAQ262jioa~our8{2hfK6h4oC#y>b72gLe;j`06YScL zI(H0yN^XOUkz`Uu4`i#4InqOG&9k=mevU6r+Pk(nhWZoxINo>9 zB6;uwgs1dTs&lYP4yXe?m~#(3f3lIg^gbhZ_vdkbZ|Ms?Vot#dUiZ~KGFNiJPw|s{ zniDE_NPoE><72DbbDgo}Ua1+J_~$zS5HYNJMt-o5IIg+Z+GnS&ewXX6mzhX!ZW8DG zS(N0%#wt->>#V*5xK{1(!>4NYEP_LyCG-G3`NmaGY^T5FN8L4pH{rY5`*(!yBi}>i zfrIe1)nBKh2?X)1!Q+t1rju?z{9H(3`3Immc)H z?U|q8FCYBO8(%%@Vdgel=79I7zb$hw`5jkt*SPLg=j0yL``DDL^~z}u+$HymuXXZ( z+c{5K;mErRcl0GTv%b!`%B%eSjd1Si%bxLc$p%*cy-F(1roAiTA-uva-#62C~ zUCH#fUvb$Ud-?BJ|Mn|hSNKn#6t^cy*`T_zE2m=*CE!mca1tHo@Xo(wrf*w!iQBhgxWYKmONlJj{0Fgd`8Iu z`DBx%2JBT^*^h9d_Tu+}P3z!*vGfE^`Fo_cgG=uC63cyjpz~tpyt&LgyB8hbaoI0@ zsotv}sh@LV*PL>{?k#l`7uW8d$rn!6tG24OdG$Kam3T(4eAHv^#cxPW;aT;2?$-H_ z>lL%Z-g?ItKlREyN9F0q^~$auq^58=pKIHhbLj(pp>}%JGY>}Pr&rvIxE!g&dD~t& z<>&WRG5LFPInDPDew}kq;JMfMAj$KAITzxlG?-o3s)W7sQR>#t7&pG)E8=m4v 
zPpQKzbKLK#-#sv>qh<;m_dok^s5+ekf3ULV_vl(zJbv?R{rJnS82%dhe^ zj~f+#_bon1#cYFl#el8(wx6)!ARKDAae|ZlEyY9US9lK|I;&*;jM|LFT^wHOn6g=vsH-lir|He__GNJ+0b`^NsE+ z`3p;axu0-KU2v1#Jt01vXTEhOH*VmF9~kkc-t?9=alOx3_*kdzz7O+jUY#F~>qq|N zJn|&p-Rs`rVZQXi`H7Ja_{!%QAg{TvY;!O1iJe*VtU4aM5|4=N^v`;)BqSy2WS?{rH z&wULQBaFNIyRB5?($DlE{^3%1&Pgo(5^>IVTDYDS?ARhV_PPrfb1dE8P58JLtpnVs z6~V3;*4n#%-R1m^892{fau#eb;BbP_-@^0#n{VZ?uDY0^iQG%CZuBh3Hieh#*hY! zJY%~0qFqb6!p(Qa9-$`k;@7Kmo>ADv>lp)lFow&m-j}Z4&>#1x z?C|`>*@rlu@^O8{%A}m|FehB#Fi;b9V&IH^X`ML|Q@*|f&icIiHWW9lw<6Tc1AgO( zvpF1855%K6&Hm)a-#pOq;l8AU+j()}8Tqk_ajvp+Hs@dK!c=betm`Ul-;Z>kcP?X8 zj^lXreJWA^AFTU(uzH!ft+_J~<>wg@a!eO}T`ZeB=z)t=O#> zZ|-+&z4H-=ZN+)wtouR#*vny=0cKWuI{JOzw)SIA%r_*uSq7m;1to#AiL<&P|RZ?`Y-gUMV!a0|Qy{0n>eH zd;4hR8B3UJ^58q`+Pin)VQ*aO!@p!o%!57u@kyUr*Gi9!C12`SKjyxnBlWv0eS-IK zS>@E(t;fzYJIohY;p_YHtdbXd_X8cg@^cNcvEny##XomAzuO}*Cq18d=w}Rw&c>31 zPdxMmvb|uE-8m|5Y+vkkCY$}{_}-WAUhB4WFo%!-&b!PPn{-RgU$DZ+ne4OgT8GcE znA*e3IgAw_>U)84j@2joTzi!WF15QpFpOJ=WBXGBezMDU1bf~WhWwmkN{n&+jroUH zU9&COYu*3jr+q%XGN<6qy(h16;DJxpxnH&w@0oM3?1Ot_n0Mtt2Y2EeC-mT?hwSUV zbQa$wHk>M+aK~#I(e;qC`8ZEu3~k2;M?K>wAqVtn-81y?F*oVHJL98+*}SFuX}&8C z>)Q5xPjVht(y_*Ddtt5p&eK;lR}Z>xa1Gza(+U^t+=-u+KG%Ye4z>S`s@0f|k&T{^ zt9p~O*fOhiUNiXud@Yr7EbO+C2kawu*@lW^EiqQv=UnYq{N{YJ$6wg0JF`;yRD0*d zR?o<~(5-)qPP{+52AH3wVrmHtS#V{Og8ayz|wP z{8^6CuXeq%?XX8;Q~ZH^CdJJd_<`Fv+WQRqVoMHU<9jnXt-}Fao`1OU;ro^^dH$JM zm+yw0#IU~b^4sq@Z!W|n-_5(V-nM1ES{UJ<^^9)A+tgn3COZn{Jn02Wy{OL32 z?up{(_s+b*(la9R)BKoqWu{ z&gXtFKAsDFx>v;WzB*@dbia%Hj16zuj1OMq=ximgIr<*KYi#m>p{MVIoWgT`zI!lp zKHJQbJ~>i9@DV2SYi!oW?76CQ`7GZG&Q6l5$=r$yImB(|f==Jdr~5Upb>1^`>Z*FX zcQ;G#e&&vor@wibM>zMlfHk`BkN1f09N!H1(L26bJ!{lq&h!TVxcmF2)fjLh zNBsMKIis%ho*X&{pBk_eYoFL{lfBoQ=XU@bcLo+h4O(pjcflie?$41srsmV`7QDt> z@)@slGxN;W*nGojskdgxIKry>UC*X_TwGlTSmE28=X>_Ov~TAGAAj=e8Cwd2yyWzG zVvSqxYG$u_0095gaIB7deC9IbK1I z693~{S-&mVhYIU@&k zl^;3i0W)l0{O|#@{oo;)`$5m?uf)&3-A{8W+~!9Nd6@@hggbDZg{7YKoCX`d#l;-DZ^g;D_^kH6U-+eV zpE++~`TN27|6f_v0!QOg2X(c`uKSLS{LM*rIE&-V-F&zkq`0|e_QK;%m>mn}(g)U@ 
z&EN33ew{hKu)yCdpZb4^dBDHU_SkCg73pn_9R^h|_lM&V7zOLh~lWeIaGZsFcL!J5k(hSp|ynCF(ixr;8sz>Uj z=lw{o?h|(=U*97uI3>Ej%!6{;MEc$Aj^7DJedtucqnFI0d>v_NzuJY^m z8!nB*chEX`aD}%*d>68rZ`J48%jSB}E8ch8eyr3OU)NaQ)y$dfa5vX~ez(UCUvrnA z8vgz>K6T3qU)Fzqw|Dt>?rPo1*YltA$I2%HU)d)4PFWvo9Q=tdzezdv6)V4c?qh$K zaZBIe10Jzp!Als!^>_U@ioK@=j@ZB|U-Km8lKrqA@JbH_%1Y}?9y zs^H)Gi9uxBfrnJ>*YkjA^1>ch%;;4(W z#-)Zx?ya!Zzs6qfEobqk7Qu{Q>FGLG{SRjj!NR9Rd(R2;)i&a1{%c)6^*%Xo@tS+8 zz4@tzckTKPDj#v!)gzw~TRwhg9doSzzW>I7U*VgZ?4H-M36I?KU35KA9{Hg&=fDRW zd+gvVujhobo|*8||1bE=yJw^PmB;_P4DX`fTkYLz=B%Z0)@#oEJ%XP&<$vEHZ{%IZ z2ABNcF4+FA@tv8lg_m{LQ+V8M-w~Mj3MaKj4|nP0FjnR--_EW{FzV4f>%OaC^R07! zy$j7dJc>7YEw!I(0Q0s4Y~t}diq~^C&nCF4ZT+3G4|NUPDJwqoU8}weyvuwa{Qu7# zF5Cea#$m7L$h{J$URSN=)_ZV_OH5ki2diqGx$#X?Z^&;B z193_NkNTc6%M|fJ>I03{poPU zzwIB+?IgZKblGJ)J4hz&hdC0~Yw~SA3jDvBJ3yemn5tTKM99&Jql8&KR?C!IV$oI1Urf=%4zr z!aV3aeGWc;od<4kZJy|h4?T1*WjpKI#b=K+XIA-h2a@oKE%wSksW%mO&U$mqIoRYS zm+QX(-dS^B)v_!6@;|rZ4$`~phQ1f}ZBL!e_te#WflqTIwsltesAnueKmKSI&v}Nz z^Q>#9P8?a`*8OUq-pgey;l8MGLrnC(7tf*GmM4C>4}5hZ9+|Pl>%v;a;J|D%|GMvU zQ%6XL!xZWJXKgupR&is;YlTnu*Y6$sK1Ua}YR&y%N2fofJ2!X0onY%b?mMc!3ddNr zo?YFAdkLPn^!KA{34d_)a83`!!C2yPW=~%bof(sLeSgFu6|0rl#t)9pHu3TNHTsNg;M&^h8fs;A$be1F2B zzZaQ9atfau2+v$YwmB1)xHZmlFWAUgI0xtF@Z7p5;63s8UpIZ*r5~JFBbV);{#}<_ zzWfXR>Auc)@{#;|d^fTU(&2d!`v-R&%X#4r!r5v+&)yS%|1$7gAJ{q*PW-hxN7-{{ z%2)meZ~`mm;nDe7dBD&z!qbM>%%}I9w~(7U5b_8czUI_6Fyn)7IBE~R@_f*>sIDzS zy z+<9^Id%Wsl`nTppJP-KvLF!n0-vPc$UzYDee8O`TLw)Y6V!CeE1rG08^7wls+@8@I8JV-`BQ-NgjIP_iX6}GduTaM2BA__la%g)1fD> z9Zw$V#-X2GGeIjXeBdPx`QBDszi%V{r#!^__ZB|P7rj8oP7g{ahcQ_r^K5}J_XWKK zKk5E+liTH9Zw?UU_L1=JoCw1I!u2=X2_^d7Pi_P`ScDS@p>FUA#j0t}|;(I@8_RK~1IT+YoNA*&1sa<}R z<9_Z7e%Qc+3wZEREp=b+pLKlNUOB|kyz3sKhgF`OLVfg{*`Khl{o%@s3{_n>s@qaOROaj_xm^WI}af|0qG z?r#V6yxkK(2KY2B;r^W7Q?9O_2G;b-vS z%v|ey-Pa>J_X6M3q&b(b^G45XkwftVLO<3kI$W?fm#!&so=bms;=9bb`aB_pZ;tHA zt-c+|9~<{1S-M>@Q7w^q&RpZ=)wGBSKFPu3iJd}^$Kki5MS^Pfk{B;NLW$*m-H#oUh zVwbb;WCr{@kC>{Z@6s6Xkw3cofR7%2?_&4&toHT+|D?V@`PUx2|Bc%}X3u*6ul(?Y 
zjfi^qev2)4HO88~Zx3LO#bT7?5| zb9c_Q9t@wEzj~xR=16XHt<^n=?)s{huF1XZ{*!h@77ib#7d(A8#l?MU{^Z-0w5_;5 zbJdI75!losPTZ%Tu2mc7)_repONhZAfbPvoH}8d4>O6n%p1|FF=p*xn2y0}mKF)mr zgFJfRiawS;*9^`vOmO*YbbNVSr+hZi=X(~H=6c%de!GA4kr<;M#dl4d_Z-3jz4{&r}9%w+m5_Tbw|E4>+pgz{&j!U?jBYxUZ^cT_WX~y${f~XIbX=3IUm|I^BkZAj;oP)8tNsd<0SN&bp7yeUP7d!rKp=$b&xXJl>ZDCu&w7{avhk zMQ-!cv3>B>Dvhu~X3 z`K7NN)0p^Y9SyX0orxz;f6JOn?sCPEpPK1o-DPU(9_IaRpNbKFFzICvd{`$o@%Sgk znAV{7RnMt+Vf-%1$9yX9LJVWYc8%2O(dVmA_XQ0Z|2e<0U%)rBHL`BS@wD!1j``bm z`h7XWOCjzpT+qNe%{c83&#X`DEAD)6xf9|vY6naH#PsO1#$g|B#Pc*h^SXB&xeJRq z>$SheH2+}uuP3%zx6LuaTo{#i>Y3xk=TrM1diK}s7!UsAD?OjL{)+eCefWy^zkJJp zr}o=Be)lul-@SS8AK!T(R_i@>?Xk1wJF#d!)VwJSur%;7rq}TaeqlI=_#DH@xWXsb z%a!xa182;=ue{_2+diBN+fSXFhYSlw*_!7*dBctK@Vk54KAgwLb+`Ql9$-roYEj-R zwKooXaLwWLn%eI+m*8IMIxBwkBDG)X{*=yJY-dOH!?oiVp3Wi8?oaou_sUIv_ZFTT z_?IT;EL+uUEi0b7`X0Qz{i(wk=oSp`xnF3*nfrwwdbEw43~ZMh_$jWqfzx&7zQL=o z(ZP7|!CtlXT7K4K6O2_KbjAOyzUS?!WgWS5*_)3zSRdsa_XS?uw-f4UUo;XA57xc! zb@tY0tC%e{!CQ8?b`Q|TbyaO+Elu3J*!|84Cmi9z(_GOn{kZa2dP~P}Ydd>*ke>m5 zuM2n6TymcK%UT%ZHXmm4u4C1WT%tw4ls~nTCtQj1K*Peg+uu-PddLSJZQF{wayS?M zU@O0M2HU;uc;aMpuFl7Pw#AoPaxa*b$2ef?+-vFTcl=T_vBE|JI1WDS!2z$%t&!HP zN!1!nD&D+17GH%g46x~Q4|>$=_D|pLHV4JH4(cji%9n4c_J_uf7Jctx=S*X0$hu=< z=ev^MiPF+|veg{`Cs=)6eTS2A;k9zh-QWzr9?5&R_qV+G(e0y8{i5B)2Iv3o?|lHD zW6k&;&y2S9%(>TSe9Md9u>FZ2xbmQx;q_1az@2;6!uP!g4jhEsJ;_7^1Pr$8)VW<-04a1AEpz$8Jn> zpysN3*1tI(71!%Ahi~4I&WE44Y%;D?5AO801g*d?KJ>qjP`y&d8>FVt%caV~CpudD!g#yNnlA=*fEqxAi;7T{s7G#LXDf`Z&c5U-)+4IHz~iO^tOwJ$L;R-oQ9( z0}ov6Gd6p0W2aZqihLRCw@NIZq z@uC0ChdZz^$Rj)V+Im0qxyLsTeeMpwFXUp4jrF_!@|UsmEZ8>x)r~vOvDez1gYozw zv7Eo~DBGSvEdH|XG~=W3xN(UCy!^=R^QXS~%M@?FL$C4UA*S(??{Q;;$r-#9!+T-Q z*oaHaFU#kSE$8gvIRS!nMvH-xH@jqqWd&+xhjp&QHzzMFz>RI@MIM$lc#dB)$d+ykZ!}rh_6;~Wa?c^-J z)>Z4&7%ki%&RsvgZ9JTXhToeJbu?&clyuwR2bMjz>#LY3-kBb=jhVhn9;rXx=)oC z{_OkCxfii@p1jjwFvyqu%lX7pYD z!LE2~AfIjd4Q`y5jhxmE+dF>jZ3o5!A9#hwJAUlyWAFRyH-6!3j}Ls{!}7bj=C>WN zDc(=~KdkrN)U1OJ>~z0>-jq1s<@5ZI#iCVShpM%%0v 
z17G9EX06PFn5z3E@!@xR9c{~2xZt-vc$`yM>W>!imEHAdhNIsX=2@p`{)%m#1LK-l zv<;nDr?xe>Z2PL$?_l=y(>f_8J&?U>2FLsk#EFN;dF$}EKRTOB)kH0O@fHR;Hy>-> zd^kg2?ymY8KFqC4+p!6LsT_)rPT7y1khAh#YKF`D-hkCT=^Qm%V>mLf&FkjbTzk*l z_cJ}Z@0;`19X&9g#cI8E*4l62)!2;%pVG7MKR(o<`tlAJZ*wIM>~OpK zs93&-uBW&vPc&LN<-U@KfxUc{Bfe`s`;uRI=vCf}Yef3XsuLh`a$lBcQ1&e*1`kdd-)w0oHK~i0B778&dXl! zfZw;J@yowBzl376bp zaHe{FE`F__`&&B1#@Xw>^tv76^Iv{qtY@)`P|t+b@0UhAjhI))zLMk^h%I zb)kK9c43eGwg1y+9m^V<_QX4HuUEWa$*(a_@CAcd{E`O_7tnRl;8|*%R34w>FTeH` zmSfs~V1uvPjy>cz_T*;>mswcGG#7GVmqtfMc*b8C^05X!*V@|P2Z0sy!allQpicXc zgEd2O8)C_oxZrBN)Bvl`91}nD+i&S6n%p00WK6IQ8RoPnNAg_oS~zGudMu35`(B4n z>Z^F`T)3nE@$>sIYCyNP!x6p=;rPk9xu796JR$#C8+i#1uJ6ER(i82e$9-dtFS)Pk zwcp2;o8PB_DS!GP-7dh@{EO$dvB;fuX4Y`fM66;ijPu5zzQ##^(3Jb^?+W+Ty&x|` zzRYkPGx*7|zVl#c_|EB!_x^xSa*g*@7~Pw^H?=o5>q#s1)IP4EwFQq}bgo{Li$P4S z)tm5cyY|+S91PX$yIa`s_0!myt>e%vx^@kmt!ucY-c_&Wz+CWf+-OaYx^8QZ_Sm`y z@S;BU;1UBy=V4ENY&@*PdGv)`=;-?hwqiB>eboD&zQ@O0TqpRI6Mn>k0XDPs&3b$< z@Z${MdJoKn7&Ia0xc8#!+R0J8`_B2EVGmyJf9-jn=)3u|E}n2zp7a4595Va6ZU2tT z{MOEQ>MWR7jd!2CXjOTq+Tr!YvGN`oxzDLTIaBX=Pw0;{^F4CS)KZ$brnwe$o8qa{ z?~C}+OY2|f)uZ$Wo1TR~k}vs#-Szc5n!hHX?$qNxTCdV!tm__?KIv8PqkA;+UG#T} zp7g-4_?TpPyhV3*Ua)=e(r_`H$Qj7&0EdHpG_a?H;h7g_tL< zeqIj7HV$a!wocTPta+Vm!$?rMG;03Gq*rbCr2>-&|mgZ<9Za&-3nqO>BlD?T%~2QRBLI>A+OzBm8h=W&nSv*ra~@Oi-Ieq3%4 zn+V7BT6f^VQ7^btf2|2NM)#h44D>180>rQGwLR;c@}TX;_Y`wHLY4#Eodv=jP~(erY`w=^cjA(p%cJ*xSy&g zew80g#Ya<|dM&QHhY$Aq2%h;;3mAnfzUHJ{tA;smje?!rS$B?$hUJk6Um*qW3# z?}Kzd#_sooT09h^+5J(E-3RK?+8T_Sv4PPuzS##49BhRlKWo3I-?I7JLrys8K@W7F z?z8z=$GF4~-`e-g*58oSkX~bBRE~iUud3Z=@F#E8?RD?rE{qd=SQoxI%jSDh=V*(c zZOq(R^7K6hW8Sx&xkqc&Gxo-1*3)&2d@<*&dgQaRFPghfwC%gh8ot}^0kQF2f7i>0 zJ1ReF>zTNGJLo5MaaQ?#My#I2f98)C<4+BWuN3^VVHQ3j@4b<7=(soBGezt21lEed@DI&7G%nXy$R=%IBIY zCmg}oeEMC)-n!^)^e5WoephX(gL>0z#f-J$e4j%h->50q()O;08p%c8&dHi{Y5==p zPC5^EFtJyh>hn7+yK4i>nls0S{c}xZ~3WrUU=hs*gRz0GKgVrP3C#!=Gxrva9``3 z`CM!*d~41;uc?Q@2Llv(BS)<8ueH_TA9gdN;ype4lYM7Zzxt z^AXFw&W+K!_e}k)iRpFYp(U7ls@A@HzGLPKm$>9l?Wt#BeBj>yX8V~hxqi$1b^pnu 
zTHodMI{S)dOtSoNLWfyzWx&9^pY;ayeh~a=nesEFbup1DqIz3;*r9w*5r)2)A*TU?+f;-OI=j;qtYPdrF=0Hu{ANL4+_Vp~ifTPyj z3;B6%J+Xm9ZG}<%eHXdwJjCre&zgGeZw<(KsnPKO@sPjy)Lz>Az0%pZmmE27*EDh9 z>_po5j)J8c=(n+{N4_4ZE$?^o<}S(A*z&V(HB+POyi`4XHtA+PWZ%K5^Kjmr%I5pr zcFuSjkGb!_Y-26Ag;BAr_0C=4`;0yOPruvpwI=b+{cc;$d^gakG>DGkdT1N5ddPdw za6Rn1M&i2<;<>}8_ob!c{5|u%#3%80Ifs0i!5;5MubDH}_oDS7jx}S_Y6pIg%IiJ# z%=`8^J6G*|qlnWt;CkP2;@M+|Q*&nSvGR>p+M}0yh9+Rl^{zUOchh&TImlmodeOfp z?u&iaSl@Fv7@M5%>GS9mzOwPS=PU{z|2^ERX4QA@?@#NB2A3Sd+k(+J)ZS}m_UB>f z%>87hhURmr8Sc~qe@)i9^z+?WwY(lY_Y;0u`(1ba6J&i`~Pfg$!p9L;! z?&zDnduXoU6>nxVf?xG6-|67`y@c~l_GeCQ`WUauJBiN2}V zcSU`0etPK<|K@gjCJ*NtCvZqiaQm!n_{dgw2ArF$Y3I@GKEefC*Faq9;9lUTAx(Vm z8c+Mv+|Jvh0i$?Ozd2MczXRmcv$Tzk{8}rYfz1M4SyQ{V+$FfFw!FKU-%bvn)eKjL zeUs-VbSw?c3IDq1=_wqD#~)wLYmKk&LGG(;tqD9da$j;+*6&I7<_6rtUjE|b{O%RF zV1OB{`+S%1BZ&GrW6cAfiKp%Im2Kz+4t8p6Xk^!R*U+07q-}Byn9I^%GwlmDBl4ulw9r^ukX*a74e# zO)vZV=zGcCkS)9`PS}2jT?cD?k`p@`n;ZAm@o??|AJ%n7emFWu_;~GigS_~LLvZ8Y zF}4|_>h^b1KC)5weZ~c+F_L@4sUP?G4iu;IBQEz03}WlPfXg0i?x(-G_+S@??xgXU zx5TXZ;B=qA>u}`^-Fpq+_6MIcM)Kaoo$$Lrj@Vj*kWhDWs7v*S5Bbm{HqXs#`SGhC+Wz>}w`e@Hx#s_P%8r>K+@;4S z-}0`5d_3fH)cN5zU%TObAb!u#nuk5HC(IeQ&;$EnCkL^@bly+C<+OvaY@P76v(dN5Us~;FMjwZ{fbci8$wAuV*2K z1!tMVscmb_;EVsl01H0Jn;g@ba=9nsVI95aT-u@EB}egE&`mM$J6j_gzs%YaiK-lXLEK`q4EMw)-QPWgg89+q}-+oXowjy_Vm|QP{pW z#48T{eXrV&J@Ks>HELbolD<29w^!bZ@!d+^@m>lWPPc%?opAl&CfB+rP6LgKGrA6b zj?20AMnBG-6F=fx6V{wz*KqEYKmGOh7X6Gv9C(Gj@*-ZVB?G-H+m*6xg))^yG&D^7<_T%jDy!m-M&hQ08@n9?NdHz1hwy!haXMC}NQ?Z(zW5bzp_=(^8&zs|K zKk>P>W0lUdZmx+caNx%IBF=n_3FLOPOxXTu7v}Jaase}t-a5| zL{s8;2}G4$NB*)t~MiTr1!D&rOpbd(MJ|-q8vS*3F@L zRUUYVXYNn=DbFXUPkF6R>F*riz@xNrZ>*(YmKvJ;Ec$bds;BGU3kS|Kp7cPc*odtd z?izhFmN*huoN`x*33v0tPc>ytEdJBp@%ZRj{O3jf0F!|q_d0j`R=*Fdx%a^jzvijg z8fE^(zTT7WquvMJqv!3azkUayP+0wLpoiD+=e&C2-ZUTd2ru3<_;hb(4&%O{%K<*r z1s-$ptX%o$?f9Lk4+?Nnp7pzP&YJHAnB$I|;V1p`KESoLK@a$F#$1DX+-s8SjJ2@4 zx9%%=(tGUz=fzhnhK$AGw~VyU_R2cf$RG z^B3^A&)`yXFne`ig(+Ce6n^t_s}-**0cP!bUb#gol6)Uv&cOEwZS;YhcgZ7o4dps 
zjLs7c+&eU+7HQkxjpSFzLYyAIJLR)4KYvdZt9bg5I|o+od+u-Z281|jD&OS$;QjlP z559VP@|K@DoP6;9O%2(!CjSRtJ=x`lKMyghKWh!y93!8%JaZS@g#Ra>KPXRpnA;!U ze!xuZ=H#ose&@j%zU<-C_OmmX*#6}6@aY;93#QJJySNLFn)593IT*WV>VWT8ef{y& zc*`@7;uqVPt(|JtoEoV!cs*MyagRRI*>xPvl^p5AX%5)JaWPHSXwx+^vu`a^U)4fC zst@>~ALrIax!o`OtU80mxpAvM>}UM+T-pkcT6%PSZ1D|0IX?j_n4B!9 z2AZKk`iq9ah<>cIx90b^*5P68swa($t-1Evd+U?9+<&dHof(aD>6!WzOWnnH;FLa> zyJOt5j_*1|+%{r4=ibE@t?|c~*>-X0^X?UUw9FiBS)*e#R`0oAHP$<`+I>HC?mK4v zJ9cB7+{s`P-;cErr@nLe&3TMlcW|wRdQK15Xno1I5&Tkp+G;P&D<^T_vyU$Mju4ak z6WqKbTECh)u;C9kbS8ekYE51Z&RbLbiH#n?c3tvuuhf(5JC=CX!9bs`K`-=SaA#IMh|f2J*s4MNjc1<1xPQX2jojF_#bsmv^C zXFMbK;IU7P_8lL;wrB3xc@Adf9KEUc82)XmweMqbNxxKc^%LB|xwwUoYS2FQ%HNvx z9NqFw$h``la?7vZ$>@`qyc1dXx2S&?*`FQZApg>5+z;1;X3d-L*33b=$>+32=|g%8 zCU-A3YHwdH>WQ{(S8YH2?eE;a`LCY5W6pT?ZrwlIc4F^+@yE6rn|ojUTIOCKV*A)$ zF~^_&_Gce;F57Dl!s2Ydj>HLS!6sj0gGb)V;X8w^{dy*bb&nPIV~&x%I0o<6?)%2$ zzst#(I>X~bNt#zKUf@fZEGVj&{oz`6FfKKT3Z2o^OKjiK;bMKj_ zfwuP3Jgv>=WbNTQx6)x~&6+c`>7J3BH8$38>htztRvz=5*QFPldFage?5DN(p_w&2 z5T9J{^qQPGkFN5go+pgbXlX6Ufj*@n`K@(n#@c<=wCHRjzrJT@u+u~Q%GZ1=PxUu< zTIZFITAJ^r=CwD$9`~#H8Gkx+4d_ZP9(aqtweMc78a{j6bLub7g<(B_Q2nFE)dT#H zA541YIO*)%=tRxzU0c@1WRJbha^KdP@Hu~wyE*ily|Ca*U0e1&@VUtFHwca8%Qu0Z zSs(m`)BLA?k6zMy*Mw%>A9AwSvqX!oA$JBFhlQ=#T9gja06sdGZN%2Owc>1y_!`5u ziiKClOlxan`$hEdw~RCDnDekMY~=|r_V}S2qcO3~Yy9Ke-~Ps>8wA~U^Xm_{ z-Td%g^H%dFgZ1mlwbVEPOY8D?jC@~zc>PPh*!CyC^!auEu(c0va9DHxXe#uX5)Y!hg#?Fx6Vb$PaYl`Cl2J*u%2MEpEYHD$pQbW0Ugktnz#?v z1wO$~&ypkS85_U&wg%Cm;)OBp!3Iruz+x_4x{gcoP0rTS{c+8er!n^AW2~Iu%)8*b zu}AZ8NI%;Smzh&EdA@a_A80RM`!FlVxYvn`{@8=*ocJu9^E5Z+SYj&<2iX>#iEG|l zc(bqBb;T$3fPpVH6}Inj+kykuI*9K&iih{}8Xpb!hunql{?yuf;s?L@j{4>I`YoC< zXHbLnTF@1LY9^lbnP<_SfnLU9U3jI1-zTuawNGYq(7Q^1=DRwt<1_o8UV~}exv*hEv@r6_MH;10P zm+T+j^c@lxX~z3Ezk|>B_X`bVAK!ZRXGeH<|JMGB>K3=5{mjp~ikt5a zccx>C130{UNA~l;&QpF~zx4O>-#L5f@85Np*1oH9Sg>hqb-vLU_Fwz1AGNle8L0RZ zQ~MKQf9RKG}&(d4kXUYv1*h9YgD_Vtc(;PV)#}{=4Tn$9}zi+aI6b z`qsysUpT7aF0ZjMgfC8aZM1I8U1Kr#8lSF-wT5aKnxMzh{+F7oe#J*i`5w_Qc;I4> 
zF4#)L(%g0pHaG0p^ekX8vq8`HVXtRl$VLyyi{93z{7!M0G_^h4 zpQ3uX>({#$KKy*|@0oaX?YY*j4^3pJ7o#WG$g5ob#xlc)dKi^Q zd)wjXUTWW%#sWjUxIgV%{q?uD^&yvQf?lJtg-Wy&v?%|AHKa#Z0;3U3R|WD{^m4hzWY=D(O+<>i?bf=&)PWq zsnnGTW z*;FjEwJ1E_UGk8F=Y9KYcR9O#`>r2-nK{Jv7`_WWU?jhD)!KC{S9s#bQ+xTiW@E61 zcb%(_+r8Evo%ZCt+z2kYoX;4FML%rXhi}h??Rv@4x$vic*%bHKwrbrLH*9Ej5T7GM zzMffAgZym^p6hT8iMt&f_KizkX)^2A8pamP^&XtJ54QDm9r#eQX7jEb@E!vj54NoI z?=Gx*D%L$fPtMTSyr4lq>kuXU^neux}f=sutx=L!9QT)(Q z_NC9x!Sm5s4E%~)=U}!r;K6|$J#=lF^}pVH?(ie`-h0UWO4j@5-1^gp9-M)v zdGjOp`rKI0x%C-Gd~2NYAs0`@GH<|B{5G@WPOTeT=i98EYbzLXGYg0KXY4vBe&Gq1 zS2hQ6dgYA4-gPA|e&8k7y{;!Q^V%4~F?Y`jfs1=mb8&+6z0~7bT6)IDU{7qv!>4pN zH`#@A@1e(Q-tuC!^?vpvjvV;&fXl#!E~Sn6B`*2|YiKXrSDFuVao!6~wNZ08V_&g* zO%C^`;}7DxyWFT+x>o0bfAMN9M*Zb~&ffgoFY%xbd^R8-Lxsw;)_?z2SG1A>Sx-RfrhsLWqtTi>J&eTRO{Mvr0x$x*SwYv`D!!i9= z{B|DlZC>K~N}r`~&+uRSpuK#+^qte5Z>)Wpt5(lhNB7b~wdVaKj&o)(;n29nPn`4n zK#j?9FSx8V(syRC^>9}mleK#bPi(>-`f0uPGS+E`%rm6LGpqwzptpRjJ=|H(_C9CE-erB7Es%OzWMr+k^;G-=Zn>XvO6I`wR?dWyyov(!1|9W}rS``3TNKi&TEJHL6$%mDX~-}#}v#)eVn2l?O&KIe|!5y}!{I|C4>m^UOx%aIzw*@o#~94wzw5cq!Ia&2Xe(dmfCF)wtsDG#mKxm; zH2s3=!msei<2%e;d2B=9Q)Jad+a}RZ0Yx|jPC*Ki;di++!5CJp;g<#2S>yAYxRlmjN38zYM`HW5JdGO_F%TQLp$I5 z>XWsnmeLVD&{|yF1NaP0z)zg>ZSlcZkL}|41>E_>614Kqy9iF%ggw5o<4$m{(f60y zFFEKloTS&(^V8q-KEs#wc{PxS-0O@s(H%a~?&;3ccsEzPe|8QR z_UR?^pJGE2A^HXKm+oaQy!3uH>C-&6#^GO#14FwZ^6fI*z(l zo8O_n^K}pNtpY@TL!;J;8T&fo4}Re~XT_73+Wn3)qp4=ulUwuX0U9vY9F3j3Nqlez z=5*HvCvu2Oe~K2(AaWz%YD}#vwv*=n{RvBt_L~sx#hn6 zGX-XbVh-{-{=2`--;-xgJhrSK+k>g|UHnHb#RgC3yT(b};N|Z`azD0_5B%qG5at+_ z&-E2wpQWDsy+C+opTAe9826=c`W`HBBbRWrCO5o$_PTfk7hinHi)P9H*vMbCPxxI& zV_6q+kdtR&R8DK9wP&>KQU35%Y>&slVTgD1Z=Jv_y{p&ZD7zmp!j)X9Nw#ftGai_y zM#nPa$1ERfQCRr1U*-Y6_UAsuqx5@hL%qoxjZ^FB&mNAgF>8FOS>UBj`=XJxr|-K~3%UgO;W3J^{-&AQL$_krHI zeroopIQXCo-)nklt%U79t#v%MhhL9qmptf`n#@7m$m`z_Y0w>y7DQ0SZ_>eTdy)IMQ2i$ZnZErlF0ej}= zi8h=E<8)o}F+Q{MQhU{QBbo@~bdC5_F5+C5Y>Kr;M{*EHZ^{?_ST{FgY7cj?=!xR3 znetonMP}7=4?Q+E{h==A>a}OE$p;s5R{dyT4xGh@9>W(d8SHzIKj*ox#cNY_SU=*h 
z*8@M*U@Z4XcG<>$5#E*|Th-w@wRet@Z_OM1!}%WZyxi#fnYeIuKZ&)z*uaN#=V4v7 zz!yH7+?RXMaA=-0pMyay;HpQwvsbwn*af#R=nwIgyJ!3v@WEybu*j)6af$X|%;%i( z6t_LObl&$*oap;iU`Xq6=ak2smDBwK)9cJx6VrRRkY~kS?*e|D=~>Wn)yKWxQZrhR zv+BXGYWH1Gy*CoqvDFJ=PI2kGMlXz~b8y@b2z34Mt#y4nQYSWef{Rb%GZW7<+gX1B z!@Z^^c+I2ge1d)U={5a9Bl2NGkBJYwrT@5VXjZkHy5sbgx~SWF1lKy*MyzU|ET2@4E{@e79>Ww(y1wKXDt)@OX-U1xp&q5=NoViZ^M`i>Wd zcwcLsnAduucX65Xf+a0h9;#>Lh<)aL-a0i4TebCFG9LTVI=qFa+Kkbfito@*yo}NO zI^Nof=Au6D&KuN;edX)k?%7v;mACQxeQRFPd)31H760g7*xWNetf`3s@AJ6i&gNb1 zTIH|&9&qZ~Yi|3a=1QXGo%N^%4COYrp=;TER=KBj?msy2C6+mB*XC~+`U%Iif+rlm zQ$4HBQ~T0kD_?RkjH@&Yxu`KSdiULn*0R%2Vy$E9p{9$E;tnRUhzr@H$JjkR{+6M40XZPc{m4W^>>|c;vFOp@1D=uXYE{YcJIp8eRLi0 zubiFVm~H1wapobM(iXkIo^^Y)ePlmA?&Ybo7jgl6AU-2c6{OW$D zPi6BR3ihM>cYMue$M_q+{*>LrpS*ed@FySKuIqK*em#HNo1VM-wl}@2d>>TY%?HJT zy)d-i3+IR*xMkZAgKy$}rnug7_TV0z!cy4Dq8Rz-tZ?vS$S)kV=B}LJhffVndEy&> zQ{16A9(xwABYfC1Sc44*a)B0K^PwK>_R*2+#)cnxSjQ)8uuAVEv-W7gZwRr}P5q81 zE*#qKJR=`@6K4%vKNyN}ZwhYg&X4zR{UzTQ;sxvs{6kFn+J-0(_{+{el%|FtIBT32h2{-tx>2V-{r zQPUZoOY^(bOibqv*W97lAB3wlo%-S4fZ1o7&4rl04`9h<>0Xx(-5E^^ zVGjL!kBzmSg|X%x_C6yX-MQb}=A?bEt#>fehvKJR`+n-Z;@*$<+B!t1_4`9@3}P50 zZy)Uylb&)`Jbb6|+bZ%~7Qx~|h`_2o}dMDO*#Ie)9 zc`oD!tMbuPV^L47-3wxS5Q8R~tbN&ysr9Ai*3$Q*?=x7`&_fvV*p>MuguL8F1-$yaAMt0xO5-$ZKbA*jjj*e>W8&s50-G@ z=lbA(8o%>oE)MNK>IZ`}#a6F;f2f}!?9Mapx%1E$Vk^!zG%c*+b~9Sz=Z6}!ShCBv zqt;Bq8oK*_3maVL>3ro^`Erg<^XRymU6*om$J{?KG^m;SxI3<+Z1T4T%+{OzUw!iN z&71%Dmt7xgjjhH$>pQf*a2lQZXVUpdI< zjBm7o7x6ulhfx?_bIu^K9&#VFCvGpUJ889pGiTI+J@XyD8}aXT@QDKhz2{3|Z`8+Y``+Jt8`nmDT--vMbc_qc7rYVMbs!HjOM zoh`j~uUOZxe(0gSAM$G9{LIu!?XnZQEgjsSUeEsR*SWMu>uvU=HS~V z-@~eX*4XfcBz}p5liy9>8QHzZrZqZ)Gi((r`{U*V@8t6Rb#8p7$Muu12KV3EG)A8h zqcu65oBILwTub|E7B7#w+vX_W7fR>!Yw~UWis|oz`A~;>g%h<#7jWo3_mw>9pLyZq z8mbQByWiXa_hrSie&oH#NWbv!0f&t6>AlX~x5-cZ(;Ipjj^--eeCK>GuuH>j=^}ou z&v+;5ZauzfUo_9!?=2eaweK9tn|BLNJmkyj)O{#^1C^_K?cPXJ-&gKv-Rrq$ z5AP@T(h6VWKH)bAzxd|2 zx^RaLZHSeR=4hAoz%I?Bb$CaQb)B>3GuL+32kg#uHV^SPeBkE~4B|9Se*H5a-)n4s 
z{WGsa1NNKG-SW3~=Z&9u=2Lcx-7>U3%IDZv#P@lvIkT@|m%^U+OLD)?Tohwo;K|-L zI4zZL2Y$GIygBr{AgtFZE(300FyPpubI7im@%^~(IqTxM5L@x+!Z7X)A9#*2se`A^ zhHkNrIyYdH-v<8ZSs3nt&ZuAU8SN`B)a-|Kk1`QsKIE6p+Gs7mkNe)2UHn`hxzQCI z>&rR5H8+MZKfZ@E{^(Uc_*E}*4iC=A9iQg7)>63#&ndU?!h`sW44M5HUn|Y&kMvXj z(>LX&4z#2;4fB)6skJdmbJtroriDL<$?W*~{*`^)6Q9XM0GB+fk>^>$u4&}+EEgZsY_#GW-c;q7n3zIjszJlV_FKFsVVz0hK-c;^l8 zkNdp{_QwzSa?YMv`|cHS)ZzF(Q(oP}j~kD(9);)kNNe#Y?m2ZQyY|G9Lp=TcVg?Ui zFfKQUl|SDY&95_maw+bl&bk(#frmeH>MT%(Wh>M*5-p-4|wSIr;hw%&L=o zs@LC2@HyXB+#x(xp7G6n(b_#g<6uQM)`bZ!S{Bae;YniXZ|YGV?#^7FF~Q-ChZr=Q zb;C|vW}m69{9Z-}v}ir7VSRtnJNj50eTP==zC(&dj}c${+`l#N>3agdsTTU-K2KQg z3Aw=L?lZKnIp6cPGlMZ{<{GKFZRoV-HQ%A7-1;v0uHhShG#Kv~oNNE_=8IO;%sY+F zT95Zy{^>o!Fq}vKJxc@&K~l6))=x?+(YD4KE^|vn)l!m-?nEy`jlPS_OfZf z6@PL(cii9j_{@67Z{Cj_zRzHUf6wMIa`%~X9EGi%w;Wh2j{Lobuhuz_59{#6E}UiN zQvT+So_$8XdhoYK?BNMsA8KC02`;zHn(w8R`Qf7&YEdk8-Qs)`7UvsrnS7E*_R<*b zqDgSUz`yOrt@<2$mZ!PHyLbpMe6V$H^k6?d;ug;BUp%M*RN!Yf0{CCd}4h%R5M1@gnzr&NZ*W>b?BtwR9Wr zlKHQgu2;6M`=MIj;@n`uId$b-02>|}(%(85vuxOV@7TuXE`Y^(=IFzkhj@7Qoi7}) z$X7AZ2wnPIG5wt~XW7N;A-|K+*BrrCjNeDzRpF-Q)7qc484J8~hiL15dyv|SbJe@zM7xK!^o#h~!(DtHYAx9_ z=+8nv^5R1bvBF358rIM6-E6P5-^s#4f`y#S;Hfq+xa-YbHTHYHzW1wU{cZGjSTUYY za=v~qz-${cnAy9|`Yt)Y?5h}>dS9I^e`9e z+Hr+@=<~8KnpIcc)AY%`rVrLkvvboQM)k()w)HIE?kO=mXa^5JVAa^rYZ(_t`p*6F z9dc}Al@A;nd(6}n&h*RgpLHNd*Fyf}On-=p7U{=7eDc>E|9!?s_n$em|2Mtu?y>&W z@455PdpLj7+g@({W#7uq(D};`#B06BpLNA;IO`bM@vrgnZDT0!PCi@VpVX{8cU!ND zw{M*rukiQymG7c+$8{Z-n&DCTH}Lw^@43<(<~3a8ry7zYYu68z1r99WSbR3%XdKtr z)So({Pp^fiTJG*x){3{TUO>GHXieNLw=jYlgBxTMGMu!{j-Jx10QhYGi<@7&WyQs z*0Wu4)~0n~?i}z4*7;of41I~wzTb!9I_{&Oa-Wso-$3T{lp5ilo)(7gQEH~XjNCoq zI#yi0h6h*-){1p6;6x4N*0b;jXV{sucYd$6*S*QTD;)J8{wsFtZC=80eD%t`Gk35U z)FZx^{r-egc%5GN9SRTdgsU9mJ)>6F+*~^^wJ^|2Gd1^+26y}Y;9Rv8rgNRWe_&UA zuC+C8d%b7A*Nx-6siXOKZK*?N+te{*TT|gaai&HFeOS+ZXC`~F@I!+d?nl>9IpMxX z7ks!!#dYR=i0zclvY{b1e@Ecs`oS%oc~^zs_oDCd6Li4mlwQTHw7y&C8NyS}(L3iO zZ+PUpK+fdN{Y~u4trwbc)?C4YfA4#aeSOzcL-%6s6Lt2x$NIcMY;&sI)axW`!NWe{BL3C~y`;~x 
z{0%8PeB?(R=DILz@B8wP?*Ag{JTkN%Jz?KGvCH3ghPruL6V{Cnuk7L1bq`+TQ(W?Z z*X!U%FEnGwm72soIO{(8f;B(#l|ISUc4EP54Dzgd-vON^?z|bC#uNq`atHksPv4V@ zrDkb7dZO6gk2`y4EyDv2>%ewa3!Rq+zIW&=U97M3U3{Ip+MJoCx9<)dDWYA&pC)pg~LVdE)X{7w_6`0ly1_~^8M4V=tR)53rY z9KtQR&R_StV;{w=ScQQOuA4a{cR=_$i?;BAZ=G*T8})eH8P#uWW^s2fs`q;5p5^^y=gz6t{AQJo-jh>toI6iu@;-5NZOW~hKT3_R zhuW+Db1+7)Icr_~{cL{c&4uS2#C1(=r&ct8C${s}oMG=rIQ~8`YnW@rQjct_f3Mpf zZC&RfvG(maY3cX8bXc{k+zT^t^Iq$P{onobM>ZPfR{K5szx(I6Z8JE}diB3;`xig_ zf*nJ?hw^7;7*A)%zxd(zfFav$hpZpj{MP1aZMQxz_|3m9ocDltOsstml}mBhgAx3! z!6{sFHvdI&jH;_^2;VgqydD|-X>Q5$$X^IdBKGPJzL_F7<{zn8M#@*x$~xu){MD(_{c__cG_>@1eeAEu=bjr z*E7D@!A8IH2ADnYO}@mjpLMUiaFPaVP3u1P;)7q~p}%{^tihRaYHeMV&$bD>{SMq2 zeCbQCnTZKTv?4bT81Z+m;@iBRIOChh^sF$=Ie2LE#8EM0!bh7$`PDDis<=mn{%Fme zyxge#rAP7>4|sy#?~Q9^PfqlRU+y&=IO|z!pHEmP(UW(69q_9-IB2Z3lb<0wcbxoZ z=6o~iOuUFSAN&>um{p_s&AjK@@l@2ZI!J*X}9q{-@u&NX6+Qq`N-LICU^P_hdGWj@?+Q9 zePJ%FO=FOEimUY_oln~JZx=QCU2@N+-jNr)nwyXNwG)2}>e%CDzxvt3r(g0`57vyI{j(dq2jge|?7~L-t>zxuUyu)W z&&25Lh66duzSmsihW3^J4O`{%AV&VXd5F{4!zuX0hfi$AsI$(&9KX!7{p^|f?7L?C z;Q}wX!$-5%sq2Ez&_aW!G`ZlO z)~xp6IafH*BgTb2BvW7anf`XJ8~iS%-W&XG5Lb1Tju%epgGQGh)?9hkyF|^sXC}wx z2E62RLLC~eNgDgU!hv<2=|0&<^Q?9HTW4TsM3>ayebsYeLq4=+mapckcJoMVYkU6O zwXM0554&veBcJQ)`mxV-VlUfLacBo8#d}omvAJIK4OZo`Bu!%=yMuKaZOI&5jP-{k*5e$?qFy@$0@MCwYQ@ z72ixW;QMLc;LX}w?-TKakMC|`nxA>QNAScizQ%5CHT#|KS^nA6Z`LdSVdo0DVs3S-!nes8hO~0t1;nP{`M_<$1!`XIg%GUu^r#*o}+8dSD$>FS{v2j zcaxm*anAOIXYtzVc)|B_W6hoSjXc89ea`!@wXwj{@O!Bo%6oS7x2=C`#_tz8s^;~+ zfL}v=9j88m7yVODdMTTI)-%78;$eUEB+i3%k3W6NC-)3E^Yr>ppYp~7;|K13-{I^3 z&8v<-aQAf{zww^Gew5Ev^G@@ohA`gfdrLeqoNLBUaqlw@zQKB*wW_t_z$cbChRzg! 
zooCjfw&*y!g^w7Am8<#lnJ)-xjbKx52?7A^TeYxCFY}GnA>CUY6 z>fE)gexrwTUzCPEx&HEB``R4V^Y{@2E+g@4pF8HvtgY{&`%JG(H*&f^nzaspY3Bc* zxbX=_?`Uox=Y|(&;1M(P>>jlKIeTNhtE*4cNv}!cjz!bmqq3o^)`x)%=i^k-+)Btn6 zTSE)iOuf`kj;h_<=`S(h_8Lv(hc0LUhr&R6)B;Cs%|~l#pWgy(ZP&~Lj$@g7%>i-DJX%wK`}w!4 z_KS|m<381F8}@kwKli6N`hMX97B<%0DecMU?`hvL?X7WPYD!+=qTjan9OzkUi2LY8 zW1B1IVD>o}If&-%#bU;W`nH;?_nBbz6^;PK5f{>3AkyFd5H=70Xq-`RZiXMcC| z-@WeLn@2zK^P4}o=NC55`KEuhx%wY`aC7bVJ+OJ|{sWtT`Hc^3uKKtCV)MU$@&C1X z!#{d(^S`|4!<$$Bmk(`T^W}GM{`;T$KQ}kL@%J`A`9FMQ^Zf7oe{^-JaV zJMY;1$5;N==6m&broQ*N|7P=>zw?Wmzwz2nZNB3DcW-{;2kzWF^ts134}9Rmn_v3W zh0V|1@ZjdgKl;SxGhcGo=C^yn>Q^VyyB~V_|@BAfBWaRU-iTH z`(D5E@Ba+%f*;OU$mVy8SvHOPHyV2l);-%+z9;zfx-lA$GluL-+wh6q_~1Gh_QyZ< zbw};@;(2)I-+!jRU)c35{4wzom+-?5HaJ=;&;4U9f3JnLV+enY*;ebL<|Db`(D=-S zi%odWwJ9yZSN+f2VTTJffYURaSe%8&|KO#b{EP?d%6~|&xG&gVUFVtCv(=7IV!Jl>u_f=Q zdn-RQ;C}h;a<0L%NKf_jXST)BbN1w>Yg(l<-4de{#uScHzsSpW8@(>BkR0Va$ks{UY!;9Xu-bwq_r`b z8R*%X#8rXMU*2KasdE>)5IwvMI6Dzy53jg~L;dZ|{au&Pa0e@$S0Y~$wuleBH@vLKC zdqRwcxv_>*>sXk5Hfh#(W$m4COMk!5e@<&Tz)h_povjbEr6{W6hcT z5*Y^#mP!-!{nu|J;My|KM8=8m-ZC!*{;1 zKfrpcu_>QLYi!!bF8?iq^By;R_gC`QDSoh(&jEYJJG>6ow~Td#kOO>iNG{oz8Yj~k z=lkUDnOYntE)O^_apWXUYo29u{gZcp?^nLzvIAcNz|3rmZuMo!i6j)QQIjNU7U z>V1dr8W7Ft8n-h_@5LX>&;%z!XL%0IocOH4`*m_xgpmDe{%Ft?1A{+R}Xd09g1Ik z(hF?uulnpqFFNi_FVL4d;f=1&-x|u-cLR-j%`D$7bH^qp`O)9n!$JB6mo;^9P7dtZ zduBg;Ph*YywIt5?_D#I^=uCa(1`AI16{AIBF zPi6*wy{>q2m;*Z2yXAPDQ!9CjcWW_fLHC6rOy8BhKj8El9oTa>8E3gwUUB0-TZb(( zm|z5pZ|+xWpKtGK&wVZY;VVwk&wYUZKDg{hKg^x8@*VwPug97&vkAAx2?q8Fn|MFu zVnMFt<&K7D`1d;XzIsqe7Ep%ZP-@c=3+bN@adTJ z&avZNPd@9__d~dO*N9(7-wE5?=i;W?!>{(#LH^=IKG%sKgLA*j>+JmF47@{37Sgf&Wxzv8YeBFBv8qS4%ioxH$zw-9mj3sRAPAoCtCFVBj88&g+F^G%b zZSuXXa~QYt^i2GU7oNmpuLo|U7TJ~_!5H}VD;vD286H~Sc0eEY#7eW>H9k7| zNe(dJ5>9orFk_E4U=DrFgFNAq|2)0+eP5ghO^79bxa3Y0o^z5zd+S4fw%~%LAx_Nc z9e!F+opIhoD;0()p6MHuNh46 zxMT45Uh6~IeWvv`I{Cdo2Oj6p8F@9*KMsgJJ&$*+ajjE)M&A#*lfK`~J%nZZbT4(s ztOZ)Hzvk|H1b3bZ3qA~KMoq1W?+d)B 
zO|g7ma`*9Tdt$7`c^L4gN7fDO;)KpiuY+%GDv$p&;?*Z}qMzoc81h&@=4p*jY0Q1_ zogiPWhs`)>Ab(;;&sVJ6VfdAwbFTV0X3-gA;8eW#oRv22OYh4c-p<`Q(6i3^p1`x~ zI22E71{QI$bswxRwt0}h{91SG(Hy4x!`-64%I!WoZ{NqWGhE`&d-aIF$-#0y>RqUZ z!To3G+yfhHY~W<1PB68v-ug~ifB4Q1J~fDE?fXPLeyv+-_gxika;)c_C-GpE54!gK z8oDZOVbmEg?0K*`Kbv3}%duVe+86Zj?;<+MpZxhoqyL(_zdhF2b;_Mw_%QUY&<&sxvIX#eIo*@cHb^Sdy=)6wQlpLx~x%FkZ2d(&q=a$v0)ole+a`PsK?TyxZV z&ziZ;Hoczq756G|jKvvo<v#3}Jy+`Q9!*{26c`e91{I zZ6l66?9o7be60h1aKctt=FB<%jUC?R?z)Q0h|#&$8Oo2&XoBs)=$yiHeA$VwxKXd_ z@!c>6IN*^lT+=V?U@&X&SgXv$k9#v|=zHcE_%y$YMMv`_HshM3=Bgog5j$Mui@(R} zlUiecRpZZG%#(bH`G|0=sdIyk1M%%cF4ocH?O@jaBOCnbCv)AiJ+bY>tRep9>Gyy& zce(Df@>(n7qBDFZ9;L%+4TQmaxeZtIC%@lUY=uK!@}rgO4By;0*5oE%Mlc&&Gxfns zKDpzm(LHqSWmnC)2bIfvVjCNbDK~4*z_nhQdtEyAH^5r?J3$ZT8WU^&6`Ot+2j?gq z4)WDVo#ci~(iS0yTlzKwtC=i2m7u^m>;R{MfZUGxd#=;d$D1Z z#*SeRkFs%Y9&jw1de65xc(GZ_u8Vm13D4iD9T?P2J*od1>cTGk+`p_(?OjXVQ|{3= zc=^r|2T$hG$$7}lgB|}hk89=K;aM1!v)>EnD_r~NylTkzK>On4{+T*74h zt#|YEZ;t8^rg4O2Ut(Lkp;7bD88!I#s^>cMeWX6Lfk)vUijQXJ#?RUiC!bUA6_|+~ zy|Nz7yJB1qYxv9Gx@%wd+&_5q;EcM6S-IJ!{MYQj5YNFC?%1YyY2te}?j1P9;)6eR z6&6QuG9T78=uPpRe_zJ$FPhOuxN7!LU(z4)6W>0{>$R{?;Ltkje6CyEtmU!h;_LfJ ze{Fx(=U=q_#&5dj_^i*rch6e$f%$je{D{^*+sTHV_i1_EkNnrxv3!T$>we@(h4J2v z&JK+!oYvw*u?I$SycZv^$!9yhGhG;84>xF;U z%9^-3Ykuxi*?gac=X&9;XJNoCIPbOo;E)f8K3j9SU&?D<#smZ3ii0m2m=AWa!NaaC z`Rp@ekS9LPC41>$PV!YBQ$HBg#2NVHVqmYm^ci^gC@1m0bJ8aF1K%@#nBZ4j+~;ve z$WPw(VLjF5`mqHcEq!Oq4@@}10sFdMbtaZkI-rr?0rtd^w|uOB^C2I0^dP2jdQHB0 zEKhr7;VGyJVgn14sC04ma{J6mx<;jOLA9Yiy3g=l|{OUBEUgt250X0WE)u zg9XSzG(b8YNGzoapRtjl?co;in5-=F2J;h`$Lrfc^Gtd33wf5fMU#Gr$=X&dX-*tG_vkrUz zK&+~?*WK@xdgu3 zx9Xu;j@m`7qu!jc*VD0Nqh1X0;e?$0$un~aOmttq`C;?Mrk*d~e13~ut7rYpUHbDA z^Zd2n)t{f3vN)elKHcbx-{3!&?Hqq>mvMem*yj)HYrh+R#ufHjP1@Q&WZ0rJwr7WX z#wHKWJe_~dU%+MISMt~2_JY~oR&m28;ySi-EuG}99e44a5d+)GMY}nJ5&4{-TpsSx zvBZP^iT`q5d2cdC2PU$_a;)%kp9Z_)gsztS;E{bi-*HQZJbA9&tM>8@AGp%Tus7e9 zyx~SY>suo+8D}Eo)WmhM4y;@&aM9V%$p2yU6b^nv{xV`729C+0=6k4x zfQxdkrRfuHl^9N{dOjs-4woNJ%zBJNOM$C7NayZ){fH4yeF=*YX)w0Bjb+|m;4K7 
zx|^hHYF?AR)eHtFxPcqk4-Z^RE*|6|dCiOat=#JyS~ZA0YBT;U$Cr8}A8+_Buc-f& zN1dm;IQZwB^4w!zi5bgtTJzBmJo0(C*&T*@F<=+R@SAdmFZYL=>%M>I{5|cvTJ8OH z-#>X+lIeVB|DN{A3(4D;-}6;$8umJ4gD?2V_CMy>nQXB0IqoO7fzSTrg=9M|H>FoC%&k0hmcf@gj6{BW|I(nYj{T&Eyo>#?X&hH%N2rkNF!J3}AD`s+Y zJbJCW{ngKytB4hGlwY6xY7TmM3u~xbo!8bAY!;m1!Akzf{XXiR9LR4xN8ETfu+@`1 zU5l#oO8US@vw!E1H}_gq1L_G^+4(!hSvq>byD{v@WaIv0!$Lo?Coc0omrQZT`f-MP z`2_Enx6Hw~aH$+e|Ckx%VCRxu%qbj@aj%|~j(EFU=UVt%4Ip_+$MW3fUHZrgZfkjCgM9RHUFZ)F+}tb9)C7NIg~b_p z`giCn9y>brh!L?yU-~kB7YJB*oUzl^l|9(pbIiJaUuVwi6D+X6y`mXY_rk<3F}9dja_rxJ*>N-Ji~~n}lh5#-1RFVe#g#pI-d%Q_ z6W^SPlM8OHMK2ql>(0(n{Kei@<~|>5;2fN{Bm1 z-Hq!rcNSQ+a~2=3iQcu4uX4S~3l6~rKFoIHA|LB3my%Z<;lzSdV!%-tHNSpm=GYJ) zf6iqaeqk@UW1%DNh*{iRM{3C$Y;~R|4zV--7V|=v@#g76>_LY)JeJ}S@3i(h;JQs z!Wn8)HHF`_wmSPkzZQ2__l)nn;m87WB5!ogykj;Lpku+49bV;2?dm+;!7OaqXI=fS zSYEbkb@+iPIdh^_-vD!2K?aYE>sm=R%t7ex9&!-h6~(e$?TRtI!^ijZ6?WC#c`m5O zn9og~?WlwMZBEJA{mL5EO!<8fHaVrt^HqA)CVtkSPn@h**pUNI#lQ|u$bwtUO6cul z9eGGbt;mU)EXaR zu!~S$&K-k}WyAW+SKZ`y{>bq8@xRho3pf;mLxl&%^~MavA!ldc`*foY1+3 z@`)NoF4(O4JB__?de($DzN&#|c)-LqQ0S@c}2%PZNbLBB{mr3 zH0rjr+yCA5J?pUUF71!IHz9j`&Yd${59>}@V8gol@PEGOzZw6#3-zpO-a%qd&Oz$P5Cd9)?k88p` zm+CU+rSqiL_Jd1@V?O0y@=kJOz6X|j#~t2H>b-NWHTpyx?$y)s)17*G#o2MdHGbI( zbC5-yg9CP1_slco=~pw~Gs7&|_q8UN%5RvjKIP=Y)3M|;sb}yFdE^%HiW7UC$2?^3 zS%P!=!o_)}Juy2ia#g$mhuGxjccN^t;|bfq3CU7FQ&f5Q{jsca)}@Qun6qOcD}BXg zM?P5-p21Hx&b@5XSKRBpb-mi`wepGIr!PC=lD{!yFCXj3zm0W$ySI07PkOjjZtx%$ zkFeoc{r5R!6@UDG$h;eU2JXU*I-u8CIk~389P*8&*38Rf9&-l9cd59fKYdrtPVx7I zz48;rj5{l18!Pc8cYfrn+IdxeaHF2&i4Kmg1+wWGUFDj)`&2U73oCGt@gU1x`0?KB z-mx-P*5rES72g2%I>V{XQyk8Ex?GM^_=Q_{Y&hp$9Q%~vYi_Yej*4ks^rn_-yL3)J z;SF^$_9V`52M_z$<9!ah@YOkUa4+zqZXNP`I`&CS_edhkmvzIu83P~eayQ2Gj;|i; zr?xtqzjkpC&ZE9&# zesCUfKI2>zo5%4vfXAJ<-1T(+Rlkr^-fbV;LMFeS_!AC!zRk5n4$f$ETU%!^lehRq zZrBR5Z2a#N^h7@7HtQCJVxz5y#+Dv3od*b02bqn5MOMUF8{xT1o;d5Tjh3hz+ z;g>aVtim9#kbg!vW6g<+4LkU8cMSZ8-jiqgE3b%8tXMIdsaMXCciIUj>bEOBvXI** z=Z4um&%IMzdIU%Dj{M05IWo@Jawo6K2fmzH^anegsV|(#(+^ycVUxPd=`#%m;FKJj 
zx^tGVY|YzsleudM7jR>_|C*cX*LjXrzTj~tR>T3%n2taC1)q#-Y|hhr`NKngeZ`;b zt}A!^(q4A1A9vRgJ$&F+`NAW8z!gr|#5sB;z3WdOq6RUm#0lTwJK!9V$-nw<+^mOs z!nbT{?|Ke%3TODjgT=Y>j(y;#Si?DM8uOL0jYGbAs9Sw!o8nS(?9qcqeC?Xoct4Y` zXAHR(de;+w_+dj`!Y96wSsTww*2D7{afX=zi?}r>btjHE_#Kig-UH$l7C8_za)=t{ zT{8*C^IW&lW_*{>t;vhHS?8#!aKWJmtrq>^>G%aq@lb9tA7CLzMm;2N;f}Aw)X3+| zr{ugB@L*Qv_=1bCcFBw(tm>&}uIAYDBAxUhub9+_1$OvGPvOTSnflR8}t zmmRp^M<4Ku6?$^fz4CN@(l_&s8eo_CP+uNm_zX`l$emSOz~PKt>DWu2XY1up4PCpe zpYG1xa_L>Glrj6liSJ~}JZsn$FUggg^LNhJ!MnI)n>J-9F0M1Yy@-+AXW{3&&o8{> zhyDA1aclejUwp99n*ZHzzt1(7-3n)9=yjIegJo-(e3$&fek*q9)7JMMe_~^^{h&>Y zii?ahyVi<);n4)Pc?k==2mA1Ou#rF9ZOaZ`#C~wj8GHP(k2^SC@GxiifXQBqJHDmk zEci})6SBw!IWlsToO;Q|I&zy4xBP;e{@GS?`ItwXM_+70XH4*%6ETV#c*MdtxsV^Z zxMx{!_}eD!jFs5Q(>YN)*+xDY-}y*ROmHg)*Pi%m*Ti<j#3*~ti3bjG)R!ZfA?neS8#3&% z3%O@Xv8X}R2psTb*Jf=z)5uc?cH-gh-@76{u7mjrpSbdm+0yPB5IbS48 zo=5OT9OTXyOy>oc%tf)Hhr~l4cjwfDGtpCvd`GutodzDxo80wuwTJ^|^2WdViQGOZ z^ZS;uJU?Td!ww8M%N`C{vv_Bb$=CJJxdn&Xq^)aCJy@0Zgp=Zpzqe67l^gzwx3wi6 zei7I88aRY3=2_>CMO;?ozZKr*5}XE}#y|&l_NyT#cHwIr$E{fjjQB2R3}eGH<8YV1 z=R$bY;z-^Vask^ryeBcJpXI~{H|{>WXUP*@t@Qr60B3NxBiGTnOFq1tiGv^b_|Cz? 
z7oW&GW)xd|!Q{zU*yHOuAWt0If*aWJ&Pxpkbj3}2-rvZb*oBp`h3)=XuCsFt9P=eF z|BQfJ;KaOe#y4>?NB2x;e=~|RoaikxQ8}AGa`1&^|Edjl}j9stUczY zcnOC1=qpEJ`kw{J39eP6sPz)AdC0$qTVS~^=1o4xY7}A?^(vup7;2>_pZKodm|fkE7)+xhb7${S*-01`wPGE+?nLImrKV&u2|`V{q|mV zi@-j72BvJla}M%ZnZ&_2W1M9_?kq5fH(luqmbiHp_PjX52|MCt+_Z&*^M`X`^l+DL z<*fbi1z2G4z%{v;k9=F%m=`h9R+wPe*Y&{$+lp0j$Ol_=oXHg&?h!F+ns;y=Lp%B| zaa<$r_((_o87q1QuVqYP*iLfdyT3jsCUvrp{HiX*R=%vL3%Fq)d@F{0+ro_4@%Mm+ zZ0YRxq40=F4m`v~4kmJN5-a7~=fp02&T{L@w_=q~$j85D5?uMXjm61zE=$DXdLW9?%aKbPuiHPY|NJ#f>*}y9y-rS=H$6_zu}H8`1p|TJrM@r_S=?B3Z{O4s`|#|G4{|_PfG8<7Te$?Lj6V zVf%f?S1Wj=z3PoDW)j3pTNnb7IH0RI;JAU_~rywO!o# z_SW}z|Go(2F)^e}B=~117 zSJVfayn7zti=AXWK44{>2^Y`69G&d^O-wu4IOm}jVT1j&$KhW+z=s$arzdu;u<<;I zC*Pyt=NFi=3%+nBmX_u@W~c7LGFR@LBTn>$doE_ly-ho?;ej4qE$-lHHq0~6!392W zj^0s=m|w-@41fN))jYBv^vv%9`o}+X_~N#A-ruu`p?#kB`QsnDbtR>*SuYj4n{LhncO<6BFxYV@r zH^;c!&T+%$Y@J<4c-SuEf|J}MUupz4e52O)&$)-a>p9dM-{Jt@tYud*q$}(pCuI1< ze4Gug@gkn{shY{x{W6c}CwUD$q^IiF;S2WZLRNBc6+)as#x9=9FXodS4%#&@)CqnA zFP~Fx9@4WX=B?QW*Vnm3?lDuc9p)8z=qwY){Y}=dCB85ftNMAiaV2&w3$u7(FBus0 zpJhJSmrclHu5>mJ;)Tuq)49%CICn6O1)r$7=Ysdh9HZtDJI+zp@+%I^OmMh=s8Q;i z=U5MOL^jFcet3E=vM%V8yK7zgL6&%mIrPRp4SsoLjE0V z%&Y3<-Ms`ae1_iIFLTHmdcKOk;>I_EIV5gytT|^U;~grlaEFKFO>nOn8@_eOjSIH? 
z_@?FEwR69bQ!5tOwZM<%??hAil)N}{&%88SF(ZW)clQV5D`7a#o7SMu?_m|_)2Y_VZQjoh0$Q*+l%zW&DGH|4=y5APg&%n3onaxcsc8&=d{ zsJ~*zy9f_(sWDhu=u{{3aUSyL9hTf&{qu*MgO6uH@q>TzsacZze)o*{;1CDf;v-Rq zTr%*rJS*TaU;b7Wzv&$r3;&FXFF6~7GjjB@o7B@WWHY`)_8%4@>5|^A0=k{PSyg z*Wo6a^CS-IkFR>>V)dD)FE0GbBTMHCzjCqeAMPdh_dxfSvsNQHXTNjoDW8`-ZA)(- zrO_1zIi;U{<8FLmKXX?8*!06fnAScJmP-i%N-%SI3t^$$$M`r z{NQ$RoB5eT;yVw?dvMsRp6u9YW&IwZW<7jT-p77%D_`Xtyd#F=Ss(TnPtU@+*c^&a z;Z)s_PjGxr+w4iyI=Pr1XCAQP6zB3|r{;AJTk~*T%P03vJ94mVw+fkfl8$|`Uq#}3Fk(>s_B?F6iOcF=>7DZ_Rfvi$zS*A zjr5L_ywJl3J-n;tUCJDvIfxj%i(@^&4DKU#_&HbccMjkRC-O!g@6HgyN5J$s zcF!Dl`fuCV>pKuNl)l#@xAec*bHRNo(>Za*+M+Ms>@0F-#cVxua)w*v3%}$_t|h0R z#PmB|wu#U8qDLlu_95m@vG`UHclhq5-Gs0GDlf;3SVOMD9O}BlS(xnR1}Dzs#2H<3 zh0i3Hs%c=QU(QMJE}nbZ1;3_W&-Aw_@FG5Ov77Li!wZ|*Iai)KlOy@`$YXJ47WuYC z&f=vUT`z2mSutu4o5(wA;&0wwzr7jL^-3()b;1!HoV+;4{Si2e|1uxl+X+|a9l4X! zbY3dwl8>`PAL<*wtDXq2df|B}eD-|M`{xC`!6G&~s)r|ulY;^C^wwG-$AMHh zFPoXp;uAcy_xRO%-hHPmSlrn&F8E%VLkkY)y31~ctQL3q6Kk*u4C>*USRVdWo2V7J z$<`bzF0~kCK9d(SwgLt_oROvNTyp0Yb*y@R!1bHT@WXbLMU7Kde8p$r?j901I8FJF zIlw77s;{F>`Gr0(OE%g&fAZ_lVap@EV}^ai3J$~kkheM0i{cz-W4d>V?@-QU)xEgJ zUD%%EdN+mXceCEllJmapZ7Hr|)i;HAVftMO`P3$ATlP=)_W&Pqt(@yjPFVxzuY302 z`O*W+Wz%8DQ(TBM$-`%K$!{(!R!<+PN1TySt5>)_g;jd?IERhSs(aL4{^lYd_m;V4 z5vP1aZUFQM60lVszu@&p{?cU}KJ@%7&3d=dvY;cD+_2Atf@<_Yl zJn$DD_2N62eV+EU-qWZBykk{=r|)INifeY_=)Trh*-lOm-S=jmgrzh@( z{MyWgzEE%8iRPqp&gegK!-mC4=X`Rm<#XK6=()4*J?3%Ota2w|9_hgL^N|~Vz15x7 z$ba@Xe|f?z?q{^*0hQ`*an5WW^m!I4GX+@SAX*@Q)h88NTdr)tYmM23XT`;e_rC;kp;t`;09M zIh^32v+~mh7Qi5k6Frl z`h-mSdB{DF>Xmyye-*cE9AAF5^L}_%D_6y#mxF#T-#+f~KGytXEZG{Hxxz2+_LpDq zulV@rp*Q4iTLkE1KZALxcRy##ajAjxU{4O@+trdE@3Uud!i5-G#+8m-hPfwa>Z2K} z@6FoxcF4m$z?L{s4|H(zKH%nk(u*^Ca)NX4Hl}-DbD~<o#^6VObqBj}h#f1w!_**bJ+H23$>2C$B~Q+MVai|UPY!qFrHfd@ z%!x<5o5ai-;OqIthkBD!o!JA!?^fm4Mo#rE$IqkS5c2{?VO5Rrp_cF^zRo-yzf=tF z@Cp3t(6H4Lf-0H+%=U!!337%{EtLT5w6eQD@0y ze}-{GM=yr&CHXMlyPO~VN~bfm_s=Wha2KBZhd8Q>WyD86<>lJSkJ|C`(R%FYL*d1a 
zjpxnzV-vG5ek0-WAI`j~Klg&;7VO`A-Q7O3|Fi4=8+U5FB3&EzEtdUAlYEEOKMzeW87ii=41wr+#G*PtMw1Tk$k*@;ATCl^nZV z)ZO*q4iD{qQr?AsVMfl1qq;;7iG!_H?L!R#syg7ar}9aU!lc5NjU! zY+=UC8yPmj>R8kVti&U}=5tjwGG}I{3Cy9_n!T7c{LzEWj+|Bal1aYBaZ@H2=GZe* zI$?y(G$wd9&R=wgi*rJk=RSPw-^tXyV2|@K`?7aF#+0w?B^z8p;zL~N%qKW7EAcH(K8Xu{%n@~i1B>|O&l!xuVFxR4 z+~0Zhg0t`96#app>}t+jhtN@z@p}nxa4UA6UF(ch-%Xjs{K$q`sM*cl+-(jSV@?h@ zq^)$+*L@&=e2HN$sVf`fkPlqg(ZdtJ-0SW=WVRzFH7vWK#$beQi+OQo!7tC^Gw|9a z4ml-`^CGSmH8;16+X~NE*rGph%hPA9?H|9rsq-uU^Al&U{Lk-f*|)s^FWc=O-`q<- zuN_X@Wn+2nu3PFF{IM_F^mlw@inTzd^NQ?Bz7q1y@S^X&!-F%=GOqm+YqR(g6P~w& zCytxpVC*>GZjL1bw{o8DQ@OAjR^Wv{=jlRE?2PZafx(^nv4=l=0l-R{ngQ!#e4@6LCS(=%4&>0Il*C1=TF*21Rh;h(pj zgYqLj{39pc+sfHgD+ezQebd_hIGql^xBdmE&jH=cli{y5*em zUiVx(nmA*_*?XQ}?e{9>*a_oS11Ww02R`^K12&`B4({sizEU@RNt;`nSq(8M^ z0b8qhVKc+7;&aEhY}g~tz?tuqc*Pyed^)fvd^syl`H{yCmS>>sRIf$EvRz+s@fB|7 zZ(aEn<|NmM0}t@Q#y4{rYMNZ^7uY+%Qvagf!Jk;uzWHuE{E5MnHp<8S0S6o6CqMH? zq#P`h9NZnQXVlz0=?QZ3bALE9NAO`G=OLCh@XVP#;xBO)f8sfo^gFVqQFr{ojM}&- zH3N!wJoBP{#101BSm_5(Y5<4YTkIqgzhSn`3ps{14<={r#wR9qsoKcjXY?gY4tjTv zmw)s+cQ`vYILJr&xK`RDS7gM1C%UjFFWbR^`6xclvtnQye)Z18ETz3`GoH;C4(d_N z5xzVn(+&^Cs`>Ocjo9*;_9l9lb)_HFSatC?qhe4;V{7Z;9Jt)09x;dDmkqUGahH6O ze{qU>W(~-ZnpRz^-uM$6x!<$sO}v9+-CPUzTV}*7&QqCtC7o-Kb`#n7&BUKL#MF~B zI?RJPm7F=kNcNsb@VRGRemAlX$R;^@Hq0+=-A}oS|4>io5qLGvL;rfknylhyT*>R_ zOXanrp||iQKlbF_B|ca!{K+MF*tdG19ywle-Z8w-!m0Ov9`gW>`|R2!7Ih~^<>Wkx zjX!?rOC1!;whek7cxmfc6CCj2Jp8N-Jv`%GO1RIKR8E5ENp_CF_IS;{&VoSgIDk83TN53ff+eqo0tp9 z_qZ}Y?42)ou_DeWcMaibJjr|c_S`uWKXajGmUCA+aGt|axIOn;_@KkScIo`UfrI3( zANn}QpTkyqnFt@wuF=q+kXuIW1;FNPhj_%W1zSyu*4qx3_ z)P?t}?jXXeGsH=HYvid8c1 zEbau%c%Bu>`&dWv-gz#xlZ$7x&g@x3a@5TG&yEvT%w6oxpB&VaI7d$L9hA%*$g8+< zCb!zLDLZU}yK|=o3%Ex7;eCK><`8{=r+m$q^K{`ad*{F&E8a1k727>^T*aYQvfnyj zagKUL{K(HZ<|Pbr*9trE$pJg9{1)5ZcF6Q}{30LX!!I$S2gIA^u)A~ORQ>56&)T~= zR1SrO9WwUxX@XDWB2MHje!*Qf6TYsQpF8VIsLwzep$3rK6exHaxJqs5(IcYXLuZ|55?Bq(F z={2!xubGg{dDTA8d&O`Kn0Lvwq9)W5?5Zhyozo89)V(X;unDfk2|mdQ?5crf#UtgF 
zgE>g%xh-Get^DNcAF}^7t4A+>^7ZHTT5ZYlJV$=b>TS}Mt^fS#m6W+AI=3t^*|F7< zuXoNYoUhJZ#&n#D zr@cweUD~)7_|`Mpo=bkcupO`T$Yj&P1FYg)yo)n_$harhs>6^Ay70fvwSeDbwZw@P zJyMOFU)Z>gwoiQJioJQ`hpyK`?!4g}>uA**y>Xd8&jq=`TUY9bFFkP{$U}yYm-OKe zR{0c`WNGJKyOxrP%QT0$gG+31VkbAxpfJEOSM-IGGUw);k{@#Nfxoc)_pT_nbD3E` zi7Tw3e$>Y`!UrSis=v0Qj}mtk0~_i>?>L9QYoVG_x0vPHv72IJlYWlFnY?R7oTvd@ zz={2#hPX0!eqa`-urV$=@^SBwW1sa^jC${@<`IWll}*{#o_L-icw~Ilz2<1_W9rPa zV#7IkIuFHBPBnkTkNy(B{DX^i^UT>bN*vWWc*~ypXWbu~gI)M}rmk>y&t%KJ>gn0f z{>b-e_Y@oAAP-&jAnKZYjT!Mht8n*|eAI)gt>c*o^<#~B#LP{7^V}sb=fgRBWPh+_ z$4qA59Lu)EuiS~1IMj$G|JbK7L*}{F`ySsl@Fn*gWgR%z_sltrdAL`?_f8*N=cskP zSMGDwts_4CVmBwhyLtB+E^tfRUT4*{=6)$=#-_ewkBJ`~U7vwd;yOlP4n0)P@eReV zYNy>l2Z+s?#k`QOxVcyK2YY&ge6(?H)LwlVzWMMZ7v<#GS%;{{BwyoaP1Ik%bJV`# z6i)TLW?+s@*hfBz35Iw_Z0b`yQ!YN%3D>LL!c&sds2*O~rMSNBx*?e<@LWWN2^E@tnwnmo^6 z`P>idelfDC+;bOp*oM7qjj6lmC;hPP$N{_D`(yss)x|5H`^<%8J^TOu;J_Q(#BarImbr+tZMxK@-y^QeBv#=i zNAn_X#h)h+_DO9nR_=~Hk9tt+M_Ay)hqxVlB3Asl1?aJpjcv;(qad1-T1_mU-l>VkvI34 zz4R?R<4w2{!%KG5FnJfYbGL8eRLzm~!f_oY^@AI{6vy?$4*ZmJj(R2sFtR?6G?RKT zU!9gXMIPXAuLn$>>C=FjafwHK_Q6MI^M$kQ;5BDoz6Tb#8He35*tJ$_w)mc~>f2Ih z?(m5GVlFDL@Q+!pxUt*LT*@Z7D|UPzxPvKu<{bG+Ua_1nb`?K4Pz!3c!d~$>YYq8} z|LlKeDc>3S;VV4Fa&2AXj769l(hZ_I0~Ny-v0Vg&Xh zEu4u5M(&DHd}7w5D;agjRlRtG_X%E#?Qfv+44vYVcdn_dYHr_7?j1htxrm3KYc|HL zZ=pF1-^KFFc=nM@?^C>woG18P;XBN&u8Lo=h+F)o=hCx>Jba4h)F);oc)OPPGU4eW?5zaeLGh>pfW=2Ua|I*;7Jwr%37CNYP3S3c%2c>Q@I6>cIT2dvaj4? 
zFCM%jtiY*U+zVzi^Qsz!FPz~t$&>fg?-=osm(BmkJ1TDGEuWeZ*LS=xe_z1PeA#m@ zqG#~Z(@nV37vqA5E_}y+BGYnB!HZhePR@Zn-QkctoUe3gw`8wAyVs6C;bb{+6T|fc zBW#B6Yvm`-#>G$k9wQHW+T)Wl&rZs)DPB6`Yk%3-`}P>V!i#Tg;2C$QPw3ry$ILj@ z8^@);%e8n;&5ZjZ9k~y+N*>iS#UJ1AsKt;c`KUh6S*RvnoO!S7-LCVL7d~mT=dsEHN--R|e_eSiCp`<&ZLR=Qp?-9r}VK_{D#x5ur_@PU_PaMzCAxx({* zuk7%hZ`-_JrM|~6&*EWQ;^AW~^oi|yV57S+bcSamycr8W_VPuCZE_YS_V#y7#qA=t z<<66P+KbOa&3FzQWLjC1IK!_vXisjgQ46P^zxSvGvBA<=Jdawy3%|shV;AS*a@2qc zCq13?t|u4#$pv|G^c-M=o+sq4DcFf0y+}UP()EhiH4E|oGm03F3Gd2_8frb{J$OgI zImf+xhCX&C6!mr<G2TG5-pkN2g{Qu4FkzAJZjoxPdJ-Vqq zm|FBnbw0{i)Bu~PMR3l3_;86GJvj???5>zOv^VB_F?Q#wC6(aOHir>w^(7)0a5NI~MuF6>eD<;-Zs}`J-FFl{>Km z$2^RQZTU(kF0PmT@U4YS=4WhT6PE>c*|8@ESooxk=UKMkN3Fob9^CX(eS%xoweHl6 zI)LeKRq*lLlaqfoR*sRcZRsU5Q#NqJmOW>Gm?z(n6IbRlHuOGbQTN21#tV*_llvC-=Ekgo*Z#*(T{6tg zfBe*=4fm9-aDT_4A8DWV*QYP`wq)Dy%0~YEI}Ux;0zFt@|Lti9%vh57%pP$g9_O+Z zrtPLO+50`Ba5~Qe>yjQh@sJf4V%K@?zN?rKcjN)qWcfX+IP{G(`pn0?;0#W1iTVZx z9L;l%KKw^nU{QZ#Iu~-()A?8ZlK##y@E`Tu1pcGdOU%1mt`T~KV2^V58cz5bn|+Dww=Fhf+^ARjFX6~$n1M-bVuCfy4_G<( z_D@@JsXp-@)3?Zv_{+63R@K1x@ClsiL-?eu@>g!g&H6lRw)WlL)g2ziM`zD)#bS?h z^u79;`Kf*-_q+@>D1Y*zZhAVF@O$|#q*p%i4Pq}j`>r^ymC25}P&d^z&II9^%5_!D z=xLsda~Jm|-%fFA#+u?TJkG?a_l#cX?zti#|MS+A$MQMe3*yxR8=K%Tp5s1zBg`Rf zbspzJc0AOx16aaTiB@^+aO(EO{><{8;7~z7ICCvu*GJ51aA# zm<0y8#P0{dU$z~KGxqrE>HO=cYw)QhJD zk~#Of$L@NXYsOf~7{Z7?fQM}@u z=8H|t4RYa-GxjYt#*Z^Lp&MqLT4lr;D?-e!9V7L7-3HzJbSW_->)RyFbiODhX;01pO{PT=-?6j{NBv+obets5B#Qb z&iG}GhIqnrpCl*Nf<5@u`vEt{wOlxwrKO%yBh?VD@5>&z_I{5tUL(xvnPX=jV7k{~ z7Z`;F9(5pQaHzV)@7=4MWT%`XU(VRViCiL|xRW1PA)`OkDt`Yae#>~{?~j59d|8~~ zq_bp!8{A^y>#poz3n6e!?A*M z)Fb5n#=@!ixdzyy=Yf+pb0Zh-*ym0R?(FzdH_fH%3BOi$meo0J6)$S8J6N_u2L?R_ zAHKzbJ97NifA5QP7QD3Uej&0pcAuNLV|StCUu+(D+wWjA*UlY(_ck$%Czfwvty!w7i!(VAlRbI0#0gH=Ur6ra8#R^Ad?Vj4 zw&0Khdbl8u`_QWfyyVY$p7?O3);yfSAg<-urBC)eat{5#rTkaYS9`?8F0qsYF(a4c zS@oxWLvFISf7qI*IJy4CZH3%2zcy+R{RJ2Q&{eEqrp!TV<%YfUgfnOGq_>al=tpp_ zzKyjA+vrcmtD3n^*%#N2bH*HI4w;sF#<}K)bH(MX_}Me_GtaCA99ixqe5U;IC8lJq 
zN9HI0VIGjH4qt>5vo2e|XY|tb(XQHrzVeOwR2`_*&>vx6n7#O7llj2Sc!BTt-*)gC zxf#Q@vWH_6bqMavMsn)2Cay8@h@afRbq&A;10BA}tKN?udFrvpHhUK{fj;6Hqv|JH z-ci3}!WiZZJY&jdMf?=UbJMUVH|sfrfgLfl%rVcpyPn1#;|%Ynd?H_V@=9#QaV?#L zpw+vYckt}fmaTN0ofABGBr`vF=xTq?na7-*sC&h%bJUhRrtc9s1yAgVhd=S-Zk=qL zJ99#RsxFEhbxmHj!xvs5t5}Xxys?{CT=1|*h7V8V@A}H_i!Jd}?sMoZ_dPhdUklFI zYiZ{k`K8SBmmH}jcDW)~;$$tDN9M!te%8gkLC(TXSo8<}#5LdI4;J#YL*Ht72H1;R z+Plx#gO|AScMkp*h20RR;?*2fZ1c(3ih;e~HFGac5j$#6yvR9bHDhL8)TO?+!}m=y zQ1yyz5SmdQ_wM z^P#}!ymp_~PWpbDJO13mCh7opmTbjkJW~tVDZYENRNst=&i#Ry&g6_td-w!j=Yz=# z_1N2zZ|1c_b^Tz?q4JJd1ruJ$jXAEE8Go34;k-I?wXbWJ+|3)l;IR`cb;6^z!CQQW z8jm)GXFJ*MXqboKf~W#7i|D`Hgzx6Qk*adX666@C^Qr;hMxf zzE@JaSK{Ov5-ViZWAAxMAJ;bZiS63KtM2%?PEoJwThy*%C;w;3CsoEmu^Wa}Isc*=c^mTUK_hi++`fBs$jYVRL- z%9&cDjx$SkmP_Aa1224~a~v>qHqO6((@)G;*b=+!6RU|hOB{Oekq6F{!=Z(Y+myamOw=5j*wHk+`xG$NRu*@xfO% z=7ydnopL->n69_+(3)EI&1e#vM09V5j*j)_mYk0 zf_=0ddY3sz&8a&x6m@p3(-;2uc$s&^HfP^6cG@^kor52F^1vZ_>G|)7$33_nO7Glv zc&45C;U^pLs+Z=7edIgLMvpCaV2}s9?y9@bU}#xpOwrPexPOCP*cykrDrVGzSk#BO z?D*9lc1!O#vEVSwFFA3SUn@T^{J4`JtLj87mh|EP-=SyD4Lj`7lLL4<%MuOdt&Qu z{^e7$s2jP){FN^}(jQ*ND&J{6;Kh=zOFj7!&-0Wr*Abh{9n6;4?Aga*p5TIQ`l^2S z@m;nL^1yblRh#(UmJfMF4ENOitXRaupPdC}agYrhsi$iPPM#efogT*~mUO8n-Z;aO z8xvf3RK7STCU~B6+0ZNd$GJGi&#m;O_QTH;aP+8-hx$D!jO+pY%{S`6dwE}jKP&OF zKKHqvlDT)I{!IAO3-oLIX6&+s2zKzn7JO=2dtk)u;S+XMS9ZVS>F>LJu}uzerB-ma zee7_DpE=?qo%<$!AtUdJP5oRqhIGa)zp7o{2b;h&-W>+=iyy|=&MwGZ^{wYXpK+LyIj?iKQ{J@5I8AN$1jTlQM$e__8P z7HN-DP$A^V6M2Sgs-Y<)Z&lPwmQ&7*W6C#(g{=VeatF zdepqSrYTF^SUZ9f2OQbJiJXX+=i0}4#9j+M^G6+I|3E`*@FLeE{C*~m?a<{Nd-)q1 zEd2bgAm^+FFV4o3t$bLniRJhcQ{1UNc?#3-srR(^e34sV1`hYk*F7s-cKQ~!&NJ)y zGWyClq(`TRyfyFi^@tfh?8$}qz;l2d#@K32dNSl89AoeD9R!ypU*mzlrjBrqfGakxNBU(RQA3%Edwl26Q=182VMmVIjT5or9f`9!=~FXhJJ&}T z=2aZI*9w`rw>sDC8+YR8*(rbchd+76xpv8O2AqpABY*C(s6GDTPLISb>XuyQ$kAiR zO1bSyCX=p~^vp2&HgVN!Fo)H0P8;pR&r!?eFTZ*pJR80X-!)~1;IjfgX&#qVt_Bt!B?VtSL(t-24>jUZ`wD{G5BHQeqo#Wtem8WzjrwU9*Qhaj#l$atTtn=s2X&0PMm;5`zB!McL2}P{ z?!EPiO`nKuZrR^~XK;r`L z%uq9W1V{F&VaBO!|V?4L)Cmw 
zJNXGa>k;`ShpN$#BeAJN<;+fhkjqE;#X?>SJu7t4ugu>)rzaU-Jp7IFOp#ysx=&RP z^>CP3*NWJIch8)d)G7E-Lw_5V_@cv(UCT46d&}aU9LOC$4cx%jQoWhW=pj2?WJCS2 z;pzBgB1fM#Q5)hSV}~F1+~Gt_mP8$V@(dnvhlP)xE*HEMWB9fWI^pt;)!n;f`94Hm z>9>-(JD220eI1h?;S3)Ba2YwpHwXQ+hS(N=AP#kxnfDwvADyFMcwMW zc9-2yCt?v7S;bA9o#G2;RyJ|2n#cPHhIp7CF?FXt)ROuSF|k7)IUv8Mhc7#|W)~;# z#a%MJU%%<{I&%cVoTdpARy&d4&D=GXwr` z#iuxP&U}~$pV!L7irbC?3g_a6UB&3YOP>ZDVve>KkcadC+}Rg6ANV9s@-!bXQa0SdEE#rzdjZ(C z5gt5@k+xmLKSms4!8`L%uE$iK#wBL>UjRSq*l3CGG4P{C@a56@7}b#2jww6u+`U2{ zH7TArl6%B}=hQdyASQMZ+qDwj=e$B5vOK$v$*Yr$#nZ8tF)nD8yEv0i&46RVN3}4o zq4(S;i}T}9c#sSC$s$MdkSXz9h`3gVYLt+sp;`?kL*|?ARlk*fCANXe-__hpniF{** zGI!@Xl}~dh49Tj0E!dSCHHL33aNt_C4>|Va7_$za#Bfd1$Mag)f$zT4%LzxnU*D0TBu~VzPE$o7~`$!#!K33g|6Z!KXmmhgEHytZEy3d>?vmN=Ru6$gtlEn_*P)FBG zcjXfceDHWGui)%CcaGdESLTH?vO2TJJ-%6iJ-!#x(FeztuY00cswW(xrx~AmcP#11 z$u)>`{A}W^yn1p2H*CRhKFG{Pa`K^P#1U5L^9~nt!k;C%-zS~@Fh|I+58KayVNQ|} zUoit;4*2`Kg*@*5{#YKdohN*V(`lJs?bwXxh)Z3OgIRfHT*W4z$SrdbxA+!IuCr%| zT5%S}`13aBaKJ>b*y36osIlLpt5a>N(v$ z&wksVG?#q+bIr%z_oe1dfB#_fwEyqp&6jU}xVi58cQ#wExx3l^?;daNy7@EB`+xDl z=IVR5H@Cg>{^r83Jkor7|Iarko%6}&o3E3N{?>W^Hdp?}G5&TA{`6`7)JFcqB7SGS zewP~m>vR5BPyYXO@?X>RPiu6jIpGKqI`C~*Ui+p?-~6VluDKpo`%%slo+`!q%dfir z(i^V2;`%pi+_c&97oV_U%mCRBzklD(AZfgE!V&9MWPb3QuDRyM(@uNEmDgSMwkt2) za?&|Fqo0!Kw>)j#zADwe>-Lr3em8F1bjms1(?0gzjkD|C@W<`{`{K(N|K*Fn+_$HG z-sjC<`_caNcmC@9^miWAp!?vND>L@*-*rd(%TN08<}cp*m$v)PmH)DR4Js4{)yR#&iPLJqc8o#_KF9-$ZB4D!?R|8@QuH){cCUdNdE`lcwK+kN6%~j zYWr`^cf9;%v#mQ?WB<%kH_X0s<&*l4z4!O*f55AbmEY^Yn}6t>Yb86iCB8WJ#60Yy zpJ_jMX4k@hztyj{w|w~U8Tjvh&6oO5{j0O*=zsISf6{(%|B2ha)*|Qpk!y~ak%yM^ z5truiH@>w8!$yf-CX&TfKrfz0R`v9dc;@)1}W}{LXQ&72d~;O&$NY zM-QF7>byf1#KMnVv1jjo&1;Bld-8=NHAhB`$c0+p^5I+Czj*6q_P_28FY15(p1*0; zk4Alg*NplT=dX`^uvL8*>O;SEN8kU+pZz_YTXGd2_2svyMSJvjZ)$)4kI!7Fmi>*t z_9|jg!xo>aKe=!}=KKFdJ$uC!pBJ^lY>7`ViA(MO;a8vGo<(n8=X$A@J$LnF&Mc4v zvp_#IM+>-t$2-7t_V54Lf*Ns#8}*Q#a=UDC^mkWv{?+~q&-#RVc7G%JT=_KQOuaQ< z&Bi}kFP~pm%#Y9)&mg(Tz8C+~zVNI~3vzt?jo))!>4EH9W?S>B7&rAgGusO=sVj3# 
zO!};KPt6{AZT{N-)!upF6O`Xd&)h5D#iJkCe+F;nQZ-q?{ox-TJ)56=6n2X%9=OoG z7q0NWdJnx(9;Zs?~@d(BVJd#t|ko;sfof93c42cG|~{+Q#pwSRW%A+vwl z`rG~8&w6`*;D!ITKj5f~`txVsZ9o0%KRSE)TkmZ@{rIB)mCyd4{Wm{!;_R0g<~C_`#>}Td*!W>lgZs51cy_FLH0l zv2?2E>|3Y5*S2?k>&_m|@WNL0bX;teXRlc5!Tt;SC2rKWIseaJyZHXcw@CKl#lL;Y zIga4Gs{`HuCr>>YQFkwZ+@6u7sMq;;kM$hT0EN`c;1OYO!cVo9R1)^ zaBRT9|4;AzUaK7ZPROq(C-^4MMs+=nndYu{W=1cV{oiQL5WmOfY`yami7g}W6d&L2xzvJ0IzIf!n z>^r;jA0BA$-u{O6UC;dyarkGmlkR<*<XOC?XP*acs(W_uWc{6 z;R8MX;=H)?6EEyfx%4#)@llK)^p~C0x9heayg&x$yV_Uw#Q(%$Pq5u(`+TAmZhPk^ z{!_pHiihAXJ|FAvYX9$6e5r%;+4kAbss_i-@R$8P#Jl+0|7!l__v^R0Z9lk|zvBEK z;{SE;@KlT$_1SjwcJmY0xpIV4PoD5qOj&%|Ltd}6Nu=&NS(>&0`f z+~!~T&V>u=MUC;nMzylvCk}gie`)`bo;ng!{Q7mjwEqH*+ipHfdAz6n!vp?KTwf^t zS>o_@`sdml@FQm|$U`+~v|9CkcE{CsxDE&W$R}IQPd?zbR;wrGx?g&S>jp2?6%N!3 z*=fXm@&QNBbe_{A>UP;an}(X+{Y#I!$MQRsn!oGohb?Y@&3fhe=ch6c^h&i`{7e1S zhQjY(f6@QbKlX+f`#%j)uglJAHDBB4y>eVfjj*L=z4BU+bIwddzvSQEbnSCIUtlv2 znhoc~tie$}Gj!tFvTMc`n(Kw?*)mJ;L?@2QZyj@Jyx0GmH<>TNILqGieh*yW%KrL)^S7-qXVgZs z(lWcJ9DTld^ZtQD?%0x}Fx1O)J=@regKMey^lVOU*uMM4FP$+Xob_Jy>JN4Fox0Ia z<>_}`^W%EJ-E*ROUA^`9m^Wsk=N*6}wP4QR!+V(gc&~fjA7b&{sCp5XnuuF}!tweS zvNiwqs(&$CuYc}3mG2&NveUmv%3zoOBDwj*o&QA=*)Mh7&i^8r#w!;+dv$&NOXSb~ z+@;UoaOCP!*X^@m!|D^(?W48t>b~o`ef-NsN3O2E{!Q1s<;0DbU$yPJ4Xdl`WVfGe zUR0V*yVY!7t2y?fr>`Ci+DRMvb**||ATPh>#EmyxdDTz9;rgXuPhDMyW+xKQSUm*! 
zwLf>wHCJ8p)0bX-<+f{<{1kAif6KOOm(&ipX}1A)k(&c<+DpJuzsrD|cMf>$Mb2;+ zC3C>dy9?-&UVhD}Uk+$>b=v>A;M7wl1=#PRLsy?X{6~omPh8zk-Ceop;MM*8-&6*{ zVXLd-f72MGhp!$m_-@=hh@ZZ??~S@Xb@ecT9m1%1q@J*Df9?BUblmEb;y>7|HDU6+ zREqUuxC!!7z*B~RPbtJvw2ZJMA1R-B;sEiCRsF{i-Iqxh@qI5kezpE9(^{L6^%1K} z|KVo@7nw=W7Yso+44|WI4alfi203*A3FfCxm>)YS`IPxuo82-W!Iy$=8e2RtpU9S( zPZW9umnLw_Ns|d&q1j_~HjS-vV!Dql*;k2?9Q!1(oosfO*aUnj=;pCiLt+!za$=+6 z@|UkZ;nb5ioU{5;&G}1K58iO#1wXg#n(I$J`O+({y86=RuN9oQRd}27cjaszo*x(zFELOyL!}yBMdxM zW{lmcPTalMrroPHzwtso2B&N|Z}q1X`lYLfY&a-#+IUJvTq`+oC!0+>NjAUn-TSN_ zG^MYP#M@SHREX=n0@-0_gzwNq1_WAz-Ao~q^ literal 0 HcmV?d00001 diff --git a/src/qonnx/data/onnx/bsd300x3-espcn/float_model.onnx b/src/qonnx/data/onnx/bsd300x3-espcn/subpixel/float_model.onnx similarity index 100% rename from src/qonnx/data/onnx/bsd300x3-espcn/float_model.onnx rename to src/qonnx/data/onnx/bsd300x3-espcn/subpixel/float_model.onnx diff --git a/src/qonnx/data/onnx/bsd300x3-espcn/quant_model.onnx b/src/qonnx/data/onnx/bsd300x3-espcn/subpixel/quant_model.onnx similarity index 100% rename from src/qonnx/data/onnx/bsd300x3-espcn/quant_model.onnx rename to src/qonnx/data/onnx/bsd300x3-espcn/subpixel/quant_model.onnx diff --git a/tests/transformation/test_subpixel_to_deconv.py b/tests/transformation/test_subpixel_to_deconv.py index 17a68d19..b033a476 100644 --- a/tests/transformation/test_subpixel_to_deconv.py +++ b/tests/transformation/test_subpixel_to_deconv.py @@ -47,7 +47,7 @@ def test_subpixel_to_deconv_float_espcn(): - raw_m = get_data("qonnx.data", "onnx/bsd300x3-espcn/float_model.onnx") + raw_m = get_data("qonnx.data", "onnx/bsd300x3-espcn/subpixel/float_model.onnx") model = ModelWrapper(raw_m) model = model.transform(InferShapes()) iname = model.graph.input[0].name @@ -67,7 +67,7 @@ def test_subpixel_to_deconv_float_espcn(): def test_subpixel_to_deconv_quant_espcn(): # get raw quantized model with reference input raw_i = get_data("qonnx.data", "onnx/bsd300x3-espcn/test_data/input_0.pb") - raw_m = 
get_data("qonnx.data", "onnx/bsd300x3-espcn/quant_model.onnx") + raw_m = get_data("qonnx.data", "onnx/bsd300x3-espcn/subpixel/quant_model.onnx") # create model from the onnx file and infer the shapes model = ModelWrapper(raw_m) model = model.transform(InferShapes()) From 057c26335d8e7741cb5b17e3c13b705e58caba3e Mon Sep 17 00:00:00 2001 From: i-colbert Date: Mon, 19 Feb 2024 19:39:15 -0800 Subject: [PATCH 41/83] Feat (tests): adding resize to deconv unit tests --- .../test_nn_resize_to_deconv.py | 173 ++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 tests/transformation/test_nn_resize_to_deconv.py diff --git a/tests/transformation/test_nn_resize_to_deconv.py b/tests/transformation/test_nn_resize_to_deconv.py new file mode 100644 index 00000000..15be7cdc --- /dev/null +++ b/tests/transformation/test_nn_resize_to_deconv.py @@ -0,0 +1,173 @@ +# Copyright (c) 2024, Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of QONNX nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import pytest + +import numpy as np +import onnx +import onnx.helper as oh +import onnx.numpy_helper as nph +from onnx import TensorProto +from onnx.checker import check_model +from pkgutil import get_data + +import qonnx.core.onnx_exec as oxe +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.infer_shapes import InferShapes +from qonnx.transformation.resize_conv_to_deconv import ResizeConvolutionToDeconvolution +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model + +np.random.seed(0) + + +@pytest.mark.parametrize("maintain_bit_width", [True, False]) +def test_resize_conv_to_deconv_float_model(maintain_bit_width: bool): + raw_m = get_data("qonnx.data", "onnx/bsd300x3-espcn/nn_resize/float_model.onnx") + model = ModelWrapper(raw_m) + model = model.transform(InferShapes()) + iname = model.graph.input[0].name + oname = model.graph.output[0].name + ishape = model.get_tensor_shape(iname) + rand_inp = gen_finn_dt_tensor(DataType["FLOAT32"], ishape) + input_dict = {iname: rand_inp} + expected = oxe.execute_onnx(model, input_dict)[oname] + new_model = model.transform(ResizeConvolutionToDeconvolution(maintain_bit_width=maintain_bit_width)) + # check that there are no Resize ops left + op_types = list(map(lambda x: x.op_type, new_model.graph.node)) + assert "Resize" not in op_types, "Error: the Resize nodes should be removed." 
+ produced = oxe.execute_onnx(new_model, input_dict)[oname] + assert np.isclose(expected, produced, atol=1e-4).all(), "Error: expected output does not match the produced output." + + +@pytest.mark.parametrize("maintain_bit_width", [True, False]) +def test_resize_conv_to_deconv_quant_model(maintain_bit_width: bool): + # get raw quantized model with reference input + raw_i = get_data("qonnx.data", "onnx/bsd300x3-espcn/test_data/input_0.pb") + raw_m = get_data("qonnx.data", "onnx/bsd300x3-espcn/nn_resize/quant_model.onnx") + # create model from the onnx file and infer the shapes + model = ModelWrapper(raw_m) + model = model.transform(InferShapes()) + iname = model.graph.input[0].name + oname = model.graph.output[0].name + ishape = model.get_tensor_shape(iname) + # load the reference input tensor + input_tensor = onnx.load_tensor_from_string(raw_i) + input_tensor = nph.to_array(input_tensor) + assert list(input_tensor.shape) == ishape, "Error: reference input doesn't match loaded model." + input_dict = {iname: input_tensor} + # get the output from the sub-pixel convolution model + output_resize_conv = oxe.execute_onnx(model, input_dict)[oname] + # translate the sub-pixel convolution to the deconvolution + new_model = model.transform(ResizeConvolutionToDeconvolution(maintain_bit_width=maintain_bit_width)) + # check that there are no Resize ops left + op_types = list(map(lambda x: x.op_type, new_model.graph.node)) + assert "Resize" not in op_types, "Error: the Resize nodes should be removed." + # get the output from the deconvolution model + output_deconv = oxe.execute_onnx(new_model, input_dict)[oname] + # maintaining the specified bit width introduces additional clipping errors that + # shouldn't be expected to maintain reasonable functional similarity + if not maintain_bit_width: + assert np.isclose( + output_deconv, output_resize_conv, atol=1 / 255.0, rtol=1. + ).all(), "Error: expected output does not match the produced output." 
+ + +def create_nn_resize_conv_model( + in_channels: int, out_channels: int, input_dim: int, kernel_size: int, upscale_factor: int, bias: bool +): + assert isinstance(kernel_size, int), "Assuming square kernels, so kernel_size needs to be an int." + padding = (kernel_size - 1) // 2 + + ifm_ch = in_channels + ifm_dim = input_dim + ofm_dim = ifm_dim * upscale_factor + ofm_ch = out_channels + scales = np.array([1., 1., upscale_factor, upscale_factor], dtype=np.float32) + + resize = oh.make_node( + "Resize", + inputs=["inp", "roi", "scales"], + outputs=["hid"], + mode="nearest", + ) + conv = oh.make_node( + op_type="Conv", + inputs=["hid", "W"] if not bias else ["hid", "W", "B"], + outputs=["out"], + kernel_shape=[kernel_size, kernel_size], + pads=[padding, padding, padding, padding], + strides=[1, 1], + group=1, + dilations=[1, 1], + ) + + input_shape = [1, ifm_ch, ifm_dim, ifm_dim] + output_shape = [1, ofm_ch, ofm_dim, ofm_dim] + + conv_param_shape = [ofm_ch, ifm_ch, kernel_size, kernel_size] + bias_param_shape = [ofm_ch] + + inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, input_shape) + out = oh.make_tensor_value_info("out", TensorProto.FLOAT, output_shape) + + W_conv = oh.make_tensor_value_info("W", TensorProto.FLOAT, conv_param_shape) + B_conv = oh.make_tensor_value_info("B", TensorProto.FLOAT, bias_param_shape) + + value_info = [W_conv] if not bias else [W_conv, B_conv] + + graph = oh.make_graph( + nodes=[resize, conv], + name="cnv_graph", + inputs=[inp], + outputs=[out], + value_info=value_info, + ) + modelproto = qonnx_make_model(graph, producer_name="test_model") + model = ModelWrapper(modelproto) + model.set_initializer("roi", np.empty(0)) + model.set_initializer("scales", scales) + model.set_initializer("W", np.random.rand(*conv_param_shape).astype(np.float32)) + if bias: + model.set_initializer("B", np.random.rand(*bias_param_shape).astype(np.float32)) + model.save("cnv-graph.onnx") + model = model.transform(InferShapes()) + 
check_model(model._model_proto) + return model + + +@pytest.mark.parametrize("kernel_size", [1, 3, 5, 7]) +@pytest.mark.parametrize("upscale_factor", [1, 2, 3, 4]) +@pytest.mark.parametrize("bias", [True, False]) +def test_resize_conv_to_deconv_layer(kernel_size: int, upscale_factor: int, bias: bool): + # Create resize convolution layer that upsamples a 4x4 image with 1 I/O channel + model_1 = create_nn_resize_conv_model(3, 10, 4, kernel_size, upscale_factor, bias) + model_2 = model_1.transform(ResizeConvolutionToDeconvolution()) + input_shape = [1, 3, 4, 4] + inp_dict = {"inp": np.random.rand(*input_shape).astype(np.float32)} + assert oxe.compare_execution(model_1, model_2, inp_dict) From e199a46c97a0f043249a297eb825b2a2177ef7b7 Mon Sep 17 00:00:00 2001 From: i-colbert Date: Mon, 19 Feb 2024 19:39:48 -0800 Subject: [PATCH 42/83] Fix (transformation): fixing warnings to exit on unsupported condition --- src/qonnx/transformation/subpixel_to_deconv.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/qonnx/transformation/subpixel_to_deconv.py b/src/qonnx/transformation/subpixel_to_deconv.py index f721140d..e243261f 100644 --- a/src/qonnx/transformation/subpixel_to_deconv.py +++ b/src/qonnx/transformation/subpixel_to_deconv.py @@ -111,6 +111,7 @@ def apply(self, model): group = get_by_name(n.attribute, "group").i if group != 1: warnings.warn("Skipping sub-pixel conv with group > 1. Not yet supported.") + continue # The weights of the convolution can be generated by another input op if the model is # quantized. 
Preliminary support for quantization focuses on QONNX ops (i.e., Quant) @@ -144,6 +145,7 @@ def apply(self, model): ofm_dim_w = model.get_tensor_shape(n.output[0])[3] if (ifm_dim_h != ofm_dim_h) or (ifm_dim_w != ofm_dim_w): warnings.warn("Skipping sub-pixel conv, only same-padded convs supported.") + continue dilation_attr = get_by_name(n.attribute, "dilations") if dilation_attr is not None: dilation = dilation_attr.ints @@ -157,6 +159,7 @@ def apply(self, model): warnings.warn( "Skipping sub-pixel conv, the output channels and block size need to be evenly divisible." ) + continue W_deconv = _weight_shuffle(W_conv, block_size).astype(np.float32) kh_size_deconv = kshape[0] * block_size kw_size_deconv = kshape[1] * block_size From 19c73fe733f46a7b7f8b3235369f11f2977e94f6 Mon Sep 17 00:00:00 2001 From: i-colbert Date: Mon, 19 Feb 2024 20:37:51 -0800 Subject: [PATCH 43/83] Pre-commit fixes --- tests/transformation/test_nn_resize_to_deconv.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/transformation/test_nn_resize_to_deconv.py b/tests/transformation/test_nn_resize_to_deconv.py index 15be7cdc..c6d54e02 100644 --- a/tests/transformation/test_nn_resize_to_deconv.py +++ b/tests/transformation/test_nn_resize_to_deconv.py @@ -94,7 +94,7 @@ def test_resize_conv_to_deconv_quant_model(maintain_bit_width: bool): # shouldn't be expected to maintain reasonable functional similarity if not maintain_bit_width: assert np.isclose( - output_deconv, output_resize_conv, atol=1 / 255.0, rtol=1. + output_deconv, output_resize_conv, atol=1 / 255.0, rtol=1.0 ).all(), "Error: expected output does not match the produced output." 
@@ -108,7 +108,7 @@ def create_nn_resize_conv_model( ifm_dim = input_dim ofm_dim = ifm_dim * upscale_factor ofm_ch = out_channels - scales = np.array([1., 1., upscale_factor, upscale_factor], dtype=np.float32) + scales = np.array([1.0, 1.0, upscale_factor, upscale_factor], dtype=np.float32) resize = oh.make_node( "Resize", From 59a7ca002c992d5ff8cd5ab1086825574e58dd22 Mon Sep 17 00:00:00 2001 From: shashwat1198 Date: Fri, 1 Mar 2024 14:45:25 +0000 Subject: [PATCH 44/83] package installations added --- notebooks/4_quant_lstm.ipynb | 81 +++--- notebooks/4_quant_lstm_helper/function.py | 340 ++++++++++++++++++++++ notebooks/4_quant_lstm_helper/handler.py | 140 +++++++++ 3 files changed, 518 insertions(+), 43 deletions(-) create mode 100644 notebooks/4_quant_lstm_helper/function.py create mode 100644 notebooks/4_quant_lstm_helper/handler.py diff --git a/notebooks/4_quant_lstm.ipynb b/notebooks/4_quant_lstm.ipynb index 186be984..bc2b5e2e 100644 --- a/notebooks/4_quant_lstm.ipynb +++ b/notebooks/4_quant_lstm.ipynb @@ -2,7 +2,6 @@ "cells": [ { "cell_type": "markdown", - "id": "5ef5f772-f48a-4bb1-bb68-4e8e9236fd2e", "metadata": {}, "source": [ "# QuantLSTM - ONNX (QCDQ) representation" @@ -10,11 +9,12 @@ }, { "cell_type": "markdown", - "id": "e5a747f9-fd74-4ebc-8d74-17bf06ff2d48", "metadata": {}, "source": [ - "This notebook is divided into `five` parts:\n", + "This notebook is divided into `six` parts:\n", "\n", + "
Part 0 : Package Installations.\n", + "
\n", "
Part 1 : Introduction to LSTMs.\n", "
\n", "
Part 2 : Model creation with brevitas QuantLSTM layer. \n", @@ -28,16 +28,45 @@ }, { "cell_type": "markdown", - "id": "69ae7154-8cf3-4ee7-88c3-3bec0550008a", + "metadata": {}, + "source": [ + "# Package Installations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Required package installations, This cell only needs to be executed once at the start\n", + "!pip install torch==1.13.1\n", + "!pip install brevitas==0.9.1\n", + "!pip install onnx==1.13.0\n", + "!pip install onnxoptimizer==0.3.13\n", + "!pip install onnxruntime==1.11.1\n", + "!pip install netron==7.2.5\n", + "!pip install qonnx==0.2.0\n", + "!pip install IPython\n", + "!pip install ipykernel\n", + "!ipython kernel install --user --name=venv\n", + "\n", + "#The below location can change depending on your installation of the 'venv' virtual environment\n", + "!cp ./4_quant_lstm_helper/function.py ../venv/lib/python3.8/site-packages/brevitas/export/onnx/standard/\n", + "!cp ./4_quant_lstm_helper/handler.py ../venv/lib/python3.8/site-packages/brevitas/export/onnx/standard/qcdq/\n", + "\n", + "#NOTE : Make sure to chnage the kernel to from \"Python 3\" to \"venv\" before running the below commands" + ] + }, + { + "cell_type": "markdown", "metadata": {}, "source": [ "# Introduction to LSTM's " ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "e7a903ef-1680-4a20-8c61-267884b76c96", "metadata": {}, "source": [ "`LSTM’s (Long Short-Term Memory)` are sequential neural networks that are capable of learning long term dependencies especially in sequence prediction problems. 
They are deployed in machine translation, speech recognition, image captioning and especially used for time-series analysis applications.\n", @@ -73,7 +102,6 @@ }, { "cell_type": "markdown", - "id": "70d052c8-e5cd-4eb1-89e5-f8ae956cb853", "metadata": {}, "source": [ "# QuantLSTM model creation" @@ -81,7 +109,6 @@ }, { "cell_type": "markdown", - "id": "6a64be7c", "metadata": {}, "source": [ "In the 2nd part of the notebook, we will create a single layer `QuantLSTM` model in brevitas. We will evaluate with a given set of inputs. We then export this model to `QONNX` so that the same parameters (weights/biases/scales) can be extracted and used in the `QCDQ-LSTM` implementation." @@ -90,7 +117,6 @@ { "cell_type": "code", "execution_count": null, - "id": "84d66548-365d-46a5-9eaa-bb767085f9aa", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +145,6 @@ { "cell_type": "code", "execution_count": null, - "id": "23a7682c", "metadata": {}, "outputs": [], "source": [ @@ -153,7 +178,6 @@ }, { "cell_type": "markdown", - "id": "347ef1f5-36e8-4103-9b13-efa7fe93eb5e", "metadata": {}, "source": [ "`Abbreviations` : Short-forms defined in the next code block can be referenced here for definitions.\n", @@ -166,7 +190,6 @@ { "cell_type": "code", "execution_count": null, - "id": "0bfbf5a3-8556-4190-a28f-4fe9859c55a9", "metadata": {}, "outputs": [], "source": [ @@ -210,7 +233,6 @@ }, { "cell_type": "markdown", - "id": "10237589-f84e-423a-829e-3e2c2e806ed7", "metadata": {}, "source": [ "# LSTM ONNX model" @@ -218,7 +240,6 @@ }, { "cell_type": "markdown", - "id": "367547b8", "metadata": {}, "source": [ "In the 3rd part of the notebook, we will construct the `QCDQ-LSTM` model with standard ONNX operators. 
After loading all the parameters in the above block we can now start building our ONNX model with QCDQ quantization to represent the LSTM computations described in part-1.\n" @@ -227,7 +248,6 @@ { "cell_type": "code", "execution_count": null, - "id": "02fe4d94-af24-4d5e-a809-7d8c49e7fd90", "metadata": {}, "outputs": [], "source": [ @@ -249,7 +269,6 @@ }, { "cell_type": "markdown", - "id": "15098a9e-4187-4987-82cc-275eba650923", "metadata": {}, "source": [ "`Abbreviations` : These describe different short-forms used in the next two blocks.\n", @@ -265,7 +284,6 @@ }, { "cell_type": "markdown", - "id": "f2edc0cc", "metadata": {}, "source": [ "We start defining the model by defining the `inputs` and `outputs` defined as value_info tensors in ONNX.\n", @@ -276,7 +294,6 @@ { "cell_type": "code", "execution_count": null, - "id": "02761646-4c6d-440f-8e90-4935beebab56", "metadata": {}, "outputs": [], "source": [ @@ -294,7 +311,6 @@ { "cell_type": "code", "execution_count": null, - "id": "c08e5a23-ef2e-4bca-9293-c800350c2c62", "metadata": {}, "outputs": [], "source": [ @@ -412,7 +428,6 @@ }, { "cell_type": "markdown", - "id": "3d10867f", "metadata": {}, "source": [ "After defining the above operations we now connect them and create a graph with the help of onnx.helper `make_graph` utility function" @@ -421,7 +436,6 @@ { "cell_type": "code", "execution_count": null, - "id": "79839558-8752-4fc8-9b0e-8fed47c91701", "metadata": {}, "outputs": [], "source": [ @@ -632,7 +646,6 @@ }, { "cell_type": "markdown", - "id": "b1b16751", "metadata": {}, "source": [ "The above created graph can now be converted into a qonnx model with the `qonnx_make_model` utility. We save the model with `onnx.save` utility and then view it in Netron with the help of `showInNetron` utility. 
\n" @@ -641,7 +654,6 @@ { "cell_type": "code", "execution_count": null, - "id": "c6ec7b2a-456d-4452-97ec-df9a471d5391", "metadata": {}, "outputs": [], "source": [ @@ -652,7 +664,6 @@ }, { "cell_type": "markdown", - "id": "40b49257", "metadata": {}, "source": [ "In this block of code we execute the onnx graph to check that it can execute without any errors. We perform it's functional verification in the later part of the notebook." @@ -661,7 +672,6 @@ { "cell_type": "code", "execution_count": null, - "id": "db5892bc-ac8d-4972-afcf-20bf880f5e86", "metadata": {}, "outputs": [], "source": [ @@ -691,7 +701,6 @@ }, { "cell_type": "markdown", - "id": "5d2b5a1e-654e-46a5-9d4f-8708611a6d1e", "metadata": {}, "source": [ "# SCAN Operation Integration" @@ -699,7 +708,6 @@ }, { "cell_type": "markdown", - "id": "7365329a-f3d2-4f74-8e2f-9076771e07a7", "metadata": {}, "source": [ "### Introduction to ONNX Scan operation\n", @@ -721,7 +729,6 @@ }, { "cell_type": "markdown", - "id": "17f247f7", "metadata": {}, "source": [ "The `Scan` operation is essentially a container operator which will consume the LSTM graph that we created above in it's body.\n", @@ -733,7 +740,6 @@ { "cell_type": "code", "execution_count": null, - "id": "700a93a8-f757-4fa1-88dd-47a3f2a7f171", "metadata": {}, "outputs": [], "source": [ @@ -750,7 +756,6 @@ }, { "cell_type": "markdown", - "id": "572f191e", "metadata": {}, "source": [ "We will now create the scan operator here now utilizing the `make_node` utility from ONNX.\n", @@ -760,7 +765,6 @@ { "cell_type": "code", "execution_count": null, - "id": "111fdce4-464f-40c1-ac4d-3022b05f153e", "metadata": {}, "outputs": [], "source": [ @@ -775,7 +779,6 @@ }, { "cell_type": "markdown", - "id": "ea8a05d9", "metadata": {}, "source": [ "We can now define the graph for the scan operator utilizing the `make_graph` utility." 
@@ -784,7 +787,6 @@ { "cell_type": "code", "execution_count": null, - "id": "4668cf2b-524e-4768-8dc8-9d619f6273da", "metadata": {}, "outputs": [], "source": [ @@ -810,7 +812,6 @@ }, { "cell_type": "markdown", - "id": "0673e335", "metadata": {}, "source": [ "Now that we have the SCAN based quantized LSTM model ready, we can now go forward and test it with the same sets of inputs we used for the testing of the brevitas model.\n" @@ -819,7 +820,6 @@ { "cell_type": "code", "execution_count": null, - "id": "818d2a81-686f-4a4a-8e78-17dbf75d8451", "metadata": {}, "outputs": [], "source": [ @@ -854,7 +854,6 @@ }, { "cell_type": "markdown", - "id": "907d2ff9-f605-4aec-891e-0c77a1a92346", "metadata": {}, "source": [ "# Functional Verification" @@ -862,7 +861,6 @@ }, { "cell_type": "markdown", - "id": "b6bb6c60", "metadata": {}, "source": [ "In the final part of the notebook, we compare the output of the 8-bit quantized `(QCDQ)-LSTM` implementation with the `QuantLSTM` brevitas model.\n" @@ -871,7 +869,6 @@ { "cell_type": "code", "execution_count": null, - "id": "2fe07395-6cf9-4c99-a0d3-a27aa6a326b5", "metadata": {}, "outputs": [], "source": [ @@ -900,7 +897,6 @@ }, { "cell_type": "markdown", - "id": "7bcca933", "metadata": {}, "source": [ "Note the difference in outputs increases as we progress with processing the inputs. 
The first two outputs are very close to one another, but as we get the outputs for more inputs we see for some values differ from the brevitas output by a considerable amount.\n", @@ -909,16 +905,15 @@ }, { "cell_type": "markdown", - "id": "81c6d531", "metadata": {}, "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "venv", "language": "python", - "name": "python3" + "name": "venv" }, "language_info": { "codemirror_mode": { @@ -930,7 +925,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.8.0" } }, "nbformat": 4, diff --git a/notebooks/4_quant_lstm_helper/function.py b/notebooks/4_quant_lstm_helper/function.py new file mode 100644 index 00000000..6ba2e9dd --- /dev/null +++ b/notebooks/4_quant_lstm_helper/function.py @@ -0,0 +1,340 @@ +# Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause + +import torch +from torch.autograd import Function + +from brevitas.export.onnx import onnx_export_opset + +AXIS_OPSET = 13 +DOMAIN_STRING = "onnx.brevitas" + + +class DequantizeLinearFn(Function): + + @staticmethod + def symbolic(g, x, input_scale, input_zero_point, input_axis): + opset_version = onnx_export_opset() + + if input_axis is not None and opset_version < AXIS_OPSET: + raise RuntimeError('ONNX Opset 13 is required for per-channel quantization') + elif input_axis is not None and opset_version >= AXIS_OPSET: + ret = g.op('DequantizeLinear', x, input_scale, input_zero_point, axis_i=input_axis) + else: + ret = g.op('DequantizeLinear', x, input_scale, input_zero_point) + return ret + + @staticmethod + def forward(ctx, int_x, input_scale, input_zero_point, input_axis): + return int_x.float() + + +class IntClipFn(Function): + + @staticmethod + def symbolic(g, int_x, min_int_val, max_int_val): + ret = g.op('Clip', int_x, min_int_val, max_int_val) + return ret + + @staticmethod + def 
forward(ctx, int_x, min_int_val, max_int_val): + return int_x + + +class QuantizeLinearFn(Function): + + @staticmethod + def symbolic(g, x, output_scale, ouput_zero_point, output_dtype, output_axis): + opset_version = onnx_export_opset() + + if output_axis is not None and opset_version < AXIS_OPSET: + raise RuntimeError('ONNX Opset 13 is required for per-channel quantization') + elif output_axis is not None and opset_version >= AXIS_OPSET: + ret = g.op('QuantizeLinear', x, output_scale, ouput_zero_point, axis_i=output_axis) + else: + ret = g.op('QuantizeLinear', x, output_scale, ouput_zero_point) + return ret + + @staticmethod + def forward(ctx, x, output_scale, ouput_zero_point, output_dtype, output_axis): + return x.type(output_dtype) + +class BrevitasQuantLSTMCellFn(Function): + + + @staticmethod + def symbolic( + g, # args and kwargs passed from _QuantLSTMLayer + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, # Symbolic kwargs passed from BrevitasQuantLSTMLayerHandler + batch_first, + reverse_input, + cifg, # Output quant + output_scale, + output_zero_point, + output_bit_width, + output_narrow_range, + output_signed, + output_rounding_mode, # Cell state quant + cell_state_scale, + cell_state_zero_point, + cell_state_bit_width, + cell_state_narrow_range, + cell_state_signed, + cell_state_rounding_mode, # Input gate accumulator quant + input_acc_scale, + input_acc_zero_point, + input_acc_bit_width, + input_acc_narrow_range, + input_acc_signed, + input_acc_rounding_mode, # Forget gate accumulator quant + forget_acc_scale, + forget_acc_zero_point, + forget_acc_bit_width, + forget_acc_narrow_range, + forget_acc_signed, + forget_acc_rounding_mode, # Cell gate accumulator quant + cell_acc_scale, + cell_acc_zero_point, + 
cell_acc_bit_width, + cell_acc_narrow_range, + cell_acc_signed, + cell_acc_rounding_mode, # Output gate accumulator quant + output_acc_scale, + output_acc_zero_point, + output_acc_bit_width, + output_acc_narrow_range, + output_acc_signed, + output_acc_rounding_mode, # Input gate sigmoid quant + input_sigmoid_scale, + input_sigmoid_zero_point, + input_sigmoid_bit_width, + input_sigmoid_narrow_range, + input_sigmoid_signed, + input_sigmoid_rounding_mode, # Forget gate sigmoid quant + forget_sigmoid_scale, + forget_sigmoid_zero_point, + forget_sigmoid_bit_width, + forget_sigmoid_narrow_range, + forget_sigmoid_signed, + forget_sigmoid_rounding_mode, # Cell gate tanh quant + cell_tanh_scale, + cell_tanh_zero_point, + cell_tanh_bit_width, + cell_tanh_narrow_range, + cell_tanh_signed, + cell_tanh_rounding_mode, # Output gate sigmoid quant + output_sigmoid_scale, + output_sigmoid_zero_point, + output_sigmoid_bit_width, + output_sigmoid_narrow_range, + output_sigmoid_signed, + output_sigmoid_rounding_mode, # Hidden state tanh quant + hidden_state_tanh_scale, + hidden_state_tanh_zero_point, + hidden_state_tanh_bit_width, + hidden_state_tanh_narrow_range, + hidden_state_tanh_signed, + hidden_state_tanh_rounding_mode): + return g.op( + f'{DOMAIN_STRING}::QuantLSTMCell', # Tensors + ## Input values + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, ## Output quant + output_scale, + output_zero_point, + output_bit_width, ## Cell state quant + cell_state_scale, + cell_state_zero_point, + cell_state_bit_width, ## Input gate accumulator quant + input_acc_scale, + input_acc_zero_point, + input_acc_bit_width, ## Forget gate accumulator quant + forget_acc_scale, + forget_acc_zero_point, + forget_acc_bit_width, ## Cell gate accumulator quant + 
cell_acc_scale, + cell_acc_zero_point, + cell_acc_bit_width, ## Output gate accumulator quant + output_acc_scale, + output_acc_zero_point, + output_acc_bit_width, ## Input gate sigmoid quant + input_sigmoid_scale, + input_sigmoid_zero_point, + input_sigmoid_bit_width, ## Forget gate sigmoid quant + forget_sigmoid_scale, + forget_sigmoid_zero_point, + forget_sigmoid_bit_width, ## Cell gate tanh quant + cell_tanh_scale, + cell_tanh_zero_point, + cell_tanh_bit_width, ## Output gate sigmoid quant + output_sigmoid_scale, + output_sigmoid_zero_point, + output_sigmoid_bit_width, ## Hidden state tanh quant + hidden_state_tanh_scale, + hidden_state_tanh_zero_point, + hidden_state_tanh_bit_width, + # Attributes + batch_first_i=batch_first, + reverse_input_i=reverse_input, + cifg_i=cifg, + output_narrow_i=output_narrow_range, + output_signed_i=output_signed, + output_rounding_mode_s=output_rounding_mode, + cell_state_narrow_i=cell_state_narrow_range, + cell_state_signed_i=cell_state_signed, + cell_state_rounding_mode_s=cell_state_rounding_mode, + input_acc_narrow_i=input_acc_narrow_range, + input_acc_signed_i=input_acc_signed, + input_acc_rounding_mode_s=input_acc_rounding_mode, + forget_acc_narrow_i=forget_acc_narrow_range, + forget_acc_signed_i=forget_acc_signed, + forget_acc_rounding_mode_s=forget_acc_rounding_mode, + cell_acc_narrow_i=cell_acc_narrow_range, + cell_acc_signed_i=cell_acc_signed, + cell_acc_rounding_mode_s=cell_acc_rounding_mode, + output_acc_narrow_i=output_acc_narrow_range, + output_acc_signed_i=output_acc_signed, + output_acc_rounding_mode_s=output_acc_rounding_mode, + input_sigmoid_narrow_i=input_sigmoid_narrow_range, + input_sigmoid_signed_i=input_sigmoid_signed, + input_sigmoid_rounding_mode_s=input_sigmoid_rounding_mode, + forget_sigmoid_narrow_i=forget_sigmoid_narrow_range, + forget_sigmoid_signed_i=forget_sigmoid_signed, + forget_sigmoid_rounding_mode_s=forget_sigmoid_rounding_mode, + cell_tanh_narrow_i=cell_tanh_narrow_range, + 
cell_tanh_signed_i=cell_tanh_signed, + cell_tanh_rounding_mode_s=cell_tanh_rounding_mode, + output_sigmoid_narrow_range_i=output_sigmoid_narrow_range, + output_sigmoid_signed_i=output_sigmoid_signed, + output_sigmoid_rounding_mode_s=output_sigmoid_rounding_mode, + hidden_state_tanh_narrow_i=hidden_state_tanh_narrow_range, + hidden_state_tanh_signed_i=hidden_state_tanh_signed, + hidden_state_tanh_rounding_mode_s=hidden_state_tanh_rounding_mode, + # PyTorch requires to specify the number of outputs manually + outputs=3) + + + @staticmethod + def forward( + ctx, # args and kwargs passed from _QuantLSTMLayer + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, # Symbolic kwargs passed from BrevitasQuantLSTMLayerHandler + batch_first, + reverse_input, + cifg, # Output quant + output_scale, + output_zero_point, + output_bit_width, + output_narrow_range, + output_signed, + output_rounding_mode, # Cell state quant + cell_state_scale, + cell_state_zero_point, + cell_state_bit_width, + cell_state_narrow_range, + cell_state_signed, + cell_state_rounding_mode, # Input gate accumulator quant + input_acc_scale, + input_acc_zero_point, + input_acc_bit_width, + input_acc_narrow_range, + input_acc_signed, + input_acc_rounding_mode, # Forget gate accumulator quant + forget_acc_scale, + forget_acc_zero_point, + forget_acc_bit_width, + forget_acc_narrow_range, + forget_acc_signed, + forget_acc_rounding_mode, # Cell gate accumulator quant + cell_acc_scale, + cell_acc_zero_point, + cell_acc_bit_width, + cell_acc_narrow_range, + cell_acc_signed, + cell_acc_rounding_mode, # Output gate accumulator quant + output_acc_scale, + output_acc_zero_point, + output_acc_bit_width, + output_acc_narrow_range, + output_acc_signed, + output_acc_rounding_mode, # Input 
gate sigmoid quant + input_sigmoid_scale, + input_sigmoid_zero_point, + input_sigmoid_bit_width, + input_sigmoid_narrow_range, + input_sigmoid_signed, + input_sigmoid_rounding_mode, # Forget gate sigmoid quant + forget_sigmoid_scale, + forget_sigmoid_zero_point, + forget_sigmoid_bit_width, + forget_sigmoid_narrow_range, + forget_sigmoid_signed, + forget_sigmoid_rounding_mode, # Cell gate tanh quant + cell_tanh_scale, + cell_tanh_zero_point, + cell_tanh_bit_width, + cell_tanh_narrow_range, + cell_tanh_signed, + cell_tanh_rounding_mode, # Output gate sigmoid quant + output_sigmoid_scale, + output_sigmoid_zero_point, + output_sigmoid_bit_width, + output_sigmoid_narrow_range, + output_sigmoid_signed, + output_sigmoid_rounding_mode, # Hidden state tanh quant + hidden_state_tanh_scale, + hidden_state_tanh_zero_point, + hidden_state_tanh_bit_width, + hidden_state_tanh_narrow_range, + hidden_state_tanh_signed, + hidden_state_tanh_rounding_mode): + # Tp simplify things, here we are returning the outputs + # as if they were already concatenated. Scale/zp/bw are avoided too. + # This preserves output shapes but not values. + # See _QuantLSTMCell for the actual implementation. + quant_outputs = torch.zeros( + quant_input.size(0), + quant_input.size(1), + quant_hidden_state.size(1), + device=quant_hidden_state.device) + return quant_outputs, quant_hidden_state, quant_cell_state diff --git a/notebooks/4_quant_lstm_helper/handler.py b/notebooks/4_quant_lstm_helper/handler.py new file mode 100644 index 00000000..948eb647 --- /dev/null +++ b/notebooks/4_quant_lstm_helper/handler.py @@ -0,0 +1,140 @@ +# Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved. 
+# SPDX-License-Identifier: BSD-3-Clause + +from abc import ABC +from copy import copy + +import torch +from torch import Tensor + +from brevitas.export.common.handler.base import QuantAxisMixin +from brevitas.export.common.handler.qcdq import DQMixin +from brevitas.export.common.handler.qcdq import QCDQActQuantProxyHandlerMixin +from brevitas.export.common.handler.qcdq import QCDQBiasQuantProxyHandlerMixin +from brevitas.export.common.handler.qcdq import QCDQDecoupledWeightQuantProxyHandlerMixin +from brevitas.export.common.handler.qcdq import QCDQMixin +from brevitas.export.common.handler.qcdq import QCDQTruncQuantProxyHandlerMixin +from brevitas.export.common.handler.qcdq import QCDQWeightQuantProxyHandlerMixin +from brevitas.export.common.handler.qcdq import ZeroPointHandlerMixin +from brevitas.export.onnx.handler import ONNXBaseHandler +from brevitas.export.onnx.handler import QuantLSTMLayerHandler + +from ..function import DequantizeLinearFn +from ..function import IntClipFn +from ..function import QuantizeLinearFn +from ..function import BrevitasQuantLSTMCellFn + + +class StdDQONNXMixin(DQMixin, ABC): + + def dequantize_fn(self, x, scale, zero_point, axis): + return DequantizeLinearFn.apply(x, scale, zero_point, axis) + + @property + def flatten_dequantize_params(self): + return True + + @property + def itemize_quantize_scalar_params(self): + return False + + +class StdQCDQONNXMixin(QCDQMixin, StdDQONNXMixin, ABC): + + @property + def clip_over_integers(self): + return True + + @classmethod + def int8_dtype(cls): + return torch.int8 + + @classmethod + def uint8_dtype(cls): + return torch.uint8 + + @classmethod + def int32_dtype(cls): + return torch.int32 + + def validate(self, module): + self.validate_8b_bit_width(module.bit_width(), le_then=True) + assert module.bit_width() > 1., 'Binary quant not supported' + assert module.rounding_mode.upper() == 'ROUND', 'Only round to nearest even supported' + + def quantize_fn(self, x, scale, zero_point, dtype, axis): 
+ return QuantizeLinearFn.apply(x, scale, zero_point, dtype, axis) + + def clip_fn(self, x, min_val, max_val): + return IntClipFn.apply(x, min_val, max_val) + + +class StdQCDQONNXWeightQuantProxyHandler(StdQCDQONNXMixin, + QCDQWeightQuantProxyHandlerMixin, + ONNXBaseHandler): + pass + + +class StdQCDQONNXDecoupledWeightQuantProxyHandler(StdQCDQONNXMixin, + QCDQDecoupledWeightQuantProxyHandlerMixin, + ONNXBaseHandler): + pass + + +class StdQCDQONNXActQuantProxyHandler(StdQCDQONNXMixin, + QCDQActQuantProxyHandlerMixin, + ONNXBaseHandler): + pass + + +class StdQCDQONNXBiasQuantProxyHandler(StdDQONNXMixin, + QCDQBiasQuantProxyHandlerMixin, + ONNXBaseHandler): + pass + + +class StdQCDQONNXTruncQuantProxyHandler(StdQCDQONNXMixin, + QCDQTruncQuantProxyHandlerMixin, + ONNXBaseHandler): + pass + + +class StdQCDQONNXQuantLSTMLayerHandler(QuantLSTMLayerHandler): + + def quantized_cell_symbolic_execution( + self, + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output): + return BrevitasQuantLSTMCellFn.apply( + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, + *self.symbolic_kwargs.values()) + # raise RuntimeError( + # "Quantized LSTM cell is not supported for ONNX QCDQ " + # "(weights only quantization is). 
Use export_qonnx.") From 30b838becdf8666b636c240dbd858c1fbc01458e Mon Sep 17 00:00:00 2001 From: i-colbert Date: Thu, 7 Mar 2024 12:59:41 -0800 Subject: [PATCH 45/83] Fix (tests): removing generated onnx graph --- tests/transformation/test_nn_resize_to_deconv.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/transformation/test_nn_resize_to_deconv.py b/tests/transformation/test_nn_resize_to_deconv.py index c6d54e02..64e31e2b 100644 --- a/tests/transformation/test_nn_resize_to_deconv.py +++ b/tests/transformation/test_nn_resize_to_deconv.py @@ -155,7 +155,6 @@ def create_nn_resize_conv_model( model.set_initializer("W", np.random.rand(*conv_param_shape).astype(np.float32)) if bias: model.set_initializer("B", np.random.rand(*bias_param_shape).astype(np.float32)) - model.save("cnv-graph.onnx") model = model.transform(InferShapes()) check_model(model._model_proto) return model From 8d66e8935c32fb084a3da14787bbf24476fdaefb Mon Sep 17 00:00:00 2001 From: i-colbert Date: Thu, 7 Mar 2024 13:29:58 -0800 Subject: [PATCH 46/83] Fix (util): centralizing auto pad utility --- .../transformation/lower_convs_to_matmul.py | 22 ++----------------- .../transformation/resize_conv_to_deconv.py | 21 ++---------------- .../transformation/subpixel_to_deconv.py | 21 ++---------------- src/qonnx/util/basic.py | 17 ++++++++++++++ 4 files changed, 23 insertions(+), 58 deletions(-) diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index 79e1f3f2..89fa3f0e 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -32,25 +32,7 @@ from qonnx.transformation.base import Transformation from qonnx.transformation.extract_conv_bias import ExtractBiasFromConv -from qonnx.util.basic import get_by_name - - -def _auto_pad_to_explicit_padding(autopad_str, idim_h, idim_w, k_h, k_w, stride_h, stride_w, n_dims): - pad_total_h = (stride_h - 1) * idim_h - stride_h + k_h - 
pad_total_w = (stride_w - 1) * idim_w - stride_w + k_w - pad_half_small_h = int((pad_total_h / 2)) - pad_half_small_w = int((pad_total_w / 2)) - pad_half_large_h = pad_total_h - pad_half_small_h - pad_half_large_w = pad_total_w - pad_half_small_w - if autopad_str == "VALID": - return [0 for i in range(2 * n_dims)] - elif autopad_str == "SAME_UPPER": - return [pad_half_small_h, pad_half_small_w, pad_half_large_h, pad_half_large_w] - elif autopad_str == "SAME_LOWER": - return [pad_half_large_h, pad_half_large_w, pad_half_small_h, pad_half_small_w] - else: - raise Exception("Unsupported auto_pad: " + autopad_str) - +from qonnx.util.basic import get_by_name, auto_pad_to_explicit_padding class LowerConvsToMatMul(Transformation): """Replace Conv layers with pairs of Im2Col-MatMul layers, plus Transpose @@ -100,7 +82,7 @@ def apply(self, model): # use specified padding pad = get_by_name(n.attribute, "pads").ints else: - pad = _auto_pad_to_explicit_padding( + pad = auto_pad_to_explicit_padding( auto_pad, ifm_dim_h, ifm_dim_w, diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py index 885103bf..42146463 100644 --- a/src/qonnx/transformation/resize_conv_to_deconv.py +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -33,7 +33,7 @@ from qonnx.core.datatype import DataType from qonnx.custom_op.general.quant import quant from qonnx.transformation.base import Transformation -from qonnx.util.basic import get_by_name +from qonnx.util.basic import get_by_name, auto_pad_to_explicit_padding def _weight_convolution(cnv_weights: np.ndarray, scale: int) -> np.ndarray: @@ -55,23 +55,6 @@ def _weight_convolution(cnv_weights: np.ndarray, scale: int) -> np.ndarray: return dcnv_weights -def _auto_pad_to_explicit_padding(autopad_str, idim_h, idim_w, k_h, k_w, stride_h, stride_w, n_dims): - pad_total_h = (stride_h - 1) * idim_h - stride_h + k_h - pad_total_w = (stride_w - 1) * idim_w - stride_w + k_w - pad_half_small_h = 
int((pad_total_h / 2)) - pad_half_small_w = int((pad_total_w / 2)) - pad_half_large_h = pad_total_h - pad_half_small_h - pad_half_large_w = pad_total_w - pad_half_small_w - if autopad_str == "VALID": - return [0 for i in range(2 * n_dims)] - elif autopad_str == "SAME_UPPER": - return [pad_half_small_h, pad_half_small_w, pad_half_large_h, pad_half_large_w] - elif autopad_str == "SAME_LOWER": - return [pad_half_large_h, pad_half_large_w, pad_half_small_h, pad_half_small_w] - else: - raise Exception("Unsupported auto_pad: " + autopad_str) - - class ResizeConvolutionToDeconvolution(Transformation): """Replaces resize convolution layers (e.g., nearest neighbor upsample + same-padded convolution) with deconvolution layers using the weight convolution algorithm. Currently does not support @@ -189,7 +172,7 @@ def apply(self, model): # use specified padding pad = get_by_name(conv.attribute, "pads").ints else: - pad = _auto_pad_to_explicit_padding( + pad = auto_pad_to_explicit_padding( auto_pad, ifm_dim_h, ifm_dim_w, diff --git a/src/qonnx/transformation/subpixel_to_deconv.py b/src/qonnx/transformation/subpixel_to_deconv.py index e243261f..eed5ced4 100644 --- a/src/qonnx/transformation/subpixel_to_deconv.py +++ b/src/qonnx/transformation/subpixel_to_deconv.py @@ -31,7 +31,7 @@ from onnx import helper from qonnx.transformation.base import Transformation -from qonnx.util.basic import get_by_name +from qonnx.util.basic import get_by_name, auto_pad_to_explicit_padding def _weight_shuffle(cnv_weights: np.ndarray, block_size: int) -> np.ndarray: @@ -62,23 +62,6 @@ def _weight_shuffle(cnv_weights: np.ndarray, block_size: int) -> np.ndarray: return dcnv_weights -def _auto_pad_to_explicit_padding(autopad_str, idim_h, idim_w, k_h, k_w, stride_h, stride_w, n_dims): - pad_total_h = (stride_h - 1) * idim_h - stride_h + k_h - pad_total_w = (stride_w - 1) * idim_w - stride_w + k_w - pad_half_small_h = int((pad_total_h / 2)) - pad_half_small_w = int((pad_total_w / 2)) - pad_half_large_h = 
pad_total_h - pad_half_small_h - pad_half_large_w = pad_total_w - pad_half_small_w - if autopad_str == "VALID": - return [0 for i in range(2 * n_dims)] - elif autopad_str == "SAME_UPPER": - return [pad_half_small_h, pad_half_small_w, pad_half_large_h, pad_half_large_w] - elif autopad_str == "SAME_LOWER": - return [pad_half_large_h, pad_half_large_w, pad_half_small_h, pad_half_small_w] - else: - raise Exception("Unsupported auto_pad: " + autopad_str) - - class SubPixelToDeconvolution(Transformation): """Replaces sub-pixel convolution layers (i.e., same-padded convolution + depth2space) with deconvolution layers using the weight shuffle algorithm. Currently does not support @@ -181,7 +164,7 @@ def apply(self, model): # use specified padding pad = get_by_name(n.attribute, "pads").ints else: - pad = _auto_pad_to_explicit_padding( + pad = auto_pad_to_explicit_padding( auto_pad, ifm_dim_h, ifm_dim_w, diff --git a/src/qonnx/util/basic.py b/src/qonnx/util/basic.py index b775a3ba..1ddc9b6a 100644 --- a/src/qonnx/util/basic.py +++ b/src/qonnx/util/basic.py @@ -321,3 +321,20 @@ def sanitize_quant_values(model, node_tensors, execution_context, check_values=F ) ) return execution_context + + +def auto_pad_to_explicit_padding(autopad_str, idim_h, idim_w, k_h, k_w, stride_h, stride_w, n_dims): + pad_total_h = (stride_h - 1) * idim_h - stride_h + k_h + pad_total_w = (stride_w - 1) * idim_w - stride_w + k_w + pad_half_small_h = int((pad_total_h / 2)) + pad_half_small_w = int((pad_total_w / 2)) + pad_half_large_h = pad_total_h - pad_half_small_h + pad_half_large_w = pad_total_w - pad_half_small_w + if autopad_str == "VALID": + return [0 for i in range(2 * n_dims)] + elif autopad_str == "SAME_UPPER": + return [pad_half_small_h, pad_half_small_w, pad_half_large_h, pad_half_large_w] + elif autopad_str == "SAME_LOWER": + return [pad_half_large_h, pad_half_large_w, pad_half_small_h, pad_half_small_w] + else: + raise Exception("Unsupported auto_pad: " + autopad_str) \ No newline at end of 
file From bf1840ddea17e334d9069fb9eb22f276c8c6d67f Mon Sep 17 00:00:00 2001 From: i-colbert Date: Thu, 7 Mar 2024 13:49:44 -0800 Subject: [PATCH 47/83] Fix: adding check and warning for dimension mismatch --- src/qonnx/transformation/resize_conv_to_deconv.py | 15 +++++++++------ src/qonnx/transformation/subpixel_to_deconv.py | 15 +++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py index 42146463..8c658774 100644 --- a/src/qonnx/transformation/resize_conv_to_deconv.py +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -124,12 +124,15 @@ def apply(self, model): continue kshape = get_by_name(conv.attribute, "kernel_shape").ints - ifm_ch = model.get_tensor_shape(conv.input[0])[1] # assume NCHW - ofm_ch = model.get_tensor_shape(conv.output[0])[1] # assume NCHW - ifm_dim_h = model.get_tensor_shape(conv.input[0])[2] # assume NCHW - ifm_dim_w = model.get_tensor_shape(conv.input[0])[3] # assume NCHW - ofm_dim_h = model.get_tensor_shape(conv.output[0])[2] # assume NCHW - ofm_dim_w = model.get_tensor_shape(conv.output[0])[3] + idim = model.get_tensor_shape(conv.input[0]) # require NCHW + odim = model.get_tensor_shape(conv.output[0]) # require NCHW + if not (len(odim) == len(idim) == 4): + warnings.warn("Skipping resize conv, only 2D convolutions supported.") + continue + + [_, ifm_ch, ifm_dim_h, ifm_dim_w] = idim + [_, ofm_ch, ofm_dim_h, ofm_dim_w] = odim + if (ifm_dim_h != ofm_dim_h) or (ifm_dim_w != ofm_dim_w): warnings.warn("Skipping resize conv, only same-padded convs supported.") continue diff --git a/src/qonnx/transformation/subpixel_to_deconv.py b/src/qonnx/transformation/subpixel_to_deconv.py index eed5ced4..7198355c 100644 --- a/src/qonnx/transformation/subpixel_to_deconv.py +++ b/src/qonnx/transformation/subpixel_to_deconv.py @@ -120,12 +120,15 @@ def apply(self, model): continue kshape = get_by_name(n.attribute, 
"kernel_shape").ints - ifm_ch = model.get_tensor_shape(n.input[0])[1] # assume NCHW - ofm_ch = model.get_tensor_shape(n.output[0])[1] # assume NCHW - ifm_dim_h = model.get_tensor_shape(n.input[0])[2] # assume NCHW - ifm_dim_w = model.get_tensor_shape(n.input[0])[3] # assume NCHW - ofm_dim_h = model.get_tensor_shape(n.output[0])[2] # assume NCHW - ofm_dim_w = model.get_tensor_shape(n.output[0])[3] + idim = model.get_tensor_shape(n.input[0]) # require NCHW + odim = model.get_tensor_shape(n.output[0]) # require NCHW + if not (len(odim) == len(idim) == 4): + warnings.warn("Skipping sub-pixel conv, only 2D convolutions supported.") + continue + + [_, ifm_ch, ifm_dim_h, ifm_dim_w] = idim + [_, ofm_ch, ofm_dim_h, ofm_dim_w] = odim + if (ifm_dim_h != ofm_dim_h) or (ifm_dim_w != ofm_dim_w): warnings.warn("Skipping sub-pixel conv, only same-padded convs supported.") continue From fd834aad8d510d2be85ed6d0cf5b3289474761d8 Mon Sep 17 00:00:00 2001 From: i-colbert Date: Thu, 7 Mar 2024 14:18:58 -0800 Subject: [PATCH 48/83] Fix: handling case where bias is generated from another node --- src/qonnx/transformation/resize_conv_to_deconv.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py index 8c658774..44232119 100644 --- a/src/qonnx/transformation/resize_conv_to_deconv.py +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -217,9 +217,13 @@ def apply(self, model): # Make sure to keep the biases from the convolution if len(conv.input) == 3: bias_name = conv.input[2] - B_conv = model.get_initializer(bias_name) # (OC,) + bias_prod = model.find_producer(bias_name) + # If the producer is None, then it is initialized by the Conv node + # and we need to ensure it isn't removed with the Conv node + if bias_prod is None: + B_conv = model.get_initializer(bias_name) # (OC,) + model.set_initializer(bias_name, B_conv) deconv_inps.append(bias_name) # add to the 
inputs - model.set_initializer(bias_name, B_conv) deconv_outs = conv.output deconv_pad = pad deconv_node = helper.make_node( From 29e684325048f5b0e2785ab540e3f8914390d005 Mon Sep 17 00:00:00 2001 From: i-colbert Date: Thu, 7 Mar 2024 14:25:24 -0800 Subject: [PATCH 49/83] Pre-commit fixes --- src/qonnx/transformation/lower_convs_to_matmul.py | 3 ++- src/qonnx/transformation/resize_conv_to_deconv.py | 6 +++--- src/qonnx/transformation/subpixel_to_deconv.py | 6 +++--- src/qonnx/util/basic.py | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index 89fa3f0e..bf95d537 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -32,7 +32,8 @@ from qonnx.transformation.base import Transformation from qonnx.transformation.extract_conv_bias import ExtractBiasFromConv -from qonnx.util.basic import get_by_name, auto_pad_to_explicit_padding +from qonnx.util.basic import auto_pad_to_explicit_padding, get_by_name + class LowerConvsToMatMul(Transformation): """Replace Conv layers with pairs of Im2Col-MatMul layers, plus Transpose diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py index 44232119..8b7a9cae 100644 --- a/src/qonnx/transformation/resize_conv_to_deconv.py +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -33,7 +33,7 @@ from qonnx.core.datatype import DataType from qonnx.custom_op.general.quant import quant from qonnx.transformation.base import Transformation -from qonnx.util.basic import get_by_name, auto_pad_to_explicit_padding +from qonnx.util.basic import auto_pad_to_explicit_padding, get_by_name def _weight_convolution(cnv_weights: np.ndarray, scale: int) -> np.ndarray: @@ -124,8 +124,8 @@ def apply(self, model): continue kshape = get_by_name(conv.attribute, "kernel_shape").ints - idim = 
model.get_tensor_shape(conv.input[0]) # require NCHW - odim = model.get_tensor_shape(conv.output[0]) # require NCHW + idim = model.get_tensor_shape(conv.input[0]) # require NCHW + odim = model.get_tensor_shape(conv.output[0]) # require NCHW if not (len(odim) == len(idim) == 4): warnings.warn("Skipping resize conv, only 2D convolutions supported.") continue diff --git a/src/qonnx/transformation/subpixel_to_deconv.py b/src/qonnx/transformation/subpixel_to_deconv.py index 7198355c..3f330c99 100644 --- a/src/qonnx/transformation/subpixel_to_deconv.py +++ b/src/qonnx/transformation/subpixel_to_deconv.py @@ -31,7 +31,7 @@ from onnx import helper from qonnx.transformation.base import Transformation -from qonnx.util.basic import get_by_name, auto_pad_to_explicit_padding +from qonnx.util.basic import auto_pad_to_explicit_padding, get_by_name def _weight_shuffle(cnv_weights: np.ndarray, block_size: int) -> np.ndarray: @@ -120,8 +120,8 @@ def apply(self, model): continue kshape = get_by_name(n.attribute, "kernel_shape").ints - idim = model.get_tensor_shape(n.input[0]) # require NCHW - odim = model.get_tensor_shape(n.output[0]) # require NCHW + idim = model.get_tensor_shape(n.input[0]) # require NCHW + odim = model.get_tensor_shape(n.output[0]) # require NCHW if not (len(odim) == len(idim) == 4): warnings.warn("Skipping sub-pixel conv, only 2D convolutions supported.") continue diff --git a/src/qonnx/util/basic.py b/src/qonnx/util/basic.py index 1ddc9b6a..363aa501 100644 --- a/src/qonnx/util/basic.py +++ b/src/qonnx/util/basic.py @@ -337,4 +337,4 @@ def auto_pad_to_explicit_padding(autopad_str, idim_h, idim_w, k_h, k_w, stride_h elif autopad_str == "SAME_LOWER": return [pad_half_large_h, pad_half_large_w, pad_half_small_h, pad_half_small_w] else: - raise Exception("Unsupported auto_pad: " + autopad_str) \ No newline at end of file + raise Exception("Unsupported auto_pad: " + autopad_str) From 13906da2ecadb2df559f954a0b1b84d2571c00ae Mon Sep 17 00:00:00 2001 From: i-colbert 
Date: Sun, 10 Mar 2024 16:13:28 -0700 Subject: [PATCH 50/83] Fix: handling scalar vs. array scaling factor --- src/qonnx/transformation/resize_conv_to_deconv.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py index 8b7a9cae..9f9e0532 100644 --- a/src/qonnx/transformation/resize_conv_to_deconv.py +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -111,7 +111,8 @@ def apply(self, model): [q_w_name, q_s_name, q_zp_name, q_bw_name] = weight_prod.input W_conv = model.get_initializer(q_w_name) W_scale = model.get_initializer(q_s_name) - W_scale = np.moveaxis(W_scale, 0, 1) + if isinstance(W_scale, np.ndarray) and W_scale.ndim > 0: + W_scale = np.moveaxis(W_scale, 0, 1) W_zeropt = model.get_initializer(q_zp_name) W_bitwidth = model.get_initializer(q_bw_name) W_signed = get_by_name(weight_prod.attribute, "signed").i From b7eebaa75166550d0bbab81674c270b1d8a29d52 Mon Sep 17 00:00:00 2001 From: i-colbert Date: Mon, 11 Mar 2024 13:59:56 -0700 Subject: [PATCH 51/83] Fix: adding quant test cases and fixing bugs --- .../transformation/resize_conv_to_deconv.py | 30 +- .../test_nn_resize_to_deconv.py | 295 ++++++++++++++---- 2 files changed, 250 insertions(+), 75 deletions(-) diff --git a/src/qonnx/transformation/resize_conv_to_deconv.py b/src/qonnx/transformation/resize_conv_to_deconv.py index 9f9e0532..0dd40972 100644 --- a/src/qonnx/transformation/resize_conv_to_deconv.py +++ b/src/qonnx/transformation/resize_conv_to_deconv.py @@ -31,7 +31,7 @@ from onnx import helper from qonnx.core.datatype import DataType -from qonnx.custom_op.general.quant import quant +from qonnx.custom_op.general.quant import quant, resolve_rounding_mode from qonnx.transformation.base import Transformation from qonnx.util.basic import auto_pad_to_explicit_padding, get_by_name @@ -111,9 +111,11 @@ def apply(self, model): [q_w_name, q_s_name, q_zp_name, q_bw_name] = 
weight_prod.input W_conv = model.get_initializer(q_w_name) W_scale = model.get_initializer(q_s_name) - if isinstance(W_scale, np.ndarray) and W_scale.ndim > 0: + if isinstance(W_scale, np.ndarray) and W_scale.ndim > 1: W_scale = np.moveaxis(W_scale, 0, 1) W_zeropt = model.get_initializer(q_zp_name) + if isinstance(W_zeropt, np.ndarray) and W_zeropt.ndim > 1: + W_zeropt = np.moveaxis(W_zeropt, 0, 1) W_bitwidth = model.get_initializer(q_bw_name) W_signed = get_by_name(weight_prod.attribute, "signed").i W_narrow = get_by_name(weight_prod.attribute, "narrow").i @@ -200,19 +202,22 @@ def apply(self, model): # if not `maintain_bit_width`, then we adjust the bit width to # account for the clipping errors. elif weight_prod is not None: + round_fnc = resolve_rounding_mode(W_rounding_mode) W_int = (W_deconv / W_scale) + W_zeropt - W_int = W_int.round() # handling rounding errors - if W_int.min() < 0: - if np.abs(W_int).min() > W_int.max(): - tdt = DataType.get_smallest_possible(W_int.min()) + W_int = round_fnc(W_int) # handling rounding errors + W_min = W_int.min() + W_max = W_int.max() + if W_min < 0: + if abs(W_min) > W_max: + wdt = DataType.get_smallest_possible(W_min) else: - tdt = DataType.get_smallest_possible(-W_int.max() - 1) + wdt = DataType.get_smallest_possible(-W_max - 1) else: - tdt = DataType.get_smallest_possible(W_int.max()) - assert np.vectorize(tdt.allowed)(W_int).all(), "Error: issue finding data type to support." - if W_bitwidth != tdt.bitwidth(): - W_bitwidth = np.array(tdt.bitwidth(), dtype=np.float32) - assert tdt.signed() == W_signed, "Error: should maintain sign of the weights." + wdt = DataType.get_smallest_possible(W_max) + assert np.vectorize(wdt.allowed)(W_int).all(), "Error: issue finding data type to support." + if W_bitwidth != wdt.bitwidth(): + W_bitwidth = np.array(wdt.bitwidth(), dtype=np.float32) + assert wdt.signed() == W_signed, "Error: should maintain sign of the weights." 
deconv_inps = [resize_input, weight_name] # Make sure to keep the biases from the convolution @@ -240,6 +245,7 @@ def apply(self, model): W_deconv_init = weight_name if weight_prod is not None: W_deconv_init = q_w_name + model.set_initializer(q_zp_name, W_zeropt) model.set_initializer(q_s_name, W_scale) model.set_initializer(q_bw_name, W_bitwidth) model.set_initializer(W_deconv_init, W_deconv) diff --git a/tests/transformation/test_nn_resize_to_deconv.py b/tests/transformation/test_nn_resize_to_deconv.py index 64e31e2b..9f369ed4 100644 --- a/tests/transformation/test_nn_resize_to_deconv.py +++ b/tests/transformation/test_nn_resize_to_deconv.py @@ -30,18 +30,18 @@ import numpy as np import onnx -import onnx.helper as oh import onnx.numpy_helper as nph -from onnx import TensorProto +import onnx.parser as oprs from onnx.checker import check_model from pkgutil import get_data import qonnx.core.onnx_exec as oxe from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.general.quant import quant from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.resize_conv_to_deconv import ResizeConvolutionToDeconvolution -from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model +from qonnx.util.basic import gen_finn_dt_tensor np.random.seed(0) @@ -98,75 +98,244 @@ def test_resize_conv_to_deconv_quant_model(maintain_bit_width: bool): ).all(), "Error: expected output does not match the produced output." -def create_nn_resize_conv_model( - in_channels: int, out_channels: int, input_dim: int, kernel_size: int, upscale_factor: int, bias: bool -): - assert isinstance(kernel_size, int), "Assuming square kernels, so kernel_size needs to be an int." 
- padding = (kernel_size - 1) // 2 - - ifm_ch = in_channels - ifm_dim = input_dim - ofm_dim = ifm_dim * upscale_factor - ofm_ch = out_channels - scales = np.array([1.0, 1.0, upscale_factor, upscale_factor], dtype=np.float32) - - resize = oh.make_node( - "Resize", - inputs=["inp", "roi", "scales"], - outputs=["hid"], - mode="nearest", - ) - conv = oh.make_node( - op_type="Conv", - inputs=["hid", "W"] if not bias else ["hid", "W", "B"], - outputs=["out"], - kernel_shape=[kernel_size, kernel_size], - pads=[padding, padding, padding, padding], - strides=[1, 1], - group=1, - dilations=[1, 1], - ) - - input_shape = [1, ifm_ch, ifm_dim, ifm_dim] - output_shape = [1, ofm_ch, ofm_dim, ofm_dim] - - conv_param_shape = [ofm_ch, ifm_ch, kernel_size, kernel_size] - bias_param_shape = [ofm_ch] - - inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, input_shape) - out = oh.make_tensor_value_info("out", TensorProto.FLOAT, output_shape) - - W_conv = oh.make_tensor_value_info("W", TensorProto.FLOAT, conv_param_shape) - B_conv = oh.make_tensor_value_info("B", TensorProto.FLOAT, bias_param_shape) - - value_info = [W_conv] if not bias else [W_conv, B_conv] - - graph = oh.make_graph( - nodes=[resize, conv], - name="cnv_graph", - inputs=[inp], - outputs=[out], - value_info=value_info, - ) - modelproto = qonnx_make_model(graph, producer_name="test_model") - model = ModelWrapper(modelproto) +def float_nn_resize_model(r: int, ifm: int, ich: int, och: int, ksize: int, use_bias: bool): + assert isinstance(ksize, int), "Assuming square kernels, so kernel_size needs to be an int." 
+ pad = (ksize - 1) // 2 + + ishp = (1, ich, ifm, ifm) + oshp = (1, och, ifm * r, ifm * r) + wshp = (och, ich, ksize, ksize) + bshp = (och,) + rscales = np.array([1.0, 1.0, r, r], dtype=np.float32) + weight = np.random.randn(*wshp) + bias = np.random.randn(*bshp) + ishp_str = str(list(ishp)) + oshp_str = str(list(oshp)) + wshp_str = str(list(wshp)) + bshp_str = str(list(bshp)) + + if use_bias: + params_str = f""" + < + float{wshp_str} conv_param, + float{bshp_str} bias_param, + float roi, + float scales + > + """ + else: + params_str = f""" + < + float{wshp_str} conv_param, + float roi, + float scales + > + """ + + if use_bias: + conv_str = f""" + out0 = Conv< + dilations=[1,1], + group=1, + kernel_shape=[{ksize},{ksize}], + strides=[1,1], + pads=[{pad},{pad},{pad},{pad}] + >(hid0, conv_param, bias_param) + """ + else: + conv_str = f""" + out0 = Conv< + dilations=[1,1], + group=1, + kernel_shape=[{ksize},{ksize}], + strides=[1,1], + pads=[{pad},{pad},{pad},{pad}] + >(hid0, conv_param) + """ + + input = f""" + < + ir_version: 7, + opset_import: ["" : 13] + > + agraph (float{ishp_str} in0) => (float{oshp_str} out0) + {params_str} + {{ + hid0 = Resize< + mode="nearest" + >(in0, roi, scales) + {conv_str} + }} + """ + + model = oprs.parse_model(input) + model = ModelWrapper(model) + model.set_initializer("roi", np.empty(0)) + model.set_initializer("scales", rscales.astype(np.float32)) + model.set_initializer("conv_param", weight.astype(np.float32)) + if use_bias: + model.set_initializer("bias_param", bias.astype(np.float32)) + model = model.transform(InferShapes()) + check_model(model._model_proto) + return model + + +def quant_nn_resize_model(r: int, ifm: int, ich: int, och: int, ksize: int, use_bias: bool, channelwise: bool): + assert isinstance(ksize, int), "Assuming square kernels, so kernel_size needs to be an int." 
+ pad = (ksize - 1) // 2 + + ishp = (1, ich, ifm, ifm) + oshp = (1, och, ifm * r, ifm * r) + wshp = (och, ich, ksize, ksize) + bshp = (och,) + rscales = np.array([1.0, 1.0, r, r], dtype=np.float32) + weight = np.random.randn(*wshp) + bias = np.random.randn(*bshp) + ishp_str = str(list(ishp)) + oshp_str = str(list(oshp)) + wshp_str = str(list(wshp)) + bshp_str = str(list(bshp)) + + if channelwise: + q_attr_shp = (och, 1, 1, 1) + else: + q_attr_shp = (1,) + attrshp_str = str(list(q_attr_shp)) + scale = np.random.rand(*q_attr_shp).astype(np.float32) + zeropt = np.zeros(q_attr_shp).astype(np.float32) # NOTE: needs to be integer + bitwidth = np.array(4.0) + + weight: np.ndarray = quant(weight, scale, zeropt, bitwidth, signed=True, narrow=True, rounding_mode="ROUND") + + if use_bias: + params_str = f""" + < + float{wshp_str} conv_param, + float{attrshp_str} scale_param, + float{attrshp_str} zeropt_param, + float{bshp_str} bias_param, + float bitwidth_param, + float scale_bias, + float zeropt_bias, + float bitwidth_bias, + float roi, + float scales + > + """ + else: + params_str = f""" + < + float{wshp_str} conv_param, + float{attrshp_str} scale_param, + float{attrshp_str} zeropt_param, + float roi, + float scales, + float bitwidth_param + > + """ + + if use_bias: + scale_bias = np.random.rand( + 1, + ) + zeropt_bias = np.array(0.0) + bitwidth_bias = np.array(16.0) + convs_str = f""" + param1 = qonnx.custom_op.general.Quant< + signed=1, + narrow=1, + rounding_mode="ROUND" + >(bias_param, scale_bias, zeropt_bias, bitwidth_bias) + out0 = Conv< + dilations=[1,1], + group=1, + kernel_shape=[{ksize},{ksize}], + strides=[1,1], + pads=[{pad},{pad},{pad},{pad}] + >(hid0, param0, param1) + """ + else: + convs_str = f""" + out0 = Conv< + dilations=[1,1], + group=1, + kernel_shape=[{ksize},{ksize}], + strides=[1,1], + pads=[{pad},{pad},{pad},{pad}] + >(hid0, param0) + """ + + input = f""" + < + ir_version: 7, + opset_import: ["" : 13, "qonnx.custom_op.general" : 1] + > + agraph 
(float{ishp_str} in0) => (float{oshp_str} out0) + {params_str} + {{ + hid0 = Resize< + mode="nearest" + >(in0, roi, scales) + param0 = qonnx.custom_op.general.Quant< + signed=1, + narrow=1, + rounding_mode="ROUND" + >(conv_param, scale_param, zeropt_param, bitwidth_param) + {convs_str} + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) model.set_initializer("roi", np.empty(0)) - model.set_initializer("scales", scales) - model.set_initializer("W", np.random.rand(*conv_param_shape).astype(np.float32)) - if bias: - model.set_initializer("B", np.random.rand(*bias_param_shape).astype(np.float32)) + model.set_initializer("scales", rscales.astype(np.float32)) + model.set_initializer("conv_param", weight.astype(np.float32)) + if use_bias: + model.set_initializer("bias_param", bias.astype(np.float32)) + model.set_initializer("scale_bias", scale_bias.astype(np.float32)) + model.set_initializer("zeropt_bias", zeropt_bias.astype(np.float32)) + model.set_initializer("bitwidth_bias", bitwidth_bias.astype(np.float32)) + model.set_initializer("scale_param", scale.astype(np.float32)) + model.set_initializer("zeropt_param", zeropt.astype(np.float32)) + model.set_initializer("bitwidth_param", bitwidth.astype(np.float32)) model = model.transform(InferShapes()) check_model(model._model_proto) return model -@pytest.mark.parametrize("kernel_size", [1, 3, 5, 7]) +@pytest.mark.parametrize("kernel_size", [3, 5, 7]) @pytest.mark.parametrize("upscale_factor", [1, 2, 3, 4]) @pytest.mark.parametrize("bias", [True, False]) -def test_resize_conv_to_deconv_layer(kernel_size: int, upscale_factor: int, bias: bool): +def test_float_resize_conv_to_deconv_layer(kernel_size: int, upscale_factor: int, bias: bool): + och = 10 # output channels + ich = 3 # input channels + ifm = 4 # input feature map size + input_shape = [1, ich, ifm, ifm] # Create resize convolution layer that upsamples a 4x4 image with 1 I/O channel - model_1 = create_nn_resize_conv_model(3, 10, 4, kernel_size, 
upscale_factor, bias) + model_1 = float_nn_resize_model(upscale_factor, ifm, ich, och, kernel_size, bias) model_2 = model_1.transform(ResizeConvolutionToDeconvolution()) - input_shape = [1, 3, 4, 4] inp_dict = {"inp": np.random.rand(*input_shape).astype(np.float32)} assert oxe.compare_execution(model_1, model_2, inp_dict) + + +@pytest.mark.parametrize("kernel_size", [3, 5, 7]) +@pytest.mark.parametrize("upscale_factor", [1, 2, 3, 4]) +@pytest.mark.parametrize("bias", [True, False]) +@pytest.mark.parametrize("channelwise", [True, False]) +@pytest.mark.parametrize("maintain_bit_width", [True, False]) +def test_quant_resize_conv_to_deconv_layer( + kernel_size: int, upscale_factor: int, bias: bool, channelwise: bool, maintain_bit_width: bool +): + och = 10 # output channels + ich = 3 # input channels + ifm = 4 # input feature map size + input_shape = [1, ich, ifm, ifm] + # Create resize convolution layer that upsamples a 4x4 image with 1 I/O channel + model_1 = quant_nn_resize_model(upscale_factor, ifm, ich, och, kernel_size, bias, channelwise) + model_2 = model_1.transform(ResizeConvolutionToDeconvolution(maintain_bit_width=maintain_bit_width)) + inp_dict = {"inp": np.random.rand(*input_shape).astype(np.float32)} + assert oxe.compare_execution(model_1, model_2, inp_dict) + + if maintain_bit_width: + bw1 = model_1.get_initializer("bitwidth_param") + bw2 = model_2.get_initializer("bitwidth_param") + assert (bw1 == bw2).all() From 7ebbeac73c5d253a4b76639945554aacd241d13f Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Thu, 25 Apr 2024 16:19:13 +0200 Subject: [PATCH 52/83] Add unit test for SortCommutativeInputsInitializerLast transformation --- ...ort_commutative_inputs_initializer_last.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 tests/transformation/test_sort_commutative_inputs_initializer_last.py diff --git a/tests/transformation/test_sort_commutative_inputs_initializer_last.py 
b/tests/transformation/test_sort_commutative_inputs_initializer_last.py new file mode 100644 index 00000000..134cb89e --- /dev/null +++ b/tests/transformation/test_sort_commutative_inputs_initializer_last.py @@ -0,0 +1,78 @@ +# Set pytest parameters +import pytest +# Numpy for handling simulation of tensor operations +import numpy as np +# Helper for creating ONNX nodes +from onnx import TensorProto +from onnx import helper as oh +# QONNX wrapper of ONNX model graphs +from qonnx.core.modelwrapper import ModelWrapper +# QONNX utility for creating models from ONNX graphs +from qonnx.util.basic import qonnx_make_model +# Execute QONNX model graphs +from qonnx.core.onnx_exec import execute_onnx +# Graph transformation to be tested: Sorts the input list of commutative +# operations to have all dynamic inputs first followed by all initializer inputs +from qonnx.transformation.general import SortCommutativeInputsInitializerLast + + +# Specify how many inputs the test should cover +@pytest.mark.parametrize("num_inputs", [4, 5, 6]) +# Specify which inputs should be turned into initializers +@pytest.mark.parametrize( + "initializers", [[], [0], [1], [0, 1], [0, 3], [0, 1, 2, 3]] +) +# Tests the SortCommutativeInputsInitializerLast transformation +def test_sort_commutative_inputs_initializer_last(num_inputs, initializers): + # Generate the input tensor names + inputs = [f"in{i}" for i in range(num_inputs)] + # We will use the Sum ONNX operation to test this behavior, as it allows for + # arbitrary many inputs + node = oh.make_node( + op_type="Sum", inputs=inputs, outputs=["out"], name="Sum" + ) + # Create value infos for all input and the output tensor + inputs = [ + oh.make_tensor_value_info(i, TensorProto.FLOAT, (16,)) for i in inputs + ] + out = oh.make_tensor_value_info("out", TensorProto.FLOAT, (16,)) + # Make a graph comprising the Sum node and value infos for all inputs and + # the output + graph = oh.make_graph([node], inputs=inputs, outputs=[out], name="Sum") + # 
Wrap the graph in an QONNX model wrapper + model = ModelWrapper(qonnx_make_model(graph, producer_name="qonnx-tests")) + # Prepare the execution context + context = { + f"in{i}": np.random.rand(16) for i in range(num_inputs) + } + # Make sure all inputs are of type float32 + context = {key: value.astype(np.float32) for key, value in context.items()} + # Turn selected inputs into initializers + for i in initializers: + model.set_initializer(f"in{i}", context[f"in{i}"]) + + # Execute the ONNX model before transforming + out_expected = execute_onnx(model, context)["out"] + # Apply the transformation to be tested + # Note: No cleanup, as the tested transformation is part of the cleanup, and + # we want to test this in isolation + model = model.transform( + SortCommutativeInputsInitializerLast(), cleanup=False + ) + # Execute the ONNX model after transforming + out_produced = execute_onnx(model, context)["out"] + + # Start with no initializer input seen so far + seen_initializer = False + # Verify that no "dynamic" input follows an initializer input + for i in model.graph.node[0].input: + # Keep track of when an initializer has been seen + if model.get_initializer(i) is not None: + seen_initializer = True + # If there has already been an initializer, this input must be an + # initializer as well + assert not seen_initializer or model.get_initializer(i) is not None, \ + "Non-initializer input following initializer after sorting" + + # Outputs before and after must match + assert np.allclose(out_produced, out_expected) From 38df9fbe3b3dce582eae26b02d1a9b5ee91ebbad Mon Sep 17 00:00:00 2001 From: Christoph Berganski Date: Thu, 25 Apr 2024 16:26:22 +0200 Subject: [PATCH 53/83] Address some linting issues --- src/qonnx/transformation/general.py | 1 - ...ort_commutative_inputs_initializer_last.py | 27 ++++++++++++++----- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/src/qonnx/transformation/general.py b/src/qonnx/transformation/general.py index 
b5ed0fca..d69cee5a 100644 --- a/src/qonnx/transformation/general.py +++ b/src/qonnx/transformation/general.py @@ -405,7 +405,6 @@ def apply(self, model): # noqa # Length of sorted and original input list must match assert len(inputs) == len(node.input) # Reassigned inputs from sorted categories - # Note: ONNX does not allow direct assignment to node.input for i, name in enumerate(inputs): # The graph has been modified if any input is reordered if node.input[i] != name: diff --git a/tests/transformation/test_sort_commutative_inputs_initializer_last.py b/tests/transformation/test_sort_commutative_inputs_initializer_last.py index 134cb89e..1cd1eb72 100644 --- a/tests/transformation/test_sort_commutative_inputs_initializer_last.py +++ b/tests/transformation/test_sort_commutative_inputs_initializer_last.py @@ -1,26 +1,34 @@ # Set pytest parameters import pytest + # Numpy for handling simulation of tensor operations import numpy as np + # Helper for creating ONNX nodes from onnx import TensorProto from onnx import helper as oh + # QONNX wrapper of ONNX model graphs from qonnx.core.modelwrapper import ModelWrapper -# QONNX utility for creating models from ONNX graphs -from qonnx.util.basic import qonnx_make_model + # Execute QONNX model graphs from qonnx.core.onnx_exec import execute_onnx + # Graph transformation to be tested: Sorts the input list of commutative # operations to have all dynamic inputs first followed by all initializer inputs from qonnx.transformation.general import SortCommutativeInputsInitializerLast +# QONNX utility for creating models from ONNX graphs +from qonnx.util.basic import qonnx_make_model + # Specify how many inputs the test should cover @pytest.mark.parametrize("num_inputs", [4, 5, 6]) # Specify which inputs should be turned into initializers @pytest.mark.parametrize( + # fmt: off "initializers", [[], [0], [1], [0, 1], [0, 3], [0, 1, 2, 3]] + # fmt: on ) # Tests the SortCommutativeInputsInitializerLast transformation def 
test_sort_commutative_inputs_initializer_last(num_inputs, initializers): @@ -29,11 +37,15 @@ def test_sort_commutative_inputs_initializer_last(num_inputs, initializers): # We will use the Sum ONNX operation to test this behavior, as it allows for # arbitrary many inputs node = oh.make_node( + # fmt: off op_type="Sum", inputs=inputs, outputs=["out"], name="Sum" + # fmt: on ) # Create value infos for all input and the output tensor inputs = [ + # fmt: off oh.make_tensor_value_info(i, TensorProto.FLOAT, (16,)) for i in inputs + # fmt: on ] out = oh.make_tensor_value_info("out", TensorProto.FLOAT, (16,)) # Make a graph comprising the Sum node and value infos for all inputs and @@ -42,9 +54,7 @@ def test_sort_commutative_inputs_initializer_last(num_inputs, initializers): # Wrap the graph in an QONNX model wrapper model = ModelWrapper(qonnx_make_model(graph, producer_name="qonnx-tests")) # Prepare the execution context - context = { - f"in{i}": np.random.rand(16) for i in range(num_inputs) - } + context = {f"in{i}": np.random.rand(16) for i in range(num_inputs)} # Make sure all inputs are of type float32 context = {key: value.astype(np.float32) for key, value in context.items()} # Turn selected inputs into initializers @@ -57,7 +67,9 @@ def test_sort_commutative_inputs_initializer_last(num_inputs, initializers): # Note: No cleanup, as the tested transformation is part of the cleanup, and # we want to test this in isolation model = model.transform( + # fmt: off SortCommutativeInputsInitializerLast(), cleanup=False + # fmt: on ) # Execute the ONNX model after transforming out_produced = execute_onnx(model, context)["out"] @@ -71,8 +83,9 @@ def test_sort_commutative_inputs_initializer_last(num_inputs, initializers): seen_initializer = True # If there has already been an initializer, this input must be an # initializer as well - assert not seen_initializer or model.get_initializer(i) is not None, \ - "Non-initializer input following initializer after sorting" + assert ( + 
not seen_initializer or model.get_initializer(i) is not None + ), "Non-initializer input following initializer after sorting" # Outputs before and after must match assert np.allclose(out_produced, out_expected) From 57d0d9d6a8b7f61e68fea32581e0aec3031a3293 Mon Sep 17 00:00:00 2001 From: Tim Paine <3105306+timkpaine@users.noreply.github.com> Date: Sat, 11 May 2024 16:00:04 -0400 Subject: [PATCH 54/83] Remove some commented debug code --- src/qonnx/core/onnx_exec.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/qonnx/core/onnx_exec.py b/src/qonnx/core/onnx_exec.py index a5be9dee..a8f4774c 100644 --- a/src/qonnx/core/onnx_exec.py +++ b/src/qonnx/core/onnx_exec.py @@ -208,7 +208,6 @@ def execute_onnx_and_make_model(model, input_dict): new_model.set_initializer(i, execution_context[i]) for vi in new_model.graph.value_info: new_model.graph.output.append(vi) - # import pdb; pdb.set_trace() return new_model From 1dfda07eb04924715687e564c6acd28515cca889 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 21 May 2024 09:21:40 +0200 Subject: [PATCH 55/83] fix linting --- src/qonnx/core/datatype.py | 2 ++ src/qonnx/util/inference_cost.py | 13 +++++++++---- tests/analysis/test_matmul_mac_cost.py | 12 ++++++------ 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/qonnx/core/datatype.py b/src/qonnx/core/datatype.py index 84365289..f37d4eea 100644 --- a/src/qonnx/core/datatype.py +++ b/src/qonnx/core/datatype.py @@ -144,6 +144,7 @@ def to_numpy_dt(self): def get_canonical_name(self): return "FLOAT32" + class Float16Type(BaseDataType): def bitwidth(self): return 16 @@ -175,6 +176,7 @@ def to_numpy_dt(self): def get_canonical_name(self): return "FLOAT16" + class IntType(BaseDataType): def __init__(self, bitwidth, signed): super().__init__() diff --git a/src/qonnx/util/inference_cost.py b/src/qonnx/util/inference_cost.py index 30ac677d..57d5292d 100644 --- a/src/qonnx/util/inference_cost.py +++ b/src/qonnx/util/inference_cost.py @@ -44,6 +44,7 @@ from 
qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes + def compute_bops_and_macs(inf_cost_dict): total_bops = 0.0 total_macs = 0.0 @@ -56,6 +57,7 @@ def compute_bops_and_macs(inf_cost_dict): total_macs += v return total_bops, total_macs + def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"): total_mem_bits = 0.0 total_mem_elems = 0.0 @@ -67,6 +69,7 @@ def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"): total_mem_elems += v return total_mem_bits, total_mem_elems + def assign_mem_bits_and_elems(res_dict): mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(res_dict, "mem_w") mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(res_dict, "mem_o") @@ -76,6 +79,7 @@ def assign_mem_bits_and_elems(res_dict): res_dict["total_mem_o_elems"] = mem_o_elems return res_dict + def inference_cost( model_filename_or_wrapper, *, @@ -96,7 +100,7 @@ def inference_cost( datatype inference and constant folding. Strongly recommended. 
:param discount_sparsity: If set, will discount op cost of MAC ops with a constant zero weight, and the mem cost of constant zero weights.""" - + combined_results = {} if isinstance(model_filename_or_wrapper, ModelWrapper): model = model_filename_or_wrapper @@ -117,8 +121,7 @@ def inference_cost( model = model.transform(GiveReadableTensorNames()) if output_onnx is not None: model.save(output_onnx) - ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, - cost_breakdown)) + ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, cost_breakdown)) for i, res in ret.items(): if i == "total_cost": bops, macs = compute_bops_and_macs(res) @@ -148,9 +151,11 @@ def inference_cost( per_node_breakdown[node_name] = node_res combined_results[i] = per_node_breakdown return combined_results - + + def main(): clize.run(inference_cost) + if __name__ == "__main__": main() diff --git a/tests/analysis/test_matmul_mac_cost.py b/tests/analysis/test_matmul_mac_cost.py index 534618aa..fbdc0d2a 100644 --- a/tests/analysis/test_matmul_mac_cost.py +++ b/tests/analysis/test_matmul_mac_cost.py @@ -27,19 +27,19 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pytest -import qonnx from pkgutil import get_data + import qonnx.util.inference_cost as infc -from qonnx.util.cleanup import cleanup_model from qonnx.core.modelwrapper import ModelWrapper +from qonnx.util.cleanup import cleanup_model def test_matmul_mac_cost(): - raw_model = get_data("qonnx","data/onnx/matmul_update/sdp.onnx") + raw_model = get_data("qonnx", "data/onnx/matmul_update/sdp.onnx") model = ModelWrapper(raw_model) cleaned_model = cleanup_model(model) - # Two Matmul layers with shape (i_shape, w_shape, o_shape), L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) + # Two Matmul layers with shape (i_shape, w_shape, o_shape), + # L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False) - mac_cost = inf_cost_dict['op_mac_FLOAT32_FLOAT32'] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 + mac_cost = inf_cost_dict["op_mac_FLOAT32_FLOAT32"] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 assert mac_cost == 1048576.0, "Error: discrepancy in mac cost." 
From 2cc6d526d89b6319c376bad6c37d3cb0a896d638 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 21 May 2024 10:12:15 +0200 Subject: [PATCH 56/83] [InfCost] per-node norm mac/param counts, always floats for json --- src/qonnx/analysis/inference_cost.py | 6 +++++ src/qonnx/util/inference_cost.py | 25 ++++++++----------- .../analysis/test_inference_cost_breakdown.py | 4 ++- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/qonnx/analysis/inference_cost.py b/src/qonnx/analysis/inference_cost.py index 847058b7..c821d26a 100644 --- a/src/qonnx/analysis/inference_cost.py +++ b/src/qonnx/analysis/inference_cost.py @@ -117,6 +117,8 @@ def inference_cost_conv(model, node, discount_sparsity): mac_op_type_str = "op_mac_%s_%s" % (idt_name, wdt_name) w_mem_type_str = "mem_w_%s" % (wdt_name) o_mem_type_str = "mem_o_%s" % (odt_name) + # keep in floats to remain compatible with json serialization + n_macs, w_mem, o_mem = float(n_macs), float(w_mem), float(o_mem) ret = {mac_op_type_str: n_macs, w_mem_type_str: w_mem, o_mem_type_str: o_mem} return ret @@ -161,6 +163,8 @@ def inference_cost_matmul(model, node, discount_sparsity): mac_op_type_str = "op_mac_%s_%s" % (idt_name, wdt_name) w_mem_type_str = "mem_w_%s" % (wdt_name) o_mem_type_str = "mem_o_%s" % (odt_name) + # keep in floats to remain compatible with json serialization + n_macs, w_mem, o_mem = float(n_macs), float(w_mem), float(o_mem) ret = {mac_op_type_str: n_macs, w_mem_type_str: w_mem, o_mem_type_str: o_mem} return ret @@ -197,6 +201,8 @@ def inference_cost_upsample(model, node, discount_sparsity): mac_op_type_str = "op_mac_%s_%s" % (idt_name, idt_name) o_mem_type_str = "mem_o_%s" % (odt_name) + # keep in floats to remain compatible with json serialization + n_macs, o_mem = float(n_macs), float(o_mem) ret = {mac_op_type_str: n_macs, o_mem_type_str: o_mem} return ret diff --git a/src/qonnx/util/inference_cost.py b/src/qonnx/util/inference_cost.py index 57d5292d..12f1b56d 100644 --- 
a/src/qonnx/util/inference_cost.py +++ b/src/qonnx/util/inference_cost.py @@ -99,7 +99,9 @@ def inference_cost( :param preprocess: If set, run preprocessing steps such as shape inference, datatype inference and constant folding. Strongly recommended. :param discount_sparsity: If set, will discount op cost of MAC ops with a - constant zero weight, and the mem cost of constant zero weights.""" + constant zero weight, and the mem cost of constant zero weights. + :param cost_breakdown: If set, include per-node (by name) and per-node-type + breakdowns as part of the returned inference cost dict.""" combined_results = {} if isinstance(model_filename_or_wrapper, ModelWrapper): @@ -130,26 +132,19 @@ def inference_cost( res["total_macs"] = macs if "unsupported" in res: res["unsupported"] = str(res["unsupported"]) - if output_json is not None: - with open(output_json, "w") as f: - json.dump(res, f, sort_keys=True, indent=2) combined_results[i] = res - elif i == "optype_cost": - per_optype_breakdown = {} + else: + per_optype_or_node_breakdown = {} for optype, op_res in res.items(): bops, macs = compute_bops_and_macs(op_res) op_res = assign_mem_bits_and_elems(op_res) op_res["total_bops"] = bops op_res["total_macs"] = macs - per_optype_breakdown[optype] = op_res - combined_results[i] = per_optype_breakdown - else: - per_node_breakdown = {} - for node_name in res.keys(): - node_res = res[node_name] - node_res = assign_mem_bits_and_elems(node_res) - per_node_breakdown[node_name] = node_res - combined_results[i] = per_node_breakdown + per_optype_or_node_breakdown[optype] = op_res + combined_results[i] = per_optype_or_node_breakdown + if output_json is not None: + with open(output_json, "w") as f: + json.dump(combined_results, f, sort_keys=True, indent=2) return combined_results diff --git a/tests/analysis/test_inference_cost_breakdown.py b/tests/analysis/test_inference_cost_breakdown.py index b798eaf0..afa422b9 100644 --- a/tests/analysis/test_inference_cost_breakdown.py +++ 
b/tests/analysis/test_inference_cost_breakdown.py @@ -76,7 +76,9 @@ def test_inference_cost_breakdown(test_model): test_details = model_details[test_model] model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) inf_cost = infca(model, discount_sparsity=False, cost_breakdown=True) - print(inf_cost.keys()) + assert inf_cost["node_cost"]["Conv_0"]["total_macs"] == 118013952 + assert inf_cost["node_cost"]["Conv_1"]["total_macs"] == 115605504 + assert inf_cost["optype_cost"]["Conv"]["total_macs"] == 1813561344 t_cost = inf_cost["total_cost"] # total cost op_cost = aggregate_dict_keys(inf_cost["optype_cost"]) # cost per optype n_cost = aggregate_dict_keys(inf_cost["node_cost"]) # cost per node. From 4dd2000ef179f0cd0c48a1389ada3e44f87c1550 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 21 May 2024 10:13:58 +0200 Subject: [PATCH 57/83] update README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 0e9ff13f..dd9b6c66 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,7 @@ Inference cost for CNV_2W2A.onnx } ``` +You can use the `--cost-breakdown` option to generate a more detailed report that covers per-node (by name) and per-op-type information. You can read more about the BOPS metric in [this paper](https://www.frontiersin.org/articles/10.3389/frai.2021.676564/full), Section 4.2 Bit Operations. 
### Convert between different quantization representations From a4e7e35a308535f72176e289d841716629e92bf9 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 21 May 2024 14:46:50 +0200 Subject: [PATCH 58/83] [Test] fix changes return style for inference cost --- src/qonnx/util/inference_cost.py | 2 +- tests/analysis/test_inference_cost.py | 152 +++++++++++--------- tests/analysis/test_matmul_mac_cost.py | 2 +- tests/transformation/test_pruning.py | 4 +- tests/transformation/test_quantize_graph.py | 4 +- 5 files changed, 88 insertions(+), 76 deletions(-) diff --git a/src/qonnx/util/inference_cost.py b/src/qonnx/util/inference_cost.py index 12f1b56d..8041ecdc 100644 --- a/src/qonnx/util/inference_cost.py +++ b/src/qonnx/util/inference_cost.py @@ -133,7 +133,7 @@ def inference_cost( if "unsupported" in res: res["unsupported"] = str(res["unsupported"]) combined_results[i] = res - else: + elif i in ["optype_cost", "node_cost"]: per_optype_or_node_breakdown = {} for optype, op_res in res.items(): bops, macs = compute_bops_and_macs(op_res) diff --git a/tests/analysis/test_inference_cost.py b/tests/analysis/test_inference_cost.py index a94f57f4..572d2e14 100644 --- a/tests/analysis/test_inference_cost.py +++ b/tests/analysis/test_inference_cost.py @@ -34,90 +34,102 @@ model_details_infcost = { "FINN-CNV_W2A2": { "expected_sparse": { - "op_mac_SCALEDINT<8>_INT2": 1345500.0, - "mem_w_INT2": 908033.0, - "mem_o_SCALEDINT<32>": 57600.0, - "op_mac_INT2_INT2": 35615771.0, - "mem_o_INT32": 85002.0, - "unsupported": "set()", - "discount_sparsity": True, - "total_bops": 163991084.0, - "total_macs": 36961271.0, - "total_mem_w_bits": 1816066.0, - "total_mem_w_elems": 908033.0, - "total_mem_o_bits": 4563264.0, - "total_mem_o_elems": 142602.0, + "total_cost": { + "op_mac_SCALEDINT<8>_INT2": 1345500.0, + "mem_w_INT2": 908033.0, + "mem_o_SCALEDINT<32>": 57600.0, + "op_mac_INT2_INT2": 35615771.0, + "mem_o_INT32": 85002.0, + "unsupported": "set()", + "discount_sparsity": True, + 
"total_bops": 163991084.0, + "total_macs": 36961271.0, + "total_mem_w_bits": 1816066.0, + "total_mem_w_elems": 908033.0, + "total_mem_o_bits": 4563264.0, + "total_mem_o_elems": 142602.0, + } }, "expected_dense": { - "op_mac_SCALEDINT<8>_INT2": 1555200.0, - "mem_w_INT2": 1542848.0, - "mem_o_SCALEDINT<32>": 57600.0, - "op_mac_INT2_INT2": 57906176.0, - "mem_o_INT32": 85002.0, - "unsupported": "set()", - "discount_sparsity": False, - "total_bops": 256507904.0, - "total_macs": 59461376.0, - "total_mem_w_bits": 3085696.0, - "total_mem_w_elems": 1542848.0, - "total_mem_o_bits": 4563264.0, - "total_mem_o_elems": 142602.0, + "total_cost": { + "op_mac_SCALEDINT<8>_INT2": 1555200.0, + "mem_w_INT2": 1542848.0, + "mem_o_SCALEDINT<32>": 57600.0, + "op_mac_INT2_INT2": 57906176.0, + "mem_o_INT32": 85002.0, + "unsupported": "set()", + "discount_sparsity": False, + "total_bops": 256507904.0, + "total_macs": 59461376.0, + "total_mem_w_bits": 3085696.0, + "total_mem_w_elems": 1542848.0, + "total_mem_o_bits": 4563264.0, + "total_mem_o_elems": 142602.0, + } }, }, "FINN-TFC_W2A2": { "expected_sparse": { - "op_mac_INT2_INT2": 22355.0, - "mem_w_INT2": 22355.0, - "mem_o_INT32": 202.0, - "unsupported": "set()", - "discount_sparsity": True, - "total_bops": 89420.0, - "total_macs": 22355.0, - "total_mem_w_bits": 44710.0, - "total_mem_w_elems": 22355.0, - "total_mem_o_bits": 6464.0, - "total_mem_o_elems": 202.0, + "total_cost": { + "op_mac_INT2_INT2": 22355.0, + "mem_w_INT2": 22355.0, + "mem_o_INT32": 202.0, + "unsupported": "set()", + "discount_sparsity": True, + "total_bops": 89420.0, + "total_macs": 22355.0, + "total_mem_w_bits": 44710.0, + "total_mem_w_elems": 22355.0, + "total_mem_o_bits": 6464.0, + "total_mem_o_elems": 202.0, + } }, "expected_dense": { - "op_mac_INT2_INT2": 59008.0, - "mem_w_INT2": 59008.0, - "mem_o_INT32": 202.0, - "unsupported": "set()", - "discount_sparsity": False, - "total_bops": 236032.0, - "total_macs": 59008.0, - "total_mem_w_bits": 118016.0, - 
"total_mem_w_elems": 59008.0, - "total_mem_o_bits": 6464.0, - "total_mem_o_elems": 202.0, + "total_cost": { + "op_mac_INT2_INT2": 59008.0, + "mem_w_INT2": 59008.0, + "mem_o_INT32": 202.0, + "unsupported": "set()", + "discount_sparsity": False, + "total_bops": 236032.0, + "total_macs": 59008.0, + "total_mem_w_bits": 118016.0, + "total_mem_w_elems": 59008.0, + "total_mem_o_bits": 6464.0, + "total_mem_o_elems": 202.0, + } }, }, "RadioML_VGG10": { "expected_sparse": { - "op_mac_SCALEDINT<8>_SCALEDINT<8>": 12620311.0, - "mem_w_SCALEDINT<8>": 155617.0, - "mem_o_SCALEDINT<32>": 130328.0, - "unsupported": "set()", - "discount_sparsity": True, - "total_bops": 807699904.0, - "total_macs": 12620311.0, - "total_mem_w_bits": 1244936.0, - "total_mem_w_elems": 155617.0, - "total_mem_o_bits": 4170496.0, - "total_mem_o_elems": 130328.0, + "total_cost": { + "unsupported": "set()", + "discount_sparsity": True, + "op_mac_SCALEDINT<8>_SCALEDINT<8>": 12620311.0, + "mem_w_SCALEDINT<8>": 155617.0, + "mem_o_SCALEDINT<32>": 130328.0, + "total_bops": 807699904.0, + "total_macs": 12620311.0, + "total_mem_w_bits": 1244936.0, + "total_mem_w_elems": 155617.0, + "total_mem_o_bits": 4170496.0, + "total_mem_o_elems": 130328.0, + } }, "expected_dense": { - "op_mac_SCALEDINT<8>_SCALEDINT<8>": 12864512.0, - "mem_w_SCALEDINT<8>": 159104.0, - "mem_o_SCALEDINT<32>": 130328.0, - "unsupported": "set()", - "discount_sparsity": False, - "total_bops": 823328768.0, - "total_macs": 12864512.0, - "total_mem_w_bits": 1272832.0, - "total_mem_w_elems": 159104.0, - "total_mem_o_bits": 4170496.0, - "total_mem_o_elems": 130328.0, + "total_cost": { + "unsupported": "set()", + "discount_sparsity": False, + "op_mac_SCALEDINT<8>_SCALEDINT<8>": 12864512.0, + "mem_w_SCALEDINT<8>": 159104.0, + "mem_o_SCALEDINT<32>": 130328.0, + "total_bops": 823328768.0, + "total_macs": 12864512.0, + "total_mem_w_bits": 1272832.0, + "total_mem_w_elems": 159104.0, + "total_mem_o_bits": 4170496.0, + "total_mem_o_elems": 130328.0, + } }, }, } 
diff --git a/tests/analysis/test_matmul_mac_cost.py b/tests/analysis/test_matmul_mac_cost.py index fbdc0d2a..ff7dbc2f 100644 --- a/tests/analysis/test_matmul_mac_cost.py +++ b/tests/analysis/test_matmul_mac_cost.py @@ -40,6 +40,6 @@ def test_matmul_mac_cost(): cleaned_model = cleanup_model(model) # Two Matmul layers with shape (i_shape, w_shape, o_shape), # L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32]) - inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False) + inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False)["total_cost"] mac_cost = inf_cost_dict["op_mac_FLOAT32_FLOAT32"] # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576 assert mac_cost == 1048576.0, "Error: discrepancy in mac cost." diff --git a/tests/transformation/test_pruning.py b/tests/transformation/test_pruning.py index 85f9afc9..b2fdbcd8 100644 --- a/tests/transformation/test_pruning.py +++ b/tests/transformation/test_pruning.py @@ -90,7 +90,7 @@ def test_pruning_mnv1(): # do cleanup including folding quantized weights model = cleanup_model(model, False) inp, golden = get_golden_in_and_output("MobileNetv1-w4a4") - cost0 = inference_cost(model, discount_sparsity=False) + cost0 = inference_cost(model, discount_sparsity=False)["total_cost"] assert cost0["op_mac_SCALEDINT<8>_SCALEDINT<8>"] == 10645344.0 assert cost0["mem_w_SCALEDINT<8>"] == 864.0 assert cost0["op_mac_SCALEDINT<4>_SCALEDINT<4>"] == 556357408.0 @@ -105,7 +105,7 @@ def test_pruning_mnv1(): } model = model.transform(PruneChannels(prune_spec)) - cost1 = inference_cost(model, discount_sparsity=False) + cost1 = inference_cost(model, discount_sparsity=False)["total_cost"] assert cost1["op_mac_SCALEDINT<8>_SCALEDINT<8>"] == 7318674.0 assert cost1["mem_w_SCALEDINT<8>"] == 594.0 assert cost1["op_mac_SCALEDINT<4>_SCALEDINT<4>"] == 546053216.0 diff --git a/tests/transformation/test_quantize_graph.py b/tests/transformation/test_quantize_graph.py index 
867f9b34..5278194d 100644 --- a/tests/transformation/test_quantize_graph.py +++ b/tests/transformation/test_quantize_graph.py @@ -120,14 +120,14 @@ def to_verify(model, test_details): def test_quantize_graph(test_model): test_details = model_details[test_model] model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) - original_model_inf_cost = inference_cost(model, discount_sparsity=False) + original_model_inf_cost = inference_cost(model, discount_sparsity=False)["total_cost"] nodes_pos = test_details["test_input"] model = model.transform(QuantizeGraph(nodes_pos)) quantnodes_added = len(model.get_nodes_by_op_type("Quant")) assert quantnodes_added == 10 # 10 positions are specified. verification = to_verify(model, nodes_pos) assert verification == "Success" - inf_cost = inference_cost(model, discount_sparsity=False) + inf_cost = inference_cost(model, discount_sparsity=False)["total_cost"] assert ( inf_cost["total_macs"] == original_model_inf_cost["total_macs"] ) # "1814073344.0" must be same as the original model. 
From f9db21833dc2ab0ade94583eb2e5d8acecc3a3da Mon Sep 17 00:00:00 2001 From: makoeppel Date: Mon, 17 Jun 2024 09:25:54 +0200 Subject: [PATCH 59/83] fix notebook 0 --- notebooks/0_how_to_work_with_onnx.ipynb | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/notebooks/0_how_to_work_with_onnx.ipynb b/notebooks/0_how_to_work_with_onnx.ipynb index 60340f14..052f6a77 100644 --- a/notebooks/0_how_to_work_with_onnx.ipynb +++ b/notebooks/0_how_to_work_with_onnx.ipynb @@ -68,13 +68,6 @@ " name='Add2',\n", ")\n", "\n", - "Add3_node = onnx.helper.make_node(\n", - " 'Add',\n", - " inputs=['abs1', 'abs1'],\n", - " outputs=['sum3'],\n", - " name='Add3',\n", - ")\n", - "\n", "Abs_node = onnx.helper.make_node(\n", " 'Abs',\n", " inputs=['sum2'],\n", @@ -82,12 +75,19 @@ " name='Abs'\n", ")\n", "\n", + "Add3_node = onnx.helper.make_node(\n", + " 'Add',\n", + " inputs=['abs1', 'abs1'],\n", + " outputs=['sum3'],\n", + " name='Add3',\n", + ")\n", + "\n", "Round_node = onnx.helper.make_node(\n", " 'Round',\n", " inputs=['sum3'],\n", " outputs=['out1'],\n", " name='Round',\n", - ")\n" + ")" ] }, { @@ -253,7 +253,7 @@ "metadata": {}, "outputs": [], "source": [ - "in1_values =np.asarray(np.random.uniform(low=-5, high=5, size=(4,4)), dtype=np.float32)\n", + "in1_values = np.asarray(np.random.uniform(low=-5, high=5, size=(4,4)), dtype=np.float32)\n", "in2_values = np.asarray(np.random.uniform(low=-5, high=5, size=(4,4)), dtype=np.float32)\n", "in3_values = np.asarray(np.random.uniform(low=-5, high=5, size=(4,4)), dtype=np.float32)" ] @@ -350,6 +350,7 @@ "metadata": {}, "outputs": [], "source": [ + "import qonnx\n", "from qonnx.core.modelwrapper import ModelWrapper\n", "finn_model = ModelWrapper(onnx_model)" ] From 1b2774c0635476add3ce05bc50f4849480282bdb Mon Sep 17 00:00:00 2001 From: makoeppel Date: Mon, 17 Jun 2024 09:28:36 +0200 Subject: [PATCH 60/83] refactor LowerConvsToMatMul class, increase rtol in test_conv_lowering_convmnist() --- README.md | 1 + 
docs/index.rst | 3 + .../transformation/lower_convs_to_matmul.py | 292 ++++++++---------- tests/transformation/test_conv_lowering.py | 2 +- 4 files changed, 139 insertions(+), 159 deletions(-) diff --git a/README.md b/README.md index dd9b6c66..69c28b3c 100644 --- a/README.md +++ b/README.md @@ -125,6 +125,7 @@ source venv/bin/activate pip install -e .[qkeras,testing] ``` +### Test suite Run entire test suite, parallelized across CPU cores: ``` pytest -n auto --verbose diff --git a/docs/index.rst b/docs/index.rst index f07ba086..53b9c159 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -63,6 +63,9 @@ Install in editable mode in a venv: pip install -e .[testing, docs, notebooks] +Test suite +++++++++++ + Run entire test suite, parallelized across CPU cores: :: diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index bf95d537..c5964cf4 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -42,167 +42,143 @@ class LowerConvsToMatMul(Transformation): def apply(self, model): model = model.transform(ExtractBiasFromConv()) graph = model.graph - node_ind = 0 graph_modified = False - for n in graph.node: - node_ind += 1 - if n.op_type == "Conv": - if len(n.input) == 3: - warnings.warn("Found Conv node with bias, skipping") - continue - cnv_input = n.input[0] - cnv_output = n.output[0] - idt = model.get_tensor_datatype(cnv_input) - odt = model.get_tensor_datatype(cnv_output) - # extract conv parameters - k = get_by_name(n.attribute, "kernel_shape").ints - k_h = k[0] - k_w = k[1] - stride_h = get_by_name(n.attribute, "strides").ints[0] - stride_w = get_by_name(n.attribute, "strides").ints[1] - group = get_by_name(n.attribute, "group").i - weight_name = n.input[1] - W_conv = model.get_initializer(weight_name) - ifm_ch = model.get_tensor_shape(n.input[0])[1] # assume NCHW - ofm_ch = model.get_tensor_shape(n.output[0])[1] # assume NCHW - ifm_dim_h 
= model.get_tensor_shape(n.input[0])[2] # assume NCHW - ifm_dim_w = model.get_tensor_shape(n.input[0])[3] - ofm_dim_h = model.get_tensor_shape(n.output[0])[2] # assume NCHW - ofm_dim_w = model.get_tensor_shape(n.output[0])[3] - dilation_attr = get_by_name(n.attribute, "dilations") - if dilation_attr is not None: - dilation = dilation_attr.ints - else: - dilation = [1, 1] # default value - # handle both auto_pad and explicit padding - auto_pad = get_by_name(n.attribute, "auto_pad") - if auto_pad is not None: - # find equivalent specified padding - auto_pad = auto_pad.s.decode("utf-8") - if auto_pad == "NOTSET": - # use specified padding - pad = get_by_name(n.attribute, "pads").ints - else: - pad = auto_pad_to_explicit_padding( - auto_pad, - ifm_dim_h, - ifm_dim_w, - k_h, - k_w, - stride_h, - stride_w, - len(model.get_tensor_shape(n.input[0])) - 2, - ) - else: - # use specified padding - pad = get_by_name(n.attribute, "pads").ints - - # If len(pad) == 2, assume no padding for other dimension - if len(pad) == 2: # only one dimension should be padded - assert ifm_dim_h == 1 or ifm_dim_w == 1, "Padding is assumed to be 1D, image is 2D" - - # if depthwise conv create sparse matrix and variable "dw" - # to store as attribute in Im2Col that indicates that the created + for node_ind, node in enumerate(graph.node, start=1): + if node.op_type != "Conv": + continue + + if len(node.input) == 3: + warnings.warn("Found Conv node with bias, skipping") + continue + + # extract parameters of node + (cnv_input, cnv_output, cnv_input_datatype, cnv_output_datatype, + k_h, k_w, stride_h, stride_w, group, weight_name, W_conv, ifm_ch, + ofm_ch, ifm_dim_h, ifm_dim_w, ofm_dim_h, ofm_dim_w, dilation, pad) =\ + self.extract_conv_params(model, node) + + # if depthwise conv create sparse matrix and variable "dw" + # to store as attribute in Im2Col that indicates that the created + # Im2Col node belongs to a depthwise convolution + dw = False + if group == ifm_ch and ofm_ch == ifm_ch: + W_sparse 
= np.zeros((ofm_ch, ifm_ch, k_h, k_w)) # (OFM, IFM, k_H, k_W) + for ch in range(ifm_ch): + W_sparse[ch][ch] = W_conv[ch][0] # W_conv = [OFM, IFM, k_H, k_W] + W_conv = W_sparse.astype(np.float32) + # we need to store information of the + # sparsity of the weight matrix. For this + # we use the sparsity annotation of the + # weight tensor + sparsity = {"dw": {"kernel_shape": [k_h, k_w]}} + model.set_tensor_sparsity(weight_name, sparsity) + # additionally create variable "dw" to store + # as attribute in Im2Col that indicates that the created # Im2Col node belongs to a depthwise convolution - dw = False - if group == ifm_ch and ofm_ch == ifm_ch: - W_sparse = np.zeros((ofm_ch, ifm_ch, k_h, k_w)) # (OFM, IFM, k_H, k_W) - for ch in range(ifm_ch): - W_sparse[ch][ch] = W_conv[ch][0] # W_conv = [OFM, IFM, k_H, k_W] - W_conv = W_sparse.astype(np.float32) - # we need to store information of the - # sparsity of the weight matrix. For this - # we use the sparsity annotation of the - # weight tensor - sparsity = {"dw": {"kernel_shape": [k_h, k_w]}} - model.set_tensor_sparsity(weight_name, sparsity) - # additionally create variable "dw" to store - # as attribute in Im2Col that indicates that the created - # Im2Col node belongs to a depthwise convolution - dw = True - - # reuse conv weights for new matmul weights - # conv weights are [OFM][IFM][k][k] - # first convert to [OFM][k][k][IFM] (to remain compatible with - # finn-hlslib and how it does im2col/sliding window) - W_matmul = W_conv.transpose(0, 2, 3, 1) # W_conv = [OFM, IFM, k_H, k_W] - # reshape into [OFM][k*k*IFM] matrix - W_matmul = W_matmul.reshape(ofm_ch, ifm_ch * k_h * k_w) - # transpose to get ONNX-compatible [k*k*IFM][OFM] matrix - W_matmul = W_matmul.T - model.set_initializer(weight_name, W_matmul) - - # create new intermediate values - inp_trans_out = helper.make_tensor_value_info( - model.make_new_valueinfo_name(), - TensorProto.FLOAT, - (1, ifm_dim_h, ifm_dim_w, ifm_ch), # NHWC + dw = True + + # reuse conv 
weights for new matmul weights + # conv weights are [OFM][IFM][k][k] + # first convert to [OFM][k_h][k_w][IFM] (to remain compatible with + # finn-hlslib and how it does im2col/sliding window) + W_matmul = W_conv.transpose(0, 2, 3, 1) # W_conv = [OFM, IFM, k_H, k_W] + # reshape into [OFM][k_h*k_w*IFM] matrix + W_matmul = W_matmul.reshape(ofm_ch, ifm_ch * k_h * k_w) + # transpose to get ONNX-compatible [k_h*k_w*IFM][OFM] matrix + W_matmul = W_matmul.T + model.set_initializer(weight_name, W_matmul) + + # create new intermediate values + inp_trans_out = helper.make_tensor_value_info( + model.make_new_valueinfo_name(), + TensorProto.FLOAT, + (1, ifm_dim_h, ifm_dim_w, ifm_ch), # NHWC + ) + graph.value_info.append(inp_trans_out) + inp_trans_out = inp_trans_out.name + model.set_tensor_datatype(inp_trans_out, cnv_input_datatype) + + # k_h=k_w==1: pointwise convolution, thus no im2col needed + need_im2col = any(p != 0 for p in pad) or k_h != 1 or k_w != 1 or stride_h != 1 or stride_w != 1 + + # create new intermediate values + matmul_out = helper.make_tensor_value_info( + model.make_new_valueinfo_name(), TensorProto.FLOAT, (1, ofm_dim_h, ofm_dim_w, ofm_ch) + ) + graph.value_info.append(matmul_out) + matmul_out = matmul_out.name + model.set_tensor_datatype(matmul_out, cnv_output_datatype) + + # create new nodes + # NCHW -> NHWC + inp_trans_node = helper.make_node("Transpose", [cnv_input], [inp_trans_out], perm=[0, 2, 3, 1]) + nodes_to_insert = [inp_trans_node] + + if need_im2col: + im2col_out = helper.make_tensor_value_info( + model.make_new_valueinfo_name(), TensorProto.FLOAT, (1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) ) - graph.value_info.append(inp_trans_out) - inp_trans_out = inp_trans_out.name - model.set_tensor_datatype(inp_trans_out, idt) - - need_im2col = True - if all(p == 0 for p in pad): - padding = 0 - - # k_h=k_w==1: pointwise convolution, thus no im2col needed - if k_h == 1 and k_w == 1 and padding == 0 and stride_h == 1 and stride_w == 1: - need_im2col = 
False - - if need_im2col: - im2col_out = helper.make_tensor_value_info( - model.make_new_valueinfo_name(), - TensorProto.FLOAT, - (1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w), - ) - graph.value_info.append(im2col_out) - im2col_out = im2col_out.name - model.set_tensor_datatype(im2col_out, idt) - - matmul_out = helper.make_tensor_value_info( - model.make_new_valueinfo_name(), - TensorProto.FLOAT, - (1, ofm_dim_h, ofm_dim_w, ofm_ch), + graph.value_info.append(im2col_out) + im2col_out = im2col_out.name + model.set_tensor_datatype(im2col_out, cnv_input_datatype) + im2col_node = helper.make_node( + "Im2Col", [inp_trans_out], [im2col_out], domain="qonnx.custom_op.general", + stride=[stride_h, stride_w], kernel_size=[k_h, k_w], pad_amount=pad, + input_shape="(1,{},{},{})".format(ifm_dim_h, ifm_dim_w, ifm_ch), depthwise=dw, dilations=dilation ) - graph.value_info.append(matmul_out) - matmul_out = matmul_out.name - model.set_tensor_datatype(matmul_out, odt) - - # create new nodes - # NCHW -> NHWC - inp_trans_node = helper.make_node("Transpose", [cnv_input], [inp_trans_out], perm=[0, 2, 3, 1]) - # lower input tensor - matmul_input = inp_trans_out - if need_im2col: - matmul_input = im2col_out - im2col_node = helper.make_node( - "Im2Col", - [inp_trans_out], - [im2col_out], - domain="qonnx.custom_op.general", - stride=[stride_h, stride_w], - kernel_size=[k_h, k_w], - pad_amount=pad, - input_shape="(1,{},{},{})".format(ifm_dim_h, ifm_dim_w, ifm_ch), - depthwise=dw, - dilations=dilation, - ) - - # do matmul - matmul_node = helper.make_node("MatMul", [matmul_input, weight_name], [matmul_out]) - # NHWC -> NCHW - out_trans_node = helper.make_node("Transpose", [matmul_out], [cnv_output], perm=[0, 3, 1, 2]) - # insert nodes where the conv is to preserve topological ordering - graph.node.insert(node_ind, inp_trans_node) - if need_im2col: - graph.node.insert(node_ind + 1, im2col_node) - graph.node.insert(node_ind + 2, matmul_node) - graph.node.insert(node_ind + 3, out_trans_node) - 
else: - graph.node.insert(node_ind + 1, matmul_node) - graph.node.insert(node_ind + 2, out_trans_node) - # remove old nodes - graph.node.remove(n) + nodes_to_insert.append(im2col_node) + + matmul_input = im2col_out if need_im2col else inp_trans_out + # do matmul + matmul_node = helper.make_node("MatMul", [matmul_input, weight_name], [matmul_out]) + # NHWC -> NCHW + out_trans_node = helper.make_node("Transpose", [matmul_out], [cnv_output], perm=[0, 3, 1, 2]) + + nodes_to_insert.extend([matmul_node, out_trans_node]) + + # insert nodes where the conv is to preserve topological ordering + for i, insert_node in enumerate(nodes_to_insert): + graph.node.insert(node_ind + i, insert_node) + graph.node.remove(node) return (model, graph_modified) + + def extract_conv_params(self, model, node): + + cnv_input = node.input[0] + cnv_output = node.output[0] + cnv_input_datatype = model.get_tensor_datatype(cnv_input) + cnv_output_datatype = model.get_tensor_datatype(cnv_output) + k_h = get_by_name(node.attribute, "kernel_shape").ints[0] + k_w = get_by_name(node.attribute, "kernel_shape").ints[1] + stride_h = get_by_name(node.attribute, "strides").ints[0] + stride_w = get_by_name(node.attribute, "strides").ints[1] + group = get_by_name(node.attribute, "group").i + weight_name = node.input[1] + W_conv = model.get_initializer(weight_name) + ifm_ch = model.get_tensor_shape(cnv_input)[1] # assume NCHW + ofm_ch = model.get_tensor_shape(cnv_output)[1] # assume NCHW + ifm_dim_h = model.get_tensor_shape(cnv_input)[2] # assume NCHW + ifm_dim_w = model.get_tensor_shape(cnv_input)[3] # assume NCHW + ofm_dim_h = model.get_tensor_shape(cnv_output)[2] # assume NCHW + ofm_dim_w = model.get_tensor_shape(cnv_output)[3] # assume NCHW + dilation_attr = get_by_name(node.attribute, "dilations") + dilation = dilation_attr.ints if dilation_attr is not None else [1, 1] # default value + auto_pad = get_by_name(node.attribute, "auto_pad") + if auto_pad is not None: + auto_pad = auto_pad.s.decode("utf-8") + 
if auto_pad == "NOTSET": + pad = get_by_name(node.attribute, "pads").ints + else: + pad = auto_pad_to_explicit_padding( + auto_pad, ifm_dim_h, ifm_dim_w, k_h, k_w, stride_h, stride_w, len(model.get_tensor_shape(cnv_input)) - 2 + ) + else: + pad = get_by_name(node.attribute, "pads").ints + + if len(pad) == 2: # only one dimension should be padded + assert ifm_dim_h == 1 or ifm_dim_w == 1, "Padding is assumed to be 1D, image is 2D" + + return (cnv_input, cnv_output, cnv_input_datatype, cnv_output_datatype, k_h, k_w, stride_h, + stride_w, group, weight_name, W_conv, ifm_ch, ofm_ch, ifm_dim_h, ifm_dim_w, ofm_dim_h, + ofm_dim_w, dilation, pad) diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index 78da6213..044da1b2 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -65,7 +65,7 @@ def test_conv_lowering_convmnist(): model = model.transform(InferShapes()) output_dict_p = oxe.execute_onnx(model, input_dict) produced = output_dict_p[output_name] - assert np.isclose(produced, expected).all() + assert np.isclose(produced, expected, rtol=1.e-4).all() def run_conv_lowering_test(idt, k_h, k_w, ifm_dim_h, ifm_dim_w, ifm_ch, stride, padding, dilations, dw, bias): From 17bd6d04bfa2e12740073c147f4f9002d4613753 Mon Sep 17 00:00:00 2001 From: lstasytis Date: Fri, 19 Jul 2024 13:58:11 +0100 Subject: [PATCH 61/83] avoiding mp.Pool in case of using only 1 worker for easier pdb debugging --- src/qonnx/transformation/base.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/qonnx/transformation/base.py b/src/qonnx/transformation/base.py index 75b16aba..34be0780 100644 --- a/src/qonnx/transformation/base.py +++ b/src/qonnx/transformation/base.py @@ -107,8 +107,14 @@ def apply(self, model): old_nodes.append(model.graph.node.pop()) # Execute transformation in parallel - with mp.Pool(self._num_workers) as p: - new_nodes_and_bool = 
p.map(self.applyNodeLocal, old_nodes, chunksize=1) + if self._num_workers > 1: + with mp.Pool(self._num_workers) as p: + new_nodes_and_bool = p.map(self.applyNodeLocal, old_nodes, chunksize=1) + # execute without mp.Pool in case of 1 worker to simplify debugging + else: + new_nodes_and_bool = [self.applyNodeLocal(node) for node in old_nodes] + + # extract nodes and check if the transformation needs to run again # Note: .pop() had initially reversed the node order From 54144168541f07eff6e6d5b8e95454bf6ea846dd Mon Sep 17 00:00:00 2001 From: mdaniowi Date: Tue, 30 Jul 2024 14:55:46 +0100 Subject: [PATCH 62/83] strings attribute support added to CustomOp --- src/qonnx/custom_op/base.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/qonnx/custom_op/base.py b/src/qonnx/custom_op/base.py index bd2545fa..775d9f95 100644 --- a/src/qonnx/custom_op/base.py +++ b/src/qonnx/custom_op/base.py @@ -74,6 +74,8 @@ def get_nodeattr(self, name): if dtype == "s": # decode string attributes ret = ret.decode("utf-8") + elif dtype == "strings": + ret = [x.decode("utf-8") for x in ret] elif dtype == "t": # use numpy helper to convert TensorProto -> np array ret = np_helper.to_array(ret) @@ -123,13 +125,15 @@ def set_nodeattr(self, name, value): # encode string attributes value = value.encode("utf-8") attr.__setattr__(dtype, value) + elif dtype == "strings": + attr.strings[:] = [x.encode("utf-8") for x in value] elif dtype == "floats": # list of floats attr.floats[:] = value elif dtype == "ints": # list of integers attr.ints[:] = value elif dtype == "t": # single tensor attr.t.CopyFrom(value) - elif dtype in ["strings", "tensors", "graphs", "sparse_tensors"]: + elif dtype in ["tensors", "graphs", "sparse_tensors"]: # untested / unsupported attribute types # add testcases & appropriate getters before enabling raise Exception("Attribute type %s not yet supported" % dtype) From ba5c41f527bb7fb045bed8842786af2d17ec75e9 Mon Sep 17 00:00:00 2001 From: mdaniowi Date: 
Thu, 1 Aug 2024 09:30:14 +0100 Subject: [PATCH 63/83] strings attr test added to test_attr.py --- tests/custom_op/test_attr.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/custom_op/test_attr.py b/tests/custom_op/test_attr.py index 9db644d7..6e2527ac 100644 --- a/tests/custom_op/test_attr.py +++ b/tests/custom_op/test_attr.py @@ -37,7 +37,11 @@ class AttrTestOp(CustomOp): def get_nodeattr_types(self): - return {"tensor_attr": ("t", True, np.asarray([]))} + my_attrs = { + "tensor_attr": ("t", True, np.asarray([])), + "strings_attr": ("strings", True, [""]) + } + return my_attrs def make_shape_compatible_op(self, model): param_tensor = self.get_nodeattr("tensor_attr") @@ -70,6 +74,7 @@ def test_attr(): strarr = np.array2string(w, separator=", ") w_str = strarr.replace("[", "{").replace("]", "}").replace(" ", "") tensor_attr_str = f"int8{wshp_str} {w_str}" + strings_attr = ["a", "bc", "def"] input = f""" < @@ -86,9 +91,18 @@ def test_attr(): model = oprs.parse_model(input) model = ModelWrapper(model) inst = getCustomOp(model.graph.node[0]) + w_prod = inst.get_nodeattr("tensor_attr") assert (w_prod == w).all() w = w - 1 inst.set_nodeattr("tensor_attr", w) w_prod = inst.get_nodeattr("tensor_attr") assert (w_prod == w).all() + + inst.set_nodeattr("strings_attr", strings_attr) + strings_attr_prod = inst.get_nodeattr("strings_attr") + assert strings_attr_prod == strings_attr + strings_attr_prod[0] = "test" + inst.set_nodeattr("strings_attr", strings_attr_prod) + assert inst.get_nodeattr("strings_attr") == ["test"] + strings_attr[1:] + From b0a6088e05e9c3c5937de9793ce72a8a01ce10cf Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 9 Aug 2024 09:58:21 +0200 Subject: [PATCH 64/83] [README] updates to dev docs: pip upgrade, linting --- README.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index dd9b6c66..a89baa86 100644 --- a/README.md +++ b/README.md @@ -115,16 
+115,19 @@ Please see the documentation of the `QuantToQCDQ` transformation to learn more a ## Development -Install in editable mode in a venv: +Install in editable mode in a Python virtual environment: ``` git clone https://github.com/fastmachinelearning/qonnx cd qonnx virtualenv -p python3.8 venv source venv/bin/activate +pip install --upgrade pip pip install -e .[qkeras,testing] ``` +### Running tests + Run entire test suite, parallelized across CPU cores: ``` pytest -n auto --verbose @@ -135,6 +138,22 @@ Run a particular test and fall into pdb if it fails: pytest --pdb -k "test_extend_partition.py::test_extend_partition[extend_id1-2]" ``` +### Linting + +If you plan to make pull requests to the qonnx repo, linting will be required. +We use a pre-commit hook to auto-format Python code and check for issues. See https://pre-commit.com/ for installation. Once you have `pre-commit`, +you can install the hooks into your local clone of the qonnx repo: + +``` +cd qonnx +source venv/bin/activate +pip install pre-commit +pre-commit install +``` + +Every time you commit some code, the pre-commit hooks will first run, performing various checks and fixes. In some cases pre-commit won’t be able to +fix the issues and you may have to fix it manually, then run git commit once again. The checks are configured in .pre-commit-config.yaml under the repo root. + ## Why QONNX? The QONNX representation has several advantages compared to other alternatives, as summarized in the table below. 
From 3835a37cbaaf2322163641563225ea9f4a813bc5 Mon Sep 17 00:00:00 2001 From: lstasytis Date: Fri, 9 Aug 2024 11:01:57 +0100 Subject: [PATCH 65/83] ran pre-commit --- src/qonnx/transformation/base.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/qonnx/transformation/base.py b/src/qonnx/transformation/base.py index 34be0780..eaf73ab9 100644 --- a/src/qonnx/transformation/base.py +++ b/src/qonnx/transformation/base.py @@ -114,8 +114,6 @@ def apply(self, model): else: new_nodes_and_bool = [self.applyNodeLocal(node) for node in old_nodes] - - # extract nodes and check if the transformation needs to run again # Note: .pop() had initially reversed the node order run_again = False From 654bf1526075f4335b733cefc64e660e42db4b15 Mon Sep 17 00:00:00 2001 From: mdaniowi Date: Mon, 12 Aug 2024 08:31:07 +0000 Subject: [PATCH 66/83] pre-commit applied --- tests/custom_op/test_attr.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/custom_op/test_attr.py b/tests/custom_op/test_attr.py index 6e2527ac..cde5a321 100644 --- a/tests/custom_op/test_attr.py +++ b/tests/custom_op/test_attr.py @@ -37,10 +37,7 @@ class AttrTestOp(CustomOp): def get_nodeattr_types(self): - my_attrs = { - "tensor_attr": ("t", True, np.asarray([])), - "strings_attr": ("strings", True, [""]) - } + my_attrs = {"tensor_attr": ("t", True, np.asarray([])), "strings_attr": ("strings", True, [""])} return my_attrs def make_shape_compatible_op(self, model): @@ -105,4 +102,3 @@ def test_attr(): strings_attr_prod[0] = "test" inst.set_nodeattr("strings_attr", strings_attr_prod) assert inst.get_nodeattr("strings_attr") == ["test"] + strings_attr[1:] - From 75f8f8c887f613f8f41bdfd84d29997a6db5b8dd Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 15:05:27 +0200 Subject: [PATCH 67/83] run pre-commit on all files --- docs/license.rst | 4 +- notebooks/4_quant_lstm_helper/function.py | 401 +++++++++--------- notebooks/4_quant_lstm_helper/handler.py | 97 ++--- 
.../transformation/lower_convs_to_matmul.py | 63 ++- tests/transformation/test_conv_lowering.py | 2 +- 5 files changed, 293 insertions(+), 274 deletions(-) diff --git a/docs/license.rst b/docs/license.rst index e647e180..a5103f77 100644 --- a/docs/license.rst +++ b/docs/license.rst @@ -1,7 +1,7 @@ .. _license: -======= +======== License -======= +======== .. include:: ../LICENSE diff --git a/notebooks/4_quant_lstm_helper/function.py b/notebooks/4_quant_lstm_helper/function.py index 6ba2e9dd..935bf78a 100644 --- a/notebooks/4_quant_lstm_helper/function.py +++ b/notebooks/4_quant_lstm_helper/function.py @@ -2,26 +2,24 @@ # SPDX-License-Identifier: BSD-3-Clause import torch -from torch.autograd import Function - from brevitas.export.onnx import onnx_export_opset +from torch.autograd import Function AXIS_OPSET = 13 DOMAIN_STRING = "onnx.brevitas" class DequantizeLinearFn(Function): - @staticmethod def symbolic(g, x, input_scale, input_zero_point, input_axis): opset_version = onnx_export_opset() if input_axis is not None and opset_version < AXIS_OPSET: - raise RuntimeError('ONNX Opset 13 is required for per-channel quantization') + raise RuntimeError("ONNX Opset 13 is required for per-channel quantization") elif input_axis is not None and opset_version >= AXIS_OPSET: - ret = g.op('DequantizeLinear', x, input_scale, input_zero_point, axis_i=input_axis) + ret = g.op("DequantizeLinear", x, input_scale, input_zero_point, axis_i=input_axis) else: - ret = g.op('DequantizeLinear', x, input_scale, input_zero_point) + ret = g.op("DequantizeLinear", x, input_scale, input_zero_point) return ret @staticmethod @@ -30,10 +28,9 @@ def forward(ctx, int_x, input_scale, input_zero_point, input_axis): class IntClipFn(Function): - @staticmethod def symbolic(g, int_x, min_int_val, max_int_val): - ret = g.op('Clip', int_x, min_int_val, max_int_val) + ret = g.op("Clip", int_x, min_int_val, max_int_val) return ret @staticmethod @@ -42,116 +39,115 @@ def forward(ctx, int_x, min_int_val, 
max_int_val): class QuantizeLinearFn(Function): - @staticmethod def symbolic(g, x, output_scale, ouput_zero_point, output_dtype, output_axis): opset_version = onnx_export_opset() if output_axis is not None and opset_version < AXIS_OPSET: - raise RuntimeError('ONNX Opset 13 is required for per-channel quantization') + raise RuntimeError("ONNX Opset 13 is required for per-channel quantization") elif output_axis is not None and opset_version >= AXIS_OPSET: - ret = g.op('QuantizeLinear', x, output_scale, ouput_zero_point, axis_i=output_axis) + ret = g.op("QuantizeLinear", x, output_scale, ouput_zero_point, axis_i=output_axis) else: - ret = g.op('QuantizeLinear', x, output_scale, ouput_zero_point) + ret = g.op("QuantizeLinear", x, output_scale, ouput_zero_point) return ret @staticmethod def forward(ctx, x, output_scale, ouput_zero_point, output_dtype, output_axis): return x.type(output_dtype) -class BrevitasQuantLSTMCellFn(Function): - +class BrevitasQuantLSTMCellFn(Function): @staticmethod def symbolic( - g, # args and kwargs passed from _QuantLSTMLayer - quant_input, - quant_hidden_state, - quant_cell_state, - quant_weight_ii, - quant_weight_if, - quant_weight_ic, - quant_weight_io, - quant_weight_hi, - quant_weight_hf, - quant_weight_hc, - quant_weight_ho, - quant_bias_input, - quant_bias_forget, - quant_bias_cell, - quant_bias_output, # Symbolic kwargs passed from BrevitasQuantLSTMLayerHandler - batch_first, - reverse_input, - cifg, # Output quant - output_scale, - output_zero_point, - output_bit_width, - output_narrow_range, - output_signed, - output_rounding_mode, # Cell state quant - cell_state_scale, - cell_state_zero_point, - cell_state_bit_width, - cell_state_narrow_range, - cell_state_signed, - cell_state_rounding_mode, # Input gate accumulator quant - input_acc_scale, - input_acc_zero_point, - input_acc_bit_width, - input_acc_narrow_range, - input_acc_signed, - input_acc_rounding_mode, # Forget gate accumulator quant - forget_acc_scale, - 
forget_acc_zero_point, - forget_acc_bit_width, - forget_acc_narrow_range, - forget_acc_signed, - forget_acc_rounding_mode, # Cell gate accumulator quant - cell_acc_scale, - cell_acc_zero_point, - cell_acc_bit_width, - cell_acc_narrow_range, - cell_acc_signed, - cell_acc_rounding_mode, # Output gate accumulator quant - output_acc_scale, - output_acc_zero_point, - output_acc_bit_width, - output_acc_narrow_range, - output_acc_signed, - output_acc_rounding_mode, # Input gate sigmoid quant - input_sigmoid_scale, - input_sigmoid_zero_point, - input_sigmoid_bit_width, - input_sigmoid_narrow_range, - input_sigmoid_signed, - input_sigmoid_rounding_mode, # Forget gate sigmoid quant - forget_sigmoid_scale, - forget_sigmoid_zero_point, - forget_sigmoid_bit_width, - forget_sigmoid_narrow_range, - forget_sigmoid_signed, - forget_sigmoid_rounding_mode, # Cell gate tanh quant - cell_tanh_scale, - cell_tanh_zero_point, - cell_tanh_bit_width, - cell_tanh_narrow_range, - cell_tanh_signed, - cell_tanh_rounding_mode, # Output gate sigmoid quant - output_sigmoid_scale, - output_sigmoid_zero_point, - output_sigmoid_bit_width, - output_sigmoid_narrow_range, - output_sigmoid_signed, - output_sigmoid_rounding_mode, # Hidden state tanh quant - hidden_state_tanh_scale, - hidden_state_tanh_zero_point, - hidden_state_tanh_bit_width, - hidden_state_tanh_narrow_range, - hidden_state_tanh_signed, - hidden_state_tanh_rounding_mode): + g, # args and kwargs passed from _QuantLSTMLayer + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, # Symbolic kwargs passed from BrevitasQuantLSTMLayerHandler + batch_first, + reverse_input, + cifg, # Output quant + output_scale, + output_zero_point, + output_bit_width, + output_narrow_range, + output_signed, + output_rounding_mode, 
# Cell state quant + cell_state_scale, + cell_state_zero_point, + cell_state_bit_width, + cell_state_narrow_range, + cell_state_signed, + cell_state_rounding_mode, # Input gate accumulator quant + input_acc_scale, + input_acc_zero_point, + input_acc_bit_width, + input_acc_narrow_range, + input_acc_signed, + input_acc_rounding_mode, # Forget gate accumulator quant + forget_acc_scale, + forget_acc_zero_point, + forget_acc_bit_width, + forget_acc_narrow_range, + forget_acc_signed, + forget_acc_rounding_mode, # Cell gate accumulator quant + cell_acc_scale, + cell_acc_zero_point, + cell_acc_bit_width, + cell_acc_narrow_range, + cell_acc_signed, + cell_acc_rounding_mode, # Output gate accumulator quant + output_acc_scale, + output_acc_zero_point, + output_acc_bit_width, + output_acc_narrow_range, + output_acc_signed, + output_acc_rounding_mode, # Input gate sigmoid quant + input_sigmoid_scale, + input_sigmoid_zero_point, + input_sigmoid_bit_width, + input_sigmoid_narrow_range, + input_sigmoid_signed, + input_sigmoid_rounding_mode, # Forget gate sigmoid quant + forget_sigmoid_scale, + forget_sigmoid_zero_point, + forget_sigmoid_bit_width, + forget_sigmoid_narrow_range, + forget_sigmoid_signed, + forget_sigmoid_rounding_mode, # Cell gate tanh quant + cell_tanh_scale, + cell_tanh_zero_point, + cell_tanh_bit_width, + cell_tanh_narrow_range, + cell_tanh_signed, + cell_tanh_rounding_mode, # Output gate sigmoid quant + output_sigmoid_scale, + output_sigmoid_zero_point, + output_sigmoid_bit_width, + output_sigmoid_narrow_range, + output_sigmoid_signed, + output_sigmoid_rounding_mode, # Hidden state tanh quant + hidden_state_tanh_scale, + hidden_state_tanh_zero_point, + hidden_state_tanh_bit_width, + hidden_state_tanh_narrow_range, + hidden_state_tanh_signed, + hidden_state_tanh_rounding_mode, + ): return g.op( - f'{DOMAIN_STRING}::QuantLSTMCell', # Tensors - ## Input values + f"{DOMAIN_STRING}::QuantLSTMCell", # Tensors + # Input values quant_input, quant_hidden_state, 
quant_cell_state, @@ -166,37 +162,37 @@ def symbolic( quant_bias_input, quant_bias_forget, quant_bias_cell, - quant_bias_output, ## Output quant + quant_bias_output, # Output quant output_scale, output_zero_point, - output_bit_width, ## Cell state quant + output_bit_width, # Cell state quant cell_state_scale, cell_state_zero_point, - cell_state_bit_width, ## Input gate accumulator quant + cell_state_bit_width, # Input gate accumulator quant input_acc_scale, input_acc_zero_point, - input_acc_bit_width, ## Forget gate accumulator quant + input_acc_bit_width, # Forget gate accumulator quant forget_acc_scale, forget_acc_zero_point, - forget_acc_bit_width, ## Cell gate accumulator quant + forget_acc_bit_width, # Cell gate accumulator quant cell_acc_scale, cell_acc_zero_point, - cell_acc_bit_width, ## Output gate accumulator quant + cell_acc_bit_width, # Output gate accumulator quant output_acc_scale, output_acc_zero_point, - output_acc_bit_width, ## Input gate sigmoid quant + output_acc_bit_width, # Input gate sigmoid quant input_sigmoid_scale, input_sigmoid_zero_point, - input_sigmoid_bit_width, ## Forget gate sigmoid quant + input_sigmoid_bit_width, # Forget gate sigmoid quant forget_sigmoid_scale, forget_sigmoid_zero_point, - forget_sigmoid_bit_width, ## Cell gate tanh quant + forget_sigmoid_bit_width, # Cell gate tanh quant cell_tanh_scale, cell_tanh_zero_point, - cell_tanh_bit_width, ## Output gate sigmoid quant + cell_tanh_bit_width, # Output gate sigmoid quant output_sigmoid_scale, output_sigmoid_zero_point, - output_sigmoid_bit_width, ## Hidden state tanh quant + output_sigmoid_bit_width, # Hidden state tanh quant hidden_state_tanh_scale, hidden_state_tanh_zero_point, hidden_state_tanh_bit_width, @@ -238,103 +234,102 @@ def symbolic( hidden_state_tanh_signed_i=hidden_state_tanh_signed, hidden_state_tanh_rounding_mode_s=hidden_state_tanh_rounding_mode, # PyTorch requires to specify the number of outputs manually - outputs=3) - + outputs=3, + ) @staticmethod def 
forward( - ctx, # args and kwargs passed from _QuantLSTMLayer - quant_input, - quant_hidden_state, - quant_cell_state, - quant_weight_ii, - quant_weight_if, - quant_weight_ic, - quant_weight_io, - quant_weight_hi, - quant_weight_hf, - quant_weight_hc, - quant_weight_ho, - quant_bias_input, - quant_bias_forget, - quant_bias_cell, - quant_bias_output, # Symbolic kwargs passed from BrevitasQuantLSTMLayerHandler - batch_first, - reverse_input, - cifg, # Output quant - output_scale, - output_zero_point, - output_bit_width, - output_narrow_range, - output_signed, - output_rounding_mode, # Cell state quant - cell_state_scale, - cell_state_zero_point, - cell_state_bit_width, - cell_state_narrow_range, - cell_state_signed, - cell_state_rounding_mode, # Input gate accumulator quant - input_acc_scale, - input_acc_zero_point, - input_acc_bit_width, - input_acc_narrow_range, - input_acc_signed, - input_acc_rounding_mode, # Forget gate accumulator quant - forget_acc_scale, - forget_acc_zero_point, - forget_acc_bit_width, - forget_acc_narrow_range, - forget_acc_signed, - forget_acc_rounding_mode, # Cell gate accumulator quant - cell_acc_scale, - cell_acc_zero_point, - cell_acc_bit_width, - cell_acc_narrow_range, - cell_acc_signed, - cell_acc_rounding_mode, # Output gate accumulator quant - output_acc_scale, - output_acc_zero_point, - output_acc_bit_width, - output_acc_narrow_range, - output_acc_signed, - output_acc_rounding_mode, # Input gate sigmoid quant - input_sigmoid_scale, - input_sigmoid_zero_point, - input_sigmoid_bit_width, - input_sigmoid_narrow_range, - input_sigmoid_signed, - input_sigmoid_rounding_mode, # Forget gate sigmoid quant - forget_sigmoid_scale, - forget_sigmoid_zero_point, - forget_sigmoid_bit_width, - forget_sigmoid_narrow_range, - forget_sigmoid_signed, - forget_sigmoid_rounding_mode, # Cell gate tanh quant - cell_tanh_scale, - cell_tanh_zero_point, - cell_tanh_bit_width, - cell_tanh_narrow_range, - cell_tanh_signed, - cell_tanh_rounding_mode, # Output 
gate sigmoid quant - output_sigmoid_scale, - output_sigmoid_zero_point, - output_sigmoid_bit_width, - output_sigmoid_narrow_range, - output_sigmoid_signed, - output_sigmoid_rounding_mode, # Hidden state tanh quant - hidden_state_tanh_scale, - hidden_state_tanh_zero_point, - hidden_state_tanh_bit_width, - hidden_state_tanh_narrow_range, - hidden_state_tanh_signed, - hidden_state_tanh_rounding_mode): + ctx, # args and kwargs passed from _QuantLSTMLayer + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, # Symbolic kwargs passed from BrevitasQuantLSTMLayerHandler + batch_first, + reverse_input, + cifg, # Output quant + output_scale, + output_zero_point, + output_bit_width, + output_narrow_range, + output_signed, + output_rounding_mode, # Cell state quant + cell_state_scale, + cell_state_zero_point, + cell_state_bit_width, + cell_state_narrow_range, + cell_state_signed, + cell_state_rounding_mode, # Input gate accumulator quant + input_acc_scale, + input_acc_zero_point, + input_acc_bit_width, + input_acc_narrow_range, + input_acc_signed, + input_acc_rounding_mode, # Forget gate accumulator quant + forget_acc_scale, + forget_acc_zero_point, + forget_acc_bit_width, + forget_acc_narrow_range, + forget_acc_signed, + forget_acc_rounding_mode, # Cell gate accumulator quant + cell_acc_scale, + cell_acc_zero_point, + cell_acc_bit_width, + cell_acc_narrow_range, + cell_acc_signed, + cell_acc_rounding_mode, # Output gate accumulator quant + output_acc_scale, + output_acc_zero_point, + output_acc_bit_width, + output_acc_narrow_range, + output_acc_signed, + output_acc_rounding_mode, # Input gate sigmoid quant + input_sigmoid_scale, + input_sigmoid_zero_point, + input_sigmoid_bit_width, + input_sigmoid_narrow_range, + input_sigmoid_signed, + 
input_sigmoid_rounding_mode, # Forget gate sigmoid quant + forget_sigmoid_scale, + forget_sigmoid_zero_point, + forget_sigmoid_bit_width, + forget_sigmoid_narrow_range, + forget_sigmoid_signed, + forget_sigmoid_rounding_mode, # Cell gate tanh quant + cell_tanh_scale, + cell_tanh_zero_point, + cell_tanh_bit_width, + cell_tanh_narrow_range, + cell_tanh_signed, + cell_tanh_rounding_mode, # Output gate sigmoid quant + output_sigmoid_scale, + output_sigmoid_zero_point, + output_sigmoid_bit_width, + output_sigmoid_narrow_range, + output_sigmoid_signed, + output_sigmoid_rounding_mode, # Hidden state tanh quant + hidden_state_tanh_scale, + hidden_state_tanh_zero_point, + hidden_state_tanh_bit_width, + hidden_state_tanh_narrow_range, + hidden_state_tanh_signed, + hidden_state_tanh_rounding_mode, + ): # Tp simplify things, here we are returning the outputs # as if they were already concatenated. Scale/zp/bw are avoided too. # This preserves output shapes but not values. # See _QuantLSTMCell for the actual implementation. quant_outputs = torch.zeros( - quant_input.size(0), - quant_input.size(1), - quant_hidden_state.size(1), - device=quant_hidden_state.device) + quant_input.size(0), quant_input.size(1), quant_hidden_state.size(1), device=quant_hidden_state.device + ) return quant_outputs, quant_hidden_state, quant_cell_state diff --git a/notebooks/4_quant_lstm_helper/handler.py b/notebooks/4_quant_lstm_helper/handler.py index 948eb647..71cbdeb1 100644 --- a/notebooks/4_quant_lstm_helper/handler.py +++ b/notebooks/4_quant_lstm_helper/handler.py @@ -1,32 +1,23 @@ # Copyright (C) 2023, Advanced Micro Devices, Inc. All rights reserved. 
# SPDX-License-Identifier: BSD-3-Clause +import torch from abc import ABC -from copy import copy +from brevitas.export.common.handler.qcdq import ( + DQMixin, + QCDQActQuantProxyHandlerMixin, + QCDQBiasQuantProxyHandlerMixin, + QCDQDecoupledWeightQuantProxyHandlerMixin, + QCDQMixin, + QCDQTruncQuantProxyHandlerMixin, + QCDQWeightQuantProxyHandlerMixin, +) +from brevitas.export.onnx.handler import ONNXBaseHandler, QuantLSTMLayerHandler -import torch -from torch import Tensor - -from brevitas.export.common.handler.base import QuantAxisMixin -from brevitas.export.common.handler.qcdq import DQMixin -from brevitas.export.common.handler.qcdq import QCDQActQuantProxyHandlerMixin -from brevitas.export.common.handler.qcdq import QCDQBiasQuantProxyHandlerMixin -from brevitas.export.common.handler.qcdq import QCDQDecoupledWeightQuantProxyHandlerMixin -from brevitas.export.common.handler.qcdq import QCDQMixin -from brevitas.export.common.handler.qcdq import QCDQTruncQuantProxyHandlerMixin -from brevitas.export.common.handler.qcdq import QCDQWeightQuantProxyHandlerMixin -from brevitas.export.common.handler.qcdq import ZeroPointHandlerMixin -from brevitas.export.onnx.handler import ONNXBaseHandler -from brevitas.export.onnx.handler import QuantLSTMLayerHandler - -from ..function import DequantizeLinearFn -from ..function import IntClipFn -from ..function import QuantizeLinearFn -from ..function import BrevitasQuantLSTMCellFn +from ..function import BrevitasQuantLSTMCellFn, DequantizeLinearFn, IntClipFn, QuantizeLinearFn class StdDQONNXMixin(DQMixin, ABC): - def dequantize_fn(self, x, scale, zero_point, axis): return DequantizeLinearFn.apply(x, scale, zero_point, axis) @@ -40,7 +31,6 @@ def itemize_quantize_scalar_params(self): class StdQCDQONNXMixin(QCDQMixin, StdDQONNXMixin, ABC): - @property def clip_over_integers(self): return True @@ -59,8 +49,8 @@ def int32_dtype(cls): def validate(self, module): self.validate_8b_bit_width(module.bit_width(), le_then=True) - assert 
module.bit_width() > 1., 'Binary quant not supported' - assert module.rounding_mode.upper() == 'ROUND', 'Only round to nearest even supported' + assert module.bit_width() > 1.0, "Binary quant not supported" + assert module.rounding_mode.upper() == "ROUND", "Only round to nearest even supported" def quantize_fn(self, x, scale, zero_point, dtype, axis): return QuantizeLinearFn.apply(x, scale, zero_point, dtype, axis) @@ -69,55 +59,47 @@ def clip_fn(self, x, min_val, max_val): return IntClipFn.apply(x, min_val, max_val) -class StdQCDQONNXWeightQuantProxyHandler(StdQCDQONNXMixin, - QCDQWeightQuantProxyHandlerMixin, - ONNXBaseHandler): +class StdQCDQONNXWeightQuantProxyHandler(StdQCDQONNXMixin, QCDQWeightQuantProxyHandlerMixin, ONNXBaseHandler): pass -class StdQCDQONNXDecoupledWeightQuantProxyHandler(StdQCDQONNXMixin, - QCDQDecoupledWeightQuantProxyHandlerMixin, - ONNXBaseHandler): +class StdQCDQONNXDecoupledWeightQuantProxyHandler( + StdQCDQONNXMixin, QCDQDecoupledWeightQuantProxyHandlerMixin, ONNXBaseHandler +): pass -class StdQCDQONNXActQuantProxyHandler(StdQCDQONNXMixin, - QCDQActQuantProxyHandlerMixin, - ONNXBaseHandler): +class StdQCDQONNXActQuantProxyHandler(StdQCDQONNXMixin, QCDQActQuantProxyHandlerMixin, ONNXBaseHandler): pass -class StdQCDQONNXBiasQuantProxyHandler(StdDQONNXMixin, - QCDQBiasQuantProxyHandlerMixin, - ONNXBaseHandler): +class StdQCDQONNXBiasQuantProxyHandler(StdDQONNXMixin, QCDQBiasQuantProxyHandlerMixin, ONNXBaseHandler): pass -class StdQCDQONNXTruncQuantProxyHandler(StdQCDQONNXMixin, - QCDQTruncQuantProxyHandlerMixin, - ONNXBaseHandler): +class StdQCDQONNXTruncQuantProxyHandler(StdQCDQONNXMixin, QCDQTruncQuantProxyHandlerMixin, ONNXBaseHandler): pass class StdQCDQONNXQuantLSTMLayerHandler(QuantLSTMLayerHandler): - def quantized_cell_symbolic_execution( - self, - quant_input, - quant_hidden_state, - quant_cell_state, - quant_weight_ii, - quant_weight_if, - quant_weight_ic, - quant_weight_io, - quant_weight_hi, - quant_weight_hf, - 
quant_weight_hc, - quant_weight_ho, - quant_bias_input, - quant_bias_forget, - quant_bias_cell, - quant_bias_output): + self, + quant_input, + quant_hidden_state, + quant_cell_state, + quant_weight_ii, + quant_weight_if, + quant_weight_ic, + quant_weight_io, + quant_weight_hi, + quant_weight_hf, + quant_weight_hc, + quant_weight_ho, + quant_bias_input, + quant_bias_forget, + quant_bias_cell, + quant_bias_output, + ): return BrevitasQuantLSTMCellFn.apply( quant_input, quant_hidden_state, @@ -134,7 +116,8 @@ def quantized_cell_symbolic_execution( quant_bias_forget, quant_bias_cell, quant_bias_output, - *self.symbolic_kwargs.values()) + *self.symbolic_kwargs.values() + ) # raise RuntimeError( # "Quantized LSTM cell is not supported for ONNX QCDQ " # "(weights only quantization is). Use export_qonnx.") diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index c5964cf4..49700cd7 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -52,10 +52,27 @@ def apply(self, model): continue # extract parameters of node - (cnv_input, cnv_output, cnv_input_datatype, cnv_output_datatype, - k_h, k_w, stride_h, stride_w, group, weight_name, W_conv, ifm_ch, - ofm_ch, ifm_dim_h, ifm_dim_w, ofm_dim_h, ofm_dim_w, dilation, pad) =\ - self.extract_conv_params(model, node) + ( + cnv_input, + cnv_output, + cnv_input_datatype, + cnv_output_datatype, + k_h, + k_w, + stride_h, + stride_w, + group, + weight_name, + W_conv, + ifm_ch, + ofm_ch, + ifm_dim_h, + ifm_dim_w, + ofm_dim_h, + ofm_dim_w, + dilation, + pad, + ) = self.extract_conv_params(model, node) # if depthwise conv create sparse matrix and variable "dw" # to store as attribute in Im2Col that indicates that the created @@ -122,9 +139,16 @@ def apply(self, model): im2col_out = im2col_out.name model.set_tensor_datatype(im2col_out, cnv_input_datatype) im2col_node = helper.make_node( - "Im2Col", [inp_trans_out], 
[im2col_out], domain="qonnx.custom_op.general", - stride=[stride_h, stride_w], kernel_size=[k_h, k_w], pad_amount=pad, - input_shape="(1,{},{},{})".format(ifm_dim_h, ifm_dim_w, ifm_ch), depthwise=dw, dilations=dilation + "Im2Col", + [inp_trans_out], + [im2col_out], + domain="qonnx.custom_op.general", + stride=[stride_h, stride_w], + kernel_size=[k_h, k_w], + pad_amount=pad, + input_shape="(1,{},{},{})".format(ifm_dim_h, ifm_dim_w, ifm_ch), + depthwise=dw, + dilations=dilation, ) nodes_to_insert.append(im2col_node) @@ -144,7 +168,6 @@ def apply(self, model): return (model, graph_modified) def extract_conv_params(self, model, node): - cnv_input = node.input[0] cnv_output = node.output[0] cnv_input_datatype = model.get_tensor_datatype(cnv_input) @@ -179,6 +202,24 @@ def extract_conv_params(self, model, node): if len(pad) == 2: # only one dimension should be padded assert ifm_dim_h == 1 or ifm_dim_w == 1, "Padding is assumed to be 1D, image is 2D" - return (cnv_input, cnv_output, cnv_input_datatype, cnv_output_datatype, k_h, k_w, stride_h, - stride_w, group, weight_name, W_conv, ifm_ch, ofm_ch, ifm_dim_h, ifm_dim_w, ofm_dim_h, - ofm_dim_w, dilation, pad) + return ( + cnv_input, + cnv_output, + cnv_input_datatype, + cnv_output_datatype, + k_h, + k_w, + stride_h, + stride_w, + group, + weight_name, + W_conv, + ifm_ch, + ofm_ch, + ifm_dim_h, + ifm_dim_w, + ofm_dim_h, + ofm_dim_w, + dilation, + pad, + ) diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index 044da1b2..788d6993 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -65,7 +65,7 @@ def test_conv_lowering_convmnist(): model = model.transform(InferShapes()) output_dict_p = oxe.execute_onnx(model, input_dict) produced = output_dict_p[output_name] - assert np.isclose(produced, expected, rtol=1.e-4).all() + assert np.isclose(produced, expected, rtol=1.0e-4).all() def run_conv_lowering_test(idt, k_h, k_w, 
ifm_dim_h, ifm_dim_w, ifm_ch, stride, padding, dilations, dw, bias): From 8f6661524c57fc4f54fe7758399428a3f1268624 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 15:37:52 +0200 Subject: [PATCH 68/83] [LowerConv] skip convs with non-initialized weights --- src/qonnx/transformation/lower_convs_to_matmul.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index 49700cd7..59ddbce6 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -51,6 +51,10 @@ def apply(self, model): warnings.warn("Found Conv node with bias, skipping") continue + if model.get_initializer(node.input[1]) is None: + warnings.warn("Found Conv node with non-initialized weight, skipping") + continue + # extract parameters of node ( cnv_input, From a92093c32268eae06e09aa0da65a56ddb4bee217 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 15:38:33 +0200 Subject: [PATCH 69/83] [Test] add (failing) quant weight conv testcase for lowering --- tests/transformation/test_conv_lowering.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index 788d6993..eea53c55 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -43,6 +43,14 @@ from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model +from qonnx.util.test import download_model + + +def test_conv_lowering_quant_weights(): + model_name = "FINN-CNV_W2A2" + model = download_model(model_name, return_modelwrapper=True, do_cleanup=True) + model = model.transform(LowerConvsToMatMul()) + assert model.get_nodes_by_op_type("Conv") == [] def test_conv_lowering_convmnist(): 
From 5e5bb5523137df04632b73edf7f7828cabc84ded Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 16:16:48 +0200 Subject: [PATCH 70/83] [LowerConv] support lowering Conv with Quant node on weights --- .../transformation/lower_convs_to_matmul.py | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index 59ddbce6..30ed85ca 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -51,10 +51,6 @@ def apply(self, model): warnings.warn("Found Conv node with bias, skipping") continue - if model.get_initializer(node.input[1]) is None: - warnings.warn("Found Conv node with non-initialized weight, skipping") - continue - # extract parameters of node ( cnv_input, @@ -67,6 +63,7 @@ def apply(self, model): stride_w, group, weight_name, + conv_weight_inp_name, W_conv, ifm_ch, ofm_ch, @@ -78,6 +75,10 @@ def apply(self, model): pad, ) = self.extract_conv_params(model, node) + if W_conv is None: + warnings.warn("Found Conv node with non-initialized weight, skipping") + continue + # if depthwise conv create sparse matrix and variable "dw" # to store as attribute in Im2Col that indicates that the created # Im2Col node belongs to a depthwise convolution @@ -108,6 +109,8 @@ def apply(self, model): # transpose to get ONNX-compatible [k_h*k_w*IFM][OFM] matrix W_matmul = W_matmul.T model.set_initializer(weight_name, W_matmul) + if weight_name != conv_weight_inp_name: + model.set_tensor_shape(conv_weight_inp_name, W_matmul.shape) # create new intermediate values inp_trans_out = helper.make_tensor_value_info( @@ -158,7 +161,7 @@ def apply(self, model): matmul_input = im2col_out if need_im2col else inp_trans_out # do matmul - matmul_node = helper.make_node("MatMul", [matmul_input, weight_name], [matmul_out]) + matmul_node = helper.make_node("MatMul", [matmul_input, 
conv_weight_inp_name], [matmul_out]) # NHWC -> NCHW out_trans_node = helper.make_node("Transpose", [matmul_out], [cnv_output], perm=[0, 3, 1, 2]) @@ -182,7 +185,14 @@ def extract_conv_params(self, model, node): stride_w = get_by_name(node.attribute, "strides").ints[1] group = get_by_name(node.attribute, "group").i weight_name = node.input[1] + conv_weight_inp_name = node.input[1] W_conv = model.get_initializer(weight_name) + if W_conv is None: + # check to see if there is an immediate quantizer node feeding the weight input + w_producer = model.find_producer(weight_name) + if not (w_producer is None) and w_producer.op_type == "Quant": + W_conv = model.get_initializer(w_producer.input[0]) + weight_name = w_producer.input[0] ifm_ch = model.get_tensor_shape(cnv_input)[1] # assume NCHW ofm_ch = model.get_tensor_shape(cnv_output)[1] # assume NCHW ifm_dim_h = model.get_tensor_shape(cnv_input)[2] # assume NCHW @@ -217,6 +227,7 @@ def extract_conv_params(self, model, node): stride_w, group, weight_name, + conv_weight_inp_name, W_conv, ifm_ch, ofm_ch, From c54f142e2c3ec6b9a8c9deffa5072f56cdff5f1b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 16:17:15 +0200 Subject: [PATCH 71/83] [Test] extend quant weight conv testcase, now passing --- tests/transformation/test_conv_lowering.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index eea53c55..b2003a77 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -43,7 +43,7 @@ from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model -from qonnx.util.test import download_model +from qonnx.util.test import download_model, get_golden_in_and_output def test_conv_lowering_quant_weights(): @@ -51,6 +51,11 @@ def 
test_conv_lowering_quant_weights(): model = download_model(model_name, return_modelwrapper=True, do_cleanup=True) model = model.transform(LowerConvsToMatMul()) assert model.get_nodes_by_op_type("Conv") == [] + input_t, golden_t = get_golden_in_and_output(model_name) + input_dict = {model.graph.input[0].name: input_t} + prod_dict = oxe.execute_onnx(model, input_dict) + prod_t = prod_dict[model.graph.output[0].name] + assert (prod_t == golden_t).all() def test_conv_lowering_convmnist(): From 8d1ee1d8fed3e93f5f5e7b6a7e4a260577350e90 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 16:42:46 +0200 Subject: [PATCH 72/83] [LowerConv] support reshaping quant conv weight scales --- .../transformation/lower_convs_to_matmul.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index 30ed85ca..89c08eae 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -64,6 +64,7 @@ def apply(self, model): group, weight_name, conv_weight_inp_name, + conv_weight_q_scale_name, W_conv, ifm_ch, ofm_ch, @@ -110,7 +111,19 @@ def apply(self, model): W_matmul = W_matmul.T model.set_initializer(weight_name, W_matmul) if weight_name != conv_weight_inp_name: + # required for convs with quantized weights model.set_tensor_shape(conv_weight_inp_name, W_matmul.shape) + if conv_weight_q_scale_name is not None: + # required for convs with quantized weights + scale_weight_q = model.get_initializer(conv_weight_q_scale_name) + # scale shape is originally [OFM, IFM, k_H, k_W] + # transpose into [OFM, k_H, k_W, IFM] + scale_weight_q = scale_weight_q.transpose(0, 2, 3, 1) + # reshape into [OFM][k_h*k_w*IFM] matrix + scale_weight_q = scale_weight_q.reshape(ofm_ch, -1) + # transpose to be shape-compatible with weight matrix + scale_weight_q = scale_weight_q.T + model.set_initializer(conv_weight_q_scale_name, 
scale_weight_q) # create new intermediate values inp_trans_out = helper.make_tensor_value_info( @@ -186,6 +199,7 @@ def extract_conv_params(self, model, node): group = get_by_name(node.attribute, "group").i weight_name = node.input[1] conv_weight_inp_name = node.input[1] + conv_weight_q_scale_name = None W_conv = model.get_initializer(weight_name) if W_conv is None: # check to see if there is an immediate quantizer node feeding the weight input @@ -193,6 +207,7 @@ def extract_conv_params(self, model, node): if not (w_producer is None) and w_producer.op_type == "Quant": W_conv = model.get_initializer(w_producer.input[0]) weight_name = w_producer.input[0] + conv_weight_q_scale_name = w_producer.input[1] ifm_ch = model.get_tensor_shape(cnv_input)[1] # assume NCHW ofm_ch = model.get_tensor_shape(cnv_output)[1] # assume NCHW ifm_dim_h = model.get_tensor_shape(cnv_input)[2] # assume NCHW @@ -228,6 +243,7 @@ def extract_conv_params(self, model, node): group, weight_name, conv_weight_inp_name, + conv_weight_q_scale_name, W_conv, ifm_ch, ofm_ch, From 55c2f6faa70f98583aa38175a92ecd88aec960f1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 16 Aug 2024 16:43:31 +0200 Subject: [PATCH 73/83] [Test] add MNv1 for quant conv lowering test --- tests/transformation/test_conv_lowering.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index b2003a77..091619e3 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -46,8 +46,8 @@ from qonnx.util.test import download_model, get_golden_in_and_output -def test_conv_lowering_quant_weights(): - model_name = "FINN-CNV_W2A2" +@pytest.mark.parametrize("model_name", ["FINN-CNV_W2A2", "MobileNetv1-w4a4"]) +def test_conv_lowering_quant_weights(model_name): model = download_model(model_name, return_modelwrapper=True, do_cleanup=True) model = model.transform(LowerConvsToMatMul()) 
assert model.get_nodes_by_op_type("Conv") == [] From a3451c5ef64f8eac40b0fc21247f0f781a907d20 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 18 Aug 2024 22:31:15 +0200 Subject: [PATCH 74/83] [Test] use np.isclose instead of equals for test condition --- tests/transformation/test_conv_lowering.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index 091619e3..c4470e93 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -55,7 +55,7 @@ def test_conv_lowering_quant_weights(model_name): input_dict = {model.graph.input[0].name: input_t} prod_dict = oxe.execute_onnx(model, input_dict) prod_t = prod_dict[model.graph.output[0].name] - assert (prod_t == golden_t).all() + assert np.isclose(prod_t, golden_t).all() def test_conv_lowering_convmnist(): From 100bfdef896c9ca7c31f8a3b681beb66d109f43d Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Aug 2024 09:50:33 +0200 Subject: [PATCH 75/83] [Util] break out test input generation function & allow seed setting --- src/qonnx/util/test.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/qonnx/util/test.py b/src/qonnx/util/test.py index f18e437e..ff0fcb15 100644 --- a/src/qonnx/util/test.py +++ b/src/qonnx/util/test.py @@ -145,15 +145,20 @@ def qonnx_download_model(): clize.run(download_model) -def get_golden_in_and_output(test_model): - model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) - rng = np.random.RandomState(42) +def get_random_input(test_model, seed=42): + rng = np.random.RandomState(seed) input_shape = test_model_details[test_model]["input_shape"] (low, high) = test_model_details[test_model]["input_range"] size = np.prod(np.asarray(input_shape)) input_tensor = rng.uniform(low=low, high=high, size=size) input_tensor = input_tensor.astype(np.float32) input_tensor = 
input_tensor.reshape(input_shape) + return input_tensor + + +def get_golden_in_and_output(test_model, seed=42): + model = download_model(test_model, do_cleanup=True, return_modelwrapper=True) + input_tensor = get_random_input(test_model, seed=seed) input_dict = {model.graph.input[0].name: input_tensor} golden_output_dict = oxe.execute_onnx(model, input_dict) golden_result = golden_output_dict[model.graph.output[0].name] From 032681c5137848531a6c26ee7f05a0b2a8241d68 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Aug 2024 09:51:08 +0200 Subject: [PATCH 76/83] [Lower] fix quant scale conversion, adjust seed random input generated with seed=42 was causing a major difference in Conv_13_out0 for no apparent reason (probably float / numerical related) --- .../transformation/lower_convs_to_matmul.py | 19 +++++++++++-------- tests/transformation/test_conv_lowering.py | 6 +++--- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/qonnx/transformation/lower_convs_to_matmul.py b/src/qonnx/transformation/lower_convs_to_matmul.py index 89c08eae..81f0b713 100644 --- a/src/qonnx/transformation/lower_convs_to_matmul.py +++ b/src/qonnx/transformation/lower_convs_to_matmul.py @@ -86,6 +86,8 @@ def apply(self, model): dw = False if group == ifm_ch and ofm_ch == ifm_ch: W_sparse = np.zeros((ofm_ch, ifm_ch, k_h, k_w)) # (OFM, IFM, k_H, k_W) + # TODO: if the convolution is quantized with a non-zero zeropoint we + # should be using the zeropoint value here instead of np.zeros for ch in range(ifm_ch): W_sparse[ch][ch] = W_conv[ch][0] # W_conv = [OFM, IFM, k_H, k_W] W_conv = W_sparse.astype(np.float32) @@ -116,14 +118,15 @@ def apply(self, model): if conv_weight_q_scale_name is not None: # required for convs with quantized weights scale_weight_q = model.get_initializer(conv_weight_q_scale_name) - # scale shape is originally [OFM, IFM, k_H, k_W] - # transpose into [OFM, k_H, k_W, IFM] - scale_weight_q = scale_weight_q.transpose(0, 2, 3, 1) - # reshape into 
[OFM][k_h*k_w*IFM] matrix - scale_weight_q = scale_weight_q.reshape(ofm_ch, -1) - # transpose to be shape-compatible with weight matrix - scale_weight_q = scale_weight_q.T - model.set_initializer(conv_weight_q_scale_name, scale_weight_q) + if scale_weight_q.ndim > 0: + # scale shape is originally [OFM, IFM, k_H, k_W] + # transpose into [OFM, k_H, k_W, IFM] + scale_weight_q = scale_weight_q.transpose(0, 2, 3, 1) + # reshape into [OFM][k_h*k_w*IFM] matrix + scale_weight_q = scale_weight_q.reshape(ofm_ch, -1) + # transpose to be shape-compatible with weight matrix + scale_weight_q = scale_weight_q.T + model.set_initializer(conv_weight_q_scale_name, scale_weight_q) # create new intermediate values inp_trans_out = helper.make_tensor_value_info( diff --git a/tests/transformation/test_conv_lowering.py b/tests/transformation/test_conv_lowering.py index c4470e93..0da57ea3 100644 --- a/tests/transformation/test_conv_lowering.py +++ b/tests/transformation/test_conv_lowering.py @@ -49,13 +49,13 @@ @pytest.mark.parametrize("model_name", ["FINN-CNV_W2A2", "MobileNetv1-w4a4"]) def test_conv_lowering_quant_weights(model_name): model = download_model(model_name, return_modelwrapper=True, do_cleanup=True) + input_t, golden_t = get_golden_in_and_output(model_name, seed=0) + input_dict = {model.graph.input[0].name: input_t} model = model.transform(LowerConvsToMatMul()) assert model.get_nodes_by_op_type("Conv") == [] - input_t, golden_t = get_golden_in_and_output(model_name) - input_dict = {model.graph.input[0].name: input_t} prod_dict = oxe.execute_onnx(model, input_dict) prod_t = prod_dict[model.graph.output[0].name] - assert np.isclose(prod_t, golden_t).all() + assert np.isclose(golden_t, prod_t, atol=1e-04).all() def test_conv_lowering_convmnist(): From 3c870d698bb6ae9d18ee5d8f875ad9dcd95c3a9c Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 9 Sep 2024 11:38:59 +0300 Subject: [PATCH 77/83] [Util] add accumulator-aware quantized (A2Q) CIFAR-10 models --- 
src/qonnx/util/test.py | 71 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/src/qonnx/util/test.py b/src/qonnx/util/test.py index ff0fcb15..84f83134 100644 --- a/src/qonnx/util/test.py +++ b/src/qonnx/util/test.py @@ -37,6 +37,76 @@ # utility functions to fetch models and data for # testing various qonnx transformations +a2q_rn18_preproc_mean = np.asarray([0.491, 0.482, 0.447], dtype=np.float32) +a2q_rn18_preproc_std = np.asarray([0.247, 0.243, 0.262], dtype=np.float32) +a2q_rn18_int_range = (0, 255) +a2q_rn18_iscale = 1 / 255 +a2q_rn18_rmin = (a2q_rn18_int_range[0] * a2q_rn18_iscale - a2q_rn18_preproc_mean) / a2q_rn18_preproc_std +a2q_rn18_rmax = (a2q_rn18_int_range[1] * a2q_rn18_iscale - a2q_rn18_preproc_mean) / a2q_rn18_preproc_std +a2q_rn18_scale = (1 / a2q_rn18_preproc_std) * a2q_rn18_iscale +a2q_rn18_bias = -a2q_rn18_preproc_mean * a2q_rn18_preproc_std +a2q_rn18_common = { + "input_shape": (1, 3, 32, 32), + "input_range": (a2q_rn18_rmin, a2q_rn18_rmax), + "int_range": a2q_rn18_int_range, + "scale": a2q_rn18_scale, + "bias": a2q_rn18_bias, +} +a2q_rn18_urlbase = "https://github.com/fastmachinelearning/qonnx_model_zoo/releases/download/a2q-20240905/" + +a2q_model_details = { + "rn18_w4a4_a2q_16b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q 16-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_16b-d4bfa990.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_15b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q 15-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_15b-eeca8ac2.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_14b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q 14-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_14b-563cf426.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_13b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q 13-bit accumulators", + "url": a2q_rn18_urlbase + 
"quant_resnet18_w4a4_a2q_13b-d3cae293.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_12b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q 12-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_12b-fb3a0f8a.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_plus_16b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q+ 16-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_plus_16b-09e47feb.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_plus_15b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q+ 15-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_plus_15b-10e7bc83.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_plus_14b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q+ 14-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_plus_14b-8db8c78c.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_plus_13b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q+ 13-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_plus_13b-f57b05ce.onnx", + **a2q_rn18_common, + }, + "rn18_w4a4_a2q_plus_12b": { + "description": "4-bit ResNet-18 on CIFAR-10, A2Q+ 12-bit accumulators", + "url": a2q_rn18_urlbase + "quant_resnet18_w4a4_a2q_plus_12b-1e2aca29.onnx", + **a2q_rn18_common, + }, +} + test_model_details = { "FINN-CNV_W2A2": { "description": "2-bit VGG-10-like CNN on CIFAR-10", @@ -116,6 +186,7 @@ "input_shape": (1, 3, 224, 224), "input_range": (0, 1), }, + **a2q_model_details, } From ee7464f4c8d68a01acec3617fb3758828867c30b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 9 Sep 2024 12:22:09 +0300 Subject: [PATCH 78/83] [Test] correctly handle multi-channel input ranges in change_batchsize --- tests/transformation/test_change_batchsize.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/transformation/test_change_batchsize.py b/tests/transformation/test_change_batchsize.py index 08d7c20f..e6c76da1 100644 --- 
a/tests/transformation/test_change_batchsize.py +++ b/tests/transformation/test_change_batchsize.py @@ -45,6 +45,11 @@ def test_change_batchsize(test_model): batch_size = 10 old_ishape = test_details["input_shape"] imin, imax = test_details["input_range"] + # some models spec per-channel ranges, be conservative for those + if isinstance(imin, np.ndarray): + imin = imin.max() + if isinstance(imax, np.ndarray): + imax = imax.min() model = download_model(test_model=test_model, do_cleanup=True, return_modelwrapper=True) iname = model.graph.input[0].name oname = model.graph.output[0].name From 8694a6de703e432dcedd67a4e15e88d914da8c08 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 9 Sep 2024 12:26:09 +0300 Subject: [PATCH 79/83] [Util] handle per-channel ranges in get_random_input --- src/qonnx/util/test.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/qonnx/util/test.py b/src/qonnx/util/test.py index 84f83134..47b4378f 100644 --- a/src/qonnx/util/test.py +++ b/src/qonnx/util/test.py @@ -220,6 +220,11 @@ def get_random_input(test_model, seed=42): rng = np.random.RandomState(seed) input_shape = test_model_details[test_model]["input_shape"] (low, high) = test_model_details[test_model]["input_range"] + # some models spec per-channel ranges, be conservative for those + if isinstance(low, np.ndarray): + low = low.max() + if isinstance(high, np.ndarray): + high = high.min() size = np.prod(np.asarray(input_shape)) input_tensor = rng.uniform(low=low, high=high, size=size) input_tensor = input_tensor.astype(np.float32) From 8bad7e71806d6c611c68fe00ac6007b076b08b5f Mon Sep 17 00:00:00 2001 From: jvreca Date: Thu, 22 Aug 2024 17:06:23 +0200 Subject: [PATCH 80/83] Added Identity node to the removal list --- src/qonnx/transformation/remove.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/qonnx/transformation/remove.py b/src/qonnx/transformation/remove.py index 980e80c1..0f7f38f7 100644 --- a/src/qonnx/transformation/remove.py +++ 
b/src/qonnx/transformation/remove.py @@ -138,5 +138,9 @@ def apply(self, model): remove_node_and_rewire(model, n) graph_modified = True break + elif n.op_type == "Identity": + remove_node_and_rewire(model, n) + graph_modified = True + break model = model.transform(InferShapes()) return (model, graph_modified) From 71ee78062ebdb5ae58dfbcc644d97d07dff3beb1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 12 Sep 2024 10:42:11 +0300 Subject: [PATCH 81/83] [Test] add Identity op case to test_remove_identity_ops --- tests/transformation/test_remove_identity_ops.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/transformation/test_remove_identity_ops.py b/tests/transformation/test_remove_identity_ops.py index ed34ffe6..d9e92c73 100644 --- a/tests/transformation/test_remove_identity_ops.py +++ b/tests/transformation/test_remove_identity_ops.py @@ -51,25 +51,30 @@ def insert_identity_op(model, op, as_first_node, approx): val = np.asarray([zero_val], dtype=np.float32) elif op in ["Mul", "Div"]: val = np.asarray([one_val], dtype=np.float32) + elif op in ["Identity"]: + val = None else: return graph = model.graph + if val is None: + inplist = ["inp" if as_first_node else "div_out"] + else: + model.set_initializer("value", val) + inplist = ["inp" if as_first_node else "div_out", "value"] + identity_node = helper.make_node(op, inplist, ["ident_out"]) if as_first_node: - identity_node = helper.make_node(op, ["inp", "value"], ["ident_out"]) graph.node.insert(0, identity_node) graph.node[1].input[0] = "ident_out" else: - identity_node = helper.make_node(op, ["div_out", "value"], ["ident_out"]) graph.node.insert(3, identity_node) graph.node[-1].input[0] = "ident_out" - model.set_initializer("value", val) return model # identity operations to be inserted -@pytest.mark.parametrize("op", ["Add", "Sub", "Mul", "Div"]) +@pytest.mark.parametrize("op", ["Add", "Sub", "Mul", "Div", "Identity"]) @pytest.mark.parametrize("approx", [False, True]) 
@pytest.mark.parametrize("as_first_node", [False, True]) def test_remove_identity_ops(op, as_first_node, approx): From 0a4d5c5315082582d3a646e9504fe129b4ff0fd6 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 12 Sep 2024 12:01:18 +0300 Subject: [PATCH 82/83] [ModelWrapper] add top-level checks for fork/join checks --- src/qonnx/core/modelwrapper.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/qonnx/core/modelwrapper.py b/src/qonnx/core/modelwrapper.py index b95c6a33..779bb8f2 100644 --- a/src/qonnx/core/modelwrapper.py +++ b/src/qonnx/core/modelwrapper.py @@ -429,14 +429,24 @@ def is_fork_node(self, node): """Checks if the given node is a fork, that is, the node has multiple direct successors""" direct_successors = self.find_direct_successors(node) - is_fork = False if direct_successors is None else (len(direct_successors) > 1) + # if the node output is also wired to a top-level output, it is still + # a fork with only 1 direct successor + if node.output[0] in [x.name for x in self.graph.output]: + is_fork = False if direct_successors is None else (len(direct_successors) > 0) + else: + is_fork = False if direct_successors is None else (len(direct_successors) > 1) return is_fork def is_join_node(self, node): """Checks if the given node is a join, that is, the node has multiple direct predecessors""" direct_predecessors = self.find_direct_predecessors(node) - is_join = False if direct_predecessors is None else (len(direct_predecessors) > 1) + # if the node input is also wired to a top-level input, it is still + # a fork with only 1 direct predecessor + if node.input[0] in [x.name for x in self.graph.input]: + is_join = False if direct_predecessors is None else (len(direct_predecessors) > 0) + else: + is_join = False if direct_predecessors is None else (len(direct_predecessors) > 1) return is_join def get_all_tensor_names(self): From 2d0934111ad24928aa3a613f7262b835a0d135c3 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu 
Date: Thu, 12 Sep 2024 12:01:54 +0300 Subject: [PATCH 83/83] [Test] add fork cases to RemoveIdentityOps test --- .../transformation/test_remove_identity_ops.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/transformation/test_remove_identity_ops.py b/tests/transformation/test_remove_identity_ops.py index d9e92c73..cfe01a82 100644 --- a/tests/transformation/test_remove_identity_ops.py +++ b/tests/transformation/test_remove_identity_ops.py @@ -77,7 +77,8 @@ def insert_identity_op(model, op, as_first_node, approx): @pytest.mark.parametrize("op", ["Add", "Sub", "Mul", "Div", "Identity"]) @pytest.mark.parametrize("approx", [False, True]) @pytest.mark.parametrize("as_first_node", [False, True]) -def test_remove_identity_ops(op, as_first_node, approx): +@pytest.mark.parametrize("fork_before_id", [False, True]) +def test_remove_identity_ops(op, as_first_node, approx, fork_before_id): # set up onnx model inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4, 1, 1]) mul = helper.make_tensor_value_info("mul", TensorProto.FLOAT, []) @@ -114,14 +115,16 @@ def test_remove_identity_ops(op, as_first_node, approx): model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) idict = {"inp": inp_values} - odict = oxe.execute_onnx(model, idict) - out_before = odict["outp"] + odict_before = oxe.execute_onnx(model, idict) num_of_nodes_before = len(model.graph.node) - + if fork_before_id and not as_first_node: + divout_vi = model.get_tensor_valueinfo("div_out") + model.graph.output.append(divout_vi) + model.graph.value_info.remove(divout_vi) model = model.transform(RemoveIdentityOps()) num_of_nodes_after = len(model.graph.node) assert num_of_nodes_before - 1 == num_of_nodes_after - odict = oxe.execute_onnx(model, idict) - out_after = odict["outp"] - assert np.isclose(out_before, out_after, atol=1e-3).all() + odict_after = oxe.execute_onnx(model, idict) + outputs_same = [np.isclose(odict_before[tname], 
odict_after[tname], atol=1e-3).all() for tname in odict_before.keys()] + assert all(outputs_same)