From 649776f79a31d2110eefd423778f438168ef897c Mon Sep 17 00:00:00 2001 From: Mikolaj Blaz Date: Tue, 14 Sep 2021 06:03:36 -0700 Subject: [PATCH] [QuartzNet/PyT] Release QuartzNet model --- .../SpeechRecognition/QuartzNet/.gitignore | 9 + .../SpeechRecognition/QuartzNet/Dockerfile | 30 + PyTorch/SpeechRecognition/QuartzNet/LICENSE | 203 ++++++ PyTorch/SpeechRecognition/QuartzNet/NOTICE | 5 + PyTorch/SpeechRecognition/QuartzNet/README.md | 674 ++++++++++++++++++ .../QuartzNet/common/__init__.py | 0 .../QuartzNet/common/audio.py | 247 +++++++ .../QuartzNet/common/dali/__init__.py | 0 .../QuartzNet/common/dali/data_loader.py | 182 +++++ .../QuartzNet/common/dali/iterator.py | 183 +++++ .../QuartzNet/common/dali/pipeline.py | 343 +++++++++ .../QuartzNet/common/dataset.py | 234 ++++++ .../QuartzNet/common/features.py | 301 ++++++++ .../QuartzNet/common/helpers.py | 276 +++++++ .../QuartzNet/common/metrics.py | 59 ++ .../QuartzNet/common/optimizers.py | 269 +++++++ .../QuartzNet/common/sampler.py | 128 ++++ .../QuartzNet/common/tb_dllogger.py | 173 +++++ .../QuartzNet/common/text/LICENSE | 19 + .../QuartzNet/common/text/__init__.py | 32 + .../QuartzNet/common/text/cleaners.py | 107 +++ .../QuartzNet/common/text/numbers.py | 99 +++ .../QuartzNet/common/text/symbols.py | 19 + .../QuartzNet/common/utils.py | 20 + ...uartznet15x5_speedp-online-1.15_speca.yaml | 151 ++++ ...15x5_speedp-online-1.15_speca_drop0.2.yaml | 151 ++++ .../SpeechRecognition/QuartzNet/img/model.png | Bin 0 -> 120169 bytes .../QuartzNet/img/tcs_conv.png | Bin 0 -> 37934 bytes .../SpeechRecognition/QuartzNet/inference.py | 390 ++++++++++ .../platform/DGX2_QuartzNet_AMP_16GPU.sh | 10 + .../platform/DGX2_QuartzNet_AMP_8GPU.sh | 10 + .../platform/DGX2_QuartzNet_FP32_16GPU.sh | 10 + .../platform/DGX2_QuartzNet_FP32_8GPU.sh | 10 + .../platform/DGXA100_QuartzNet_AMP_8GPU.sh | 10 + .../platform/DGXA100_QuartzNet_TF32_8GPU.sh | 10 + .../QuartzNet/quartznet/config.py | 140 ++++ .../QuartzNet/quartznet/model.py | 391 ++++++++++ .../QuartzNet/requirements.txt | 6 + .../QuartzNet/scripts/docker/build.sh | 3 + .../QuartzNet/scripts/docker/launch.sh | 24 + .../QuartzNet/scripts/download_librispeech.sh | 32 + .../QuartzNet/scripts/evaluation.sh | 21 + .../QuartzNet/scripts/inference.sh | 63 ++ .../QuartzNet/scripts/inference_benchmark.sh | 37 + .../scripts/preprocess_librispeech.sh | 51 ++ .../QuartzNet/scripts/train.sh | 100 +++ .../QuartzNet/scripts/train_benchmark.sh | 57 ++ PyTorch/SpeechRecognition/QuartzNet/train.py | 558 +++++++++++++++ .../QuartzNet/utils/__init__.py | 0 .../QuartzNet/utils/convert_librispeech.py | 81 +++ .../QuartzNet/utils/download_librispeech.py | 72 ++ .../QuartzNet/utils/download_utils.py | 71 ++ .../QuartzNet/utils/inference_librispeech.csv | 5 + .../QuartzNet/utils/librispeech.csv | 8 + .../QuartzNet/utils/preprocessing_utils.py | 76 ++ 55 files changed, 6160 insertions(+) create mode 100755 PyTorch/SpeechRecognition/QuartzNet/.gitignore create mode 100644 PyTorch/SpeechRecognition/QuartzNet/Dockerfile create mode 100644 PyTorch/SpeechRecognition/QuartzNet/LICENSE create mode 100644 PyTorch/SpeechRecognition/QuartzNet/NOTICE create mode 100644 PyTorch/SpeechRecognition/QuartzNet/README.md create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/__init__.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/audio.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/dali/__init__.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/dali/data_loader.py create mode 100644 
PyTorch/SpeechRecognition/QuartzNet/common/dali/iterator.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/dataset.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/features.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/helpers.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/metrics.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/optimizers.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/sampler.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/tb_dllogger.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/text/LICENSE create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/text/__init__.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/text/cleaners.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/text/numbers.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/text/symbols.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/common/utils.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca.yaml create mode 100644 PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca_drop0.2.yaml create mode 100644 PyTorch/SpeechRecognition/QuartzNet/img/model.png create mode 100644 PyTorch/SpeechRecognition/QuartzNet/img/tcs_conv.png create mode 100644 PyTorch/SpeechRecognition/QuartzNet/inference.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_16GPU.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_8GPU.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_16GPU.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_8GPU.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_AMP_8GPU.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_TF32_8GPU.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/quartznet/config.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/quartznet/model.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/requirements.txt create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/docker/build.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/docker/launch.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/download_librispeech.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/evaluation.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/inference.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/inference_benchmark.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/preprocess_librispeech.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/train.sh create mode 100755 PyTorch/SpeechRecognition/QuartzNet/scripts/train_benchmark.sh create mode 100644 PyTorch/SpeechRecognition/QuartzNet/train.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/utils/__init__.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/utils/download_librispeech.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/utils/download_utils.py create mode 100644 PyTorch/SpeechRecognition/QuartzNet/utils/inference_librispeech.csv create mode 100644 
PyTorch/SpeechRecognition/QuartzNet/utils/librispeech.csv create mode 100644 PyTorch/SpeechRecognition/QuartzNet/utils/preprocessing_utils.py diff --git a/PyTorch/SpeechRecognition/QuartzNet/.gitignore b/PyTorch/SpeechRecognition/QuartzNet/.gitignore new file mode 100755 index 00000000..bb051c47 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/.gitignore @@ -0,0 +1,9 @@ +__pycache__ +*.pt +results/ +datasets/ +checkpoints/ + +*.swp +*.swo +*.swn diff --git a/PyTorch/SpeechRecognition/QuartzNet/Dockerfile b/PyTorch/SpeechRecognition/QuartzNet/Dockerfile new file mode 100644 index 00000000..fae7ac48 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/Dockerfile @@ -0,0 +1,30 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ARG FROM_IMAGE_NAME=nvcr.io/nvidia/pytorch:21.07-py3 +FROM ${FROM_IMAGE_NAME} + +RUN apt update && apt install -y libsndfile1 && apt install -y sox && rm -rf /var/lib/apt/lists/* + +WORKDIR /workspace/quartznet + +# Install requirements (do this first for better caching) +COPY requirements.txt . +RUN conda install -y pyyaml==5.4.1 +RUN pip install --disable-pip-version-check -U -r requirements.txt + +RUN pip install --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda110==1.2.0 + +# Copy rest of files +COPY . . diff --git a/PyTorch/SpeechRecognition/QuartzNet/LICENSE b/PyTorch/SpeechRecognition/QuartzNet/LICENSE new file mode 100644 index 00000000..2ae5b819 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/LICENSE @@ -0,0 +1,203 @@ + Except where otherwise noted, the following license applies to all files in this repo. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 NVIDIA Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/PyTorch/SpeechRecognition/QuartzNet/NOTICE b/PyTorch/SpeechRecognition/QuartzNet/NOTICE new file mode 100644 index 00000000..10a15ab3 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/NOTICE @@ -0,0 +1,5 @@ +QuartzNet in PyTorch + +This repository includes source code (in "common/") from: +* https://github.com/keithito/tacotron and https://github.com/ryanleary/patter licensed under MIT license. + diff --git a/PyTorch/SpeechRecognition/QuartzNet/README.md b/PyTorch/SpeechRecognition/QuartzNet/README.md new file mode 100644 index 00000000..5f9e1990 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/README.md @@ -0,0 +1,674 @@ +# QuartzNet For PyTorch + +This repository provides a script and recipe to train the QuartzNet model to achieve state-of-the-art accuracy. The content of this repository is tested and maintained by NVIDIA. 
+ +## Table Of Contents + +- [Model overview](#model-overview) + * [Model architecture](#model-architecture) + * [Default configuration](#default-configuration) + * [Feature support matrix](#feature-support-matrix) + * [Features](#features) + * [Mixed precision training](#mixed-precision-training) + * [Enabling mixed precision](#enabling-mixed-precision) + * [Enabling TF32](#enabling-tf32) + * [Glossary](#glossary) +- [Setup](#setup) + * [Requirements](#requirements) +- [Quick Start Guide](#quick-start-guide) +- [Advanced](#advanced) + * [Scripts and sample code](#scripts-and-sample-code) + * [Parameters](#parameters) + * [Command-line options](#command-line-options) + * [Getting the data](#getting-the-data) + * [Dataset guidelines](#dataset-guidelines) + * [Multi-dataset](#multi-dataset) + * [Training process](#training-process) + * [Inference process](#inference-process) +- [Performance](#performance) + * [Benchmarking](#benchmarking) + * [Training performance benchmark](#training-performance-benchmark) + * [Inference performance benchmark](#inference-performance-benchmark) + * [Results](#results) + * [Training accuracy results](#training-accuracy-results) + * [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb) + * [Training stability test](#training-stability-test) + * [Training performance results](#training-performance-results) + * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb) + * [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb) + * [Inference performance results](#inference-performance-results) + * [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb) + * [Inference performance: NVIDIA DGX-2 (1x V100 32GB)](#inference-performance-nvidia-dgx-2-1x-v100-32gb) +- [Release notes](#release-notes) + * [Changelog](#changelog) + * [Known issues](#known-issues) + +## Model overview + +This repository provides an implementation of the QuartzNet model in PyTorch from the paper [QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions](https://arxiv.org/pdf/1910.10261). +The QuartzNet model is an end-to-end neural acoustic model for automatic speech recognition (ASR), that provides high accuracy at a low memory footprint. The QuartzNet architecture of convolutional layers was designed to facilitate fast GPU inference, by allowing whole sub-blocks to be fused into a single GPU kernel. This is important for meeting strict real-time requirements of ASR systems in deployment. + + +This repository is a PyTorch implementation of QuartzNet and provides scripts to train the QuartzNet 10x5 model from scratch on the [LibriSpeech](http://www.openslr.org/12) dataset to achieve the greedy decoding results improved upon the original paper. +The repository is self-contained and includes data preparation scripts, training, and inference scripts. +Both training and inference scripts offer the option to use Automatic Mixed Precision (AMP) to benefit from Tensor Cores for better performance. + +In addition to providing the hyperparameters for training a model checkpoint, we publish a thorough inference analysis across different NVIDIA GPU platforms, for example, DGX-2, NVIDIA A100 GPU, and T4. + +This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. 
Therefore, researchers can get results 1.4x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
+
+### Model architecture
+
+QuartzNet is an end-to-end neural acoustic model based on efficient, time-channel separable convolutions (Figure 1).
+In the audio processing stage, each frame is transformed into mel-scale spectrogram features, which the acoustic model takes as input and outputs a probability distribution over the vocabulary for each frame.
+

+![QuartzNet model architecture](./img/model.png)
+
+*Figure 1. Architecture of QuartzNet ([source](https://arxiv.org/abs/1910.10261))*
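+The front-end described above can be sketched in a few lines of Python. This is not the repository's DALI pipeline (see `common/dali/` and `common/features.py`); it is a minimal librosa-based illustration, and the window, hop, and filterbank sizes below are assumed values rather than the exact training configuration.
+
+```python
+# Illustrative audio front-end: waveform -> log-mel filterbank frames.
+# Parameter values (window/hop length, number of mel bins) are assumptions
+# for illustration; the actual pipeline is configured via the YAML configs.
+import librosa
+import numpy as np
+
+def log_mel_features(wav_path, sample_rate=16000, win_s=0.02, hop_s=0.01, n_mels=64):
+    samples, sr = librosa.load(wav_path, sr=sample_rate)  # load and resample to 16 kHz
+    mel = librosa.feature.melspectrogram(
+        y=samples, sr=sr,
+        n_fft=int(win_s * sr), hop_length=int(hop_s * sr), n_mels=n_mels)
+    return np.log(mel + 1e-5)  # shape: [n_mels, num_frames]
+
+# feats = log_mel_features("utterance.wav")  # the acoustic model consumes these frames
+```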

+ +### Default configuration + +The following features were implemented in this model: +* GPU-supported feature extraction with data augmentation options [SpecAugment](https://arxiv.org/abs/1904.08779) and [Cutout](https://arxiv.org/pdf/1708.04552.pdf) using the DALI library +* offline and online [Speed Perturbation](https://www.danielpovey.com/files/2015_interspeech_augmentation.pdf) using the DALI library +* data-parallel multi-GPU training and evaluation +* AMP with dynamic loss scaling for Tensor Core training +* FP16 inference + +### Feature support matrix + +| **Feature** | **QuartzNet** | +|---------------|---------------| +|[Apex AMP](https://nvidia.github.io/apex/amp.html) | Yes | +|[DALI](https://docs.nvidia.com/deeplearning/dali/release-notes/index.html) | Yes | + +#### Features + +**DALI** +NVIDIA Data Loading Library (DALI) is a collection of highly optimized building blocks, and an execution engine, to accelerate the pre-processing of the input data for deep learning applications. DALI provides both the performance and the flexibility for accelerating different data pipelines as a single library. This single library can then be easily integrated into different deep learning training and inference applications. For details, see example sources in this repository or see the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/index.html). + +**Automatic Mixed Precision (AMP)** +Computation graphs can be modified by PyTorch on runtime to support mixed precision training. A detailed explanation of mixed precision can be found in the next section. + +### Mixed precision training + +Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps: +1. Porting the model to use the FP16 data type where appropriate. +2. Adding loss scaling to preserve small gradient values. + +For information about: +- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. +- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. +- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). + +#### Enabling mixed precision + +For training, mixed precision can be enabled by setting the flag: `train.py --amp`. 
When using bash helper scripts, mixed precision can be enabled with the environment variable `AMP=true`, for example, `AMP=true bash scripts/train.sh`, `AMP=true bash scripts/inference.sh`, etc.
+
+#### Enabling TF32
+
+TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
+
+TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
+
+For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
+
+TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
+
+### Glossary
+
+**Time-channel separable (TCS) convolution**
+A module composed mainly of two convolutional layers: a 1D depthwise convolutional layer,
+and a pointwise convolutional layer (Figure 2). The former operates across K time frames, and the latter across all channels. By decoupling the time and channel axes, the separable module uses fewer parameters and computes its result faster than it otherwise would.
+

+![Time-channel separable (TCS) convolutional module](./img/tcs_conv.png)
+
+*Figure 2. Time-channel separable (TCS) convolutional module: (a) basic design, (b) TCS with a group shuffle layer, added to increase cross-group interchange*
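+
+For readers who prefer code to diagrams, the basic TCS design in Figure 2(a) can be expressed as a small PyTorch module. The sketch below is an illustrative restatement of the idea (a depthwise convolution over time followed by a pointwise convolution over channels), not the exact block from `quartznet/model.py`; the channel counts and kernel size are placeholders.
+
+```python
+import torch
+import torch.nn as nn
+
+class TCSConv1d(nn.Module):
+    """Time-channel separable convolution: depthwise over time + pointwise over channels."""
+    def __init__(self, in_channels, out_channels, kernel_size):
+        super().__init__()
+        # Depthwise: every channel gets its own K-tap filter applied along the time axis.
+        self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size,
+                                   padding=kernel_size // 2, groups=in_channels)
+        # Pointwise: a 1x1 convolution that mixes information across all channels.
+        self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1)
+
+    def forward(self, x):  # x: [batch, channels, time]
+        return self.pointwise(self.depthwise(x))
+
+# Example: 64 feature channels in, 256 channels out, a 33-tap kernel over time.
+block = TCSConv1d(64, 256, kernel_size=33)
+out = block(torch.randn(8, 64, 400))  # -> shape [8, 256, 400]
+```
+
+With `C_in` input channels, `C_out` output channels, and a `K`-tap time kernel, the separable block uses roughly `C_in*K + C_in*C_out` weights instead of the `C_in*C_out*K` of a regular 1D convolution, which is where the parameter and speed savings mentioned above come from.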

+ +**Automatic Speech Recognition (ASR)** +Uses both an acoustic model and a language model to output the transcript of an input audio signal. + +**Acoustic model** +Assigns a probability distribution over a vocabulary of characters given an audio frame. Typically, a large part of the entire ASR model. + +**Language model** +Assigns a probability distribution over a sequence of words. Given a sequence of words, it assigns a probability to the whole sequence. + +**Pre-training** +Training a model on vast amounts of data on the same (or different) task to build general understandings. + +## Setup + +The following section lists the requirements that you need to meet in order to start training the QuartzNet model. + +### Requirements + +This repository contains Dockerfile which extends the PyTorch 21.07-py3 NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: +- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) +- [PyTorch 21.07-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) +- Supported GPUs: + - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) + - [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/) + - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) + +For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: +- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) +- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) +- [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running) + +Further required Python packages are listed in `requirements.txt`, which are automatically installed with the built Docker container. To manually install them, run: +```bash +pip install -r requirements.txt +``` + +For those unable to use the PyTorch 21.07-py3 NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). + +## Quick Start Guide + +To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the QuartzNet model on the LibriSpeech dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. + +1. Clone the repository. + ```bash + git clone https://github.com/NVIDIA/DeepLearningExamples + cd DeepLearningExamples/PyTorch/SpeechRecognition/QuartzNet + ``` + +2. Build the QuartzNet PyTorch NGC container. + ```bash + bash scripts/docker/build.sh + ``` + +3. Start an interactive session in the NGC container to prepare the dataset, or run training/inference. + Specify a local mountpoint for the dataset with the `DATA_DIR` variable: + ```bash + DATA_DIR= bash scripts/docker/launch.sh + ``` + +4. Download and preprocess the dataset. + No GPU is required for data download and preprocessing. + It can take several hours to complete, and requires over 250GB of free disk space. 
+ + This repository provides scripts to download and extract LibriSpeech [http://www.openslr.org/12](http://www.openslr.org/12). The dataset contains 1000 hours of 16kHz read English speech derived from public domain audiobooks from the LibriVox project and has been carefully segmented and aligned. For more information, see the [LIBRISPEECH: AN ASR CORPUS BASED ON PUBLIC DOMAIN AUDIO BOOKS](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) paper. + + Inside the container, download and extract the datasets into the required format for later training and inference: + ```bash + bash scripts/download_librispeech.sh + ``` + After the data download is complete, the following folders should exist: + ```bash + datasets/LibriSpeech/ + ├── dev-clean + ├── dev-other + ├── test-clean + ├── test-other + ├── train-clean-100 + ├── train-clean-360 + └── train-other-500 + ``` + + Since `/datasets/` is mounted to `DATA_DIR` on the host, after the dataset is downloaded it will be accessible from outside of the container at `$DATA_DIR/LibriSpeech`. + + Next, convert the data into WAV files: + ```bash + bash scripts/preprocess_librispeech.sh + ``` + + After the data is converted, the following additional files and folders should exist: + ```bash + datasets/LibriSpeech/ + ├── dev-clean-wav + ├── dev-other-wav + ├── librispeech-train-clean-100-wav.json + ├── librispeech-train-clean-360-wav.json + ├── librispeech-train-other-500-wav.json + ├── librispeech-dev-clean-wav.json + ├── librispeech-dev-other-wav.json + ├── librispeech-test-clean-wav.json + ├── librispeech-test-other-wav.json + ├── test-clean-wav + ├── test-other-wav + ├── train-clean-100-wav + ├── train-clean-360-wav + └── train-other-500-wav + ``` + +5. Start training. + Inside the container, use the following script to start training. + Make sure the downloaded and preprocessed dataset is located at `$DATA_DIR/LibriSpeech` on the host, which is mounted as `/datasets/LibriSpeech` inside the container. + + ```bash + [OPTION1=value1 OPTION2=value2 ...] bash scripts/train.sh + ``` + By default, automatic precision is disabled, batch size is 144 over two gradient accumulation steps, and the recipe is run on a total of 8 GPUs. The hyperparameters are tuned for a GPU with at least 32GB of memory and will require adjustment for different configurations (for example, by lowering the batch size and using more gradient accumulation steps). + + Options are being passed as environment variables. More details on the available options can be found in the [Parameters](#parameters) and [Training process](#training-process) sections. + +6. Start validation/evaluation. + Inside the container, use the following script to run evaluation. + Make sure the downloaded and preprocessed dataset is located at `$DATA_DIR/LibriSpeech` on the host, which is mounted as `/datasets/LibriSpeech` inside the container. + ```bash + [OPTION1=value1 OPTION2=value2 ...] bash scripts/evaluation.sh [OPTIONS] + ``` + + By default, this will use full precision, a batch size of 64, and run on a single GPU. + + Options are being passed as environment variables. More details on the available options can be found in the [Parameters](#parameters) and [Evaluation process](#evaluation-process) sections. + +7. Start inference/predictions. + Inside the container, use the following script to run inference. + Make sure the downloaded and preprocessed dataset is located at `$DATA_DIR/LibriSpeech` on the host, which is mounted as `/datasets/LibriSpeech` inside the container. 
+ A pretrained model checkpoint can be downloaded from [NGC model repository](https://ngc.nvidia.com/catalog/models). + + ```bash + [OPTION1=value1 OPTION2=value2 ...] bash scripts/inference.sh + ``` + + By default, this will use single precision, a batch size of 64, and run on a single GPU. + + Options are being passed as environment variables. More details on the available options can be found in the [Parameters](#parameters) and [Inference process](#inference-process) sections. + +Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark your performance to [Training performance benchmark](#training-performance-results), or [Inference performance benchmark](#inference-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section. + +## Advanced + +The following sections provide greater details of the dataset, running training and inference, and the training results. + +### Scripts and sample code + +In the `root` directory, the most important files are: +``` +quartznet +├── common # data pre-processing, logging, etc. +├── configs # model configurations +├── Dockerfile # container with the basic set of dependencies to run QuartzNet +├── inference.py # entry point for inference +├── quartznet # model-specific code +├── scripts # one-click scripts required for running various supported functionalities +│ ├── docker # contains the scripts for building and launching the container +│ ├── download_librispeech.sh # downloads LibriSpeech dataset +│ ├── evaluation.sh # runs evaluation using the `inference.py` script +│ ├── inference_benchmark.sh # runs the inference benchmark using the `inference_benchmark.py` script +│ ├── inference.sh # runs inference using the `inference.py` script +│ ├── preprocess_librispeech.sh # preprocess LibriSpeech raw data files for training and inference +│ ├── train_benchmark.sh # runs the training performance benchmark using the `train.py` script +│ └── train.sh # runs training using the `train.py` script +├── train.py # entry point for training +└── utils # data downloading and common routines +``` + +### Parameters + +Parameters should be set as environment variables. + +The complete list of available parameters for `scripts/train.sh` script contains: +```bash +DATA_DIR: directory of dataset. (default: '/datasets/LibriSpeech') +MODEL_CONFIG: relative path to model configuration. (default: 'configs/quartznet10x5dr_speedp_online_speca.yaml') +OUTPUT_DIR: directory for results, logs, and created checkpoints. (default: '/results') +CHECKPOINT: a specific model checkpoint to continue training from. To resume training from the last checkpoint, see the RESUME option. +RESUME: resume training from the last checkpoint found in OUTPUT_DIR, or from scratch if there are no checkpoints (default: true) +CUDNN_BENCHMARK: boolean that indicates whether to enable cudnn benchmark mode for using more optimized kernels. (default: true) +NUM_GPUS: number of GPUs to use. (default: 8) +AMP: if set to `true`, enables automatic mixed precision (default: false) +GPU_BATCH_SIZE: batch size for every forward/backward pass. The effective batch size might be higher, if gradient accumulation is enabled (default: 72) +GRAD_ACCUMULATION: number of forward/backward passes until the optimizer updates weights. 
(default: 2) +LEARNING_RATE: initial learning rate. (default: 0.01) +MIN_LEARNING_RATE: minimum learning rate, despite LR scheduling (default: 1e-5) +LR_POLICY: how to decay LR (default: exponential) +LR_EXP_GAMMA: decay factor for the exponential LR schedule (default: 0.981) +EMA: decay factor for exponential averages of checkpoints (default: 0.999) +SEED: seed for random number generator and used for ensuring reproducibility. (default: 0) +EPOCHS: number of training epochs. (default: 440) +WARMUP_EPOCHS: number of initial epoch of linearly increasing LR. (default: 2) +HOLD_EPOCHS: number of epochs to hold maximum LR after warmup. (default: 140) +SAVE_FREQUENCY: number of epochs between saving the model to disk. (default: 10) +EPOCHS_THIS_JOB: run training for this number of epochs. Does not affect LR schedule like the EPOCHS parameter. (default: 0) +DALI_DEVICE: device to run the DALI pipeline on for calculation of filterbanks. Valid choices: cpu, gpu, none. (default: gpu) +PAD_TO_MAX_DURATION: pad all sequences with zeros to maximum length. (default: false) +EVAL_FREQUENCY: number of steps between evaluations on the validation set. (default: 544) +PREDICTION_FREQUENCY: the number of steps between writing a sample prediction to stdout. (default: 544) +TRAIN_MANIFESTS: lists of .json training set files +VAL_MANIFESTS: lists of .json validation set files + +``` + +The complete list of available parameters for `scripts/inference.sh` script contains: +```bash +DATA_DIR: directory of dataset. (default: '/datasets/LibriSpeech') +MODEL_CONFIG: model configuration. (default: 'configs/quartznet10x5dr_speedp-online_speca.yaml') +OUTPUT_DIR: directory for results and logs. (default: '/results') +CHECKPOINT: model checkpoint path. (required) +DATASET: name of the LibriSpeech subset to use. (default: 'dev-clean') +LOG_FILE: path to the DLLogger .json logfile. (default: '') +CUDNN_BENCHMARK: enable cudnn benchmark mode for using more optimized kernels. (default: false) +MAX_DURATION: filter out recordings shorter then MAX_DURATION seconds. (default: "") +PAD_TO_MAX_DURATION: pad all sequences with zeros to maximum length. (default: false) +NUM_GPUS: number of GPUs to use. Note that with > 1 GPUs WER results might be inaccurate due to the batching policy. (default: 1) +NUM_STEPS: number of batches to evaluate, loop the dataset if necessary. (default: 0) +NUM_WARMUP_STEPS: number of initial steps before measuring performance. (default: 0) +AMP: enable FP16 inference with AMP. (default: false) +BATCH_SIZE: data batch size. (default: 64) +EMA: Attempt to load exponentially averaged weights from a checkpoint. (default: true) +SEED: seed for random number generator and used for ensuring reproducibility. (default: 0) +DALI_DEVICE: device to run the DALI pipeline on for calculation of filterbanks. Valid choices: cpu, gpu, none. (default: gpu) +CPU: run inference on CPU. (default: false) +LOGITS_FILE: dump logit matrices to a file. (default: "") +PREDICTION_FILE: save predictions to a file. (default: "${OUTPUT_DIR}/${DATASET}.predictions") +``` + +The complete list of available parameters for `scripts/evaluation.sh` is the same as `scripts/inference.sh`. Only the defaults have changed. +```bash +PREDICTION_FILE: (default: "") +DATASET: (default: "test-other") +``` + +The `scripts/inference_benchmark.sh` script pads all input to a fixed duration and computes the mean, 90%, 95%, 99% percentile of latency for the specified number of inference steps. Latency is measured in milliseconds per batch. 
The `scripts/inference_benchmark.sh` measures latency for a single GPU and loops over a number of batch sizes and durations. It extends `scripts/inference.sh` and changes the defaults with: +```bash +BATCH_SIZE_SEQ: batch sizes to measure with. (default: "1 2 4 8 16") +MAX_DURATION_SEQ: input durations (in seconds) to measure with (default: "2 7 16.7") +CUDNN_BENCHMARK: (default: true) +PAD_TO_MAX_DURATION: (default: true) +NUM_WARMUP_STEPS: (default: 10) +NUM_STEPS: (default: 500) +DALI_DEVICE: (default: "cpu") +``` + +The `scripts/train_benchmark.sh` script pads all input to the same length according to the input argument `MAX_DURATION` and measures average training latency and throughput performance. Latency is measured in seconds per batch, throughput in sequences per second. +Training performance is measured with online speed perturbation and NVIDIA cuDNN benchmark mode enabled. +The script `scripts/train_benchmark.sh` loops over a number of batch sizes and GPU counts. +It extends `scripts/train.sh`, the complete list of available parameters for `scripts/train_benchmark.sh` script contains: +```bash +ACC_BATCH_SIZE: accumulated (effective) batch size to measure with. (default: "144") +GRAD_ACC_SEQ: the sequence of gradient accumulation settings to measure with. (default: "4 2") +NUM_GPUS_SEQ: number of GPUs to run the training on. (default: "1 4 8") +MODEL_CONFIG: (default: "configs/quartznet10x5dr_speedp-online_train-benchmark.yaml") +TRAIN_MANIFESTS: (default: "$DATA_DIR/librispeech-train-clean-100-wav.json") +RESUME: (default: false) +EPOCHS_THIS_JOB: (default: 2) +EPOCHS: (default: 100000) +SAVE_FREQUENCY: (default: 100000) +EVAL_FREQUENCY: (default: 100000) +GRAD_ACCUMULATION_STEPS: (default: 1) +PAD_TO_MAX_DURATION: (default: true) +EMA: (default: 0) +``` + +### Command-line options + +To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example: +```bash +python train.py --help +python inference.py --help +``` + +### Getting the data + +QuartzNet is trained on the LibriSpeech dataset. We use the concatenation of `train-clean-100`, `train-clean-360`, and `train-other-500` for training and `dev-clean` for validation. + +This repository contains the `scripts/download_librispeech.sh` and `scripts/preprocess_librispeech.sh` scripts that automatically downloads and preprocesses the training, test, and development datasets. By default, data is downloaded to the `/datasets/LibriSpeech` directory. A minimum of 250GB free space is required for download and preprocessing; the final preprocessed dataset is approximately 100GB. + +#### Dataset guidelines + +The `scripts/preprocess_librispeech.sh` script converts the input audio files to WAV format with a sample rate of 16kHz. The target transcripts are stripped from whitespace characters, then lower-cased. No offline augmentations are stored on the disk - these are computed online with the DALI library without any impact on training time. + +After preprocessing, the script creates JSON metadata files with output file paths, sample rate, target transcript and other metadata. These JSON files are used by the training script to identify training and validation datasets. + +The QuartzNet model was tuned on audio signals with a sample rate of 16kHz. If you wish to use a different sampling rate, then some hyperparameters might need to be changed - specifically, the window size and step size. 
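+
+To make the manifest format concrete, the snippet below shows roughly what an entry of the generated `.json` files might look like and one way to inspect them. The field names and values are illustrative assumptions based on the description above; the files written by `scripts/preprocess_librispeech.sh` and `utils/convert_librispeech.py` are the authoritative reference.
+
+```python
+import json
+
+# Hypothetical single manifest entry, shown for illustration only; the real files
+# may use different or additional fields (see utils/convert_librispeech.py).
+example_entry = {
+    "transcript": "an example transcript lower cased with no punctuation",
+    "files": [{"fname": "train-clean-100-wav/example-utterance.wav",
+               "sample_rate": 16000,
+               "duration": 13.7}],
+}
+
+def summarize_manifest(path):
+    """Load a manifest (assumed to be a JSON list of entries) and print basic stats."""
+    with open(path) as f:
+        entries = json.load(f)
+    hours = sum(e["files"][0]["duration"] for e in entries) / 3600
+    print(f"{len(entries)} utterances, {hours:.1f} hours")
+    return entries
+
+# entries = summarize_manifest("/datasets/LibriSpeech/librispeech-train-clean-100-wav.json")
+```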
+ +#### Multi-dataset + +Training scripts in this repository treat the training subsets of LibriSpeech (`train-clean-100`, `train-clean-360`, `train-other-500`) as three independent training datasets. +In order to add more datasets, follow the format of LibriSpeech, adjust the provided pre-processing scripts to generate metadata JSON files, and point them with the `TRAIN_MANIFESTS` variable to the `scripts/train.sh` script. + +### Training process + +Training is performed using the `train.py` script along with parameters defined in `scripts/train.sh`. +The `scripts/train.sh` script runs a job on a single node that trains the QuartzNet model from scratch using LibriSpeech as training data. To make training more efficient, we discard audio samples longer than 16.7 seconds from the training dataset, the total number of these samples is less than 1%. Such filtering does not degrade accuracy, but it allows us to decrease the number of time steps in a batch, which requires less GPU memory and increases training speed. +Apart from the default arguments as listed in the [Parameters](#parameters) section, by default the training script: + +* Runs on 8 GPUs with at least 32GB of memory and training/evaluation batch size 48, split over three gradient accumulation steps +* Uses TF32 precision (A100 GPU) or FP32 (other GPUs) +* Trains on the concatenation of all 3 LibriSpeech training datasets and evaluates on the LibriSpeech dev-clean dataset +* Maintains an exponential moving average of parameters for evaluation +* Has cuDNN benchmark enabled +* Runs for 260 epochs +* Uses an initial learning rate of 0.02 and an exponential learning rate decay +* Saves a checkpoint every 10 epochs +* Automatically removes old checkpoints and preserves milestone checkpoints +* Runs evaluation on the development dataset every epoch and at the end of training +* Maintains a separate checkpoint with the lowest WER on development set +* Prints out training progress every iteration to `stdout` +* Creates a DLLogger log file and a TensorBoard log +* Calculates speed perturbation online during training +* Uses `SpecAugment` in data pre-processing +* Filters out audio samples longer than 16.7 seconds +* Pads each batch so its length is divisible by 16 +* Uses time-channel separable convolutions as described in the paper +* Uses weight decay of 0.001 +* Uses [Novograd](https://arxiv.org/pdf/1905.11286.pdf) as optimizer with betas=(0.95, 0) + +Enabling AMP permits batch size 144 with one gradient accumulation step. Since each batch has to be padded to the longest sequence, all GPUs have to wait for the slowest one, and two accumulation steps are slightly faster. + +The current training setup improves upon the greedy WER [Results](#results) of the QuartzNet paper. + +### Inference process + +Inference is performed using the `inference.py` script along with parameters defined in `scripts/inference.sh`. +The `scripts/inference.sh` script runs the job on a single GPU, taking a pre-trained QuartzNet model checkpoint and running it on the specified dataset. 
+Apart from the default arguments as listed in the [Parameters](#parameters) section, by default, the inference script: + +* Evaluates on the LibriSpeech dev-clean dataset and prints out the final word error rate +* Uses a batch size of 64 +* Creates a log file with progress and results which will be stored in the `results` folder +* Pads each batch so its length would be divisible by 16 +* Does not use data augmentation +* Does greedy decoding and optionally saves the transcriptions in the results folder +* Has the option to save the model output tensors for more complex decoding, for example, beam search +* Has cuDNN benchmark disabled + +To view all available options for inference, run `python inference.py --help`. + +## Performance + +### Benchmarking + +The following section shows how to run benchmarks measuring the model performance in training and inference modes. + +#### Training performance benchmark + +To benchmark the training performance with a number of specific configurations, run: +```bash +GRAD_ACC_SEQ= NUM_GPUS_SEQ= bash scripts/train_benchmark.sh +``` +for example: +```bash +GRAD_ACC_SEQ="12 24" NUM_GPUS_SEQ="4 8" bash scripts/train_benchmark.sh +``` + +This invocation will measure performance in four setups (two different batch sizes for every single forward/backward pass times two hardware setups). + +By default, this script makes forward/backward pre-allocation passes with all possible audio lengths +enabling immediate stabilization of training step times in the cuDNN benchmark mode, +and trains for two epochs on the `train-clean-100` subset of LibriSpeech. + +#### Inference performance benchmark + +To benchmark the inference performance on a specific batch size and audio length, run: + +```bash +BATCH_SIZE_SEQ= MAX_DURATION_SEQ= bash scripts/inference_benchmark.sh +``` + +for example: +```bash +BATCH_SIZE_SEQ="24 48" MAX_DURATION_SEQ="2 7 16.7" bash scripts/inference_benchmark.sh +``` + +The script runs on a single GPU and evaluates on the dataset of fixed-length utterances shorter than `MAX_DURATION` and padded to that duration. + +### Results + +The following sections provide details on how we achieved our performance and accuracy in training and inference. + +#### Training accuracy results + +##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB) + +Our results were obtained by running the `scripts/train.sh` training script in the PyTorch 21.07-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. + +| Number of GPUs | Batch size per GPU | Precision | dev-clean WER | dev-other WER | test-clean WER | test-other WER | Time to train | +|-----|-----|-------|-------|-------|------|-------|------| +| 8 | 144 | mixed | 3.47 | 10.84 | 3.69 | 10.69 | 34 h | + +The table reports word error rate (WER) of the acoustic model with greedy decoding on all LibriSpeech dev and test datasets for mixed precision training. + +##### Training stability test + +The following table compares greedy decoding word error rates across 8 different training runs with different seeds for mixed precision training. 
+ +| DGX A100 80GB, FP16, 8x GPU | Seed #1 | Seed #2 | Seed #3 | Seed #4 | Seed #5 | Seed #6 | Seed #7 | Seed #8 | Mean | Std | +|-----------:|----------:|----------:|----------:|----------:|----------:|----------:|----------:|----------:|-------:|------:| +| dev-clean | 3.57 | 3.48 | 3.54 | 3.48 | 3.47 | 3.69 | 3.51 | 3.59 | 3.54 | 0.07 | +| dev-other | 10.68 | 10.78 | 10.47 | 10.72 | 10.84 | 11.03 | 10.67 | 10.86 | 10.76 | 0.15 | +| test-clean | 3.70 | 3.82 | 3.79 | 3.84 | 3.69 | 4.03 | 3.82 | 3.80 | 3.81 | 0.10 | +| test-other | 10.75 | 10.62 | 10.54 | 10.90 | 10.69 | 11.14 | 10.41 | 10.82 | 10.73 | 0.21 | + +#### Training performance results + +##### Training performance: NVIDIA DGX A100 (8x A100 80GB) + +Our results were obtained by running: +```bash +AMP=true NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="16 24" bash scripts/train_benchmark.sh +AMP=true NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="4 6" bash scripts/train_benchmark.sh +AMP=true NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="2 3" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="16 24" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="4 6" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="2 3" bash scripts/train_benchmark.sh +``` +in the PyTorch 21.07-py3 NGC container on NVIDIA DGX A100 with (8x A100 80GB) GPUs. Performance numbers (in items/images per second) were averaged over an entire training epoch. + +| Batch size / GPU | Grad accumulation | GPUs | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | +|-----:|-----:|-------:|----------:|-------:|--------:|-----:|------:| +| 48 | 24 | 1 | 89.69 | 78.89 | 1.14 | 1.00 | 1.00 | +| 72 | 16 | 1 | 88.70 | 79.01 | 1.12 | 1.00 | 1.00 | +| 48 | 6 | 4 | 343.06 | 303.16 | 1.13 | 3.84 | 3.82 | +| 72 | 4 | 4 | 341.95 | 304.47 | 1.12 | 3.85 | 3.86 | +| 48 | 3 | 8 | 644.27 | 576.37 | 1.12 | 7.31 | 7.18 | +| 72 | 2 | 8 | 651.60 | 583.31 | 1.12 | 7.38 | 7.35 | + +To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). + +##### Training performance: NVIDIA DGX-2 (16x V100 32GB) + +Our results were obtained by running: +```bash +AMP=true NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="24 48" bash scripts/train_benchmark.sh +AMP=true NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="6 12" bash scripts/train_benchmark.sh +AMP=true NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="3 6" bash scripts/train_benchmark.sh +AMP=true NUM_GPUS_SEQ="16" GRAD_ACC_SEQ="3" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="48" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="12" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="6" bash scripts/train_benchmark.sh +AMP=false NUM_GPUS_SEQ="16" GRAD_ACC_SEQ="3" bash scripts/train_benchmark.sh +``` +in the PyTorch 21.07-py3 NGC container on NVIDIA DGX-2 with (16x V100 32GB) GPUs. Performance numbers (in items/images per second) were averaged over an entire training epoch. 
+ +| Batch size / GPU | Grad accumulation | GPUs | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | +|-----:|-----:|-------:|----------:|-------:|--------:|------:|------:| +| 24 | 48 | 1 | 67.95 | 44.65 | 1.52 | 1.00 | 1.00 | +| 48 | 24 | 1 | 67.49 | - | - | 1.00 | 1.00 | +| 24 | 12 | 4 | 258.56 | 170.18 | 1.52 | 3.81 | 3.81 | +| 48 | 6 | 4 | 254.58 | - | - | - | 3.77 | +| 24 | 6 | 8 | 495.52 | 330.53 | 1.50 | 7.40 | 7.29 | +| 48 | 3 | 8 | 477.87 | - | - | - | 7.08 | +| 24 | 3 | 16 | 872.99 | 616.51 | 1.42 | 13.81 | 12.85 | + + +To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). + +#### Inference performance results + +##### Inference performance: NVIDIA DGX A100 (1x A100 80GB) + +Our results were obtained by running: +```bash +bash AMP=false scripts/inference_benchmark.sh +bash AMP=true scripts/inference_benchmark.sh +``` + +in the PyTorch 21.07-py3 NGC container on NVIDIA DGX A100 (1x A100 80GB) GPU. +Performance numbers (latency in milliseconds per batch) were averaged over 500 iterations. + +| | | FP16 Latency (ms) Percentiles | | | | TF32 Latency (ms) Percentiles | | | | FP16/TF32 speed up | +|-----:|---------------:|------:|------:|------:|------:|------:|------:|------:|------:|------:| +| BS | Duration (s) | 90% | 95% | 99% | Avg | 90% | 95% | 99% | Avg | Avg | +| 1 | 2.0 | 35.51 | 36.36 | 55.57 | 35.71 | 33.23 | 33.86 | 40.05 | 33.23 | 0.93 | +| 2 | 2.0 | 38.05 | 38.91 | 52.67 | 38.21 | 34.17 | 35.17 | 39.32 | 33.73 | 0.88 | +| 4 | 2.0 | 38.43 | 38.98 | 45.44 | 37.78 | 35.02 | 36.00 | 44.10 | 34.75 | 0.92 | +| 8 | 2.0 | 38.63 | 39.37 | 45.43 | 37.94 | 35.49 | 36.70 | 45.94 | 34.53 | 0.91 | +| 16 | 2.0 | 42.33 | 44.58 | 61.02 | 40.28 | 35.66 | 36.93 | 45.38 | 34.78 | 0.86 | +| 1 | 7.0 | 37.72 | 38.54 | 42.56 | 37.28 | 33.23 | 34.16 | 40.54 | 33.13 | 0.89 | +| 2 | 7.0 | 39.44 | 41.35 | 53.62 | 38.56 | 35.15 | 35.81 | 41.83 | 34.82 | 0.90 | +| 4 | 7.0 | 38.39 | 39.48 | 45.01 | 37.98 | 37.54 | 38.51 | 42.67 | 36.12 | 0.95 | +| 8 | 7.0 | 40.82 | 41.76 | 54.20 | 39.43 | 37.67 | 39.97 | 45.24 | 36.12 | 0.92 | +| 16 | 7.0 | 42.80 | 44.80 | 56.92 | 41.52 | 40.66 | 41.96 | 53.24 | 39.24 | 0.95 | +| 1 | 16.7 | 38.22 | 38.98 | 44.15 | 37.80 | 33.89 | 34.98 | 42.66 | 33.23 | 0.88 | +| 2 | 16.7 | 39.84 | 41.09 | 52.50 | 39.34 | 35.86 | 37.16 | 42.04 | 34.39 | 0.87 | +| 4 | 16.7 | 41.02 | 42.64 | 54.96 | 39.50 | 35.98 | 37.02 | 39.30 | 34.87 | 0.88 | +| 8 | 16.7 | 40.93 | 42.06 | 56.26 | 39.36 | 40.93 | 42.06 | 45.50 | 39.34 | 1.00 | +| 16 | 16.7 | 57.21 | 58.65 | 71.33 | 57.78 | 62.74 | 63.82 | 71.13 | 61.49 | 1.06 | + +To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). + +##### Inference performance: NVIDIA DGX-2 (1x V100 32GB) + +Our results were obtained by running: +```bash +bash AMP=false scripts/inference_benchmark.sh +bash AMP=true scripts/inference_benchmark.sh +``` + +in the PyTorch 21.07-py3 NGC container on NVIDIA DGX-2 with (1x V100 32GB) GPU. +Performance numbers (latency in milliseconds per batch) were averaged over 500 iterations. 
+ +| | | FP16 Latency (ms) Percentiles | | | | FP32 Latency (ms) Percentiles | | | | FP16/FP32 speed up | +|-----:|---------------:|------:|------:|------:|------:|-------:|-------:|-------:|-------:|------:| +| BS | Duration (s) | 90% | 95% | 99% | Avg | 90% | 95% | 99% | Avg | Avg | +| 1 | 2.0 | 36.89 | 38.16 | 41.80 | 35.85 | 33.44 | 33.78 | 38.09 | 33.01 | 0.92 | +| 2 | 2.0 | 40.47 | 41.33 | 45.70 | 40.02 | 32.62 | 33.27 | 36.38 | 32.09 | 0.80 | +| 4 | 2.0 | 41.50 | 42.85 | 49.65 | 41.12 | 34.56 | 34.83 | 37.10 | 34.04 | 0.83 | +| 8 | 2.0 | 49.87 | 50.48 | 51.99 | 49.19 | 34.90 | 35.17 | 36.57 | 34.27 | 0.70 | +| 16 | 2.0 | 46.39 | 46.77 | 47.87 | 40.04 | 45.37 | 45.89 | 47.52 | 44.46 | 1.11 | +| 1 | 7.0 | 48.83 | 49.16 | 52.22 | 48.26 | 33.87 | 34.50 | 36.45 | 33.24 | 0.69 | +| 2 | 7.0 | 41.48 | 41.82 | 45.07 | 41.03 | 42.32 | 42.66 | 43.86 | 41.79 | 1.02 | +| 4 | 7.0 | 42.48 | 43.25 | 47.29 | 41.56 | 37.20 | 38.18 | 39.74 | 36.46 | 0.88 | +| 8 | 7.0 | 39.78 | 40.49 | 44.73 | 38.89 | 46.84 | 47.17 | 48.07 | 44.78 | 1.15 | +| 16 | 7.0 | 49.85 | 50.56 | 53.04 | 44.95 | 60.21 | 60.68 | 64.92 | 57.94 | 1.29 | +| 1 | 16.7 | 40.80 | 41.16 | 42.96 | 40.52 | 42.04 | 42.53 | 44.59 | 37.08 | 0.92 | +| 2 | 16.7 | 41.37 | 41.69 | 43.74 | 40.85 | 35.61 | 36.49 | 40.32 | 34.68 | 0.85 | +| 4 | 16.7 | 50.22 | 51.07 | 54.13 | 49.51 | 40.95 | 41.38 | 44.09 | 40.39 | 0.82 | +| 8 | 16.7 | 44.93 | 45.38 | 49.24 | 44.16 | 62.54 | 62.92 | 65.95 | 61.86 | 1.40 | +| 16 | 16.7 | 70.74 | 71.56 | 75.16 | 69.87 | 102.52 | 103.57 | 108.20 | 101.57 | 1.45 | + +To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). + +## Release notes + +We're constantly refining and improving our performance on AI and HPC workloads even on the same hardware with frequent updates to our software stack. For our latest performance data, refer to these pages for [AI](#https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](#https://developer.nvidia.com/hpc-application-performance) benchmarks. + +### Changelog + +September 2021 +- Initial release + +### Known issues + +There are no known issues in this release. diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/__init__.py b/PyTorch/SpeechRecognition/QuartzNet/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/audio.py b/PyTorch/SpeechRecognition/QuartzNet/common/audio.py new file mode 100644 index 00000000..bccd70ce --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/audio.py @@ -0,0 +1,247 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
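+
+# Usage sketch (illustrative only; 'clip.wav' is a placeholder path, not a
+# file shipped with this repository):
+#
+#   segment = AudioSegment('clip.wav', target_sr=16000, trim=True)
+#   SpeedPerturbation(min_rate=0.9, max_rate=1.1, p=1.0).maybe_apply(segment, 16000)
+#   GainPerturbation(min_gain_dbfs=-6, max_gain_dbfs=6, p=1.0).maybe_apply(segment)
+#   print(segment.duration, segment.rms_db)
+#
+# audio_from_file() below wraps the same loading path and returns CUDA tensors
+# (a batch of samples and their lengths) ready for the feature extractor.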
+ +import random +import soundfile as sf + +import librosa +import torch +import numpy as np + +import sox + + +def audio_from_file(file_path, offset=0, duration=0, trim=False, target_sr=16000): + audio = AudioSegment(file_path, target_sr=target_sr, int_values=False, + offset=offset, duration=duration, trim=trim) + + samples = torch.tensor(audio.samples, dtype=torch.float).cuda() + num_samples = torch.tensor(samples.shape[0]).int().cuda() + return (samples.unsqueeze(0), num_samples.unsqueeze(0)) + + +class AudioSegment(object): + """Monaural audio segment abstraction. + + :param samples: Audio samples [num_samples x num_channels]. + :type samples: ndarray.float32 + :param sample_rate: Audio sample rate. + :type sample_rate: int + :raises TypeError: If the sample data type is not float or int. + """ + + def __init__(self, filename, target_sr=None, int_values=False, offset=0, + duration=0, trim=False, trim_db=60): + """Create audio segment from samples. + + Samples are converted to float32 internally, with int scaled to [-1, 1]. + Load a file supported by librosa and return as an AudioSegment. + :param filename: path of file to load + :param target_sr: the desired sample rate + :param int_values: if true, load samples as 32-bit integers + :param offset: offset in seconds when loading audio + :param duration: duration in seconds when loading audio + :return: numpy array of samples + """ + with sf.SoundFile(filename, 'r') as f: + dtype = 'int32' if int_values else 'float32' + sample_rate = f.samplerate + if offset > 0: + f.seek(int(offset * sample_rate)) + if duration > 0: + samples = f.read(int(duration * sample_rate), dtype=dtype) + else: + samples = f.read(dtype=dtype) + samples = samples.transpose() + + samples = self._convert_samples_to_float32(samples) + if target_sr is not None and target_sr != sample_rate: + samples = librosa.core.resample(samples, sample_rate, target_sr) + sample_rate = target_sr + if trim: + samples, _ = librosa.effects.trim(samples, trim_db) + self._samples = samples + self._sample_rate = sample_rate + if self._samples.ndim >= 2: + self._samples = np.mean(self._samples, 1) + + def __eq__(self, other): + """Return whether two objects are equal.""" + if type(other) is not type(self): + return False + if self._sample_rate != other._sample_rate: + return False + if self._samples.shape != other._samples.shape: + return False + if np.any(self.samples != other._samples): + return False + return True + + def __ne__(self, other): + """Return whether two objects are unequal.""" + return not self.__eq__(other) + + def __str__(self): + """Return human-readable representation of segment.""" + return ("%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, " + "rms=%.2fdB" % (type(self), self.num_samples, self.sample_rate, + self.duration, self.rms_db)) + + @staticmethod + def _convert_samples_to_float32(samples): + """Convert sample type to float32. + + Audio sample type is usually integer or float-point. + Integers will be scaled to [-1, 1] in float32. + """ + float32_samples = samples.astype('float32') + if samples.dtype in np.sctypes['int']: + bits = np.iinfo(samples.dtype).bits + float32_samples *= (1. / 2 ** (bits - 1)) + elif samples.dtype in np.sctypes['float']: + pass + else: + raise TypeError("Unsupported sample type: %s." 
% samples.dtype) + return float32_samples + + @property + def samples(self): + return self._samples.copy() + + @property + def sample_rate(self): + return self._sample_rate + + @property + def num_samples(self): + return self._samples.shape[0] + + @property + def duration(self): + return self._samples.shape[0] / float(self._sample_rate) + + @property + def rms_db(self): + mean_square = np.mean(self._samples ** 2) + return 10 * np.log10(mean_square) + + def gain_db(self, gain): + self._samples *= 10. ** (gain / 20.) + + def pad(self, pad_size, symmetric=False): + """Add zero padding to the sample. + + The pad size is given in number of samples. If symmetric=True, + `pad_size` will be added to both sides. If false, `pad_size` zeros + will be added only to the end. + """ + self._samples = np.pad(self._samples, + (pad_size if symmetric else 0, pad_size), + mode='constant') + + def subsegment(self, start_time=None, end_time=None): + """Cut the AudioSegment between given boundaries. + + Note that this is an in-place transformation. + :param start_time: Beginning of subsegment in seconds. + :type start_time: float + :param end_time: End of subsegment in seconds. + :type end_time: float + :raise ValueError: If start_time or end_time is incorrectly set, e.g. out + of bounds in time. + """ + start_time = 0.0 if start_time is None else start_time + end_time = self.duration if end_time is None else end_time + if start_time < 0.0: + start_time = self.duration + start_time + if end_time < 0.0: + end_time = self.duration + end_time + if start_time < 0.0: + raise ValueError("The slice start position (%f s) is out of " + "bounds." % start_time) + if end_time < 0.0: + raise ValueError("The slice end position (%f s) is out of bounds." % + end_time) + if start_time > end_time: + raise ValueError("The slice start position (%f s) is later than " + "the end position (%f s)." 
% (start_time, end_time)) + if end_time > self.duration: + raise ValueError("The slice end position (%f s) is out of bounds " + "(> %f s)" % (end_time, self.duration)) + start_sample = int(round(start_time * self._sample_rate)) + end_sample = int(round(end_time * self._sample_rate)) + self._samples = self._samples[start_sample:end_sample] + + +class Perturbation: + def __init__(self, p=0.1, rng=None): + self.p = p + self._rng = random.Random() if rng is None else rng + + def maybe_apply(self, segment, sample_rate=None): + if self._rng.random() < self.p: + self(segment, sample_rate) + + +class SpeedPerturbation(Perturbation): + def __init__(self, min_rate=0.85, max_rate=1.15, discrete=False, p=0.1, rng=None): + super(SpeedPerturbation, self).__init__(p, rng) + assert 0 < min_rate < max_rate + self.min_rate = min_rate + self.max_rate = max_rate + self.discrete = discrete + + def __call__(self, data, sample_rate): + if self.discrete: + rate = np.random.choice([self.min_rate, None, self.max_rate]) + else: + rate = self._rng.uniform(self.min_rate, self.max_rate) + + if rate is not None: + data._samples = sox.Transformer().speed(factor=rate).build_array( + input_array=data._samples, sample_rate_in=sample_rate) + + +class GainPerturbation(Perturbation): + def __init__(self, min_gain_dbfs=-10, max_gain_dbfs=10, p=0.1, rng=None): + super(GainPerturbation, self).__init__(p, rng) + self._rng = random.Random() if rng is None else rng + self._min_gain_dbfs = min_gain_dbfs + self._max_gain_dbfs = max_gain_dbfs + + def __call__(self, data, sample_rate=None): + del sample_rate + gain = self._rng.uniform(self._min_gain_dbfs, self._max_gain_dbfs) + data._samples = data._samples * (10. ** (gain / 20.)) + + +class ShiftPerturbation(Perturbation): + def __init__(self, min_shift_ms=-5.0, max_shift_ms=5.0, p=0.1, rng=None): + super(ShiftPerturbation, self).__init__(p, rng) + self._min_shift_ms = min_shift_ms + self._max_shift_ms = max_shift_ms + + def __call__(self, data, sample_rate): + shift_ms = self._rng.uniform(self._min_shift_ms, self._max_shift_ms) + if abs(shift_ms) / 1000 > data.duration: + # TODO: do something smarter than just ignore this condition + return + shift_samples = int(shift_ms * data.sample_rate // 1000) + # print("DEBUG: shift:", shift_samples) + if shift_samples < 0: + data._samples[-shift_samples:] = data._samples[:shift_samples] + data._samples[:-shift_samples] = 0 + elif shift_samples > 0: + data._samples[:-shift_samples] = data._samples[shift_samples:] + data._samples[-shift_samples:] = 0 diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/dali/__init__.py b/PyTorch/SpeechRecognition/QuartzNet/common/dali/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/dali/data_loader.py b/PyTorch/SpeechRecognition/QuartzNet/common/dali/data_loader.py new file mode 100644 index 00000000..56b98dff --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/dali/data_loader.py @@ -0,0 +1,182 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import math +import os + +import torch +import torch.distributed as dist + +from .iterator import DaliIterator, SyntheticDataIterator +from .pipeline import make_dali_asr_pipeline +from common.helpers import print_once + + +def _parse_json(json_path: str, start_label=0, predicate=lambda json: True): + """ + Parses json file to the format required by DALI. + + Args: + json_path: path to json file + start_label: the label, starting from which DALI will assign + consecutive int numbers to every transcript + predicate: function, that accepts a sample descriptor + (i.e. json dictionary) as an argument. If the predicate for a given + sample returns True, it will be included in the dataset. + + Returns: + output_files: dict that maps file name to label assigned by DALI + transcripts: dict that maps label assigned by DALI to the transcript + """ + global cnt + with open(json_path) as f: + librispeech_json = json.load(f) + output_files = {} + transcripts = {} + curr_label = start_label + for original_sample in librispeech_json: + if not predicate(original_sample): + continue + transcripts[curr_label] = original_sample['transcript'] + output_files[original_sample['files'][-1]['fname']] = curr_label + curr_label += 1 + return output_files, transcripts + + +def _dict_to_file(dict: dict, filename: str): + with open(filename, "w") as f: + for key, value in dict.items(): + f.write("{} {}\n".format(key, value)) + + +class DaliDataLoader: + """ + DataLoader is the main entry point to the data preprocessing pipeline. + To use, create an object and then just iterate over `data_iterator`. + DataLoader will do the rest for you. + Example: + data_layer = DataLoader(DaliTrainPipeline, path, json, bs, ngpu) + data_it = data_layer.data_iterator + for data in data_it: + print(data) # Here's your preprocessed data + + Args: + device_type: Which device to use for preprocessing. 
Choose: "cpu", "gpu" + pipeline_type: Choose: "train", "val", "synth" + """ + def __init__(self, gpu_id, dataset_path: str, config_data: dict, + config_features: dict, json_names: list, symbols: list, + batch_size: int, pipeline_type: str, + grad_accumulation_steps: int = 1, + synth_iters_per_epoch: int = 544, device_type: str = "gpu"): + + self.batch_size = batch_size + self.grad_accumulation_steps = grad_accumulation_steps + self.drop_last = (pipeline_type == 'train') + self.device_type = device_type + pipeline_type = self._parse_pipeline_type(pipeline_type) + if pipeline_type == "synth": + self._dali_data_iterator = self._init_synth_iterator( + self.batch_size, + config_features['nfilt'], + iters_per_epoch=synth_iters_per_epoch, + ngpus=torch.distributed.get_world_size()) + else: + self._dali_data_iterator = self._init_iterator( + gpu_id=gpu_id, + dataset_path=dataset_path, + config_data=config_data, + config_features=config_features, + json_names=json_names, + symbols=symbols, + train_pipeline=pipeline_type == "train") + + def _init_iterator(self, gpu_id, dataset_path, config_data, + config_features, json_names: list, symbols: list, + train_pipeline: bool): + """Returns an iterator over data preprocessed with Dali.""" + + def hash_list_of_strings(li): + return str(abs(hash(''.join(li)))) + + output_files, transcripts = {}, {} + max_duration = config_data['max_duration'] + for jname in json_names: + of, tr = _parse_json( + jname if jname[0] == '/' else os.path.join(dataset_path, jname), + len(output_files), + predicate=lambda json: json['original_duration'] <= max_duration) + output_files.update(of) + transcripts.update(tr) + file_list_path = os.path.join( + "/tmp", "asr_dali.file_list." + hash_list_of_strings(json_names)) + _dict_to_file(output_files, file_list_path) + self.dataset_size = len(output_files) + print_once('Dataset read by DALI. ' + f'Number of samples: {self.dataset_size}') + + pipeline = make_dali_asr_pipeline( + config_data=config_data, + config_features=config_features, + device_id=gpu_id, + file_root=dataset_path, + file_list=file_list_path, + device_type=self.device_type, + batch_size=self.batch_size, + train_pipeline=train_pipeline) + + return DaliIterator([pipeline], transcripts=transcripts, + symbols=symbols, batch_size=self.batch_size, + reader_name="file_reader", + train_iterator=train_pipeline) + + def _init_synth_iterator(self, batch_size, nfeatures, iters_per_epoch, + ngpus): + self.dataset_size = ngpus * iters_per_epoch * batch_size + return SyntheticDataIterator(batch_size, nfeatures, regenerate=True) + + @staticmethod + def _parse_pipeline_type(pipeline_type): + pipe = pipeline_type.lower() + assert pipe in ("train", "val", "synth"), \ + 'Invalid pipeline type (choices: "train", "val", "synth").' + return pipe + + def _shard_size(self): + """ + Total number of samples handled by a single GPU in a single epoch. + """ + world_size = dist.get_world_size() if dist.is_initialized() else 1 + if self.drop_last: + divisor = world_size * self.batch_size * self.grad_accumulation_steps + return self.dataset_size // divisor * divisor // world_size + else: + return int(math.ceil(self.dataset_size / world_size)) + + def __len__(self): + """ + Number of batches handled by each GPU. 
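+        Equal to ceil(shard_size / batch_size). For the 'train' pipeline
+        (drop_last=True) the shard size is already an integer multiple of
+        the batch size, so no partial batch is produced.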
+ """ + if self.drop_last: + assert self._shard_size() % self.batch_size == 0, \ + f'{self._shard_size()} {self.batch_size}' + + return int(math.ceil(self._shard_size() / self.batch_size)) + + def data_iterator(self): + return self._dali_data_iterator + + def __iter__(self): + return self._dali_data_iterator diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/dali/iterator.py b/PyTorch/SpeechRecognition/QuartzNet/common/dali/iterator.py new file mode 100644 index 00000000..c25e5a4e --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/dali/iterator.py @@ -0,0 +1,183 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import torch +from nvidia.dali.plugin.base_iterator import LastBatchPolicy +from nvidia.dali.plugin.pytorch import DALIGenericIterator + +from common.helpers import print_once +from common.text import _clean_text, punctuation_map + + +def normalize_string(s, symbols, punct_map): + """ + Normalizes string. + Example: + 'call me at 8:00 pm!' -> 'call me at eight zero pm' + """ + labels = set(symbols) + try: + text = _clean_text(s, ["english_cleaners"], punct_map).strip() + return ''.join([tok for tok in text if all(t in labels for t in tok)]) + except Exception as e: + print_once(f"WARNING: Normalizing failed: {s} {e}") + + +class DaliIterator(object): + """Returns batches of data. + + Batches are in the form: + (preprocessed_signal, preprocessed_signal_length, transcript, + transcript_length) + + This iterator is not meant to be the entry point to a Dali pipeline. + Use DataLoader instead. + """ + + def __init__(self, dali_pipelines, transcripts, symbols, batch_size, + reader_name, train_iterator: bool): + self.transcripts = transcripts + self.symbols = symbols + self.batch_size = batch_size + + # in train pipeline shard_size is set to divisable by batch_size, + # so PARTIAL policy is safe + self.dali_it = DALIGenericIterator( + dali_pipelines, + ["audio", "label", "audio_shape"], + reader_name=reader_name, + dynamic_shape=True, + auto_reset=True, + last_batch_policy=LastBatchPolicy.DROP) + + @staticmethod + def _str2list(s: str): + """ + Returns list of floats, that represents given string. + '0.' denotes separator + '1.' denotes 'a' + '27.' denotes "'" + Assumes, that the string is lower case. + """ + list = [] + for c in s: + if c == "'": + list.append(27.) + else: + list.append(max(0., ord(c) - 96.)) + return list + + @staticmethod + def _pad_lists(lists: list, pad_val=0): + """ + Pads lists, so that all have the same size. 
+ Returns list with actual sizes of corresponding input lists + """ + max_length = 0 + sizes = [] + for li in lists: + sizes.append(len(li)) + max_length = max_length if len(li) < max_length else len(li) + for li in lists: + li += [pad_val] * (max_length - len(li)) + return sizes + + def _gen_transcripts(self, labels, normalize_transcripts: bool = True): + """ + Generate transcripts in format expected by NN + """ + if normalize_transcripts: + lists = [ + self._str2list(normalize_string(self.transcripts[lab.item()], + self.symbols, punctuation_map(self.symbols))) + for lab in labels] + else: + lists = [self._str2list(self.transcripts[lab.item()]) + for lab in labels] + + sizes = self._pad_lists(lists) + return (torch.tensor(lists).cuda(), + torch.tensor(sizes, dtype=torch.int32).cuda()) + + def __next__(self): + data = self.dali_it.__next__() + transcripts, transcripts_lengths = self._gen_transcripts( + data[0]["label"]) + return (data[0]["audio"], data[0]["audio_shape"][:, 1], transcripts, + transcripts_lengths) + + def next(self): + return self.__next__() + + def __iter__(self): + return self + + +# TODO: refactor +class SyntheticDataIterator(object): + def __init__(self, batch_size, nfeatures, feat_min=-5., feat_max=0., + txt_min=0., txt_max=23., feat_lens_max=1760, txt_lens_max=231, + regenerate=False): + """ + Args: + batch_size + nfeatures: number of features for melfbanks + feat_min: minimum value in `feat` tensor, used for randomization + feat_max: maximum value in `feat` tensor, used for randomization + txt_min: minimum value in `txt` tensor, used for randomization + txt_max: maximum value in `txt` tensor, used for randomization + regenerate: If True, regenerate random tensors for every iterator + step. If False, generate them only at start. + """ + self.batch_size = batch_size + self.nfeatures = nfeatures + self.feat_min = feat_min + self.feat_max = feat_max + self.feat_lens_max = feat_lens_max + self.txt_min = txt_min + self.txt_max = txt_max + self.txt_lens_max = txt_lens_max + self.regenerate = regenerate + + if not self.regenerate: + (self.feat, self.feat_lens, self.txt, self.txt_lens + ) = self._generate_sample() + + def _generate_sample(self): + feat = ((self.feat_max - self.feat_min) + * np.random.random_sample( + (self.batch_size, self.nfeatures, self.feat_lens_max)) + + self.feat_min) + feat_lens = np.random.randint(0, int(self.feat_lens_max) - 1, + size=self.batch_size) + txt = (self.txt_max - self.txt_min) * np.random.random_sample( + (self.batch_size, self.txt_lens_max)) + self.txt_min + txt_lens = np.random.randint(0, int(self.txt_lens_max) - 1, + size=self.batch_size) + return (torch.Tensor(feat).cuda(), + torch.Tensor(feat_lens).cuda(), + torch.Tensor(txt).cuda(), + torch.Tensor(txt_lens).cuda()) + + def __next__(self): + if self.regenerate: + return self._generate_sample() + return self.feat, self.feat_lens, self.txt, self.txt_lens + + def next(self): + return self.__next__() + + def __iter__(self): + return self diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py b/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py new file mode 100644 index 00000000..8b3e93ef --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py @@ -0,0 +1,343 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools +import math +import multiprocessing + +import numpy as np + +import nvidia.dali as dali +import nvidia.dali.fn as fn +import nvidia.dali.types as types +import torch +import torch.distributed as dist + + +def _interleave_lists(*lists): + """ + [*, **, ***], [1, 2, 3], [a, b, c] -> [*, 1, a, **, 2, b, ***, 3, c] + Returns: + iterator over interleaved list + """ + assert all((len(lists[0]) == len(test_l) for test_l in lists)), \ + "All lists have to have the same length" + return itertools.chain(*zip(*lists)) + + +def _generate_cutouts(mask_params, nfeatures): + """ + Returns: + Generates anchors and shapes of the cutout regions. + Single call generates one batch of data. + The output shall be passed to DALI's Erase operator + anchors = [f0 t0 f1 t1 ...] + shapes = [f0w t0h f1w t1h ...] + """ + MAX_TIME_DIMENSION = 20 * 16000 + freq_anchors = np.random.random(mask_params['freq_num_regions']) + time_anchors = np.random.random(mask_params['time_num_regions']) + both_anchors_freq = np.random.random(mask_params['both_num_regions']) + both_anchors_time = np.random.random(mask_params['both_num_regions']) + anchors = [] + for anch in freq_anchors: + anchors.extend([anch, 0]) + for anch in time_anchors: + anchors.extend([0, anch]) + for t, f in zip(both_anchors_time, both_anchors_freq): + anchors.extend([f, t]) + + shapes = [] + shapes.extend( + _interleave_lists( + np.random.randint(mask_params['freq_min'], + mask_params['freq_max'] + 1, + mask_params['freq_num_regions']), + # XXX: Here, a time dimension of the spectrogram shall be passed. + # However, in DALI ArgumentInput can't come from GPU. + # So we leave the job for Erase (masking operator) to get it together. 
+ [int(MAX_TIME_DIMENSION)] * mask_params['freq_num_regions'] + ) + ) + shapes.extend( + _interleave_lists( + [nfeatures] * mask_params['time_num_regions'], + np.random.randint(mask_params['time_min'], + mask_params['time_max'] + 1, + mask_params['time_num_regions']) + ) + ) + shapes.extend( + _interleave_lists( + np.random.randint(mask_params['both_min_freq'], + mask_params['both_max_freq'] + 1, + mask_params['both_num_regions']), + np.random.randint(mask_params['both_min_time'], + mask_params['both_max_time'] + 1, + mask_params['both_num_regions']) + ) + ) + return anchors, shapes + + +def _tuples2list(tuples: list): + """ + [(a, b), (c, d)] -> [[a, c], [b, d]] + """ + return map(list, zip(*tuples)) + + +def _dali_init_log(args: dict): + if not dist.is_initialized() or dist.get_rank() == 0: + max_len = max([len(ii) for ii in args.keys()]) + fmt_string = '\t%' + str(max_len) + 's : %s' + print('Initializing DALI with parameters:') + for keyPair in sorted(args.items()): + print(fmt_string % keyPair) + + +@dali.pipeline_def +def dali_asr_pipeline(train_pipeline, # True if training, False if validation + file_root, + file_list, + sample_rate, + silence_threshold, + resample_range, + discrete_resample_range, + window_size, + window_stride, + nfeatures, + nfft, + frame_splicing_factor, + dither_coeff, + pad_align, + preemph_coeff, + do_spectrogram_masking=False, + cutouts_generator=None, + shard_id=0, + n_shards=1, + preprocessing_device="gpu"): + do_remove_silence = silence_threshold is not None + + def _div_ceil(dividend, divisor): + return (dividend + (divisor - 1)) // divisor + + encoded, label = fn.readers.file( + device="cpu", name="file_reader", file_root=file_root, + file_list=file_list, shard_id=shard_id, num_shards=n_shards, + shuffle_after_epoch=train_pipeline) + + speed_perturbation_coeffs = None + if resample_range is not None: + if discrete_resample_range: + values = [resample_range[0], 1.0, resample_range[1]] + speed_perturbation_coeffs = fn.random.uniform(device="cpu", + values=values) + else: + speed_perturbation_coeffs = fn.random.uniform(device="cpu", + range=resample_range) + + if train_pipeline and speed_perturbation_coeffs is not None: + dec_sample_rate_arg = speed_perturbation_coeffs * sample_rate + elif resample_range is None: + dec_sample_rate_arg = sample_rate + else: + dec_sample_rate_arg = None + + audio, _ = fn.decoders.audio(encoded, sample_rate=dec_sample_rate_arg, + dtype=types.FLOAT, downmix=True) + if do_remove_silence: + begin, length = fn.nonsilent_region(audio, cutoff_db=silence_threshold) + audio = fn.slice(audio, begin, length, axes=[0]) + + # Max duration drop is performed at DataLayer stage + + if preprocessing_device == "gpu": + audio = audio.gpu() + + if dither_coeff != 0.: + audio = audio + fn.random.normal(device=preprocessing_device + ) * dither_coeff + + audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff) + + spec = fn.spectrogram(audio, nfft=nfft, + window_length=window_size * sample_rate, + window_step=window_stride * sample_rate) + + mel_spec = fn.mel_filter_bank(spec, sample_rate=sample_rate, + nfilter=nfeatures, normalize=True) + + log_features = fn.to_decibels(mel_spec, multiplier=np.log(10), + reference=1.0, cutoff_db=math.log(1e-20)) + + log_features_len = fn.shapes(log_features) + if frame_splicing_factor != 1: + log_features_len = _div_ceil(log_features_len, frame_splicing_factor) + + log_features = fn.normalize(log_features, axes=[1]) + log_features = fn.pad(log_features, axes=[1], fill_value=0, align=pad_align) + + if 
train_pipeline and do_spectrogram_masking: + anchors, shapes = fn.external_source(source=cutouts_generator, + num_outputs=2, cycle=True) + log_features = fn.erase(log_features, anchor=anchors, shape=shapes, + axes=[0, 1], fill_value=0, + normalized_anchor=True) + + # When modifying DALI pipeline returns, make sure you update `output_map` + # in DALIGenericIterator invocation + return log_features.gpu(), label.gpu(), log_features_len.gpu() + + +def make_dali_asr_pipeline(train_pipeline: bool, device_id, batch_size, + file_root: str, file_list: str, config_data: dict, + config_features: dict, device_type: str = "gpu", + do_resampling: bool = True, + num_cpu_threads: int = multiprocessing.cpu_count()): + max_duration = config_data['max_duration'] + sample_rate = config_data['sample_rate'] + silence_threshold = -60 if config_data['trim_silence'] else None + + # TODO Take into account resampling probablity + # TODO config_features['speed_perturbation']['p'] + if do_resampling and config_data['speed_perturbation'] is not None: + resample_range = [config_data['speed_perturbation']['min_rate'], + config_data['speed_perturbation']['max_rate']] + discrete_resample_range = config_data['speed_perturbation']['discrete'] + else: + resample_range = None + discrete_resample_range = False + + window_size = config_features['window_size'] + window_stride = config_features['window_stride'] + nfeatures = config_features['n_filt'] + nfft = config_features['n_fft'] + frame_splicing_factor = config_features['frame_splicing'] + dither_coeff = config_features['dither'] + pad_align = config_features['pad_align'] + pad_to_max_duration = config_features['pad_to_max_duration'] + assert not pad_to_max_duration, \ + "Padding to max duration currently not supported in DALI" + preemph_coeff = .97 + + config_spec = config_features['spec_augment'] + if config_spec is not None: + mask_time_num_regions = config_spec['time_masks'] + mask_time_min = config_spec['min_time'] + mask_time_max = config_spec['max_time'] + mask_freq_num_regions = config_spec['freq_masks'] + mask_freq_min = config_spec['min_freq'] + mask_freq_max = config_spec['max_freq'] + else: + mask_time_num_regions = 0 + mask_time_min = 0 + mask_time_max = 0 + mask_freq_num_regions = 0 + mask_freq_min = 0 + mask_freq_max = 0 + + config_cutout = config_features['cutout_augment'] + if config_cutout is not None: + mask_both_num_regions = config_cutout['masks'] + mask_both_min_time = config_cutout['min_time'] + mask_both_max_time = config_cutout['max_time'] + mask_both_min_freq = config_cutout['min_freq'] + mask_both_max_freq = config_cutout['max_freq'] + else: + mask_both_num_regions = 0 + mask_both_min_time = 0 + mask_both_max_time = 0 + mask_both_min_freq = 0 + mask_both_max_freq = 0 + + nfeatures = config_features['n_filt'] + do_spectrogram_masking = \ + mask_time_num_regions > 0 or mask_freq_num_regions > 0 or \ + mask_both_num_regions > 0 + + do_remove_silence = silence_threshold is not None + + del(config_spec) + del(config_cutout) + del(config_data) + del(config_features) + + _dali_init_log(locals()) + + mask_params = { + 'time_num_regions': mask_time_num_regions, + 'time_min': mask_time_min, + 'time_max': mask_time_max, + 'freq_num_regions': mask_freq_num_regions, + 'freq_min': mask_freq_min, + 'freq_max': mask_freq_max, + 'both_num_regions': mask_both_num_regions, + 'both_min_time': mask_both_min_time, + 'both_max_time': mask_both_max_time, + 'both_min_freq': mask_both_min_freq, + 'both_max_freq': mask_both_max_freq, + } + + def _cutouts_generator(): + """ + 
Generator, that wraps cutouts creation in order to randomize inputs + and allow passing them to DALI's ExternalSource operator + """ + [anchors, shapes] = _tuples2list( + [_generate_cutouts(mask_params, nfeatures) + for _ in range(batch_size)]) + + yield (np.array(anchors, dtype=np.float32), + np.array(shapes, dtype=np.float32)) + + cutouts_gen = _cutouts_generator if do_spectrogram_masking else None + + if torch.distributed.is_initialized(): + shard_id = torch.distributed.get_rank() + n_shards = torch.distributed.get_world_size() + else: + shard_id = 0 + n_shards = 1 + + preprocessing_device = device_type.lower() + assert preprocessing_device == "cpu" or preprocessing_device == "gpu", \ + "Incorrect preprocessing device. Please choose either 'cpu' or 'gpu'" + + pipe = dali_asr_pipeline( + train_pipeline=train_pipeline, + file_root=file_root, + file_list=file_list, + sample_rate=sample_rate, + silence_threshold=silence_threshold, + resample_range=resample_range, + discrete_resample_range=discrete_resample_range, + window_size=window_size, + window_stride=window_stride, + nfeatures=nfeatures, + nfft=nfft, + frame_splicing_factor=frame_splicing_factor, + dither_coeff=dither_coeff, + pad_align=pad_align, + preemph_coeff=preemph_coeff, + do_spectrogram_masking=do_spectrogram_masking, + cutouts_generator=cutouts_gen, + shard_id=shard_id, + n_shards=n_shards, + preprocessing_device=preprocessing_device, + batch_size=batch_size, + num_threads=num_cpu_threads, + device_id=device_id + ) + return pipe diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/dataset.py b/PyTorch/SpeechRecognition/QuartzNet/common/dataset.py new file mode 100644 index 00000000..bb00d33e --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/dataset.py @@ -0,0 +1,234 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from pathlib import Path + +import numpy as np + +import torch +from torch.utils.data import Dataset, DataLoader +from torch.utils.data.distributed import DistributedSampler + +from .audio import (audio_from_file, AudioSegment, GainPerturbation, + ShiftPerturbation, SpeedPerturbation) +from .text import _clean_text, punctuation_map + + +def normalize_string(s, labels, punct_map): + """Normalizes string. + + Example: + 'call me at 8:00 pm!' 
-> 'call me at eight zero pm' + """ + labels = set(labels) + try: + text = _clean_text(s, ["english_cleaners"], punct_map).strip() + return ''.join([tok for tok in text if all(t in labels for t in tok)]) + except: + print(f"WARNING: Normalizing failed: {s}") + return None + + +class FilelistDataset(Dataset): + def __init__(self, filelist_fpath): + self.samples = [line.strip() for line in open(filelist_fpath, 'r')] + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + audio, audio_len = audio_from_file(self.samples[index]) + return (audio.squeeze(0), audio_len, torch.LongTensor([0]), + torch.LongTensor([0])) + + +class SingleAudioDataset(FilelistDataset): + def __init__(self, audio_fpath): + self.samples = [audio_fpath] + + +class AudioDataset(Dataset): + def __init__(self, data_dir, manifest_fpaths, labels, + sample_rate=16000, min_duration=0.1, max_duration=float("inf"), + pad_to_max_duration=False, max_utts=0, normalize_transcripts=True, + sort_by_duration=False, trim_silence=False, + speed_perturbation=None, gain_perturbation=None, + shift_perturbation=None, ignore_offline_speed_perturbation=False): + """Loads audio, transcript and durations listed in a .json file. + + Args: + data_dir: absolute path to dataset folder + manifest_filepath: relative path from dataset folder + to manifest json as described above. Can be coma-separated paths. + labels (str): all possible output symbols + min_duration (int): skip audio shorter than threshold + max_duration (int): skip audio longer than threshold + pad_to_max_duration (bool): pad all sequences to max_duration + max_utts (int): limit number of utterances + normalize_transcripts (bool): normalize transcript text + sort_by_duration (bool): sort sequences by increasing duration + trim_silence (bool): trim leading and trailing silence from audio + ignore_offline_speed_perturbation (bool): use precomputed speed perturbation + + Returns: + tuple of Tensors + """ + self.data_dir = data_dir + self.labels = labels + self.labels_map = dict([(labels[i], i) for i in range(len(labels))]) + self.punctuation_map = punctuation_map(labels) + self.blank_index = len(labels) + + self.pad_to_max_duration = pad_to_max_duration + + self.sort_by_duration = sort_by_duration + self.max_utts = max_utts + self.normalize_transcripts = normalize_transcripts + self.ignore_offline_speed_perturbation = ignore_offline_speed_perturbation + + self.min_duration = min_duration + self.max_duration = max_duration + self.trim_silence = trim_silence + self.sample_rate = sample_rate + + perturbations = [] + if speed_perturbation is not None: + perturbations.append(SpeedPerturbation(**speed_perturbation)) + if gain_perturbation is not None: + perturbations.append(GainPerturbation(**gain_perturbation)) + if shift_perturbation is not None: + perturbations.append(ShiftPerturbation(**shift_perturbation)) + self.perturbations = perturbations + + self.max_duration = max_duration + + self.samples = [] + self.duration = 0.0 + self.duration_filtered = 0.0 + + for fpath in manifest_fpaths: + self._load_json_manifest(fpath) + + if sort_by_duration: + self.samples = sorted(self.samples, key=lambda s: s['duration']) + + def __getitem__(self, index): + s = self.samples[index] + rn_indx = np.random.randint(len(s['audio_filepath'])) + duration = s['audio_duration'][rn_indx] if 'audio_duration' in s else 0 + offset = s.get('offset', 0) + + segment = AudioSegment( + s['audio_filepath'][rn_indx], target_sr=self.sample_rate, + offset=offset, duration=duration, 
trim=self.trim_silence) + + for p in self.perturbations: + p.maybe_apply(segment, self.sample_rate) + + segment = torch.FloatTensor(segment.samples) + + return (segment, + torch.tensor(segment.shape[0]).int(), + torch.tensor(s["transcript"]), + torch.tensor(len(s["transcript"])).int()) + + def __len__(self): + return len(self.samples) + + def _load_json_manifest(self, fpath): + for s in json.load(open(fpath, "r", encoding="utf-8")): + + if self.pad_to_max_duration and not self.ignore_offline_speed_perturbation: + # require all perturbed samples to be < self.max_duration + s_max_duration = max(f['duration'] for f in s['files']) + else: + # otherwise we allow perturbances to be > self.max_duration + s_max_duration = s['original_duration'] + + s['duration'] = s.pop('original_duration') + if not (self.min_duration <= s_max_duration <= self.max_duration): + self.duration_filtered += s['duration'] + continue + + # Prune and normalize according to transcript + tr = (s.get('transcript', None) or + self.load_transcript(s['text_filepath'])) + + if not isinstance(tr, str): + print(f'WARNING: Skipped sample (transcript not a str): {tr}.') + self.duration_filtered += s['duration'] + continue + + if self.normalize_transcripts: + tr = normalize_string(tr, self.labels, self.punctuation_map) + + s["transcript"] = self.to_vocab_inds(tr) + + files = s.pop('files') + if self.ignore_offline_speed_perturbation: + files = [f for f in files if f['speed'] == 1.0] + + s['audio_duration'] = [f['duration'] for f in files] + s['audio_filepath'] = [str(Path(self.data_dir, f['fname'])) + for f in files] + self.samples.append(s) + self.duration += s['duration'] + + if self.max_utts > 0 and len(self.samples) >= self.max_utts: + print(f'Reached max_utts={self.max_utts}. Finished parsing {fpath}.') + break + + def load_transcript(self, transcript_path): + with open(transcript_path, 'r', encoding="utf-8") as transcript_file: + transcript = transcript_file.read().replace('\n', '') + return transcript + + def to_vocab_inds(self, transcript): + chars = [self.labels_map.get(x, self.blank_index) for x in list(transcript)] + transcript = list(filter(lambda x: x != self.blank_index, chars)) + return transcript + + +def collate_fn(batch): + bs = len(batch) + max_len = lambda l, idx: max(el[idx].size(0) for el in l) + audio = torch.zeros(bs, max_len(batch, 0)) + audio_lens = torch.zeros(bs, dtype=torch.int32) + transcript = torch.zeros(bs, max_len(batch, 2)) + transcript_lens = torch.zeros(bs, dtype=torch.int32) + + for i, sample in enumerate(batch): + audio[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0]) + audio_lens[i] = sample[1] + transcript[i].narrow(0, 0, sample[2].size(0)).copy_(sample[2]) + transcript_lens[i] = sample[3] + return audio, audio_lens, transcript, transcript_lens + + +def get_data_loader(dataset, batch_size, multi_gpu=True, shuffle=True, + drop_last=True, num_workers=4): + + kw = {'dataset': dataset, 'collate_fn': collate_fn, + 'num_workers': num_workers, 'pin_memory': True} + + if multi_gpu: + loader_shuffle = False + sampler = DistributedSampler(dataset, shuffle=shuffle) + else: + loader_shuffle = shuffle + sampler = None + + return DataLoader(batch_size=batch_size, drop_last=drop_last, + sampler=sampler, shuffle=loader_shuffle, **kw) diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/features.py b/PyTorch/SpeechRecognition/QuartzNet/common/features.py new file mode 100644 index 00000000..134e8255 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/features.py @@ -0,0 +1,301 @@ +# Copyright 
(c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import random + +import librosa +import torch +import torch.nn as nn + + +class BaseFeatures(nn.Module): + """Base class for GPU accelerated audio preprocessing.""" + __constants__ = ["pad_align", "pad_to_max_duration", "max_len"] + + def __init__(self, pad_align, pad_to_max_duration, max_duration, + sample_rate, window_size, window_stride, spec_augment=None, + cutout_augment=None): + super(BaseFeatures, self).__init__() + + self.pad_align = pad_align + self.pad_to_max_duration = pad_to_max_duration + self.win_length = int(sample_rate * window_size) # frame size + self.hop_length = int(sample_rate * window_stride) + + # Calculate maximum sequence length (# frames) + if pad_to_max_duration: + self.max_len = 1 + math.ceil( + (max_duration * sample_rate - self.win_length) / self.hop_length + ) + + if spec_augment is not None: + self.spec_augment = SpecAugment(**spec_augment) + else: + self.spec_augment = None + + if cutout_augment is not None: + self.cutout_augment = CutoutAugment(**cutout_augment) + else: + self.cutout_augment = None + + @torch.no_grad() + def calculate_features(self, audio, audio_lens): + return audio, audio_lens + + def __call__(self, audio, audio_lens): + dtype = audio.dtype + audio = audio.float() + feat, feat_lens = self.calculate_features(audio, audio_lens) + + feat = self.apply_padding(feat) + + if self.cutout_augment is not None: + feat = self.cutout_augment(feat) + + if self.spec_augment is not None: + feat = self.spec_augment(feat) + + feat = feat.to(dtype) + return feat, feat_lens + + def apply_padding(self, x): + if self.pad_to_max_duration: + x_size = max(x.size(-1), self.max_len) + else: + x_size = x.size(-1) + + if self.pad_align > 0: + pad_amt = x_size % self.pad_align + else: + pad_amt = 0 + + padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0) + return nn.functional.pad(x, (0, padded_len - x.size(-1))) + + +class SpecAugment(nn.Module): + """Spec augment. 
refer to https://arxiv.org/abs/1904.08779 + """ + def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0, + min_time=0, max_time=10): + super(SpecAugment, self).__init__() + assert 0 <= min_freq <= max_freq + assert 0 <= min_time <= max_time + + self.freq_masks = freq_masks + self.min_freq = min_freq + self.max_freq = max_freq + + self.time_masks = time_masks + self.min_time = min_time + self.max_time = max_time + + @torch.no_grad() + def forward(self, x): + sh = x.shape + mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device) + + for idx in range(sh[0]): + for _ in range(self.freq_masks): + w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item() + f0 = torch.randint(0, max(1, sh[1] - w), size=(1,)) + mask[idx, f0:f0+w] = 1 + + for _ in range(self.time_masks): + w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item() + t0 = torch.randint(0, max(1, sh[2] - w), size=(1,)) + mask[idx, :, t0:t0+w] = 1 + + return x.masked_fill(mask, 0) + + +class CutoutAugment(nn.Module): + """Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf + """ + def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5): + super(CutoutAugment, self).__init__() + assert 0 <= min_freq <= max_freq + assert 0 <= min_time <= max_time + + self.masks = masks + self.min_freq = min_freq + self.max_freq = max_freq + self.min_time = min_time + self.max_time = max_time + + @torch.no_grad() + def forward(self, x): + sh = x.shape + mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device) + + for idx in range(sh[0]): + for i in range(self.masks): + + w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item() + h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item() + + f0 = int(random.uniform(0, sh[1] - w)) + t0 = int(random.uniform(0, sh[2] - h)) + + mask[idx, f0:f0+w, t0:t0+h] = 1 + + return x.masked_fill(mask, 0) + + +@torch.jit.script +def normalize_batch(x, seq_len, normalize_type: str): +# print ("normalize_batch: x, seq_len, shapes: ", x.shape, seq_len, seq_len.shape) + if normalize_type == "per_feature": + x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, + device=x.device) + x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, + device=x.device) + for i in range(x.shape[0]): + x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1) + x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1) + # make sure x_std is not zero + x_std += 1e-5 + return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2) + + elif normalize_type == "all_features": + x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device) + x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device) + for i in range(x.shape[0]): + x_mean[i] = x[i, :, :int(seq_len[i])].mean() + x_std[i] = x[i, :, :int(seq_len[i])].std() + # make sure x_std is not zero + x_std += 1e-5 + return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1) + else: + return x + + +@torch.jit.script +def splice_frames(x, frame_splicing: int): + """ Stacks frames together across feature dim + + input is batch_size, feature_dim, num_frames + output is batch_size, feature_dim*frame_splicing, num_frames + + """ + seq = [x] + # TORCHSCRIPT: JIT doesnt like range(start, stop) + for n in range(frame_splicing - 1): + seq.append(torch.cat([x[:, :, :n + 1], x[:, :, n + 1:]], dim=2)) + return torch.cat(seq, dim=1) + + +class FilterbankFeatures(BaseFeatures): + # For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants + __constants__ = ["dither", "preemph", "n_fft", 
"hop_length", "win_length", + "log", "frame_splicing", "normalize"] + # torchscript: "center" removed due to a bug + + def __init__(self, spec_augment=None, cutout_augment=None, + sample_rate=8000, window_size=0.02, window_stride=0.01, + window="hamming", normalize="per_feature", n_fft=None, + preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True, + dither=1e-5, pad_align=8, pad_to_max_duration=False, + max_duration=float('inf'), frame_splicing=1): + super(FilterbankFeatures, self).__init__( + pad_align=pad_align, pad_to_max_duration=pad_to_max_duration, + max_duration=max_duration, sample_rate=sample_rate, + window_size=window_size, window_stride=window_stride, + spec_augment=spec_augment, cutout_augment=cutout_augment) + + torch_windows = { + 'hann': torch.hann_window, + 'hamming': torch.hamming_window, + 'blackman': torch.blackman_window, + 'bartlett': torch.bartlett_window, + 'none': None, + } + + self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length)) + + self.normalize = normalize + self.log = log + #TORCHSCRIPT: Check whether or not we need this + self.dither = dither + self.frame_splicing = frame_splicing + self.n_filt = n_filt + self.preemph = preemph + highfreq = highfreq or sample_rate / 2 + window_fn = torch_windows.get(window, None) + window_tensor = window_fn(self.win_length, + periodic=False) if window_fn else None + filterbanks = torch.tensor( + librosa.filters.mel(sample_rate, self.n_fft, n_mels=n_filt, + fmin=lowfreq, fmax=highfreq), + dtype=torch.float).unsqueeze(0) + # torchscript + self.register_buffer("fb", filterbanks) + self.register_buffer("window", window_tensor) + + def get_seq_len(self, seq_len): + return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to( + dtype=torch.int) + + # do stft + # TORCHSCRIPT: center removed due to bug + def stft(self, x): + return torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length, + win_length=self.win_length, + window=self.window.to(dtype=torch.float)) + + @torch.no_grad() + def calculate_features(self, x, seq_len): + dtype = x.dtype + + seq_len = self.get_seq_len(seq_len) + + # dither + if self.dither > 0: + x += self.dither * torch.randn_like(x) + + # do preemphasis + if self.preemph is not None: + x = torch.cat( + (x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1) + x = self.stft(x) + + # get power spectrum + x = x.pow(2).sum(-1) + + # dot with filterbank energies + x = torch.matmul(self.fb.to(x.dtype), x) + + # log features if required + if self.log: + x = torch.log(x + 1e-20) + + # frame splicing if required + if self.frame_splicing > 1: + raise ValueError('Frame splicing not supported') + + # normalize if required + x = normalize_batch(x, seq_len, normalize_type=self.normalize) + + # mask to zero any values beyond seq_len in batch, + # pad to multiple of `pad_align` (for efficiency) + max_len = x.size(-1) + mask = torch.arange(max_len, dtype=seq_len.dtype, device=x.device) + mask = mask.expand(x.size(0), max_len) >= seq_len.unsqueeze(1) + x = x.masked_fill(mask.unsqueeze(1), 0) + + # TORCHSCRIPT: Is this del important? It breaks scripting + # del mask + + return x.to(dtype), seq_len diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/helpers.py b/PyTorch/SpeechRecognition/QuartzNet/common/helpers.py new file mode 100644 index 00000000..601fa988 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/helpers.py @@ -0,0 +1,276 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import os +import re +from collections import OrderedDict + +import torch +import torch.distributed as dist + +from .metrics import word_error_rate + + +def print_once(msg): + if not dist.is_initialized() or dist.get_rank() == 0: + print(msg) + + +def add_ctc_blank(symbols): + return symbols + [''] + + +def ctc_decoder_predictions_tensor(tensor, labels): + """ + Takes output of greedy ctc decoder and performs ctc decoding algorithm to + remove duplicates and special symbol. Returns prediction + Args: + tensor: model output tensor + label: A list of labels + Returns: + prediction + """ + blank_id = len(labels) - 1 + hypotheses = [] + labels_map = {i: labels[i] for i in range(len(labels))} + prediction_cpu_tensor = tensor.long().cpu() + # iterate over batch + for ind in range(prediction_cpu_tensor.shape[0]): + prediction = prediction_cpu_tensor[ind].numpy().tolist() + # CTC decoding procedure + decoded_prediction = [] + previous = len(labels) - 1 # id of a blank symbol + for p in prediction: + if (p != previous or previous == blank_id) and p != blank_id: + decoded_prediction.append(p) + previous = p + hypothesis = ''.join([labels_map[c] for c in decoded_prediction]) + hypotheses.append(hypothesis) + return hypotheses + + +def greedy_wer(preds, tgt, tgt_lens, labels): + """ + Takes output of greedy ctc decoder and performs ctc decoding algorithm to + remove duplicates and special symbol. Prints wer and prediction examples to screen + Args: + tensors: A list of 3 tensors (predictions, targets, target_lengths) + labels: A list of labels + + Returns: + word error rate + """ + with torch.no_grad(): + references = gather_transcripts([tgt], [tgt_lens], labels) + hypotheses = ctc_decoder_predictions_tensor(preds, labels) + + wer, _, _ = word_error_rate(hypotheses, references) + return wer, hypotheses[0], references[0] + + +def gather_losses(losses_list): + return [torch.mean(torch.stack(losses_list))] + + +def gather_predictions(predictions_list, labels): + results = [] + for prediction in predictions_list: + results += ctc_decoder_predictions_tensor(prediction, labels=labels) + return results + + +def gather_transcripts(transcript_list, transcript_len_list, labels): + results = [] + labels_map = {i: labels[i] for i in range(len(labels))} + # iterate over workers + for txt, lens in zip(transcript_list, transcript_len_list): + for t, l in zip(txt.long().cpu(), lens.long().cpu()): + t = list(t.numpy()) + results.append(''.join([labels_map[c] for c in t[:l]])) + return results + + +def process_evaluation_batch(tensors, global_vars, labels): + """ + Processes results of an iteration and saves it in global_vars + Args: + tensors: dictionary with results of an evaluation iteration, e.g. 
loss, predictions, transcript, and output + global_vars: dictionary where processes results of iteration are saved + labels: A list of labels + """ + for kv, v in tensors.items(): + if kv.startswith('loss'): + global_vars['EvalLoss'] += gather_losses(v) + elif kv.startswith('predictions'): + global_vars['preds'] += gather_predictions(v, labels) + elif kv.startswith('transcript_length'): + transcript_len_list = v + elif kv.startswith('transcript'): + transcript_list = v + elif kv.startswith('output'): + global_vars['logits'] += v + + global_vars['txts'] += gather_transcripts( + transcript_list, transcript_len_list, labels) + + +def process_evaluation_epoch(aggregates, tag=None): + """ + Processes results from each worker at the end of evaluation and combine to final result + Args: + aggregates: dictionary containing information of entire evaluation + Return: + wer: final word error rate + loss: final loss + """ + if 'losses' in aggregates: + eloss = torch.mean(torch.stack(aggregates['losses'])).item() + else: + eloss = None + hypotheses = aggregates['preds'] + references = aggregates['txts'] + + wer, scores, num_words = word_error_rate(hypotheses, references) + multi_gpu = dist.is_initialized() + if multi_gpu: + if eloss is not None: + eloss /= dist.get_world_size() + eloss_tensor = torch.tensor(eloss).cuda() + dist.all_reduce(eloss_tensor) + eloss = eloss_tensor.item() + + scores_tensor = torch.tensor(scores).cuda() + dist.all_reduce(scores_tensor) + scores = scores_tensor.item() + num_words_tensor = torch.tensor(num_words).cuda() + dist.all_reduce(num_words_tensor) + num_words = num_words_tensor.item() + wer = scores * 1.0 / num_words + return wer, eloss + + +def num_weights(module): + return sum(p.numel() for p in module.parameters() if p.requires_grad) + + +class Checkpointer(object): + + def __init__(self, save_dir, model_name, keep_milestones=[100, 200, 300]): + self.save_dir = save_dir + self.keep_milestones = keep_milestones + self.model_name = model_name + + tracked = [ + (int(re.search('epoch(\d+)_', f).group(1)), f) + for f in glob.glob(f'{save_dir}/{self.model_name}_epoch*_checkpoint.pt')] + tracked = sorted(tracked, key=lambda t: t[0]) + self.tracked = OrderedDict(tracked) + + def save(self, model, ema_model, optimizer, scaler, epoch, step, best_wer, + is_best=False): + """Saves model checkpoint for inference/resuming training. 
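+        In distributed runs all ranks synchronize on a barrier and only
+        rank 0 writes the checkpoint file.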
+ + Args: + model: the model, optionally wrapped by DistributedDataParallel + ema_model: model with averaged weights, can be None + optimizer: optimizer + epoch (int): epoch during which the model is saved + step (int): number of steps since beginning of training + best_wer (float): lowest recorded WER on the dev set + is_best (bool, optional): set name of checkpoint to 'best' + and overwrite the previous one + """ + rank = 0 + if dist.is_initialized(): + dist.barrier() + rank = dist.get_rank() + + if rank != 0: + return + + # Checkpoint already saved + if not is_best and epoch in self.tracked: + return + + unwrap_ddp = lambda model: getattr(model, 'module', model) + state = { + 'epoch': epoch, + 'step': step, + 'best_wer': best_wer, + 'state_dict': unwrap_ddp(model).state_dict(), + 'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None, + 'optimizer': optimizer.state_dict(), + 'scaler': scaler.state_dict(), + } + + if is_best: + fpath = os.path.join( + self.save_dir, f"{self.model_name}_best_checkpoint.pt") + else: + fpath = os.path.join( + self.save_dir, f"{self.model_name}_epoch{epoch}_checkpoint.pt") + + print_once(f"Saving {fpath}...") + torch.save(state, fpath) + + if not is_best: + # Remove old checkpoints; keep milestones and the last two + self.tracked[epoch] = fpath + for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones): + try: + os.remove(self.tracked[epoch]) + except: + pass + del self.tracked[epoch] + + def last_checkpoint(self): + tracked = list(self.tracked.values()) + + if len(tracked) >= 1: + try: + torch.load(tracked[-1], map_location='cpu') + return tracked[-1] + except: + print_once(f'Last checkpoint {tracked[-1]} appears corrupted.') + + elif len(tracked) >= 2: + return tracked[-2] + else: + return None + + def load(self, fpath, model, ema_model, optimizer, scaler, meta): + + print_once(f'Loading model from {fpath}') + checkpoint = torch.load(fpath, map_location="cpu") + + unwrap_ddp = lambda model: getattr(model, 'module', model) + state_dict = checkpoint['state_dict'] + unwrap_ddp(model).load_state_dict(state_dict, strict=True) + + if ema_model is not None: + if checkpoint.get('ema_state_dict') is not None: + key = 'ema_state_dict' + else: + key = 'state_dict' + print_once('WARNING: EMA weights not found in the checkpoint.') + print_once('WARNING: Initializing EMA model with regular params.') + state_dict = checkpoint[key] + unwrap_ddp(ema_model).load_state_dict(state_dict, strict=True) + + optimizer.load_state_dict(checkpoint['optimizer']) + scaler.load_state_dict(checkpoint['scaler']) + + meta['start_epoch'] = checkpoint.get('epoch') + meta['best_wer'] = checkpoint.get('best_wer', meta['best_wer']) diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/metrics.py b/PyTorch/SpeechRecognition/QuartzNet/common/metrics.py new file mode 100644 index 00000000..4ae47a4c --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/metrics.py @@ -0,0 +1,59 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +def __levenshtein(a, b): + """Calculates the Levenshtein distance between two sequences.""" + + n, m = len(a), len(b) + if n > m: + # Make sure n <= m, to use O(min(n,m)) space + a, b = b, a + n, m = m, n + + current = list(range(n + 1)) + for i in range(1, m + 1): + previous, current = current, [i] + [0] * n + for j in range(1, n + 1): + add, delete = previous[j] + 1, current[j - 1] + 1 + change = previous[j - 1] + if a[j - 1] != b[i - 1]: + change = change + 1 + current[j] = min(add, delete, change) + + return current[n] + + +def word_error_rate(hypotheses, references): + """Computes average Word Error Rate (WER) between two text lists.""" + + scores = 0 + words = 0 + len_diff = len(references) - len(hypotheses) + if len_diff > 0: + raise ValueError("Uneqal number of hypthoses and references: " + "{0} and {1}".format(len(hypotheses), len(references))) + elif len_diff < 0: + hypotheses = hypotheses[:len_diff] + + for h, r in zip(hypotheses, references): + h_list = h.split() + r_list = r.split() + words += len(r_list) + scores += __levenshtein(h_list, r_list) + if words!=0: + wer = 1.0*scores/words + else: + wer = float('inf') + return wer, scores, words diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/optimizers.py b/PyTorch/SpeechRecognition/QuartzNet/common/optimizers.py new file mode 100644 index 00000000..81759191 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/optimizers.py @@ -0,0 +1,269 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from torch.optim import Optimizer +import math + + +def lr_policy(step, epoch, initial_lr, optimizer, steps_per_epoch, warmup_epochs, + hold_epochs, num_epochs=None, policy='linear', min_lr=1e-5, + exp_gamma=None): + """ + learning rate decay + Args: + initial_lr: base learning rate + step: current iteration number + N: total number of iterations over which learning rate is decayed + lr_steps: list of steps to apply exp_gamma + """ + warmup_steps = warmup_epochs * steps_per_epoch + hold_steps = hold_epochs * steps_per_epoch + + if policy == 'legacy': + assert num_epochs is not None + tot_steps = num_epochs * steps_per_epoch + + if step < warmup_steps: + a = (step + 1) / (warmup_steps + 1) + elif step < warmup_steps + hold_steps: + a = 1.0 + else: + a = (((tot_steps - step) + / (tot_steps - warmup_steps - hold_steps)) ** 2) + + elif policy == 'exponential': + assert exp_gamma is not None + + if step < warmup_steps: + a = (step + 1) / (warmup_steps + 1) + elif step < warmup_steps + hold_steps: + a = 1.0 + else: + a = exp_gamma ** (epoch - warmup_epochs - hold_epochs) + + else: + raise ValueError + + new_lr = max(a * initial_lr, min_lr) + for param_group in optimizer.param_groups: + param_group['lr'] = new_lr + + +class AdamW(Optimizer): + """Implements AdamW algorithm. + + It has been proposed in `Adam: A Method for Stochastic Optimization`_. 
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + + Adam: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p.data) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 + p.data.add_(torch.mul(p.data, group['weight_decay']).addcdiv_(1, exp_avg, denom), alpha=-step_size) + + return loss + + +class Novograd(Optimizer): + """ + Implements Novograd algorithm. 
+ + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.95, 0)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging: gradient averaging + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ + + def __init__(self, params, lr=1e-3, betas=(0.95, 0), eps=1e-8, + weight_decay=0, grad_averaging=False, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + amsgrad=amsgrad) + + super(Novograd, self).__init__(params, defaults) + + def __setstate__(self, state): + super(Novograd, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Sparse gradients are not supported.') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + norm = torch.sum(torch.pow(grad, 2)) + + if exp_avg_sq == 0: + exp_avg_sq.copy_(norm) + else: + exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. 
of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + grad.div_(denom) + if group['weight_decay'] != 0: + grad.add_(p.data, alpha=group['weight_decay']) + if group['grad_averaging']: + grad.mul_(1 - beta1) + exp_avg.mul_(beta1).add_(grad) + + p.data.add_(exp_avg, alpha=-group['lr']) + + return loss diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/sampler.py b/PyTorch/SpeechRecognition/QuartzNet/common/sampler.py new file mode 100644 index 00000000..ebeb3285 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/sampler.py @@ -0,0 +1,128 @@ +import torch +import numpy as np + +from torch.utils.data.sampler import Sampler + + +class DistributedSampler(Sampler): + def __init__(self, dataset, batch_size, world_size, rank): + """ + Constructor for the DistributedSampler. + :param dataset: dataset + :param batch_size: local batch size + :param world_size: number of distributed workers + :param rank: rank of the current process + """ + self.dataset = dataset + self.world_size = world_size + self.rank = rank + self.epoch = 0 + + self.batch_size = batch_size + self.global_batch_size = batch_size * world_size + + self.data_len = len(self.dataset) + + self.num_samples = self.data_len // self.global_batch_size \ + * self.global_batch_size + + def distribute_batches(self, indices): + """ + Assigns batches to workers. + Consecutive ranks are getting consecutive batches. + :param indices: torch.tensor with batch indices + """ + assert len(indices) == self.num_samples + + indices = indices.view(-1, self.batch_size) + indices = indices[self.rank::self.world_size].contiguous() + indices = indices.view(-1) + indices = indices.tolist() + + assert len(indices) == self.num_samples // self.world_size + return indices + + def reshuffle_batches(self, indices, rng): + """ + Permutes global batches + :param indices: torch.tensor with batch indices + :param rng: instance of torch.Generator + """ + indices = indices.view(-1, self.global_batch_size) + num_batches = indices.shape[0] + order = torch.randperm(num_batches, generator=rng) + indices = indices[order, :] + indices = indices.view(-1) + return indices + + def __iter__(self): + g = torch.Generator() + g.manual_seed(self.epoch) + # generate permutation + indices = torch.randperm(self.data_len, generator=rng) + + # make indices evenly divisible by (batch_size * world_size) + indices = indices[:self.num_samples] + + # assign batches to workers + indices = self.distribute_batches(indices) + return iter(indices) + + def set_epoch(self, epoch): + """ + Sets current epoch index. + Epoch index is used to seed RNG in __iter__() function. + :param epoch: index of current epoch + """ + self.epoch = epoch + + def __len__(self): + return self.num_samples // self.world_size + + +class BucketingSampler(DistributedSampler): + def __init__(self, dataset, batch_size, num_buckets, world_size, rank): + """ + Bucketing sampler with approx. equally-sized buckets. 
+ :param dataset: dataset + :param batch_size: local batch size + :param seeds: list of seeds, one seed for each training epoch + :param num_buckets: number of buckets + :param world_size: number of distributed workers + :param rank: rank of the current process + """ + super().__init__(dataset, batch_size, world_size, rank) + + self.num_buckets = num_buckets + len_ids = np.argsort([sample['duration'] for sample in dataset.samples]) + self.buckets = [torch.from_numpy(t) + for t in np.array_split(len_ids, num_buckets)] + global_bs = self.global_batch_size + + def __iter__(self): + g = torch.Generator() + g.manual_seed(self.epoch) + global_bsz = self.global_batch_size + + indices = [] + for bid in range(self.num_buckets): + # random shuffle within current bucket + perm = torch.randperm(len(self.buckets[bid]), generator=g) + bucket_indices = self.buckets[bid][perm] + + # add samples from current bucket to indices for current epoch + indices.append(bucket_indices) + + indices = torch.cat(indices) + + # make indices evenly divisible by global batch size + length = len(indices) // global_bsz * global_bsz + indices = indices[:length] + + assert len(indices) % self.global_batch_size == 0 + + # perform global reshuffle of all global batches + indices = self.reshuffle_batches(indices, g) + # distribute batches to individual workers + indices = self.distribute_batches(indices) + return iter(indices) diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/tb_dllogger.py b/PyTorch/SpeechRecognition/QuartzNet/common/tb_dllogger.py new file mode 100644 index 00000000..6675f96d --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/tb_dllogger.py @@ -0,0 +1,173 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
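For orientation, the sampler above first sorts utterances by duration and splits them into roughly equal buckets, shuffles within each bucket, then reshuffles whole global batches and interleaves them across ranks (consecutive ranks receive consecutive batches), so every batch holds clips of similar length and padding waste stays small. A minimal usage sketch, not part of the patch; the toy dataset (anything exposing a `samples` list of dicts with a 'duration' key), the batch size, and the single-process world_size=1 setting are illustrative assumptions:

import torch
from torch.utils.data import DataLoader, Dataset

from common.sampler import BucketingSampler  # assumes the QuartzNet model root is on PYTHONPATH

class ToyAudioDataset(Dataset):
    # Hypothetical stand-in for common/dataset.py: only the `samples`
    # metadata (with a per-utterance 'duration') matters for bucketing.
    def __init__(self, durations):
        self.samples = [{'duration': d} for d in durations]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return idx  # a real dataset would return audio features and a transcript here

dataset = ToyAudioDataset(durations=[1.2, 5.0, 3.3, 7.8, 2.1, 9.4, 0.8, 6.6] * 4)
sampler = BucketingSampler(dataset, batch_size=4, num_buckets=2, world_size=1, rank=0)
sampler.set_epoch(0)  # reseeds the per-epoch shuffle

loader = DataLoader(dataset, batch_size=4, sampler=sampler)
for batch in loader:
    pass  # in this toy setup each batch comes from a single duration bucket,
          # and whole batches arrive in a globally reshuffled order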
+ +import atexit +import glob +import os +import re +import numpy as np + +import torch +from torch.utils.tensorboard import SummaryWriter + +import dllogger +from dllogger import StdOutBackend, JSONStreamBackend, Verbosity + + +tb_loggers = {} + + +class TBLogger: + """ + xyz_dummies: stretch the screen with empty plots so the legend would + always fit for other plots + """ + def __init__(self, enabled, log_dir, name, interval=1, dummies=True): + self.enabled = enabled + self.interval = interval + self.cache = {} + if self.enabled: + self.summary_writer = SummaryWriter( + log_dir=os.path.join(log_dir, name), + flush_secs=120, max_queue=200) + atexit.register(self.summary_writer.close) + if dummies: + for key in ('aaa', 'zzz'): + self.summary_writer.add_scalar(key, 0.0, 1) + + def log(self, step, data): + for k, v in data.items(): + self.log_value(step, k, v.item() if type(v) is torch.Tensor else v) + + def log_value(self, step, key, val, stat='mean'): + if self.enabled: + if key not in self.cache: + self.cache[key] = [] + self.cache[key].append(val) + if len(self.cache[key]) == self.interval: + agg_val = getattr(np, stat)(self.cache[key]) + self.summary_writer.add_scalar(key, agg_val, step) + del self.cache[key] + + def log_grads(self, step, model): + if self.enabled: + norms = [p.grad.norm().item() for p in model.parameters() + if p.grad is not None] + for stat in ('max', 'min', 'mean'): + self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms), + stat=stat) + + +def unique_log_fpath(log_fpath): + + if not os.path.isfile(log_fpath): + return log_fpath + + # Avoid overwriting old logs + saved = sorted([int(re.search('\.(\d+)', f).group(1)) + for f in glob.glob(f'{log_fpath}.*')]) + + log_num = (saved[-1] if saved else 0) + 1 + return f'{log_fpath}.{log_num}' + + +def stdout_step_format(step): + if isinstance(step, str): + return step + fields = [] + if len(step) > 0: + fields.append("epoch {:>4}".format(step[0])) + if len(step) > 1: + fields.append("iter {:>4}".format(step[1])) + if len(step) > 2: + fields[-1] += "/{}".format(step[2]) + return " | ".join(fields) + + +def stdout_metric_format(metric, metadata, value): + name = metadata.get("name", metric + " : ") + unit = metadata.get("unit", None) + format = f'{{{metadata.get("format", "")}}}' + fields = [name, format.format(value) if value is not None else value, unit] + fields = [f for f in fields if f is not None] + return "| " + " ".join(fields) + + +def init_log(args): + enabled = (args.local_rank == 0) + if enabled: + fpath = args.log_file or os.path.join(args.output_dir, 'nvlog.json') + backends = [JSONStreamBackend(Verbosity.DEFAULT, + unique_log_fpath(fpath)), + StdOutBackend(Verbosity.VERBOSE, + step_format=stdout_step_format, + metric_format=stdout_metric_format)] + else: + backends = [] + + dllogger.init(backends=backends) + dllogger.metadata("train_lrate", {"name": "lrate", "format": ":>3.2e"}) + + for id_, pref in [('train', ''), ('train_avg', 'avg train '), + ('dev', ' avg dev '), ('dev_ema', ' EMA dev ')]: + + dllogger.metadata(f"{id_}_loss", + {"name": f"{pref}loss", "format": ":>7.2f"}) + + dllogger.metadata(f"{id_}_wer", + {"name": f"{pref}wer", "format": ":>6.2f"}) + + dllogger.metadata(f"{id_}_throughput", + {"name": f"{pref}utts/s", "format": ":>5.0f"}) + + dllogger.metadata(f"{id_}_took", + {"name": "took", "unit": "s", "format": ":>5.2f"}) + + tb_subsets = ['train', 'dev', 'dev_ema'] if args.ema else ['train', 'dev'] + global tb_loggers + tb_loggers = {s: TBLogger(enabled, args.output_dir, name=s) + for s in 
tb_subsets} + + log_parameters(vars(args), tb_subset='train') + + +def log(step, tb_total_steps=None, subset='train', data={}): + + if tb_total_steps is not None: + tb_loggers[subset].log(tb_total_steps, data) + + if subset != '': + data = {f'{subset}_{key}': val for key, val in data.items()} + dllogger.log(step, data=data) + + +def log_grads_tb(tb_total_steps, grads, tb_subset='train'): + tb_loggers[tb_subset].log_grads(tb_total_steps, grads) + + +def log_parameters(data, verbosity=0, tb_subset=None): + for k, v in data.items(): + dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity) + + if tb_subset is not None and tb_loggers[tb_subset].enabled: + tb_data = {k: v for k, v in data.items() + if type(v) in (str, bool, int, float)} + tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {}) + + +def flush_log(): + dllogger.flush() + for tbl in tb_loggers.values(): + if tbl.enabled: + tbl.summary_writer.flush() diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/text/LICENSE b/PyTorch/SpeechRecognition/QuartzNet/common/text/LICENSE new file mode 100644 index 00000000..4ad4ed1d --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/text/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Keith Ito + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/text/__init__.py b/PyTorch/SpeechRecognition/QuartzNet/common/text/__init__.py new file mode 100644 index 00000000..49018238 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/text/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) 2017 Keith Ito +""" from https://github.com/keithito/tacotron """ +import re +import string +from . 
import cleaners + +def _clean_text(text, cleaner_names, *args): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception('Unknown cleaner: %s' % name) + text = cleaner(text, *args) + return text + + +def punctuation_map(labels): + # Punctuation to remove + punctuation = string.punctuation + punctuation = punctuation.replace("+", "") + punctuation = punctuation.replace("&", "") + # TODO We might also want to consider: + # @ -> at + # # -> number, pound, hashtag + # ~ -> tilde + # _ -> underscore + # % -> percent + # If a punctuation symbol is inside our vocab, we do not remove from text + for l in labels: + punctuation = punctuation.replace(l, "") + # Turn all punctuation to whitespace + table = str.maketrans(punctuation, " " * len(punctuation)) + return table diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/text/cleaners.py b/PyTorch/SpeechRecognition/QuartzNet/common/text/cleaners.py new file mode 100644 index 00000000..a99db1a6 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/text/cleaners.py @@ -0,0 +1,107 @@ +# Copyright (c) 2017 Keith Ito +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" from https://github.com/keithito/tacotron +Modified to add puncturation removal +""" + +''' +Cleaners are transformations that run over the input text at both training and eval time. + +Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" +hyperparameter. Some cleaners are English-specific. You'll typically want to use: + 1. "english_cleaners" for English text + 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using + the Unidecode library (https://pypi.python.org/pypi/Unidecode) + 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update + the symbols in symbols.py to match your data). + +''' + +import re +from unidecode import unidecode +from .numbers import normalize_numbers + +# Regular expression matching whitespace: +_whitespace_re = re.compile(r'\s+') + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ + ('mrs', 'misess'), + ('mr', 'mister'), + ('dr', 'doctor'), + ('st', 'saint'), + ('co', 'company'), + ('jr', 'junior'), + ('maj', 'major'), + ('gen', 'general'), + ('drs', 'doctors'), + ('rev', 'reverend'), + ('lt', 'lieutenant'), + ('hon', 'honorable'), + ('sgt', 'sergeant'), + ('capt', 'captain'), + ('esq', 'esquire'), + ('ltd', 'limited'), + ('col', 'colonel'), + ('ft', 'fort'), +]] + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + +def expand_numbers(text): + return normalize_numbers(text) + +def lowercase(text): + return text.lower() + +def collapse_whitespace(text): + return re.sub(_whitespace_re, ' ', text) + +def convert_to_ascii(text): + return unidecode(text) + +def remove_punctuation(text, table): + text = text.translate(table) + text = re.sub(r'&', " and ", text) + text = re.sub(r'\+', " plus ", text) + return text + +def basic_cleaners(text): + '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' + text = lowercase(text) + text = collapse_whitespace(text) + return text + +def transliteration_cleaners(text): + '''Pipeline for non-English text that transliterates to ASCII.''' + text = convert_to_ascii(text) + text = lowercase(text) + text = collapse_whitespace(text) + return text + +def english_cleaners(text, table=None): + '''Pipeline for English text, including number and abbreviation expansion.''' + text = convert_to_ascii(text) + text = lowercase(text) + text = expand_numbers(text) + text = expand_abbreviations(text) + if table is not None: + text = remove_punctuation(text, table) + text = collapse_whitespace(text) + return text diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/text/numbers.py b/PyTorch/SpeechRecognition/QuartzNet/common/text/numbers.py new file mode 100644 index 00000000..46ce1106 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/text/numbers.py @@ -0,0 +1,99 @@ +# Copyright (c) 2017 Keith Ito +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
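Together with the number-normalization module that follows, these cleaners form the transcript front end: ASCII folding, lowercasing, number and abbreviation expansion, and mapping any punctuation outside the label set to whitespace. A small illustrative sketch, not part of the patch; the sample sentence and the approximate output are made up, the import path assumes the QuartzNet model root is on PYTHONPATH, and the inflect and unidecode packages must be installed:

from common.text import _clean_text, punctuation_map

# 28-symbol CTC vocabulary used by the configs: space, a-z, apostrophe
labels = [' '] + list("abcdefghijklmnopqrstuvwxyz") + ["'"]
table = punctuation_map(labels)  # punctuation not in the vocab becomes whitespace

raw = "Dr. Smith paid $2.50 for it!"
clean = _clean_text(raw, ['english_cleaners'], table)
print(clean)  # roughly: "doctor smith paid two dollars fifty cents for it"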
+""" from https://github.com/keithito/tacotron +Modifed to add support for time and slight tweaks to _expand_number +""" + +import inflect +import re + + +_inflect = inflect.engine() +_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') +_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') +_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') +_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') +_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') +_number_re = re.compile(r'[0-9]+') +_time_re = re.compile(r'([0-9]{1,2}):([0-9]{2})') + + +def _remove_commas(m): + return m.group(1).replace(',', '') + + +def _expand_decimal_point(m): + return m.group(1).replace('.', ' point ') + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split('.') + if len(parts) > 2: + return match + ' dollars' # Unexpected format + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = 'dollar' if dollars == 1 else 'dollars' + return '%s %s' % (dollars, dollar_unit) + elif cents: + cent_unit = 'cent' if cents == 1 else 'cents' + return '%s %s' % (cents, cent_unit) + else: + return 'zero dollars' + + +def _expand_ordinal(m): + return _inflect.number_to_words(m.group(0)) + + +def _expand_number(m): + if int(m.group(0)[0]) == 0: + return _inflect.number_to_words(m.group(0), andword='', group=1) + num = int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return 'two thousand' + elif num > 2000 and num < 2010: + return 'two thousand ' + _inflect.number_to_words(num % 100) + elif num % 100 == 0: + return _inflect.number_to_words(num // 100) + ' hundred' + else: + return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') + # Add check for number phones and other large numbers + elif num > 1000000000 and num % 10000 != 0: + return _inflect.number_to_words(num, andword='', group=1) + else: + return _inflect.number_to_words(num, andword='') + +def _expand_time(m): + mins = int(m.group(2)) + if mins == 0: + return _inflect.number_to_words(m.group(1)) + return " ".join([_inflect.number_to_words(m.group(1)), _inflect.number_to_words(m.group(2))]) + +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r'\1 pounds', text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + text = re.sub(_time_re, _expand_time, text) + return text diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/text/symbols.py b/PyTorch/SpeechRecognition/QuartzNet/common/text/symbols.py new file mode 100644 index 00000000..24efedf8 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/text/symbols.py @@ -0,0 +1,19 @@ +# Copyright (c) 2017 Keith Ito +""" from https://github.com/keithito/tacotron """ + +''' +Defines the set of symbols used in text input to the model. + +The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' +from . import cmudict + +_pad = '_' +_punctuation = '!\'(),.:;? 
' +_special = '-' +_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + +# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): +_arpabet = ['@' + s for s in cmudict.valid_symbols] + +# Export all symbols: +symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet diff --git a/PyTorch/SpeechRecognition/QuartzNet/common/utils.py b/PyTorch/SpeechRecognition/QuartzNet/common/utils.py new file mode 100644 index 00000000..2bd7986f --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/common/utils.py @@ -0,0 +1,20 @@ +import numpy as np + + +class BenchmarkStats: + """ Tracks statistics used for benchmarking. """ + def __init__(self): + self.utts = [] + self.times = [] + self.losses = [] + + def update(self, utts, times, losses): + self.utts.append(utts) + self.times.append(times) + self.losses.append(losses) + + def get(self, n_epochs): + throughput = sum(self.utts[-n_epochs:]) / sum(self.times[-n_epochs:]) + + return {'throughput': throughput, 'benchmark_epochs_num': n_epochs, + 'loss': np.mean(self.losses[-n_epochs:])} diff --git a/PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca.yaml b/PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca.yaml new file mode 100644 index 00000000..df233e12 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca.yaml @@ -0,0 +1,151 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
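For reference, the BenchmarkStats helper in common/utils.py above only accumulates per-epoch utterance counts, wall-clock times, and losses, then averages them over the last n_epochs when asked. A tiny sketch with made-up numbers, not part of the patch:

from common.utils import BenchmarkStats  # assumes the QuartzNet model root is on PYTHONPATH

stats = BenchmarkStats()
stats.update(utts=28539, times=310.2, losses=0.71)  # epoch 1 (illustrative values)
stats.update(utts=28539, times=305.8, losses=0.68)  # epoch 2
print(stats.get(n_epochs=2))
# -> throughput of roughly 92.7 utterances/s, benchmark_epochs_num 2, mean loss 0.695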
+ +name: "QuartzNet" +labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] + +input_val: + audio_dataset: &val_dataset + sample_rate: &sample_rate 16000 + trim_silence: true + normalize_transcripts: true + + filterbank_features: &val_features + normalize: per_feature + sample_rate: *sample_rate + window_size: 0.02 + window_stride: 0.01 + window: hann + n_filt: &n_filt 64 + n_fft: 512 + frame_splicing: &frame_splicing 1 + dither: 0.00001 + pad_align: 16 + +# For training we keep samples < 16.7s and apply augmentation +input_train: + audio_dataset: + <<: *val_dataset + max_duration: 16.7 + ignore_offline_speed_perturbation: true + + speed_perturbation: + min_rate: 0.85 + max_rate: 1.15 + + filterbank_features: + <<: *val_features + max_duration: 16.7 + + spec_augment: + freq_masks: 2 + max_freq: 15 + time_masks: 2 + max_time: 55 + +quartznet: + encoder: + init: xavier_uniform + in_feats: *n_filt + frame_splicing: *frame_splicing + activation: relu + use_conv_masks: true + blocks: + - &Conv1 + filters: 256 + repeat: 1 + kernel_size: [33] + dilation: [1] + stride: [2] + dropout: 0.0 + residual: false + separable: true + - &B1 + filters: 256 + repeat: 5 + kernel_size: [33] + dilation: [1] + stride: [1] + dropout: 0.0 + residual: true + separable: true + - *B1 + - *B1 + - &B2 + filters: 256 + repeat: 5 + kernel_size: [39] + dilation: [1] + stride: [1] + dropout: 0.0 + residual: true + separable: true + - *B2 + - *B2 + - &B3 + filters: 512 + repeat: 5 + kernel_size: [51] + dilation: [1] + stride: [1] + dropout: 0.0 + residual: true + separable: true + - *B3 + - *B3 + - &B4 + filters: 512 + repeat: 5 + kernel_size: [63] + dilation: [1] + stride: [1] + dropout: 0.0 + residual: true + separable: true + - *B4 + - *B4 + - &B5 + filters: 512 + repeat: 5 + kernel_size: [75] + dilation: [1] + stride: [1] + dropout: 0.0 + residual: true + separable: true + - *B5 + - *B5 + - &Conv2 + filters: 512 + repeat: 1 + kernel_size: [87] + dilation: [2] + stride: [1] + dropout: 0.0 + residual: false + separable: true + - &Conv3 + filters: &enc_feats 1024 + repeat: 1 + kernel_size: [1] + dilation: [1] + stride: [1] + dropout: 0.0 + residual: false + separable: false + + decoder: + in_feats: *enc_feats + init: xavier_uniform diff --git a/PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca_drop0.2.yaml b/PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca_drop0.2.yaml new file mode 100644 index 00000000..e9c22b95 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/configs/quartznet15x5_speedp-online-1.15_speca_drop0.2.yaml @@ -0,0 +1,151 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
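The two configs in this patch share the same encoder layout and differ mainly in dropout (0.0 vs. 0.2) and SpecAugment strength: one striding prologue conv, five groups of three repeated B-blocks with kernel sizes growing from 33 to 75, and two epilogue convs, all kept in sync through YAML anchors. A minimal sketch of inspecting the first config with PyYAML; this is only an illustration (the patch ships its own config handling in quartznet/config.py) and the relative path assumes the model root as the working directory:

import yaml

with open('configs/quartznet15x5_speedp-online-1.15_speca.yaml') as f:
    cfg = yaml.safe_load(f)  # resolves the &/* anchors and <<: merges

blocks = cfg['quartznet']['encoder']['blocks']
print(len(blocks))                                          # 18 = 1 prologue + 15 B-blocks + 2 epilogue convs
print(blocks[0]['stride'], blocks[0]['kernel_size'])        # [2] [33]
print(cfg['input_train']['audio_dataset']['max_duration'])  # 16.7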
+ +name: "QuartzNet" +labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] + +input_val: + audio_dataset: &val_dataset + sample_rate: &sample_rate 16000 + trim_silence: true + normalize_transcripts: true + + filterbank_features: &val_features + normalize: per_feature + sample_rate: *sample_rate + window_size: 0.02 + window_stride: 0.01 + window: hann + n_filt: &n_filt 64 + n_fft: 512 + frame_splicing: &frame_splicing 1 + dither: 0.00001 + pad_align: 16 + +# For training we keep samples < 16.7s and apply augmentation +input_train: + audio_dataset: + <<: *val_dataset + max_duration: 16.7 + ignore_offline_speed_perturbation: true + + speed_perturbation: + min_rate: 0.85 + max_rate: 1.15 + + filterbank_features: + <<: *val_features + max_duration: 16.7 + + spec_augment: + freq_masks: 2 + max_freq: 20 + time_masks: 2 + max_time: 75 + +quartznet: + encoder: + init: xavier_uniform + in_feats: *n_filt + frame_splicing: *frame_splicing + activation: relu + use_conv_masks: true + blocks: + - &Conv1 + filters: 256 + repeat: 1 + kernel_size: [33] + dilation: [1] + stride: [2] + dropout: 0.2 + residual: false + separable: true + - &B1 + filters: 256 + repeat: 5 + kernel_size: [33] + dilation: [1] + stride: [1] + dropout: 0.2 + residual: true + separable: true + - *B1 + - *B1 + - &B2 + filters: 256 + repeat: 5 + kernel_size: [39] + dilation: [1] + stride: [1] + dropout: 0.2 + residual: true + separable: true + - *B2 + - *B2 + - &B3 + filters: 512 + repeat: 5 + kernel_size: [51] + dilation: [1] + stride: [1] + dropout: 0.2 + residual: true + separable: true + - *B3 + - *B3 + - &B4 + filters: 512 + repeat: 5 + kernel_size: [63] + dilation: [1] + stride: [1] + dropout: 0.2 + residual: true + separable: true + - *B4 + - *B4 + - &B5 + filters: 512 + repeat: 5 + kernel_size: [75] + dilation: [1] + stride: [1] + dropout: 0.2 + residual: true + separable: true + - *B5 + - *B5 + - &Conv2 + filters: 512 + repeat: 1 + kernel_size: [87] + dilation: [2] + stride: [1] + dropout: 0.2 + residual: false + separable: true + - &Conv3 + filters: &enc_feats 1024 + repeat: 1 + kernel_size: [1] + dilation: [1] + stride: [1] + dropout: 0.2 + residual: false + separable: false + + decoder: + in_feats: *enc_feats + init: xavier_uniform diff --git a/PyTorch/SpeechRecognition/QuartzNet/img/model.png b/PyTorch/SpeechRecognition/QuartzNet/img/model.png new file mode 100644 index 0000000000000000000000000000000000000000..b0a929b27ab58350b584aec98cdeb953c5a9a412 GIT binary patch literal 120169 zcmZ6y1ymeeur1uUySoJs?(PyixCAE<++BkNx8P1fa1ZY8mf-I0!JWVP?z?ZT_YWhi zndzQ$`qZh~wQEqD5=Q>OnWn$1i=P!QKnL{ zO%Ln0`e1i;`5FJno|3-GInKMX&UNhl@%Ia7T3Q-Il3c!ONN6Y{cEr=u(^LAjrf~#(zC%BCHzo-IeVR3>+K@^qC_5ezLOg4OK7A%i@>KHpW4&GO?s9 zb=Gsaxw+mL&fA0WShPyZEva6Ix*tb3`yzdPea}9Q_D2&TAt4cAtG&k5(y@t&i6P;> zFDkX8>f7Gk^?SMByPp1x)bV&K0FFnUVaMq{mCZC!jvCnB-iC&Ofly^pBH`1})I8>U z8H%q(W1Q)WesGh~g*{xnINmH>=+m0$T7A~?l@4vcSP#P!5)|y44S^T?{CNi)ma3|1 ze-uHFPp`;%Csc89F=B}L4;7n*-_(MFt@ZWXw6rqJDI;cX=NMZ0`p<`@m3FJGIyMwx z&ObCu)zsAF<>aQVTqPx;vG~Tag&waCrsCt{o%hCl!7&qXG<{;OpHGHyT=Y@W;Y%ah zBpn>=fCoqx-4F2=IusqM3PTGeRrla&@bIJRy$8lPJTlrAtWXy#>K7oxxWYtg9rY6;dH~GnCbf=5z1{d z&uY6^TbSdgYr_N&X;t@dyimiRWd~k)pb8wCR3t765x33peD&5qEE)<5j$ZB^ujO`P zQo{ZCsaPM5dy-sE_!e!0{ap1V)roSa;Qui#Qe_H$+IWCMh9*t3eN zDkdIYgT+L4ZEbCh#RO!4bRIn;6T09ftLOJaE+yh5tzEUv4KHKD*Smkaex0yf!R{M zZLILa7Yz*!2ZySl=kJ!t;~rxNg%W3^vVQu-SDrTGYBqz*PRyNQemF9B(x)pQK@+Oa zX|Jf5_Pvs9c4zzyH6E8N_u<13o}mw$aW}L%{x|`Rw#`bj>+R{vD7%51_;FYUk9~z< 
[... base85-encoded GIT binary patch data for img/model.png omitted for readability ...]
zFZUO3194TWcvBf|KW=08gb|Awxo712J=?=aB5roF@{W}9wFT6_G{2qcMo61ukj`KY zU*QS83!swhtj1u{YwmO7IsLaJFInB&35Iylk&swx-wby`XTQ!l6kqquk<2b4LbIc7 zrOQD;>Gm&lu!4cKE{!=8ZF2UtO%s0q$kv* zRN_&gnae{JtCrVm{rhaiwey9K?2oi$cqzAzy|q2O=#q{4Tkht#fD6t%4XAJ7wTZQr zYR_90nT&4`v+xzum5WVh%XA6odSggqqND8;6UMmJ%XC=dh4O&}%Q)GPh9NzJ4!T0r(D;#8Qk55r@mF(uSeR-5BoHOos+_Bd2 zE}Dp7C{hOg$6yH&Pzx`mkX0`2qR+-Z#PlOioPBu_ro%GTBgD1(!7Y}^5D|B77JG|Bd$E^qqD*o+m`_-|kzc)A%qOv7P>Gb}gTiXu9z5^uyTDz$3yF z8+XOtTJKiFVI#uA?U;v$%-^i_c;q&}!)`4g%jYsc$%PXs$fJscF)n&_x;GFTQld%- z4+--|JJ2_+;k2^W6;RTJmtZW3O<(__j%WPukkSIjJbrhJQo`bEIBa#bw(@HVe4%c* zXPh%C{{T_z%?Nlo8iJk)qp+#atk)Qgd(h3e+=o( z^l?*(XOG_SZt9p&0C%dVkuwId4YJD*_gsgv{L~jFweCra-Kewq{LiNP;2qHduVK#)q95$+D7m8`wyXPtPw?a` zjKDGB$Pj65?xOL=fdQHQE0$May2SnQa<=eP_4n)#h`1c1g-dlbD`U1R_d6lFpQM&q zk@v((Y>rg$^wQ`vPgS!uIFU1@d^*kh(%+3t#j)TtS%Bm6-da~E6obZZgU(>29oWv;MNqv@ck|Y@qJ9+7Emk7+hK`f1gEk5QzY&KH=zaG1dGq)4Zs>N$`%?FY0inl zeysujdscm*g?mdeWYgUNy}^qKE6PE729Bc-jss!CFEs$u0AslSbzLtF+Zb+mc(_;y z3fMY9&rzBPCgx${r9uC)8xe$np0uP$%)kb^7Yy-p@|Sf0-Ig;#6uLiTC&LfUhy1^P zd#|HIggOmAXqWJ%sVRkZ)=};KuM=W<3p$@nV-;4sTj}*+_ETYs|EmQs1X}@H;t)90 z%QgX8ruQ!&30%@Sk{HD?U=1OlI%sF`!@?R4Wi|Ao-Ahvn_T>fRq+kvv(pCa_Z#+9x z?~>gC!h;6sg`hjpE9VsZZWV=Mj&6`G{+{Cc ze>ust-k0GpN$?b2**>|UU&Iw>&(Qx9(yJr9h}3BU46d>meUU(cO+N*iTs~?hrZW4L z)}Gp|EubDvKBHt5?ybYSkJ*psQg&4=Xew-nQu`s#xJ5<1H*63Y-dMTs8s%W1Cc8vM z-CuTyypchI9&d(A$zu^2|M6}twqb-w4$ZHiEhS!&5)4S%_Nc@fr&zbuw`(BKT3u$`xFisPjXIpbPcTm9HT8?@f)sRguP zUgkI$2>g}yI2qpgw$8@>mrDFJEdiGCDo<(ZsN5?&*>J7APd5y9Rh0Ojneyyx&tXj> zZ|nOW7Hy<%7n2tU%^ebuT>t&QCBuh4MhM0J#gg_ zS1GMug_>^4y_}+*Yn3CB$_|gz4~{b&eizosYPoiHd@bA&{hI&f6&4k*hGrp+<=35` zGGtMmaT&)b@8kMrJfW=ZZj}y`CBqHj{YpGUb=u z2Vda^wD#TA0i>l9-g$@h8;W2$W7|LTjeYYCb13^JZ4P1gN!{zDKR)V575}SJ@NiP` zdrN?Wiz_AODt=DK)a<-ArRg|uTHo~oOoSU}o@;OuHQe|*5gRWKwVK9+s`Jc$ij{W$ zGrYWiq(nkba$0HR{ub%Y^}8sGq0^1t`^y7!aTpiqBW-h*XkzJtpC8S>;Ns$>DtzGl zB?xO64UiBqMIEiH`KM2inClX}eTDngTKK2&-Ysx6^%J%7ue|mQgj&6d#I)}MGHL1F zjqvmcNhC&A`OSI!^ur7ZGREZu_*faG0qPJrw6*+`! zqn+-fV&2)ONabyRX@B*u`0}SNJWy(hSve0>PR+-9PR+=`{-N*|&041ld{0T0i{U3> z8x~TNbvIKUov0n|rc&_XTeE0DtA76}6j`pfyF?!slB(?|46?xGub>(wJ1JDR;o z^8U_e9ZX!s=Ws0fKioMtd0VJ)-UR<4z9k)pfx$B2Jh~TIY#|Cig|@A7S9!MHGI+*L zp%?A4i&|yO891a&&3JZB^u!YtCd)k{QQs0_{Ot-)l3!Yt)zYl+-=&-B6Xyu8)@A$- zCQDu}l@MSnsaV9sf*>XVAOP&NX+HxBl@YUDXSi^#m4A^9VS+t*J#aE5TzCb5MP#a%Qj6*bc#WZpySQA`J8neUeqqY2aRvWN>jfev z2?OQsOvT>US6_Eb46t2YAD~{=a+N=*CML8+0rQa%HKvKTO62euj7i z@`QYsY^EBJ$|)4N zCXUGQ-Oc z>4$4Wbv?F-$ZR|L+qXwYM@bw8lcQ}7^^NkL>#R{1Lv8FMX##>rg_=dN&B@$uw#!`m z-%pp`iyaq7Q~v=lMm&S25dilC$ry;cq^QV7KlBpuV>~}1&=()87JYg;U3ZQV6jWZ> z^1Z$-N}hF6%}*0rUa`^N+1fx|ove#*YHUEoFV(Y3^Q73?dG=vnRTsGQdhw zMVp2Si;T>H;s>ZW95|)l-Q#@lXhglIe!Tu0`edF;NFqGjkf$t3ro9#C4>U8)=cm9w z$-CyfE{X>I;aBxZ)$mG;k5Oet@+`H9>Yie9maH05n4Y*FB(8>nJY%IR2u+8lLRj!N zd1co5mBi4Am;0Z{ikM~36PL3Oj+hUf>z;ptHh5TOTC?>(sm+fMRS7?}RumOk?G@V> z4tye{sQJap(j$I}Q6@^6Z6grP3jZOOx1gA@wB=oXwPsG|KKJ(kV+*UbQJ5Eldzv)& z_ba)0$UgSz{cTd~3m;tGB45|_=#qK7hH@wg_v8D{N=&tF%}DSNt3<;M?T3po{Qg!2 zBc$u~eG1P%{j(x}n$ByWyhSo4!xJh)Vx~xO7#qrsbn8qfnmd)cZe1H~yC+~avNm6! 
z;}M~ZoXA?wc?`8GD)K@*B%FF1tk9B_=)-s~(>;et7aF76kT6|p9^)%UY9w-aCCJIi z3A6%*)d7E`PMBM?1x&A8gc>6TSg5BWj3YHQwc$cTsxPhebybJk+wpoG?T+p2pYP++ zuWT+4c6+4^*2JHFHYPQ*MH{QBMY3939~`e4Lg@x6P?H!B+(F;m>)iF{n7{{LqMdNN zXG|brH8W91BX;3+N4;WUf$wo7%J|fe7we(A4W5mtTlY!$O(`b#dod^r7dbVx88C^C zs`yQ5>>7bJ-*aQiFjm~2u{ZoAA!OxP4JWSM<<&%J&3>r6M%By$@9e#p#lvDT68xpP=}30Va#^2uW`iSdSsm?1eAfo+gjXla^#=5+ z4u2}>dzmj4*^g4Cd-|=d=fIpo20jk9-ftFL|5bOd0F`L@vbK`@LG5S4qT(Xs&xg17 zHuI}}nMK_d%G8&K3->L!xcysa_eDx_a&jZNhu?lHKB?%@>tD@lE663{v^^Uod<~lk_~Kob}M%4)=E3o+su$I=j*G~|D%_^?tXKfL`YLDig z+O&Nij0R(nf3 zkEK_*o;3ou6i&6fy&SCUCiH|cn)P$4lTq)d#N)LeWKs5hcO6F$+QPomCH1&&$R`iK z-|dI0Dj}N*d0mgB`n5FsD?D3Xa#wg|&6Ygx?W89AEjQdQ$hFzs?bUg`xlYQinkI`M zeDmr{Md{dEj{b1KRw0ApD_nZYqziXPx^~CAW=@EmUO6S>IiGo8Z&z8fHxN z1{_7tEB5-wCb;}m9@O^!zaAH{;Xoyn+Oo>4&EMp+@EMhud!o3M+dsR)Pj`3(YRuZ8 zO%LY&6yJycE-Fy~%78b%vi9L7IWBHmSYK&0*?MEI;-Zrur*h)8 zzbh&#eqM@%p04WSV3Pg8{=UxAlG@|hl0PE(U^Hs-+uj)b0)Zp2H1+e*3hDAesuebl z7?`I56x4r22dp7cpYsHjZF9?4<)V)dO>n1s&$sJcRP@5{-x4}OBOc5)ZQ40% zS{6nmC-Xdh`NN$0L8!rcLrrzySG_RdUm|SVh5P6D7;KplUH$*2Y*R7B^Fg@uK7stExa-3hl{ zF&cC#l5n8{89K&BFX~fR|6`vns1)9$q5PelW#C^#Um!IJ6-7Pb{;;{06YPUPlFZ(6 z`-_r>sHUdM)zNiY+Red5=j?Cj@a*|b3O*3o5c=NW`&UIcOKZd+n@=`3w=CS&s6U<8 zOcKoWm1pbAt9|~wO{F?edr1ft9Fn}H*8mWYDN-?jGW08^|761F@>pDs;-6u9ytoOt z@gK9Jl5mSTigEkwinKC5+NQ18W*AiGdmoW3Md&3JDrfEM@YywyxZy&j>omcT7~;pSqJnX( zUN^MevjKj4hJ?$*&W6!pceCiwe5rv`tY~?8_Vm<2Xr<%HNZjh`e0H7s%XQ zEbNvCK2h2R`w7Ed%>Hz>`gEPuj3KtYt5MoapsuX+4OM~|;Jvu;4>|0@dO zc^c}yZx)h>R~~=8PIuY1sI(@$7R?S3d|I@zPDE)RByFkMxOWxydujt9!fz-aT%Xp( zCf$YYg6YaMdD-Pl!nV>|68|QtA7D@#BD}d&vFK0H~*Xe2ochJ+@d*QdiL4)wZFg5 z6aa;c({fk-g_)zu&S{#>3g_o#mKB|$^W>#emNil!+0rqoi^UaL#tl;xMdZDU-oUGRJzHn`o#jz@ z0II#z8_&}pz0CGT@P`)uA<(!hDI*g^1#6zPe%U=5p-r1Iq3jW)*EsgD+s6G}DY?d( z@;B(vr2cL-(cWMu2iuA=E4!J%{JNtYVIGc?_>7h5t7-aX+H->=QmNxxXZg%#48h)g zhA`6^$3st2Hf@x&@KwY<8^x5E!G63uloC_Pu;9P>N5wu1E%SlW3B_o*<$RxVH1Nm8 zMRA5eDxBxdKSKxzoJ?Rb46GPtr3T`#yNWvn8 zz37d;{+1d!NHgvuK8b&S+Qhv7ES2jgtc#y4o3X1D&c;@%ZW7^kbm6QOPLEw!=dd{COB zh?RLG49J~Wd`xk2hv0;9?^w@E0>-a3#mQ5ikb(=q~WRmU!`{0}9)Ak=aSW|$V5 z%wF2X#^9q9@X-*ykghtyLtR*IdJ7gJ9ZU|qI;Kn>g+AXcl!@!?#`ipoBVAGFyL{Nk ztzikMUby6sJ#Y`VbOXq&t4P7o-P7#f=#AaF{bC-A$9ixvtJcOb>Dr7p4hjlwGNNZk zphaRNul?S4nA&)waz9Jnyv5@{b*-6ednM(oicGW;8QHbVO3*@+#e?3oETzN*xky5U z?A=fQu&i*n6926y`3GtJ34k7fGtV$dWGR?1N#0ZPd$O! zjJp$@PBT=7vp!%0rF=WOxiKDCh!$ZD-#k6CdP0?$LRL2e0Ku`v(HJ9=ulMgL#49%p zgE9Jm3p-Kj^qL$c4GE$yFRB-G0n>@NddmYrBt%sw6(8!jZ41`nW`?w{CI)JB+syGn zw{KwqVntBWxYOET2I64zHu~W!yPtt#i|bqOn518t|G@WZfPlTtU)?;j9<_WOOluI) zN7^*H8_vXk`wQh?CyZ7i*<&amfR*{y;YKRw2=`rFa&(ps%5(l0HLRhfVmg*6N0bgz z9rzQR*{Igl(j%)}09l z5+Vqb_fT+r?FX4+#qH*3NM0XCMvTT-q=wX@?FwT0Hy+zQz}6vQggnTcK}5_#|DDo~ zU=0YKBbd}8lCFq5!W!b?Fxj|}H>CaUwugS{#sZ9qcnrVe*WQg5YnIAS{w*@kEV@q;eq|+ zGv0vSo_Rx!r~G+{_R`(0qoxXI1kBM{&>{=to7v#08y5}_5b{CPwwtu@a0|KeueeD=E@~)1j++W}AYR-$Ng|Mv|h!28M#7@G0yLKN98lWt! 
zd=K-w@0!jJ@YrI~E1^W6HWhhmjJxHAdGWhj>x9VwMWUf|fy@qzkpF`mf>1#9qx8K0 zhluZqeI#i}2)L!r3#undFzIX$1Tu_?gg4a`1YHw2eY3qU%8PO5_4aa)Up=@MdMePv zI+t0m!{T=jINt@(ift$%jfty3D?#n!L1ldsSM~f|0Q=67Q3*+WFqVoL6}y{5rBI*u zGX6W$hmRj$ve<+;lF9B!VmDRnb&QpeAW`=?1omz}i`P4d791vptX2b~1bL#i%;WY*G=XNc z*&HHaQY)*+SW$ZzU9kQRLCP|=KJTgw?}if1ClsrVonOkXePisZAp9+E0qN6ym`sK$ zKe_09SulCVIA5SeAia8-%kJ1A##FH7WEnkVGfh1odnc0?x0QT9py)oUNS6K2ALLZ8 zw>y;Bu82e+!a802mRLtl;$N3s?E3nb-sPZd{>m5_jGjw2tYp&Yp4T@d^-;-k#a@a3oUnL`L}&(UT91HuQmG2q7^QQh>DL=m0rjq<#;M#@dEF zT3hpcZrNy(q;zkr@Is34}Y2o46|KB6?A=D~>{FX{lokZdLEBcY?>16sNLc za#3`!wmX0;1gcIJRrHIj^LujgLYXfA%bEXAtm%t?{p#H@#rDTTZ6t`=OjrWr4d3<0 zI&!dHAD_FPo}t>6l`IGmA-bWf?N%4*MZbvt@yMLPJr@eie)^soX=5nn&}YMXs0nen z?7$*jA62M0BEA&p5uF?>{VvCVAOr=G^81r{;;_q4KK@tv+le4l{0|2ktZ3WUOdg** zpBY@29aYtCJitytI2LzuxR>2#_7&F;&zG!K90fR@6HY zNX#0VM%C($7v{6no7o28r?1UxZkmed!K@tbe}s5OhD2GQjEo?GcP19C(lM5Q-%fR$ zJVpo&IeAAr%;y&lh3oUlvY_=0gR!>gBI!~HhhP`aE;%f!9a(BjH7tb48+sLMtWQ;G zz(1tNi8D)FYmk6ezIiyJuB}@F>ur_TXvHLg1QY7{Vp|to5umGN&`~w7p z>0C?@nPSGW^X&;AxhD?99M?xbk>TW$M$#?-@xgJx+fmp=+1i z-}Uu#gM-DE4Kor4(SRrxr|){%;efG*vm*)SVbSW`_dzs3FpjYz{f&cAU!RSK*Irz> z4C<)P?*AuZ6+pz<;f1;xh2ZK0k!~!kxor!VN}|lT77q?8OVt^rUFJ{oRW>xADs}Mo@3aW;C}R4jnhOATwj0 zM6B%FCzgRD2%u0Jb?kZmxjFnmh%kzP&;&q-SueG#HZ1=t{CTRtHunI*~aOxC8 z1uHTEa|NryFV?)(`pS35pCxoZE0wAp|C+dL2=7#r9x|Xn z8@)j=TkVdt8JDd>4Ic3{q$fA^(bg{jh^+b{bp{+3(&lHWhvPyKh^&_)8<|4{{m z))eWhPo!(g^1ON0QE6Ehx=N~WC%C`0;sxQ)Gl0XZ-pMg?(8w5R^N|Za-x&?{xjnez zCh>VV4Y``3AOPHtF`&Huqd5XuBp6)Mes28((aLl*?-aCxY6@dLCDgEJ~BL9iVD&3{^#k>obK~?fQ0p#qYC*ok;?4!UKXwpE8Uf1_sw@JWwM&& z^rt@w?zf>9@Ck~Rc}a}atqg27NTud{f2ixTT?;=whIjLDzeeSY0%&(>*+Dc z-~Q`D=lq12B~)#n8qkoCghs}?&HVCfX8kWXt`3EVeCe+hzh~XIxxat^i(qbL`O3Nt zvK>7^qX4L=@8OY=9JNTlB)0#aL(0r|6zi}=t0DIG_Ajy(<%68&W?;T0nPOybe{y`> z3T$}R*Nu!dP4C2|q^^$^W`W=ir@N*krSk#$>4w&oA2XQOz?S@A8W|aRa&i*in2CuA zAZ&gQr=cKuGA3peG$??HO$B4rsi|$Z+sToUk$@|UkB#+ye!RYnQC3#2vt7vhKF$$W zZ9O}(u;6t#S7B;u%5A@lf`a0`mm8mzot^HvU+8);!>Jq&4hYm?zJMn{sL0`!x!J&> z2f=D<*9lEdTLUogPtg$(guG5U(RO^!rnhHXo{t(DBwhe1JbnYn%guaZ>!{$rn<$Po zk^4z}6@S^gCB_K|Q^}+A%)+w6@z+_CeVJS69}?(Vv={06R)(|XVA#9An2ocE5y_2W zsP}VliRsMWFs6|+*?}366$Q$n&0^L5*>~~L2e4U~;Q5s6wY)M|Z4UqjsxLw$uy7rK zCMkrWg7cbJRb3ryR+3UuFd8kMmuFjpF{Hw1s~3BdtgNhwiHQ&pFy2e5uBO)0-TjYU zU|?W?KwxBOs3aZS88Fj%cz6gf1AcyfpqYn7KrlBoT`GSK0oRUh@xeLvUoWFtrQ3-f z1lT^YDPo_GJ($=^qjC zF{l^(fLPeKSyFOR1=?UNLlitZu%8r_mbSR;eFCOpS|#E%zbuqGN$WJi%gdSl0sI6K z6+pr#Li_e`GVq1Fo%qNMhB(S@>*N5#3BvCF2Huy(8LlfKX<>b+Nb;SyEJ?A}jORyy z!}~)LNR|X+ZJG=>uyqF1kT>AFucV}Ov{0wt^tm)DY8V(ng8LL{o0R!ML16_Lh3GH| z`##)!BNxMd^(8)j7UaEkHN})Bb6U?(Qc;y^)eH;{9t5EaAtEBWgMi_rW`tkBmk*qH zmxe|E|0_sa;XBH9)tSt8HNzaG1mt8 zS|@OgsrHfwtAInL7mJis0p>G&1~{>-I8qjaQG4y1c36AgTXM=}Foqxp1Z7%%AhCXD zH2t~l`4PPx+$~_KlzdN+XK4!R9y;u=AX8cG?NV>=PO96o5-m2kx00+w-~M@xULMTa z_y3?86}m406>lQDx$o^(e9xw_kr9LU3qb)uCnbNI9<(2pF3?W%&wK#)`)_HTNA)-{ z-e-{#5e5dF*XdS7qKuibSc778@X`?ol(U=>-_1g0kYJMuzp3u6k$s{sO(Iil`!%73 zB$sbT_?^p@?ovzt6R~`@Eg@^|_c}$7IfG(p&gvpdO3yDF3@xQhI`yCw8eVWNDl2;e zkL$hzf%N9?&iAaJ2AEveRI;5AaQ=L}zZSmR%?9i7c|C#|P_rs~s;a6$;!4Q>x#a2) z+z~;yW1XOAb^7h2x|OHh>=>{o`rWLBc6W=P=7Q*Pg;c(2jgUlwyC<^kcAn{Vx4fjP z?gXnIj98ihm9Q@o>-%j*L`8ez6mUj{72j@DHl&H4MNFeKNMu+=w3^vu0tTEcStkzn zzrkbJ*omm+N+oPjma=t2Us}_Z#*Mk`*J4^K$A2onq?$G)uw4kNh-&^a&;_q8Q4}D_ zn2c=vYgr8#Q*t|mn+cEZf$a#g>2wR807yZJw2c;}V_?v!wiu^L_dc7^wrAA%-M+jm z1iV^4)P`oWQw?=W$2?z?t5b;)m8zcPuPzCI}^z`(+Jpo4vEPW!allQ>f z+lyk+py&T#>Mg^n`o5^qLrSPLNGKrE(nz;}G}6+bbcl$6pmaAV9a7Q~(k0!}A=2GQ zBPra;|M$N4-u>_i<($3OUTfwUV}4Nsa*d2k*Trt5p^i>1ED$M|88=<`ixGJY$A_TZMbSXq%qsRKWd$l)#01r=PPU;@VOJ7=4u4Q5R?*nZ)IQIp( 
zMMN6h%SP!%L}pt^76y{})zY6UB=HoMmJa`}j3>V1q>2www9sl5zAb2dz*KiI4wm5_ zgF5!_-sx4^Xe%l84`<1@-?xLqX=!QMHxwBg`>WDc_Xnv3ItVv5UhLF?^dAxf;?G_odiOi}R#uxJ5*un8!ohF9=<7f5^(72`vmP%R`{MRzvO z-)$H0+m*IcW;Rd%w{}LSzx&Nr`MXU@;csDMjkmN?1o->&I9YNm!d<(80IStPb>hFyAb z<|ddmS)#w`i8NP8q3h3Z$;KD`q#Mt&R@p|)1G&|2k(PxCI@dG9v)ukt}LLB zM@(G#vOO5EZvc3<1ihU6rU+kjnCjXdOa$VOV0x;SO>Z$N2r-Epk4cwg{|#7xSp9ZZ zn}BBp_A)|`sUdgV`}WuW1ID={MLXEB4BFSqUwWUteVEt{Dw2swNpTO@O3Lqsq-)DT4t7Gz&<)M#cykPH86ux| z3H%PLWdkDe`|JSf?Y$c;H^=}U(Ev1XaYQeTY@YT8fv+<$HARa2xw6}EQ3Qv<5;jbw zSsz)A-fP%48A!u8i>jaQ9*wGJ_&;pKFT{pIKYP+pEu49!c=mKa2Zv!=^#3e>FDEY# zkem#w*i?@z7kEprIzl;$H0b0N6nbasoaq(f=XZDQY;;3=&kom)^HaT0n(PsL#yq@0 zk7&7ky->Ot>SZryjB%EjftOILLvcnFWBPtsLp=K%ZsR4n#)`M+4=y2rhelNAie80J zTji9l!f6^aiqH`@_qCJ$t`Fn8;43^>&W)5`FasP0jD3ztynXQ|D!YuDFA%yiQTJpA2>4T(Fz1uD!r8Tm^bW>Fqz>94?sOzNtiq9NP08V81pr`y9uu z_q+R^0oJm??9S7 zVi>z}ZZhGejl+?YMJFC0{*H!g=s1#ggK$Jn)3US8_7~fWii$ca+l78tSjS1rxw~Ig zL0)w5la0Ln0YPVfe^G5M8G>IK(u+dQuOof8B|ZEPhv5E%-pqUQwa?BP(0wj6_~8n{ z*ytbi)SK7{wjHFHfRwI>j6k?H1)U|noix@CcEc*S6nRy+pB_)}8YU8+_K76<>YtZ{G#R_t@r4ngg!gryc zigS_i)Rc^9E-@<$G$oJjgQY;ZcCG8 zq!tYu0m6#uyBlV>oxc_d)H?@_^yFIXuvOk4izEX1fY)qfP4eQ~3<+n#H!wyBb$9$T z)Jb55h}g=F)MC~TMBqMijwX4+lHHO?gBYDg@mwExuHipn*i4TA5CckMf2ZLbl}YX( z(eieXd81FoZG>xBXT%|tYf+D$=+Q(#IcWgakC- z8xaW}`s&#noon)~u<%UHPa<9A2|;<~88Wz3P0iYEj+mb-V0yS^DWJ%J-|lwxD^d&l zR+R%3*B2t1=%2rMxVjv^6UA3^-%o1@8V6n5&<*o6H-ER|Q(*o)nZCEe3O7~E@mg*E zDdPEC`R7EO66+33J?$}S7L?veq}BFF?&;OIw-CmePb&Cp)59pSN~4HF231O8o4fDN zzj;k5B#=8phe&a4=0zP>-7S8C(PH#pZ6~oWPEtoF@e4Pkz_hfqrY0uSwT@d5RDe}< zW$I#pl-o=6MY%=P!Y!ZUTd#TnR0G3gT3cxt7}gg8DDPrnx#IdkzyedONg+9e@<=W- zO&@SbUE3pJe&vF@*)75~6wgdSELN1J8aKYGh=}U6*eKqEjm6D7h!v?xA$pEq!^%5@ zGB$#NiE_yyUcJN-4PnqmHl@Fmh_b&2MQ_|+o}Yq3zOT`2>ju3_VxPD774=TUQjbZ` zznvOp9ZQO8oDd(jb~O88cSF-yUw;OC+RaHyN@7;havUzd3ze}}=d3-bZ>6L^X8_S; zxL_oJQi?!@>CaeDNA67#^kw~F%E}EN@E1!Oc0Fx&;j+C~> zcSMmB`ZVk73`)&O*l-@nHii8@dw)CCJHqM7i~*BJSN=a4t5bZulzEa1+?<6@Q2KX( ztw`}AY}-UZ?=}M5#lc)+3n%%H7fQIRffMhQYE#zU?aens#CEU{K0PyYak4vCY}g3# zH!wv8QLV~6m#U%u3v>{}Mt4Yah2a(P@}4Y3h(6$W**0C{;P#O$R3`UM;@a1Y3TfJE zw*JoaS_=IDZmlaTrUOoZChQ`B~TGJ$>#TqexfT(+W;Xglk0xh7qZUnB# zgH9f!U2_EYntV{yMEcyTb^g0ab4@FstDQ+Wb*Gu}%<2eywSgB!CZ%bu|REgG;2L*cw-gsPZx;$iVN zytfQP32`g$-pj1t6*%_(felZRarZ9kn>V)asue>Goe>fN1j=*Wd{xR*o=f2;My6r& z7^7nk-9)$b=O^%JuKbcn1Fs)UbS{wi*Xro;qq&5+(o%`znjJC-oeI&1FKHI(xxfej z(9mlSAqfZw*lJ^Md1o<$^Tx(! 
zR8&R7U+D+?0h*(eAidB&(#yt<8n+Thv93U~i8x9;F#y{t7BA{Il2|Li!4(ZJdKb8dS5Ez=-kE)ATv04%O20JFa{2zC_Bc zigr9!$+j3g4K!_|knC0I;pQCO9YRuzrPOxRBwJijg-9z#A}ba~S?ff%wgDyet%&JnlJxYV3W(0-tEZ^?>_1%~N8bVP$TOZ`|%%Ec0-=~Ym$Rc<+gu$d^$0zorb7dU(mAT%gc`@6)ItC+5WC^!7qA}!g%=gcyOa_=-EH5r)1GwaVc3lLDECMOs?pn#F` z{=U9PP=)94;z}4-y=r({R&t6I_ULwKPLjj3R34gh)VdlzUIuov-gp?}K}$!+v%wGS z?#EnQW#E&5F#5|UYt^^GhkqZU`b&>lBK+`EJM}BhYs;P-%v;_RO^KzEuLL3i0uPK> z%3t~JqjDZIoD1qlmHCE4-%iu}BysxiuW5!)mcAU2x6muhT}8<~Qy?x5IDAG%SA#y5Ler#No#;rX zv%~fKXx$Um#3c7SWZYdtLmbS^%wU4`1#Js@p^kIQ3v)bJtOL@>i`OL;QNx-I3GcOZ#u<=bHbFybr4Imqr^Css06J^|YN1VahlL zxw{``;^l8CWQa$rSn2*-^i98W=R-Tw*I#l^tP=|@i%KoBx2=W@abMqM>}}y%Kt6Eiwp=%yZh@2Q`hm!c=8km5RRZC61nTY|9#( z(ThUMUqK{+3LIM~YIZlAQK;Vxfc_y*7NDAIxHh;KAh&Vm=G{liT8rsA%x&PGu z!R1A|TQ1M5d#wwpgLx#AXuM1gYfPM<5oxws?{*4V*H!VcN{2j{cJ!qB8Nt+du|Fz$ zI$&So+}z&Ez~cGr8R~7%6oR)S_D_g*8ZgLi3+9;dXfQY57@#>1N}VTM66$kV58qU>2UQwoBToQ_F>V#ex%rCA}|(t?+bwkxrNs% zKc2v{-s8o_XBwo}nFs(cbhNd1@PrKFJc(hb4ga#_JF=8ye`+4oG?>GFN{M*0c|9Sx zTg_0!5`K*yI~$^oB!!EM$C$x==kF~0U*hASA8pd3zhBx6S!76LbQom(#(RZ?hKENa ziHl~FrYb2gOtSiq<7eqn1KQ5X(uaf}4m&mGWENAQOIR|xDtcPlp{7V%38^kc{f!!tQb94BTL>cQ)RDTQ zFf}klg?n1Behb)fWcSyA@q9~6spyhm*=23)=kle~TUzYV#0_x!NIznmjx;rybOxs;q=%KHtYiSMUfsYzv?BS)X+ z5cA^H>|h4%Q}XG!Hhz2Rv*bhxGyF>-YDS;##-|m+Ii+%S;oh*5kR=b^i~gy5C`!&5 zj7T$nVc_??LU_Al;V-4k1;zxK>ekl|ngbs0u=$+_$E*Jr3&2Md&F9%eJ!{5ls*!3s zXv_Zk+N*q^;%spf{Y^s1l!w?Hek_s4?WgsKCKrK6riiq0)JF|1Q=O;h>6dDguPDj| zwBl7Q?CxH4Bd8dC9xUy#qR#kd=s)w6*iCr9;KR0S)u3327qx%$#?^yL;!mrO9ug(s zV;5$umV{7Az8m{As%g@0N+=xh&tgowQQS>7nIK&oqwcN~9z}L^K&AT1Q-%lEA-)WV zL2l`5mq8vB&jA&_3mUV3VdGbU^BerJCl4e@S6H&d+5_I;t0G_p+xY!|RY$KzNRg{)Vh>$li!Lsm?cj0|rXElXHHz^&A0 zgbDmP(w+Po4>_KoJXeWjwIsGhHGZn*h1Op;%`THnK!U$KN~pSo80A|~j}Q6qB~qj1 zHja{H;mir2z%6Cg78mqq(u$O?c-~~QPCB;Dpai-j0%X?(aE-^|mL{FiJmbX(S2Ax+ zd}O8QVn7nJqBvr)zqVtrG~T34t~zxfoE0f5WLxSX9K#EqF#5X8Zu8_y-!D}1%XgEe zq8Fh|TiA%Mf+vUTh2&8Q4wEpMs?1k_h$s#bNj283!>GDQ*fYC=ZftJO3;bL`!8&4g zxi%_o%tQ&xLdqW}$|}(~AS(hwDe`%Tpe|FjXQSvlu{ON<$l{#Btsny?Oxl^;T z!m@9SNSmrSduG=j7`jG?kKf`efQP{%4xA_GEXhlxU?}=(I~b2YeUQ+KRY_oMKiMt{jTrBN3Mrw+gn=@D3KlQ^+Xu){w?0N#IhNcDETBE`?goa0)|LH#>WzDJaXTQfW%Ijx_yd) z(B@x6hzDOJR78Rg?628Hq%wRQXXJxhZg$@L6nRO%DF6aYYJYf}NGd{GQMB*n#kqiEeSW#zpA~65b?at{v(Bgo#7TtGG z@@#QTIQ|3$QMU`5-LN8=sdj?XufcN_GCQ;Cyw(tn>XGs>suB4rvRKc`3Za~b_cu}_cG z^zBquMcs zL1J9efjrVfemo2fu-K>QYV`@M#dE*&ODKW|3r_b4uL2e>QOpmW4#*{En?3#3jqb!g zGC0rZ7yRU!Dc}XM%vzl9w_H8>q4=YYBJ7bZ>=(YSEv*(2E7W1=u z{f-W7B=06{^WK;7X?XtZ%s~vQXSzFnI9fM;^@@g$jx)-GC7`CpWky0#OhrzpuH>ty zqO5$hN6mV$o%2Lek4yU*yMqXisYbSSjp$%alzgA>q&^&KVBYRfO%vyseY6`FFr~^9ImRw@B7ITxe7yL8izZFJ z=xKK1E{mU)JGx8yTWoogM~NFh8HB2&0!6D>vcA@!=M&-J;4JcM+#nxR3f9!OPD(N! 
z#S2smRebolxpY9dU)Zo{=V!3evO?s+_#i5g`ARc?NZ0*Ejgws;k%xpE}e}b$rCZc_7sF?H7wd zeKp>UE&%{hzUk&-GIW=eV#yzdw3|GVNBW{HF+ot#!ds9Sfhsn3Y-)bBZIPkyDkgXR z<+j=K*N3cx6NZfKPVa7!Gvqq-lf5wRS7{4al7Ec&9qwnK6^x&Q#9+~6jCJ(5XM|V2 zyVV%0>_sbSAZ=Ll&vdirSys_vY&ARya&x-Tyjqu7hgT%pe&sd}wqC@ariSrPG`nKB z$HiI#y51|1^Vr~)WBUFIt*);2_4Tdh>(&}tWcXPsVn)0|tdd_?+feH*@EAQt`Pt#a ze(!Xa+cjI9ra0jTj**-_B-ZaAu%6o*$?|hGN-BL)KoIy6LO9XoWG@j?3V@j_yFpx~ z%YXlLxwF*hpKJuS)koVs7AE=>8T(wAayX=d(3-(go=SbW$qVJ<0~M2GSCC|YOI%mn zqM~GC8}uT1l2|OBqDBBbDP)s3rXR6K8=$a@2Vxn)r=c(Je@E9srYcvQtSB%pFb<*S zbWnnJjW?O=z$eGzFaAFCG6_Ftw{k`+xDi4%B=6TQD}Ix((Pn)OtxXI|z4rO;N9uV} z{7QNEAG5$EEy67`{4+`4_qWR?dEr=Y_ABcNxr2KdEg4ReiK{#_B(_d*MitwF2ZJY9 z;i)_;CZ;IQ$4BppI`k~_b0w%cQDkPF(~Fxntp-){&9VZT$PU2x!T~URHKpp5mVN29z%@;;+jrkF&^8VIW7H?c*CI zfBqNQ&=ISTVA^l(%yIWZp%0IZiF@TuYPzz5m=GZZp8hVAijpZ* zDmGp=dgc$gTNaA<3wet(g`IYYNJvuILkM*3-*oQs{1p!C+td&*&&^$Txa?dg%6w8@ zG27lyWn(P#oYk_(3Cm2qdEZjhi?5_1 zeLt`z$}K(dMrq$B%q#ilO&^(nPTNst_;vV~Zz)R=p_Il{3XO`(mh5rTqX$=OYeMB7 zeztf-Pjkdah$<`8Ck~r!g!6J!nYDi@j1baxWL5^Vn@1lU#?b!U)y+C2+D>{Rqt#L!A5c{FpiQ*&}-;#g)|r`In1}ugSG!yL;3H+I!h$56ijA zPxVGg<77_viEfozsbB%aY_{gJETRS(_di;)TI>aY} zo-&%8oNRpohuOmuQOaBBPb8>QZyxFNx$BA`4J|^&uqzV}F)Q7Gj{EN7OWGYg5Qbi# zuSQ1bhi=BbND-SCe7M9fB`uA`KAJ~t;P0FGBGJ0Mn64qG&L<_lx_WJOPNXrSU~px_ z!I7np_18?k6A~`LzEyK>x0dqN#ew$eu60#)Sp4YUb&p)k9HV#Qtw;Ql*_>X)1rxU)q(^6y~=FZ=ZRb>h*JvS+yCPZ)U8?b#8y?unTrJVXE-S z+QOutyt2>Jmj*xF*VpXET07E}?GXFBqTjTuZF<{wXFMhtsGD7{mM0X(Wy4v21cX0O z$&nBjUreqpf2WCzc)DGeKZALZ?B}bn|F_51wV9QcnvvFvOPJ#Q$aro+p3xsXJ`x(3 zML1bAYZ{4Z``}Ydn)J=cabLz1Ir+^~*AsHC?fI~>Opkbqi_W>+dz`pX59K^f~U9(fAaSn3*d&%o*;CDVeA$o<{8b z7G-ByDcA~?MYN;}2{v5Yty1pdKGlDAUGjeR`M8RC1cf1|t&)m3_3i{I_F5(H;M8fR z;O_8P(t3RB#a+(phLzp%8mHw~w1et+}=QF~JI z$H?OSwUf`<_tS0|-&$nwLrx-P@IV_AAtNWFP_A+dmryIyV@?`lANw)O%4O)+M|2z) zZ>Ao1^V?iIpDn%|Rqs@ClqbMI$Y^@&7a~pvT(}fGkqrRBNehg zTD|`F{{57JAjz|tyXT~NmSb2Ris!0&FHDT|t_sraM)8{Bd`)PHObui?zD(dbD&Fb5 zVy$8|yuJ!xQPAhNg)mBw*=`m?ZI{L2=~Mee{Nn(-PvShT zm;+Cxo<+RsQlZY7C|VeKZExvY2EMC=v!kMhl=zec)o5T>FF&SjvKqa^NN&zR*)W$i ziJEQBthHF`c4+VPHpNr5CFNbfz|UzAy&9%iUW=jUJd2I#vn}@9FU#p@bi1DK{}JgK zuwUna_Mb2D%wn9v=GA__BMcDUCd?h?9krvy{my{~GP$`h;)(*OKS8_dh#P_r0IHbykh22wGYq z{ym6~VyyWyJpuxi8nKWf$T2VoOFN?;j@C554qmmzQ)0PaJxelTBn~WssXb zMN{y*A$y{bJ1?((T>9en{=z`TI6c|>qo8f!-h9sVU)<{e*fxo^jwbQk8 zWTt8zjvEEep^tytc@#=6>_PWf$e)!k;2NI%xLJVIU zx89Iyllt_hbkz0L@15*ty~+{1z0J%*^Y7fnMBUa%jt#wj9iI5bFd^qCcr{3>>Lg^Uh1XeWlQEd_rGI_7OohPfCezOWA3v~?j)8lq5*Z05OJMc5)!-av4v}=2I#gALZ zYU-*PmbIBt#&hW?2@Sn&s_LpbHI{|Z<=l!~n@kL9nkqY@HYKI`Os_mG$63ierikAU zh^}h9Gv^xlJkB~@|B>aP(8EjZ+;D>naTgzY94sc8Z##>a0djtFJh_3XG#)U~;E_K=CGSttzlZjBc&EiFOA zskgV6Y$#=r&k1J?L$M1}Ijy)M`u^Ax5>wKqpR?-y%>4lghl;3*T%8Qp9^q6YLCDYl z{yujiKvX%0>AQk{HazN@#w#Qb+{y0&y{o&&$WDoNKQIOm7LCyBE&GJgqefdi}A z7Eark9XUPp58W|@SVwa{rh*k~&gI2{pW@=2kJg7WG9E%t5T?C|C(+0NWwK=rPpZQ~ zRW+YRP(`tkZ%oFUk?pwwi)Vhe402VZxQi5OYmphY`ybVG8)P+f3BBo-*!mH`-fu)O@}sOrx5CKzEvCD$h>;2XK#prAmUobg z`$d&GIZX@Xgo}%1acqxy<5!cH3(<^WO4$dTt^Sk#)o!8T_j_->dlk>#u zre|EfMx-(LDESNC-7(!EcX(U9wGPQjg4Qr6XOS9Rtx`%`^h;dk5#y1c{n>H)S;GXq z@G{Cr*LU?32$$=teryw`t9Azj6EVdzXCno_R&Ty#XlR)I`u={ho}S(*&!4YfbKls0 zs{CU}Y1jUo6aUC)1bOh?iVm&Nm|V5tSnwR9`ZjXyc7e8gIR9AFCNq)y9VWNE>M^bN z4fXXiHDg-O7Xq>tk3BOG9QNUB>Mg8!J7ssG|7CSt{IRh)CNikd@MWI8#Cj%VSa7ax z$mrow&0D`{SY;}gTVDSu&;0Y}Qu%4yw(OHak)QX|nSd(-c{Iu!5DwrGF>J7>X*4H> zH-ulQ@93-P4-+l`E)VEqh`NQYuC6XAlpy0@U-w?fAt;jSCd-YEw$6fyW&|1Dk|!pi)7T<*Vhv;EFqS~yV85m3iIw=5&K2SH*ZSP?pS8p zq>OHP(D|LUzcf!BkMi^xwEDf7v_rJ7C?$QlV?rc+UlH~w=kn_6Y^`HR)e5ksEG@^@ 
zFl?}4((BDuUNyk@=aWUtG}r>~0ToLc@$+2LzDjG}wjC-^yZ;m$lDClV5fI5CKch*E?B)b30jugif6{+~8VFxoG@lV$EJ(57_sL>1#4JB~A zLcbwl+Y8fYXZ5vQf4%*vKS&G#!2!<4e!1uHs`>&#dS`5NRTB?fSjrY+heISWn-fPf z-V`{OO%(FfbQN@F8>_Dbi8`Cd4!@l~b#3l`z7Y~NY(}=ts$(yqBhNd)^mC0)$rfuSfvdg4lMwdArcjUSVkqV#1L zZ$P0$_3`)JZ=vLqClh7n3Nr$W{FqG4;w)V<@+=fK-;HqH*tpNDo}W7@ z0ABzf7y}5Spdqq@@>q$ydP0`Ij4axw#n@sL4a?UFRd31K59fm43LWphmv7xCtbb+j zd|rL5=(m#Apx&R~E0YWe7ehW#$=TxIN2|;3k5UZ?Y#Fivjt5f zmZ_LV`|~7|R*a!uqL$+KtCf{m(F`)d+Us56wyC_eImYz*VUrqo7=6h9>!HLCSbh-AA}=1^ZHyVy zGCSd9u&I^sW{*rTd^>NP&FA;$Uo?aR&|Ai{(eyqal-0`oT-5OxAtr9DvYiv{jLDe5 z@51kb20z>Qc=BRb3s3$%xr11ZC+uPViZz++PfgE4%f_5V#ax(KG_7r@R;BMl(u3(s z*K++071m*lbWPoqwO+bcz`~I9Gv`vHx0*4QU4xwaIHRGmpQ2Yr!r7a|GkH92v|s?P z5jG_=BqgoYe`S2mZGN64h^dtM?OB+UcTs&sv4YBk-ypZXnb_Hj6{`eNC$j|4sp8(Z zZ@#TDtf=%l_>V{rC5NoY?B8v%Ffp+Vuf^)=DN4+L!*9yOG>g1iIckJ1r4ow1)3>6> zd-7I1ZYDOEHw4)tSi%X(jh=2-?75k%!A1;6soDK<>)Nx5H;1czB_-^jyP5tpfG6XF zwfFT3SCV&mv|E#f)%u7(Ds zuL>h_rqwB0r+mH<5$A#YTo$qDnR*keAD=*~l|jbM-e{Mp2SiG~>=MahRJc}) z5P#m3;DzX+{?6?zqJT+k-97kP-v8laIuUNDT0SY;5kWPUP?R+=RI5;S&Xo4`)$;B$ z1MYWVNQi^;w{mS?p{^kKDAbTHp+~@l(^aae2=b~3692hDk0JWy%NGSrZ-9YfPZCQ^ zdDrP^Jv={4#_+B5gcgKj2ejf8#UW+_UEw6XB9^7yU1j3!Q_Ow%AK6W7RP=!`F3%~H zK!*^24N3&Z&q>y7WA!o@fiH^CBtDq*EXEp|H_QyB}O&s4R`y0e}{bYi(64HGLcj{zZ7k3wt;~KZ1`KA?JpA&6Kkuh_sWQ| zy4TwLOlar1^vr+c=E{`B;$md{uXp;WKhMztNPWO#I;`yK>Vg^7_I-&@E;%43u&36> z#6b1N5HH0a#Rzh+!Ul40M|U@)px_03KD;P+MOBrR57>T(~N~mxrxi}9$$285% zq+mKe45R=S`q<8ujI6B7;p!c`5HByUh6Z;Sa|7I}mbP;JhK~uN(xw0FuawIB!IDX0 z|JQJ2gb1FQxquicID;X0F-h3>{1EU}K-Cl!5J;@r2i&WYl2ZLtHPa6tEG(?ckIJ$l z^sGR#!!O6Q#eOZ41C^C#9SK~iu1SHL)pHaX?#g*)aRvlgI>kg2b93jzRYiJwdIT_X zt^l50%|>dGXqNpEprbxMJ|5>A^u!@gU?$VmJxg&0~-!QP;iB%tvx`}jV*#z2aCoV!;y)&Z~~2!JJXmuLF>qkBR`48&2M zMs45o=9k~YDfmI7893gCmX;eCBqE~G<ky1PK7D2$ zX9sA&DMfO49s`Hv8nDEGDMl^VT2&45u=NGJnBBjdc{>cf+&GLi9 z_NwQ5B~)*jo9BSrCW{lBZfZh^W$l?Dry7?{!1inaVFkdP!$a~|4u6%8V;y@0Jg6q% z4vM-St?w_e;I^8pge{0HD=WL2R#Ni3Bo>1L8n*uxxBkQGNT=oGkdTv4wVAW04{guX zoxo5RAS>(+XJL=1<{W+ng#uul0{N4UZWBaW(4ME>cK`$ry;3s9-Mg=Q-V~zXFSnUj zf5?QXdC20AZrcAt_DTp)E=Nq)yOe3OJRFS!3NjENfbMk2!`Tne&9{m_u1*@SCnhG~ zyqtly2kb&d#to3bAe5@9t{!DnfD(3OhT}(n-NS%3Fklb8y0#0g^&$ycJ&{lV{_PCB zZnXau>&9xc+xR)|k&_Rkih6*}gD@Hm4Gngm zhN`L+@f1c&D{MGuk2p9u^!4_FYJjLVy7D<3^*wM#BYK8o*V%i;1aS zYSImQ{L-QKh7{28Q3n7;UZhup+%z%a0%9R|uqc-YOuUc? 
zp#@Bqm~s>Fc`z)gD-qOiWB+&rcz}1$I5)^yA+vf%M4Y>SNkHq}N7# zvgnFb+}?255C4~$PESu;k)&DHir>(M;6eg$_W~S;p`jrlr@LHVxd9`6sgO`4-bBXA z%F3JPYXTl<%>WThO^u72n}Y96nvc&@w@*A{Ezo&5ZLHAX)tc;9!BqZkFC0FIe!QG->(Qs;Gg2#so z&DJjv4;L3$x^N9#D<(Y*`2TgkFw*;)9dF3!Xx5nGzCcztyBi!5f@U4mWcku%M4F1z z!@%x$iTEI0-~(_fiw`32s8s-)ffXdf`ec8{QsXUt%y!=F+%kCf7AtGOJe8gK7(Buh;r-h zO-8I;s0+lXi4%1XWMpI@k!s{cNeYg6!xbO@!e=6nP_fZU0v(~`?A5~+F;N>5FM->Y{Hcp-De}* ziX&~d`9aIepmGK`iQ$Xh`d`voqJo2izklc0tx#WS@xzW|GhF>^x_5Fi*XZGq;vDhZ5U~6njy)I=pLi^6 zh3Bso3K?9K5F%hek2cgqMMZB81uYEBVSQ2JdHr*_B@}V;tG%NmI4CF&WE0rzrmKyu ztv@WZph4EDU1nx!ZA}T8kFRaJb?l9H1D6pX?sMcsibN=ix!noDFHFHuLv+uBTi zmlz`ioOgua+2Jes1q6Ow>-@HHva$JATKblS6RiGNr4|bU?1Aqs6`ev9c5qj)b}%6s zYZ6z28HVZM+;FqrzJ*N`IBzbJ1DLASdH5~FMtpEiAykR4qJdBwls{~EraQ-wN8-QVlTZ2j_7>4W1LuEHs zdQnkIL{gtS_-}~&;Bdhf)CsAtum4!_r{Y2vf|!!P-#5oJ8nh}teNr_tqB`M?6SLrk z$-9E@_vQg*0T2uF6>z}467lKj3!q8`oMO;7B3T{vLb(O{gs>_49g`6MQ=w~`2Vu39 z%6s?jvA-sOZY(Wb;src70_vLDdV2N1kSdx&HJYllg`^y)^SwA-5Dizm;6l6rtA#QT z;w52q$sq2aN=QphZER}lxY9#}f`S5!dYxq$8?SDA0q&IC9N{&HzTjbd8!yS><;|Ky zcz}NUUI|d+ z)*MdD$bflGSAV7;ip~4>?J-1FU^8GnN?-gxOnn7ZRc*KRp&MxlB~(JXyGua2J4NY~ zMnI$_M3GVn=?>|TkW@lKO1eRiPRW1qefQq+GaT>m1%Y$+-p`6T=bDQZViF?ZUd~2r zLBYdM{i-*kv}#^%ZVj$WVQ`t*_Q661>H+9xmK{^Lt+8EKutLr%LEi^8dR*Fjh%!*% z7xgnXLp=$JNXO2hGL85(UKtwvg5BL+sD@x4=nYPMdvv?ih7P#7L$FsU|Ls6U^(g`d zT&~VzH-)%z5^u~O3vlE#R##W6*W=&4Q&drrGa%7W85(0H+k|Hpo)`!?5W(R{CB(%+ zbhPVPDTbc_fScp7*3?uZnIoKW@IhlL$~m~Y^lK1?hZ8JkXlQ6!;Qm4<2?gc} z%oF^|Rk1%?S^x6~tO!c#>b&7N1CHU*1MhtYP8Af!s)~x>!(}GR8zuabmI(@qQ_;-) z06DP1f%!T}5NqK62@}px2%evv!DX&7T&NLU)Arbkn_pNMf=@sJ3}z~Hyu4M=ad@22 zRdK?&8tmkRQ_+2Oc@EBe=$|2{02lzKK+px2wxj_il$V{&Njwav7AU*8mX<&joQhiK zIb9u{OX%9|ymTunDk$9^{zq;AcN4xJ7(SIW(Rx)M2nd=y?<8&RP*YR;<9iV43}6Mp zNdcrMIXO8yJ3BMea&#CY1QLxNo6As_&)#BaY-}ui4XCEj&`v-;Shn04F_~Gub;VFM zH8tJXOo3$#J$(+)E!z;Zp;dsFxr+W8+yR;buV;IDBB!FpOB7*#-rZfsfbtOPBvW|V zYw3YCU%o7$zlQfBD=T}rGe;8?9jy%S($`m5i@ASh*H%~gZBQ8g z(|hRyxB-8RXa=DEjUc#JZ`j>x`9-(Hup0V#I4ID}3M}qOOB>zPnUK6BE_T}?k1HfJ ze4Kx#i)@;mlXHtkJO%I`%T8=WSXdZDMHX-;m}_Z)ehA=*?ii}Pyga>!>|4;E)zs9$ z047Y&%R(8ECjMe$etv#q0~>mle-J6);e~&If{ttc1d=7Ebq~qYgPSpX02rXCy7A?5 znieD`eh&^~2#80@;(gF@O#S{1Pp7`2;ZI<7T1Te;0b~Pc)G$G$N4sF;(|^eo{5<{*DQGMS}yi`q4lkk`vx9j z-si_#;5iA+-d$*;;D$rIy#!d}!}sst<^%AIxP-(tI2VC89UwyZw{C$q(*pdeWV%hGPzGAv86G%Q~Bzn}HXn z8j@pJ4=?(ZK*{4gCk`&N0DnMv2UEk?lXs9`s$-OVb6;$G;QME`sWCHi5z^+#-`~WP zl*~3+n8>u?2>?L_Iu@v_A@V6HD@Vu1KKr5%?|ihUHu!^HaKw#e8#r+T6!@;-H|Sb3 zS9+6hCYg|>;^zZ(gLPMg{%meK|E>uAH?wze0O^}rOYR%k`}OwXKnniZ+e=)Y5V9K9 z{}hFZmG$uW_~|`=hzkKw{R9REl0{Yf9&Nzc7+>Z=f+qrFuN(`R4lu$G1S{5%10d$5 zOa7(%pZ+~|qEM6(oq?|x+I2Dx)4upXY)nkZQ20R*C2kx*p+0C2!xMSFP7nk|A!oM( zzaM-G?mN1=wzl@x9*lMtdXlKpBWj#x&QABNA~OL5hH7!xhQ8lYaZ4$ANuI@G7#mET zuc0(-Zf-`v;|~be11&8r1RUDx9Y*j_L@3&RyK!9ltdf5l;>z*v%IMpJZUPE!1n}K(w-Y~jR#a7?4nhe4 zb%T+iA(V>H!0W>bU`ds3xEIl!&W{$fky|Gxp@!Ag+58qJSWCCY* zjBcAlHb^KWz>f&J7RtI&X%%!wH?Q^VKOez0nTn7x892WV`?><8Lo3x{& z!`qD8=)R1!w5+Su$E+-?(P(nW_4H1}FVzE2|MF*$mW>f&9(A|229cZ+OG8|YiH@%0 zsI07n^aZ$4LVSE(jIwc7S=kQw5`tZ7v&(`s68R+RO~VVL;P5b^ynXlX19<&Gutswv z0Zbcj5n#1D3e3_L?Ok0^%Lz;;DR1TmqK!e4-;c!2*gQoaf_>~xw*OW z8Y9VER?J%k%Ku4JzeYx~nUu7&4q$VPrDiCSdF5}YSopZPEmc)g1m9i%I03Bxt>kKo zArH^fKJ()>uk}GGsFg$j>B3`PRU)8GX5vu-$F?O-~=9g87?vuakw}*2YY)KU-TiR z%|TmIShxX?1SEQ}H*~PJ?z|Sxf>N8DHciVu7qURdvqA}Y=3&Ga{tInHYlN|GFra8D z;KdlX7N4Fj0nZfN6a>7p%d;cD-T9YcVOyvxn@l^5;)IWESvDK!ON?{{HqByp3gdXfUJ@$Gv}#RM*%D z*O$$#d3t=@3-l)N!kpzFLmyoaAUsUF4FZmuLev??@bgBdcwUs$fdWj1&CKSY&TFi%ha7Cv5kUxOy)yMrd9awhDN&W6CU$!6>gww0=@j`? 
z`aWh;WKdu$J2Ju+E&or}E571K(z(<^HR6vJ|1t|Izx;LR8kIUy@E6V)*^h_D_KE%?9T zA2qr_nGJcZsIX8Tdj3a`1YD;fG_0+y={dH5Z!a+)F{P9g{)F5h|I1Fvb)$PrJi)39 z6QawbR=-L@aP{NfY7`m~Nv`YMNsa1TmY*#6jBksk-hXz8-x$WzyLW|ECDLw5%Zt0W zO86lAqkF^{>Ib+vJzpv+gafZc5AM)c-38~4FF=t&ro6#jsUBQGtq=3-P#~ayjW7W* zaS`vWkq#_I1ONb|H9X&RP+yvhVXs~yf=nXL!}q4C-gynWOW$qcjoHoey31{h?e_7> z!m^mkhG~Kq-R@D)SIREO&@ehgkh~Q8gCAEzl(cDpN9W(TB?~weX9?Cznh%?d9cWjx z@f`v31X0TnP;EGj&~4b;*yyXP6A%(EtghBTX@h{rx+&nY0S*$BRX2S$K-iGrhek%K zUh6#ps*GIy2fvZg6oVZ@yv%r3Hqm{Vmzk0p2s{L<;-R}tJV$evD=IfJ;Q@i!l6p^A z9GS5Gw^nS#u*A}WA|n1sp=t*k5s)_c2boW-1NGj0BqHwh$y>fNOo#>X2rN`Ny7dQ7 zNRZ5z4Q2l=m|&ju1;>$fPlo52i%QMnCXrrhyL)KOIWOt4j_}w2BCJ#GoF2Q1J6$6sl5{!0YCu|>L{jM0 zm|oK4pADPWLUrSzrF-Rh*JAcIjLoZ{rN7E%jrLv4p&6EtNAY}|cKNr}A&u@zjoff0h5lOGDn+FCtbvQT>a;LpFo zSlY!`m5huGjzRHQk-4ht{i-lricARP005JktL}xl{GOb2ny%AOQ;YSnwX^eu5CgF6 z`RQqPULHKW)Di*H6BEfoe&Bfs{1$wGKl%;+N6D-3DE#E%h(cE^P2fUXC_LOegp0V) zOanCH*x-hY5m_n|lq<;1YeUJ47cbWH_%Vz>V?D$sxxAuacMi&CZ8!`Kfq85B?`RWY zMt4j8R|}vPc7R)+O-m;uH=8SRB<+Wdm=)`^4~$RzT*(t(wR_BENv-b0XiFjYy+B$X zix*E$g^9cI&qJH%!2`}V&Qb=k*+~9AQIDRf_sH)U+yet03CbnQ;?VE zwc1BPhG9H~w*e*7t=se9HXE+gssyAQ|I5ZV) zK#dI0A$-K}Ha%5M7Z(G-5fbX^$_;0iB6OdQZs;!xA*Ix=5JG(?h=||+mZb8X_a~Aw z2*h8;r9L;*^w3U8Ci`|v#64%d^6$VlCt317fcBQaiNQRuar)y{14Cwrh}+Zk?blTSsHUc~ za)S@2C8Dhk2>kKmxH&z#l4`#z2Yz>?2e#-t`N&YF3=?qFE=Bq@RUzizsi20)zD7OAzseFkM4?VyI92=M ztR5db_sDYm^M2saEyU*#69W`trOsu`{KRNkgu0_yhDJYPj=L}^=`Fj&_7kvht3}Z6 z^JkNq&!#9oK|?nvu}>X2{2O?7_%NM~Pzd^;C}7SULuqYa-4Mm>uM#t|@R+!m@{{;R z3BqN@`HX(i##h5w_4vtN56xYN&lVBXODn1>wUNZLAmbHB)|tOyxd8!DpP)-qy2e8pGPr<`Zqt?Ha4yRl?#&)p3`;{`)D= zFC+qho1BaZf`xC>(_aDJR(3O`V}y!ER5azG6L1pH)kp#eCq<gY>TYV?;?0^8_xuL(J4fS+4()mlWZ?Hb&5>ndXlh9NAh}Tu)wJ{KN6pt_}vkj zl66Z2y+iibaK#;86L3J7o_P7GQNs|qh)ll_sajs1O$`-Gr(R$^RHX1&*i;q ze*bok+A{Gsis-iJLBZs44XZv44L;Ks)gNV zQO(ZJudAfQ415YeKtNIeJglm$Oft`q3x9)xbmY`gH6h7US=BBbQWPypf)KRF|6HY#!Cm^q{$G^Bkfi zWR(g0AI`f+DEr)84x|lu-!{)N>w`FUKHG;K4tQ_f^5IJ!an9m@j6H+CwC6MN#`e1y zk<%$9ofB{=Wo5v~3_!qB0|1qg@sT#Oi4B;eZ*CeH7;Me8&`8?YL;+`9QWjlp zJ5Wj-f@T8M@)lHcuYsU|9uiQ+8_LCz{HX1uMY*bRVhr)GNwLL>UfkdtYbAH^P%`(vn*?Wi)%aGa{2hC%{auk z)!n)L!O(a~jj-4KX|Ct=&->pIgnkzxAZ{N_fH;rnFd#o+ap=BxNgS-YB*VmtZh01< zFzz?pZug){MKHkPG2qpMnuYd~=7fvN)b*M7R3!s_ea`@q;4o>3kBbAtJ09S`R6g+o zqlLoB&krzr8mw#q%$S&%Sj1z4*P<;1+7-9n3V^rZdk#T`28*PB2m{QN@GODu%9iG9Ey9r0Q1~2JIY51@=l66mhRqx#l1PF~Na;9lH zIc-DHjzmsLi3~6*^h?k;^aHdGiWSqAfERx!xM*n7L6Mpp=Hu^w0kCBrgc5D-=kTup z@<+qKfKEHS)AGyjztFjNc62DG3IiB)1Pur5Rd5HA85;JePHdQCfF&nTx`5R!VB(;* z$pgiQS-?NERtQg;T3WyqfE>vUV2-nRX2=0Xuw$vTbQ^36jZX3TZUNp;JFB_m;_AA0 zeC+fojrfOB>$BkNfmxq!IoGd&7nOIK1pZ7&MK%RRMq+qcjo7eqeJrDwqi5h6ll?S!>U&52Z>r>m8I zjkULUDhKXJ;G09+4VA>&HSGgTX=lPw_p{TbSTKvrUeGPV_eYy`v)#jU|pSXTO9sl8R8-+#m(w##r??R)#aG6};I**U*VhZx=8pj|h`4~qTqX>60Yrj3 zUk~sR0emQdf!9FL0SI1ZTt7TFCjn|s01dsB!$2os-4hQ`do%EUAaA*G4}G7C)E30a z!&6jfHxk5#s__c&mH31N*D1g%a(}C$?$t%mBuI{k=RvCh;2FTDsS<%{fnwr9LKLlV zVeLwvm`-sA7$@BXHKYd&3=H)2qyEH^pQLoOwRwH^m*JPl)O~)cp>ebI(CT42&olhm zCpu0}PJkxos-;r{4Sna%g?<7-m>eWK*oFkI%Q-|)N_ z!Pxpei>MCXe0@F8Cd{Iu7eHf)`yI2+z2}A19&lW3CPajU__8N8vgDio<*q&uc0xEo z5`jne-$qf{)Bcb2?|;Bvv}=dSsz0{)-$qA00Hy#%3A|Z`fgg3SO0B_JbezSydp z9|Ks)h1-ti7abAt{l|}?hAwVPP^75`_|JWV_=lq6S(vwirJRc?FPwdddPN*@mR(L)v=)(*c5obKLP@1+>v8n5ByLHF+N>iQ41%0SBxF%N<)66iwU+9?Sp zF@gRTwt`etaC30%OgXota#@8%MfEoBG~<{9^a$ig8owjuZEVm`T0y@uUFZnxIbc|z zgM~+jjhXpu^^@~hnb|-EtHgU+Ufy`?-)J~wB&4J>%_f@_F|56_b*`Y(S^~A1Gb{{> zBxM1KlBxFs+Dr#Fv@hhUL^*% zee9sjl@PbwX@GrPICPlg> zr(;-446u6%_1Q)8JRNM5;l1-teiNR#1aRmE%YkFKAWOi=!{!Mf*`T!-fztuV3Z!#i zZ|}pcDLuice0U`{V!@W{D-a$~QBn#+fw&+Vbn|MV*~GxV!}RdsP4Nhar;hW}jldEB 
zg=1JAQ|&b43%mi`qXE%_KA^tpjTss9pwx1jD1V7l4$SY5fyPA&aeqOY%GTjwZkRfQ znep5Y0k9+K@jK3Rbnsa1>r*b!0oPmtU)1||WPhpE&3=YWuL^7(t@>ZRzsPs|B)l&V z3xRc*#n5|h7W({tmi}R+s^;|68~e`#2H;o3Pa<7Wl7=2o{yg{V-=NvE_~7fX!3X<9 zhbV`ssvRR&RVajNx8;Fyor4`zzVW=w42Pne4?&s-5@tRkAdcnV z3C->8jbL=k-kznJ;N53MRa7Z*0zhDH*UQP=E$Qv)i8Bl~B_<=ww>i);FizHo+F>T!P9rR`sK{Pb^-W`AO_8wqmtioojtU~g|YSPTphqHaxmS%MRsk&yvv z3Q&5_+k6TXQU3Btek)fItLP4TeyP4XX=5Z~%LR%D(O^Sy(IpkqRW>G1#lSB!Yw< z>QR57egT%#Pi%s)1l^n-CdCS+Fp;?AS^VKh(I2# zcjW&9>;`?J)8oL?UuW_t?dUW zlc61jn^9(1y#aw9jJE%PW*(jjSqTQ|G73@eUEr&7bfpM7tlG+tE{ewG1DUy`s_|$c z_IzPaM7Q)lNJ)!>x@_M)%^P+~Ai+KLSs%=RXbklwxuDBD6jlevQ_hbw-VHt~E-r@P znWIYw8;nvKQsIJ;0Ip8w7R9hE^zZa^Vq#*tB~Su39Pm@YHGwMzokj_O*$}FJfKp2+ zFOn8f*U$i>ACMR*vB}n|M;pMBE@93QuugBCp7w_m0qsb1Ow0-~WXFH{?=I}>Ktm2C z5i1l4%*=6;HcNyc+^44AUS1}lre+tlTl^6b87T~u>VG2)DLl6%^>V};?#Si9Gy%!{ z_28!n2*IH))PtrDN^o#M{|LntR73EL09^+2L%Df*a0ZV5{Oa!SSF!t{_7nP-Ilmn- zr}O`ATM~6!{tlpHj*j&psHFE`i6J>C=trb)Vj3DeR=0f$-(wvKA-Q;Xh^BuK#oqN9fk{Q8jyT6dTHZ+FwlXcVI z|8W7fvE{NM1T!{RlOyGqmYOztvayjca4!o{@PN7#LQJWU!y{38+^jqn zZvkv5gAxp-Un%s~d5QizTM=E=EagEXF3`d9zk$ zQ{A^d@!VfwlubnF#U-JA-;`F(?;m(p+5GBY$lvgtN^E|27X?Vk&C+mh$~t^TClPV+ zn{}zYQ;N#p-S1(xy%kLJckHa+I4$z|;Io22kbO_ke_zlR{ID>ey)eI7Pv3PV;7^7< zT9BCPO8t(Yh~R{EeBi=7wIdrFYj&co!|q+xJ0n{U0<@9$Q=-!$c>>9+SkjhXUQ@%) z%34)hn`9WFIyjK51O9PTv1Uf@%^Nnp)Z=RlHYPKa7n0oeo8lMg<6oyCzSkESoR|^;Cwpm*}sTe7suMp89la$jYH<&%xtn7 zg>-eM;`xsi7Jm1mw=tk51X~Mzi7spj*ZBQ%Av=G3z=-+1L83(;>gX7!ZdGQ_c$Idl zKqm!rNijS7v#6fmjsHdksC7*P)m^Wgf@=^?h(M!r|TD75DdsV55mMgau0V(NW(+_7mu! zKgHpAPILCMl|HYq5b@UYJS^DtD)p=^(yiy4oVnjgY`Q>=<;YioO05&K$Y*j-gw#IP#$*!>gU&EM5C5MnyE)%!)x2&Ks7XQn08>j)I?4NjF!eT@ z;e*!?h(pd&qEn!T6^9c9N!NEG%DnDo>E7O^$JxQSwsgiM>4)fpeK`)s-4*Ot zq{bZhb9>)Lyq9?pI5~JD)Vr}$YaJre)h9wyzvZ;-(}~l~jM9l?KQ4cKp(>rX`Qtog z+{cG67+!OU!Npo9mUnPp0QcLh6 z7{L;x2sDET04`vWIs$A$UirnG0e+NAu_?&F@DwO@%Nm8W_gwC~^HS`E766?<7Vf0M z<+psoZJ?u*pv>^zP+=+S7EcE-Z4y^v78XP}t)n%*(rb%v#1k~5?xmsL!uTCN#9nsn zahj6WT-zH8F_Y|92q87aK7fVn#lM@Z-(2mIj==m@?i0yYh}U< z@j;IR*#SwfDEnJIXV-2l2dar9nV5@MbOS$ZK(|WDp7qT`M3J$3V*Y6?94X9tk2TX% zH3~*2vbss#77h;b-!&tVzFje)s?EBASKaoSdf_ZW^a~Q!x!ES~4GE=0u^`GoigOrJ z$Z*P&@A&qOPC~*AqLNgb$EH$mPxqzkwNrpx#Iw%s%YDO!A{c-Jhl@n3eUf3O9k z6){&w8(*mY5%%|5`jQIj|LK+W$Q=4`_m5~p(`9>HAzjPUxC2>HdXz zs7d|rP|~fhJs9W-3MHX$5ducPE8bCaqvO1v2#Md+?4^r1%TRbI{}}cy?q)z;egp`F zQhqm+Eo{YsR*k(nQl3Fv+_Va*af|VnmzlrKGXt?N_EIQO%khr$0FW=3 zHxM|w_t9@7szS1EF~YY$dKJIO_P+63S()5i@|;k|!6PyiYJ`GcmFC+VPlP!|m<3-{ z(PQ1DjDp89(P3eeg(U`|U4&zT_b-?^SP{Tu6jwLqN@?eIBlU=Ixsf6Np;7zodh;Nx*p(xkr014Jv& zx6U_sZ=~L8H6SU!=wkha$)BmS_-(?c0#Pa|oHZ5JQ6SqNW*f1FYVNC+SMo#d*)|Es zl>5Tbw=F8o5L&^GSS87{_kd zOgI34pg2QAM^|F&*ww$Ap17C)wZPqaZY)RBp6)@{*zn)VmrfkcKMdchi>sJEwQ_`m z5u8u@aqUSyv=H%$c0RKpQJ?+x%>qU+rniAFfd;1*6z)!5@Ckf;eCX9K3SQ6Fck>fbIpAJJ25+`MKMn;%_@V#=QT_XKoEdXC+%$2rfogGqjta?4x=%eDU+pnfs zpPXLU*qS2!`*lS7^KF9^tj~(*rUkJr>|g{vQF3go8ff4E!UBM-UZcL@1GjY#{8dif z<{l`Xq5?NJ-axm$(eQddGIC-`jy3t2bR>D$C-*)V#5_@**b2uiDy*zKy09m?fq+~_ zk?m+Q+=vTf^<&*_wbccHb+(>Ze88dokK@I|p$qLGG z`97tPyC^^;07CkvrR5q5<7)hc(T?eq+&Vr=m~7!SKhggyo_6uMu4kZrc>KenDUn^Y zlk+WelaKF2KVcF^L`OqWf8WRd3eZ6q6^Szp?EsbI-|6~auxT6CI6|A|@9XdHud)O( zw%2)m6Sm`}Gf?2d^v3<_w_Q>pAB+N@PNLAyT-aI)W+*;kF=Di@tgMZy0FgpNqXJtv zC!!;8?T3Qs&D2Z*y+UkdrMs+b2dt=rR+z8C?oG=exsV2xp~;rK)1n=9kh&yY+uL z4l7w(=ah=0ynjGTI}hZEW)UauF!#3gYJ!Nq&bi=_`rV9B(|$*U@pp#qk2VDV?P;tI zQk3y#pM4bZf&tu0@JW_Q)ad>B)6+FWVwLiJem>L3jS1+-IyxG+K!+EpR*~q$)3=Fs87S^)qZ5tJ&oIM%(|^T^%#CwL80cf6g0So z^bD9sLKIXmWY5IJr2oYdE&X1~?#)(P07?MnQ~2CB@dw&3808yb#tpK+rG(@?p_NXo z!NO+Dw!QL8lrd8Z0(&MQFa+|_(rvI)7&?r3NKn9H-AuB9IWgRms>QPH^{Lt&!16|* 
zD=7#SWovk&w3}Z?6qHKeG^02YSs7ED4?Bzh`_o)8Z{6AfeLd(=>d^kq`mCiwG~|Eq zRdBCEP)KNHWksOJGgD%4xzg$zb-^7))w=CL_*q!3o+4{A*Uzk%bV{Iz|a0S z^brf{;YggH=k347e3jfk=?+9E@f>W3(L++{wG0i|sdWb|9kR^#J#4-@$#KE63d+2r^X zNb_hsV+OI6_Ul9HF7{wwk7#Vx=3sFpp|9$3;!}E3*xiJ;IREX{y;~DQqKsDZJsnWTsg(ttKFpfEHSGK){jwm9hzdi{xAiyGOTzVk$c05N{qr2y z!L-_gHmO4(=Ov4Y2W)0YMk7z(z<|%C)B1({xCWLoHWdaUpQO*D*`NHik?RQ&!W$#w z7Z2vU$5}D75sdBLqKx+=vVKK?{u`iw`K1)V^IV`^1yKm zxIrljBIf6M5-4PdUSxBMAhhxhQVJ47%Itmq5f16 zJtZlISMT5df`=koS4!daBNp!(X;bh^xaY=f=&6;R$A~03D|W}Eh@1Vp)EBOf3R+C! z;HMG9viAW=*3yvb{r!Q}%am+n(aL(UM_5Y#Sw|3|uaDCDxprZr$Z4`X zW2lF6)_bTuPgjv3Yyc2UIj47LDVUk>Zl=5d-Wg#|yW@*g`Nd`kOG2`MwN%unnt$6fa#hc&Zxr7Vs8==Z;{;a?B|$0tsy<1qaAgU zcu~oCD?rJ8%4(MZ(BilmxHF%lj=-PhTISSXhS_+Y2_~ zT|R4@HncF)h`ey9rhKwm?k;!0UY8c3Li2gxN!=Np1l6N}_N+#mS&E;ti!@|X$-K)# z3qb3F)k7nwVS%iINE7W5)&oFKPU2uPC+-sH;$ZY_V`CCB5|Dl{9bssvAusLkFN!z8 zNA{A?8kQ{D`NcM0Q#!^kp?lLu_bNy5-3*ow0DXu1uvWc_YCV#^MIn zla%&Vp~B+TRIbAp8u+hh50fF%yq?bdHT0V>+A;?!yO2K+pP3aK%z^xv4tsZ3 z54Ov~C_Q}m`ub{W4nv9s*%(;9kPs5mRr9vCW<~v2af{sBdm_j$)e*HzfO-&TF#qAi z^#FIja-I~SCvou%%_*&OCW`a@w=B|&srF$*of-z!S%kT5l&jg2DWy|hW))%#f7c#E z4do4`JVA{7dj=I*ry*IfJQyhgTbX`%W$5#N|4xHY0T!6@I?ql6V>vn5FuNGCT+Rx# zjole!q3^?ed~>;4g3_K`@Vj^1S4%1)?Y@v{`fk;ae>j> zNb>2MM3bS4qSoM&>SA=$m%G>wxEZKYqG$|jfXX%kRtGZ+h~1;3yF5>opFgjXIw44A zz-|98BMP%m04=9m|AxmAsOK_3^Z>J@d2#rz>Xp|Gu^koVw-9>!!H6V52w}S#>zAsk zc-_}GFEK*YJTK4P!J`A9dZ48mVQviOgP?9K)&G(SB8z2y3MAb2!_D!XPtMI)*x0b9 zF%2dyp(tm`pa#wN$cQ!;nkXLckJq%AD8ZRT>ja6`;QtdBAHTe@aR#({ES12|@3~h5 zFt9frWCDVL>&v<8R3UeHJ2`Z$|DjEY_frB+7s6p7BzTWN0geI8Sdt-r1~G`9ZU{Em zB&nlQ4Uj63b4ocdjj;?Esb;k*&{gZ0T6-z53AY8b%kaWsQ6vX1ueO~~h!Z4DFW~Io zfpU|IiX{OZmc)Nf6#$e@^D!_9HcX4K93E@nwe@chT%eG{Ld7Hg5124G?dIjx0;AC< z{6VMQetw{*codhLPW1m{j%g{v489=sn*(qW8xIeL6pH0_eSA98LgHHn=7hB86aM9Y z&Ht*@&HcJdwu_U!W;H-H(SqMAtm=efI19ko#W>IB&u=^#l0b3+4n+Xl4)yjv_V)p} z5BMVB@fHOxdDqWUvCvr&iD<3d2%IpaUPmPL3tiNB&A(jc`Z-)AD;rrF4u^R_uh~7X zHg@6MzK>d`!kN$Wl>FCV=LIH8EcG~3c5W^>V@CYW+ynco>2OB1G*nK%V~3a{;nJC} zd{5dhtIp|zD$BGiYI74j6Gw3oOzg7qml9qA?zK%lK?1t|jYTG%ZDR5|d7K*M{VQLJ zKND4NBpZf|&fdwI4f`mDqEbD%TyUvdTlzcMm%mJLE5eSyZR^V%We0`rN(zH3lwZ#bp?JXc;Sm< z0mr|e-S|XR$}R1sY|!%X@GvncWQ{LO_8W=m&NZ27FRZ8w)mUk~eN<0jg}(Bl_@Cp? zt@7!a(lhR7A!e)7W#=tps!Cu7!><#W!2RJ5h!@<_UPVx}E8MyE-HO zAE{M1E*<7x^^fKsPO`qHAwS%(b2-us)0LfG8Z;mBHA9bF8Cjgo>&tG&j$a#7?oaLU^N}Ay zr#v+|3FN{NAYep2JgiJ<#K5E8C2v{lXiA#RRiUz2tawpid-C|}9p=2S6(gf$U%fn6 zb7yOQ#!1rF?mfAGp>oEVIXTuqY=8cW+rbEvrXbgg<5dB;8yl#)$mz^IKUL%B8Zn`L ziD{*%oVhY|^(tMm1FK^um&Mzpx(<`+9gM)UNJr9E-hsJGbb(X3oiRdG3rQF+m02c> znR5sBFSubX{;k`kYCveC2`Sz%15irLK^kEf}6d1Gx({8k$ zirrX$Azt=BLLiyJi&gw; zl=p20!zU;z97U`z`Yl;08Dg6>9{K<|aK^HGi%ol437^aeoy$d z32RoCG+AZ}r?4<-&0yELXUv~icc^@0Mzep>qiGSl`a~Y-&J@*cE~dh+PbIY$Wr;NmGOgLf#lO``3ThN{wzA zC}1E>&_VR6>v7_c6_#fWHjRypR)|r_ zY=gM;%UN*}!9viYH5jS4A~I4xQ3q84shyiuUrwJa$T~v-xn0th;o^r*rh>Yo>sTZK zCo4&Pq%eZhTwjh-yPBzJVN&nXT-z=~Jx}*jrhGYTQL(;FzNl53XMBGVEJzggCiO4v zk)_qU{GPz5bs1Fm9^=Y9v^pPWq*!S*x&hg=fZHTinrJtglFtsR5{{OOiV^7za2S)z zJ94KX@G@cAVI6%|c3KiZJTTZ6WH_kSk+Ah2z^`3pNhRMsE zh7xkJ;?=wN>oUkWH%`I}y;a9v7-5|A-<_JRe`m1EbW2jb+Y`ZEnl0csnkkP}ZcaKU z7A!MXr^6Sk+4OaB7DyfACSMN+VUNmu>ehenvCI zL>{}Qz4k)Co!q!epnBf1kkhM&+^?t3t>^7?)>Ce56sdCI?N9m?WU(AMk@k7zRv89L z{XqxiA@L_|cHzs?M;#c3-i!R{7dA>AAJOvR1TdRqy$bZBJj? 
zRfDVjvxkeicUhY5n6B~ILg13{^R2(ji+xmPqUmm|SA$goPwh&W1=ggd(*kLhmk~CL zD}=L^;bxV&N7xFFn+|uQ23yefuX`2GR<31!jKF@xg}!(yv7Z%qV-C!`CM|NpfcK}RY5V`0>yD_i+Uz- zxBssi&v5A;x-IF3`# zT9+>ADc`zupKqBfLnUdC>o=;t2)LxY?WKnZzm7RS4-8~8{t_?H{G87E$g9`>Acfm` z+W7Rxvo?3`t)o9Yvo9*0kZf;rl<4mVik%-0@205UmDrQ{>opJ)Xu2*`VsdHLl6%#) zAS5R%dq>Fa2{;L8;uK)i8>Fn=J9Aak zn1nHhV`gYvLA(0OkD_+YVBe?P6qXl8YO1DgO;l0^wKra=sED4oZE{E)EFOgZ`}Z0n zmd|m9ND{_a$15ciRYecl`U+lki#^*hPxm-?8Y>Y2F#y3zMm2pv?Wgc_iNGz>-EVnR zy4TzXuYMb!WxP1Ay}s(+b%08ob(GwU7$gZzK4 zChD6lvH8`Np*0G&7g)3X7l;RUg-hQBJUMAe)J<$Rqs*&r8(xefJ+SINp%g8C(GmTQ zWBcp{OFCb+W2`%)T8o6w>gU02t?)NO9;f6%n@MGdx3H$Fz_Rx8x`lyWX02yt=aTyXBKE{KWandPeQbT+8uA$M(CX zF4Z)qzO(gB4!51Pi^HuGJfVvXX1P04y$^2(>R*&7Gi(YxCkX0^?D_CM*KzJ*&EC1` zs=vp&{ucFDUbpF)DX)#IQXzl4Tv@d=2eo4<> z?~r2hy7hR*IokhEQm2Fq-GTd#M0rj1kl};N{RA8^yEt1gdL@6$d8+ys3(Z%G+1l|g zn!wWH;$_^hXbPd{f87^e_^Cwt)YEcYcO`5h(mn}Z>7MoDC_Y0xnW_Keva6uKljiY# zcE9)RNa9j}k)^Sr;jC-azKxaf+1jnDKOy;J-SlJ=@@M1MO5G2sr+$9aIA(Q7x(| z3kB7LgU{7=k^jYZqr~>_X?vH$IjU>XubVW8H>E#EdyXjizSsGpNY;H7NFT1T?{2#Y zE}rtLH?d!_-zrs^MshiNzB$uAHRsjcC4uyJ`cOh3mVqvH66y4;ePG#9_{8y-kM#Ak z&=QB&b9cSCTsv2*g$z#20$n%SG3wi<)Aq%!u6Qi2&eH*_J9yn#yM<{_NI7=zL9yKT zCJ}5d&*S4?b3RMIxv~&_{yQb>-$0DkjiEYVpM>fgI{dGsoIF+C77hOGN?oyiGE-?XHr4&v zt+@z$V$fPusMgB8+)!aLvG$`&h&T5c-x%F(WZhZFYMSyC7jR9fuSW}tSXa13-RLpo41KA4+eV6vs*W?CDd`>VlAP{S@ze$B9^>Oj zw%68tq~uApc2fDtc`JTk{TMR5Q7RO-GxWc&te^ zuH=C(+d!Jqnahl0e+DPpXrUYY+11}=<>a|87~&3<)>>G&ZCp(UX_Rx ziFe-ntrqD|uMlT3zFmz4bFfkD_wnYA99k&Y%iUH@_3@kTwY{y0GKt>%q(UOlbU?+S z;RO$m)7tD$vGzNushiqJusTQTet4>|zr*^${ZJF#Y7G;URJYX&h$Zknstbn5_fS!5 zht?wsUp-|rQhx2j=CmVmem#q0eww_-%)*l7U!Px*NhB#1Ahq#txxs#&Ef#gRGeu34 z;{nR|uRiX_MO@Z7)+p%|!;*ODQlRp~v!)<}_j&o~A|G{RZ3I)WL3qIM@K2=LZMJ{| zVyEQxwjQ@W`)eeZRfV=yn!w)|OB^XVd1WW9YvCx6g3flY$yh&L?m5quo^DN%8kef^ z1b$FnUEbKA>~_9BwRYGq;8FQ1#*wb(d9t6v=d3CdADzPMwAr9CpfXJTSMvM83k-Lz z4z`lw*^9$ht0hKue4loEmc^z#ZTbs;Wb5Vry|jP!_cl(upB!qf{rSShK=YZ+c5`YV z25PlXlD9BCjXCzZyIsVO^3hcEE8Kp6wvdtdc>TLgO@fARoYhQ(LNu2|`vv zV5G5NXLR}TiuDRxu%XxbP(SEoSr&iwNqGJm_`&$TG{+P@SvSK`IK+!fpcc`KPk!W( zM{U8lgnxVaHtyXM*j*NqkYFi15<;Oh<+a)2D6n_z@7h(xFq`%jmnN`z_ z%sTm)$Npxd4!;<o2-BrE0CG7G4n0o85s@Cs|cazd7Eu}~!NT(o3NOw1)G)Ol{DM%QE(jhG&9nw-F zozh5ygh+#M=RV)xz0dXWuXEh7-o4fvbB^&D;Ntp_{_7+=ixVq5^i-?^>|L)ACp@MFMM8LT_v?M=~XVW5& zY3GF3HPy=9xSqrUu+aR$+M0b(=XRr{&)dk*;D~OdY%MJPO)0HRqo+34&3hkw?Vavz z-z1)AyKVf1t}!$7W!z*r_MrVeZUW^G_hZ!d_m7i>|61dR!eY4fAWU`1M2t<^tOC!9 znRCu)Cc5CoAVEn9J2(9v_9!#1R>({>zSBaYneN=ox}|upLC$Ns`eaIvN$CI59bkz5AlAvwC!V@SwBu z7E_1oyItL}jwEG*A?sM-cwPZ9G`Km)so4iVl+#-O-HM3LCNRawDCQ;Z(UEjr?bViK z*j;%csc*rxg(^_2DUeedtXHHwd2-SYT21iceA3n6$Ot(82^1#}J;@W4=ab;#(D6B}G2*TdQRGsy~aQn8NZ?3jc8BRXoZ3UY5(& z*=toD@c$k2()Q!Tn`locVxWfuMe{#Dh@KhLER|_inz!=coxIlwq<3MhjmK1X;5ZLw z;LQD@d9}h}aSFSj8C~6WEKRC}o6S^5cA0-?n)q3w&WvzKf2zYD?er z`s`aRFg-Lv#2+(GJ))an$T#$3Rm|C@^Z(TXNIrresN8qKXdHTJpc^dIv3WNC)km}> z<03e0^4+-(US3y#_OhUM%buMnhn>)?@PYJd-b7`#X=w98 z`@0UwJ=Ta%(3)6@Lv@Ri2#QQl@%~Rs&*M!%C(i2nx{J?Ho(S_1lyrgk$5@RM z%9QVbR@+9XE7cSlLO)FYiEm4m|Ab#uL_3!0_HiNX98m#d6a;QQwRka_MHl(H$yF=N5+F;Fp}X@HvIfh&W$$=CkI8$ z7zq@}sJ};3@j8(HA_70H*k#Zi*aKHMxmWMCe&9 zAF|h1cGl1t8iQ|7&@gV+lOev|;)Fw{B#0WIsqnw?bO4!*c~rWV0xmt`qYpV0n5Z*W z{8katO%9*PM;O3}1I{R<&>J0kW@gRCE95G}Za9RFo69zDwU`>?RR-RF`SQp8LGasj z5lviMA`q-UaNl?{#?C+dw*Vb3Ys4}toitcTECp@Kf1tUl9l`vK2!CZNX5^cZwdpar zRSv$@{W*|p0r~B5i8Lv4ssnX0^Oi^_9u~HPrCV96fB$27*YxPn_KGcx3pW)1pOXW_ z^Qv|t`0d~r&(4;%BZH5@#Dm%uHjO~lq} zsJ2=R&RfF)tHsq-5AawQ>J&h~8L7`ljzU9mS(V+Imp^rcLtTbv%SvO1YO`v>3l;Vq z&f@UbviG;Hse*OP2o?K3pmc=j#NlzxT4{UWkP1w^vZPEj!d3c!6&!9J$%={Surv6g 
zR4F3Ygf9RIA9mHfS{EiW6=umh1xXhAi6vjgbYhD1civSC46f^FGfLFn7AO{9*0>w| zrKBk7>lh8DEFjB_jc0*$luMoseHXYp#$G^`U#&b*w)klv#r*N?`!@$CXJRjLzN%Ws zQzYnZO3zr`7AJ~1$~tt<GdL1cV{t zgZwAzo$kmicpSB7Nb&KJ(u(~8TT?%j`8yqky~Xocj3mcLMt=RAr_I=z`!ak)^vkj} zBQq_{1U!OiY0W?SM~*(BB<{ICIaO8fBtLnyT9FV?6gz#TNW7okM`SVodGds$R)O3x@f?2^zJU|uQA~P&JJm6?t z53o<~3}3NTY}@z8=h5!ZJ}Y;S;vE0I4V~}v?hR>*UxCs(cN^k#l$56l5GHaPpJd@l z1W5^T*f*#xZ|FemTngjv#*4{`U)9wg4kZl3?=0VnEpb+F{Y)RFQF;CT61UlCoo@E)2Mg(V7Wqq)X{|z;;_%>%!_RP?} z#=U6eIrU{gJo@_TRb-TA;SCojpG+X z8Q-5Dw^A2k+=6l{Zl$BV`hbP2bj#rN3|o1X?B3&z!{1tIQXxDT1;|;A1u`N<43f|! zAEEU$g$G>UP@*|Z%~W`Nj!U?g`H$hzS1cO&)Y<4JB{G8&zt$EBhcx0pw{M$<1^LPP zUb6o-4r{-$amd-;y*O+#OCDXfV-WJ|<%S@RirC_Jjmtk4Ccd{Mc+AO5Z*_d6HFT{k zTdt)v7JHG|yzzieHm=-4aMwhG>gL+7xVWWXuS@R?J0W14t(IqcgkKKa8LFV!HEpjM z++PPxitH4naNQjzfrG=`?e`4SiptSW%rp}O@T;R|MHH48oovKrFJJ}d;-~+^GDV4+ zYjbYv!sc~Dzz1#5x2|?j^5E)vVr@kJwrAy1_Ygp-^F?L34pt-T;?#CmkyQ?^ul}Cd zjk}SB=X<`MUzZpxmb5sUr|Zb5p`a!`t7b!nDX&)j`Cp%gX~=!t;jg5Ku;b2TJ>nD6 zE^b{)@w&*i%3-HW!$M}R`{zoX#Q$htuLQhGe{rt^h2hfYl7~(&ZWPbGy|pgry7_+s zhz$%d?>t6YGBM08HLwx-)0uOKDw^O{Q=Zyy*K5D_%c3~_gb;Plsj*YuoU;5j)-VZ| zujWp43nWB58dPsee&&?>wAD)m1?x8kg3x*LPFWD%tz-s^*M^o433$0%g|wFhQC#dX z_ZXg}%Rf)o{92qhgWk&h-+w(oAOK&1Go#HKfdom=J%=T|S=Hf^6 zx9#l}d0;%ZvMX)2ODqkn^TR$fDEB}`)IMdvlBR(6$ObLoz(F42#OqYnU7&J{4jm@HqrR^qEqq2|2_y=g=eGh z<)6RBHgmLV6LqfWB@H=yRdPc=t;Bf;venPlWv(axTr^Jc&@gJ0dw%PLnx;1Fbh6 zWb%_s4>6)^rD($T@lEGlB><8uRC81xDA%Gu#Q9)z72U__Y4GPeuR@$`dfaIOA-!Zc<> zwqr`3&m)vku1CW>DJREci5Q5Ij*k;dcD+_>#jYq}F>+Hja{7wQGPQ3v1U2kE#=sEtF$1eky|Ik z?ApBM>g&d$a$2GqVu!y;>7iR=qgwoMabu$ngx6q}Bcu`h4Mw)3g99Yt1LVYFkY6z$ zVQr9T-Es<+_U6+NHRH;sF=b_c1_NjX9j0JZkZ#Dr@C~AY3k%9!$#opInmQKtq<#tf zI{waxb^`OQ#-9dx#Q#%FjD#CrwUu6lJ-gW$5~p)y^e4jx>+tvM|CGM)Ww6jJLDvg_ z5&%uy$XpWq1H5P8VyuCX6Nt*^EAQ2S%X7Oy85^+wgCMEa4K=)|MT% z5%j#1$~|BUA?@QMQfpyr%T_`{`t4ML1qcubYOQzJwr2nX0CvI^fU^M?_k43C*8|<| zV1Hk~&cy=oA<(8gc;fi{3tnAcD+3lTE@(I38DfB-z`_O0FK>;QKy8;o4|1E5+IhIh zQSe3r0l8B|4Jw1h{eckM3OXZ5OaR@%Dk=ZTVjCJ|{Q4LmtG139K{mtie@)txRG?~N zE`luJ$dKfAYrP6!j8+ytEvM!KI^3+@Ro0%^&FwE>Y>YvN&`lrYKf4AU|5CFSf>mpC zA_cmXU}j{q1oo_csvr#lQ5T>t6AZCX5THbx0m`$SR3CsG`aj%<1d!qyO(cbo_el@P zYrVm2*KG%BEm;7n2edN0TCG4I2V|xhz( zz#j#LH$@(yVkYPhKk(j6kYWLPxEb?%a2pF0~Laaa&kq z{bxu$grZO;2RH!$2oHAM8qoI@RuhS?gXnwKuLvR*c!qi%L5c%ZhyM)v|h%0${pCmR*%Q-B5u7 z!~;dCkIJ3^B!Icpc>0Ygh>%;v6`fHTK-MI_6Wq}|2&Tv>OtGP-%t99-i!%4=+ zm$EbHE?z6U8eNVeDf#fM)!4Rrf+fG*Q+_Xfl@x0F6y%Big^Gn~dT8bQ21GR)qK^l! 
z*udDNq5)6Egxb>1Zpg5tMoAsI1gXS4h+^$+#|0B54D{gwSiW1~%!2vgDMX8sA0{yoOH9Ka*3iMK~m|tIOvp$gKigoDmcxM^lT>L)A3df1H)#xY4Z6; zja&R38L}2NQaqCsOa(O&U(S0cx1O>MrpPrX)9JcwX|ovr!yd%Kx2p>XQHvZRxbAsx zr@s(?pKfsT`F%b*UZw= z5qw1!e8idmhn*nxtsOvz9D?b3w`Z1?w|!JGqhJkRzUO**h!pII)SMt%(tNlimbW~x zJBiSTiF(|4pOW7*?r{V(RQCuwV|q0Bl!1&j<}6Ri{84X|uWXJe@77NldZA&4QvzEz zH)=5w8t~U8f?^d^qR(ew4*mx>cJ8>0I0|@|fr50iUc<0fl|KwDydU)nXCiHgzOFO9 z@BO56TLf4Wz~zwp0c$VPFh2+L=;Qow9;W~5QOY67|G9o25ygd{mPu)FXZ)wUUvhGF zJ;(^U60sX&Ej%PrlLN5I|E$ru+8VX*8E+@`*%l#XZH`D0>nsdTS1&l*!Q&@D0`Ra$m1%ec&d$zn&B}6+ zB_>KDn4@fNHG`K7V1h_qL}OXgF?C-B#Dwu1K6~{cB?UMC^CtEGm6pI1$pW@TuquKg zbGGy?hMbT95WNAR04D-yy`Um%aEHKQIlm?9u28kc%8ml%C3IvvO_~(Y>mebi!^!{m z!Jso`80`J9WYrfXq7*TN0q+Y|i~lBEhUgwQOUj5Wb=EET0&e-?;w&=j09Kj%V)atk zar69vWh<|_f$mubMoVV}C*t0V8LcZ4O57hhl4m41q*xq8)VMKWjnqc!2CO?_$Lh%_ zTm5KuXUY8q6gSMUZzfky9UPStX%`3D;Djd27Y1raY2}Lwg-mOgj0s=Zz3i|{oiY42 zaX`#2RGm(CM6I14K7Og!$gMlR=DTezEFMr{QvZc&!tkUl~(o2;o-MUO0e`<9Z$vYDUD}a>gjOViNDgh zzL&a_jNJcjJ3Qe_GHT1ISR5FAyyn{ZGCMz536hrvvw4gdC^?HutC5ej-i&6NAkQ@2 zw6w(M#j!$F;hg8?%KaTTSp_|uGfhwTJkQ?^4h|v}V}DYGp^s+YEZ@bW+%&11gVczo z#(eykt@d`lgb($99$zebZ4EF|B9|P&!A)?@0RSsbfJ9}&ExScFIuf`u5V=(Z3-|w) z`#azmm310cSQCN8Rf+2bgS*m`C!d7BU~kNcf=uBR72nfWv)OMCMc|wTuk`>e@JOq&trzPLmZZjxeF1z+D^C)D-Yv?JMm+ zq{Cr2QnDy+{){Qd1H_hxfLs8G2=Z=`_J!D!I{XJt8NEoq;(t3i2D}JJz6lFMH>s88 zxFQL=(lRmWcYL+q%#{W2&5R;@mxln3eL!pRnES8U3yd15O1HnU0Ti02XU+e%2^fGn z_;7w&@ci!&zCjnhZ`e5yvLJfzo)ZAa8|=xzCC}{62<1 zLbR6B1Z`I_zGWw#azF4duADUTKRCEkrk`wN^xeVO@t-(t@`Xt4A6FYLzMO8NKiWv6 z)cwt&EVSB(dlOXnFy#OZ^S!DBOOzr5Z!}fk*!F8m43o-Hg~@E3AqlM^@}8U zUWYZN1)Z$jF|7AURHw7ZgbxQ1DI*Who9rawh}hhE6{Xr(4HRw_0wdnc{yv>?GBK^@ z1%J=0j$e(MN_X(^U@qWi9$SAB58Y*2VHYW{qwT*;EbXV|ZwxSfj=jHZUv30ni?jC{ zW*lyA8u{gt%My7!%<<@o4iCrZ;34{^d?y+KV5MntYP*~6vypj%N0aNQbxZhYL{yIlr5dGi^EYGrAsnPX0(N%E}5@GPko2fajDZsy4gc!!sMS zE2HD^Tuu94%0+qoe`hGUW2ZE_|FqYkCa}n|dScLXP;$7<3-A6F`C#bLrR~-WyI|II&W7bO;IICvYB<)?g3h2_^`(h;R}mPHO<#H{_FSEeYcKzNXJto% zoSMhCfT>cQ%&!`_}0{ER0bfA5|NfHrZv+jO6@&i8b+CegD<55YO;G{2%{v z7yl^!wPkXTwIA_tqZ)lGo+L+^Dc(LxnO6oLBOK{hZO&eE^gi*$u}%WjI$Zy5#1o}= zv2VPx4SZnNC!nt58=aU=p4LEFlpPPM&yuoZ@gz2d$D8kOug)2-W*Kz`nQ)80JO6BD z*yc5tuClpM6w+ssVe_*B8EID3>t@NL9INdVz~5Axb3~sK``Mer_iJIl1iW?TA9|?yLjGPO@SD)~>tF*X}w`PaH)_5$&oY z^{<60#UlEZL^*+O+=F8JWB-hI7uMhXjK`3ZRZiwFj8Nj&8r+N{Wiw0DB9VUik{l18 zt)k`$eO5zM6PdHFSH4exi8nf*YEFT^rQK^gEO;^?3wLYi$3%(M7>S%`#_y%Y6Sb{_ z3#t=kN8FraNKQ2AxSO1_9)I%LeCz5NBBF+KYc`rI+=$-0F=prUuBw-tQsQqFexTYv za(8zZ=wvq6z^1H5S5jKq&m3gim~XU)Hww+_yd{xOf2H!B{oO@NBs^*HGmhA`M zkl0{ko4K~~zt@UGLxfy@%d_O-b)`qCPThC@Vt#g5`iXfjee0W1{@Vmhgo^YKKf1wY zT>#3?{G%(YPw6kJ5HYBWbGlsqdpLA?Qij1=+lbcj8+2`TRcNtvSe&%%%p6@g+Wh02 zmJQGG)b(H83NmWKW#FK#Rn0OGrmijchEafHcp-o3@iHkfKmhNDo8@>gqtvv$vD66B8UjWU`_lKB9}Y$exSXw3mBP#t!75FdX5P?%W9OA-16o z`8^|i%n`4cI)@dm%yo+><^>nJq?U6tK-_85s9SB!>7 ze46qDWcGaOgN`GzY~2fg-+1b#SS}ECtQEeLx=Z({i1MUV$}sc67IBymMF_W)6Elft zmIki(%VNT`pReMVv9>hY*vc#&z6tIUJWxZ|k-Qty=B@F=DU~!-BLmQt!hx6G<6EDT zG_ZG#Q4xCd2;oD%k%8!GtL}{9 z42$=rPVH~J@g&c>e`>>#cNN;K1!)Q;mE#0=gAZ|E`4Vbfw|!IDcL==^S?5E7khVe; z1Sk#^RaNf44wK1QPd_dCHaCCq{OTVy*E;~k!qHzPsY?6YTb}wP% z3I0V_xs|(xKeN9n{_xjBew&D)7l_3ntk5e$@du^9+4OYQy@tM$o}vo#OK&hBx@ai~ zO0!sV$5NUCh{YfLy4Kz^l|<0d_lqTb(ZxpG7SoSnsF2BjTke4pj60)#dQ*bA^$r$- z`pl~JnL69^RkJ&?O6e%UvS*`*PHEDiPhVZZYp}v>e$tfOV7PueG5D82UcnhFj7TiI z!v*2}1`Ba7eIKWzL~HAIDSfbX-tLpnj~}__Kf9}TS5uH9BrC75v(e3UB$M<5)1)(_Zj#NlxB{Ca(dSK@krryj-*2oq`O{FgN z-;oSccfoCT)oqtb$xEiTKSKAAtqXCFnj~dHm)abu$d#{(pwutcTBr~h=1{Pzcp-ak zKebiN-X!I_E|br+>q@lC`;l0jE{-T$o4wd25MRUWbStD$E*@2skV2r2qu@9qZ0)J8 
zna{liH`X3z$@su&M;eAt+_xj7%p+e4N$KoIB9;n|UhNDI5fW;tYhq4l{SskDR)5PU#`U>mjOgsL^)dq)^YZ5W zOMk0TA1u{f67Jq@t)<06iOZL$C2(Ruk0PmdS=TsS<{fZ`AGGe2;tzrR=o_5w8vD|> zZ)@>T3cY*n?l^NGET<7S4bc2;>{*N9?VQEOVs4sZO zU+mgB$v7EtWzT2v{LtAlDc~C((Y6uT#IhfSE!Ox`e&iNQlnSpZ!LXpF=CB}kbx7E4 z8ub)>rxvhs@Y0bVo)Zj{_v&VcOU zj^$8I)+a>M!LCli80&wu4A1XG@|n6-9d_kdJ2iMZFM5iXUcUa6gSGW=8t>DeprzC6 z#TAs?TUnv*@Qpdjv(Y5w*S%ZS*~6NjuKDd^RltZ&M-2>4{6%V+~}C5i|`V? zTiVReK}+atCx(Kr&W^PpJ!qINW%9fr$j5dN`_O*60OL}0>6e)cA;VzB=Jwz3l$p-a zv2;6pMJX~DbxA%^9%hUUuSy&rs;e8TAHNaNnj>`BfyjF&Xc`I_+}<{r?e?-&r)BAxRUom^bC&Vv6A&DzD(fyrgPJOUIJ_4xNe^%eLmca5)szL@K5|5 zL0i9?SX>z=LCnzhov+nI2-Q*yM0Xd$d>Q?O!F%I!!A4(c;TYlKsa20Mxdkr#<_ortBeO$0q(hPH_SG_{-c@l3?Yh_fTditI7Xcd4Rl_0woxBZGFjKA_*^3 zxE)g8`eKfZxOtw@_b|zI(dl5mAoBUh`h8Ss1(#@wb_7+pp`)J=mX(ME*j!Z)<9t8Z zNc7Foz}5emElkKtE8wy5Y-ohR`mXF&o1FC@+m~`zJ@ca@imK~yFlssJefoCi>DSgw}M}?u?}ddof7UwR=FvNyoljj{7EfauORXIsl}F$ zzjd*o3%OuGR6J>25Sfb63+Kq_M=KJK-V>dAJW4^S3q-U8y=rw@G+OL>z9K2Tf;H8s zQ`4MvU%1+oCMCG70N;JbaKKH+v0RV^GiP8F%3|!l%ZusVw%J>2B_jIK7-3zm<<7R| zjy``(yT2*>XFIsOttTj~lIG7MHB;^#7wWT|=ZFwLySp8j9W2rkR;8S}H8%6ugwDUl ze4pfI?GPPuny6W}Dj%kXJu`qe+oXW6zf_iFA15OdZ5r{)qP(IrW?A6@>RXA1pjQ@V zFyh=1{h^gCB%ZLR(ih8IaDUYNs*iGv_KJI4xZ7Nt9yl!pOC$y ze3^CX?_aM|47*hEnoyt=5-x`nU0s z-FHSJ>N3ty3*G;X*c~FQ71tUP_RuySdEl|@cHFHH%rDC4xi)Ao{2NCvUUd_v3GLFb zilF9ZB7JIUl(5H!{&`Fa0x|M!rqU!yp}!z0sCI8dOq<(gZq@A%_WJV6mXlZoCiWa` zke=7SPrl;ZqeAShJxFViH8M!3Ob#WW2{N*`eUi# zexnn+0T-t?CdS_BJ(Yr1JCy3j$}MEHXxhzrC*NTY|+HBQZ;H-CSYIR~Cgobvh$@!c5mCo*&P(EwtsU8plg_4_s8t zrlvpKill)c;d#2hwA4|&s9G$9+*V-bh?+Nf==3%j+4eP`PW&uso3u}IjFvpTh%~88 z;*O5p)Z)v1=@~8j!QWz`x^m&O@w*fL^^KMr{4bkR_)Z?{damjSILvM+Pal!8VJ}>L zJ=Iw2xb6zn!awFMdKVlV3?v424V{)85+2yYaI{aS7QI_;JbwLc!tAs0%Hj%E>vzMk z3KM<&?kaLD^qmH|8a(^I`+*V6_qkcTu6|q_9nK6UXt&oAxfC@V{+?hL=Unplub%o| zpsCMlf!W~?>>Bk#ZA6jFYOW{_>?Ud-Kdzg%+CccR+{lqnz*+6Gw(ok>iRZjvKQGTW z`mWyMv2cywHH&j}ec;kTyoD8J^_IBR%4x_~f5d`6?{-Pgg9?(5avk3o?>zhZ_i{7o z-LGF_g|?=F)46Hq__XmKTs%gTtGPAUa;4iSY%+cr^$O)h|0{nVw5@PGLH|{R{h#i% z(#8|*8$hlVa+>w_kT`n4j*=7GzWf(az_0PJKLyly0e?Cv%>4g^OPTL-F$e&3%|}(| zvFBP2c@|fn*ty%R7YKu%kBt#ZUOgSCoc(%DbMpK+r+YR_OlUOF;BO+zJLYX?s~(@AOq)ti z%HijC27K9Sk<2qL(|#GNS!&w6bo!!0?-%#ct%CI3bbqctO%``@NqW7C1JC!0GET`u zrfm5*a^7g5fwm3$<)`CEW1$7;(Y}!iHuzG8w|z>PLN!W0G=iY~rUXY-g&^-5b!(H+ zX1CW~(BF>x!}kNQ8Bt0?7q*YiV;6*CQqJzP@B}>k{IZq;#kYCldU3%IfgWCSfW5!a z67WoWAsylAJiWU+Ugem7yqa*eAJ{c*U0lDTyJQi^%iEN4VHbORYwBdB?T(pB+F9R1 zO1jZo`HzHRr|4)5VPVyAA9yeL?gebtzBV?d;WW8;-8vG2SfUj>+@nkjCW;z+i~C5>GUgqMGhw~!y$8z!P1)`3R@ZctrxWU>7M@pC+`c!e zw`s?m<|@qisq2V;y?nPEboPwYto8b;&j(Y`U(`8Pnb}A%=&jSQl(`%32aaT$Gza}Z zu1A?fMa7yVk8)5tOe?EvFZQ;qCoJs|owA)2su^T6uYyc!T*rUq7L#-T^Z1GLLdu1s z9>@0;r4DMwpkAWe4IrP>QpoEeGy`J`@h!&gC32gYLJh|%g%kxN_hvlg45BKcph~l0 zFowA{W}=6Y;3#I2qbYne9{N}NHoyBPTDMV^ZEj>|@q0S+nco^Irol=+_qBH|EJk>r zohhHWu8I9KO;TXG{q_}+G!|>YRM%RgS)xjiSBMk?LzqjV@>$CuQzr>3|A9;XO=FWz zLBTy`?+l_RlD?PY`IT$;u2jw0fVOtyrS3i$;Ns2 zJDuh>6qECl6_SkAl{}UOJJO%`gm1@dQV0D$Y<=rYBuys?CL=D}#a9+#F85xXHQj7L!-h6{r~Ft8hmzq6>ZHGlr~abJWNY{ zhc2wtiH0Rr`S04^E2Sc(q7s2T7QMq%R5Uc?h^qI8-$^!-T_4db`b(MUR^R*g(n_&N z>i-6_!!SEQ&VyccXW4X(Z6Y9?a&hNo{AC@v%2B}O>6a9vTngnv-QP2bowenZ`J`vtdcO?5`S4teC4uGad}9gJDFw!9`?j;cL5~%5wE0_L z<{Hb9qSe_LSlZ&B z^Nn8Ij7SD;ay(@0BA}5LslY_?x^vtvG6F(-GTCpfoic1p zZ4rF%%ECF#=^6Qp206?ox`zJc|9{88wD3xkxWx8=J>gOAt zF3e&|wd`TJ-GvwzfIBUBw|sRrJf`p*m(EMaEcaK?t#9(M-Zk1V-lg$D`_Xh=oX9c! 
z^~o*8?M~2!5~%HRP4H%Z|H`cC@N4ka*Wg305Sk8{_Or8}wAz48OZ5{$#4YEbR@XPoP*48}WcE=2G9S&hY zr?>qrN(x+Mzd5pKJ9RcMu>kTk2>A2moe008XYQ}L84Q4gT=-TBQX~fQh#@rOdOhTs zj7;FJvw1*2f5ZE+=^XbG1aMOT$M~nxH#YzO)&KnNfBDt<&HA&Ep5f8v7!Yu%z0i7I Ju4obde*h^b14RG; literal 0 HcmV?d00001 diff --git a/PyTorch/SpeechRecognition/QuartzNet/img/tcs_conv.png b/PyTorch/SpeechRecognition/QuartzNet/img/tcs_conv.png new file mode 100644 index 0000000000000000000000000000000000000000..748f92093b0f3683c33be21c3a7e28ff8a27446f GIT binary patch literal 37934 zcmb@t1yEIO|2Dc25CsHj0SPxH-CatDbhorLh;%9--6bvEAl)G?Dc#-OUFU}9ea`&f z?>pbjIWuR?IBxb{Yu)kd>v!G$veKe25b+Qp5XcL0F(G*f1Qr1Tf$4ho1bh>P4jt?diyVYBdo@_ad9c66CuA`&Mn>61%$pAT?LivwFji7LEW+i z3(@nTLo?$#9&Y<{ypZtl@T-0wKV+>_TyeVC{mC-X`AymUwmd(UFNaI*6BDZKW;L4q zlxEPO^q`=iEoO92jVjAcMqJYdRjhFGy_uThb{w6Nxl+A$M0*^x@Wz|dah(beo-Z3O z2gEX9l!Y_=y%12>R#rylps_tp+X!fzJtg{`ruVAy@-58hCZ0-YCQw@1!aI?ITR za+~Fj^LdZ^K>Eh6?(U&;9Z{e!c~j*k!x3bX$!=$pU0q@|nvJfPN76}LFse=NZkCpo zcFRdsRSuE-+KsL?zlkg?EVl1|BqY=s{6TNWZZA=EXYm#s=OeM0Z=8L{SK*F&+t0nv z!Nuk5;zGphT%MD&u{D}+)pTobU|`_x-q?6gXS$J^mq%6ZI5{~vI5-I8BrPE^RcmJ^ zEbQ}vT%+1r*T&{Bj@kIbhYto!ae;wIo#NgkczAe1Lc*e=qQk?(I{RHFR@NjgyS&g)8AHQNpxq@UC5>g4 zW-FhaG&K{wyjp|t+1bn|WaZ=rQw4ohblfUROOL=?z@1>c**b^VKL$J`oASHNL)A9R zgf2UbyyW`C5=0EFtV%bi9%nm~1qC!17?EJM;C{>L%H337LOhmFf1@ZBXlQ6olB|Z+ zos|^T)Hb)bHB?nOY?j)D1F<)`3G%#@dr52G&M2F-Os=qLvIrL z?(HokDvE=HgMfgL5EplQb38!6^=08)D{T#r*{JvDdoN(BN5{v^%*-CQrz#p6)6X%; zCnhI1xhuvgpl{@;&3^@2nYSAs!_|}AR{%k*;1Q78yg!0XrZGBbm+$ia-?U^AlB>sNqlZs z=98s{zrVo)q0weDn4+EkjE;&b(5xxdX>xZt+o1#-d~opBK}5)6*!ww#fS{n&T)orP z$>vlnzL@Z*PoJ9HZ^sHXVpCG~7Z+3C`um9q03i+Vp3lV4YJ3Kltt~SX(@7HA&-t3P zfPjFAh=`MuljHRP>LQM#5y1?60&e>qpaa&{zksGBST#;Iy1BR+k3d&eS6A28_)h)` z=H})C-CHSt8D2?wH#;ye5E>Lz?|7Kz@Bc@`sv)y8|He5#VCC~=Zya-q%U+Eyk-OtU z^Vcg0uPz24;80v3mmiFfq2Xchmc+zF9)qrzZtI!spKDxKyQ4WdIjL2O;23O{gxrjU z3{;=MK3p)ESXj}qu~lZ{()1=9>xk;w+S*{D;720&3*83NcS04b>+AHYzZQTD2OXeS zz~X}&-L93Dlp2ovxq;K|`a9d?alhYm?|yUGdHVTFlCm%s7S{FIF1P!QBQU+Q#n^RH~yOgK^>j>k&%&{oVQ5?Dh^6=5)$2~+vDu! z6V1+GSFBU_y`P65L@Ov+1*M9cz}i!)AEtNoH|iIN?dw*jO8 zTn?Dve#3b(3W}%@)XC8i4gsN9r>Rk@{}mXSfb(+(@BjD0^9BSjDXEtRZVR&0E_x0DbPSdLL!Dv z>#7YyW~R=exU^J$e)DB`ExB|ehviH)*pH|v%tAu3I>T-Z8J_US$n=D(5Wn)@U0t76 zy25=?iN+kD=o4ke1O6Bk?!Z#DSxHQTySuyHuXn5Hv})_mrp(m2yeroNz%cR(3MvJv zhqveR$8bU;&V9+eF8zs|^bIFNg&I|0Dr==>Wy3>5H#axH%{4Ql%Wbn7_a}mgYSnlt zB6(OLk7a0VTvt<*nVI>=fN5@1Vq^*)4vt2v7RXGME_Rz6N6Nt17!|m*`9`;TQ<0IU zG^)Qo0;N?c)*cnHE>sjY8!K?ST#a=q#+B2ENJ`q>*_pw+E>&(9tRw zjf{*QG=$UENLxGETQ{>H0IcTt__(LLTO^c_R#z2j`li-l{~=nnv)YJn zS?z6YuBW3a4+&6tS8FQ?G4Z<=PgEr(C2{n(q@*h=e@6|HB$q8 zebVUmQ@HeH`Jv}{EGu5fmb`>AG%{)fXM-&C_oO{2OFHAgrJ^i-4 zH9eGXs#u3#D|46!n2S_$xFeTopreRsnR2L^p0C5ao5ShW=n^VvWpPeM#tMiTRMgZM zDTrtlbXs-xQc_a9Zdc`v7`bm^dmh{#u#Zh{H0V%8)x0}58=JDh%Mzu*0=2U5$hc6Z zXJ{4jz@Gw7H{yUT0c}e7b+NQdpp`nCCKo_UEoYLU#bd{E-=F=B= zq~zc^Qy3T+kRxR_mO&sSBWR3+Pbs{mnXmEWOUy#csTcuvNpU^0xX? 
z46x3L3CEp@5|Cbi$vS^nF07gh2n`+Ae+c{u#LlAOAFJdnK6K3Q62 zc3z%odjQrIm$Rbc2*|FTSX2BkE$8ZUC)QoeniVDd`sE z;+sc)LL*dg@bFkTIPB+IeMJF{z^$L{PJ=&$42Zwnyc;v@iBVHm-+H`pVq)SIE-nau zY};CWkC6?y!kxv%!2$=g@Wrs6RlHtxGuHx;L~3)Bq&)>0OAsEb1YK5O`YJUhG(sHr0XVqg+pF#3t)@&|B{}evk`fk0 zfS;IWU|pXeJk>6&cULsC*w&QRV9S#%6#N;>RL zW%eg;DGP&)A%)waXbl%F{HwUQ_@MRy*v?|DI%Stb{~TJi@=t?66-FnARg;o{J3~T3 zYR?p+?V9sg?lA0Pj^@WpXr94S4%uLZYg}f1?zl;OR`k?!^8TxrN_t%(Ap)dqy!p3A z>$8!7dXO5b2hhYmSn^g;uvjbMk* zz>|)uZPji<>w$;3xjdGPrHh6#n!eweVK63u%t&-*dihOyMF=FKdWn97wdEA*Q&Lwq z)z^=Vi6Ot78a6R83FSTeQO+p7kL~-j0U5%yWUWg?M*!!!qVVb>!~Hm|l7x@1(c}KE z<(h?8B`P7|8id;~`*T`p1v#0Y?iaobP>Tlr2)yKP86t#)6pZW3kJtl4<-6V~({VlY zVS^sKPgG`K1+6RucWSr(+=PU=qmenB{=lrDoOeBzio9mg!r}!fULX#A43x-pkjq|# zf*aE1H|#()qRewXW4k>rsbh!$4=+v_sg6maUyTCq*?rwrm$h%6Q^>%~Z1y=NtN7>Y z$_glzB*RfFlxs{lN}ih-%gI0EW}m3jJ)r3_KTX1}aAb<3;o;Fd`UP^Zh|UaUQWBI- z578-3i1oquc~ZxqAzO(NDpd`Qp4HEbW>-=JSRlues*TE7rRwf1+=eVDY_pY@>b<@~ zX`&Y%dai2k6-k6>wdnfFUBjC>^{P8cJ@V-2$cQ(-L>z?tH6J7#Qe-jOzqg2O!jN~q z(i>BCMlK?+-BCKS#=|3#Dfl@q7wx&uTm!6(mbYx&$RrCpb|%aA=bPy3Wrl5q9rlmX zET%{CiCT4zjw>j^Yq5M^i0;l-#&4o=l0wlrsP+iXtu%GjXnHr*e<=)vhV;jl(=%7qjmAcN3yC zePhVmnm~(vC^uR-|Iex>zN0MVRZu@fl1rgar26N*B_|h2+O{6SGo+xxd6{Qq{w@1X zOW_t$?G6KZI*?a3_#2-v{Uv^gf`%HF9EJgtnw(s?;-H?M9;nPFOlQdp69$_*z7Cse zXA21?KWDb>y{KLbGBEmzOSJ6k6lM&ifUk5_WqAG&HJ?0r0!Np|Y6IUlK3=63s+6wW zHUW2^bE;|EK3+Vv9`)_DK}YKZT#=yS{5B5+B7qoU(U}BeoDD^s$?4Aou8zntgNn~ zA&C{N9!16t`ALj0&l?!P^do+p(m&uRnmv8@E@$vr)o?|^n+gyZKIs*0q}<;a zTUr)nP|5`k8+#x5EG37SW}0fAGwi-~;~VJ^bL^P_0eN|OIWs_nHmnz0@Ec^IYk;*O z6tb6J@}V{}sj@VUP50!{X@IK z`pgKsdl#Dx@^RODGat{*s*|pdPt(h|DqzT{Gj&W4B$~{|{g_QHcH-jxnsn;{r51C4 zi8Dl&jhNV)?4p1+kprDvnSRPNsQDp}n6)m=;bO&b<=&nD7R1M+t60j=M zvgXtVZ#p;Jvj-x*EW?2~U-(_T8j%MjulPsB94b6k(~+jTQ~KQF)R>$GWPR-?p47hK z=vrliQ_mqq;s^A22S0lyc)c!ixTXVF5A_ zRMm_!5>t&VVCA978=^&|yH~Zb1c3-93ryTEHgg$}HO~ckC~1Y-CXba4tZd+URa9o8XNL{k$`Eo>|-5cBm{T1+D%B6bAf zlJH?xxY}F(XhdwHY4h)6*!-!9XK_^5&59ptAzSE3)vZX<1?H^}8=iC%WjjQ&mO3D2 z?u2plp^JHYXV{jQyfm{iSpC<>ugy7svC7THi?A@o7!|||y^|7-y4`jwjtS}+mm}lj z3?yAnaQ%M{u(}?E1v6`_hL%X0D#L7Wz7+h%V3*mUrXZ;zD{FmmpnGToaFRBJROPlY zc$7Ja^#w)L3_OGxsaMTce7||`w;{5bagf%x1)YVJ{KYrl#(oZn8IF@c##4VZJZ0u9 zD$8Q4AZww{dESR*Xk_%+B#g~u%s3dOPi?$eq_fd_hZ^&*8B4uLzRSrO(}lF>KAhco z_SoLt?KKGAZU8J0?x=A&ryCHn2=!u*VP)z>X5sPKVa)TlGW0TL)1P$1H~tW_^Oa=; z(R{A{GMF9DV*F)ZK&91ua%^n2Pn2Jm=a-GYi)(!eOUpq-{K>|oP;^RKTB!#s*#gO+ znp%Tog^*C6u7SbH^?s8idO+1GM!1-=vhtI30XJb1k)W5a$e{ARB_)TTKyU9v(>C<> z6C-#i#+!3wl}K;kSfNl>i^)=9*RZ3{tb|wZgh&N22HulE(XBukbS3@O?-|5?XW|V5 zl_Hj~h=@cIGFtfeESZ#|eN4ZcQAufO>{nQM0rd?HnbP0=kmUeOaY0N@PQK*z674xy z2uOUs#2}zm=spONh?J62-1Q}t$yy=n1KPuLU|ke;OhN{Jq??r?At9E|6i_Gt^DLjF z?^KdQ3-zm5(JEfH%ohXAC6kO1dmP-=jn#Pw3r&tzPEHOiQfUwX2&50Ncm(O}SaNdm zEH}S!8D*yQ%Uc-!eICgI-N!u3#v{wG5k=!^3^M0%Jj>N3qMpQvoV^( z_j!k3goI9?D+vQ=uF7gYW`zL+-6>FAmjp+^kwCS6u$V+Brh>P%AuuvA0i8rALhwvT zm@WzSkMM>wd36Wnt28c?l_De}+B{uE~05}(6_58&$Kx)j* z#l^-(HKKxvgOly!>esaZ5Sc(!d2MYvzGrB-)xEtRzRKBaV1$qPeE+`14G)DvCaUFf z5=3*T0)m2~xNxD+7l%i``RlK(qk%ejFQM`53PZfo7NxfN^ttUhY-0VZVy}HGLJ#J_jJ6P{lz%F$8j{_>;@^ zlx^-nJal^vnRpgTMn<_j(1L&k>J@>BPMbCzPlhI!PqL^p9W!Rp7Xh?+uun%LwI=P? 
zF9r_}kLReB=1tze{lo%8ESecpMc=Z8D$b4p>;auf^lLUr+L+-fhsn|}u*8!fu$mEp z|MBseM-zHpKn#r<6zG%?-q8a}jVLs-B`hoqqKuL-wQ@li2M&};EaHoXi?ee8STm`o z%3nXQo0~x4&<93Gl^!e{WT6p2SWH6CpFdyn5*sR&0(%3@{$X$Aq@_bf{=5u-gU9Du zeL)N$hsWI+t_L&n^UI+fte?I*TJ1pxJ*B6AIy*&O!_>-*9(I@XT@F=gNy(rZq0q>8 z(B<+&)iW?4eb5_@zPdtymOjW^Oc03r!TQyC!I#TROaE97fORj3K{5Y0R8Ma&=0Aea z`t&KMQGpox#4{M*htQSw|G`xRV?25ZyGLgLkXuZmwzf8*M`!TptRC$hz}PPzb%S&c zR6p<{lMgCI@u!vRLAR!i3ijc|O!k|?t>Lf?Sv3qQf7uESz#I@FktE&ws`H{izVmwN z4wxRD@sBU0`Z8P@mNP36bo;etjl+DLbhn|D-!5?kJ*$Ua0+Q+X{q;p{zU~0CN>zJ7 z6_pGA7x>1!D`lq^n%toem7~bOIvQuoQvq9HU0q$S9lP!%&&5(d7nT>L7qaeZ+V6YBC)r{jfz&J)9l}OCH?J0D#CJmW8>4| zd$FMY*{|5g!II2_)5HTq4(J&wmX-qJ^b;1dX{%bW5VLG9e91kiGr~|mC}ki!Gk+au zZpcS>!fc)=Xe%`G-C&^HXSg>Z!os4-F9(&r_sKp76=K{7 z5WZ!`y=bGAd0S$TEq!Lb;>+VnK?GA?a&PlVKfs0{JEGr}4QeYJ6A7bUl-|CjYfimi zl!xo>Wn4E?omw7X6$=$Upzmt>>Sq<^{IrAInjj#xOm{hG>B(!gP^4=}a}9q7-PcSv zX1-_sZI;`WsSKzPE(e9#yYr`s(>@UbuM(O|h`#f{cQ)GD%ii?CTbq-FSio)BBfae^ z3$sUAzk8>R*IT&2i|r=TJSKdqdMM||?K#5C!0g-k_JZFlBx$tIW5pFf5FW1IIJe{H zIN4uNu)W!(kwDZn3H8k$9-EKrooR0(9V%*s` zvbP<-hX`?P{6dAyIjFk-+7gR2{7fOVZ^a}a*xlHem;eE=9CRjWR|HKd>Dw!8IgXei zCoTp3e`5^0#J|>HCEi#!y>>F;IAGOlWi<3D-C*2>Q1H*xJa;0esY_K*S;fz{GN7jJ zm~UF@M#G%@X2{NQnOgqL#5nxR6J~y;O0>6{oZs~Eb(f!jr)ooxuESgr({Y;JylaUJ z+B9ztpQE^inh-tPwYOwXa~BfEt>t)o%$c^LsQND#;2wc)M|dIdAR(vr*vE~d5-n@Z z`bco~HvUQ^*++}$4L2H-2FK-_`=>>(MMPd-1-y6<@P; zn4ghJNAv{}HMS=zi4$;;m@_c6$s1|Qjz4{lOFGuY6=O;lDq+UIr*KiUyBY(Ib*tUc zWbP4+uua_p_L2`0f{r~t4sZWBd-H*ttzN^4l5VH z|E@X#v_`o3HkL(GeRmane>WU~P(zau1i%$>^B1C6^{>O^yi>=`Uu+BT~vHePT^fmow*V(1)DfFe8}0AtYQ8F(umMT7)X=9*L~@T!1Lx>->(B{bEzV>Ckue!O7^DaJX5ID7%tUGHoiGyM3jk=O>p7UV`4@ zemBN99;E5H--X*8SCXy9UQaIvL~h;~v@M>XAg2r;@Oc#dj&5aO`#oc2mRAM4ZOw=5 z+vt^)V0sy0ozb5>O4V(>%0TErK$z}{U@M7bD8{K}e9zAZo|!neg1b-K%O>7k9M;uf z>9K?;)q-dpKjL6L!A4$({y=~n6^zXaOa)Ze)*?P+E85zL{llz1maoo&jzY&@buptgV&m() zzYFH7i;jh|zrZozN(#wEC@K)C(oss6wMMR(Ikx zWa22RR3WcYW3$^<_BJF_ZUDAV#bZ5Iq- zna(3XK5&mreliI~iGe8M_(5so9;*XTh9V6nAb=O(Y9Ac_=MUT5CM&L|M+^TBMHU#RzXSUhFe(i3w> z_@fV_=Xo@e(?28;AV1AWY-vrq>$03rb$$#uX6Ur7ip|vc!`N93_dES+Ns*Enb*MoR z)>eltGYp6qL5@VJroh4X9sv<*%;ES&%a6qWnpWR8sC;!m+W#GD-&9If z`wP|ALqLu3D#XWvexiDTNQ9#SRwXnSU*QmbASWqC-ZJW*892pB{};U?>>}YA@uP$l zsymgIvw6k3vyp;EfjLCs9wt+gHrvRX13GD^4;KQ5vx&mLK>@tTM%tFxCz; ztg;yE(>);Hcl5g!HbBABA};o;vm5+K$FJ$yOi8C-)=7F1%7fpSS19e~wbiX++Vgym ze6$IlXMD~jL_NxVYqntO){P5s{V1l<3`Qkce0%VdpZxx}5;Fxqc)}y|x~KtWp5+#E?vu4A%{n?GmqRgV zzRcVQs3dSt&3LJY*z7f`>{l~Ef`X9{mgML0iV{aIUpqP7zRdQ$bf4lE;X*tyMubQ^ zVb&v0ijTW+PFbHzN=(Td!mTld)%PlKc@!#Cri!YVMt?Ne@u{3D)M`C7V<&rjEjiUT z7Ud6(#XslKW(*alCbUN`#Ivxc0j#Z zH71Nc#%l7PK~^w3HNRczFv7t7^%i3uOLCBVF!PCY_CLFNy^TYBXhnMWV*Sqgs5QyF zZ3o_S5J{;nmF!%MAwp*C=F=1!GyiX^KAFN^W0ecBa0vLX*334N)Ya9s4Iz%zEarkZBkRgzyqD(V zFb40|U2M7C?-nUBp1d1%!(C(wozD5 zlTBh6749cM_r}J?pkA5{&PsVMe*vGti3@6~r5qZ$7IdVoXl~l;o3_R?XoyQ!6E8A#ME&&H+6pHa749_@HAhn-kcncz_bh=~!44 z=P6&J!8CUS2O*N?PV0*)w`IN{juhY!Go0HlakyI%F5 z;X(+w|9P|!_5s+bUs*Bu51Irz`TnSn53{A_y``b~VC?i#4!_cVSN-rkV1l(Fyn({9 z|92HA^j+++=G(|}fI`t%*4D@wKnDzr4hEG&dUJCtkwc|kYTyr`3^bFj@o^O}2f2J4 zEG)7I#IaDdL|{qNuZviG$YOVMlMe8gF8Rc*$Q=Gj@T z2o9kPNqHsI|A}md8`SzefNGxx(}Y3y7@H^?03iSo4h}}_!xEpfXfSUoKlBsRc*NoY z_xs8~BLg5g{>V_>s?!trWh0u`6iNW{tyJSH-dub4h$cm@;1i;SJGS8be%QkQsm!T) zUDMMVTLykzFCSGJSPk@1wf^60^{1W~+9fYQ9(}ai{=Pn`k5LbxrX3hM4O`&iI{KfQ z`q5y+ii?W@K9;6TPemp10PLxk==rqOybOOIkug0rJgfk4Feps@HxYn#fova48jx`x zK)&$U*w{yqFJk4-oBwZ!HGr-G$*c5nl%vB#G$I4QkpZ~fBPP?!do#a#>P(P{mF%*TQ z(A?a$98#xeA*)~fR=Da{i2^c&nQ&g@m)cYNTzpbY>p_tyBjqPfO;wHXvn;qE+qcy- zTw6a2wUEH#>YEKSc1R?{A1`j{yUXkFouK-oAw*G?y6A13xE`*z-=&EHUfeP+Im;_M 
znFt%>Pv*TSR-JZGz84Jl_RfRi<0k+v+cpgr)HFY1tFq_;l<#~w(_4MI*~7oGjy*D` zYF)>0W}vt8r_Ff&M?rDhiDlOnq z|L%vNe95%BqqO3=IazIW#xDh-f!X?|uHP%%j0?&27ybzW&CSh#Ei0CUiWW{TARzGA zRH0G-Iuub#AA6hi*5CE!ny2AQTBHj*M7(4i^&ag-^GL7I0Oi&|=0<)1PEV6d$-L}2 z7$sfTxbmLfbwhQR&*fs=WBySWwX&OGU7D_uA@IwO)h7SkGUU_#92*;*;z~)- z+^a7K{r@(6xmz~qs#hA=V^T9vlxx>-8yUNqR~}h zQHb*0@=daEZg!@k-);lG?_8^OYMKLo?3ykaq`ZCcVxk+95$(%l_O zuJoPM7ln-7ExW>|yq+2To#)hq19^9zTp56~9@=pY@5oYf-xs8z+ayAGz3nRZ{%~Vc z&`YH^hxcmtS|iO~`1<19>3sU**WG|D6G#aK23B$Y^fVZK#mJe^Ghx#yAoVGI$!KhH zRK@W%ERHlO*pNC$&ZpA2EcR;*tnM%)ERQZM0eBqJBmmcPmvFXh4 z@cX_h(nJ|8jx$({Vf+5Q**-&pp~y0R54+oyny>FA$=}+Ct^|>A zVW`f)+MRo{t9$P~?OjRE&x^YADofSl&M1!%LL8R*-_)Nt*)81;dX}~X3!D$@CL;ZH z>My5d1ZU#yDzPX92g^ZmQah22DM2wjR) z9?{>SskF0DSW#z~YKJOSLv~+n`wEW}yb=6&YHB`9oeF3;SPnzMNO(m@9X9VZVUR^Nqne<_R4={umD+6Al-r7u-&^#^MC2)$E^>U}2q`?T`J*>(93SB4?zb zxbL;`p|^w@G8Ko1`bsT3bZ)BgxLHmcB40f7SH`F?;6Rw+A#dJp?lIo7KDXn;z;Za3 z4!@ACvNgs2T2D={e%`;26QUzGnO$F=_J_k1+mxy<2{{xdEJcIC_+UD?fsC71Ci96x zWNzG_U!B*eF7@|XPJ`L>Zos|Yj*r$|U7Qvq*P#;+jAmd)wgrzQK)NNV*Iww%Eui=~ zC?cul(Q|UDeN_BZ^uOj$Eu)(8S*qBatMbk!;&F>mbmz*3!9WF zAY?K6ZwCq-ii*A9ut->cH*TNRv6_ixlU+2{RHw~+%|=)g=Z@4&oxgq&sxbGJ)oG4o z^gPkb%i^34;_SrPSZQRbe=T-o5R^>Vop5?&i|Bnm!Yw?wykL6XKTgk5?e%>8t70oS zfdnVwv-ZEz60nIf`pU{HH;5LVFNEsgS$?giwmNICkj##RT!bDM()oXRCpTo6y{=ht9#r~J;)TqP)vho$ z!#79UQld@E8$`DQ=DW38==xD|CO03Qu;uSV zQ@ZwBCFt|~o0IO5Q>j-pQ+h2AWF-bvU$lz8@wh>swDL<>vd>$Q93LPGMzA&**i;Mb z$S3**oDOg#;dSXrGj-f&rFD@yj%Np%`X0&GzJvT!`znMpHhL%JsZydQjiKyxw;%63 zJTBg@tb9dkB|P6=Gtf|0PX0`J+kL4x0MkI;_o86VwpnZd#4C@Aohn><-Zgg3PlH?c zg)>x5cH@%`8Y-e@!0%3u+YSh6J^9+Zg@1l%y z5Dlk{`H1DJI&aG(=Z=I2mK>R*_IyW%XkRR(Zmy?J)YweTNlkbsw+&+vCs~KDP{$UV zg18p9R9(d9fJ1z*~!Vu)Yulo_Q$KK_!|7N8?-Zx5fJWSH|_4i3Au+o zx{vkav8;Q)+`BaqQBu9VTTBnz3NF5xFY^_UX;1!ebJzM3WP$Qp19^ROe~c!lF8ATw z&|H`pK^D22O4zV#c3*wa`jb8Tp8Hb%>S{pXwgs?Tf}(cPzAYu#c~F@084I>Q^j#MzHpj70c|ct~y@H7A%>lK}2{NKban+#7|VX` zRNvI$uoq0)a*NWvKJYpXw0HL)zWiL9<}uURG(e(&fa)|NdZ85@*W_B;>8Z`##2=QN zxVyA@FjGo#eQ|no=JvdHrXHa=Pxkh(&}ZAK*t3D`HJfOl;7gLQPx$ zW%Ym-xgT=kV~UOW>eX8Y2~g<*B8AkxkPzvoe~BC56y;-0hlqj#JWGAO<|7LtE>57Q zwCw)`{Cp>(6BEjC8P1j&e_)by0GiR<+#G1!`%V4&7x;N7VR1RAsZ&>H|0|IJ5YFZC zI@15`a{imE@_;o>J$Zlb(N)94```~A$+nckr8$+x8;?v=7jGwdsyuE z047M-WJ?x>Ye%b{-`)^$AB}dsBPjo``qMgAnZ@7CZ*f0|4*7x->EUQq2_P2u&5q%{ zr7k+wU!=vHYY7HW`Y{!u+TR_|OZG5AB#478OKt$u(n})$-(|#Q7H2N_Uud6>_k%uK z(H^ic4sRM2Y{5~)&$3~y5nY~QqZYg-Lm9j-XPSJGGbcqtBYM?!b&}{5oN62UjgP#j z2QG$q1Op4ny8gijNSFBU&E4uRFCltodK>2Dv8?n_UhJ>y2m6tgrTe5oTfn~fiRbUK z_}jUT6Bp!h`k(c1m2?s9m@$#xlJ==3p-uj*SlUxgiE|;A$y^d%ZU05vNCb>LaQKuT zhsf7&itrJJ=$)U}G0rU)ucZ6&3mNoA?o|N9zMQ~NdpRszHvY-+OY|Qs@$y*Rj?N)p ziy$5(jX5FlHwDfgk3LUuj3aYi|DKd_hzoo5FYzw-5t0FXp#&zw0HD=Xr;p^Sj(4Yc zK`;uC1^e1Y1rrPOke?YZA>VmG=QnLk_+PdLfUN!#wA52DrT7o!K*vbSiiZcZN=<{c zTe8MY-8Cz9E|(r{vSwbu$Sb{x_>^Wv4G@S)kQTYBjqbx%#dy`& zeJyD}@@IXa>cB(&6mR~k9S6~Oz{k|bY)nCLB8LE0&cRi@+mOD^XC(Q72~!rVf4Kn7 z6L2B7f2WDuL`F8bsOZ}>pK09iK@*cr3jdh>)(GfE1_mw zz&F;zuaa>zXkA{bT*6cF*$sSs@gb;vNBCmuyKfF7Z6Q#7WaTek%G1;JA6w|)Xm@#7 z=+{~sTu5)PS>@|H-0Vs~Z$hd)6R|`DTz}Z@gNyBrT~tUG!NLl{yUE!C=u%ToedxE> zs;8cDoTbHj;|R}O(O{Eh_dafa`=&Wl`xJtr&5&0C(Jp>3c7spm(-kDjj=I`$B(KSf z;)I>$rdqZ>%hUF~bq29W5awqcMQ?9-k}5@H^)n&Y;8HzR?$idd(jwwh;e9S;aMd%0QYjKPjJ zJc1Rn3$`@iqVOHC%BSfXeJyIRStnnKvvdCnV~ghTD%Uv7x?EaLDa?z6ACU9)wN1H* zB4Ub*t%kYL(4K<>zyHlKp!J8{4*T*1QZGVX)wZJ)BNV{-3UvBQcC`#BphpOXN(RFi zWqh!nqsw@HacBsDCa@;C8hR)F$AbMDs&Kcg}&8SvvsM!VL+D zZRCa>5dzgOMA!GEvV}(fNe!YNVCNi0 zSFQr(s5ym3;1hIomRw&!erWtQ4EuZ?o&xi6hgew;A< zqa{2f?og^p&In1;<>%NbC19k>HF9Wem&QXe5|X7l%Zuy+6MKj`94U3Eiwrm~M=AXe z7AmU?yvbXr;sZu(w((ZfAb_`Qne~d7 
zDMPiF=dgGj{~P-iPDH%mk7x`D!je=K3f|i1(5>NR{E*;0;n(Dv zXdWEyhn4MjA2r%XVYXe-5|nF{J@9z-+#>{UR5(-uCmbBc2<)Y{tIWSSlikPJV0+y` z4hj1rUz1rY({qewR|fU*_Ra#wJ^qX6sPY$_I?s^qkX4?*kzFwy?zQw zNbc^DuQ=TaElch4Q<^Ob@nA7t8sLT4TXsncNwCy9J#gn<;dyT}0^ic(`*9!GnWeT* zrW=N>u*?n8BMs)P=z9&;6R+))AZok>=v|=SJ=@LM4gL$vx1$DHZw3nDTrk37vp_$$ zP~2seg4T4)^cPT*7ChMO#TBEB#E#Zs2=OH;Z13P1oM(b~vlnIld**l) zRzrAa6gbZo4PYmYDH_bMii0ER`Mo~L+66l@GThT&P@Z7ojEp1-=NHa%K&qXTF7++( zAUrgEPkMxeG7$bDM%xPi)0B7u`LcyL;=<-9;7$&icut!|j9bijajnw!Jw4+Ybm0kqr7u36zD1;U!UVdTrc*W4D~)`{$9JBra2XzY>ejV{j+Um`Aev?Mk{wz zER~MxscfUH8Wr>XQ$#BmPoIifq71esafYgvuf>P!ct7IwbKt7vant<#o*ljq3L4pC zB7vHEgLcl>uU|nE1oCkn!Dub#t)Q!7(k`w|cc$k^T(U{bw;2&8CkDz@=EnZBJ=Ck6 z(KsARx}4*5iNn;5BXREWFNsg>JPv-8wduc@iN_)GS;mhp&QHN`NZ-RWwu`K2$U?+ZRLqEfyRf`fQfbLo6Vkb9WKEj-zt~ z9ppK0TmSSyJgE#Q#+>8}mQtMJ3b$sh_h7{M;31Z+@qFDS!CiGvL&JZ47BG3!@BJi4 ztv`EyS?mJ;Pxh}<3y$mG{Vznw-_imrDMe@8E{mtuc>zv&B-%aG@!b&u{2F^p>w^bA zTr6}OaY$cx^2Oc2&m_v7*+HwT1> z)%Qh@UO)r{q(NE`1nKUOMnbw9q@}yNK|s2@Q@T@1x>LGaQu=NDe1Gqa@y7e_IUHQ% z;yvf=z4lsjueJ7^(hD1_0gPdlCo93oVqcwy9l6c(?ryZrYRYq0;-Yg1$^?22hftw2*k&M?|i z(oHdzRSpZP(ylwzVXpNRf><1{=rri=d_*=p6YX_AWNkq1E02sF6i_N@?aF;woPbr> z-Ucc9@UQqf^`{e#_+QnQ?Nf0!;5~Uww6r_X0h+HtO8RfNf@US zc%7T3G{fR(d5ZM&W0snf(#DE^3~@P;*sX|SZ08Vxc4U`V-M+%2=1aX^3TYm=vc`p^ zF01S_*uC+KJ^AIw`pYgaGQh{Pz&0q>#Oc?|OI8^y4$l57%G`q!1!848Dd=*D1_4fcBz;2gmRrvB*?l|0w=pyp=o5>n919zSaSJvEvQ9Kf>O`qle?3de6P+g?MS zYrwAn!w1guae(Ja+V`kUJ;LWYBpPe5`Ts}hIG~vHIY~80VgX87xcUFe$It0%jiWv% zG&EK^N^xJ2PH8Er=W_A$E(#p|-ut(Unkm)!C}nFOuazW<8QlHS!LY`P37X%)J=U9dZoFMF=yN)=n07tPeoq=T$_n|52#RXk!}g(in5`J?79xC5L$>XP{P{Pmev zkslH!z|I?F9it+>GRU8^#SrFv6=afKJ4lzIZ%+Q1NAici^l|5$ zf7$E+&TvjrMM0x`saN$=k()+Ibli6*PaRn!`Qspd+6~KfGX;F|K+j5^>AFG>oU;-Bw8mrO>WkLtnkMo%hYF&#Lxir+duPjx&{%Acy z2$sqS;pSO1pBQ*J*+@ky+ZgJ}PNm*>t57s;^HRB|Dycr^%J<1G{%T8*^@09?LQKvl zPVXgOG_9c3mubvj&&TWF#`62f#z@azRsIuSd-DW!etaO?=i!7I*X~}<0=^+kb+tr| zGs+L_%$)W4O`rQdE{^m}EcFUkvc6*!LlwbIIdGGvo^p^iE=2pDCB$V`op(Bo z_fVP^88!&+ZW*~5uZKp5!&*y7lCWc`z7=Zesy!p_R#imF^cLR$Z)WFlxWV&yMv&GE z7(hPH5CNB^cVomro>1qOOgi+~ei5UM+=E>-qOIimN&dDWwxfz`Pls^53b7EgxdE1v zVdrZHhfyN(?Jot}JBHXyR(-`f^x&Q742`*36UeV(2@GuK(4{0`fH+?KWn>wnsr}1k zkpdgb-KQOisWp9*D{mAMHAw$@?!Pf>Ms^xw+#NWy+Z&5GqliOpuYq==hWu`f=OjB+=5E=iUDR35j(ctFbu%G*S;NQ;zAzG%xt}AO*XkWue^PrFuYwy$1 zNkc8ge1UD4hlQ`b@`t0ka*bq!=!$QJ=D2h6i+1|oZsLwqrDZ`mxv#>%Liq1t55mqQ zl6Wi9TWDjqc|6nmLN6mkuj1mA@0%~b1V23lQad2k@ZVYF$-WZ~*4aGKAuxK$^=WYO zj4F$jG3%2~`4V}}M8Brh;PN!j0}%v_3)9JK@9Ln|w^aIkFFb8@q3{nT``v+oZ{^EW#UUM8x(ftTR8YF-($89}~#1nbMOoL{xG zBDHc1qXW#&5b;RArVc6=xVNvWRnoiX8FgQ$)=~0bX+J(pBV=isLc(UHOr6V($VKvC z-mrR9MmO?Zr-$czaT_Y-In~izB(74aVxVDS*uM`isCc*w>_(2{)ysV7i<2I*mHAEN z*3OOLPNLs@avONZGeUK{webXDAQs;d(nFgIuP6PJ7Jr8MGu(1lQ}^sx`h#xH>n}Xfqp5tgj_6>_ecx z1>B!Ict|O0DOF#l{gx8{dAPTB)J4#6GrU4%jD>K8^e1eo&eF@mAuYd-tGXb%5mQt> z;I9}o1Jfi{DQ8WD$Iq_QqGRx^jP-0UPdTc0yL=9YTqWHm!DYiUX74*PH9Vtzj0NvF zlIg)^fj@xMPCGogYw>Fh_wV@yMmQP88<)PRb{>v>zEOHuTKPs?Sl@?132Gwpo%EY) zv+n9mV=Sdp>N3LNByWd>iD>DA^OKpFp@g5^i-ybG7>SXRvxUYFZr5R71WDi7wYr>0 zCEjeS8fEyk-}d^yxAC%+8o+jJZE(U!4Zc~#O*$BMw>u2SGGM8lHZM7-D_bPSC7`G| z_u^0O9HfU)$oLNZ*vLDZSoRRK030&!2n{+H!^Ig4#1|zu2QZSY%>h@oxx4!Y^pPCh zq6%Vu21ha5JWG!CISDvzzdYXG$QMQ0=TU4jAWCy{CwdZe4(1cSQeW8DD7Q39=EW{g zN6Oqcz1(K^9F!WUc4X0Mevmer1RmONB=G2VU3|ZL=wPQ?Op1_9lvl;;ZlMGNj%g54 zS%$0XVZFzX9ZVP!!P@TrJW5BSS4AG*kfO4Zdi}%jqtJZ#6xLJ9`Qoh*@FbqC`=ck3 z9=Z8h$=3WHi%k!Z-VxlZTZ+k~wp`Oev6W`DyR^}jvI-dsLl^!bxpXwig~^hOvTBI zrKhbJk3Pp67%MRw@%^QmXwqQ4aXo{jFQ=+l(^I`-35g>^7HQl@OLP-r`Z#i$}j3kJ> zL&5^tmso6Xb@P2FsDy{II&XU~`EQENsC?~{;Ogh-a>uCUAlt3H0;uRvV-qup_Y3fl 
zNM=N4bX&-aJ5h!d>^A;JJ=ci0_9l4-?RUdYkJfGHK~@p-e;@X}rEGX)I4PoY{pe zG5?7YF_lN zOF@BiU@MSpOS^|2&Nff!0Bx5J8YvAniAs4T=>V4ku>QyOA{O!j!)+`%o+iis&~d$7 z06%o0zjNfJXoM1XNxY$qMjjywtbT{0Z81hXn+4#H0tz0$X~J4UwRzG27%-(V4MplJ zYqt`3U|dS=R^P`qzM zE>0bzZ{0&c9P+il9NN@`GzGTgw=yU{3_)^xjY4c*37kurhbMEs=*ZdB+|GMi(}lb< zMl#aolhB_n=rI&fR2a^_E`Y#uU{_rp{r0t%@V*guyJkgGuPDy%y3jCe`HE^53ak#Z zy~bA!H96^%#?Dp-M+UvSlmS)5>6YlW>qPSXr+r^vUyeFk4}N_EgFHv*SD*C<`@;FvIacQ5y~D=wz`!gW>E z$@Hlc%X=E!;ebF$DZvmFS>G3NRjBCjp^M%jT*u3wM`3W~&CXW_uARL@cxFc?kE62` z6&WbR97}5>TX5vC_iGBj0~YT6!_$~ct6KKYLKZ=U;ZMG7tq#%x$STJONpD;I%i{~F zK-+2#QC#>o&!mCItL&HVj1*>gf6tI=k^&~OnU2x@cT;wyR5!PrDE|tGcl{LUh7t~X zZ)R5Fb#{PnK~=D^8XAg(gk*w*MG3C0BqcJwAHX3tuuCG&bbGO)NdU^_KZ2vYL#7S2mI%lb53swkk8RO7D ze0k|@Jem%p`oq)lq-{7-uVynfpnpz^pPBsR-pfZywDC>)^bao&!!0(M*KO{n3*=r*E`hU{DwHG8Kp+v0|3=JD2S9P~r+}hWmHa+K zN!8dtgPJOyA=Waulg7E@IX5F5j0}DgNunPFnEk%_)U}oRtC`DQwkoah1I0$@-U} z-_m$X>ze3pKd(oeuK|!LHEIw=4_VB8&I|OLh475feDO_Ora$RQ(wWKsL`4oOqi#nk@VY|x8#y%` zGsO6*RsFhZD^tzmHVLX>3upcpMo8a$x3-$n2X0=S-2?#oH`hDyA%QXu3CFk^5_7f$ z6F47@B$g}xjQHYu@VswOpG#(f8TQ`}GgEf(qxWtL`8GN;d&(4zNdJ_-q&7_;DrL|| zcKmeX&iPa{Hsi*wACo=D5S+A->8mraDTp8(RP{%V7|w=Ev=zs7|BcDh!(|BS(_MMR z)j=Z68LX|Xy3_=;p$3NFmsQZyepW7cXM4HdCnXny8TZ3zfn8%|nb3(I>!YdOJb)F- zGLKEUjck!dY75u;6MXL1u&k-1!7eYky-5t<7!ZfvWSn;FRwaC>fs7nAzJEhOzGv)5en|qIoddZ=5A)RmW{8!BxtEa&BbGYfD#-@~a5~02Q zijza{#3>wilVTs~j&=aQ5P?SH**}&BEnGo(w8>OXJK+;D{vnz7lg_7a$+Q^^Y=z3C z>cE1#GDh^G`>^U&ce;b70JrxBuv6Z9yI=+ags*#g6`Pj>;F01f^-4z5SIsEk>bl`{q6TCrK<;UU_c)Sa~2V=OHKbw>pfwEeL|1OkLr~$%tOWX5h1b|!RHLkF5Yq800_-yc*U)yNSD&~U(;woh_*pdKnELuyPJywoY$I>EocW$o7?BcqpY>l zTCKMc8P~-dx+#Ip56)*X|3El7ECJ&9)QmOWS7wWw-K7iY6%PUFuv(|1^HoPuX2Uw* z%+gAxb49xGKSy;>p8R;|y5&wFUTSit|G?#!G%hzAwZq!uu~~EL@~*aq)sydG@5sAf z1Ls{#HZwGn5KArVEl1+8r#TM>x%jwo`@>3)P-oJoMuD9!Fk1FVH9_Ho3?1E;e=&<3 zGE7a)zb>DrpFAF1m-nSV-i0Oy7W;&ZY?P7;6h{Hqa3|j9X*nYZr_PzE#+__#RuUE- zy1$>^=z0YhSJB*yw`Y2>9rKaxYE()yCi^;hb~XQQ40SO-X~Je2WFD;=9qmt7u)1vV zIwz+*dR*VmpMvcVIjsA3MQKr)rJgw#z^?)*^xm+1F`B%0T0HjeNSGo_+kh)3<{ELC zd5VbnLem;n(3)S@G>dutn)|KJN0~@!gi`!IrF=lQL;(maP0ds0=I5bQ59s5%;7N{- zXFQjejwP%mIBu z;yjH%_7~PCEv?^6OD%b2u@=(srcQv}8N}Cbso)=bc+VUBiDP$n^;GB}RKYJH%^2a- zRL|BT-Z$P^U4}rCCMY5C3ZUM?{((;yCPHB0nyz&Pe83hJ0 z?V{p?I$RE`1Swv0_+RC^RM9snG_7L5W%4a00u=w zz_F2uawHOAG;%UBaqVD5`8ouWSC-)P18|ifN%8+|GkD4AH$i92cN{@@IqP*}@FDOd z{`TbHyT0CDiAFkq)PT*yLmC$nko`_*0oPvZz{0|P^QOO)2t57auQ=-OI1{aIP6LP} z{*)Yls>{S8@W_vTp$Y~k{s%}<1-W>^j{(JeF7VF5r0R{fd7A-KGcyv}@Fc>WfauWr zYu>Y2G;;&;*Orox@FWoH)efj{AWO$g=+U4`l#N*U>f$7YFzASQjGEOzFce3C&zXBeyf0MN}nDEasMaV$X z26Tk}H`2$PV*jnMe_sWxS9EMdL`qaD;-4=gEJa60cUBHfk~;~ghSukPVPRR!RSNua z0CPn4U(QV&jK05hCJ_r5_!tRbZT{jTy}g0Y%LpVVD4%B#Qx0S%HUq@T4O39FSB-{~ zd!8TV*XU@%zeni@XLim6)A_yVo*xCaFU_Lt=MU-7`5C|tDBq|?=HDNlmi8NP5^iaQ zxjx%@w5F`=7$|#bEc|yrS%4~j0!)7#J9N@|YyT zY|q47es?V8h2r}&w}WS2{Zaq?UWn9y%T-nv8?4KZ3Xb|r0S=E*$)P#bvHrXH=gf)= zGCavEI`DHH9i1-0Zv>o7UXSiYQ*ib%qT&T-t@Tr({{mO49C-3vKU6PE^Jlw{Di^+Q zEHn6jHd2nj-F$EaGbvu(kWC7Bfs&^EP$vSk8f`_+1Oa32e~(V_xHOX^8u4$2g{Ar; zi4&Z#;7QWb|IW{VGhzR&LqO>)eLPqcZ zY=GZ2e(1yYDgQIi=67P>zs<{)?c?mL)(DN!qk+a>1+^c zwrX)n30Q@cE}Ve6$zFuiU%Y>2Cf=ar`_TS4?pDEiCv3R+cZ#M_IG2!1`so^7{YZgo zJj5eQ@y^Xi@Mwl|-jp*k|GqL$V}`_sb-Q1?ivOjXg&8VAGqDIh2)B_*Z42AHvW zy=5ApS>{$O!f;j&>OY6)eob9(RL6!C0#S^6M+a4kz<`o>CO_K{!bWXlF6qOO0E!NNi;}XJAjcY~xRmd%J)W&gV?xW^>NU^Pd#z#G_ZBJT(BatG? 
zPAf!+?wLBLut<7E4g+ES!xyLCL2lGNLY2f6^^qjEst4-Uk| zw4{1I+yd6DkmU;M;?0e9l*7@2sN{k!^YnO9a?B4|4Q?-)+r0IN(E{3?>D*(KRs6b; zFpXB*=u4LvH^Lm-Wd_4V-DR1P&zYheaq>7R>A!0!#wG-NNH3eZp#2Kd)u+$lii)=u z5doClOeJDL-?99-%M$HAi}-0V9Kv+-jWPD{5l>BbTsO1*WbyqwpodwyWPQ#rd`n6T zoz=(iMSHPinQL^D3pZVlxO9oSd2)-qiX=LoDE@23Q^(~HqdRUOeT2N?ApCuR6QHS-awds4mj@E0P^DzHh3WUcGT&dAF3t%3Kq8%CQZ zb+8CO_nCeRX{aTZAqrD~N3(r0Og{OEvXxPtmk7xJbD10u0mf@~I>Bz$!I9U92v*=Y zZL_ZMsBUUU!lTvtVM`y_4&`m(X-O_{nrjhEa(;UI`BVz(?dDsvMY~g-$!$6{yf%Uo znd}vD+%&qw7wA@kpJ0!f8854E<9V1Vq_h#!L&A%*`bEDH4E%^IDeIq@INKf1A>g*> zg*ELD^@-!o_Y6I<$Y2~9Lc=Z>M-W-HL`tSLB7Eumj+VfIwLer~^yR>tdv|{#kp->T zZC_((0}UjN7chgjghbjs0Vhzp<1!6SulJnflwMOiy}9gJotHi6JdVfm6C(aGh|^|_ zg8L&oDZgk!^Tk)OTV9jBP6I~G_p?^|XTwKgx(hoIuN%*}d)wY=jvO0;$@g8@ZK!yx1cN%t0?t)dSO41rNTn5?i3Z4 zn0hKC*aKj(w4`}FURkDRWP}Y$`f*ea*a@D3_YQ@odKXkLYHV!%vOq%^LSAEV`|HS_ z^;E4D=<_=LYo^#Q8*6c*DiG52d&L(Iu#gHm)4pX$7cCdp!t`|Qzxo;|?k3P_ExcCR zZ-`Z7@T?Vn`pfECg?R67f*-2jn@`s)o2<}Y^*s{fNoZ}5hCbhdhx(;mo7sEQ)uox+ zUdQ*yEgTrkY&B(gHUb<^$jzGC{@zEyXWd=I^Wr@X^k?X3UW6F@W|+-CwF4ebMftDp z-D9@}B_j`rDEYC#=-a(7{sn&J&+OU*Un<4)iQSd?7vZWwDOWcmOrx);jJ~DcM?En= zmwh%5LF~%Q*vH^V#E`xh|EpY8OfU}BFqKKmN{?2+ww5LY$((^HS3unhYxuY}MnM;h zoP|k71}!Tq+Y!Au=Nca$k3!5B3mj=~6Eu&)(BiA~PboR15WgK#UAG4ZAtWA*xyX34<%CMQU%77?@_gju0UjJ zg_ZVJjbpI67^%|@4adLuOw{SEa}r0Pl@B{!Jy|a-$vv6&%DBnaZ{~(3M2K>RiT%Oi z=}09Dj@)A~!bcZ0=l9lcVCwg}XovF}8-Bq3nT7hef(}#AX;ZqJ{_8J3Bs13+3PbpC z{xHNj{|yhEqjm&U43?ik##;9|1l1G09wn<-^RKep4dr>TU15pOos?<=a1t&d;*tJ7Sj|E+LYj9lHy^Cuz@9^m zReyBgw8cQ(3c|o=c<`z;zp**wq;J3Xm9Q-=`q@Jq?Tn%B`i2SRrH8D>K>F@Idw)Za zO$ojSZ}}5)JuZVCuxKKRwKb(p^7T!e#8y~H;E>pogYa#aDUa0QoX~zT+C4E?2Y+&m zsK%5`O-xkP)g`pQB`2!`wFfn|q)ibA><7`#2`qJ2eWGP!)aEGkm*zIcnU%PKe-FgN zJ}Tkfop4bHBG<9Fc6DJvUdh`$;=efVZMYtr%^6?Ysmm@#>h;iD3#Eue!|+aognPij z6u^9RB3Et5Zt}H&VRKD7$ts2-f#Vl;fDOOR-bFh@xA{W*-V?rg4YK+NZ{1fm(Pzp3 z*2{bv3q)Y0t^RK!^g*|_6UWdU9ecp9DL>jU72~-VOpsc;GVhyZTJ+Bxjp%I_=+3-% z3|gnrTn-5rFD$GkkNpB>fJn2W=JKnVilja?q~m8>ZR^eT2Ky>fSO7L-;%yyU}6GlEHP$rTr3De*@63HBPQ6J!eXhyE@(X1-PN7`^p4uH)584% znh%6m-C0En?WR9NZ@q_H;d!t(Dz?O1LJo^6t$oxZZ{29LscAo6 zgpyy@`-qBhIZuB@qx~;YcQ3jEPSEUlAAXxT5=hwTt}PTER1X3` z9Z(ax59Eg&9l^#kn=F_!B5v2Z(;<^V>%LIjdQdU3lg(QD9wCpDp|0nU_OQLQn2F$y zz$FC!UXp`qpBpJ%^1#n~%%I^BrTqRIpI;}lPd?qY*P!D;%A8$Gmw znh}~(oI2_77dBH@txAjVF)Kl3Vvh0KUhPQ^n#ji!6HKOXlZ&e`_!2O*JZ4?Z55-Y9=8P4 zFx)Y;gQ&@leZ?`E<=}v+=b@VxJxbw z797p8eFPXTitPk#B{P1l(}Nyr?PidS8hk3)6{--`tNsXyi#cmoeX?a8SalQt!snaK>GS}Nwo1;94(ofgU6O#z+ z7e2s4`XzsEd~uYPhlk9eSP7LiL5__X>jr>iNRf%qN3LXUG1+980}|E01`or5>L@h# zXz?&|$$tJR5mzVd#p99ncQjY=8w(j0+$X{82MI~}nvemfIo1)3tn6=p4-VttJ|40< zl91Q%Dcx^-PEG6nwJpz2w)_n%dH6I;a6n%N{3$MM`iJNU)4qpglv6 zo`1`JGJ>WU*KMx$;G;zp*@w?%PucgHG&>te$MyE~e5#euBH+)^lHD@P-k{0KltZ{F za=v{5d3@k^Kl#1e4s%lNrp$*Xmv2cAp&UW4mVn+u&L1S4w=p}l}&2C@A?D-3GY^n*%3 zgbrK3%9%)e^STtI(iZ1&(X@TY1?S#I<4j_sPPDT&93c>r8wj&%wzU8K{{B8!X$o-h z=AE6QicD7i5dJD9G#&vOool5yf<)D@9$KN^`keDMHKV^AhVtHAwBT+IbC9=2^CObq z4ol2iJriUI3Vpq*ZHnat5ycM#z3yZl)uc}i?#_AR5)MbAUeU)AU(vh5j1Tc@iQYV5 zd*6N)e{b+fx+F=#UJqyhu6vHu1QWd@io6DO_#)+^Ft=N#k7)Eo@E=7On>)foEQS-6 zSZNtY$q^hZ*pQhYh|>8DO0tZhc0Pd>zF68dB)dvO<))>DZ&$y5@jiAtn*k<>uQ-FF zeBSf`Z{736*FSv8`A^0nNyAKy!+m8HRf^rP=5Gl*xn!8x53lSsUcRNH`__!pF73>t z4U~Cq+}qaevp;38AqUP?85|_Q4t#T4v9x*G55;C@d~r0U#`ygkC#3~L5k*XAS_*`l z-chQ1wQ-jnJpn)$`rC*vLIfU?Xogp~Lv=-)Fi!P&|~zo!~}1E+=PUQP#zW zTqbQ+0?>dNRaQRvEO8Ik*)yT+pu4*T(V}~`2?!v@+fx(( zG?t9blw<9HBVo77=4Y{0dns%W5qw>hBc;x1mE=P%+8!`&we|?>IG2$0F5E(I>st*x zM>sY!@4N$5mA-6hK*L)!a)z|@EosXM2#+VOBBO_+h5kWEumac?=F%7c%F0(-8Lxm3 
zkB9(bOrx!lyT-C`FJBrN8%q{RXwCbDeMLNqy3rrWO3es;mr*&0%n0=9OF;5Wb7}k~1*Ko%~5%&=19@F{G?Sz_a+2kJiJxjN~uV5_k7ZL2HKfY!(pJ z3Qi>#w%3Rb3Idv7_asa$@*l?^51mIs(PK9&+tVIe7ugK<${r2{ElIf%?PLt3v=>cY z{9Wbj5B{0fy~-aLjeJ1^$)tn{A*ris&Uk^wL_fIBT8c ztA^d-`LB&E1UlG+Z?d*ONFC~p!ByCci7=vDt+^29yMvxeSV?vyEpITll0S6@#}{F_ z@@3@(*52hV31}hf({*>jLcRu85f9e#%W1zLWOi)J4pxBRd3akG9t0Fh%F6o;GcZc- z<^mSY{jDwXp6Hu!%im2Q%b$F(_yE<|UF%j-onF^RpZ!-~dRDIn*Ay+0^T~-%{N9<{ zxWG|KFz5<;pxZ0q%6E;;3USb?Gr0RlqX!1ZIOlPTmX+!Zum~{PdBJDnGCH>e?n0)f zy^YnqMC1>AC|LA+I(V7wRYe`<^5(l=2X%w^x=uP&JJ ze72k>x8HtGpcPgiW4-&%nJ(Z`*JMAl9FM#)m?NMDUP=RnS**~!+Z@;Ty6XgIXyIqR zIA4j-=8X|w$Bcq$UZXhi`@PqiwSZ+vi=9@eE&x3hZOoky=YsSPQOTill z(Fy%mc2L%7q@NXk+DEYxnk`=|EiJ*_p>=i7rnsj5e9!j&w^@z$snxO(yz7I{#sM0> zs@L6MrETB2F~A_l5^vfFVBW9J?NAQu!S%7y=xq+7wHGI=USM!zg+A5Ydc-P{d2orf zM+Y~(xSo$7TASDcfkUU&^;uz0p5CPcf^h5KMAu0+4t@2G;kcEJMQerYt-A;`gT6?P zi++R!qb@Y*=ZytocNTjkHOfQBo$Utz1)cFf$lfz=7?dWg~3 zKBaFC&wVDH3(ek=#~yiM-a;0?bi5#Y0dmJ1D%3OmD0~^jcz9e6`-;cEg~de7%*@!W zmZgAJNMk{lemd)nS8A}DG-sh{r{@dJ{F=nXA?D7dvIdV}k0#vnx0M3bDeV<_4{Z7s zGyJ_J6>upIb5Tuf+ny2LgyIcQx%Evvws~g<# zAT<|bPH$2v*pqi;weXqQrubiebJ6)`u*Oh4Pny!n-06FQ>WbMIa!6so{EU?_4v>=I z(nC(|#>k|+8LD1pTFZ2gjEp!sIK=%boe=~g4&RfKK3wWICqiK`>O}~XpGq*aFOPreRk@1Dx9|3-HzRex~fy7EHr8ZmW;qe!r z4i*?R;|vOylcqRzrqnv;q-hA4h?-3*|+g$fzP%A>oqaoQoWVV5By4?Ldx}f zD|pZ1?uSWc5AK(&LF zvF&8WvP}LZ39ZdSte6+bx{@`DX~-C5kyTV8s`oOKX`VvPY_D^!E=k$N=qICHO#Tea zZ9Kz|joXIgVk#jcmNNwis7LT9Bwhv?#dy&v8TL1fex9tDfoUbkG#=lD+1z=z(4G{z zzNQqr-7%)_TxTPCM!2?T#@%qz`e)hg5YG~-YAW_U6APl*GcmO3_%>6&pw5%u>j+WB~j~s<&p_5m-y7R?wvxR0ILm|?? zH^#IOjJ^%quh(B3tD6O}SvJyU<`IZxWydP&eWh;tA+cDQ#P*j>?!c#wXgG*QuzB9a zM*5-HV(vaehEg1TqEV2vD3Q$~IWTZH=yNcUU$1Jlk+ex+LykA7i0%O0NsIQbSwL?A zq)@M@Gf*eu$S5f4-;)U*gQ{80FCbPIh-BIqBCDyX37A#W`7G{O96^aP zCNZ(W!woSentgns5WeG?CZiaIM0gEQY)d7MU7@X6`~mu_d;7MZ$)w;(IsoTJ+r!nO z^6}C_3n>I78(GFZ#kM&_g~gais>Xp%E}&(gq^{r6OVpQ>H|xdT3ml&$ma~c=jH@I3^c}}2R!o!EX%*_d)`3hckVWA z5f$shhYx8$r4#5zubq>pC#IoJh@|nl)$yjEjhA9kE4SPoH&=1ufYR>T`3+#CF#jxC zmIi@fRDKc`P@K$p13m$2^^Y^L`>BAMni@W}D7+jdATD4^=@OEbem;a~pCJlwIs^J0 zo`u7wrqsQ}Q{1-LEf#Lna@qH-J-)xIFioUrg2_~rlpJ1<_dYPv>H6J8 z78W8%US3at4UzFRnZLaSsEc<8vAa-db8|bN>VnKjxQqn+-s;fsFc8(Ap9y*$f`Nrh z$aT?ry^OK(QBWKQ8xQaH7xlWPJQ<7GRG|$2gW}Z=xxf7vb#--p0`d{^q2*=I;bc~D zR1&?FnnajU5yT6?RTWp=Cy@Y@fDS-k!EiEmUm8-{}osT8u z<=A(s;J;LxPQG`ztm9$ST+nve0@8|c$;o(}GoT#=4am@c1rmxa20b#+W0R9^Kz0{s zJd-&hfIf(~tgN+#h2mmjV%o?e&_I>+{PHr+iV&W}wjP`%tb4D97)<;;u|RNC6YMO3 zGL}AQxY#><1bi2uE;i7N4XJd!+y}zbph~E|j78$#-{UDmqfR%=pHi1?nJ}PRgOoRQH z*Xk%iIs%@A=~>tMnOT;j-m@NuURzu{4(*-j?%tjU5SIRqBf^yfaZQAPYQ&SdU|>wK=l;tfkEo6#a#Hy8dx8kE@!zd z=Ho5w%gzSicNZ2GfB@iE#~{$EGT0y?%m&7({r*hS=w>65fBX37TdiKe=Sm@+JO!Nn zK-l*Y;Tj$ydcfd&{7yFb5a=Hg@wuy>GLex9e~Tlj>fAX8o~C;o{hiZu?6zEzm6vzD zI!tjwfRLusT6TFy{Mr&}Ez}pFEYnqoSaY0=iYceS7VJh$Uff&oU|d zGqX?}4a6z}E_;)O8dETBI3TMqE=CM4I`M7-Q^MJ~LASBA^egbiwxvkX5h`F_fY3TP z7asrFRQ`hqU|4ej@8QsCMp^#I?KX2ZewkTXIy5>eqa8LG7Cl zzMGqyp!S~?_xSi|@3c$~1ipb9e8>E}+A@|Y3@vTh)8idIFRvD`&ZF{%hPjoM_Ptv` z$2_kR*xzB*{s5{I6?u8n;PXCUFVdkjEQ1b#xk-udpL6HUQN06`L~{o@Y}S7Q4&1|| zqb;pywmYCrlkYy?(fY_tvznG@&Vm@I=asq8g8Jt%xp9i<)roVi>~e|5ZeY~yuX@Bb6aaJ?1X;b zYDEIVj=;cP6&;Uhq5$xDKb;>gXPGzkog}j6kJsN#dWG%Z=k2jRt!&X?tvz0es1nFm+bBB?U7BWV|XY;v)$8U z{bvZMa38s1le&?n_=3+g4-N|hy5m6VDYb=~lo057u6aYj?DCZzthIfDaX4^706k{U z8hKZTYR!Dka$I&Ac7wyiyf*8?E-p8q8#pdDm)VLex&qJ}*)An90*N^asl>M_x2Q#K z;J<*|v$M4Ys-(HBXyScfY(qer4G0s1#}|GDz7-Y~fh>DmYvJSL<4y(D#DF9Il9Gd0 zov`YMLOt;fJfz*t*%s)T zcWO6p0CDDJf%ytiO&~ZtJyl+&?Th0(g<1|f$$t2piPQcq{#JKJ--2~$rDgnm6ROJ{V>4VNebNG--m&FoyzBNnj@vIz^ zBs9CJi3|K>a*~;s*8|wSt$S6J`$Fi8sp)ATJvn0mH=pd 
zb?18mCJ7!+3rKPU8wLOwY$P_dinbjZ3JXcV3e)idE=I>|cyMrXVxqXLERE41u)6v@ zk!)s}!V?e7Ic;dUg{*U~vfvb%V1i^qLKo z;Ff_`1Dd6kEhVDjec$8au!xCI_V>*!Ej{o5{)vr^1#bfY1=@#4msa015zGDSYpooDJYa8+D9~ zjF=)(FkL~tv;nZjLqKz{%f)W=m?T7SN4`U$AV2@r<43N4e?m{ox19F@G8oF>U-|WG zAMC<_6Jcd#1woLAhzQIf=-31h#MaKPx~3*}bp_Y7?C;;dOG`^&6ahG7VP^;Hxf<}y z12cVIs37GCxWJYG2mxWoX~pAuD2W*u8w6OSk(CuO7S`6z&g_LDm`@xU)s6=yets|T zy|fNcWIr#fUxw~TmE6tqO>N43{v&(}w?A1BN!nt5lkPfQtn?3m%{xSWbQ$CpddU{AzZ}s)%*qi|6UCf3+?%HApd)1 zprj)GcY*)C>d7h!tLIf!oi(4dE!LW=XrM?*NG!Ct*n!3iZpBd4?93~m>}P-F=@jpY>;zW`}UGqYqM2B0Pr!nq5(Xh2X7%{k(%hW7cMS*v0L%Z~ z=HX^$R$Nm8wqG>QAk@a@M?paW*swU01FHEl^;#-!?mIAa6J_{&&|uuNdwP4J6SK&NVa{QEU&A`)S}XLqi)yVCAe z%?+eYrPH|L2K>H&I0T#xNYbd8$gs2516~SZUf0-c(h1bdOf)ngMF0WdN6h3Q1cM?m z3h?aS0ZRn?-jl@UdLRISjL!ytRGO31O|!YPQ|}gmK%%Ai`Sa`QF|bK7a#96I3@Cut z)?UWbY9#(J0;-^3X`3Za0bNuX787^U=kXhP>zmT0RdJ( z6^M+20s_J{)Zs#{qzQp%g?4LVKurxx(iAw*01Hqt%-Hla){+T=oU-!OXTZ_AZ> z-N0=Dwc?%KT~&GcD&0;<}ayD!0S4+%~Zu?xbRl5cl0nT%z|JO2ICM-0wu(7Z`E`tpUvE8CpxUG}APD zpjPxyMA4k7!ye1X9O<&hS{-Kn*ZqgT?%0LjJ?DJi&i5UX$z-xvEKc)-!@~!b(xVCo zceMClr0{rG_d(xyFG3O7S3EbukYBl`5$P?`Wi7VE;1rWU8Zd<{J0i7uj2uOegO*C`ex{73%k101a8cEVh zKOmo!yuC3pgp=wU9j#1m7LPRb*YHC!QX-fFr201=vct^+`?V;i65=)9e#A+#T_*<$cEnrG z>0S{W>{<*e(ibP!{u;H0UQ{tJ((rf`?4hvp4G(W?YKjjWH|q6=kO__-`RCVZGqx;O zRqYasKS6q1`-5V?64oHP8ks@~H+DEfB6%6GKQog`rG}1~pyRqqmnc#6m6bim8SK3j zbsXzbg~9=Vf&H}3@rzGrj%Z_|EI$~COMzCzd;ygE=kZSS)ReAXBouZ)T~tasv6%Q# zFJ>QKqly~fex7)+39m{*TdY;8Mrx(Yi4GG&1^52T}c9J)b<5492))sd;bYb&$85_ z?`kMo+}6FkF)ltnJuOXDU+?AVIV!GMkDi1I$S}Y67up77wbAJ+R zs62nsU;;rXI43p~Pfkwi^*e){f14X00No%`_~q&V!M?6l{{ES|^ZslW4)JO<552s-p+CmJ19#|(%F5>4;kCF4 z$tx`}#@IT3!Czi)Szy%vF{zd4ZSa~|Ked;yNv#wPgNBdldXlOukFth@YYT4&(ZJnT F@&~2%Q!@Yn literal 0 HcmV?d00001 diff --git a/PyTorch/SpeechRecognition/QuartzNet/inference.py b/PyTorch/SpeechRecognition/QuartzNet/inference.py new file mode 100644 index 00000000..f9c66495 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/inference.py @@ -0,0 +1,390 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import math +import os +import random +import time +from heapq import nlargest +from itertools import chain, repeat +from pathlib import Path +from tqdm import tqdm + +import dllogger +import torch +import numpy as np +import torch.distributed as distrib +from dllogger import JSONStreamBackend, StdOutBackend, Verbosity + +from quartznet import config +from common import helpers +from common.dali.data_loader import DaliDataLoader +from common.dataset import (AudioDataset, FilelistDataset, get_data_loader, + SingleAudioDataset) +from common.features import BaseFeatures, FilterbankFeatures +from common.helpers import print_once, process_evaluation_epoch +from quartznet.model import GreedyCTCDecoder, QuartzNet +from common.tb_dllogger import stdout_metric_format, unique_log_fpath + + +def get_parser(): + parser = argparse.ArgumentParser(description='QuartzNet inference') + parser.add_argument('--batch_size', default=16, type=int, + help='Data batch size') + parser.add_argument('--steps', default=0, type=int, + help='Eval this many steps for every worker') + parser.add_argument('--warmup_steps', default=0, type=int, + help='Burn-in period before measuring latencies') + parser.add_argument('--model_config', type=str, required=True, + help='Relative model config path given dataset folder') + parser.add_argument('--dataset_dir', type=str, + help='Absolute path to dataset folder') + parser.add_argument('--val_manifests', type=str, nargs='+', + help='Relative path to evaluation dataset manifest files') + parser.add_argument('--ckpt', default=None, type=str, + help='Path to model checkpoint') + parser.add_argument('--amp', '--fp16', action='store_true', + help='Use FP16 precision') + parser.add_argument('--cudnn_benchmark', action='store_true', + help='Enable cudnn benchmark') + parser.add_argument('--cpu', action='store_true', + help='Run inference on CPU') + parser.add_argument("--seed", default=None, type=int, help='Random seed') + parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0), + type=int, help='GPU id used for distributed training') + + io = parser.add_argument_group('feature and checkpointing setup') + io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'], + default='gpu', help='Use DALI pipeline for fast data processing') + io.add_argument('--save_predictions', type=str, default=None, + help='Save predictions in text form at this location') + io.add_argument('--save_logits', default=None, type=str, + help='Save output logits under specified path') + io.add_argument('--transcribe_wav', type=str, + help='Path to a single .wav file (16KHz)') + io.add_argument('--transcribe_filelist', type=str, + help='Path to a filelist with one .wav path per line') + io.add_argument('-o', '--output_dir', default='results/', + help='Output folder to save audio (file per phrase)') + io.add_argument('--log_file', type=str, default=None, + help='Path to a DLLogger log file') + io.add_argument('--ema', action='store_true', + help='Load averaged model weights') + io.add_argument('--torchscript', action='store_true', + help='Evaluate with a TorchScripted model') + io.add_argument('--torchscript_export', action='store_true', + help='Export the model with torch.jit to the output_dir') + io.add_argument('--override_config', type=str, action='append', + help='Overrides arbitrary config value.' 
+ ' Syntax: `--override_config nested.config.key=val`.') + return parser + + +def durs_to_percentiles(durations, ratios): + durations = np.asarray(durations) * 1000 # in ms + latency = durations + + latency = latency[5:] + mean_latency = np.mean(latency) + + latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)), latency) + latency_ranges = get_percentile(ratios, latency_worst, len(latency)) + latency_ranges[0.5] = mean_latency + return latency_ranges + + +def get_percentile(ratios, arr, nsamples): + res = {} + for a in ratios: + idx = max(int(nsamples * (1 - a)), 0) + res[a] = arr[idx] + return res + + +def torchscript_export(data_loader, audio_processor, model, greedy_decoder, + output_dir, use_amp, use_conv_masks, model_config, device, + save): + + audio_processor.to(device) + + for batch in data_loader: + batch = [t.to(device, non_blocking=True) for t in batch] + audio, audio_len, _, _ = batch + feats, feat_lens = audio_processor(audio, audio_len) + break + + print("\nExporting featurizer...") + print("\nNOTE: Dithering causes warnings about non-determinism.\n") + ts_feat = torch.jit.trace(audio_processor, (audio, audio_len)) + + print("\nExporting acoustic model...") + model(feats, feat_lens) + ts_acoustic = torch.jit.trace(model, (feats, feat_lens)) + + print("\nExporting decoder...") + log_probs = model(feats, feat_lens) + ts_decoder = torch.jit.script(greedy_decoder, log_probs) + print("\nJIT export complete.") + + if save: + precision = "fp16" if use_amp else "fp32" + module_name = f'{os.path.basename(model_config)}_{precision}' + ts_feat.save(os.path.join(output_dir, module_name + "_feat.pt")) + ts_acoustic.save(os.path.join(output_dir, module_name + "_acoustic.pt")) + ts_decoder.save(os.path.join(output_dir, module_name + "_decoder.pt")) + + return ts_feat, ts_acoustic, ts_decoder + + +def main(): + + parser = get_parser() + args = parser.parse_args() + + log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json')) + log_fpath = unique_log_fpath(log_fpath) + dllogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_fpath), + StdOutBackend(Verbosity.VERBOSE, + metric_format=stdout_metric_format)]) + + [dllogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()] + + for step in ['DNN', 'data+DNN', 'data']: + for c in [0.99, 0.95, 0.9, 0.5]: + cs = 'avg' if c == 0.5 else f'{int(100*c)}%' + dllogger.metadata(f'{step.lower()}_latency_{c}', + {'name': f'{step} latency {cs}', + 'format': ':>7.2f', 'unit': 'ms'}) + dllogger.metadata( + 'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'}) + + if args.cpu: + device = torch.device('cpu') + else: + assert torch.cuda.is_available() + device = torch.device('cuda') + torch.backends.cudnn.benchmark = args.cudnn_benchmark + + if args.seed is not None: + torch.manual_seed(args.seed + args.local_rank) + np.random.seed(args.seed + args.local_rank) + random.seed(args.seed + args.local_rank) + + # set up distributed training + multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1 + if multi_gpu: + torch.cuda.set_device(args.local_rank) + distrib.init_process_group(backend='nccl', init_method='env://') + print_once(f'Inference with {distrib.get_world_size()} GPUs') + + cfg = config.load(args.model_config) + config.apply_config_overrides(cfg, args) + + symbols = helpers.add_ctc_blank(cfg['labels']) + + use_dali = args.dali_device in ('cpu', 'gpu') + dataset_kw, features_kw = config.input(cfg, 'val') + + measure_perf = args.steps > 0 + + # dataset + if args.transcribe_wav or 
args.transcribe_filelist: + + if use_dali: + print("DALI supported only with input .json files; disabling") + use_dali = False + + assert not args.pad_to_max_duration + assert not (args.transcribe_wav and args.transcribe_filelist) + + if args.transcribe_wav: + dataset = SingleAudioDataset(args.transcribe_wav) + else: + dataset = FilelistDataset(args.transcribe_filelist) + + data_loader = get_data_loader(dataset, + batch_size=1, + multi_gpu=multi_gpu, + shuffle=False, + num_workers=0, + drop_last=(True if measure_perf else False)) + + _, features_kw = config.input(cfg, 'val') + feat_proc = FilterbankFeatures(**features_kw) + + elif use_dali: + # pad_to_max_duration is not supported by DALI - have simple padders + if features_kw['pad_to_max_duration']: + feat_proc = BaseFeatures( + pad_align=features_kw['pad_align'], + pad_to_max_duration=True, + max_duration=features_kw['max_duration'], + sample_rate=features_kw['sample_rate'], + window_size=features_kw['window_size'], + window_stride=features_kw['window_stride']) + features_kw['pad_to_max_duration'] = False + else: + feat_proc = None + + data_loader = DaliDataLoader( + gpu_id=args.local_rank or 0, + dataset_path=args.dataset_dir, + config_data=dataset_kw, + config_features=features_kw, + json_names=args.val_manifests, + batch_size=args.batch_size, + pipeline_type=("train" if measure_perf else "val"), # no drop_last + device_type=args.dali_device, + symbols=symbols) + + else: + dataset = AudioDataset(args.dataset_dir, + args.val_manifests, + symbols, + **dataset_kw) + + data_loader = get_data_loader(dataset, + args.batch_size, + multi_gpu=multi_gpu, + shuffle=False, + num_workers=4, + drop_last=False) + + feat_proc = FilterbankFeatures(**features_kw) + + model = QuartzNet(encoder_kw=config.encoder(cfg), + decoder_kw=config.decoder(cfg, n_classes=len(symbols))) + + if args.ckpt is not None: + print(f'Loading the model from {args.ckpt} ...') + checkpoint = torch.load(args.ckpt, map_location="cpu") + key = 'ema_state_dict' if args.ema else 'state_dict' + state_dict = checkpoint[key] + model.load_state_dict(state_dict, strict=True) + + model.to(device) + model.eval() + + if feat_proc is not None: + feat_proc.to(device) + feat_proc.eval() + + if args.amp: + model = model.half() + + if args.torchscript: + greedy_decoder = GreedyCTCDecoder() + + feat_proc, model, greedy_decoder = torchscript_export( + data_loader, feat_proc, model, greedy_decoder, args.output_dir, + use_amp=args.amp, use_conv_masks=True, model_toml=args.model_toml, + device=device, save=args.torchscript_export) + + if multi_gpu: + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.local_rank], output_device=args.local_rank) + + agg = {'txts': [], 'preds': [], 'logits': []} + dur = {'data': [], 'dnn': [], 'data+dnn': []} + + looped_loader = chain.from_iterable(repeat(data_loader)) + greedy_decoder = GreedyCTCDecoder() + + sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None + + steps = args.steps + args.warmup_steps or len(data_loader) + with torch.no_grad(): + + for it, batch in enumerate(tqdm(looped_loader, initial=1, total=steps)): + + if use_dali: + feats, feat_lens, txt, txt_lens = batch + if feat_proc is not None: + feats, feat_lens = feat_proc(feats, feat_lens) + else: + batch = [t.to(device, non_blocking=True) for t in batch] + audio, audio_lens, txt, txt_lens = batch + feats, feat_lens = feat_proc(audio, audio_lens) + + sync() + t1 = time.perf_counter() + + if args.amp: + feats = feats.half() + + if 
model.encoder.use_conv_masks: + log_probs, log_prob_lens = model(feats, feat_lens) + else: + log_probs = model(feats, feat_lens) + + preds = greedy_decoder(log_probs) + + sync() + t2 = time.perf_counter() + + # burn-in period; wait for a new loader due to num_workers + if it >= 1 and (args.steps == 0 or it >= args.warmup_steps): + dur['data'].append(t1 - t0) + dur['dnn'].append(t2 - t1) + dur['data+dnn'].append(t2 - t0) + + if txt is not None: + agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], + symbols) + agg['preds'] += helpers.gather_predictions([preds], symbols) + agg['logits'].append(log_probs) + + if it + 1 == steps: + break + + sync() + t0 = time.perf_counter() + + # communicate the results + if args.transcribe_wav: + for idx, p in enumerate(agg['preds']): + print_once(f'Prediction {idx+1: >3}: {p}') + + elif args.transcribe_filelist: + pass + + elif not multi_gpu or distrib.get_rank() == 0: + wer, _ = process_evaluation_epoch(agg) + + dllogger.log(step=(), data={'eval_wer': 100 * wer}) + + if args.save_predictions: + with open(args.save_predictions, 'w') as f: + f.write('\n'.join(agg['preds'])) + + if args.save_logits: + logits = torch.cat(agg['logits'], dim=0).cpu() + torch.save(logits, args.save_logits) + + # report timings + if len(dur['data']) >= 20: + ratios = [0.9, 0.95, 0.99] + for stage in dur: + lat = durs_to_percentiles(dur[stage], ratios) + for k in [0.99, 0.95, 0.9, 0.5]: + kk = str(k).replace('.', '_') + dllogger.log(step=(), data={f'{stage.lower()}_latency_{kk}': lat[k]}) + + else: + print_once('Not enough samples to measure latencies.') + + +if __name__ == "__main__": + main() diff --git a/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_16GPU.sh b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_16GPU.sh new file mode 100644 index 00000000..8bdf36a4 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_16GPU.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -a + +: ${NUM_GPUS:=16} +: ${GPU_BATCH_SIZE:=36} +: ${GRAD_ACCUMULATION:=2} +: ${AMP=:true} + +bash scripts/train.sh "$@" diff --git a/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_8GPU.sh b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_8GPU.sh new file mode 100644 index 00000000..751a0f6e --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_AMP_8GPU.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -a + +: ${NUM_GPUS:=8} +: ${GPU_BATCH_SIZE:=36} +: ${GRAD_ACCUMULATION:=4} +: ${AMP=:true} + +bash scripts/train.sh "$@" diff --git a/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_16GPU.sh b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_16GPU.sh new file mode 100644 index 00000000..1c5c6c36 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_16GPU.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -a + +: ${NUM_GPUS:=16} +: ${GPU_BATCH_SIZE:=36} +: ${GRAD_ACCUMULATION:=2} +: ${AMP=:false} + +bash scripts/train.sh "$@" diff --git a/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_8GPU.sh b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_8GPU.sh new file mode 100644 index 00000000..63251cd4 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/platform/DGX2_QuartzNet_FP32_8GPU.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -a + +: ${NUM_GPUS:=8} +: ${GPU_BATCH_SIZE:=36} +: ${GRAD_ACCUMULATION:=4} +: ${AMP=:false} + +bash scripts/train.sh "$@" diff --git 
a/PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_AMP_8GPU.sh b/PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_AMP_8GPU.sh new file mode 100644 index 00000000..a60f9c9d --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_AMP_8GPU.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -a + +: ${NUM_GPUS:=8} +: ${GPU_BATCH_SIZE:=72} +: ${GRAD_ACCUMULATION:=2} +: ${AMP=:true} + +bash scripts/train.sh "$@" diff --git a/PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_TF32_8GPU.sh b/PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_TF32_8GPU.sh new file mode 100644 index 00000000..2515fbe1 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/platform/DGXA100_QuartzNet_TF32_8GPU.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -a + +: ${NUM_GPUS:=8} +: ${GPU_BATCH_SIZE:=72} +: ${GRAD_ACCUMULATION:=2} +: ${AMP=:false} + +bash scripts/train.sh "$@" diff --git a/PyTorch/SpeechRecognition/QuartzNet/quartznet/config.py b/PyTorch/SpeechRecognition/QuartzNet/quartznet/config.py new file mode 100644 index 00000000..30eff75b --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/quartznet/config.py @@ -0,0 +1,140 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
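The config module introduced below resolves `--override_config nested.config.key=val` strings against the nested YAML configuration. A minimal, self-contained sketch of that behaviour (illustrative only; the shipped logic lives in apply_config_overrides and apply_nested_config_override further down):

from ast import literal_eval

def apply_override(conf, override):
    # Split 'a.b.c=val', parse the value, and walk the nested dict.
    key_str, val = override.split('=')
    try:
        val = literal_eval(val)   # e.g. '16.7' -> 16.7, 'True' -> True
    except (TypeError, ValueError):
        pass                      # plain strings stay strings
    *parents, leaf = key_str.split('.')
    for p in parents:
        conf = conf[p]
    conf[leaf] = val

cfg = {'input_val': {'audio_dataset': {'max_duration': None}}}
apply_override(cfg, 'input_val.audio_dataset.max_duration=16.7')
assert cfg['input_val']['audio_dataset']['max_duration'] == 16.7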
+ +import copy +import inspect +from ast import literal_eval +from contextlib import suppress +from numbers import Number + +import yaml + +from common.audio import GainPerturbation, ShiftPerturbation, SpeedPerturbation +from common.dataset import AudioDataset +from common.features import (CutoutAugment, FilterbankFeatures, SpecAugment) +from quartznet.model import JasperDecoderForCTC, JasperBlock, JasperEncoder + + +def default_args(klass): + sig = inspect.signature(klass.__init__) + return {k: v.default for k, v in sig.parameters.items() if k != 'self'} + + +def load(fpath): + + cfg = yaml.safe_load(open(fpath, 'r')) + + # Reload to deep copy shallow copies, which were made with yaml anchors + yaml.Dumper.ignore_aliases = lambda *args: True + cfg = yaml.dump(cfg) + cfg = yaml.safe_load(cfg) + return cfg + + +def validate_and_fill(klass, user_conf, ignore_unk=[], optional=[]): + conf = default_args(klass) + + for k, v in user_conf.items(): + assert k in conf or k in ignore_unk, f'Unknown param {k} for {klass}' + conf[k] = v + + # Keep only mandatory or optional-nonempty + conf = {k: v for k, v in conf.items() + if k not in optional or v is not inspect.Parameter.empty} + + # Validate + for k, v in conf.items(): + assert v is not inspect.Parameter.empty, \ + f'Value for {k} not specified for {klass}' + return conf + + +def input(conf_yaml, split='train'): + + conf = copy.deepcopy(conf_yaml[f'input_{split}']) + conf_dataset = conf.pop('audio_dataset') + conf_features = conf.pop('filterbank_features') + + # Validate known inner classes + inner_classes = [ + (conf_dataset, 'speed_perturbation', SpeedPerturbation), + (conf_dataset, 'gain_perturbation', GainPerturbation), + (conf_dataset, 'shift_perturbation', ShiftPerturbation), + (conf_features, 'spec_augment', SpecAugment), + (conf_features, 'cutout_augment', CutoutAugment), + ] + for conf_tgt, key, klass in inner_classes: + if key in conf_tgt: + conf_tgt[key] = validate_and_fill(klass, conf_tgt[key]) + + for k in conf: + raise ValueError(f'Unknown key {k}') + + # Validate outer classes + conf_dataset = validate_and_fill( + AudioDataset, conf_dataset, + optional=['data_dir', 'labels', 'manifest_fpaths']) + + # klass = feature_class(conf_features['feature_type']) + # conf_features = validate_and_fill( + # klass, conf_features, ignore_unk=['feature_type']) + + conf_features = validate_and_fill( + FilterbankFeatures, conf_features) # , ignore_unk=['feature_type']) + + # Check params shared between classes + shared = ['sample_rate', 'max_duration', 'pad_to_max_duration'] + for sh in shared: + assert conf_dataset[sh] == conf_features[sh], ( + f'{sh} should match in Dataset and FeatureProcessor: ' + f'{conf_dataset[sh]}, {conf_features[sh]}') + + return conf_dataset, conf_features + + +def encoder(conf): + """Validate config for JasperEncoder and subsequent JasperBlocks""" + + # Validate, but don't overwrite with defaults + for blk in conf['quartznet']['encoder']['blocks']: + validate_and_fill(JasperBlock, blk, optional=['infilters'], + ignore_unk=['residual_dense']) + + return validate_and_fill(JasperEncoder, conf['quartznet']['encoder']) + + +def decoder(conf, n_classes): + decoder_kw = {'n_classes': n_classes, **conf['quartznet']['decoder']} + return validate_and_fill(JasperDecoderForCTC, decoder_kw) + + +def apply_config_overrides(conf, args): + if args.override_config is None: + return + for override_key_val in args.override_config: + key, val = override_key_val.split('=') + with suppress(TypeError, ValueError): + val = literal_eval(val) + 
apply_nested_config_override(conf, key, val) + + +def apply_nested_config_override(conf, key_str, val): + fields = key_str.split('.') + for f in fields[:-1]: + conf = conf[f] + f = fields[-1] + assert (f not in conf + or type(val) is type(conf[f]) + or (isinstance(val, Number) and isinstance(conf[f], Number))) + conf[f] = val diff --git a/PyTorch/SpeechRecognition/QuartzNet/quartznet/model.py b/PyTorch/SpeechRecognition/QuartzNet/quartznet/model.py new file mode 100644 index 00000000..95ec4bf8 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/quartznet/model.py @@ -0,0 +1,391 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +activations = { + "hardtanh": nn.Hardtanh, + "relu": nn.ReLU, + "selu": nn.SELU, +} + + +def init_weights(m, mode='xavier_uniform'): + if type(m) == nn.Conv1d or type(m) == MaskedConv1d: + if mode == 'xavier_uniform': + nn.init.xavier_uniform_(m.weight, gain=1.0) + elif mode == 'xavier_normal': + nn.init.xavier_normal_(m.weight, gain=1.0) + elif mode == 'kaiming_uniform': + nn.init.kaiming_uniform_(m.weight, nonlinearity="relu") + elif mode == 'kaiming_normal': + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + else: + raise ValueError("Unknown Initialization mode: {0}".format(mode)) + + elif type(m) == nn.BatchNorm1d: + if m.track_running_stats: + m.running_mean.zero_() + m.running_var.fill_(1) + m.num_batches_tracked.zero_() + if m.affine: + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + +def compute_new_kernel_size(kernel_size, kernel_width): + new_kernel_size = max(int(kernel_size * kernel_width), 1) + # If kernel is even shape, round up to make it odd + if new_kernel_size % 2 == 0: + new_kernel_size += 1 + return new_kernel_size + + +def get_same_padding(kernel_size, stride, dilation): + if stride > 1 and dilation > 1: + raise ValueError("Only stride OR dilation may be greater than 1") + return (kernel_size // 2) * dilation + + +class GroupShuffle(nn.Module): + def __init__(self, groups, channels): + super(GroupShuffle, self).__init__() + self.groups = groups + self.channels_per_group = channels // groups + + def forward(self, x): + sh = x.shape + x = x.view(-1, self.groups, self.channels_per_group, sh[-1]) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(-1, self.groups * self.channels_per_group, sh[-1]) + return x + + +class MaskedConv1d(nn.Conv1d): + """1D convolution with sequence masking + """ + __constants__ = ["masked"] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=False, use_mask=True, + heads=-1): + + # Jasper refactor compat + assert heads == -1 # Unsupported + masked = use_mask + + super(MaskedConv1d, self).__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + self.masked = masked + + def get_seq_len(self, lens): + pad, ks = self.padding[0], 
self.kernel_size[0] + return torch.div(lens + 2 * pad - self.dilation[0] * (ks - 1) - 1, + self.stride[0], rounding_mode='trunc') + 1 + + def forward(self, x, x_lens=None): + if self.masked: + max_len = x.size(2) + idxs = torch.arange(max_len, dtype=x_lens.dtype, device=x.device) + mask = idxs.expand(x_lens.size(0), max_len) >= x_lens.unsqueeze(1) + x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0) + x_lens = self.get_seq_len(x_lens) + + return super(MaskedConv1d, self).forward(x), x_lens + + +class JasperBlock(nn.Module): + __constants__ = ["conv_mask", "separable", "res", "mconv"] + + def __init__(self, infilters, filters, repeat=3, kernel_size=11, + kernel_size_factor=1, stride=1, dilation=1, padding='same', + dropout=0.2, activation=None, residual=True, groups=1, + separable=False, heads=-1, normalization="batch", + norm_groups=1, residual_panes=[], use_conv_masks=False): + super(JasperBlock, self).__init__() + + # Fix params being passed as list, but default to ints + wrap = lambda v: [v] if type(v) is int else v + kernel_size = wrap(kernel_size) + dilation = wrap(dilation) + padding = wrap(padding) + stride = wrap(stride) + + if padding != "same": + raise ValueError("currently only 'same' padding is supported") + + kernel_size_factor = float(kernel_size_factor) + if type(kernel_size) in (list, tuple): + kernel_size = [compute_new_kernel_size(k, kernel_size_factor) + for k in kernel_size] + else: + kernel_size = compute_new_kernel_size(kernel_size, + kernel_size_factor) + + padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0]) + self.conv_mask = use_conv_masks + self.separable = separable + + infilters_loop = infilters + conv = nn.ModuleList() + + for _ in range(repeat - 1): + conv.extend( + self._get_conv_bn_layer( + infilters_loop, filters, kernel_size=kernel_size, + stride=stride, dilation=dilation, padding=padding_val, + groups=groups, heads=heads, separable=separable, + normalization=normalization, norm_groups=norm_groups) + ) + conv.extend(self._get_act_dropout_layer(drop_prob=dropout, + activation=activation)) + infilters_loop = filters + + conv.extend( + self._get_conv_bn_layer( + infilters_loop, filters, kernel_size=kernel_size, stride=stride, + dilation=dilation, padding=padding_val, groups=groups, + heads=heads, separable=separable, normalization=normalization, + norm_groups=norm_groups) + ) + self.mconv = conv + + res_panes = residual_panes.copy() + self.dense_residual = residual + + if residual: + res_list = nn.ModuleList() + + if len(residual_panes) == 0: + res_panes = [infilters] + self.dense_residual = False + for ip in res_panes: + res_list.append(nn.ModuleList( + self._get_conv_bn_layer(ip, filters, kernel_size=1, + normalization=normalization, + norm_groups=norm_groups, stride=[1]) + )) + + self.res = res_list + else: + self.res = None + + self.mout = nn.Sequential(*self._get_act_dropout_layer( + drop_prob=dropout, activation=activation)) + + def _get_conv(self, in_channels, out_channels, kernel_size=11, stride=1, + dilation=1, padding=0, bias=False, groups=1, heads=-1, + separable=False): + + kw = {'in_channels': in_channels, 'out_channels': out_channels, + 'kernel_size': kernel_size, 'stride': stride, 'dilation': dilation, + 'padding': padding, 'bias': bias, 'groups': groups} + + if self.conv_mask: + return MaskedConv1d(**kw, heads=heads, use_mask=self.conv_mask) + else: + return nn.Conv1d(**kw) + + def _get_conv_bn_layer(self, in_channels, out_channels, kernel_size=11, + stride=1, dilation=1, padding=0, bias=False, + groups=1, heads=-1, 
separable=False, + normalization="batch", norm_groups=1): + if norm_groups == -1: + norm_groups = out_channels + + if separable: + layers = [ + self._get_conv(in_channels, in_channels, kernel_size, + stride=stride, dilation=dilation, padding=padding, + bias=bias, groups=in_channels, heads=heads), + self._get_conv(in_channels, out_channels, kernel_size=1, + stride=1, dilation=1, padding=0, bias=bias, + groups=groups), + ] + else: + layers = [ + self._get_conv(in_channels, out_channels, kernel_size, + stride=stride, dilation=dilation, + padding=padding, bias=bias, groups=groups) + ] + + if normalization == "group": + layers.append(nn.GroupNorm(num_groups=norm_groups, + num_channels=out_channels)) + elif normalization == "instance": + layers.append(nn.GroupNorm(num_groups=out_channels, + num_channels=out_channels)) + elif normalization == "layer": + layers.append(nn.GroupNorm(num_groups=1, num_channels=out_channels)) + + elif normalization == "batch": + layers.append(nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)) + else: + raise ValueError( + f"Normalization method ({normalization}) does not match" + f" one of [batch, layer, group, instance]." + ) + + if groups > 1: + layers.append(GroupShuffle(groups, out_channels)) + return layers + + def _get_act_dropout_layer(self, drop_prob=0.2, activation=None): + if activation is None: + activation = nn.Hardtanh(min_val=0.0, max_val=20.0) + layers = [activation, nn.Dropout(p=drop_prob)] + return layers + + def forward(self, xs, xs_lens=None): + if not self.conv_mask: + xs_lens = 0 + + # compute forward convolutions + out = xs[-1] + lens = xs_lens + for i, l in enumerate(self.mconv): + # if we're doing masked convolutions, we need to pass in and + # possibly update the sequence lengths + # if (i % 4) == 0 and self.conv_mask: + if isinstance(l, MaskedConv1d): + out, lens = l(out, lens) + else: + out = l(out) + + # compute the residuals + if self.res is not None: + for i, layer in enumerate(self.res): + res_out = xs[i] + for j, res_layer in enumerate(layer): + if isinstance(res_layer, MaskedConv1d): + res_out, _ = res_layer(res_out, xs_lens) + else: + res_out = res_layer(res_out) + + out = out + res_out + + # compute the output + out = self.mout(out) + if self.res is not None and self.dense_residual: + out = xs + [out] + else: + out = [out] + + return (out, lens) if self.conv_mask else (out, None) + + +class JasperEncoder(nn.Module): + __constants__ = ["use_conv_masks"] + + def __init__(self, in_feats, activation, frame_splicing=1, + init='xavier_uniform', use_conv_masks=False, blocks=[]): + super(JasperEncoder, self).__init__() + + self.use_conv_masks = use_conv_masks + self.layers = nn.ModuleList() + + in_feats *= frame_splicing + all_residual_panes = [] + for i, blk in enumerate(blocks): + + blk['activation'] = activations[activation]() + + has_residual_dense = blk.pop('residual_dense', False) + if has_residual_dense: + all_residual_panes += [in_feats] + blk['residual_panes'] = all_residual_panes + else: + blk['residual_panes'] = [] + + self.layers.append( + JasperBlock(in_feats, use_conv_masks=use_conv_masks, **blk)) + + in_feats = blk['filters'] + + self.apply(lambda x: init_weights(x, mode=init)) + + def forward(self, x, x_lens=None): + out, out_lens = [x], x_lens + for layer in self.layers: + out, out_lens = layer(out, out_lens) + + return out, out_lens + + +class JasperDecoderForCTC(nn.Module): + def __init__(self, in_feats, n_classes, init='xavier_uniform'): + super(JasperDecoderForCTC, self).__init__() + + self.layers = nn.Sequential( + 
nn.Conv1d(in_feats, n_classes, kernel_size=1, bias=True),) + self.apply(lambda x: init_weights(x, mode=init)) + + def forward(self, enc_out): + out = self.layers(enc_out[-1]).transpose(1, 2) + return F.log_softmax(out, dim=2) + + +class GreedyCTCDecoder(nn.Module): + @torch.no_grad() + def forward(self, log_probs): + return log_probs.argmax(dim=-1, keepdim=False).int() + + +class QuartzNet(nn.Module): + def __init__(self, encoder_kw, decoder_kw, transpose_in=False): + super(QuartzNet, self).__init__() + self.transpose_in = transpose_in + self.encoder = JasperEncoder(**encoder_kw) + self.decoder = JasperDecoderForCTC(**decoder_kw) + + def forward(self, x, x_lens=None): + if self.encoder.use_conv_masks: + assert x_lens is not None + enc, enc_lens = self.encoder(x, x_lens) + out = self.decoder(enc) + return out, enc_lens + else: + if self.transpose_in: + x = x.transpose(1, 2) + enc, _ = self.encoder(x) + out = self.decoder(enc) + return out # XXX torchscript refuses to output None + + # TODO Explicitly add x_lens=None for inference (now x can be a Tensor or tuple) + def infer(self, x): + if self.encoder.use_conv_masks: + return self.forward(x) + else: + ret = self.forward(x[0]) + return ret, len(ret) + + +class CTCLossNM: + def __init__(self, n_classes): + self._criterion = nn.CTCLoss(blank=n_classes-1, reduction='none') + + def __call__(self, log_probs, targets, input_length, target_length): + input_length = input_length.long() + target_length = target_length.long() + targets = targets.long() + loss = self._criterion(log_probs.transpose(1, 0), targets, + input_length, target_length) + # note that this is different from reduction = 'mean' + # because we are not dividing by target lengths + return torch.mean(loss) diff --git a/PyTorch/SpeechRecognition/QuartzNet/requirements.txt b/PyTorch/SpeechRecognition/QuartzNet/requirements.txt new file mode 100644 index 00000000..c3f92959 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/requirements.txt @@ -0,0 +1,6 @@ +tqdm==4.53.0 +librosa==0.8.0 +soundfile +sox==1.4.1 +pyyaml +git+git://github.com/NVIDIA/dllogger.git@26a0f8f1958de2c0c460925ff6102a4d2486d6cc#egg=dllogger diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/docker/build.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/docker/build.sh new file mode 100755 index 00000000..7558f799 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/docker/build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker build . 
--rm -t quartznet diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/docker/launch.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/docker/launch.sh new file mode 100755 index 00000000..9144fac9 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/docker/launch.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +SCRIPT_DIR=$(cd $(dirname $0); pwd) +QN_REPO=${QN_REPO:-"${SCRIPT_DIR}/../.."} + +DATA_DIR=${1:-${DATA_DIR-${QN_REPO}"/datasets"}} +CHECKPOINT_DIR=${2:-${CHECKPOINT_DIR:-${QN_REPO}"/checkpoints"}} +RESULT_DIR=${3:-${RESULT_DIR:-${QN_REPO}"/results"}} +PROGRAM_PATH=${PROGRAM_PATH} + +MOUNTS="" +MOUNTS+=" -v $DATA_DIR:/datasets" +MOUNTS+=" -v $CHECKPOINT_DIR:/checkpoints" +MOUNTS+=" -v $RESULT_DIR:/results" +MOUNTS+=" -v ${QN_REPO}:/quartznet" + +docker run -it --rm --gpus all\ + --env PYTHONDONTWRITEBYTECODE=1 \ + --shm-size=4g \ + --ulimit memlock=-1 \ + --ulimit stack=67108864 \ + $MOUNTS \ + -w /quartznet \ + quartznet:latest bash $PROGRAM_PATH diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/download_librispeech.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/download_librispeech.sh new file mode 100755 index 00000000..07a07b86 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/download_librispeech.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +DATA_SET="LibriSpeech" +DATA_ROOT_DIR="/datasets" +DATA_DIR="${DATA_ROOT_DIR}/${DATA_SET}" + +if [ ! -d "$DATA_DIR" ] +then + mkdir --mode 755 $DATA_DIR + + python utils/download_librispeech.py \ + utils/librispeech.csv \ + $DATA_DIR \ + -e ${DATA_ROOT_DIR}/ +else + echo "Directory $DATA_DIR already exists." +fi diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/evaluation.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/evaluation.sh new file mode 100755 index 00000000..6e51e19c --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/evaluation.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -a + +: ${PREDICTION_FILE:=} + +bash ./scripts/inference.sh "$@" diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/inference.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/inference.sh new file mode 100755 index 00000000..c45289e5 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/inference.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# Copyright (c) 2019, NVIDIA CORPORATION. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}} +: ${MODEL_CONFIG:=${2:-"configs/quartznet15x5_speedp-online-1.15_speca.yaml"}} +: ${OUTPUT_DIR:=${3:-"/results"}} +: ${CHECKPOINT:=${4:-"/checkpoints/quartznet_fp16.pt"}} +: ${DATASET:="test-other"} +: ${LOG_FILE:=""} +: ${CUDNN_BENCHMARK:=false} +: ${MAX_DURATION:=""} +: ${PAD_TO_MAX_DURATION:=false} +: ${NUM_GPUS:=1} +: ${NUM_STEPS:=0} +: ${NUM_WARMUP_STEPS:=0} +: ${AMP:=false} +: ${BATCH_SIZE:=64} +: ${EMA:=true} +: ${SEED:=0} +: ${DALI_DEVICE:="gpu"} +: ${CPU:=false} +: ${LOGITS_FILE:=} +: ${PREDICTION_FILE:="${OUTPUT_DIR}/${DATASET}.predictions"} + +mkdir -p "$OUTPUT_DIR" + +ARGS="--dataset_dir=$DATA_DIR" +ARGS+=" --val_manifest=$DATA_DIR/librispeech-${DATASET}-wav.json" +ARGS+=" --model_config=$MODEL_CONFIG" +ARGS+=" --output_dir=$OUTPUT_DIR" +ARGS+=" --batch_size=$BATCH_SIZE" +ARGS+=" --seed=$SEED" +ARGS+=" --dali_device=$DALI_DEVICE" +ARGS+=" --steps $NUM_STEPS" +ARGS+=" --warmup_steps $NUM_WARMUP_STEPS" + +[ "$AMP" = true ] && ARGS+=" --amp" +[ "$EMA" = true ] && ARGS+=" --ema" +[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark" +[ -n "$CHECKPOINT" ] && ARGS+=" --ckpt=${CHECKPOINT}" +[ -n "$LOG_FILE" ] && ARGS+=" --log_file $LOG_FILE" +[ -n "$PREDICTION_FILE" ] && ARGS+=" --save_prediction $PREDICTION_FILE" +[ -n "$LOGITS_FILE" ] && ARGS+=" --logits_save_to $LOGITS_FILE" +[ "$CPU" == "true" ] && ARGS+=" --cpu" +[ -n "$MAX_DURATION" ] && ARGS+=" --override_config input_val.audio_dataset.max_duration=$MAX_DURATION" \ + ARGS+=" --override_config input_val.filterbank_features.max_duration=$MAX_DURATION" +[ "$PAD_TO_MAX_DURATION" = true ] && ARGS+=" --override_config input_val.audio_dataset.pad_to_max_duration=True" \ + ARGS+=" --override_config input_val.filterbank_features.pad_to_max_duration=True" + +python -m torch.distributed.launch --nproc_per_node=$NUM_GPUS inference.py $ARGS diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/inference_benchmark.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/inference_benchmark.sh new file mode 100755 index 00000000..39ea23dc --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/inference_benchmark.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
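The benchmark script below sweeps batch sizes and utterance durations and relies on inference.py to log data/DNN latency percentiles. Those figures are derived from per-step timings roughly as follows; this is a simplified sketch that sorts the measurements instead of using the heap-based selection in durs_to_percentiles, and the 'avg' entry it reports is the mean rather than the median:

import numpy as np

def latency_percentiles(durations_s, ratios=(0.9, 0.95, 0.99)):
    # Drop the first few warm-up measurements and convert to milliseconds.
    lat = np.asarray(durations_s, dtype=float)[5:] * 1000.0
    lat.sort()
    out = {r: float(lat[min(int(len(lat) * r), len(lat) - 1)]) for r in ratios}
    out[0.5] = float(lat.mean())   # logged as the 'avg' value
    return out

print(latency_percentiles([0.012] * 40 + [0.020] * 10))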
+ +set -a + +: ${OUTPUT_DIR:=${3:-"/results"}} +: ${CUDNN_BENCHMARK:=true} +: ${PAD_TO_MAX_DURATION:=true} +: ${NUM_WARMUP_STEPS:=10} +: ${NUM_STEPS:=500} + +: ${AMP:=false} +: ${DALI_DEVICE:="cpu"} +: ${BATCH_SIZE_SEQ:="1 2 4 8 16"} +: ${MAX_DURATION_SEQ:="2 7 16.7"} + +for MAX_DURATION in $MAX_DURATION_SEQ; do + for BATCH_SIZE in $BATCH_SIZE_SEQ; do + + LOG_FILE="$OUTPUT_DIR/perf-infer_dali-${DALI_DEVICE}_amp-${AMP}_dur${MAX_DURATION}_bs${BATCH_SIZE}.json" + bash ./scripts/inference.sh "$@" + + done +done diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/preprocess_librispeech.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/preprocess_librispeech.sh new file mode 100755 index 00000000..7cfe5cc6 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/preprocess_librispeech.sh @@ -0,0 +1,51 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env bash + +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/train-clean-100 \ + --dest_dir /datasets/LibriSpeech/train-clean-100-wav \ + --output_json /datasets/LibriSpeech/librispeech-train-clean-100-wav.json \ + --speed 0.9 1.1 +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/train-clean-360 \ + --dest_dir /datasets/LibriSpeech/train-clean-360-wav \ + --output_json /datasets/LibriSpeech/librispeech-train-clean-360-wav.json \ + --speed 0.9 1.1 +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/train-other-500 \ + --dest_dir /datasets/LibriSpeech/train-other-500-wav \ + --output_json /datasets/LibriSpeech/librispeech-train-other-500-wav.json \ + --speed 0.9 1.1 + + +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/dev-clean \ + --dest_dir /datasets/LibriSpeech/dev-clean-wav \ + --output_json /datasets/LibriSpeech/librispeech-dev-clean-wav.json +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/dev-other \ + --dest_dir /datasets/LibriSpeech/dev-other-wav \ + --output_json /datasets/LibriSpeech/librispeech-dev-other-wav.json + + +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/test-clean \ + --dest_dir /datasets/LibriSpeech/test-clean-wav \ + --output_json /datasets/LibriSpeech/librispeech-test-clean-wav.json +python ./utils/convert_librispeech.py \ + --input_dir /datasets/LibriSpeech/test-other \ + --dest_dir /datasets/LibriSpeech/test-other-wav \ + --output_json /datasets/LibriSpeech/librispeech-test-other-wav.json diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/train.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/train.sh new file mode 100755 index 00000000..9c990b65 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/train.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export OMP_NUM_THREADS=1 + +: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}} +: ${MODEL_CONFIG:=${2:-"configs/quartznet15x5_speedp-online-1.15_speca.yaml"}} +: ${OUTPUT_DIR:=${3:-"/results"}} +: ${CHECKPOINT:=${4:-}} +: ${CUDNN_BENCHMARK:=true} +: ${NUM_GPUS:=8} +: ${AMP:=false} +: ${GPU_BATCH_SIZE:=72} +: ${GRAD_ACCUMULATION:=2} +: ${OPTIMIZER:=fused_novograd} +: ${LEARNING_RATE:=0.01} +: ${LR_POLICY:=exponential} +: ${LR_EXP_GAMMA:=0.981} +: ${EMA:=0.999} +: ${MULTI_TENSOR_EMA:=true} +: ${SEED:=0} +: ${EPOCHS:=260} +: ${WARMUP_EPOCHS:=2} +: ${HOLD_EPOCHS:=140} +: ${SAVE_FREQUENCY:=10} +: ${EPOCHS_THIS_JOB:=0} +: ${DALI_DEVICE:="gpu"} +: ${PAD_TO_MAX_DURATION:=false} +: ${EVAL_FREQUENCY:=241} +: ${PREDICTION_FREQUENCY:=241} +: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json \ + $DATA_DIR/librispeech-train-clean-360-wav.json \ + $DATA_DIR/librispeech-train-other-500-wav.json"} +: ${VAL_MANIFESTS:="$DATA_DIR/librispeech-dev-clean-wav.json"} + +mkdir -p "$OUTPUT_DIR" + +ARGS="--dataset_dir=$DATA_DIR" +ARGS+=" --val_manifests $VAL_MANIFESTS" +ARGS+=" --train_manifests $TRAIN_MANIFESTS" +ARGS+=" --model_config=$MODEL_CONFIG" +ARGS+=" --output_dir=$OUTPUT_DIR" +ARGS+=" --lr=$LEARNING_RATE" +ARGS+=" --gpu_batch_size=$GPU_BATCH_SIZE" +ARGS+=" --min_lr=1e-5" +ARGS+=" --lr_policy=$LR_POLICY" +ARGS+=" --lr_exp_gamma=$LR_EXP_GAMMA" +ARGS+=" --epochs=$EPOCHS" +ARGS+=" --warmup_epochs=$WARMUP_EPOCHS" +ARGS+=" --hold_epochs=$HOLD_EPOCHS" +ARGS+=" --epochs_this_job=$EPOCHS_THIS_JOB" +ARGS+=" --ema=$EMA" +ARGS+=" --seed=$SEED" +ARGS+=" --optimizer=$OPTIMIZER" +ARGS+=" --weight_decay=1e-3" +ARGS+=" --resume" +ARGS+=" --save_frequency=$SAVE_FREQUENCY" +ARGS+=" --keep_milestones 100 200" +ARGS+=" --save_best_from=200" +ARGS+=" --log_frequency=1" +ARGS+=" --eval_frequency=$EVAL_FREQUENCY" +ARGS+=" --prediction_frequency=$PREDICTION_FREQUENCY" +ARGS+=" --grad_accumulation=$GRAD_ACCUMULATION " +ARGS+=" --dali_device=$DALI_DEVICE" + +[ "$AMP" = true ] && ARGS+=" --amp" +[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark" +[ -n "$MAX_DURATION" ] && ARGS+=" --override_config input_train.audio_dataset.max_duration=$MAX_DURATION" \ + ARGS+=" --override_config input_train.filterbank_features.max_duration=$MAX_DURATION" +[ "$PAD_TO_MAX_DURATION" = true ] && ARGS+=" --override_config input_train.audio_dataset.pad_to_max_duration=True" \ + ARGS+=" --override_config input_train.filterbank_features.pad_to_max_duration=True" +[ -n "$CHECKPOINT" ] && ARGS+=" --ckpt=${CHECKPOINT}" +[ -n "$LOG_FILE" ] && ARGS+=" --log_file $LOG_FILE" +[ -n "$PRE_ALLOCATE" ] && ARGS+=" --pre_allocate_range $PRE_ALLOCATE" +[ "$MULTI_TENSOR_EMA" = true ] && ARGS+=" --multi_tensor_ema" +[ -n "$BENCHMARK_EPOCHS" ] && ARGS+=" --benchmark_epochs_num=$BENCHMARK_EPOCHS" + +GBS=$(($NUM_GPUS * $GPU_BATCH_SIZE * $GRAD_ACCUMULATION)) +if [ $GBS -ne $((8 * 144)) ]; then + echo -e "\nWARNING: Global batch size changed from $((8 * 144)) to ${GBS}." 
+ sleep 3 +fi +echo -e "\nAMP=$AMP,""${NUM_GPUS}x${GPU_BATCH_SIZE}x${GRAD_ACCUMULATION}" \ + "(global batch size ${GBS})\n" + +: ${DISTRIBUTED:="-m torch.distributed.launch --nproc_per_node=$NUM_GPUS"} +python $DISTRIBUTED train.py $ARGS diff --git a/PyTorch/SpeechRecognition/QuartzNet/scripts/train_benchmark.sh b/PyTorch/SpeechRecognition/QuartzNet/scripts/train_benchmark.sh new file mode 100755 index 00000000..d155eca5 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/scripts/train_benchmark.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -a + +: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}} +: ${OUTPUT_DIR:=${3:-"/results"}} +: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json"} + +: ${BENCHMARK_EPOCHS:=20} +: ${EPOCHS:=100000} +: ${RESUME:=false} +: ${SAVE_FREQUENCY:=100000} +: ${EVAL_FREQUENCY:=100000} +: ${LEARNING_RATE:=0.0001} + +: ${AMP:=false} +: ${EMA:=0} +: ${DALI_DEVICE:="gpu"} +: ${NUM_GPUS_SEQ:="8 4 1"} +: ${ACC_BATCH_SIZE:="144"} +: ${GRAD_ACC_SEQ:="4 2"} + +# A range of batch lengths for LibriSpeech +# with continuous speed perturbation (0.85, 1.15) and max duration 16.7s +: ${PRE_ALLOCATE:="1408 1920"} + +for NUM_GPUS in $NUM_GPUS_SEQ; do + for GRAD_ACCUMULATION in $GRAD_ACC_SEQ; do + + # Scale the number of epochs to the number of GPUs + BMARK=$((BENCHMARK_EPOCHS * NUM_GPUS / 8)) + BMARK=$((BMARK < 2 ? 2 : BMARK)) + BMARK=$((BMARK > BENCHMARK_EPOCHS ? BENCHMARK_EPOCHS : BMARK)) + EPOCHS_THIS_JOB=$((BMARK + 1)) + + GPU_BATCH_SIZE=$((ACC_BATCH_SIZE / $GRAD_ACCUMULATION * 8 / $NUM_GPUS)) + + LOG_FILE="$OUTPUT_DIR/perf-train_dali-${DALI_DEVICE}_amp-${AMP}_" + LOG_FILE+="1x${NUM_GPUS}x${GPU_BATCH_SIZE}x${GRAD_ACCUMULATION}.json" + BENCHMARK_EPOCHS=$BMARK bash ./scripts/train.sh "$@" + + done +done diff --git a/PyTorch/SpeechRecognition/QuartzNet/train.py b/PyTorch/SpeechRecognition/QuartzNet/train.py new file mode 100644 index 00000000..762657ad --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/train.py @@ -0,0 +1,558 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
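scripts/train.sh warns whenever the effective global batch size departs from the reference 8 * 144, and scripts/train_benchmark.sh picks GPU_BATCH_SIZE so that every NUM_GPUS/GRAD_ACCUMULATION combination in its sweep keeps that product constant. A quick sanity check of the arithmetic (illustrative only):

# Mirrors GPU_BATCH_SIZE=$((ACC_BATCH_SIZE / GRAD_ACCUMULATION * 8 / NUM_GPUS))
# and GBS=$((NUM_GPUS * GPU_BATCH_SIZE * GRAD_ACCUMULATION)) from the scripts.
ACC_BATCH_SIZE = 144
for num_gpus in (8, 4, 1):
    for grad_accumulation in (4, 2):
        gpu_batch_size = ACC_BATCH_SIZE // grad_accumulation * 8 // num_gpus
        gbs = num_gpus * gpu_batch_size * grad_accumulation
        assert gbs == 8 * 144
        print(f'{num_gpus} GPUs x {gpu_batch_size} x {grad_accumulation} acc -> global batch {gbs}')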
+ +import argparse +import copy +import os +import random +import time + +try: + import nvidia_dlprof_pytorch_nvtx as pyprof +except: + import pyprof +import torch +import amp_C +import numpy as np +import torch.cuda.profiler as profiler +import torch.distributed as dist +from apex.optimizers import FusedLAMB, FusedNovoGrad +from contextlib import suppress as empty_context + +from common import helpers +from common.dali.data_loader import DaliDataLoader +from common.dataset import AudioDataset, get_data_loader +from common.features import BaseFeatures, FilterbankFeatures +from common.helpers import (Checkpointer, greedy_wer, num_weights, print_once, + process_evaluation_epoch) +from common.optimizers import AdamW, lr_policy, Novograd +from common.tb_dllogger import flush_log, init_log, log +from common.utils import BenchmarkStats +from quartznet import config +from quartznet.model import CTCLossNM, GreedyCTCDecoder, QuartzNet + + +def parse_args(): + parser = argparse.ArgumentParser(description='QuartzNet') + + training = parser.add_argument_group('training setup') + training.add_argument('--epochs', default=400, type=int, + help='Number of epochs for the entire training; influences the lr schedule') + training.add_argument("--warmup_epochs", default=0, type=int, + help='Initial epochs of increasing learning rate') + training.add_argument("--hold_epochs", default=0, type=int, + help='Constant max learning rate epochs after warmup') + training.add_argument('--epochs_this_job', default=0, type=int, + help=('Run for a number of epochs with no effect on the lr schedule.' + 'Useful for re-starting the training.')) + training.add_argument('--cudnn_benchmark', action='store_true', default=True, + help='Enable cudnn benchmark') + training.add_argument('--amp', '--fp16', action='store_true', default=False, + help='Use pytorch native mixed precision training') + training.add_argument('--seed', default=1, type=int, help='Random seed') + training.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0), type=int, + help='GPU id used for distributed training') + training.add_argument('--pre_allocate_range', default=None, type=int, nargs=2, + help='Warmup with batches of length [min, max] before training') + training.add_argument('--pyprof', action='store_true', help='Enable pyprof profiling') + + optim = parser.add_argument_group('optimization setup') + optim.add_argument('--gpu_batch_size', default=32, type=int, + help='Batch size for a single forward/backward pass. 
' + 'The Effective batch size is gpu_batch_size * grad_accumulation.') + optim.add_argument('--lr', default=1e-3, type=float, + help='Peak learning rate') + optim.add_argument("--min_lr", default=1e-5, type=float, + help='minimum learning rate') + optim.add_argument("--lr_policy", default='exponential', type=str, + choices=['exponential', 'legacy'], help='lr scheduler') + optim.add_argument("--lr_exp_gamma", default=0.99, type=float, + help='gamma factor for exponential lr scheduler') + optim.add_argument('--weight_decay', default=1e-3, type=float, + help='Weight decay for the optimizer') + optim.add_argument('--grad_accumulation', '--update-freq', default=1, type=int, + help='Number of accumulation steps') + optim.add_argument('--optimizer', default='novograd', type=str, + choices=['novograd', 'adamw', 'lamb98', 'fused_novograd'], + help='Optimization algorithm') + optim.add_argument('--ema', type=float, default=0.0, + help='Discount factor for exp averaging of model weights') + optim.add_argument('--multi_tensor_ema', action='store_true', + help='Use multi_tensor_apply for EMA') + + io = parser.add_argument_group('feature and checkpointing setup') + io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'], + default='gpu', help='Use DALI pipeline for fast data processing') + io.add_argument('--resume', action='store_true', + help='Try to resume from last saved checkpoint.') + io.add_argument('--ckpt', default=None, type=str, + help='Path to a checkpoint for resuming training') + io.add_argument('--save_frequency', default=10, type=int, + help='Checkpoint saving frequency in epochs') + io.add_argument('--keep_milestones', default=[100, 200, 300], type=int, nargs='+', + help='Milestone checkpoints to keep from removing') + io.add_argument('--save_best_from', default=380, type=int, + help='Epoch on which to begin tracking best checkpoint (dev WER)') + io.add_argument('--eval_frequency', default=200, type=int, + help='Number of steps between evaluations on dev set') + io.add_argument('--log_frequency', default=25, type=int, + help='Number of steps between printing training stats') + io.add_argument('--prediction_frequency', default=100, type=int, + help='Number of steps between printing sample decodings') + io.add_argument('--model_config', type=str, required=True, + help='Path of the model configuration file') + io.add_argument('--train_manifests', type=str, required=True, nargs='+', + help='Paths of the training dataset manifest file') + io.add_argument('--val_manifests', type=str, required=True, nargs='+', + help='Paths of the evaluation datasets manifest files') + io.add_argument('--dataset_dir', required=True, type=str, + help='Root dir of dataset') + io.add_argument('--output_dir', type=str, required=True, + help='Directory for logs and checkpoints') + io.add_argument('--log_file', type=str, default=None, + help='Path to save the training logfile.') + io.add_argument('--benchmark_epochs_num', type=int, default=1, + help='Number of epochs accounted in final average throughput.') + io.add_argument('--override_config', type=str, action='append', + help='Overrides arbitrary config value.' 
+ ' Syntax: `--override_config nested.config.key=val`.') + + return parser.parse_args() + + +def reduce_tensor(tensor, num_gpus): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + return rt.true_divide(num_gpus) + + +def init_multi_tensor_ema(model, ema_model): + model_weights = list(model.state_dict().values()) + ema_model_weights = list(ema_model.state_dict().values()) + ema_overflow_buf = torch.cuda.IntTensor([0]) + return model_weights, ema_model_weights, ema_overflow_buf + + +def apply_multi_tensor_ema(decay, model_weights, ema_model_weights, overflow_buf): + amp_C.multi_tensor_axpby( + 65536, overflow_buf, + [ema_model_weights, model_weights, ema_model_weights], + decay, 1-decay, -1) + + +def apply_ema(model, ema_model, decay): + if not decay: + return + + sd = getattr(model, 'module', model).state_dict() + for k, v in ema_model.state_dict().items(): + v.copy_(decay * v + (1 - decay) * sd[k]) + + +@torch.no_grad() +def evaluate(epoch, step, val_loader, val_feat_proc, labels, model, + ema_model, ctc_loss, greedy_decoder, use_amp, use_dali=False): + + for model, subset in [(model, 'dev'), (ema_model, 'dev_ema')]: + if model is None: + continue + + model.eval() + start_time = time.time() + agg = {'losses': [], 'preds': [], 'txts': []} + + for batch in val_loader: + if use_dali: + # with DALI, the data is already on GPU + feat, feat_lens, txt, txt_lens = batch + if val_feat_proc is not None: + feat, feat_lens = val_feat_proc(feat, feat_lens) + else: + batch = [t.cuda(non_blocking=True) for t in batch] + audio, audio_lens, txt, txt_lens = batch + feat, feat_lens = val_feat_proc(audio, audio_lens) + + with torch.cuda.amp.autocast(enabled=use_amp): + log_probs, enc_lens = model(feat, feat_lens) + loss = ctc_loss(log_probs, txt, enc_lens, txt_lens) + pred = greedy_decoder(log_probs) + + agg['losses'] += helpers.gather_losses([loss]) + agg['preds'] += helpers.gather_predictions([pred], labels) + agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], labels) + + wer, loss = process_evaluation_epoch(agg) + log((epoch,), step, subset, {'loss': loss, 'wer': 100.0 * wer, + 'took': time.time() - start_time}) + model.train() + return wer + + +def main(): + args = parse_args() + + assert(torch.cuda.is_available()) + assert args.prediction_frequency % args.log_frequency == 0 + + torch.backends.cudnn.benchmark = args.cudnn_benchmark + + # set up distributed training + multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1 + if multi_gpu: + torch.cuda.set_device(args.local_rank) + dist.init_process_group(backend='nccl', init_method='env://') + world_size = dist.get_world_size() + print_once(f'Distributed training with {world_size} GPUs\n') + else: + world_size = 1 + + torch.manual_seed(args.seed + args.local_rank) + np.random.seed(args.seed + args.local_rank) + random.seed(args.seed + args.local_rank) + + init_log(args) + + cfg = config.load(args.model_config) + config.apply_config_overrides(cfg, args) + + symbols = helpers.add_ctc_blank(cfg['labels']) + + assert args.grad_accumulation >= 1 + batch_size = args.gpu_batch_size + + print_once('Setting up datasets...') + train_dataset_kw, train_features_kw = config.input(cfg, 'train') + val_dataset_kw, val_features_kw = config.input(cfg, 'val') + + use_dali = args.dali_device in ('cpu', 'gpu') + if use_dali: + assert train_dataset_kw['ignore_offline_speed_perturbation'], \ + "DALI doesn't support offline speed perturbation" + + # pad_to_max_duration is not supported by DALI - have simple padders + if 
train_features_kw['pad_to_max_duration']: + train_feat_proc = BaseFeatures( + pad_align=train_features_kw['pad_align'], + pad_to_max_duration=True, + max_duration=train_features_kw['max_duration'], + sample_rate=train_features_kw['sample_rate'], + window_size=train_features_kw['window_size'], + window_stride=train_features_kw['window_stride']) + train_features_kw['pad_to_max_duration'] = False + else: + train_feat_proc = None + + if val_features_kw['pad_to_max_duration']: + val_feat_proc = BaseFeatures( + pad_align=val_features_kw['pad_align'], + pad_to_max_duration=True, + max_duration=val_features_kw['max_duration'], + sample_rate=val_features_kw['sample_rate'], + window_size=val_features_kw['window_size'], + window_stride=val_features_kw['window_stride']) + val_features_kw['pad_to_max_duration'] = False + else: + val_feat_proc = None + + train_loader = DaliDataLoader(gpu_id=args.local_rank, + dataset_path=args.dataset_dir, + config_data=train_dataset_kw, + config_features=train_features_kw, + json_names=args.train_manifests, + batch_size=batch_size, + grad_accumulation_steps=args.grad_accumulation, + pipeline_type="train", + device_type=args.dali_device, + symbols=symbols) + + val_loader = DaliDataLoader(gpu_id=args.local_rank, + dataset_path=args.dataset_dir, + config_data=val_dataset_kw, + config_features=val_features_kw, + json_names=args.val_manifests, + batch_size=batch_size, + pipeline_type="val", + device_type=args.dali_device, + symbols=symbols) + else: + train_dataset_kw, train_features_kw = config.input(cfg, 'train') + train_dataset = AudioDataset(args.dataset_dir, + args.train_manifests, + symbols, + **train_dataset_kw) + train_loader = get_data_loader(train_dataset, + batch_size, + multi_gpu=multi_gpu, + shuffle=True, + num_workers=4) + train_feat_proc = FilterbankFeatures(**train_features_kw) + + val_dataset_kw, val_features_kw = config.input(cfg, 'val') + val_dataset = AudioDataset(args.dataset_dir, + args.val_manifests, + symbols, + **val_dataset_kw) + val_loader = get_data_loader(val_dataset, + batch_size, + multi_gpu=multi_gpu, + shuffle=False, + num_workers=4, + drop_last=False) + val_feat_proc = FilterbankFeatures(**val_features_kw) + + dur = train_dataset.duration / 3600 + dur_f = train_dataset.duration_filtered / 3600 + nsampl = len(train_dataset) + print_once(f'Training samples: {nsampl} ({dur:.1f}h, ' + f'filtered {dur_f:.1f}h)') + + if train_feat_proc is not None: + train_feat_proc.cuda() + if val_feat_proc is not None: + val_feat_proc.cuda() + + steps_per_epoch = len(train_loader) // args.grad_accumulation + + # set up the model + model = QuartzNet(encoder_kw=config.encoder(cfg), + decoder_kw=config.decoder(cfg, n_classes=len(symbols))) + model.cuda() + ctc_loss = CTCLossNM(n_classes=len(symbols)) + greedy_decoder = GreedyCTCDecoder() + + print_once(f'Model size: {num_weights(model) / 10**6:.1f}M params\n') + + # optimization + kw = {'lr': args.lr, 'weight_decay': args.weight_decay} + if args.optimizer == "novograd": + optimizer = Novograd(model.parameters(), **kw) + elif args.optimizer == "adamw": + optimizer = AdamW(model.parameters(), **kw) + elif args.optimizer == 'lamb98': + optimizer = FusedLAMB(model.parameters(), betas=(0.9, 0.98), eps=1e-9, + **kw) + elif args.optimizer == 'fused_novograd': + optimizer = FusedNovoGrad(model.parameters(), betas=(0.95, 0), + bias_correction=False, reg_inside_moment=True, + grad_averaging=False, **kw) + else: + raise ValueError(f'Invalid optimizer "{args.optimizer}"') + + scaler = 
torch.cuda.amp.GradScaler(enabled=args.amp) + + adjust_lr = lambda step, epoch, optimizer: lr_policy( + step, epoch, args.lr, optimizer, steps_per_epoch=steps_per_epoch, + warmup_epochs=args.warmup_epochs, hold_epochs=args.hold_epochs, + num_epochs=args.epochs, policy=args.lr_policy, min_lr=args.min_lr, + exp_gamma=args.lr_exp_gamma) + + if args.ema > 0: + ema_model = copy.deepcopy(model) + else: + ema_model = None + + if multi_gpu: + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.local_rank], output_device=args.local_rank) + if args.pyprof: + pyprof.init(enable_function_stack=True) + + # load checkpoint + meta = {'best_wer': 10**6, 'start_epoch': 0} + checkpointer = Checkpointer(args.output_dir, 'QuartzNet', + args.keep_milestones) + if args.resume: + args.ckpt = checkpointer.last_checkpoint() or args.ckpt + + if args.ckpt is not None: + checkpointer.load(args.ckpt, model, ema_model, optimizer, scaler, meta) + + start_epoch = meta['start_epoch'] + best_wer = meta['best_wer'] + epoch = 1 + step = start_epoch * steps_per_epoch + 1 + + if args.pyprof: + torch.autograd.profiler.emit_nvtx().__enter__() + profiler.start() + + # training loop + model.train() + if args.ema > 0.0: + mt_ema_params = init_multi_tensor_ema(model, ema_model) + # ema_model_weight_list, model_weight_list, overflow_buf_for_ema = ema_ + + # pre-allocate + if args.pre_allocate_range is not None: + n_feats = train_features_kw['n_filt'] + pad_align = train_features_kw['pad_align'] + a, b = args.pre_allocate_range + for n_frames in range(a, b + pad_align, pad_align): + print_once(f'Pre-allocation ({batch_size}x{n_feats}x{n_frames})...') + + feat = torch.randn(batch_size, n_feats, n_frames, device='cuda') + feat_lens = torch.ones(batch_size, device='cuda').fill_(n_frames) + txt = torch.randint(high=len(symbols)-1, size=(batch_size, 100), + device='cuda') + txt_lens = torch.ones(batch_size, device='cuda').fill_(100) + with torch.cuda.amp.autocast(enabled=args.amp): + log_probs, enc_lens = model(feat, feat_lens) + del feat + loss = ctc_loss(log_probs, txt, enc_lens, txt_lens) + loss.backward() + model.zero_grad() + torch.cuda.empty_cache() + + bmark_stats = BenchmarkStats() + + for epoch in range(start_epoch + 1, args.epochs + 1): + if multi_gpu and not use_dali: + train_loader.sampler.set_epoch(epoch) + + epoch_utts = 0 + epoch_loss = 0 + accumulated_batches = 0 + epoch_start_time = time.time() + epoch_eval_time = 0 + + for batch in train_loader: + + if accumulated_batches == 0: + step_loss = 0 + step_utts = 0 + step_start_time = time.time() + + if use_dali: + # with DALI, the data is already on GPU + feat, feat_lens, txt, txt_lens = batch + if train_feat_proc is not None: + feat, feat_lens = train_feat_proc(feat, feat_lens) + else: + batch = [t.cuda(non_blocking=True) for t in batch] + audio, audio_lens, txt, txt_lens = batch + feat, feat_lens = train_feat_proc(audio, audio_lens) + + # Use context manager to prevent redundant accumulation of gradients + if (multi_gpu and accumulated_batches + 1 < args.grad_accumulation): + ctx = model.no_sync() + else: + ctx = empty_context() + + with ctx: + with torch.cuda.amp.autocast(enabled=args.amp): + log_probs, enc_lens = model(feat, feat_lens) + + loss = ctc_loss(log_probs, txt, enc_lens, txt_lens) + loss /= args.grad_accumulation + + if multi_gpu: + reduced_loss = reduce_tensor(loss.data, world_size) + else: + reduced_loss = loss + + if torch.isnan(reduced_loss).any(): + print_once(f'WARNING: loss is NaN; skipping update') + continue + else: + step_loss 
+= reduced_loss.item() + step_utts += batch[0].size(0) * world_size + epoch_utts += batch[0].size(0) * world_size + accumulated_batches += 1 + + scaler.scale(loss).backward() + + if accumulated_batches % args.grad_accumulation == 0: + epoch_loss += step_loss + scaler.step(optimizer) + scaler.update() + + adjust_lr(step, epoch, optimizer) + optimizer.zero_grad() + + if args.ema > 0.0: + apply_multi_tensor_ema(args.ema, *mt_ema_params) + + if step % args.log_frequency == 0: + preds = greedy_decoder(log_probs) + wer, pred_utt, ref = greedy_wer(preds, txt, txt_lens, symbols) + + if step % args.prediction_frequency == 0: + print_once(f' Decoded: {pred_utt[:90]}') + print_once(f' Reference: {ref[:90]}') + + step_time = time.time() - step_start_time + log((epoch, step % steps_per_epoch or steps_per_epoch, steps_per_epoch), + step, 'train', + {'loss': step_loss, + 'wer': 100.0 * wer, + 'throughput': step_utts / step_time, + 'took': step_time, + 'lrate': optimizer.param_groups[0]['lr']}) + + step_start_time = time.time() + + if step % args.eval_frequency == 0: + tik = time.time() + wer = evaluate(epoch, step, val_loader, val_feat_proc, + symbols, model, ema_model, ctc_loss, + greedy_decoder, args.amp, use_dali) + + if wer < best_wer and epoch >= args.save_best_from: + checkpointer.save(model, ema_model, optimizer, scaler, + epoch, step, best_wer, is_best=True) + best_wer = wer + epoch_eval_time += time.time() - tik + + step += 1 + accumulated_batches = 0 + # end of step + + # DALI iterator need to be exhausted; + # if not using DALI, simulate drop_last=True with grad accumulation + if not use_dali and step > steps_per_epoch * epoch: + break + + epoch_time = time.time() - epoch_start_time + epoch_loss /= steps_per_epoch + log((epoch,), None, 'train_avg', {'throughput': epoch_utts / epoch_time, + 'took': epoch_time, + 'loss': epoch_loss}) + bmark_stats.update(epoch_utts, epoch_time, epoch_loss) + + if epoch % args.save_frequency == 0 or epoch in args.keep_milestones: + checkpointer.save(model, ema_model, optimizer, scaler, epoch, step, + best_wer) + + if 0 < args.epochs_this_job <= epoch - start_epoch: + print_once(f'Finished after {args.epochs_this_job} epochs.') + break + # end of epoch + + if args.pyprof: + profiler.stop() + torch.autograd.profiler.emit_nvtx().__exit__(None, None, None) + + log((), None, 'train_avg', bmark_stats.get(args.benchmark_epochs_num)) + + if epoch == args.epochs: + evaluate(epoch, step, val_loader, val_feat_proc, symbols, model, + ema_model, ctc_loss, greedy_decoder, args.amp, use_dali) + + checkpointer.save(model, ema_model, optimizer, scaler, epoch, step, + best_wer) + flush_log() + + +if __name__ == "__main__": + main() diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/__init__.py b/PyTorch/SpeechRecognition/QuartzNet/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py b/PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py new file mode 100644 index 00000000..91499751 --- /dev/null +++ b/PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py @@ -0,0 +1,81 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/__init__.py b/PyTorch/SpeechRecognition/QuartzNet/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py b/PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py
new file mode 100644
index 00000000..91499751
--- /dev/null
+++ b/PyTorch/SpeechRecognition/QuartzNet/utils/convert_librispeech.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#!/usr/bin/env python
+import argparse
+import os
+import glob
+import multiprocessing
+import json
+
+import pandas as pd
+
+from preprocessing_utils import parallel_preprocess
+
+parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.')
+parser.add_argument('--input_dir', type=str, required=True,
+                    help='LibriSpeech collection input dir')
+parser.add_argument('--dest_dir', type=str, required=True,
+                    help='Output dir')
+parser.add_argument('--output_json', type=str, default='./',
+                    help='Name of the output json file')
+parser.add_argument('-s', '--speed', type=float, nargs='*',
+                    help='Speed perturbation ratio')
+parser.add_argument('--target_sr', type=int, default=None,
+                    help='Target sample rate. '
+                         'Defaults to the input sample rate')
+parser.add_argument('--overwrite', action='store_true',
+                    help='Overwrite file if exists')
+parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(),
+                    help='Number of threads to use when processing audio files')
+args = parser.parse_args()
+
+args.input_dir = args.input_dir.rstrip('/')
+args.dest_dir = args.dest_dir.rstrip('/')
+
+
+def build_input_arr(input_dir):
+    txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'),
+                          recursive=True)
+    input_data = []
+    for txt_file in txt_files:
+        rel_path = os.path.relpath(txt_file, input_dir)
+        with open(txt_file) as fp:
+            for line in fp:
+                fname, _, transcript = line.partition(' ')
+                input_data.append(dict(input_relpath=os.path.dirname(rel_path),
+                                       input_fname=fname+'.flac',
+                                       transcript=transcript))
+    return input_data
+
+
+print("[%s] Scanning input dir..." % args.output_json)
+dataset = build_input_arr(input_dir=args.input_dir)
+
+print("[%s] Converting audio files..." % args.output_json)
+dataset = parallel_preprocess(dataset=dataset,
+                              input_dir=args.input_dir,
+                              dest_dir=args.dest_dir,
+                              target_sr=args.target_sr,
+                              speed=args.speed,
+                              overwrite=args.overwrite,
+                              parallel=args.parallel)
+
+print("[%s] Generating json..." % args.output_json)
+df = pd.DataFrame(dataset, dtype=object)
+
+# Save json with python. df.to_json() produces backslash-escaped file paths
+dataset = df.to_dict(orient='records')
+with open(args.output_json, 'w') as fp:
+    json.dump(dataset, fp, indent=2)
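For reference, each LibriSpeech *.trans.txt line has the form '<utterance-id> <TRANSCRIPT>', so build_input_arr() only needs to split on the first space and append '.flac' to recover the audio filename. A small illustration with a made-up line and relative path:

    line = '1089-134686-0000 HE HOPED THERE WOULD BE STEW FOR DINNER'
    fname, _, transcript = line.partition(' ')
    entry = dict(input_relpath='test-clean/1089/134686',   # hypothetical path
                 input_fname=fname + '.flac',              # '1089-134686-0000.flac'
                 transcript=transcript)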
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/download_librispeech.py b/PyTorch/SpeechRecognition/QuartzNet/utils/download_librispeech.py
new file mode 100644
index 00000000..ad36ad4e
--- /dev/null
+++ b/PyTorch/SpeechRecognition/QuartzNet/utils/download_librispeech.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python
+
+import os
+import argparse
+import pandas as pd
+
+from download_utils import download_file, md5_checksum, extract
+
+parser = argparse.ArgumentParser(description='Download, verify and extract dataset files')
+parser.add_argument('csv', type=str,
+                    help='CSV file with urls and checksums to download.')
+parser.add_argument('dest', type=str,
+                    help='Download destination folder.')
+parser.add_argument('-e', type=str, default=None,
+                    help='Extraction destination folder. Defaults to the download folder if not provided')
+parser.add_argument('--skip_download', action='store_true',
+                    help='Skip downloading the files')
+parser.add_argument('--skip_checksum', action='store_true',
+                    help='Skip checksum')
+parser.add_argument('--skip_extract', action='store_true',
+                    help='Skip extracting files')
+args = parser.parse_args()
+args.e = args.e or args.dest
+
+
+df = pd.read_csv(args.csv, delimiter=',')
+
+
+if not args.skip_download:
+    for url in df.url:
+        fname = url.split('/')[-1]
+        print("Downloading %s:" % fname)
+        download_file(url=url, dest_folder=args.dest, fname=fname)
+else:
+    print("Skipping file download")
+
+
+if not args.skip_checksum:
+    for index, row in df.iterrows():
+        url = row['url']
+        md5 = row['md5']
+        fname = url.split('/')[-1]
+        fpath = os.path.join(args.dest, fname)
+        print("Verifying %s: " % fname, end='')
+        ret = md5_checksum(fpath=fpath, target_hash=md5)
+        print("Passed" if ret else "Failed")
+else:
+    print("Skipping checksum")
+
+
+if not args.skip_extract:
+    for url in df.url:
+        fname = url.split('/')[-1]
+        fpath = os.path.join(args.dest, fname)
+        print("Decompressing %s:" % fpath)
+        extract(fpath=fpath, dest_folder=args.e)
+else:
+    print("Skipping file extraction")
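The verification stage simply re-hashes each archive and compares the digest with the md5 column of the CSV. A standalone equivalent for re-checking already-downloaded archives without this script (the download directory is a placeholder):

    import hashlib
    import os
    import pandas as pd

    DOWNLOAD_DIR = '/datasets/LibriSpeech/downloads'    # placeholder path

    def md5_of(path, chunk=1024 * 1024):
        h = hashlib.md5()
        with open(path, 'rb') as fp:
            for block in iter(lambda: fp.read(chunk), b''):
                h.update(block)
        return h.hexdigest()

    df = pd.read_csv('librispeech.csv')                 # run from the utils/ dir
    for _, row in df.iterrows():
        fname = row['url'].split('/')[-1]
        ok = md5_of(os.path.join(DOWNLOAD_DIR, fname)) == row['md5']
        print(fname, 'Passed' if ok else 'Failed')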
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/download_utils.py b/PyTorch/SpeechRecognition/QuartzNet/utils/download_utils.py
new file mode 100644
index 00000000..fd664440
--- /dev/null
+++ b/PyTorch/SpeechRecognition/QuartzNet/utils/download_utils.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python
+
+import hashlib
+import requests
+import os
+import tarfile
+import tqdm
+
+
+def download_file(url, dest_folder, fname, overwrite=False):
+    fpath = os.path.join(dest_folder, fname)
+    if os.path.isfile(fpath):
+        if overwrite:
+            print("Overwriting existing file")
+        else:
+            print("File exists, skipping download.")
+            return
+
+    tmp_fpath = fpath + '.tmp'
+
+    if not os.path.exists(os.path.dirname(tmp_fpath)):
+        os.makedirs(os.path.dirname(tmp_fpath))
+
+    r = requests.get(url, stream=True)
+    file_size = int(r.headers['Content-Length'])
+    chunk_size = 1024 * 1024  # 1MB
+    total_chunks = int(file_size / chunk_size)
+
+    with open(tmp_fpath, 'wb') as fp:
+        content_iterator = r.iter_content(chunk_size=chunk_size)
+        chunks = tqdm.tqdm(content_iterator, total=total_chunks,
+                           unit='MB', desc=fpath, leave=True)
+        for chunk in chunks:
+            fp.write(chunk)
+
+    os.rename(tmp_fpath, fpath)
+
+
+def md5_checksum(fpath, target_hash):
+    file_hash = hashlib.md5()
+    with open(fpath, "rb") as fp:
+        for chunk in iter(lambda: fp.read(1024*1024), b""):
+            file_hash.update(chunk)
+    return file_hash.hexdigest() == target_hash
+
+
+def extract(fpath, dest_folder):
+    if fpath.endswith('.tar.gz'):
+        mode = 'r:gz'
+    elif fpath.endswith('.tar'):
+        mode = 'r:'
+    else:
+        raise IOError('fpath has unknown extension: %s' % fpath)
+
+    with tarfile.open(fpath, mode) as tar:
+        members = tar.getmembers()
+        for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
+            tar.extract(path=dest_folder, member=member)
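These helpers are meant to be driven row by row, as download_librispeech.py above does; a minimal single-archive usage sketch (run from the utils/ directory so download_utils is importable; the URL and hash are taken from the CSVs below, and the destination path is a placeholder):

    import os
    from download_utils import download_file, md5_checksum, extract

    url = 'http://www.openslr.org/resources/12/dev-clean.tar.gz'
    md5 = '42e2234ba48799c1f50f24a7926300a1'
    dest = '/datasets/LibriSpeech/downloads'            # placeholder path

    fname = url.split('/')[-1]
    download_file(url=url, dest_folder=dest, fname=fname)
    assert md5_checksum(fpath=os.path.join(dest, fname), target_hash=md5)
    extract(fpath=os.path.join(dest, fname), dest_folder=dest)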
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/inference_librispeech.csv b/PyTorch/SpeechRecognition/QuartzNet/utils/inference_librispeech.csv
new file mode 100644
index 00000000..40dac4e0
--- /dev/null
+++ b/PyTorch/SpeechRecognition/QuartzNet/utils/inference_librispeech.csv
@@ -0,0 +1,5 @@
+url,md5
+http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1
+http://www.openslr.org/resources/12/dev-other.tar.gz,c8d0bcc9cca99d4f8b62fcc847357931
+http://www.openslr.org/resources/12/test-clean.tar.gz,32fa31d27d2e1cad72775fee3f4849a9
+http://www.openslr.org/resources/12/test-other.tar.gz,fb5a50374b501bb3bac4815ee91d3135
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/librispeech.csv b/PyTorch/SpeechRecognition/QuartzNet/utils/librispeech.csv
new file mode 100644
index 00000000..d48a9f8d
--- /dev/null
+++ b/PyTorch/SpeechRecognition/QuartzNet/utils/librispeech.csv
@@ -0,0 +1,8 @@
+url,md5
+http://www.openslr.org/resources/12/dev-clean.tar.gz,42e2234ba48799c1f50f24a7926300a1
+http://www.openslr.org/resources/12/dev-other.tar.gz,c8d0bcc9cca99d4f8b62fcc847357931
+http://www.openslr.org/resources/12/test-clean.tar.gz,32fa31d27d2e1cad72775fee3f4849a9
+http://www.openslr.org/resources/12/test-other.tar.gz,fb5a50374b501bb3bac4815ee91d3135
+http://www.openslr.org/resources/12/train-clean-100.tar.gz,2a93770f6d5c6c964bc36631d331a522
+http://www.openslr.org/resources/12/train-clean-360.tar.gz,c0e676e450a7ff2f54aeade5171606fa
+http://www.openslr.org/resources/12/train-other-500.tar.gz,d1a0fd59409feb2c614ce4d30c387708
diff --git a/PyTorch/SpeechRecognition/QuartzNet/utils/preprocessing_utils.py b/PyTorch/SpeechRecognition/QuartzNet/utils/preprocessing_utils.py
new file mode 100644
index 00000000..15605cea
--- /dev/null
+++ b/PyTorch/SpeechRecognition/QuartzNet/utils/preprocessing_utils.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python
+import os
+import multiprocessing
+import functools
+
+import librosa
+import sox
+
+from tqdm import tqdm
+
+
+def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
+               overwrite=True):
+    speed = speed or []
+    speed.append(1)
+    speed = list(set(speed))  # Make unique
+
+    input_fname = os.path.join(input_dir,
+                               data['input_relpath'],
+                               data['input_fname'])
+    input_sr = sox.file_info.sample_rate(input_fname)
+    target_sr = target_sr or input_sr
+
+    os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
+
+    output_dict = {}
+    output_dict['transcript'] = data['transcript'].lower().strip()
+    output_dict['files'] = []
+
+    fname = os.path.splitext(data['input_fname'])[0]
+    for s in speed:
+        output_fname = fname + '{}.wav'.format('' if s == 1 else '-{}'.format(s))
+        output_fpath = os.path.join(dest_dir,
+                                    data['input_relpath'],
+                                    output_fname)
+
+        if not os.path.exists(output_fpath) or overwrite:
+            cbn = sox.Transformer().speed(factor=s).convert(target_sr)
+            cbn.build(input_fname, output_fpath)
+
+        file_info = sox.file_info.info(output_fpath)
+        file_info['fname'] = os.path.join(os.path.basename(dest_dir),
+                                          data['input_relpath'],
+                                          output_fname)
+        file_info['speed'] = s
+        output_dict['files'].append(file_info)
+
+        if s == 1:
+            file_info = sox.file_info.info(output_fpath)
+            output_dict['original_duration'] = file_info['duration']
+            output_dict['original_num_samples'] = file_info['num_samples']
+
+    return output_dict
+
+
+def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed,
+                        overwrite, parallel):
+    with multiprocessing.Pool(parallel) as p:
+        func = functools.partial(preprocess,
+                                 input_dir=input_dir, dest_dir=dest_dir,
+                                 target_sr=target_sr, speed=speed,
+                                 overwrite=overwrite)
+        dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
+    return dataset
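preprocess() writes one WAV per speed factor (1.0 is always added to the list) and collects per-file metadata from sox.file_info.info(), so a run with --speed 0.9 1.1 produces three files per utterance. A rough illustration of the resulting naming and manifest entry; all paths, durations, and sample counts below are made up:

    fname = '1089-134686-0000'
    speeds = sorted(set([0.9, 1.1] + [1]))
    wavs = [fname + ('' if s == 1 else '-{}'.format(s)) + '.wav' for s in speeds]
    # -> ['1089-134686-0000-0.9.wav', '1089-134686-0000.wav', '1089-134686-0000-1.1.wav']

    entry = {
        'transcript': 'he hoped there would be stew for dinner',
        'files': [{'fname': 'test-clean-wav/1089/134686/' + w,   # hypothetical
                   'speed': s, 'duration': 6.2, 'num_samples': 99200}
                  for w, s in zip(wavs, speeds)],
        'original_duration': 6.2,
        'original_num_samples': 99200,
    }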