From 0663b67c1afe9979c9767ed63c7e719b1d0629b9 Mon Sep 17 00:00:00 2001 From: Przemek Strzelczyk <41076710+nvpstr@users.noreply.github.com> Date: Mon, 8 Jul 2019 22:51:28 +0200 Subject: [PATCH] Updating models --- .gitignore | 3 + MxNet/Classification/RN50v1.5/LICENSE | 1 - MxNet/Classification/RN50v1.5/__init__.py | 0 MxNet/Classification/RN50v1.5/data.py | 2 + PyTorch/Classification/RN50v1.5/img/.gitkeep | 0 .../RN50v1.5/resnet50v1.5/README.md | 0 PyTorch/Detection/SSD/Dockerfile | 2 +- .../SSD300_pytorch_18.08_inference_fp16.json | 31 - .../SSD300_pytorch_18.08_inference_fp32.json | 31 - .../SSD300_pytorch_18.08_training_fp16.json | 59 - .../SSD300_pytorch_18.08_training_fp32.json | 59 - .../SSD300_pytorch_19.01_inference_fp16.json | 34 - .../SSD300_pytorch_19.01_inference_fp32.json | 34 - .../SSD300_pytorch_19.01_training_fp16.json | 52 - .../SSD300_pytorch_19.01_training_fp32.json | 45 - .../SSD300_pytorch_19.05_inference_fp16.json | 34 - .../SSD300_pytorch_19.05_inference_fp32.json | 34 - .../Detection/SSD/qa/benchmark_performance.py | 81 - ...orch_18.08_fp16_full_run_acc_baseline.json | 1 - ...orch_18.08_fp32_full_run_acc_baseline.json | 1 - ...ch_19.01_fp16_1epoch_run_acc_baseline.json | 20 - ...ch_19.01_fp32_1epoch_run_acc_baseline.json | 20 - PyTorch/Detection/SSD/qa/qa_accuracy_main.py | 73 - PyTorch/Detection/SSD/qa/qa_perf_main.py | 199 - PyTorch/Detection/SSD/qa/qa_utils.py | 115 - .../qa/testing_DGX1V_8GPU_fp16_1epoch_run.sh | 4 - .../qa/testing_DGX1V_8GPU_fp16_full_run.sh | 4 - .../qa/testing_DGX1V_8GPU_fp32_1epoch_run.sh | 4 - .../qa/testing_DGX1V_8GPU_fp32_full_run.sh | 4 - .../testing_DGX1V_inference_benchmark_fp16.sh | 3 - .../testing_DGX1V_inference_benchmark_fp32.sh | 3 - .../testing_DGX1V_training_benchmark_fp16.sh | 3 - .../testing_DGX1V_training_benchmark_fp32.sh | 3 - PyTorch/Detection/SSD/src/coco_pipeline.py | 4 +- PyTorch/LanguageModeling/BERT/.dockerignore | 3 + PyTorch/LanguageModeling/BERT/.gitignore | 129 + PyTorch/LanguageModeling/BERT/Dockerfile | 27 + PyTorch/LanguageModeling/BERT/LICENSE | 202 + PyTorch/LanguageModeling/BERT/README.md | 554 + .../LanguageModeling/BERT/bert_config.json | 13 + .../BERT/create_pretraining_data.py | 472 + PyTorch/LanguageModeling/BERT/data/README.md | 30 + .../data/bookcorpus/clean_and_merge_text.py | 23 + .../data/bookcorpus/download_bookcorpus.sh | 9 + .../BERT/data/create_datasets_from_start.sh | 38 + .../BERT/data/glue/download_mrpc.sh | 7 + .../data/merge_datasets_after_creation.sh | 29 + .../BERT/data/squad/squad_download.sh | 60 + .../BERT/data/utils/config.sh | 24 + .../BERT/data/utils/create_mixed_dataset.py | 160 + .../data/utils/create_mixed_dataset_ids.py | 134 + .../BERT/data/utils/preprocessing.sh | 23 + .../data/utils/preprocessing_xargs_wrapper.sh | 15 + .../data/utils/sentence_segmentation_nltk.py | 28 + .../BERT/data/utils/shard_text_input_file.py | 47 + .../wikipedia_corpus/download_wikipedia.sh | 30 + .../wikipedia_corpus/remove_tags_and_clean.py | 39 + .../LanguageModeling/BERT/extract_features.py | 297 + PyTorch/LanguageModeling/BERT/file_utils.py | 249 + .../LanguageModeling/BERT/fused_adam_local.py | 205 + PyTorch/LanguageModeling/BERT/modeling.py | 1249 + PyTorch/LanguageModeling/BERT/optimization.py | 218 + .../LanguageModeling/BERT/requirements.txt | 13 + PyTorch/LanguageModeling/BERT/run_glue.py | 649 + .../LanguageModeling/BERT/run_pretraining.py | 417 + .../BERT/run_pretraining_inference.py | 300 + PyTorch/LanguageModeling/BERT/run_squad.py | 1143 + PyTorch/LanguageModeling/BERT/run_swag.py | 
561 + PyTorch/LanguageModeling/BERT/schedulers.py | 92 + .../BERT/scripts/data_download.sh | 38 + .../BERT/scripts/docker/build.sh | 9 + .../BERT/scripts/docker/launch.sh | 23 + PyTorch/LanguageModeling/BERT/scripts/run.sh | 184 + .../LanguageModeling/BERT/scripts/run_glue.sh | 63 + .../BERT/scripts/run_pretraining.sh | 152 + .../BERT/scripts/run_pretraining_inference.sh | 146 + .../BERT/scripts/run_squad.sh | 88 + .../LanguageModeling/BERT/scripts/run_swag.sh | 62 + .../BERT/scripts/start_pretraining.sh | 89 + PyTorch/LanguageModeling/BERT/tokenization.py | 391 + .../BERT/vocab/download_models.py | 123 + PyTorch/LanguageModeling/BERT/vocab/vocab | 30522 ++++++++++++++ PyTorch/Recommendation/NCF/Dockerfile | 2 +- PyTorch/Recommendation/NCF/README.md | 580 +- PyTorch/Recommendation/NCF/dataloading.py | 158 + .../Recommendation/NCF/download_dataset.sh | 7 +- .../Recommendation/NCF/img/dgx1v_32_curve.png | Bin 42574 -> 41910 bytes .../Recommendation/NCF/img/hr_histogram.png | Bin 28153 -> 30062 bytes PyTorch/Recommendation/NCF/logger/analyzer.py | 3 +- PyTorch/Recommendation/NCF/ncf.py | 303 +- PyTorch/Recommendation/NCF/neumf.py | 10 +- PyTorch/Recommendation/NCF/prepare_dataset.sh | 21 +- PyTorch/Recommendation/NCF/requirements.txt | 1 + .../maskrcnn_benchmark/engine/trainer.py | 8 +- .../maskrcnn_benchmark/utils/model_zoo.py | 11 +- .../MaskRCNN/pytorch/tools/train_net.py | 9 +- PyTorch/SpeechSynthesis/Tacotron2/Dockerfile | 7 +- PyTorch/SpeechSynthesis/Tacotron2/README.md | 715 +- .../Tacotron2/audio/audio_fp16.wav | Bin 854074 -> 831533 bytes .../Tacotron2/audio/audio_fp32.wav | Bin 823354 -> 773178 bytes .../Tacotron2/common/layers.py | 1 + ...s_audio_text_train_subset_625_filelist.txt | 625 + .../filelists/ljs_mel_text_filelist.txt | 13100 ++++++ .../filelists/ljs_mel_text_test_filelist.txt | 500 + .../filelists/ljs_mel_text_train_filelist.txt | 12500 ++++++ ...js_mel_text_train_subset_1250_filelist.txt | 1250 + ...js_mel_text_train_subset_2500_filelist.txt | 2500 ++ ...ljs_mel_text_train_subset_625_filelist.txt | 625 + .../filelists/ljs_mel_text_val_filelist.txt | 100 + .../Tacotron2/img/tacotron2_amp_loss.png | Bin 0 -> 31253 bytes .../Tacotron2/img/tacotron2_arch.png | Bin 0 -> 201553 bytes .../Tacotron2/img/tacotron2_fp32_loss.png | Bin 24531 -> 29853 bytes .../Tacotron2/img/waveglow_arch.png | Bin 0 -> 90239 bytes .../SpeechSynthesis/Tacotron2/inference.py | 33 +- .../Tacotron2/inference_perf.py | 21 +- PyTorch/SpeechSynthesis/Tacotron2/models.py | 27 +- .../train_tacotron2_AMP_DGX1_16GB_1GPU.sh | 2 + .../train_tacotron2_AMP_DGX1_16GB_4GPU.sh | 2 + .../train_tacotron2_AMP_DGX1_16GB_8GPU.sh | 2 + .../train_tacotron2_FP16_DGX1_16GB_1GPU.sh | 2 +- .../train_tacotron2_FP16_DGX1_16GB_4GPU.sh | 2 +- .../train_tacotron2_FP16_DGX1_16GB_8GPU.sh | 2 +- .../train_tacotron2_FP32_DGX1_16GB_1GPU.sh | 2 +- .../train_tacotron2_FP32_DGX1_16GB_4GPU.sh | 2 +- .../train_tacotron2_FP32_DGX1_16GB_8GPU.sh | 2 +- .../train_waveglow_AMP_DGX1_16GB_1GPU.sh | 2 + .../train_waveglow_AMP_DGX1_16GB_4GPU.sh | 2 + .../train_waveglow_AMP_DGX1_16GB_8GPU.sh | 2 + .../train_waveglow_FP16_DGX1_16GB_1GPU.sh | 2 +- .../train_waveglow_FP16_DGX1_16GB_4GPU.sh | 2 +- .../train_waveglow_FP16_DGX1_16GB_8GPU.sh | 2 +- .../train_waveglow_FP32_DGX1_16GB_1GPU.sh | 2 +- .../train_waveglow_FP32_DGX1_16GB_4GPU.sh | 2 +- .../train_waveglow_FP32_DGX1_16GB_8GPU.sh | 2 +- .../Tacotron2/preprocess_audio2mel.py | 64 + ...2_epochtrain_FP16_DGX1_16GB_8GPU_single.sh | 28 - ...2_epochtrain_FP32_DGX1_16GB_8GPU_single.sh | 27 - 
...2_inferbench_FP16_DGX1_16GB_1GPU_single.sh | 17 - ...2_inferbench_FP32_DGX1_16GB_1GPU_single.sh | 17 - ...2_trainbench_FP16_DGX1_16GB_8GPU_single.sh | 32 - ...2_trainbench_FP32_DGX1_16GB_8GPU_single.sh | 31 - ...w_epochtrain_FP16_DGX1_16GB_8GPU_single.sh | 27 - ...w_epochtrain_FP32_DGX1_16GB_8GPU_single.sh | 26 - ...w_inferbench_FP16_DGX1_16GB_1GPU_single.sh | 17 - ...w_inferbench_FP32_DGX1_16GB_1GPU_single.sh | 16 - ...w_trainbench_FP16_DGX1_16GB_8GPU_single.sh | 31 - ...w_trainbench_FP32_DGX1_16GB_8GPU_single.sh | 30 - ...n2_fulltrain_FP16_DGX1_16GB_8GPU_single.sh | 28 - ...n2_fulltrain_FP32_DGX1_16GB_8GPU_single.sh | 27 - ...ow_fulltrain_FP16_DGX1_16GB_8GPU_single.sh | 27 - ...ow_fulltrain_FP32_DGX1_16GB_8GPU_single.sh | 26 - ...rbench_BS1_FP16_DGX1_16GB_1GPU_single.json | 90 - ...rbench_BS1_FP32_DGX1_16GB_1GPU_single.json | 90 - ...rbench_BS4_FP32_DGX1_16GB_1GPU_single.json | 91 - ...rbench_BS8_FP16_DGX1_16GB_1GPU_single.json | 91 - ...rbench_BS1_FP16_DGX1_16GB_1GPU_single.json | 91 - ...rbench_BS1_FP32_DGX1_16GB_1GPU_single.json | 90 - ...rbench_BS4_FP32_DGX1_16GB_1GPU_single.json | 91 - ...rbench_BS8_FP16_DGX1_16GB_1GPU_single.json | 91 - .../Tacotron2/qa/check_curves.py | 170 - .../Tacotron2/qa/input_lengths.pt | Bin 20338 -> 0 bytes .../Tacotron2/qa/mel_padded.pt | Bin 1432356 -> 0 bytes .../qa/tacotron2_fp16-full-loss.json | 16037 -------- .../qa/tacotron2_fp16-infer-bs1.json | 53 - .../qa/tacotron2_fp16-infer-bs8.json | 53 - .../Tacotron2/qa/tacotron2_fp16-perf.json | 197 - .../qa/tacotron2_fp16-short-loss.json | 16037 -------- .../qa/tacotron2_fp32-full-loss.json | 16037 -------- .../qa/tacotron2_fp32-infer-bs1.json | 53 - .../qa/tacotron2_fp32-infer-bs4.json | 53 - .../Tacotron2/qa/tacotron2_fp32-perf.json | 197 - .../qa/tacotron2_fp32-short-loss.json | 16037 -------- .../Tacotron2/qa/text_padded.pt | Bin 3780347 -> 0 bytes .../Tacotron2/qa/waveglow_fp16-full-loss.json | 16037 -------- .../Tacotron2/qa/waveglow_fp16-infer-bs1.json | 53 - .../Tacotron2/qa/waveglow_fp16-infer-bs8.json | 53 - .../Tacotron2/qa/waveglow_fp16-perf.json | 197 - .../qa/waveglow_fp16-short-loss.json | 16037 -------- .../Tacotron2/qa/waveglow_fp32-full-loss.json | 16037 -------- .../Tacotron2/qa/waveglow_fp32-infer-bs1.json | 53 - .../Tacotron2/qa/waveglow_fp32-infer-bs4.json | 53 - .../Tacotron2/qa/waveglow_fp32-perf.json | 197 - .../qa/waveglow_fp32-short-loss.json | 16037 -------- .../Tacotron2/scripts/prepare_dataset.sh | 27 - .../Tacotron2/scripts/prepare_mels.sh | 21 + .../Tacotron2/scripts/train_tacotron2.sh | 2 +- .../Tacotron2/scripts/train_waveglow.sh | 2 +- .../Tacotron2/tacotron2/data_function.py | 2 +- .../Tacotron2/tacotron2/model.py | 69 +- PyTorch/SpeechSynthesis/Tacotron2/train.py | 128 +- .../Tacotron2/waveglow/model.py | 10 +- PyTorch/Translation/GNMT/README.md | 317 +- .../GNMT/scripts/tests/inference.sh | 44 + .../tests/reference_inference_performance | 6 + .../tests/reference_training_performance | 20 + .../GNMT/scripts/tests/train_1epoch.sh | 52 + .../GNMT/scripts/tests/train_bench.sh | 53 + .../GNMT/scripts/tests/train_full.sh | 51 + PyTorch/Translation/GNMT/seq2seq/utils.py | 12 +- PyTorch/Translation/GNMT/train.py | 14 +- PyTorch/Translation/GNMT/translate.py | 23 +- TensorFlow/Classification/RN50v1.5/README.md | 101 +- TensorFlow/Classification/RN50v1.5/main.py | 7 +- .../RN50v1.5/model/resnet_v1_5.py | 16 +- .../Classification/RN50v1.5/runtime/runner.py | 113 +- .../RN50v1.5/runtime/runner_utils.py | 28 +- .../benchmarking/DGX1V_inferbench_fp16.sh | 2 +- 
.../benchmarking/DGX1V_inferbench_fp32.sh | 2 +- .../benchmarking/DGX1V_trainbench_fp16.sh | 1 + .../benchmarking/DGX2_inferbench_fp16.sh | 2 +- .../benchmarking/DGX2_trainbench_fp32.sh | 4 +- .../scripts/benchmarking/benchmark.py | 35 +- .../Classification/RN50v1.5/utils/__init__.py | 4 +- .../RN50v1.5/utils/cmdline_helper.py | 25 +- .../RN50v1.5/utils/dali_utils.py | 154 + .../RN50v1.5/utils/data_utils.py | 21 + TensorFlow/Detection/SSD/download_all.sh | 12 +- .../SSD/qa/testing_DGX1V_accuracy.sh | 26 - .../SSD/qa/testing_DGX1V_accuracy_fp16.sh | 1 - .../SSD/qa/testing_DGX1V_accuracy_fp32.sh | 1 - .../SSD/qa/testing_DGX1V_convergence.sh | 20 - .../SSD/qa/testing_DGX1V_convergence_fp16.sh | 1 - .../SSD/qa/testing_DGX1V_convergence_fp32.sh | 1 - .../testing_DGX1V_inference_benchmark_fp16.sh | 20 - .../testing_DGX1V_inference_benchmark_fp32.sh | 20 - .../SSD/qa/testing_DGX1V_performance.sh | 24 - .../testing_DGX1V_training_benchmark_fp16.sh | 21 - .../testing_DGX1V_training_benchmark_fp32.sh | 19 - .../LanguageModeling/BERT/scripts/run.sub | 184 + .../BERT/scripts/start_pretraining.sh | 90 + .../Recommendation/NCF/prepare_dataset.sh | 14 +- TensorFlow/Segmentation/UNet_Medical/LICENSE | 2 +- .../qa/L0_joc_unet_medical_inferbench_fp16.sh | 44 - .../qa/L0_joc_unet_medical_inferbench_fp32.sh | 43 - .../qa/L0_joc_unet_medical_trainbench_fp16.sh | 43 - ...0_joc_unet_medical_trainbench_fp16_8gpu.sh | 54 - .../qa/L0_joc_unet_medical_trainbench_fp32.sh | 42 - ...0_joc_unet_medical_trainbench_fp32_8gpu.sh | 52 - .../qa/L2_joc_unet_medical_train_fp16.sh | 47 - .../qa/L2_joc_unet_medical_train_fp32.sh | 46 - .../L3_joc_unet_medical_convergence_fp16.sh | 47 - .../L3_joc_unet_medical_convergence_fp32.sh | 47 - TensorFlow/Translation/Transformer/Dockerfile | 43 + TensorFlow/Translation/Transformer/README.md | 395 + .../Translation/Transformer/encode_data.py | 49 + TensorFlow/Translation/Transformer/job.json | 30 + .../Translation/Transformer/requirements.txt | 35 + .../Transformer/scripts/data_helper.sh | 24 + .../Transformer/scripts/docker/build.sh | 7 + .../Transformer/scripts/docker/launch.sh | 11 + .../Transformer/scripts/download_data.sh | 6 + .../Transformer/scripts/run_training.sh | 52 + .../Transformer/scripts/verify_dataset.sh | 30 + .../Translation/Transformer/transformer.png | Bin 0 -> 196304 bytes .../Transformer/transformer/README.md | 255 + .../Transformer/transformer/__init__.py | 0 .../data/convert_utf8_to_tfrecord.py | 83 + .../transformer/data/process_data.py | 467 + .../Transformer/transformer/model/__init__.py | 0 .../transformer/model/attention_layer.py | 145 + .../transformer/model/beam_search.py | 527 + .../transformer/model/beam_search_test.py | 101 + .../transformer/model/embedding_layer.py | 83 + .../transformer/model/ffn_layer.py | 72 + .../transformer/model/fp16_utils.py | 35 + .../transformer/model/fused_layer_norm.py | 137 + .../model/mixed_precision_optimizer.py | 87 + .../transformer/model/model_params.py | 82 + .../transformer/model/model_utils.py | 136 + .../transformer/model/model_utils_test.py | 68 + .../transformer/model/transformer.py | 380 + .../Transformer/transformer/options.py | 106 + .../transformer/transformer_main.py | 458 + .../Transformer/transformer/translate.py | 335 + .../Transformer/transformer/utils/__init__.py | 0 .../transformer/utils/compute_bleu.py | 127 + .../transformer/utils/compute_bleu_test.py | 63 + .../Transformer/transformer/utils/dataset.py | 253 + .../transformer/utils/distributed_utils.py | 21 + 
.../Transformer/transformer/utils/metrics.py | 482 + .../transformer/utils/tokenizer.py | 641 + .../transformer/utils/tokenizer_test.py | 182 + ...vocab.translate_ende_wmt32k.32768.subwords | 33708 ++++++++++++++++ 283 files changed, 112904 insertions(+), 133470 deletions(-) create mode 100644 .gitignore create mode 100644 MxNet/Classification/RN50v1.5/__init__.py create mode 100644 PyTorch/Classification/RN50v1.5/img/.gitkeep create mode 100644 PyTorch/Classification/RN50v1.5/resnet50v1.5/README.md delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp16.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp32.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp16.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp32.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp16.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp32.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp16.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp32.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp16.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp32.json delete mode 100644 PyTorch/Detection/SSD/qa/benchmark_performance.py delete mode 100644 PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp16_full_run_acc_baseline.json delete mode 100644 PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp32_full_run_acc_baseline.json delete mode 100644 PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp16_1epoch_run_acc_baseline.json delete mode 100644 PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp32_1epoch_run_acc_baseline.json delete mode 100644 PyTorch/Detection/SSD/qa/qa_accuracy_main.py delete mode 100644 PyTorch/Detection/SSD/qa/qa_perf_main.py delete mode 100644 PyTorch/Detection/SSD/qa/qa_utils.py delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_1epoch_run.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_full_run.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_1epoch_run.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_full_run.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp16.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp32.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp16.sh delete mode 100644 PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp32.sh create mode 100644 PyTorch/LanguageModeling/BERT/.dockerignore create mode 100644 PyTorch/LanguageModeling/BERT/.gitignore create mode 100644 PyTorch/LanguageModeling/BERT/Dockerfile create mode 100644 PyTorch/LanguageModeling/BERT/LICENSE create mode 100644 PyTorch/LanguageModeling/BERT/README.md create mode 100644 PyTorch/LanguageModeling/BERT/bert_config.json create mode 100644 PyTorch/LanguageModeling/BERT/create_pretraining_data.py create mode 100644 PyTorch/LanguageModeling/BERT/data/README.md create mode 100644 PyTorch/LanguageModeling/BERT/data/bookcorpus/clean_and_merge_text.py create mode 100755 
PyTorch/LanguageModeling/BERT/data/bookcorpus/download_bookcorpus.sh create mode 100755 PyTorch/LanguageModeling/BERT/data/create_datasets_from_start.sh create mode 100755 PyTorch/LanguageModeling/BERT/data/glue/download_mrpc.sh create mode 100755 PyTorch/LanguageModeling/BERT/data/merge_datasets_after_creation.sh create mode 100755 PyTorch/LanguageModeling/BERT/data/squad/squad_download.sh create mode 100755 PyTorch/LanguageModeling/BERT/data/utils/config.sh create mode 100644 PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset.py create mode 100644 PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset_ids.py create mode 100755 PyTorch/LanguageModeling/BERT/data/utils/preprocessing.sh create mode 100755 PyTorch/LanguageModeling/BERT/data/utils/preprocessing_xargs_wrapper.sh create mode 100644 PyTorch/LanguageModeling/BERT/data/utils/sentence_segmentation_nltk.py create mode 100644 PyTorch/LanguageModeling/BERT/data/utils/shard_text_input_file.py create mode 100755 PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/download_wikipedia.sh create mode 100644 PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/remove_tags_and_clean.py create mode 100644 PyTorch/LanguageModeling/BERT/extract_features.py create mode 100644 PyTorch/LanguageModeling/BERT/file_utils.py create mode 100644 PyTorch/LanguageModeling/BERT/fused_adam_local.py create mode 100644 PyTorch/LanguageModeling/BERT/modeling.py create mode 100644 PyTorch/LanguageModeling/BERT/optimization.py create mode 100644 PyTorch/LanguageModeling/BERT/requirements.txt create mode 100644 PyTorch/LanguageModeling/BERT/run_glue.py create mode 100644 PyTorch/LanguageModeling/BERT/run_pretraining.py create mode 100644 PyTorch/LanguageModeling/BERT/run_pretraining_inference.py create mode 100644 PyTorch/LanguageModeling/BERT/run_squad.py create mode 100644 PyTorch/LanguageModeling/BERT/run_swag.py create mode 100644 PyTorch/LanguageModeling/BERT/schedulers.py create mode 100755 PyTorch/LanguageModeling/BERT/scripts/data_download.sh create mode 100644 PyTorch/LanguageModeling/BERT/scripts/docker/build.sh create mode 100644 PyTorch/LanguageModeling/BERT/scripts/docker/launch.sh create mode 100755 PyTorch/LanguageModeling/BERT/scripts/run.sh create mode 100755 PyTorch/LanguageModeling/BERT/scripts/run_glue.sh create mode 100644 PyTorch/LanguageModeling/BERT/scripts/run_pretraining.sh create mode 100644 PyTorch/LanguageModeling/BERT/scripts/run_pretraining_inference.sh create mode 100755 PyTorch/LanguageModeling/BERT/scripts/run_squad.sh create mode 100755 PyTorch/LanguageModeling/BERT/scripts/run_swag.sh create mode 100644 PyTorch/LanguageModeling/BERT/scripts/start_pretraining.sh create mode 100644 PyTorch/LanguageModeling/BERT/tokenization.py create mode 100644 PyTorch/LanguageModeling/BERT/vocab/download_models.py create mode 100644 PyTorch/LanguageModeling/BERT/vocab/vocab create mode 100644 PyTorch/Recommendation/NCF/dataloading.py create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_audio_text_train_subset_625_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_test_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_train_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_train_subset_1250_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_train_subset_2500_filelist.txt create 
mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_train_subset_625_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/filelists/ljs_mel_text_val_filelist.txt create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/img/tacotron2_amp_loss.png create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/img/tacotron2_arch.png create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/img/waveglow_arch.png create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/platform/train_tacotron2_AMP_DGX1_16GB_1GPU.sh create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/platform/train_tacotron2_AMP_DGX1_16GB_4GPU.sh create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/platform/train_tacotron2_AMP_DGX1_16GB_8GPU.sh create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/platform/train_waveglow_AMP_DGX1_16GB_1GPU.sh create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/platform/train_waveglow_AMP_DGX1_16GB_4GPU.sh create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/platform/train_waveglow_AMP_DGX1_16GB_8GPU.sh create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/preprocess_audio2mel.py delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_Tacotron2_epochtrain_FP16_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_Tacotron2_epochtrain_FP32_DGX1_16GB_8GPU_single.sh delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_Tacotron2_inferbench_FP16_DGX1_16GB_1GPU_single.sh delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_Tacotron2_inferbench_FP32_DGX1_16GB_1GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_Tacotron2_trainbench_FP16_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_Tacotron2_trainbench_FP32_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_WaveGlow_epochtrain_FP16_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_WaveGlow_epochtrain_FP32_DGX1_16GB_8GPU_single.sh delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_WaveGlow_inferbench_FP16_DGX1_16GB_1GPU_single.sh delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_WaveGlow_inferbench_FP32_DGX1_16GB_1GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_WaveGlow_trainbench_FP16_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L2_WaveGlow_trainbench_FP32_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L3_Tacotron2_fulltrain_FP16_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L3_Tacotron2_fulltrain_FP32_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L3_WaveGlow_fulltrain_FP16_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/L3_WaveGlow_fulltrain_FP32_DGX1_16GB_8GPU_single.sh delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/Tacotron2_inferbench_BS1_FP16_DGX1_16GB_1GPU_single.json delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/Tacotron2_inferbench_BS1_FP32_DGX1_16GB_1GPU_single.json delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/Tacotron2_inferbench_BS4_FP32_DGX1_16GB_1GPU_single.json delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/Tacotron2_inferbench_BS8_FP16_DGX1_16GB_1GPU_single.json delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/WaveGlow_inferbench_BS1_FP16_DGX1_16GB_1GPU_single.json delete mode 100755 
PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/WaveGlow_inferbench_BS1_FP32_DGX1_16GB_1GPU_single.json delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/WaveGlow_inferbench_BS4_FP32_DGX1_16GB_1GPU_single.json delete mode 100755 PyTorch/SpeechSynthesis/Tacotron2/qa/baselines/WaveGlow_inferbench_BS8_FP16_DGX1_16GB_1GPU_single.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/check_curves.py delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/input_lengths.pt delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/mel_padded.pt delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp16-full-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp16-infer-bs1.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp16-infer-bs8.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp16-perf.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp16-short-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp32-full-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp32-infer-bs1.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp32-infer-bs4.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp32-perf.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/tacotron2_fp32-short-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/text_padded.pt delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp16-full-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp16-infer-bs1.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp16-infer-bs8.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp16-perf.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp16-short-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp32-full-loss.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp32-infer-bs1.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp32-infer-bs4.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp32-perf.json delete mode 100644 PyTorch/SpeechSynthesis/Tacotron2/qa/waveglow_fp32-short-loss.json create mode 100644 PyTorch/SpeechSynthesis/Tacotron2/scripts/prepare_mels.sh create mode 100644 PyTorch/Translation/GNMT/scripts/tests/inference.sh create mode 100644 PyTorch/Translation/GNMT/scripts/tests/reference_inference_performance create mode 100644 PyTorch/Translation/GNMT/scripts/tests/reference_training_performance create mode 100644 PyTorch/Translation/GNMT/scripts/tests/train_1epoch.sh create mode 100644 PyTorch/Translation/GNMT/scripts/tests/train_bench.sh create mode 100644 PyTorch/Translation/GNMT/scripts/tests/train_full.sh create mode 100644 TensorFlow/Classification/RN50v1.5/utils/dali_utils.py delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_accuracy.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_accuracy_fp16.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_accuracy_fp32.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_convergence.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_convergence_fp16.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_convergence_fp32.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp16.sh delete mode 100644 
TensorFlow/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp32.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_performance.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp16.sh delete mode 100644 TensorFlow/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp32.sh create mode 100755 TensorFlow/LanguageModeling/BERT/scripts/run.sub create mode 100755 TensorFlow/LanguageModeling/BERT/scripts/start_pretraining.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L0_joc_unet_medical_inferbench_fp16.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L0_joc_unet_medical_inferbench_fp32.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L0_joc_unet_medical_trainbench_fp16.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L0_joc_unet_medical_trainbench_fp16_8gpu.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L0_joc_unet_medical_trainbench_fp32.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L0_joc_unet_medical_trainbench_fp32_8gpu.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L2_joc_unet_medical_train_fp16.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L2_joc_unet_medical_train_fp32.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L3_joc_unet_medical_convergence_fp16.sh delete mode 100644 TensorFlow/Segmentation/UNet_Medical/qa/L3_joc_unet_medical_convergence_fp32.sh create mode 100644 TensorFlow/Translation/Transformer/Dockerfile create mode 100644 TensorFlow/Translation/Transformer/README.md create mode 100644 TensorFlow/Translation/Transformer/encode_data.py create mode 100644 TensorFlow/Translation/Transformer/job.json create mode 100644 TensorFlow/Translation/Transformer/requirements.txt create mode 100755 TensorFlow/Translation/Transformer/scripts/data_helper.sh create mode 100755 TensorFlow/Translation/Transformer/scripts/docker/build.sh create mode 100755 TensorFlow/Translation/Transformer/scripts/docker/launch.sh create mode 100755 TensorFlow/Translation/Transformer/scripts/download_data.sh create mode 100755 TensorFlow/Translation/Transformer/scripts/run_training.sh create mode 100755 TensorFlow/Translation/Transformer/scripts/verify_dataset.sh create mode 100644 TensorFlow/Translation/Transformer/transformer.png create mode 100644 TensorFlow/Translation/Transformer/transformer/README.md create mode 100644 TensorFlow/Translation/Transformer/transformer/__init__.py create mode 100644 TensorFlow/Translation/Transformer/transformer/data/convert_utf8_to_tfrecord.py create mode 100644 TensorFlow/Translation/Transformer/transformer/data/process_data.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/__init__.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/attention_layer.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/beam_search.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/beam_search_test.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/embedding_layer.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/ffn_layer.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/fp16_utils.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/fused_layer_norm.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/mixed_precision_optimizer.py create mode 100644 
TensorFlow/Translation/Transformer/transformer/model/model_params.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/model_utils.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/model_utils_test.py create mode 100644 TensorFlow/Translation/Transformer/transformer/model/transformer.py create mode 100644 TensorFlow/Translation/Transformer/transformer/options.py create mode 100644 TensorFlow/Translation/Transformer/transformer/transformer_main.py create mode 100644 TensorFlow/Translation/Transformer/transformer/translate.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/__init__.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/compute_bleu.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/compute_bleu_test.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/dataset.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/distributed_utils.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/metrics.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/tokenizer.py create mode 100644 TensorFlow/Translation/Transformer/transformer/utils/tokenizer_test.py create mode 100644 TensorFlow/Translation/Transformer/transformer/vocab/vocab.translate_ende_wmt32k.32768.subwords diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..e1b77839 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +repos.cfg +repos_init.cfg +nvtool* diff --git a/MxNet/Classification/RN50v1.5/LICENSE b/MxNet/Classification/RN50v1.5/LICENSE index d6456956..261eeb9e 100644 --- a/MxNet/Classification/RN50v1.5/LICENSE +++ b/MxNet/Classification/RN50v1.5/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/MxNet/Classification/RN50v1.5/__init__.py b/MxNet/Classification/RN50v1.5/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/MxNet/Classification/RN50v1.5/data.py b/MxNet/Classification/RN50v1.5/data.py index cdd6c6fe..f3fd7812 100644 --- a/MxNet/Classification/RN50v1.5/data.py +++ b/MxNet/Classification/RN50v1.5/data.py @@ -1,5 +1,7 @@ +# ----------------------------------------------------------------------- # Copyright 2017-2018 The Apache Software Foundation # +# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information diff --git a/PyTorch/Classification/RN50v1.5/img/.gitkeep b/PyTorch/Classification/RN50v1.5/img/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/PyTorch/Classification/RN50v1.5/resnet50v1.5/README.md b/PyTorch/Classification/RN50v1.5/resnet50v1.5/README.md new file mode 100644 index 00000000..e69de29b diff --git a/PyTorch/Detection/SSD/Dockerfile b/PyTorch/Detection/SSD/Dockerfile index 8d912db0..03bf1162 100755 --- a/PyTorch/Detection/SSD/Dockerfile +++ b/PyTorch/Detection/SSD/Dockerfile @@ -1,4 +1,4 @@ -FROM nvcr.io/nvidia/pytorch:19.03-py3 +FROM nvcr.io/nvidia/pytorch:19.05-py3 # Set working directory WORKDIR /mlperf diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp16.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp16.json deleted file mode 100644 index 2113d5b6..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp16.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "model": "", - "ngpus": [1, 4, 8], - "bs": [2, 4, 8, 16, 32, 64, 128], - "metric_keys": ["images_per_second"], - "metrics": { - "1": { - "2": { - "images_per_second": 191.25867003414876 - }, - "4": { - "images_per_second": 340.9537905548054 - }, - "8": { - "images_per_second": 517.2612062140391 - }, - "16": { - "images_per_second": 711.5516679788083 - }, - "32": { - "images_per_second": 812.9203401838566 - }, - "64": { - "images_per_second": 951.7432815456556 - }, - "128": { - "images_per_second": 876.1868813828711 - } - } - } -} \ No newline at end of file diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp32.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp32.json deleted file mode 100644 index bf7f5a6a..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_inference_fp32.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "model": "", - "ngpus": [1, 4, 8], - "bs": [2, 4, 8, 16, 32, 64, 128], - "metric_keys": ["images_per_second"], - "metrics": { - "1": { - "2": { - "images_per_second": 174.58768325581374 - }, - "4": { - "images_per_second": 254.24180710755593 - }, - "8": { - "images_per_second": 308.95847419165545 - }, - "16": { - "images_per_second": 419.60746029488445 - }, - "32": { - "images_per_second": 453.81433823995565 - }, - "64": { - "images_per_second": 592.6385687558369 - }, - "128": { - "images_per_second": 603.8453409148115 - } - } - } -} \ No newline at end of file diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp16.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp16.json deleted file mode 100644 index 8c841167..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp16.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "model": "", - "ngpus": [1, 4, 8], - "bs": [2, 4, 8, 16, 32, 64], - "metric_keys": ["images_per_second"], - "metrics": { - "1": { - "2": { - "images_per_second": 40.71944999694824 - }, - "4": { - "images_per_second": 68.22257804870605 - }, - "8": { - "images_per_second": 121.42024612426758 - }, - "16": { - "images_per_second": 159.56442260742188 - }, - "32": { - "images_per_second": 185.69010543823242 - } - }, - "4": { - "2": { - "images_per_second": 40.75998783111572 - }, - "4": { - "images_per_second": 75.58991050720215 - }, - "8": { - "images_per_second": 142.64888381958008 - }, - "16": { - 
"images_per_second": 256.07005310058594 - }, - "32": { - "images_per_second": 300.8989944458008 - } - }, - "8": { - "2": { - "images_per_second": 61.28578186035156 - }, - "4": { - "images_per_second": 119.46021270751953 - }, - "8": { - "images_per_second": 231.7295379638672 - }, - "16": { - "images_per_second": 430.5494079589844 - }, - "32": { - "images_per_second": 454.2975769042969 - } - } - } -} \ No newline at end of file diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp32.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp32.json deleted file mode 100644 index 0f5ca347..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_18.08_training_fp32.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "model": "", - "ngpus": [1, 4, 8], - "bs": [2, 4, 8, 16, 32], - "metric_keys": ["images_per_second"], - "metrics": { - "1": { - "2": { - "images_per_second": 48.635780334472656 - }, - "4": { - "images_per_second": 66.06407419840494 - }, - "8": { - "images_per_second": 83.91736857096353 - }, - "16": { - "images_per_second": 102.67040761311848 - }, - "32": { - "images_per_second": 110.02347819010416 - } - }, - "4": { - "2": { - "images_per_second": 41.199180603027344 - }, - "4": { - "images_per_second": 79.85076141357422 - }, - "8": { - "images_per_second": 145.39981587727863 - }, - "16": { - "images_per_second": 247.95855712890625 - }, - "32": { - "images_per_second": 341.29132080078125 - } - }, - "8": { - "2": { - "images_per_second": 63.07561111450195 - }, - "4": { - "images_per_second": 123.25757344563802 - }, - "8": { - "images_per_second": 237.3413340250651 - }, - "16": { - "images_per_second": 376.59598795572913 - }, - "32": { - "images_per_second": 507.9451497395833 - } - } - } -} \ No newline at end of file diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp16.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp16.json deleted file mode 100644 index 9d303798..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp16.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "bs" : [ - 2, - 4, - 8, - 16, - 32 - ], - "metric_keys" : [ - "images_per_second" - ], - "metrics" : { - "1" : { - "16" : { - "images_per_second" : 470.099200788709 - }, - "2" : { - "images_per_second" : 163.117099093173 - }, - "32" : { - "images_per_second" : 520.538879400471 - }, - "4" : { - "images_per_second" : 296.604178917743 - }, - "8" : { - "images_per_second" : 412.522394180558 - } - } - }, - "ngpus" : [ - 1 - ] -} diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp32.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp32.json deleted file mode 100644 index 9049af82..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_inference_fp32.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "bs" : [ - 2, - 4, - 8, - 16, - 32 - ], - "metric_keys" : [ - "images_per_second" - ], - "metrics" : { - "1" : { - "16" : { - "images_per_second" : 280.570005994299 - }, - "2" : { - "images_per_second" : 147.914221468741 - }, - "32" : { - "images_per_second" : 302.430594818483 - }, - "4" : { - "images_per_second" : 201.622430560779 - }, - "8" : { - "images_per_second" : 228.159516872363 - } - } - }, - "ngpus" : [ - 1 - ] -} diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp16.json 
b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp16.json deleted file mode 100644 index 510d548d..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp16.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "bs" : [ - 2, - 4, - 8, - 16, - 32 - ], - "metric_keys" : [ - "images_per_second" - ], - "metrics" : { - "1" : { - "16" : { - "images_per_second" : 192.623916625977 - }, - "2" : { - "images_per_second" : 48.7488899230957 - }, - "32" : { - "images_per_second" : 204.250648498535 - }, - "4" : { - "images_per_second" : 95.4697418212891 - }, - "8" : { - "images_per_second" : 164.66495513916 - } - }, - "4" : { - "16" : { - "images_per_second" : 701.366027832031 - }, - "2" : { - "images_per_second" : 154.449935913086 - }, - "32" : { - "images_per_second" : 771.171325683594 - }, - "4" : { - "images_per_second" : 300.332641601562 - }, - "8" : { - "images_per_second" : 550.924163818359 - } - } - }, - "ngpus" : [ - 1, - 4 - ] -} diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp32.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp32.json deleted file mode 100644 index c96f1ba9..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp32.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "bs" : [ - 2, - 4, - 8, - 16 - ], - "metric_keys" : [ - "images_per_second" - ], - "metrics" : { - "1" : { - "16" : { - "images_per_second" : 121.772495269775 - }, - "2" : { - "images_per_second" : 56.0 - }, - "4" : { - "images_per_second" : 90.5315437316895 - }, - "8" : { - "images_per_second" : 103.113033294678 - } - }, - "4" : { - "16" : { - "images_per_second" : 472.226806640625 - }, - "2" : { - "images_per_second" : 184.061141967773 - }, - "4" : { - "images_per_second" : 324.639801025391 - }, - "8" : { - "images_per_second" : 391.055908203125 - } - } - }, - "ngpus" : [ - 1, - 4 - ] -} diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp16.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp16.json deleted file mode 100644 index 0c628beb..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp16.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "bs" : [ - 2, - 4, - 8, - 16, - 32 - ], - "metric_keys" : [ - "images_per_second" - ], - "metrics" : { - "1" : { - "16" : { - "images_per_second" : 478.225033 - }, - "2" : { - "images_per_second" : 148.5965123 - }, - "32" : { - "images_per_second" : 531.1827376 - }, - "4" : { - "images_per_second" : 283.3305197 - }, - "8" : { - "images_per_second" : 418.7012914 - } - } - }, - "ngpus" : [ - 1 - ] -} diff --git a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp32.json b/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp32.json deleted file mode 100644 index 0404e674..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp32.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "bs" : [ - 2, - 4, - 8, - 16, - 32 - ], - "metric_keys" : [ - "images_per_second" - ], - "metrics" : { - "1" : { - "16" : { - "images_per_second" : 280.4733254 - }, - "2" : { - "images_per_second" : 143.8231571 - }, - "32" : { - "images_per_second" : 305.4504603 - }, - "4" : { - "images_per_second" : 202.6915644 - }, - "8" : { - "images_per_second" : 230.262872 - } - } - }, - "ngpus" : [ - 1 - ] -} diff --git a/PyTorch/Detection/SSD/qa/benchmark_performance.py 
b/PyTorch/Detection/SSD/qa/benchmark_performance.py deleted file mode 100644 index 5751dbac..00000000 --- a/PyTorch/Detection/SSD/qa/benchmark_performance.py +++ /dev/null @@ -1,81 +0,0 @@ -import argparse -import subprocess - -from qa.qa_utils import compare_benchmarks, load_json, save_json, OKBLUE, ENDC, FAIL - - - -# parsing -def parse_testscript_args(): - parser = argparse.ArgumentParser(description='PyTorch Benchmark Tests') - parser.add_argument('--bs', default=[1], type=int, nargs='+') - parser.add_argument('--ngpus', default=[1], type=int, nargs='+') - parser.add_argument('--benchmark-mode', default='training', choices=['training', 'inference'], - help='benchmark training or inference', required=True) - parser.add_argument('--bench-iterations', type=int, default=20, metavar='N', - help='Run N iterations while benchmarking (ignored when training and validation)') - parser.add_argument('--bench-warmup', type=int, default=10, metavar='N', - help='Number of warmup iterations for benchmarking') - parser.add_argument('--fp16', action='store_true', help='Run model in mixed precision.') - parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', - help='number of data loading workers') - parser.add_argument('--data', type=str, metavar='', required=True, - help='path to the dataset') - parser.add_argument('--results-file', default='experiment_raport.json', type=str, - help='file in which to store JSON experiment raport') - parser.add_argument('--benchmark-file', type=str, metavar='FILE', required=True, - help='path to the file with baselines') - return parser.parse_args() - - -# job command -command_template = 'python3 {launcher} qa/qa_perf_main.py --bs {bs} --ebs {bs} ' \ - '--benchmark-mode {mode} --benchmark-warmup {bw} --benchmark-iterations {bi} {fp16} ' \ - '--backbone resnet50 --seed 1 --data {data} --results-file {results_file} --benchmark-file {benchmark_file}' - -if __name__ == '__main__': - args = parse_testscript_args() - - fp16 = '--fp16' if args.fp16 else '' - - # create results json file - # todo: maybe some template json file? - results = {'ngpus': args.ngpus, - 'bs': args.bs, - 'metric_keys': ['images_per_second'], - 'metrics': {}} - - for gpu in args.ngpus: - results['metrics'][str(gpu)] = {} - for bs in args.bs: - results['metrics'][str(gpu)][str(bs)] = {'images_per_second': None} - - save_json(args.results_file, results) - - # run qa_perf_main.py tests one by one - for gpu in args.ngpus: - launcher = '' if gpu == 1 else '-m torch.distributed.launch --nproc_per_node={}'.format(gpu) - for bs in args.bs: - print('#' * 80) - command = command_template.format(launcher=launcher, bs=bs, workers=args.workers, mode=args.benchmark_mode, - bw=args.bench_warmup, bi=args.bench_iterations, fp16=fp16, - data=args.data, results_file=args.results_file, - benchmark_file=args.benchmark_file) - - print('Running "{}"'.format(command)) - - process = subprocess.Popen(command, shell=True) - output, error = process.communicate() - - if error is not None: - print(FAIL + 'Program exited with status {}. Data has not been collected'.format(error) + ENDC) - # elif results['metrics'][str(gpu)][str(bs)]['images_per_second'] is None: - # print(WARNING + 'Program did not end sucessfully. Data has not been collected.' + ENDC) - else: - print(OKBLUE + 'Program ended sucessfully. Data has been collected.' 
+ ENDC) - - results_data = load_json(args.results_file) - benchmark_data = load_json(args.benchmark_file) - exit_code = compare_benchmarks(results_data, benchmark_data, args, 0.16 if args.benchmark_mode == 'inference' else 0.1) - print(exit_code) - exit(exit_code) diff --git a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp16_full_run_acc_baseline.json b/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp16_full_run_acc_baseline.json deleted file mode 100644 index 95368764..00000000 --- a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp16_full_run_acc_baseline.json +++ /dev/null @@ -1 +0,0 @@ -{"metric_keys": ["train.loss", "val.acc"], "metrics": {"train.loss": [8.812795396454991, 5.914838795058071, 6, 5.092440919584583, 4.887887316499735, 4.744666463422983, 4.694560192557922, 4.567333741479565, 4.492525351620137, 6, 4.408311570055099, 4.334232046614567, 6, 4.263646488106407, 4.2514614595596445, 4.2171871953656055, 4.206751160226014, 4.1795772798196715, 4.156515416099515, 6, 4.108870625495911, 4.0985876759066855, 4.075221928967139, 4.080158276849438, 6, 4.033980131669857, 4.037739227952915, 6, 3.99941903534935, 6, 3.9875937877263565, 3.971811039999583, 3.980771179282509, 3.953947089124455, 3.9305202960968018, 3.9366443781873546, 3.9252991879350754, 3.8827156307395367, 3.9388060424005102, 3.88922161618695, 3.8874285418914396, 6, 3.8936942113018453, 3.537499847891029, 3.4058184228089177, 6, 6, 3.3219671837627627, 3.295458280363458, 3.262115957955606, 6, 6, 6, 3.2190717260910433, 3.213117691627236, 3.1739242191397987, 3.1791626058811704, 3.2088054501854177, 3.1719801842385507, 3.187761370792139, 3.1809213312432236, 3.1823803410259397, 3.1752594631311677, 3.1709555600928425, 3.1823559530957817], "val.acc": [0.025120322205631106, 0.06065902615325462, 0.08224594352985645, 0.09868630608427395, 0.11402055039858493, 0.11779455253460233, 0.1232203941357061, 0.13708232144631768, 0.13614397127135028, 0.13289094380937685, 0.14004009449749777, 0.1369843423424096, 0.13877603069457692, 0.15418866425831707, 0.1500001994042602, 0.1542573219664272, 0.14771151227315413, 0.15896497766306272, 0.1600724682809656, 0.15881491661088476, 0.16213217020726906, 0.16466781280171408, 0.15738430149539484, 0.16634155547369375, 0.1623110334880526, 0.16394517553182106, 0.1494171026560053, 0.16762167601953265, 0.16063595691096758, 0.16982898253523193, 0.17321918229909394, 0.17242960413896102, 0.1625123530546557, 0.18330429802960516, 0.16333127233412115, 0.17973452067250242, 0.16699022570278652, 0.17183956548028687, 0.17168756775917593, 0.17547718325478198, 0.1750019046551496, 0.18416070771679066, 0.1711460087987496, 0.231325087097653, 0.23716038401167305, 0.23886896590018106, 0.2403412383214709, 0.24380227870861898, 0.24383605475007317, 0.2449733300818802, 0.24508423152154857, 0.24252172333110344, 0.24566254540226004, 0.24661345705692578, 0.25123807624083877, 0.25184439401895475, 0.2519010236397111, 0.25191664071239706, 0.2522156441636805, 0.25215053241008767, 0.2525434296889651, 0.2524917808636186, 0.2527410425201369, 0.2534121449798447, 0.25279479287831214]}, "bs": [64], "model": "", "ngpus": [8]} \ No newline at end of file diff --git a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp32_full_run_acc_baseline.json b/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp32_full_run_acc_baseline.json deleted file mode 100644 index 54010290..00000000 --- a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_18.08_fp32_full_run_acc_baseline.json +++ 
/dev/null @@ -1 +0,0 @@ -{"metric_keys": ["train.loss", "val.acc"], "metrics": {"train.loss": [9.887425426832973, 6.30290542835752, 5.566619733535567, 5.192713968618468, 4.943981836976963, 4.777146058311629, 4.682364774062644, 4.566371860462505, 4.479279315107254, 5, 4.398730874582149, 4.31779890601812, 4.293896813580043, 4.250142149529603, 4.219812418175577, 4.21572122303159, 4.187492328960302, 4.147948342119242, 4.134799897931028, 4.131298205737984, 4.071315974647822, 4.074750597299968, 4.0595350983882055, 4.042616275720722, 4.029284068070124, 4.02082926113012, 3.9983501902834298, 4.00984974094874, 3.9730074155799167, 5, 3.9646901324326294, 3.952598022061144, 3.944574903713043, 3.9182081201711596, 3.9252539055836775, 3.907297405092997, 3.8867245969813986, 3.87151758639573, 3.8793927009449254, 3.8687505586699107, 3.8750464156204956, 5, 3.8645522469516402, 3.504709825765618, 3.3920036476251862, 3.318732707260998, 5, 3.295415750237011, 3.2602547589347872, 5, 5, 5, 5, 3.199645553613854, 3.1623374312205086, 5, 3.147109237820821, 3.158245995575684, 3.1465386938319977, 3.1480963979746055, 3.151234711101482, 3.146022343739672, 3.1410668343956294, 3.142435818259893, 3.123337645718104], "val.acc": [0.01106397969239677, 0.04958324872172423, 0.07470961174804201, 0.08412781056028416, 0.1052591997157941, 0.11592629309116805, 0.1275672396324061, 0.12472585915140484, 0.13138377072048255, 0.1262696666605193, 0.13354663690485083, 0.14424123617821044, 0.14059169419863984, 0.14768715602101368, 0.15450788443085858, 0.14792122925940135, 0.1508861356435794, 0.157419558440425, 0.15279118544884585, 0.16075469826863828, 0.14747077091644412, 0.16340857637480236, 0.14427366437395484, 0.15709914018423293, 0.16324391683493303, 0.16440443232887508, 0.16479726175439752, 0.17508843799046686, 0.16142292492169025, 0.1643848499786872, 0.16912610131976924, 0.16376330941842296, 0.16894551721633602, 0.17771765128166106, 0.1749561896689298, 0.1695538322677119, 0.16778561571905298, 0.16380194923909086, 0.16994188486879763, 0.1716953661397215, 0.17755697810460197, 0.17187995479426885, 0.1742018462295355, 0.23426649845846764, 0.23613136034024038, 0.24175797706337981, 0.2425279583355936, 0.24352550398110506, 0.24411115979837528, 0.24656561042490024, 0.24383524308920906, 0.24686666489675338, 0.24814559219197632, 0.24840393696219026, 0.251965847689631, 0.25254138256097747, 0.2523565615073023, 0.2529904738785998, 0.253555154014026, 0.2530651493203877, 0.25358174010109197, 0.2537683728256746, 0.2539384684886946, 0.2540280117408162, 0.2534652864501853]}, "bs": [32], "model": "", "ngpus": [8]} \ No newline at end of file diff --git a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp16_1epoch_run_acc_baseline.json b/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp16_1epoch_run_acc_baseline.json deleted file mode 100644 index 30e5a0a0..00000000 --- a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp16_1epoch_run_acc_baseline.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "metrics" : { - "val.acc" : [ - 0.0100971670737651 - ], - "train.loss" : [ - 9.85026645043801 - ] - }, - "ngpus" : [ - 8 - ], - "metric_keys" : [ - "train.loss", - "val.acc" - ], - "bs" : [ - 64 - ] -} diff --git a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp32_1epoch_run_acc_baseline.json b/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp32_1epoch_run_acc_baseline.json deleted file mode 100644 index 94f049c2..00000000 --- 
a/PyTorch/Detection/SSD/qa/curve_baselines/SSD300_pytorch_19.01_fp32_1epoch_run_acc_baseline.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "bs" : [ - 32 - ], - "metrics" : { - "train.loss" : [ - 8.79916159380589 - ], - "val.acc" : [ - 0.0238952010105531 - ] - }, - "metric_keys" : [ - "train.loss", - "val.acc" - ], - "ngpus" : [ - 8 - ] -} diff --git a/PyTorch/Detection/SSD/qa/qa_accuracy_main.py b/PyTorch/Detection/SSD/qa/qa_accuracy_main.py deleted file mode 100644 index a8234ade..00000000 --- a/PyTorch/Detection/SSD/qa/qa_accuracy_main.py +++ /dev/null @@ -1,73 +0,0 @@ -# core imports -import os -import numpy as np - -# pytorch imports -import torch -import torch.utils.data.distributed - -# Apex imports -try: - from apex.parallel.LARC import LARC - from apex.parallel import DistributedDataParallel as DDP - from apex.fp16_utils import * -except ImportError: - raise ImportError("Please install APEX from https://github.com/nvidia/apex") - -# project imports -from src.train import train_loop -from main import train, make_parser -from src.logger import Logger -from qa.qa_utils import load_json, create_json_file, compare_acc, save_json - -RESULT = None - - -def add_benchmark_args(parser): - parser.add_argument('--benchmark-mode', type=str, default='epoch-accuracy', - choices=['full-accuracy', 'epoch-accuracy'], required=True) - parser.add_argument('--benchmark-file', type=str, default=None, metavar='FILE', - help='path to the file with baselines', required=True) - return parser - - -def main(args): - if args.local_rank == 0: - os.makedirs('./models', exist_ok=True) - - if args.seed is not None: - print("Using seed = {}".format(args.seed)) - torch.manual_seed(args.seed) - np.random.seed(seed=args.seed) - - torch.backends.cudnn.benchmark = True - - if args.benchmark_mode == 'epoch-accuracy': - args.epochs = 1 - - train_loop_func = train_loop - logger = Logger('Accuracy test', print_freq=10) - - args.evaluation = list(range(90)) - train(train_loop_func, logger, args) - - exit_code = 0 - if args.local_rank == 0: - train_loss_results, val_acc_results, train_time_results = logger.print_results() - print(train_time_results) - print(train_loss_results) - print(val_acc_results) - measured_results = create_json_file(val_acc_results, train_loss_results, ngpus=8, bs=args.batch_size) - save_json('/results/results.json', measured_results) - print(measured_results) - benchmark_results = load_json(args.benchmark_file) - exit_code = compare_acc(measured_results, benchmark_results, args) - exit(exit_code) - - -if __name__ == "__main__": - parser = make_parser() - parser = add_benchmark_args(parser) - args = parser.parse_args() - print(args) - main(args) diff --git a/PyTorch/Detection/SSD/qa/qa_perf_main.py b/PyTorch/Detection/SSD/qa/qa_perf_main.py deleted file mode 100644 index d4adc7fc..00000000 --- a/PyTorch/Detection/SSD/qa/qa_perf_main.py +++ /dev/null @@ -1,199 +0,0 @@ -# core imports -import os -import numpy as np -import json -from pprint import pprint -import time - -# pytorch imports -import torch -import torch.utils.data.distributed -from torch.autograd import Variable - - -# Apex imports -try: - from apex.parallel.LARC import LARC - from apex.parallel import DistributedDataParallel as DDP - from apex.fp16_utils import * -except ImportError: - raise ImportError("Please install APEX from https://github.com/nvidia/apex") - -# project imports -from main import train, make_parser -from src.logger import BenchLogger -# from src.train import benchmark_inference_loop, benchmark_train_loop - -from SSD import _C 
as C - -RESULT = None - - -def add_benchmark_args(parser): - parser.add_argument('--benchmark-mode', type=str, choices=['training', 'inference'], - default='inference', required=True) - parser.add_argument('--results-file', default='experiment_raport.json', type=str, - help='file in which to store JSON experiment raport') - parser.add_argument('--benchmark-file', type=str, default=None, metavar='FILE', - help='path to the file with baselines') - return parser - -def benchmark_train_loop(model, loss_func, epoch, optim, train_dataloader, val_dataloader, encoder, iteration, logger, args, mean, std): - start_time = None - # tensor for results - result = torch.zeros((1,)).cuda() - for i, data in enumerate(loop(train_dataloader)): - if i >= args.benchmark_warmup: - start_time = time.time() - - img = data[0][0][0] - bbox = data[0][1][0] - label = data[0][2][0] - label = label.type(torch.cuda.LongTensor) - bbox_offsets = data[0][3][0] - # handle random flipping outside of DALI for now - bbox_offsets = bbox_offsets.cuda() - img, bbox = C.random_horiz_flip(img, bbox, bbox_offsets, 0.5, False) - - if not args.no_cuda: - img = img.cuda() - bbox = bbox.cuda() - label = label.cuda() - bbox_offsets = bbox_offsets.cuda() - img.sub_(mean).div_(std) - - N = img.shape[0] - if bbox_offsets[-1].item() == 0: - print("No labels in batch") - continue - bbox, label = C.box_encoder(N, bbox, bbox_offsets, label, encoder.dboxes.cuda(), 0.5) - - M = bbox.shape[0] // N - bbox = bbox.view(N, M, 4) - label = label.view(N, M) - - - - - - ploc, plabel = model(img) - ploc, plabel = ploc.float(), plabel.float() - - trans_bbox = bbox.transpose(1, 2).contiguous().cuda() - - if not args.no_cuda: - label = label.cuda() - gloc = Variable(trans_bbox, requires_grad=False) - glabel = Variable(label, requires_grad=False) - - loss = loss_func(ploc, plabel, gloc, glabel) - - - - # loss scaling - if args.fp16: - if args.amp: - with optim.scale_loss(loss) as scale_loss: - scale_loss.backward() - else: - optim.backward(loss) - else: - loss.backward() - - optim.step() - optim.zero_grad() - iteration += 1 - - # reduce all results from every gpu - if i >= args.benchmark_warmup + args.benchmark_iterations: - result.data[0] = logger.print_result() - if args.N_gpu > 1: - torch.distributed.reduce(result, 0) - if args.local_rank == 0: - global RESULT - RESULT = float(result.data[0]) - return - - if i >= args.benchmark_warmup: - logger.update(args.batch_size, time.time() - start_time) - -def loop(dataloader): - while True: - for data in dataloader: - yield data - -def benchmark_inference_loop(model, loss_func, epoch, optim, train_dataloader, val_dataloader, encoder, iteration, logger, args, mean, std): - assert args.N_gpu == 1, 'Inference benchmark only on 1 gpu' - start_time = None - model.eval() - i=-1 - dataloader = loop(val_dataloader) - while True: - i+=1 - with torch.no_grad(): - torch.cuda.synchronize() - if i >= args.benchmark_warmup: - start_time = time.time() - data = next(dataloader) - - img = data[0] - - if not args.no_cuda: - img = img.cuda() - - if args.fp16: - img = img.half() - - img.sub_(mean).div_(std) - img = Variable(img, requires_grad=False) - _ = model(img) - torch.cuda.synchronize() - - if i >= args.benchmark_warmup + args.benchmark_iterations: - global RESULT - RESULT = logger.print_result() - return - - if i >= args.benchmark_warmup: - logger.update(args.batch_size, time.time() - start_time) - - -def main(args): - if args.local_rank == 0: - os.makedirs('./models', exist_ok=True) - - if args.seed is not None: - print("Using 
seed = {}".format(args.seed)) - torch.manual_seed(args.seed) - np.random.seed(seed=args.seed) - - torch.backends.cudnn.benchmark = True - - if args.benchmark_mode == 'training': - train_loop_func = benchmark_train_loop - logger = BenchLogger('Training benchmark') - else: - train_loop_func = benchmark_inference_loop - logger = BenchLogger('Inference benchmark') - - args.epochs = 1 - - train(train_loop_func, logger, args) - - if args.local_rank == 0: - global RESULT - with open(args.results_file) as f: - results = json.load(f) - results['metrics'][str(args.N_gpu)][str(args.batch_size)] = {'images_per_second': RESULT} - pprint(results) - - with open(args.results_file, 'w') as f: - json.dump(results, f) - - -if __name__ == "__main__": - parser = make_parser() - parser = add_benchmark_args(parser) - args = parser.parse_args() - print(args) - main(args) diff --git a/PyTorch/Detection/SSD/qa/qa_utils.py b/PyTorch/Detection/SSD/qa/qa_utils.py deleted file mode 100644 index a9ee3239..00000000 --- a/PyTorch/Detection/SSD/qa/qa_utils.py +++ /dev/null @@ -1,115 +0,0 @@ -import json - -# terminal stdout colors -OKBLUE = '\033[94m' -OKGREEN = '\033[92m' -WARNING = '\033[93m' -FAIL = '\033[91m' -ENDC = '\033[0m' - - -# load results and benchmark -def load_json(filepath): - with open(filepath) as f: - data = json.load(f) - return data - - -def save_json(filepath, data): - with open(filepath, 'w') as f: - json.dump(data, f) - - -# compare func -def compare(measured_value, true_value, pmargin=0.1): - assert 0 < pmargin < 1, 'Margin should be in range [0, 1]' - return (1 - pmargin) * true_value < measured_value - - -# compare 2 benchmark json files -def compare_benchmarks(results, benchmark, args, pmargin=0.1): - # sanity check - for metric in results['metric_keys']: - if metric not in benchmark['metric_keys']: - assert False, "You want to compare {} metric which doesn't appear in benchmark file".format(metric) - - assert len(args.bs) <= len(benchmark['bs']), 'len(args.bs) <= len(benchmark["bs"] ({} <= {})'.format(len(args.bs), - len(benchmark[ - 'bs'])) - assert len(args.bs) == len(results['bs']), 'len(args.bs) <= len(results["bs"] ({} == {})'.format(len(args.bs), - len(results['bs'])) - for bs in results['bs']: - if bs not in benchmark['bs']: - assert False, "You want to compare batch size = {} which doesn't appear in benchmark file".format(bs) - - assert len(args.ngpus) <= len(benchmark['ngpus']), 'len(args.ngpus) <= len(benchmark["ngpus"]) ({} <= {})'.format( - len(args.bs), len(benchmark['ngpus'])) - assert len(args.ngpus) == len(results['ngpus']), 'len(args.ngpus) == len(results["ngpus"]) ({} == {})'.format( - len(args.bs), len(results['ngpus'])) - for gpu in results['ngpus']: - if gpu not in benchmark['ngpus']: - assert False, "You want to compare {} gpus results which don't appear in benchmark file".format(gpu) - - # compare measured numbers with benchmark - exit = 0 - for metric in results['metric_keys']: - for gpu in results['ngpus']: - for bs in results['bs']: - measured_metric = results['metrics'][str(gpu)][str(bs)][metric] - ground_truth_metric = benchmark['metrics'][str(gpu)][str(bs)][metric] - ok = compare(measured_metric, ground_truth_metric, pmargin) - if ok: - print(OKGREEN + 'BENCHMARK PASSED: metric={} gpu={} bs={}'.format(metric, gpu, bs) + ENDC) - else: - print(FAIL + 'BENCHMARK NOT PASSED: metric={} gpu={} bs={}'.format(metric, gpu, bs) + ENDC) - exit = 1 - return exit - -# compare 2 benchmark json files -def compare_acc(results, benchmark, args): - # sanity check - for metric in 
results['metric_keys']: - if metric not in benchmark['metric_keys']: - assert False, "You want to compare {} metric which doesn't appear in benchmark file".format(metric) - - for bs in results['bs']: - if bs not in benchmark['bs']: - assert False, "You want to compare batch size = {} which doesn't appear in benchmark file".format(bs) - - for gpu in results['ngpus']: - if gpu not in benchmark['ngpus']: - assert False, "You want to compare {} gpus results which don't appear in benchmark file".format(gpu) - - # compare measured numbers with benchmark - for i, (result, ground_truth) in enumerate(zip(results['metrics']['val.acc'], benchmark['metrics']['val.acc'])): - if i > 43: # before first decay accuracy tends to vary more than 15% at ~30th epoch - if ground_truth * 0.9 > result: - print(FAIL + 'ACCURACY TEST NOT PASSED' + ENDC) - return 1 - - # compare measured numbers with benchmark - for i, (result, ground_truth) in enumerate(zip(results['metrics']['train.loss'], benchmark['metrics']['train.loss'])): - if i > 43: - if ground_truth * 1.1 < result: - print(FAIL + 'LOSS TEST NOT PASSED' + ENDC) - return 1 - - print(OKGREEN + 'ACCURACY TEST PASSED' + ENDC) - return 0 - -def create_json_file(val_acc_results, train_loss_results, ngpus=8, bs=32): - results = {"ngpus": [ngpus], - "bs": [bs], - "metric_keys": ["train.loss", "val.acc"], - "metrics": { - "train.loss": [], - "val.acc": [] - } - } - - for i, ((epoch1, acc), (epoch2, loss)) in enumerate(zip(val_acc_results, train_loss_results)): - assert i == epoch1 == epoch2 - results['metrics']['train.loss'].append(loss) - results['metrics']['val.acc'].append(acc) - - return results diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_1epoch_run.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_1epoch_run.sh deleted file mode 100644 index 8885f35c..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_1epoch_run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -python3 -m torch.distributed.launch --nproc_per_node=8 qa/qa_accuracy_main.py --bs 64 --fp16 --warmup 300 --learning-rate 2.6e-3 --seed 1 --benchmark-mode epoch-accuracy --benchmark-file qa/curve_baselines/SSD300_pytorch_19.01_fp16_1epoch_run_acc_baseline.json --data $1 - diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_full_run.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_full_run.sh deleted file mode 100644 index 34ab4e64..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp16_full_run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -python3 -m torch.distributed.launch --nproc_per_node=8 qa/qa_accuracy_main.py --bs 64 --fp16 --warmup 300 --learning-rate 2.6e-3 --seed 1 --benchmark-mode full-accuracy --benchmark-file qa/curve_baselines/SSD300_pytorch_18.08_fp16_full_run_acc_baseline.json --data $1 - diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_1epoch_run.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_1epoch_run.sh deleted file mode 100644 index 18b5d622..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_1epoch_run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -python3 -m torch.distributed.launch --nproc_per_node=8 qa/qa_accuracy_main.py --bs 32 --warmup 300 --learning-rate 2.6e-3 --seed 1 --benchmark-mode epoch-accuracy --benchmark-file qa/curve_baselines/SSD300_pytorch_19.01_fp32_1epoch_run_acc_baseline.json --data $1 - diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_full_run.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_full_run.sh deleted file mode 100644 index 
35dc9a35..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_8GPU_fp32_full_run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -python3 -m torch.distributed.launch --nproc_per_node=8 qa/qa_accuracy_main.py --bs 32 --warmup 300 --learning-rate 2.6e-3 --seed 1 --benchmark-mode full-accuracy --benchmark-file qa/curve_baselines/SSD300_pytorch_18.08_fp32_full_run_acc_baseline.json --data $1 - diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp16.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp16.sh deleted file mode 100644 index b1ab08fe..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp16.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python ./qa/benchmark_performance.py --benchmark-mode inference --ngpus 1 --bs 2 4 8 16 32 --fp16 --bench-warmup 100 --bench-iterations 200 --benchmark-file qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp16.json --data $1 diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp32.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp32.sh deleted file mode 100644 index 1dd00324..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_inference_benchmark_fp32.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python ./qa/benchmark_performance.py --benchmark-mode inference --ngpus 1 --bs 2 4 8 16 32 --bench-warmup 100 --bench-iterations 200 --benchmark-file qa/benchmark_baselines/SSD300_pytorch_19.05_inference_fp32.json --data $1 diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp16.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp16.sh deleted file mode 100644 index c0ec1b39..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp16.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python ./qa/benchmark_performance.py --benchmark-mode training --ngpus 1 4 --bs 2 4 8 16 32 --fp16 --bench-warmup 100 --bench-iterations 200 --benchmark-file qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp16.json --data $1 diff --git a/PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp32.sh b/PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp32.sh deleted file mode 100644 index 27c0b019..00000000 --- a/PyTorch/Detection/SSD/qa/testing_DGX1V_training_benchmark_fp32.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python ./qa/benchmark_performance.py --benchmark-mode training --ngpus 1 4 --bs 2 4 8 16 --bench-warmup 100 --bench-iterations 200 --benchmark-file qa/benchmark_baselines/SSD300_pytorch_19.01_training_fp32.json --data $1 diff --git a/PyTorch/Detection/SSD/src/coco_pipeline.py b/PyTorch/Detection/SSD/src/coco_pipeline.py index efacebde..82acae90 100644 --- a/PyTorch/Detection/SSD/src/coco_pipeline.py +++ b/PyTorch/Detection/SSD/src/coco_pipeline.py @@ -35,9 +35,9 @@ class COCOPipeline(Pipeline): super(COCOPipeline, self).__init__(batch_size=batch_size, device_id=device_id, num_threads=num_threads, seed = seed) - try: + if torch.distributed.is_initialized(): shard_id = torch.distributed.get_rank() - except RuntimeError: + else: shard_id = 0 self.input = ops.COCOReader(file_root = file_root, annotations_file = annotations_file, diff --git a/PyTorch/LanguageModeling/BERT/.dockerignore b/PyTorch/LanguageModeling/BERT/.dockerignore new file mode 100644 index 00000000..269a9403 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/.dockerignore @@ -0,0 +1,3 @@ +data/ +vocab/ +results/ diff --git a/PyTorch/LanguageModeling/BERT/.gitignore b/PyTorch/LanguageModeling/BERT/.gitignore new file mode 
100644 index 00000000..6e529b32 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/.gitignore @@ -0,0 +1,129 @@ +# Initially taken from Github's Python gitignore file + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +#Data +data/*/*/ +data/*/*.zip + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# vscode +.vscode + +# TF code +tensorflow_code + +# Models +models diff --git a/PyTorch/LanguageModeling/BERT/Dockerfile b/PyTorch/LanguageModeling/BERT/Dockerfile new file mode 100644 index 00000000..fe0f464b --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/Dockerfile @@ -0,0 +1,27 @@ +ARG FROM_IMAGE_NAME=gitlab-master.nvidia.com:5005/dl/dgx/pytorch:19.05-py3-devel +FROM ${FROM_IMAGE_NAME} +RUN apt-get update && apt-get install -y pbzip2 pv bzip2 cabextract + + +#WORKDIR /opt +#RUN cd pytorch/apex \ +# && git fetch origin pull/182/head:norm_fix \ +# && git checkout norm_fix \ +# && python setup.py develop --cuda_ext --cpp_ext + + +WORKDIR /opt +RUN cd pytorch/apex ; \ + pip uninstall apex; \ + pip uninstall apex; \ + git checkout master; \ + git pull; \ + pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . + +WORKDIR /workspace +RUN git clone https://github.com/attardi/wikiextractor.git +RUN git clone https://github.com/soskek/bookcorpus.git + +WORKDIR /workspace/bert +COPY . . +RUN pip install tqdm boto3 requests six ipdb h5py html2text nltk progressbar \ No newline at end of file diff --git a/PyTorch/LanguageModeling/BERT/LICENSE b/PyTorch/LanguageModeling/BERT/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/PyTorch/LanguageModeling/BERT/README.md b/PyTorch/LanguageModeling/BERT/README.md new file mode 100644 index 00000000..6160a867 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/README.md @@ -0,0 +1,554 @@ +# Bert For PyTorch + +This repository provides scripts and recipes to pretrain BERT from a dataset of choice and achieve state of the art accuracy on relevant fine tuning tasks. This is tested and maintained by NVIDIA. + + +## Table Of Contents: +* [The model](#the-model) + * [Default configuration](#default-configuration) +* [Setup](#setup) + * [Requirements](#requirements) +* [Quick start guide](#quick-start-guide) +* [Details](#details) + * [Command line options](#command-line-options) + * [Getting the data](#getting-the-data) + * [Training process](#training-process) + * [Pre-training](#pre-training) + * [Fine tuning](#fine-tuning) + * [Enabling mixed precision](#enabling-mixed-precision) + * [Inference process](#inference-process) +* [Benchmarking](#benchmarking) + * [Training performance benchmark](#training-performance-benchmark) + * [Inference performance benchmark](#inference-performance-benchmark) +* [Results](#results) + * [Training accuracy results](#training-accuracy-results) + * [Training stability test](#training-stability-test) + * [Training performance results](#training-performance-results) + * [NVIDIA DGX-1 (8x V100 16G)](#nvidia-dgx-1-8x-v100-16g) + * [NVIDIA DGX-1 (8x V100 32G)](#nvidia-dgx-1-8x-v100-32g) + * [NVIDIA DGX-2 (16x V100 32G)](#nvidia-dgx-2-16x-v100-32g) + * [Inference performance results](#inference-performance-results) + * [NVIDIA DGX-1 16G (1x V100 16G)](#nvidia-dgx-1-16g-1x-v100-16g) + * [NVIDIA DGX-1 32G (1x V100 32G)](#nvidia-dgx-1-32g-1x-v100-32g) + * [NVIDIA DGX-2 32G (1x V100 32G)](#nvidia-dgx-2-32g-1x-v100-32g) +* [Changelog](#changelog) +* [Known issues](#known-issues) + +## The model + +BERT, or Bidirectional Encoder Representations from Transformers, is a new method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. This model is based on [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) paper. NVIDIA's BERT 19.04 is an optimized version of [Google's official implementation](https://github.com/google-research/bert), leveraging mixed precision arithmetic and tensor cores on V100 GPUS for faster training times while maintaining target accuracy. + + +The repository also contains scripts to interactively launch data download, training, benchmarking and inference routines in a Docker container for both pretraining and fine tuning for Question Answering. The major differences between the official implementation of the paper and our version of BERT are as follows: +- [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [tensor cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) previously required two steps: +- 1. 
Porting the model to use the FP16 data type where appropriate. +- 2. Manually adding loss scaling to preserve small gradient values. + +Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision (AMP), library from [APEX](https://github.com/NVIDIA/apex) that casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In PyTorch, loss scaling can be easily applied by using scale_loss() method provided by amp. The scaling value to be used can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed. + +For an in-depth walk through on AMP, check out sample usage [here](https://github.com/NVIDIA/apex/tree/master/apex/amp#usage-and-getting-started). [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as AMP, which require minimal network code changes to leverage tensor cores performance. + +- Scripts to download dataset for + - Pretraining - [Wikipedia](https://dumps.wikimedia.org/), [BookCorpus](http://yknzhu.wixsite.com/mbweb) + - Fine Tuning - [SQuaD](https://rajpurkar.github.io/SQuAD-explorer/) (Stanford Question Answering Dataset), Pretrained Weights from Google +- Custom fused CUDA kernels for faster computations +- Multi-GPU/Multi-Node support using [APEX DDP](https://github.com/NVIDIA/apex#2-distributed-training) + + +These techniques and optimizations improve model performance and reduce training time, allowing you to perform various NLP tasks with no additional effort. + + +Other publicly available implementations of BERT include: +1. [Hugging Face](https://github.com/huggingface/pytorch-pretrained-BERT) +2. [codertimo](https://github.com/codertimo/BERT-pytorch) + + +This model trains with mixed precision tensor cores on Volta, therefore researchers can get results much faster than training without tensor cores. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. + +### Default configuration + +BERT's model architecture is a multi-layer bidirectional Transformer encoder. Based on the model size, we have the following two default configurations of BERT. + +| **Model** | **Hidden layers** | **Hidden unit size** | **Attention heads** | **Feedforward filter size** | **Max sequence length** | **Parameters** | +|:---------:|:----------:|:----:|:---:|:--------:|:---:|:----:| +|BERTBASE |12 encoder| 768| 12|4 x 768|512|110M| +|BERTLARGE|24 encoder|1024| 16|4 x 1024|512|330M| + +## Setup +The following section list the requirements in order to start training the BERT model. + +### Requirements +This repository contains `Dockerfile` which extends the TensorFlow NGC container and encapsulates some dependencies. 
Aside from these dependencies, ensure you have the following components: +- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) +- [PyTorch 19.04-py3](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) NGC container +- [NVIDIA Volta based GPU](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) + + +For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: +- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) +- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) +- [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running) + +## Quick start guide +To pretrain or fine tune your model for Question Answering using mixed precision with tensor cores or using FP32, perform the following steps using the default parameters of the BERT model. + +### 1. Clone the repository. + +```bash +git clone https://github.com/NVIDIA/DeepLearningExamples +cd DeepLearningExamples/PyTorch/LanguageModeling/BERT +``` + +### 2. Build the BERT PyTorch NGC container. + +```bash +bash scripts/docker/build.sh +``` + +### 3. Download and preprocess the dataset. +This repository provides scripts to download, verify and extract various datasets: +SQuaD and SWAG for fine-tuning as well as Wikipedia and BookCorpus for pretraining. If you just want to do fine-tuning, you can also download the pretrained weights. + +To download, verify, and extract the required datasets: + +```bash +bash scripts/data_download.sh +``` + +The script launches a Docker container with the current directory mounted and downloads the datasets to the `data/` folder on the host. + +Datasets can also be mixed before being used for training or inference; in the case of training there are two options, described in [Mixing datasets](#mixing-datasets). + + +### 4. Start an interactive session in the NGC container to run training/inference. +After you build the container image and download the data, you can start an interactive CLI session as follows: + +```bash +bash scripts/docker/launch.sh +``` + +The `launch.sh` script assumes that the datasets are in the following locations by default after downloading the data: +- SQuaD v1.1 - `data/squad/v1.1` +- BERT - `data/pretrained_models_google/uncased_L-24_H-1024_A-16` +- Wikipedia - `data/wikipedia_corpus/hdf5_shards` +- BookCorpus - `data/bookcorpus/hdf5_shards` + + +### 5. Start pre-training. +BERT is designed to pre-train deep bidirectional language representations. The following scripts replicate pretraining on Wikipedia+BookCorpus from the [paper](https://arxiv.org/pdf/1810.04805.pdf). These scripts are general and can be used for pretraining language representations on any corpus of choice. + +From within the container, you can use the following script to run pre-training. +```bash +bash scripts/run_pretraining.sh +``` + + + +### 6. Start fine tuning. +The above pretrained BERT representations can be fine tuned with just one additional output layer for a state-of-the-art Question Answering system. From within the container, you can use the following script to run fine-tuning for SQuaD.
+ +```bash +bash scripts/run_squad.sh +``` + + +For FP32 training using a DGX-1 V100 32G, run: +```bash +bash scripts/run_squad.sh 5 5e-6 fp32 8 /bert/bert_model.ckpt 2 +``` + +### 7. Start validation/evaluation. +The `run_squad_inference.sh` script runs inference on a checkpoint fine tuned for SQuaD and evaluates the goodness of predictions on the basis of exact match and F1 score. + +```bash +bash scripts/run_squad_inference.sh +``` + +For FP32 inference without XLA using a DGX-1 V100 32G, run: +```bash +bash scripts/run_squad_inference.sh /results/model.ckpt 8 fp32 +``` + +## Details +The following sections provide greater details of the dataset, running training and inference, and the training results. + +### Command line options +To see the full list of available options and their descriptions, use the -h or --help command line option, for example: +```bash +python run_pretraining.py --help +python run_squad.py --help +``` + +Aside from options to set hyperparameters, the relevant options to control the behaviour of the `run_pretraining.py` script are: +```bash + --[no]amp: Whether to enable AMP ops.(default: 'false') + --[no]amp_fastmath: Whether to enable AMP fasthmath ops.(default: 'false') + --bert_config_file: The config json file corresponding to the pre-trained BERT model. This specifies the model architecture. + --[no]do_eval: Whether to run evaluation on the dev set.(default: 'false') + --[no]do_train: Whether to run training.(evaluation: 'false') + --eval_batch_size: Total batch size for eval.(default: '8')(an integer) + --[no]fastmath: Whether to enable loss scaler for fasthmath ops.(default: 'false') + --[no]horovod: Whether to use Horovod for multi-gpu runs(default: 'false') + --init_checkpoint: Initial checkpoint (usually from a pre-trained BERT model). + --input_file: Input TF example files (can be a glob or comma separated). + --iterations_per_loop: How many steps to make in each estimator call.(default: '1000') +``` + +Aside from options to set hyperparameters, some relevant options to control the behaviour of the run_squad.py script are: +```bash + --bert_config_file: The config json file corresponding to the pre-trained BERT model. This specifies the model architecture. + --[no]do_predict: Whether to run evaluation on the dev set. (default: 'false') + --[no]do_train: Whether to run training. (default: 'false') + --learning_rate: The initial learning rate for Adam.(default: '5e-06')(a number) + --max_answer_length: The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.(default: '30')(an integer) + --max_query_length: The maximum number of tokens for the question. Questions longer than this will be truncated to this length.(default: '64')(an integer) + --max_seq_length: The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.(default: '384')(an integer) + --predict_batch_size: Total batch size for predictions.(default: '8')(an integer) + --train_batch_size: Total batch size for training.(default: '8')(an integer) + --[no]use_fp16: Whether to use fp32 or fp16 arithmetic on GPU.(default: 'false') + --[no]use_xla: Whether to enable XLA JIT compilation.(default: 'false') + --[no]verbose_logging: If true, all of the warnings related to data processing will be printed. 
A number of warnings are expected for a normal SQuAD evaluation.(default: 'false') + --[no]version_2_with_negative: If true, the SQuAD examples contain some that do not have an answer.(default: 'false') +``` + +### Getting the data +For pre-training BERT, we use the concatenation of Wikipedia (2500M words) as well as Book Corpus (800M words). For Wikipedia, we extract only the text passages from [here](ftp://ftpmirror.your.org/pub/wikimedia/dumps/enwiki/20190301/enwiki-20190301-pages-articles-multistream.xml.bz2) and ignore headers list and tables. It is structured as a document level corpus rather than a shuffled sentence level corpus because it is critical to extract long contiguous sentences. The next step is to run `create_pretraining_data.py` with the document level corpus as input, which generates input data and labels for the masked language modeling and next sentence prediction tasks. Pre-training can also be performed on any corpus of your choice. The collection of data generation scripts are intended to be modular to allow modifications for additional preprocessing steps or to use additional data. + +#### Mixing datasets + +The repository provides tools to mix datasets for both training and finetuning. +In case of training there are two options: + +a) inter sequence-pair mixing (after pretraining data is created) + +In the `data/` directory, `merge_datasets_after_creation.sh` is a tool to mix data from multiple source corpora. To perform this mixing, the source corpora need to be already in the format of pretraining data, i.e. .hdf5 files. To call the script, use: + + +```bash +cd data +bash merge_datasets_after_creation.sh +``` + +For example, to merge the bookcorpus and Wikipedia corpora provided with this repository and create 1024 new shards containing the mixed training instances, first make sure that `data/bookcorpus/hdf5_shards/` and `data/wikipedia_corpus/hdf5_shards/` exist and are filled with .hdf5, then run: + +``` +cd data +bash merge_datasets_after_creation.sh inter_instance_merged_wiki+books bookcorpus/hdf5_shards/,wikipedia_corpus/hdf5_shards/ 1024 +``` + +b) intra sequence-pair mixing (before pretraining data is created) + + +In the `data/` directory, `merge_datasets_from_start.sh` is a tool to mix data from multiple source corpora. To perform this mixing, the source corpora must each be condensed into a single file that contains the entire corpus text, with line within the file corresponding to a document in the corpus. The script is then called as such: + +``` +cd data +merge_datasets_from_start.sh DESTINATION_FOLDER CORPUS_1 CORPUS_2 CORPUS_3 ... +``` + +For example, to merge the bookcorpus and Wikipedia corpora provided with this repository, first make sure that `data/bookcorpus/intermediate_files/bookcorpus.txt` and `data/wikipedia_corpus/intermediate_files/wikipedia.txt` exist, then run: + +``` +cd data +merge_datasets_from_start.sh intra_instance_merged_wiki+books bookcorpus/intermediate_files/bookcorpus.txt wikipedia_corpus/intermediate_files/wikipedia.txt +``` + +Note that `merge_datasets_from_start.sh` has a few dependencies, so it may be preferable to modify `data_download_helper.sh` to call the merging script and run `data_download.sh` so that the mixing process is done in a container. + +#### Fine Tuning datasets + +We can use a pre-trained BERT model for other fine tuning tasks like Question Answering. We use SQuaD for this task. SQuaD v1.1 has 100,000+ question-answer pairs on 500+ articles. 
SQuaD v2.0 combines v1.1 with an additional 50,000 new unanswerable questions, so a system must not only answer questions but also determine when no answer is possible. + +### Training process +The training process consists of two steps: pre-training and fine tuning. + +#### Pre-training +Pre-training is performed using the `run_pretraining.py` script along with parameters defined in `scripts/run_pretraining.sh`. + + +The `run_pretraining.sh` script runs a job on a single node that trains the BERT-large model from scratch using the Wikipedia and BookCorpus datasets as training data. By default, the training script: +- Runs on 8 GPUs with a training batch size of 14 and an evaluation batch size of 8 per GPU. +- Has FP16 precision enabled. +- Runs for 1144000 steps with 10000 warm-up steps. +- Saves a checkpoint every 5000 iterations (keeps only the latest checkpoint) and at the end of training. All checkpoints, evaluation results and training logs are saved to the `/results` directory (in the container, which can be mounted to a local directory). +- Creates the log file containing all the output. +- Evaluates the model at the end of training. To skip evaluation, modify `--do_eval` to `False`. + +These parameters will train Wikipedia + BooksCorpus to reasonable accuracy on a DGX-1 with 32GB V100 cards. If you want to match Google's best results from the BERT paper, you should either train for twice as many steps (2,288,000 steps) on a DGX-1, or train on 16 GPUs on a DGX-2. The DGX-2, having 16 GPUs, can fit a batch size twice as large as a DGX-1 (224 vs 112), hence the DGX-2 can finish in half as many steps. + + +For example: +```bash +run_pretraining.sh <training_batch_size> <eval_batch_size> <learning_rate> <precision> <num_gpus> <warmup_steps> <train_steps> <save_checkpoint_steps> <create_logfile> +``` + +Where: +- `<training_batch_size>` is the per-gpu batch size used for training. Batch size varies with `<precision>`; larger batch sizes run more efficiently, but require more memory. + +- `<eval_batch_size>` is the per-gpu batch size used for evaluation after training. + +- `<learning_rate>` is the base learning rate. The default rate of 1e-4 is good for a global batch size of 256. + +- `<precision>` is the type of math in your model, and can be either fp32, fp16, fp16_xla, fastmath, amp_fm, amp_fm_xla, amp or amp_xla. The options mean: + + - fp32 32 bit IEEE single precision floats. + + - fp16 Hand-coded mixed precision 16 and 32 bit floats. + + - fp16_xla Hand-coded mixed precision floats, JIT compiled with XLA. + + - fastmath Matmuls done by tensor cores in mixed precision, the rest is done in FP32. + + - amp_fm Alternative FastMath implementation that works by manipulating TensorFlow's compute graph. + + - amp_fm_xla The amp_fm flag plus XLA JIT compilation. + + - amp Automatic rewrite of the TensorFlow compute graph to take advantage of 16 bit arithmetic whenever that is safe. + + - amp_xla The amp flag plus XLA JIT compilation. + +- `<num_gpus>` is the number of GPUs to use for training. Must be equal to or smaller than the number of GPUs attached to your node. + +- `<warmup_steps>` is the number of warm-up steps at the start of training. + +- `<train_steps>` is the total number of training steps. + +- `<save_checkpoint_steps>` controls how often checkpoints are saved. Default is 5000 steps. + +- `<create_logfile>` is a flag indicating whether output should be written to a logfile (acceptable values are 'true' or 'false'; 'true' indicates output should be saved to a logfile.) + + +For example: +```bash +scripts/run_pretraining.sh 14 8 1e-4 fp16_xla 16 10000 1144000 5000 true +``` + +This trains BERT-large from scratch on a single DGX-2 using FP16 arithmetic, which takes around 156 hours / 6.5 days. Checkpoints are written out every 5000 steps and all printouts are saved to a logfile. + +#### Fine tuning +Fine tuning is performed using the `run_squad.py` script along with parameters defined in `scripts/run_squad.sh`.
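+
+Conceptually, SQuaD fine tuning only adds a thin span-prediction head on top of the pretrained encoder: every token's final hidden state is projected to a start logit and an end logit. The snippet below is a minimal sketch of that idea for the BERT-large hidden size of 1024 (see `bert_config.json`); the class and attribute names are illustrative and not necessarily the exact ones used in `modeling.py`.
+
+```python
+import torch.nn as nn
+
+class QuestionAnsweringHead(nn.Module):
+    """Illustrative span-prediction head used for SQuaD-style fine tuning."""
+
+    def __init__(self, hidden_size=1024):
+        super().__init__()
+        # One linear layer maps each token's hidden state to (start, end) logits.
+        self.qa_outputs = nn.Linear(hidden_size, 2)
+
+    def forward(self, sequence_output):
+        # sequence_output: [batch, seq_len, hidden_size] from the BERT encoder
+        logits = self.qa_outputs(sequence_output)        # [batch, seq_len, 2]
+        start_logits, end_logits = logits.split(1, dim=-1)
+        return start_logits.squeeze(-1), end_logits.squeeze(-1)
+```
+
+During fine tuning, the start and end logits are trained with a cross-entropy loss against the annotated answer span, while the encoder weights are initialized from the pretrained checkpoint and updated jointly.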
+ +The `run_squad.sh` script trains a model and performs evaluation on the SQuaD v1.1 dataset. By default, the training script: +- Uses 8 GPUs and batch size of 10 on each GPU. +- Has FP16 precision enabled. +- Is XLA enabled. +- Runs for 2 epochs. +- Saves a checkpoint every 1000 iterations (keeps only the latest checkpoint) and at the end of training. All checkpoints, evaluation results and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory). +- Evaluation is done at the end of training. To skip evaluation, modify `--do_predict` to `False`. + +This script outputs checkpoints to the `/results` directory, by default, inside the container. Mount point of `/results` can be changed in the `scripts/docker/launch.sh` file. The training log contains information about: +- Loss for the final step +- Training and evaluation performance +- F1 and exact match score on the Dev Set of SQuaD after evaluation. + +The summary after training is printed in the following format: +```bash +I0312 23:10:45.137036 140287431493376 run_squad.py:1332] 0 Total Training Time = 3007.00 Training Time W/O start up overhead = 2855.92 Sentences processed = 175176 +I0312 23:10:45.137243 140287431493376 run_squad.py:1333] 0 Training Performance = 61.3378 sentences/sec +I0312 23:14:00.550846 140287431493376 run_squad.py:1396] 0 Total Inference Time = 145.46 Inference Time W/O start up overhead = 131.86 Sentences processed = 10840 +I0312 23:14:00.550973 140287431493376 run_squad.py:1397] 0 Inference Performance = 82.2095 sentences/sec +{"exact_match": 83.69914853358561, "f1": 90.8477003317459} +``` + +Multi-gpu training is enabled with the Horovod TensorFlow module. The following example runs training on 8 GPUs: +```bash +mpi_command="mpirun -np 8 -H localhost:8 \ + --allow-run-as-root -bind-to none -map-by slot \ + -x NCCL_DEBUG=INFO \ + -x LD_LIBRARY_PATH \ + -x PATH -mca pml ob1 -mca btl ^openib" \ + python run_squad.py --horovod +``` + +### Enabling mixed precision +[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [tensor cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) previously required two steps: +1. Porting the model to use the FP16 data type where appropriate. +2. Manually adding loss scaling to preserve small gradient values. +This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta and Turing GPUs automatically. The TensorFlow framework code makes all necessary model changes internally. + +In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. 
AMP can be configured to work with the existing `tf.contrib` loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling. + +For information about: +- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation. +- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. +- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. + +### Inference process +Inference on a fine tuned Question Answering system is performed using the `run_squad.py` script along with parameters defined in the `scripts/run_squad_inference.sh`. Inference is supported on single GPU at this moment. + +The `run_squad_inference.sh` script trains a model and performs evaluation on the SQuaD v1.1 dataset. By default, the inferencing script: +- Has FP16 precision enabled +- Is XLA enabled +- Evaluates the latest checkpoint present in `/results` with a batch size of 8 + +This script outputs predictions file to `/results/predictions.json` and computes F1 score and exact match score using SQuaD's `evaluate-v1.1.py`. Mount point of `/results` can be changed in the `scripts/docker/launch.sh` file. + +The output log contains information about: +- Evaluation performance +- F1 and exact match score on the Dev Set of SQuaD after evaluation. + +The summary after inference is printed in the following format: +```bash +I0312 23:14:00.550846 140287431493376 run_squad.py:1396] 0 Total Inference Time = 145.46 Inference Time W/O start up overhead = 131.86 Sentences processed = 10840 +I0312 23:14:00.550973 140287431493376 run_squad.py:1397] 0 Inference Performance = 82.2095 sentences/sec +{"exact_match": 83.69914853358561, "f1": 90.8477003317459} +``` + +## Benchmarking +The following section shows how to run benchmarks measuring the model performance in training and inference modes. + +Benchmarking can be performed for both training and inference. Both scripts run the BERT model for fine tuning. You can specify whether benchmarking is performed in FP16 or FP32 by specifying it as an argument to the benchmarking scripts. + +Both of these benchmarking scripts enable you to run a number of epochs and extract performance numbers. + +### Training performance benchmark +Training benchmarking can be performed by running the script: +```bash +scripts/finetune_train_benchmark.sh squad +``` + +### Inference performance benchmark +Inference benchmarking can be performed by running the script: +```bash +scripts/finetune_inference_benchmark.sh squad +``` + +## Results +The following sections provide details on how we achieved our performance and accuracy in training and inference for Question Answering fine tuning. +### Training accuracy results +Our results were obtained by running the `run_squad.py` training script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-1 with 8x V100 32G GPUs. 
+ + +| **Number of GPUs** | **Batch size per GPU** | **Training time with FP16 (Hrs)** | **Training time with FP32 (Hrs)** | +|:---:|:---:|:----:|:----:| +| 8 | 4 ||| + +#### Training stability test +The following tables compare `F1` scores across 5 different training runs with different seeds, for both FP16 and FP32 respectively. The runs showcase consistent convergence on all 5 seeds with very little deviation. + +| **FP16, 8x GPUs** | **seed #1** | **seed #2** | **seed #3** | **seed #4** | **seed #5** | **mean** | **std** | +|:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:| +|F1 || +|Exact match|| + +| **FP32, 8x GPUs** | **seed #1** | **seed #2** | **seed #3** | **seed #4** | **seed #5** | **mean** | **std** | +|:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:| +|F1 | | +|Exact match| | + + +### Training performance results +Our results were obtained by running batch sizes up to 3x GPUs on a 16GB V100 and up to 10x GPUs on a 32G V100 with mixed precision. + +#### NVIDIA DGX-1 (8x V100 16G) +Our results were obtained by running the `scripts/run_pretraining.sh` training script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-1 with 8x V100 16G GPUs. Performance numbers (in tokens per second) were averaged over an entire training epoch. + + +| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speed-up with mixed precision** | **Multi-gpu weak scaling with FP32** | **Multi-gpu weak scaling with FP16** | +|:---:|:---:|:------:|:-----:|:----:|:----:|:----:| +| 1 | 2 | 5.48 |18.97|3.46 |1.0 |1.0 | +| 4 | 2 |19.6|60.6|3.09|3.57 |3.2| +| 8 | 2 |39.21 |121.21|3.09|7.15|6.38| + + +| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speed-up with mixed precision** | **Multi-gpu weak scaling with FP32** | **Multi-gpu weak scaling with FP16** | +|:---:|:---:|:-----:|:-----:|:---:|:---:|:----:| +| 1 | 4 | - |19.46| - | - |1.0 | +| 4 | 4 | - |75.67| - | - |3.88| +| 8 | 4 | - |151.35| - | - |7.77 | + +Note: The respective values for FP32 runs that use a batch size of 4 are not available due to out of memory errors that arise. Batch size of 4 is only available on using FP16. + +To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. + +#### NVIDIA DGX-1 (8x V100 32G) +Our results were obtained by running the `scripts/run_pretraining.sh` training script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-1 with 8x V100 32G GPUs. Performance numbers (in sentences per second) were averaged over an entire training epochs. + + +| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speed-up with mixed precision** | **Multi-gpu weak scaling with FP32** | **Multi-gpu weak scaling with FP16** | +|---|---|-----|-----|----|----|----| +| 1 | 7 | 7.56|24.29|3.21|1.0 |1.0 | +| 4 | 7 |28.84|86.24|2.99|3.81|3.55| +| 8 | 7 |57.68|172.48|2.99|7.62|7.10| + + +| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speed-up with mixed precision** | **Multi-gpu weak scaling with FP32** | **Multi-gpu weak scaling with FP16** | +|---|---|-----|-------|---|---|----| +| 1 | 14| - | 26.04 | - | - |1.0 | +| 4 | 14| - | 99.68| - | - |3.87| +| 8 | 14| - |199.35 | - | - |7.65 | + + +Note: The respective values for FP32 runs that use a batch size of 10 are not available due to out of memory errors that arise. 
+
+To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
+
+#### NVIDIA DGX-2 (16x V100 32G)
+Our results were obtained by running the `scripts/run_pretraining.sh` training script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-2 with 16x V100 32G GPUs. Performance numbers (in sentences per second) were averaged over an entire training epoch.
+
+| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speed-up with mixed precision** | **Multi-gpu weak scaling with FP32** | **Multi-gpu weak scaling with FP16** |
+|---|---|------|------|----|-----|----|
+| 1| 7 | 8.47| 26.04|3.07| 1.0 |1.0 |
+| 4| 7 | 32.2 | 92.68|2.87| 3.8|3.80|
+| 8| 7 | 63.84|183.68|2.87| 7.53|7.05|
+| 16| 7 |126.56|365.12|2.87|14.94|14.02|
+
+| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speed-up with mixed precision** | **Multi-gpu weak scaling with FP32** | **Multi-gpu weak scaling with FP16** |
+|---|---|---|------|---|---|----|
+| 1| 14| - | 28.28| - | - |1.0 |
+| 4| 14| - | 103.6| - | - |3.66|
+| 8| 14| - |208.32| - | - |7.36|
+| 16| 14| - |416.64| - | - |14.73|
+
+Note: The respective values for FP32 runs with a batch size of 14 are not available because those runs result in out-of-memory errors. A batch size of 14 is only available when using FP16.
+
+To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
+
+### Inference performance results
+
+#### NVIDIA DGX-1 16G (1x V100 16G)
+Our results were obtained by running the `scripts/run_squad_inference.sh` inference script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-1 with 1x V100 16G GPU. Performance numbers (in sentences per second) were averaged over the entire SQuAD v1.1 dev set.
+
+| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speedup** |
+|---|---|-----|------|----|
+| 1 | 8 | | | |
+
+To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
+
+#### NVIDIA DGX-1 32G (1x V100 32G)
+Our results were obtained by running the `scripts/run_squad_inference.sh` inference script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-1 with 1x V100 32G GPU. Performance numbers (in sentences per second) were averaged over the entire SQuAD v1.1 dev set.
+
+| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speedup** |
+|---|---|-----|------|----|
+| 1 | 8 | | | |
+
+To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
+
+#### NVIDIA DGX-2 32G (1x V100 32G)
+Our results were obtained by running the `scripts/run_squad_inference.sh` inference script in the TensorFlow 19.03-py3 NGC container on NVIDIA DGX-2 with 1x V100 32G GPU. Performance numbers (in sentences per second) were averaged over the entire SQuAD v1.1 dev set.
+
+| **Number of GPUs** | **Batch size per GPU** | **FP32 sentences/sec** | **FP16 sentences/sec** | **Speedup** |
+|---|---|-----|------|----|
+| 1 | 8 | | | |
+
+To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
+
+## Changelog
+March 2019
+- Initial release
+
+## Known issues
+There are no known issues with this model.
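+
+The following minimal Python sketch (not one of the repository scripts) illustrates how the inference output described in the Inference process section can be consumed; it assumes the default `/results` mount point and the standard SQuAD prediction format of a JSON object mapping question IDs to answer strings:
+
+```python
+import json
+
+# Load the predictions written by run_squad.py (default location from launch.sh).
+with open("/results/predictions.json") as f:
+    predictions = json.load(f)
+
+# Print a few (question_id, predicted_answer) pairs as a quick sanity check.
+for qid, answer in list(predictions.items())[:5]:
+    print(qid, "->", answer)
+```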
\ No newline at end of file diff --git a/PyTorch/LanguageModeling/BERT/bert_config.json b/PyTorch/LanguageModeling/BERT/bert_config.json new file mode 100644 index 00000000..a7efa973 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/bert_config.json @@ -0,0 +1,13 @@ +{ + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 1024, + "initializer_range": 0.02, + "intermediate_size": 4096, + "max_position_embeddings": 512, + "num_attention_heads": 16, + "num_hidden_layers": 24, + "type_vocab_size": 2, + "vocab_size": 30522 +} diff --git a/PyTorch/LanguageModeling/BERT/create_pretraining_data.py b/PyTorch/LanguageModeling/BERT/create_pretraining_data.py new file mode 100644 index 00000000..fc08bb96 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/create_pretraining_data.py @@ -0,0 +1,472 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse +import logging +import os +import random +from io import open +import h5py +import numpy as np +from tqdm import tqdm, trange + +from tokenization import BertTokenizer +import tokenization as tokenization + +import random +import collections + + + + +class TrainingInstance(object): + """A single training instance (sentence pair).""" + + def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, + is_random_next): + self.tokens = tokens + self.segment_ids = segment_ids + self.is_random_next = is_random_next + self.masked_lm_positions = masked_lm_positions + self.masked_lm_labels = masked_lm_labels + + def __str__(self): + s = "" + s += "tokens: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.tokens])) + s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) + s += "is_random_next: %s\n" % self.is_random_next + s += "masked_lm_positions: %s\n" % (" ".join( + [str(x) for x in self.masked_lm_positions])) + s += "masked_lm_labels: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.masked_lm_labels])) + s += "\n" + return s + + def __repr__(self): + return self.__str__() + + +def write_instance_to_example_file(instances, tokenizer, max_seq_length, + max_predictions_per_seq, output_file): + """Create TF example files from `TrainingInstance`s.""" + + + total_written = 0 + features = collections.OrderedDict() + + num_instances = len(instances) + features["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32") + features["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32") + features["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32") + features["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32") + features["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32") + 
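+    # Note: all training instances are materialized in memory as fixed-size NumPy
+    # arrays and written out as a single gzip-compressed HDF5 shard at the end of
+    # this function.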
features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32") + + + for inst_index, instance in enumerate(tqdm(instances)): + input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) + input_mask = [1] * len(input_ids) + segment_ids = list(instance.segment_ids) + assert len(input_ids) <= max_seq_length + + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + masked_lm_positions = list(instance.masked_lm_positions) + masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) + masked_lm_weights = [1.0] * len(masked_lm_ids) + + while len(masked_lm_positions) < max_predictions_per_seq: + masked_lm_positions.append(0) + masked_lm_ids.append(0) + masked_lm_weights.append(0.0) + + next_sentence_label = 1 if instance.is_random_next else 0 + + + + features["input_ids"][inst_index] = input_ids + features["input_mask"][inst_index] = input_mask + features["segment_ids"][inst_index] = segment_ids + features["masked_lm_positions"][inst_index] = masked_lm_positions + features["masked_lm_ids"][inst_index] = masked_lm_ids + features["next_sentence_labels"][inst_index] = next_sentence_label + + total_written += 1 + + # if inst_index < 20: + # tf.logging.info("*** Example ***") + # tf.logging.info("tokens: %s" % " ".join( + # [tokenization.printable_text(x) for x in instance.tokens])) + + # for feature_name in features.keys(): + # feature = features[feature_name] + # values = [] + # if feature.int64_list.value: + # values = feature.int64_list.value + # elif feature.float_list.value: + # values = feature.float_list.value + # tf.logging.info( + # "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) + + + print("saving data") + f= h5py.File(output_file, 'w') + f.create_dataset("input_ids", data=features["input_ids"], dtype='i4', compression='gzip') + f.create_dataset("input_mask", data=features["input_mask"], dtype='i1', compression='gzip') + f.create_dataset("segment_ids", data=features["segment_ids"], dtype='i1', compression='gzip') + f.create_dataset("masked_lm_positions", data=features["masked_lm_positions"], dtype='i4', compression='gzip') + f.create_dataset("masked_lm_ids", data=features["masked_lm_ids"], dtype='i4', compression='gzip') + f.create_dataset("next_sentence_labels", data=features["next_sentence_labels"], dtype='i1', compression='gzip') + f.flush() + f.close() + +def create_training_instances(input_files, tokenizer, max_seq_length, + dupe_factor, short_seq_prob, masked_lm_prob, + max_predictions_per_seq, rng): + """Create `TrainingInstance`s from raw text.""" + all_documents = [[]] + + # Input file format: + # (1) One sentence per line. These should ideally be actual sentences, not + # entire paragraphs or arbitrary spans of text. (Because we use the + # sentence boundaries for the "next sentence prediction" task). + # (2) Blank lines between documents. Document boundaries are needed so + # that the "next sentence prediction" task doesn't span between documents. 
+ for input_file in input_files: + print("creating instance from {}".format(input_file)) + with open(input_file, "r") as reader: + while True: + line = tokenization.convert_to_unicode(reader.readline()) + if not line: + break + line = line.strip() + + # Empty lines are used as document delimiters + if not line: + all_documents.append([]) + tokens = tokenizer.tokenize(line) + if tokens: + all_documents[-1].append(tokens) + + # Remove empty documents + all_documents = [x for x in all_documents if x] + rng.shuffle(all_documents) + + vocab_words = list(tokenizer.vocab.keys()) + instances = [] + for _ in range(dupe_factor): + for document_index in range(len(all_documents)): + instances.extend( + create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) + + rng.shuffle(instances) + return instances + + +def create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng): + """Creates `TrainingInstance`s for a single document.""" + document = all_documents[document_index] + + # Account for [CLS], [SEP], [SEP] + max_num_tokens = max_seq_length - 3 + + # We *usually* want to fill up the entire sequence since we are padding + # to `max_seq_length` anyways, so short sequences are generally wasted + # computation. However, we *sometimes* + # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter + # sequences to minimize the mismatch between pre-training and fine-tuning. + # The `target_seq_length` is just a rough target however, whereas + # `max_seq_length` is a hard limit. + target_seq_length = max_num_tokens + if rng.random() < short_seq_prob: + target_seq_length = rng.randint(2, max_num_tokens) + + # We DON'T just concatenate all of the tokens from a document into a long + # sequence and choose an arbitrary split point because this would make the + # next sentence prediction task too easy. Instead, we split the input into + # segments "A" and "B" based on the actual "sentences" provided by the user + # input. + instances = [] + current_chunk = [] + current_length = 0 + i = 0 + while i < len(document): + segment = document[i] + current_chunk.append(segment) + current_length += len(segment) + if i == len(document) - 1 or current_length >= target_seq_length: + if current_chunk: + # `a_end` is how many segments from `current_chunk` go into the `A` + # (first) sentence. + a_end = 1 + if len(current_chunk) >= 2: + a_end = rng.randint(1, len(current_chunk) - 1) + + tokens_a = [] + for j in range(a_end): + tokens_a.extend(current_chunk[j]) + + tokens_b = [] + # Random next + is_random_next = False + if len(current_chunk) == 1 or rng.random() < 0.5: + is_random_next = True + target_b_length = target_seq_length - len(tokens_a) + + # This should rarely go for more than one iteration for large + # corpora. However, just to be careful, we try to make sure that + # the random document is not the same as the document + # we're processing. 
+ for _ in range(10): + random_document_index = rng.randint(0, len(all_documents) - 1) + if random_document_index != document_index: + break + + random_document = all_documents[random_document_index] + random_start = rng.randint(0, len(random_document) - 1) + for j in range(random_start, len(random_document)): + tokens_b.extend(random_document[j]) + if len(tokens_b) >= target_b_length: + break + # We didn't actually use these segments so we "put them back" so + # they don't go to waste. + num_unused_segments = len(current_chunk) - a_end + i -= num_unused_segments + # Actual next + else: + is_random_next = False + for j in range(a_end, len(current_chunk)): + tokens_b.extend(current_chunk[j]) + truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) + + assert len(tokens_a) >= 1 + assert len(tokens_b) >= 1 + + tokens = [] + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + + tokens.append("[SEP]") + segment_ids.append(0) + + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + (tokens, masked_lm_positions, + masked_lm_labels) = create_masked_lm_predictions( + tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) + instance = TrainingInstance( + tokens=tokens, + segment_ids=segment_ids, + is_random_next=is_random_next, + masked_lm_positions=masked_lm_positions, + masked_lm_labels=masked_lm_labels) + instances.append(instance) + current_chunk = [] + current_length = 0 + i += 1 + + return instances + + +MaskedLmInstance = collections.namedtuple("MaskedLmInstance", + ["index", "label"]) + + +def create_masked_lm_predictions(tokens, masked_lm_prob, + max_predictions_per_seq, vocab_words, rng): + """Creates the predictions for the masked LM objective.""" + + cand_indexes = [] + for (i, token) in enumerate(tokens): + if token == "[CLS]" or token == "[SEP]": + continue + cand_indexes.append(i) + + rng.shuffle(cand_indexes) + + output_tokens = list(tokens) + + num_to_predict = min(max_predictions_per_seq, + max(1, int(round(len(tokens) * masked_lm_prob)))) + + masked_lms = [] + covered_indexes = set() + for index in cand_indexes: + if len(masked_lms) >= num_to_predict: + break + if index in covered_indexes: + continue + covered_indexes.add(index) + + masked_token = None + # 80% of the time, replace with [MASK] + if rng.random() < 0.8: + masked_token = "[MASK]" + else: + # 10% of the time, keep original + if rng.random() < 0.5: + masked_token = tokens[index] + # 10% of the time, replace with random word + else: + masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] + + output_tokens[index] = masked_token + + masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) + + masked_lms = sorted(masked_lms, key=lambda x: x.index) + + masked_lm_positions = [] + masked_lm_labels = [] + for p in masked_lms: + masked_lm_positions.append(p.index) + masked_lm_labels.append(p.label) + + return (output_tokens, masked_lm_positions, masked_lm_labels) + + +def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): + """Truncates a pair of sequences to a maximum sequence length.""" + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_num_tokens: + break + + trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b + assert len(trunc_tokens) >= 1 + + # We want to sometimes truncate from the front and sometimes from the + # back to add more randomness and avoid biases. 
+ if rng.random() < 0.5: + del trunc_tokens[0] + else: + trunc_tokens.pop() + + +def main(): + + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--vocab_file", + default=None, + type=str, + required=True, + help="The vocabulary the BERT model will train on.") + parser.add_argument("--input_file", + default=None, + type=str, + required=True, + help="The input train corpus. can be directory with .txt files or a path to a single file") + parser.add_argument("--output_file", + default=None, + type=str, + required=True, + help="The output file where the model checkpoints will be written.") + + ## Other parameters + + # str + parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") + + #int + parser.add_argument("--max_seq_length", + default=128, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. \n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--dupe_factor", + default=10, + type=int, + help="Number of times to duplicate the input data (with different masks).") + parser.add_argument("--max_predictions_per_seq", + default=20, + type=int, + help="Maximum sequence length.") + + + # floats + + parser.add_argument("--masked_lm_prob", + default=0.15, + type=float, + help="Masked LM probability.") + + parser.add_argument("--short_seq_prob", + default=0.1, + type=float, + help="Probability to create a sequence shorter than maximum sequence length") + + parser.add_argument("--do_lower_case", + action='store_true', + default=True, + help="Whether to lower case the input text. True for uncased models, False for cased models.") + parser.add_argument('--random_seed', + type=int, + default=12345, + help="random seed for initialization") + + args = parser.parse_args() + + tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) + + + input_files = [] + if os.path.isfile(args.input_file): + input_files.append(args.input_file) + elif os.path.isdir(args.input_file): + input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt') )] + else: + raise ValueError("{} is not a valid path".format(args.input_file)) + + rng = random.Random(args.random_seed) + instances = create_training_instances( + input_files, tokenizer, args.max_seq_length, args.dupe_factor, + args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq, + rng) + + output_file = args.output_file + + + write_instance_to_example_file(instances, tokenizer, args.max_seq_length, + args.max_predictions_per_seq, output_file) + + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/data/README.md b/PyTorch/LanguageModeling/BERT/data/README.md new file mode 100644 index 00000000..d2ec8d02 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/README.md @@ -0,0 +1,30 @@ +Steps to reproduce datasets from web + +1) Build the container + * docker build -t bert_prep . 
+2) Run the container interactively + * nvidia-docker run -it --ipc=host bert_prep + * Optional: Mount data volumes + * -v yourpath:/workspace/bert/data/wikipedia_corpus/download + * -v yourpath:/workspace/bert/data/wikipedia_corpus/extracted_articles + * -v yourpath:/workspace/bert/data/wikipedia_corpus/raw_data + * -v yourpath:/workspace/bert/data/wikipedia_corpus/intermediate_files + * -v yourpath:/workspace/bert/data/wikipedia_corpus/final_text_file_single + * -v yourpath:/workspace/bert/data/wikipedia_corpus/final_text_files_sharded + * -v yourpath:/workspace/bert/data/wikipedia_corpus/final_tfrecords_sharded + * -v yourpath:/workspace/bert/data/bookcorpus/download + * -v yourpath:/workspace/bert/data/bookcorpus/final_text_file_single + * -v yourpath:/workspace/bert/data/bookcorpus/final_text_files_sharded + * -v yourpath:/workspace/bert/data/bookcorpus/final_tfrecords_sharded + * Optional: Select visible GPUs + * -e CUDA_VISIBLE_DEVICES=0 + +** Inside of the container starting here** +3) Download pretrained weights (they contain vocab files for preprocessing) + * cd data/pretrained_models_google && python3 download_models.py +4) "One-click" Wikipedia data download and prep (provides tfrecords) + * Set your configuration in data/wikipedia_corpus/config.sh + * cd /data/wikipedia_corpus && ./run_preprocessing.sh +5) "One-click" BookCorpus data download and prep (provided tfrecords) + * Set your configuration in data/wikipedia_corpus/config.sh + * cd /data/bookcorpus && ./run_preprocessing.sh diff --git a/PyTorch/LanguageModeling/BERT/data/bookcorpus/clean_and_merge_text.py b/PyTorch/LanguageModeling/BERT/data/bookcorpus/clean_and_merge_text.py new file mode 100644 index 00000000..c32540c3 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/bookcorpus/clean_and_merge_text.py @@ -0,0 +1,23 @@ +# NVIDIA + +import glob +import os +import argparse + +parser = argparse.ArgumentParser(description='Cleaning and merge downloaded bookcorpus files') + +parser.add_argument('download_path', type=str) +parser.add_argument('output_file', type=str) + +args = parser.parse_args() + +download_path = args.download_path +output_file = args.output_file + +with open(output_file, "w") as ofile: + for filename in glob.glob('{}/*.txt'.format(download_path), recursive=True): + with open(filename, mode='r', encoding="utf-8-sig") as file: + for line in file: + if line.strip() != "": + ofile.write(line.strip() + " ") + ofile.write("\n\n") diff --git a/PyTorch/LanguageModeling/BERT/data/bookcorpus/download_bookcorpus.sh b/PyTorch/LanguageModeling/BERT/data/bookcorpus/download_bookcorpus.sh new file mode 100755 index 00000000..2a898976 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/bookcorpus/download_bookcorpus.sh @@ -0,0 +1,9 @@ +#! /bin/bash + +# Download books +mkdir -p ./download +python3 /workspace/bookcorpus/download_files.py --list /workspace/bookcorpus/url_list.jsonl --out ./download --trash-bad-count + +# Clean and prep (one book per line) +python3 ./clean_and_merge_text.py ./download bookcorpus.txt + diff --git a/PyTorch/LanguageModeling/BERT/data/create_datasets_from_start.sh b/PyTorch/LanguageModeling/BERT/data/create_datasets_from_start.sh new file mode 100755 index 00000000..9e562468 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/create_datasets_from_start.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Note: There are several directories created to make it clear what has been performed at each stage of preprocessing. 
The intermediate files may be useful if you want to further clean/prepare/augment the data for your own applications. +# NLTK was chosen as the default over spaCy simply due to speed of sentence segmentation on the large files. + +MERGED_DIR=$1 +args="${*:2}" + +source utils/config.sh + +mkdir -p ${MERGED_DIR} + +corpus_file=${MERGED_DIR}/corpus.txt +## Shuffle the full corpus texts +if [ ! -z $3 ] +then + echo "Merging $args" + cat $args | sed "/^$/d" | shuf > $corpus_file +else + corpus_file=$2 +fi + +# Split articles into one-sentence-per-line format for use with BERT scripts +echo "Applying sentence segmentation to get one sentence per line" +mkdir -p ${MERGED_DIR}/final_text_file_single +python3 utils/sentence_segmentation_nltk.py $corpus_file ${MERGED_DIR}/final_text_file_single/corpus.segmented.nltk.txt + +## Shard finalized text so that it has a chance of fitting in memory when creating pretraining data into hdf5 (choose appropriate number of shards for distributed training) +echo "Shard text files - size is approximate to prevent splitting an article across shards" +mkdir -p ${MERGED_DIR}/final_text_files_sharded +python3 utils/shard_text_input_file.py ${MERGED_DIR}/final_text_file_single/corpus.segmented.nltk.txt ${MERGED_DIR}/final_text_files_sharded/corpus.segmented.part. + +# Convert sharded text files into hdf5 that are ready for BERT pretraining +echo "Creating hdf5 for each text shard" +mkdir -p ${MERGED_DIR}/hdf5_shards +export TARGET_DIR=${MERGED_DIR} +. utils/preprocessing_xargs_wrapper.sh ${N_PROCS_PREPROCESS} + diff --git a/PyTorch/LanguageModeling/BERT/data/glue/download_mrpc.sh b/PyTorch/LanguageModeling/BERT/data/glue/download_mrpc.sh new file mode 100755 index 00000000..d6faedb4 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/glue/download_mrpc.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +echo "Downloading MRPC data" + +wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py + +python download_glue_data.py --data_dir . 
--tasks MRPC diff --git a/PyTorch/LanguageModeling/BERT/data/merge_datasets_after_creation.sh b/PyTorch/LanguageModeling/BERT/data/merge_datasets_after_creation.sh new file mode 100755 index 00000000..2a4ab8da --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/merge_datasets_after_creation.sh @@ -0,0 +1,29 @@ +#!/bin/bash + + +MERGED_DIR=$1 # e.g wikipedia+bookcorpus +INPUTFILES=$2 # directories with hdf5 files separated by comma +NUM_SHARDS=$3 + +source utils/config.sh + + +META_DIR=$MERGED_DIR/meta +mkdir -p ${MERGED_DIR} +mkdir -p ${META_DIR} + +echo "create mixed dataset ids" +echo "python utils/create_mixed_dataset_ids.py --input_files=${INPUTFILES} --num_output_shards=${NUM_SHARDS} --output_dir=${META_DIR} --random_seed=${SEED}" +python utils/create_mixed_dataset_ids.py --input_files=${INPUTFILES} --num_output_shards=${NUM_SHARDS} --output_dir=${META_DIR} --random_seed=${SEED} + + +echo "Creating hdf5 for each text shard" +mkdir -p ${MERGED_DIR}/hdf5_shards +echo "create mixed datasets with hdf5 files" +echo "python utils/create_mixed_dataset.py --input_files=${INPUTFILES} --output_dir=${MERGED_DIR}/hdf5_shards --lookup=${META_DIR}/lookup_table.pkl --indices_dir=${META_DIR} --index_range=0-${NUM_SHARDS} --random_seed=${SEED}" +python utils/create_mixed_dataset.py --input_files=${INPUTFILES} --output_dir=${MERGED_DIR}/hdf5_shards --lookup=${META_DIR}/lookup_table.pkl --indices_dir=${META_DIR} --index_range=0-$((NUM_SHARDS-1)) --random_seed=${SEED} + + +rm -rf ${META_DIR} + + diff --git a/PyTorch/LanguageModeling/BERT/data/squad/squad_download.sh b/PyTorch/LanguageModeling/BERT/data/squad/squad_download.sh new file mode 100755 index 00000000..249778f5 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/squad/squad_download.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +echo "Downloading dataset for squad..." + +# Download SQuAD + +v1="v1.1" +mkdir $v1 +wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O $v1/train-v1.1.json +wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O $v1/dev-v1.1.json +wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O $v1/evaluate-v1.1.py + +EXP_TRAIN_v1='981b29407e0affa3b1b156f72073b945 -' +EXP_DEV_v1='3e85deb501d4e538b6bc56f786231552 -' +EXP_EVAL_v1='afb04912d18ff20696f7f88eed49bea9 -' +CALC_TRAIN_v1=`cat ${v1}/train-v1.1.json |md5sum` +CALC_DEV_v1=`cat ${v1}/dev-v1.1.json |md5sum` +CALC_EVAL_v1=`cat ${v1}/evaluate-v1.1.py |md5sum` + +v2="v2.0" +mkdir $v2 +wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O $v2/train-v2.0.json +wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O $v2/dev-v2.0.json +wget https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ -O $v2/evaluate-v2.0.py + +EXP_TRAIN_v2='62108c273c268d70893182d5cf8df740 -' +EXP_DEV_v2='246adae8b7002f8679c027697b0b7cf8 -' +EXP_EVAL_v2='ff23213bed5516ea4a6d9edb6cd7d627 -' + +CALC_TRAIN_v2=`cat ${v2}/train-v2.0.json |md5sum` +CALC_DEV_v2=`cat ${v2}/dev-v2.0.json |md5sum` +CALC_EVAL_v2=`cat ${v2}/evaluate-v2.0.py |md5sum` + +echo "Squad data download done!" + +echo "Verifying Dataset...." + +if [ "$EXP_TRAIN_v1" != "$CALC_TRAIN_v1" ]; then + echo "train-v1.1.json is corrupted! md5sum doesn't match" +fi + +if [ "$EXP_DEV_v1" != "$CALC_DEV_v1" ]; then + echo "dev-v1.1.json is corrupted! md5sum doesn't match" +fi +if [ "$EXP_EVAL_v1" != "$CALC_EVAL_v1" ]; then + echo "evaluate-v1.1.py is corrupted! 
md5sum doesn't match" +fi + + +if [ "$EXP_TRAIN_v2" != "$CALC_TRAIN_v2" ]; then + echo "train-v2.0.json is corrupted! md5sum doesn't match" +fi +if [ "$EXP_DEV_v2" != "$CALC_DEV_v2" ]; then + echo "dev-v2.0.json is corrupted! md5sum doesn't match" +fi +if [ "$EXP_EVAL_v2" != "$CALC_EVAL_v2" ]; then + echo "evaluate-v2.0.py is corrupted! md5sum doesn't match" +fi + +echo "Complete!" diff --git a/PyTorch/LanguageModeling/BERT/data/utils/config.sh b/PyTorch/LanguageModeling/BERT/data/utils/config.sh new file mode 100755 index 00000000..3192fb04 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/config.sh @@ -0,0 +1,24 @@ +#! /bin/bash + +set -e + +USE_BERT_LARGE=true +MAX_SEQUENCE_LENGTH=512 +MAX_PREDICTIONS_PER_SEQUENCE=80 +MASKED_LM_PROB=0.15 +SEED=12345 +DUPE_FACTOR=5 +DO_LOWER_CASE="True" +N_LINES_PER_SHARD_APPROX=396000 # Default=396000 creates 256 shards + +N_PROCS_PREPROCESS=4 # Adjust this based on memory requirements and available number of cores + +BERT_BASE_DIR="/workspace/bert/vocab/uncased_L-12_H-768_A-12" +BERT_LARGE_DIR="/workspace/bert/vocab/uncased_L-24_H-1024_A-16" + +if [ "$USE_BERT_LARGE" = true ] ; then + VOCAB_FILE="${BERT_LARGE_DIR}/vocab.txt" +else + VOCAB_FILE="${BERT_BASE_DIR}/vocab.txt" +fi + diff --git a/PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset.py b/PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset.py new file mode 100644 index 00000000..7e63dc2e --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset.py @@ -0,0 +1,160 @@ +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse +import logging +import os +import random +from io import open +import h5py +import numpy as np +from tqdm import tqdm, trange +import random +import collections +import math +import multiprocessing as mp +""" +mixing hdf5 shards with each other +""" + + +def shard_files(output_files, l_instance_ids, lookuptable, files): + + l_input_ids = [] + l_input_masks = [] + l_segment_ids = [] + l_masked_lm_positions = [] + l_masked_lm_ids = [] + l_next_sentence_labels = [] + + seq_len = 0 + pred_len = 0 + with h5py.File(files[0], 'r') as f: + seq_len = f['input_ids'].shape[1] + pred_len = f['masked_lm_positions'].shape[1] + + assert(seq_len > 0 and pred_len > 0) + for i, output_file in enumerate(output_files): + output_length = len(l_instance_ids[i]) + print("preparing to write {} instances to {}".format(output_length, output_file)) + input_ids = np.ones([output_length, seq_len], dtype=np.int32) + input_masks = np.ones([output_length, seq_len], dtype=np.int8) + segment_ids = np.ones([output_length, seq_len], dtype=np.int8) + masked_lm_positions = np.ones([output_length, pred_len], dtype=np.int32) + masked_lm_ids= np.ones([output_length, pred_len], dtype=np.int32) + next_sentence_labels = np.ones(output_length, dtype=np.int8) + l_input_ids.append(input_ids) + l_input_masks.append(input_masks) + l_segment_ids.append(segment_ids) + l_masked_lm_positions.append(masked_lm_positions) + l_masked_lm_ids.append(masked_lm_ids) + l_next_sentence_labels.append(next_sentence_labels) + for did, f in enumerate(tqdm(files)): + h5_f = h5py.File(f, 'r') + f_input_ids = h5_f['input_ids'][:] + f_input_masks = h5_f['input_mask'][:] + f_segment_ids = h5_f['segment_ids'][:] + f_masked_lm_positions = h5_f['masked_lm_positions'][:] + f_masked_lm_ids = h5_f['masked_lm_ids'][:] + f_next_sentence_labels = h5_f['next_sentence_labels'][:] + h5_f.close() + for out_i, out_file in enumerate(output_files): + instance_ids 
= l_instance_ids[out_i] + for l, idx in enumerate(instance_ids): + doc_id, line_id = lookuptable[idx] + if doc_id == did: + l_input_ids[out_i][l] = f_input_ids[line_id] + l_input_masks[out_i][l] = f_input_masks[line_id] + l_segment_ids[out_i][l] = f_segment_ids[line_id] + l_masked_lm_positions[out_i][l] = f_masked_lm_positions[line_id] + l_masked_lm_ids[out_i][l] = f_masked_lm_ids[line_id] + l_next_sentence_labels[out_i][l] = f_next_sentence_labels[line_id] + for out_i, out_file in enumerate(output_files): + output_length = len(l_input_ids[out_i]) + print("writing {} instances to {}".format(output_length, out_file)) + with h5py.File(out_file, 'w') as f: + f.create_dataset("input_ids", data=l_input_ids[out_i], dtype='i4', compression='gzip') + f.create_dataset("input_mask", data=l_input_masks[out_i], dtype='i1', compression='gzip') + f.create_dataset("segment_ids", data=l_segment_ids[out_i], dtype='i1', compression='gzip') + f.create_dataset("masked_lm_positions", data=l_masked_lm_positions[out_i], dtype='i4', compression='gzip') + f.create_dataset("masked_lm_ids", data=l_masked_lm_ids[out_i], dtype='i4', compression='gzip') + f.create_dataset("next_sentence_labels", data=l_next_sentence_labels[out_i], dtype='i1', compression='gzip') + + +def main(): + + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--input_files", + default=None, + type=str, + required=True, + help="comma seperated list of file paths, each path can be either file or directory of files") + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="directory for output shards") + parser.add_argument("--lookup", + default=None, + type=str, + required=True, + help="path to lookup table") + parser.add_argument("--indices_dir", + default=None, + type=str, + required=True, + help="path to shuffled instance indices") + parser.add_argument("--index_range", + default=None, + type=str, + required=True, + help="index range of output files to be written out, e.g specify '0-100' for writing out 0.hdf5 , ..., 100.hdf5") + parser.add_argument('--random_seed', + type=int, + default=12345, + help="random seed for initialization") + + args = parser.parse_args() + + rng = random.Random(args.random_seed) + np.random.seed(args.random_seed) + + + input_paths = args.input_files.strip().split(',') + input_paths = [f for f in input_paths if f] + + input_files = [] + for path in input_paths: + if os.path.isfile(path): + assert (path.endswith('.hdf5')), "file must be hdf5 file" + input_files.append(path) + else: + assert os.path.isdir(path) + hdf5_files = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and f.endswith('.hdf5')] + input_files.extend(hdf5_files) + + input_files.sort() + assert(os.path.isdir(args.output_dir)) + + + + print("loading indices file") + start_idx, end_idx= int(args.index_range.split('-')[0]), int(args.index_range.split('-')[1]) + index_files = [] + instance_ids = [] + for i in range(start_idx, end_idx + 1): + index_files.append(os.path.join(args.indices_dir, "indices_" + str(i) + ".npy")) + instance_ids.append( np.load(index_files[-1])) + + output_files = [os.path.join(args.output_dir, indices_file.split('.')[0].split('_')[-1] + ".hdf5") for indices_file in index_files] + print("output_files", output_files) + + print("loading lookup table") + lookup_table = np.load(args.lookup) + shard_files(output_files, instance_ids, lookup_table, input_files) + + + +if __name__ == "__main__": + main() + diff --git 
a/PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset_ids.py b/PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset_ids.py new file mode 100644 index 00000000..6d4e1b89 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/create_mixed_dataset_ids.py @@ -0,0 +1,134 @@ +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse +import logging +import os +import random +from io import open +import h5py +import numpy as np +from tqdm import tqdm, trange +import random +import collections +import math +from tqdm import tqdm +import multiprocessing as mp +import pickle +import json +""" +mixing hdf5 shards with each other +""" +def load_and_prepare(input_files, num_shards): + + seq_len = None + pred_len = None + + input_lengths = [] + for input_file in input_files: + with h5py.File(input_file, 'r') as f: + input_lengths.append(len(f['input_ids'])) + if seq_len is None: + seq_len = f['input_ids'].shape[1] + pred_len = f['masked_lm_ids'].shape[1] + + assert (isinstance(seq_len, int) and isinstance(pred_len, int)) + + total_instances = sum(input_lengths) + n_inst_per_file = math.ceil(total_instances * 1.0 / num_shards) + permutation = np.random.permutation(total_instances) + + + instance_indices = [] + for i in range(0, num_shards): + start_pos = i * n_inst_per_file + end_pos = min((i+1) * n_inst_per_file, total_instances) + instance_indices.append(permutation[start_pos:end_pos]) + + return seq_len, pred_len, input_lengths, instance_indices + + + + +def main(): + + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--input_files", + default=None, + type=str, + required=True, + help="comma seperated list of file paths, each path can be either file or directory of hdf5 files") + parser.add_argument("--num_output_shards", + default=None, + type=int, + required=True, + help="number of shards to be created. 
shards will be created as even as possible.") + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="directory for meta files") + parser.add_argument('--random_seed', + type=int, + default=12345, + help="random seed for initialization") + + args = parser.parse_args() + + rng = random.Random(args.random_seed) + np.random.seed(args.random_seed) + + + input_paths = args.input_files.strip().split(',') + input_paths = [f for f in input_paths if f] + + input_files = [] + for path in input_paths: + if os.path.isfile(path): + assert (path.endswith('.hdf5')), "file must be hdf5 file" + input_files.append(path) + else: + assert os.path.isdir(path) + hdf5_files = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and f.endswith('.hdf5')] + input_files.extend(hdf5_files) + input_files.sort() + + assert(os.path.isdir(args.output_dir)) + + print("load and prepare") + seq_len, pred_len, input_lengths, output_inst_indices = load_and_prepare(input_files, args.num_output_shards) + print("preparing lookup table") + total_num_instances = sum(input_lengths) + out_2_in = dict() + length_so_far = 0 + for i, l in enumerate(input_lengths): + for j in range(l): + out_2_in[length_so_far + j] = (i, j) + length_so_far += input_lengths[i] + + + + output_files = [os.path.join(args.output_dir, "indices_" + str(i) + ".npy") for i in range(args.num_output_shards)] + print("save data") + + + with open(os.path.join(args.output_dir, 'lookup_table.pkl'), 'wb') as f: + pickle.dump(out_2_in, f) + + for i, out_file in enumerate(output_files): + np.save(out_file, output_inst_indices[i]) + + + meta = {'seq_len': seq_len, 'pred_len':pred_len} + + with open(os.path.join(args.output_dir, 'meta_data.pkl'), 'wb') as f: + pickle.dump(meta, f) + + + + + + + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/data/utils/preprocessing.sh b/PyTorch/LanguageModeling/BERT/data/utils/preprocessing.sh new file mode 100755 index 00000000..64e63a9b --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/preprocessing.sh @@ -0,0 +1,23 @@ +#! /bin/bash + +SHARD_INDEX=${1} +INPUT_FILE="${TARGET_DIR}/final_text_files_sharded/corpus.segmented.part.${SHARD_INDEX}.txt" + +source /workspace/bert/data/utils/config.sh + +OUTPUT_DIR=${TARGET_DIR}/hdf5_shards +mkdir -p ${OUTPUT_DIR} + +OUTPUT_FILE="${OUTPUT_DIR}/${SHARD_INDEX}.hdf5" + +python /workspace/bert/create_pretraining_data.py \ + --input_file=${INPUT_FILE} \ + --output_file=${OUTPUT_FILE} \ + --vocab_file=${VOCAB_FILE} \ + --do_lower_case \ + --max_seq_length=${MAX_SEQUENCE_LENGTH} \ + --max_predictions_per_seq=${MAX_PREDICTIONS_PER_SEQUENCE} \ + --masked_lm_prob=${MASKED_LM_PROB} \ + --random_seed=${SEED} \ + --dupe_factor=${DUPE_FACTOR} + diff --git a/PyTorch/LanguageModeling/BERT/data/utils/preprocessing_xargs_wrapper.sh b/PyTorch/LanguageModeling/BERT/data/utils/preprocessing_xargs_wrapper.sh new file mode 100755 index 00000000..0e767cd0 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/preprocessing_xargs_wrapper.sh @@ -0,0 +1,15 @@ +#! 
/bin/bash + +source /workspace/bert/data/utils/config.sh + +SHARD_COUNT=0 +rm -rf ${TARGET_DIR}/xarg_list.txt +touch ${TARGET_DIR}/xarg_list.txt +for file in ${TARGET_DIR}/final_text_files_sharded/*; do + echo ${SHARD_COUNT} >> ${TARGET_DIR}/xarg_list.txt + SHARD_COUNT=$((SHARD_COUNT+1)) +done + +xargs -n 1 --max-procs=${N_PROCS_PREPROCESS} --arg-file=${TARGET_DIR}/xarg_list.txt /workspace/bert/data/utils/preprocessing.sh + +rm ${TARGET_DIR}/xarg_list.txt diff --git a/PyTorch/LanguageModeling/BERT/data/utils/sentence_segmentation_nltk.py b/PyTorch/LanguageModeling/BERT/data/utils/sentence_segmentation_nltk.py new file mode 100644 index 00000000..fa237987 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/sentence_segmentation_nltk.py @@ -0,0 +1,28 @@ +# NVIDIA + +import argparse +import nltk +import os + +nltk.download('punkt') + +parser = argparse.ArgumentParser(description='Sentence Segmentation') + +parser.add_argument('input_file', type=str) +parser.add_argument('output_file', type=str) + +args = parser.parse_args() + +input_file = args.input_file +output_file = args.output_file + +doc_seperator = "\n" + +with open(input_file) as ifile: + with open(output_file, "w") as ofile: + for line in ifile: + if line != "\n": + sent_list = nltk.tokenize.sent_tokenize(line) + for sent in sent_list: + ofile.write(sent + "\n") + ofile.write(doc_seperator) diff --git a/PyTorch/LanguageModeling/BERT/data/utils/shard_text_input_file.py b/PyTorch/LanguageModeling/BERT/data/utils/shard_text_input_file.py new file mode 100644 index 00000000..f436314e --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/utils/shard_text_input_file.py @@ -0,0 +1,47 @@ +# NVIDIA + +import os +import argparse + +parser = argparse.ArgumentParser(description='Dataset sharding') + +parser.add_argument('input_file', type=str) +parser.add_argument('output_file', type=str) + +args = parser.parse_args() + +input_file = args.input_file +output_file = args.output_file + +doc_seperator = "\n" + +line_buffer = [] +shard_size = 396000 # Approximate, will split at next article break +line_counter = 0 +shard_index = 0 + +ifile_lines = 0 +with open(input_file) as ifile: + for line in ifile: + ifile_lines += 1 + +print("Input file contains", ifile_lines, "lines.") + +iline_counter = 1 +with open(input_file) as ifile: + for line in ifile: + if line_counter < shard_size and iline_counter < ifile_lines: + line_buffer.append(line) + line_counter += 1 + iline_counter += 1 + elif line_counter >= shard_size and line != "\n" and iline_counter < ifile_lines: + line_buffer.append(line) + line_counter += 1 + iline_counter += 1 + else: + with open(output_file + str(shard_index) + ".txt", "w") as ofile: + for oline in line_buffer: + ofile.write(oline) + line_buffer = [] + line_counter = 0 + shard_index += 1 diff --git a/PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/download_wikipedia.sh b/PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/download_wikipedia.sh new file mode 100755 index 00000000..63608a00 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/download_wikipedia.sh @@ -0,0 +1,30 @@ +#! /bin/bash + +WIKI_DUMP="ftp://ftpmirror.your.org/pub/wikimedia/dumps/enwiki/20190301/enwiki-20190301-pages-articles-multistream.xml.bz2" +N_PROCS_PREPROCESS=4 # Adjust this based on memory requirements and available number of cores + +# Download Wikipedia dump file +mkdir -p ./download + +# Not using --noclobber since it emits an error if exists (incompatible with bash 'set -e') +echo "Downloading Wikidump" +if [ ! 
-f ./download/wikidump.xml.bz2 ]; then + wget -O ./download/wikidump.xml.bz2 ${WIKI_DUMP} +fi + +# Extract dump +echo "Extracting Wikidump" +mkdir -p ./raw_data +if [ ! -f ./raw_data/wikidump.xml ]; then + pv ./download/wikidump.xml.bz2 | bunzip2 -kdc > ./raw_data/wikidump.xml +fi + +# Wikiextractor.py - Creates lots of folders/files in "doc format" +echo "Running Wikiextractor" +mkdir -p ./extracted_articles +/workspace/wikiextractor/WikiExtractor.py ./raw_data/wikidump.xml -b 1000M --processes ${N_PROCS_PREPROCESS} -o ./extracted_articles + +# Remove XML Tags and extraneous titles (since they are not sentences) +# Also clean to remove lines between paragraphs within article and use space-separated articles +echo "Cleaning and formatting files (one article per line)" +python3 ./remove_tags_and_clean.py ./extracted_articles ./wikipedia_corpus.txt diff --git a/PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/remove_tags_and_clean.py b/PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/remove_tags_and_clean.py new file mode 100644 index 00000000..fb096b5c --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/data/wikipedia_corpus/remove_tags_and_clean.py @@ -0,0 +1,39 @@ +# NVIDIA + +import glob +import os +import argparse + +parser = argparse.ArgumentParser(description='Cleaning and merge downloaded bookcorpus files') + +parser.add_argument('extracted_articles_path', type=str) +parser.add_argument('output_file', type=str) + +args = parser.parse_args() + +extracted_articles_path = args.extracted_articles_path +output_file = args.output_file + +with open(output_file, "w") as ofile: + for dirname in glob.glob('{}/*/'.format(extracted_articles_path), recursive=False): + for filename in glob.glob(dirname + 'wiki_*', recursive=True): + print(filename) + article_lines = [] + article_open = False + + with open(filename, "r") as file: + for line in file: + if "" in line: + article_open = False + for oline in article_lines[1:]: + if oline != "\n": + ofile.write(oline.rstrip() + " ") + ofile.write("\n\n") + article_lines = [] + else: + if article_open: + article_lines.append(line) + + diff --git a/PyTorch/LanguageModeling/BERT/extract_features.py b/PyTorch/LanguageModeling/BERT/extract_features.py new file mode 100644 index 00000000..c41d4517 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/extract_features.py @@ -0,0 +1,297 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
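+# Input format note: each line of --input_file is one example; a sentence pair can
+# be written as "text_a ||| text_b" (see read_examples below), otherwise the whole
+# line is treated as a single sentence.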
+"""Extract pre-computed feature vectors from a PyTorch BERT model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import collections +import logging +import json +import re + +import torch +from torch.utils.data import TensorDataset, DataLoader, SequentialSampler +from torch.utils.data.distributed import DistributedSampler + +from tokenization import BertTokenizer +from modeling import BertModel + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + + +class InputExample(object): + + def __init__(self, unique_id, text_a, text_b): + self.unique_id = unique_id + self.text_a = text_a + self.text_b = text_b + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): + self.unique_id = unique_id + self.tokens = tokens + self.input_ids = input_ids + self.input_mask = input_mask + self.input_type_ids = input_type_ids + + +def convert_examples_to_features(examples, seq_length, tokenizer): + """Loads a data file into a list of `InputBatch`s.""" + + features = [] + for (ex_index, example) in enumerate(examples): + tokens_a = tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + + if tokens_b: + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > seq_length - 2: + tokens_a = tokens_a[0:(seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . [SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). This is not *strictly* necessary + # since the [SEP] token unambigiously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as as the "sentence vector". Note that this only makes sense because + # the entire model is fine-tuned. + tokens = [] + input_type_ids = [] + tokens.append("[CLS]") + input_type_ids.append(0) + for token in tokens_a: + tokens.append(token) + input_type_ids.append(0) + tokens.append("[SEP]") + input_type_ids.append(0) + + if tokens_b: + for token in tokens_b: + tokens.append(token) + input_type_ids.append(1) + tokens.append("[SEP]") + input_type_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. 
+ while len(input_ids) < seq_length: + input_ids.append(0) + input_mask.append(0) + input_type_ids.append(0) + + assert len(input_ids) == seq_length + assert len(input_mask) == seq_length + assert len(input_type_ids) == seq_length + + if ex_index < 5: + logger.info("*** Example ***") + logger.info("unique_id: %s" % (example.unique_id)) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info( + "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) + + features.append( + InputFeatures( + unique_id=example.unique_id, + tokens=tokens, + input_ids=input_ids, + input_mask=input_mask, + input_type_ids=input_type_ids)) + return features + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +def read_examples(input_file): + """Read a list of `InputExample`s from an input file.""" + examples = [] + unique_id = 0 + with open(input_file, "r", encoding='utf-8') as reader: + while True: + line = reader.readline() + if not line: + break + line = line.strip() + text_a = None + text_b = None + m = re.match(r"^(.*) \|\|\| (.*)$", line) + if m is None: + text_a = line + else: + text_a = m.group(1) + text_b = m.group(2) + examples.append( + InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)) + unique_id += 1 + return examples + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--input_file", default=None, type=str, required=True) + parser.add_argument("--output_file", default=None, type=str, required=True) + parser.add_argument("--bert_model", default=None, type=str, required=True, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") + + ## Other parameters + parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") + parser.add_argument("--layers", default="-1,-2,-3,-4", type=str) + parser.add_argument("--max_seq_length", default=128, type=int, + help="The maximum total input sequence length after WordPiece tokenization. 
Sequences longer " + "than this will be truncated, and sequences shorter than this will be padded.") + parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.") + parser.add_argument("--local_rank", + type=int, + default=-1, + help = "local_rank for distributed training on gpus") + parser.add_argument("--no_cuda", + action='store_true', + help="Whether not to use CUDA when available") + + args = parser.parse_args() + + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + n_gpu = torch.cuda.device_count() + else: + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl') + logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1))) + + layer_indexes = [int(x) for x in args.layers.split(",")] + + tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) + + examples = read_examples(args.input_file) + + features = convert_examples_to_features( + examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer) + + unique_id_to_feature = {} + for feature in features: + unique_id_to_feature[feature.unique_id] = feature + + model = BertModel.from_pretrained(args.bert_model) + model.to(device) + + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + + eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index) + if args.local_rank == -1: + eval_sampler = SequentialSampler(eval_data) + else: + eval_sampler = DistributedSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) + + model.eval() + with open(args.output_file, "w", encoding='utf-8') as writer: + for input_ids, input_mask, example_indices in eval_dataloader: + input_ids = input_ids.to(device) + input_mask = input_mask.to(device) + + all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask) + all_encoder_layers = all_encoder_layers + + for b, example_index in enumerate(example_indices): + feature = features[example_index.item()] + unique_id = int(feature.unique_id) + # feature = unique_id_to_feature[unique_id] + output_json = collections.OrderedDict() + output_json["linex_index"] = unique_id + all_out_features = [] + for (i, token) in enumerate(feature.tokens): + all_layers = [] + for (j, layer_index) in enumerate(layer_indexes): + layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy() + layer_output = layer_output[b] + layers = collections.OrderedDict() + layers["index"] = layer_index + layers["values"] = [ + round(x.item(), 6) for x in layer_output[i] + ] + all_layers.append(layers) + out_features = collections.OrderedDict() + out_features["token"] = token + out_features["layers"] = all_layers + all_out_features.append(out_features) + output_json["features"] = all_out_features + writer.write(json.dumps(output_json) + "\n") + + +if __name__ == "__main__": + main() diff --git 
a/PyTorch/LanguageModeling/BERT/file_utils.py b/PyTorch/LanguageModeling/BERT/file_utils.py new file mode 100644 index 00000000..b475d450 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/file_utils.py @@ -0,0 +1,249 @@ +""" +Utilities for working with the local dataset cache. +This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp +Copyright by the AllenNLP authors. +""" +from __future__ import (absolute_import, division, print_function, unicode_literals) + +import json +import logging +import os +import shutil +import tempfile +from functools import wraps +from hashlib import sha256 +import sys +from io import open + +import boto3 +import requests +from botocore.exceptions import ClientError +from tqdm import tqdm + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + +try: + from pathlib import Path + PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', + Path.home() / '.pytorch_pretrained_bert')) +except AttributeError: + PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', + os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert')) + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +def url_to_filename(url, etag=None): + """ + Convert `url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + """ + url_bytes = url.encode('utf-8') + url_hash = sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode('utf-8') + etag_hash = sha256(etag_bytes) + filename += '.' + etag_hash.hexdigest() + + return filename + + +def filename_to_url(filename, cache_dir=None): + """ + Return the url and etag (which may be ``None``) stored for `filename`. + Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + cache_path = os.path.join(cache_dir, filename) + if not os.path.exists(cache_path): + raise EnvironmentError("file {} not found".format(cache_path)) + + meta_path = cache_path + '.json' + if not os.path.exists(meta_path): + raise EnvironmentError("file {} not found".format(meta_path)) + + with open(meta_path, encoding="utf-8") as meta_file: + metadata = json.load(meta_file) + url = metadata['url'] + etag = metadata['etag'] + + return url, etag + + +def cached_path(url_or_filename, cache_dir=None): + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + parsed = urlparse(url_or_filename) + + if parsed.scheme in ('http', 'https', 's3'): + # URL, so get it from the cache (downloading if necessary) + return get_from_cache(url_or_filename, cache_dir) + elif os.path.exists(url_or_filename): + # File, and it exists. + return url_or_filename + elif parsed.scheme == '': + # File, but it doesn't exist. 
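+ # (An empty URL scheme means the argument was a plain filesystem path,
+ # e.g. a hypothetical cached_path("/tmp/missing_weights.bin"), so the
+ # missing file is reported here instead of attempting a download.)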
+ raise EnvironmentError("file {} not found".format(url_or_filename)) + else: + # Something unknown + raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) + + +def split_s3_path(url): + """Split a full s3 path into the bucket name and path.""" + parsed = urlparse(url) + if not parsed.netloc or not parsed.path: + raise ValueError("bad s3 path {}".format(url)) + bucket_name = parsed.netloc + s3_path = parsed.path + # Remove '/' at beginning of path. + if s3_path.startswith("/"): + s3_path = s3_path[1:] + return bucket_name, s3_path + + +def s3_request(func): + """ + Wrapper function for s3 requests in order to create more helpful error + messages. + """ + + @wraps(func) + def wrapper(url, *args, **kwargs): + try: + return func(url, *args, **kwargs) + except ClientError as exc: + if int(exc.response["Error"]["Code"]) == 404: + raise EnvironmentError("file {} not found".format(url)) + else: + raise + + return wrapper + + +@s3_request +def s3_etag(url): + """Check ETag on S3 object.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_object = s3_resource.Object(bucket_name, s3_path) + return s3_object.e_tag + + +@s3_request +def s3_get(url, temp_file): + """Pull a file directly from S3.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) + + +def http_get(url, temp_file): + req = requests.get(url, stream=True) + content_length = req.headers.get('Content-Length') + total = int(content_length) if content_length is not None else None + progress = tqdm(unit="B", total=total) + for chunk in req.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + progress.update(len(chunk)) + temp_file.write(chunk) + progress.close() + + +def get_from_cache(url, cache_dir=None): + """ + Given a URL, look for the corresponding dataset in the local cache. + If it's not there, download it. Then return the path to the cached file. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + # Get eTag to add to filename, if it exists. + if url.startswith("s3://"): + etag = s3_etag(url) + else: + response = requests.head(url, allow_redirects=True) + if response.status_code != 200: + raise IOError("HEAD request failed for url {} with status code {}" + .format(url, response.status_code)) + etag = response.headers.get("ETag") + + filename = url_to_filename(url, etag) + + # get cache path to put the file + cache_path = os.path.join(cache_dir, filename) + + if not os.path.exists(cache_path): + # Download to temporary file, then copy to cache dir once finished. + # Otherwise you get corrupt cache entries if the download gets interrupted. 
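+ # (NamedTemporaryFile is removed automatically when the `with` block
+ # below exits, so the only durable artifacts are the cached file and
+ # its `<cache_path>.json` metadata sidecar written further down.)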
+ with tempfile.NamedTemporaryFile() as temp_file: + logger.info("%s not found in cache, downloading to %s", url, temp_file.name) + + # GET file object + if url.startswith("s3://"): + s3_get(url, temp_file) + else: + http_get(url, temp_file) + + # we are copying the file before closing it, so flush to avoid truncation + temp_file.flush() + # shutil.copyfileobj() starts at the current position, so go to the start + temp_file.seek(0) + + logger.info("copying %s to cache at %s", temp_file.name, cache_path) + with open(cache_path, 'wb') as cache_file: + shutil.copyfileobj(temp_file, cache_file) + + logger.info("creating metadata file for %s", cache_path) + meta = {'url': url, 'etag': etag} + meta_path = cache_path + '.json' + with open(meta_path, 'w', encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + logger.info("removing temp file %s", temp_file.name) + + return cache_path + + +def read_set_from_file(filename): + ''' + Extract a de-duped collection (set) of text from a file. + Expected file format is one item per line. + ''' + collection = set() + with open(filename, 'r', encoding='utf-8') as file_: + for line in file_: + collection.add(line.rstrip()) + return collection + + +def get_file_extension(path, dot=True, lower=True): + ext = os.path.splitext(path)[1] + ext = ext if dot else ext[1:] + return ext.lower() if lower else ext diff --git a/PyTorch/LanguageModeling/BERT/fused_adam_local.py b/PyTorch/LanguageModeling/BERT/fused_adam_local.py new file mode 100644 index 00000000..7afa76d1 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/fused_adam_local.py @@ -0,0 +1,205 @@ +import types +import importlib + +import math +import torch + +def warmup_cosine(x, warmup=0.002): + if x < warmup: + return x/warmup + return 0.5 * (1.0 + torch.cos(math.pi * x)) + +def warmup_constant(x, warmup=0.002): + if x < warmup: + return x/warmup + return 1.0 + +def warmup_linear(x, warmup=0.002): + if x < warmup: + return x/warmup + return 1.0 - x + +SCHEDULES = { + 'warmup_cosine':warmup_cosine, + 'warmup_constant':warmup_constant, + 'warmup_linear':warmup_linear, +} + +class FusedAdamBert(torch.optim.Optimizer): + + """Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via + ``python setup.py install --cuda_ext --cpp_ext``. + It has been proposed in `Adam: A Method for Stochastic Optimization`_. + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in FusedAdam! + eps_inside_sqrt (boolean, optional): in the 'update parameters' step, + adds eps to the bias-corrected second moment estimate before + evaluating square root instead of adding it to the square root of + second moment estimate as in the original paper. (default: False) + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. 
_On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + +# def __init__(self, params, +# lr=1e-3, bias_correction = True, +# betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt = False, +# weight_decay=0., max_grad_norm=0., amsgrad=False): + + def __init__(self, params, lr=1e-3, warmup=-1, t_total=-1, bias_correction=False, betas=(0.9, 0.999), schedule='warmup_linear', + eps=1e-6, eps_inside_sqrt = False, weight_decay=0., max_grad_norm=1.0, amsgrad=False): + + + global fused_adam_cuda + fused_adam_cuda = importlib.import_module("fused_adam_cuda") + + if amsgrad: + raise RuntimeError('FusedAdam does not support the AMSGrad variant.') + defaults = dict(lr=lr, bias_correction=bias_correction, + betas=betas, eps=eps, weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + super(FusedAdamBert, self).__init__(params, defaults) + print("LOCAL FUSED ADAM") + self.eps_mode = 0 if eps_inside_sqrt else 1 + self.schedule = schedule + self.t_total = t_total + self.warmup = warmup + + def get_lr(self): + lr = [] + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + if len(state) == 0: + return [0] + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) + else: + lr_scheduled = group['lr'] + lr.append(lr_scheduled) + print("LR {}".format(lr_scheduled)) + return lr + + def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + grads (list of tensors, optional): weight gradient to use for the + optimizer update. If gradients have type torch.half, parameters + are expected to be in type torch.float. (default: None) + output params (list of tensors, optional): A reduced precision copy + of the updated weights written out in addition to the regular + updated weights. Have to be of same type as gradients. (default: None) + scale (float, optional): factor to divide gradient tensor values + by before applying to weights. 
(default: 1) + """ + loss = None + if closure is not None: + loss = closure() + + if grads is None: + grads_group = [None]*len(self.param_groups) + # backward compatibility + # assuming a list/generator of parameter means single group + elif isinstance(grads, types.GeneratorType): + grads_group = [grads] + elif type(grads[0])!=list: + grads_group = [grads] + else: + grads_group = grads + + if output_params is None: + output_params_group = [None]*len(self.param_groups) + elif isinstance(output_params, types.GeneratorType): + output_params_group = [output_params] + elif type(output_params[0])!=list: + output_params_group = [output_params] + else: + output_params_group = output_params + + if grad_norms is None: + grad_norms = [None]*len(self.param_groups) + + #Compute global norm + global_norm = 0.0 + for group, grads_this_group, output_params_this_group, grad_norm in zip(self.param_groups, grads_group, + output_params_group, grad_norms): + global_norm = (global_norm ** 2 + grad_norm ** 2) ** 0.5 + + for group, grads_this_group, output_params_this_group, grad_norm in zip(self.param_groups, grads_group, output_params_group, grad_norms): + if grads_this_group is None: + grads_this_group = [None]*len(group['params']) + if output_params_this_group is None: + output_params_this_group = [None]*len(group['params']) + + # compute combined scale factor for this group + combined_scale = scale + if group['max_grad_norm'] > 0: + # norm is in fact norm*scale + clip = ((global_norm / scale) + 1e-6) / group['max_grad_norm'] + if clip > 1: + combined_scale = clip * scale + + bias_correction = 1 if group['bias_correction'] else 0 + + for p, grad, output_param in zip(group['params'], grads_this_group, output_params_this_group): + #note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients + if p.grad is None and grad is None: + continue + if grad is None: + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + out_p = torch.tensor([], dtype = torch.float) if output_param is None else output_param + #Changes sharath + + schedule_fct = SCHEDULES[self.schedule] + #schedule_fct(state['step']/self.t_total, self.warmup) + #step_lr = group['lr'] * schedule_fct(state['step']/self.t_total, self.warmup) + #step_lr = group['lr'] * scale#schedule_fct(state['step']/self.t_total, self.warmup)# schedule_fct(state['step']/group['t_total'], group['warmup']) + #print(scale, step_lr) + #print(group['lr']) + fused_adam_cuda.adam(p.data, + out_p, + exp_avg, + exp_avg_sq, + grad, + group['lr'], #step_lr,#group['lr'], + beta1, + beta2, + group['eps'], + combined_scale, + state['step'], + self.eps_mode, + bias_correction, + group['weight_decay']) + return loss diff --git a/PyTorch/LanguageModeling/BERT/modeling.py b/PyTorch/LanguageModeling/BERT/modeling.py new file mode 100644 index 00000000..ac9355b0 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/modeling.py @@ -0,0 +1,1249 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace 
Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import copy +import json +import logging +import math +import os +import shutil +import tarfile +import tempfile +import sys +from io import open + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss +from torch.utils import checkpoint + +from file_utils import cached_path + +logger = logging.getLogger(__name__) + +PRETRAINED_MODEL_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", +} +CONFIG_NAME = 'bert_config.json' +WEIGHTS_NAME = 'pytorch_model.bin' +TF_WEIGHTS_NAME = 'model.ckpt' + +def load_tf_weights_in_bert(model, tf_checkpoint_path): + """ Load tf checkpoints in a pytorch model + """ + try: + import re + import numpy as np + import tensorflow as tf + except ImportError: + print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " + "https://www.tensorflow.org/install/ for installation instructions.") + raise + tf_path = os.path.abspath(tf_checkpoint_path) + print("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + print("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split('/') + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any(n in ["adam_v", "adam_m"] for n in name): + print("Skipping {}".format("/".join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + l = re.split(r'_(\d+)', m_name) + else: + l = [m_name] + if l[0] == 'kernel' or l[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif l[0] == 'output_bias' or l[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif l[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + else: + pointer = getattr(pointer, l[0]) + if len(l) >= 2: + num = int(l[1]) + pointer = pointer[num] + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel': + array = np.transpose(array) + try: + assert pointer.shape == array.shape + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + print("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return model + + +def gelu(x): + """Implementation of the gelu activation function. + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + Also see https://arxiv.org/abs/1606.08415 + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +class BertConfig(object): + """Configuration class to store the configuration of a `BertModel`. + """ + def __init__(self, + vocab_size_or_config_json_file, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02): + """Constructs BertConfig. + + Args: + vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. + hidden_size: Size of the encoder layers and the pooler layer. + num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. If string, "gelu", "relu" and "swish" are supported. + hidden_dropout_prob: The dropout probabilitiy for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. 
Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `BertModel`. + initializer_range: The sttdev of the truncated_normal_initializer for + initializing all weight matrices. + """ + if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 + and isinstance(vocab_size_or_config_json_file, unicode)): + with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif isinstance(vocab_size_or_config_json_file, int): + self.vocab_size = vocab_size_or_config_json_file + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + else: + raise ValueError("First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)") + + @classmethod + def from_dict(cls, json_object): + """Constructs a `BertConfig` from a Python dictionary of parameters.""" + config = BertConfig(vocab_size_or_config_json_file=-1) + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `BertConfig` from a json file of parameters.""" + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + def __repr__(self): + return str(self.to_json_string()) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + +try: + from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm +except ImportError: + print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") + class BertLayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(BertLayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. 
+ """ + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None): + seq_length = input_ids.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config): + super(BertSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super(BertSelfOutput, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config): + super(BertAttention, self).__init__() + self.self = BertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask): + self_output = self.self(input_tensor, attention_mask) + attention_output = self.output(self_output, input_tensor) + return attention_output + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super(BertIntermediate, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super(BertOutput, self).__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask): + attention_output = self.attention(hidden_states, attention_mask) + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + +class BertEncoder(nn.Module): + def __init__(self, config): + super(BertEncoder, self).__init__() + layer = BertLayer(config) + self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) + + # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): + # all_encoder_layers = [] + # for layer_module in self.layer: + # hidden_states = layer_module(hidden_states, attention_mask) + # if output_all_encoded_layers: + # all_encoder_layers.append(hidden_states) + # if not output_all_encoded_layers: + # all_encoder_layers.append(hidden_states) + # return all_encoder_layers + def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, 
checkpoint_activations=False): + all_encoder_layers = [] + def custom(start, end): + def custom_forward(*inputs): + layers = self.layer[start:end] + x_ = inputs[0] + for layer in layers: + x_ = layer(x_, inputs[1]) + return x_ + return custom_forward + + if checkpoint_activations: + l = 0 + num_layers = len(self.layer) + chunk_length = math.ceil(math.sqrt(num_layers)) + while l < num_layers: + hidden_states = checkpoint.checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1) + l += chunk_length + # decoder layers + else: + for i,layer_module in enumerate(self.layer): + hidden_states = layer_module(hidden_states, attention_mask) + + if output_all_encoded_layers: + all_encoder_layers.append(hidden_states) + + if not output_all_encoded_layers or checkpoint_activations: + all_encoder_layers.append(hidden_states) + return all_encoder_layers + +#class BertEncoder(nn.Module): +# def __init__(self, config): +# super(BertEncoder, self).__init__() +# layer = BertLayer(config) +# self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) +# +# def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): +# all_encoder_layers = [] +# for layer_module in self.layer: +# hidden_states = layer_module(hidden_states, attention_mask) +# if output_all_encoded_layers: +# all_encoder_layers.append(hidden_states) +# if not output_all_encoded_layers: +# all_encoder_layers.append(hidden_states) +# return all_encoder_layers + + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super(BertPredictionHeadTransform, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertLMPredictionHead, self).__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
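+ # (Weight tying: the decoder below reuses the [vocab_size, hidden_size]
+ # word-embedding matrix to project hidden states back onto the
+ # vocabulary, so no separate output projection is learned; only the
+ # per-token bias added underneath is a new parameter.)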
+ self.decoder = nn.Linear(bert_model_embedding_weights.size(1), + bert_model_embedding_weights.size(0), + bias=False) + self.decoder.weight = bert_model_embedding_weights + self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + self.bias + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertOnlyMLMHead, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + def __init__(self, config): + super(BertOnlyNSPHead, self).__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertPreTrainingHeads, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + def __init__(self, config, *inputs, **kwargs): + super(BertPreTrainedModel, self).__init__() + if not isinstance(config, BertConfig): + raise ValueError( + "Parameter config in `{}(config)` should be an instance of class `BertConfig`. " + "To create a model from a Google pretrained model use " + "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + self.__class__.__name__, self.__class__.__name__ + )) + self.config = config + + def init_bert_weights(self, module): + """ Initialize the weights. + """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, BertLayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, + from_tf=False, *inputs, **kwargs): + """ + Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. + Download and cache the pre-trained model file if needed. + + Params: + pretrained_model_name_or_path: either: + - a str with the name of a pre-trained model to load selected in the list of: + . `bert-base-uncased` + . `bert-large-uncased` + . `bert-base-cased` + . `bert-large-cased` + . `bert-base-multilingual-uncased` + . `bert-base-multilingual-cased` + . `bert-base-chinese` + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . 
`pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . `model.chkpt` a TensorFlow checkpoint + from_tf: should we load the weights from a locally saved TensorFlow checkpoint + cache_dir: an optional path to a folder in which the pre-trained models will be cached. + state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models + *inputs, **kwargs: additional input for the specific Bert class + (ex: num_labels for BertForSequenceClassification) + """ + if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: + archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] + else: + archive_file = pretrained_model_name_or_path + # redirect to the cache, if necessary + try: + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) + except EnvironmentError: + logger.error( + "Model name '{}' was not found in model name list ({}). " + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), + archive_file)) + return None + if resolved_archive_file == archive_file: + logger.info("loading archive file {}".format(archive_file)) + else: + logger.info("loading archive file {} from cache at {}".format( + archive_file, resolved_archive_file)) + tempdir = None + if os.path.isdir(resolved_archive_file) or from_tf: + serialization_dir = resolved_archive_file + else: + # Extract archive to temp dir + tempdir = tempfile.mkdtemp() + logger.info("extracting archive file {} to temp dir {}".format( + resolved_archive_file, tempdir)) + with tarfile.open(resolved_archive_file, 'r:gz') as archive: + archive.extractall(tempdir) + serialization_dir = tempdir + # Load config + config_file = os.path.join(serialization_dir, CONFIG_NAME) + config = BertConfig.from_json_file(config_file) + logger.info("Model config {}".format(config)) + # Instantiate model. 
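+ # An illustrative call, using only arguments documented above (the cache
+ # directory is a placeholder, not a required location):
+ #   model = BertForSequenceClassification.from_pretrained(
+ #       "bert-base-uncased", num_labels=2, cache_dir="/tmp/bert_cache")
+ # `cls` below is whichever BertPreTrainedModel subclass the call was
+ # made on, so the same loading path serves every task-specific head.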
+ model = cls(config, *inputs, **kwargs) + if state_dict is None and not from_tf: + weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) + state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None) + if tempdir: + # Clean up temp dir + shutil.rmtree(tempdir) + if from_tf: + # Directly load from a TensorFlow checkpoint + weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) + return load_tf_weights_in_bert(model, weights_path) + # Load from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if 'gamma' in key: + new_key = key.replace('gamma', 'weight') + if 'beta' in key: + new_key = key.replace('beta', 'bias') + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + start_prefix = '' + if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): + start_prefix = 'bert.' + load(model, prefix=start_prefix) + if len(missing_keys) > 0: + logger.info("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + logger.info("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + model.__class__.__name__, "\n\t".join(error_msgs))) + return model + + +class BertModel(BertPreTrainedModel): + """BERT model ("Bidirectional Embedding Representations from a Transformer"). + + Params: + config: a BertConfig class instance with the configuration to build a new model + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. 
+ + Outputs: Tuple of (encoded_layers, pooled_output) + `encoded_layers`: controled by `output_all_encoded_layers` argument: + - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end + of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each + encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], + - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding + to the last attention block of shape [batch_size, sequence_length, hidden_size], + `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a + classifier pretrained on top of the hidden state associated to the first character of the + input (`CLS`) to train on the Next-Sentence task (see BERT's paper). + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = modeling.BertModel(config=config) + all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertModel, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + self.pooler = BertPooler(config) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, checkpoint_activations=False): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + embedding_output = self.embeddings(input_ids, token_type_ids) + encoded_layers = self.encoder(embedding_output, + extended_attention_mask, + output_all_encoded_layers=output_all_encoded_layers, checkpoint_activations=checkpoint_activations) + sequence_output = encoded_layers[-1] + pooled_output = self.pooler(sequence_output) + if not output_all_encoded_layers: + encoded_layers = encoded_layers[-1] + return encoded_layers, pooled_output + + +class BertForPreTraining(BertPreTrainedModel): + """BERT model with pre-training heads. 
+ This module comprises the BERT model followed by the two pre-training heads: + - the masked language modeling head, and + - the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `masked_lm_labels` and `next_sentence_label` are not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `masked_lm_labels` or `next_sentence_label` is `None`: + Outputs a tuple comprising + - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and + - the next sentence classification logits of shape [batch_size, 2]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForPreTraining(config) + masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForPreTraining, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False): + sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations) + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + + if masked_lm_labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + #print("loss is {} {}".format(masked_lm_loss, next_sentence_loss)) + total_loss = masked_lm_loss + next_sentence_loss + return total_loss + else: + return prediction_scores, seq_relationship_score + + +class BertForMaskedLM(BertPreTrainedModel): + """BERT model with the masked language modeling head. + This module comprises the BERT model followed by the masked language modeling head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + + Outputs: + if `masked_lm_labels` is not `None`: + Outputs the masked language modeling loss. + if `masked_lm_labels` is `None`: + Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForMaskedLM(config) + masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForMaskedLM, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, checkpoint_activations=False): + sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False) + prediction_scores = self.cls(sequence_output) + + if masked_lm_labels is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + return masked_lm_loss + else: + return prediction_scores + + +class BertForNextSentencePrediction(BertPreTrainedModel): + """BERT model with next sentence prediction head. + This module comprises the BERT model followed by the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `next_sentence_label` is not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `next_sentence_label` is `None`: + Outputs the next sentence classification logits of shape [batch_size, 2]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForNextSentencePrediction(config) + seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForNextSentencePrediction, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertOnlyNSPHead(config) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, checkpoint_activations=False): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False) + seq_relationship_score = self.cls( pooled_output) + + if next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + return next_sentence_loss + else: + return seq_relationship_score + + +class BertForSequenceClassification(BertPreTrainedModel): + """BERT model for classification. + This module is composed of the BERT model with a linear layer on top of + the pooled output. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_labels`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] + with indices selected in [0, ..., num_labels]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, num_labels]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_labels = 2 + + model = BertForSequenceClassification(config, num_labels) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config, num_labels): + super(BertForSequenceClassification, self).__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, num_labels) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + return loss + else: + return logits + + +class BertForMultipleChoice(BertPreTrainedModel): + """BERT model for multiple choice tasks. + This module is composed of the BERT model with a linear layer on top of + the pooled output. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_choices`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] + with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` + and type 1 corresponds to a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] + with indices selected in [0, ..., num_choices]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, num_labels]. 
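The multiple-choice head scores each choice by folding the choice dimension into the batch dimension. The following sketch uses plain tensors only (no BERT weights; all shapes and names are illustrative) to show why a classifier that emits a single logit per (example, choice) row ends up as a `[batch_size, num_choices]` score matrix, mirroring the reshaping done in the forward pass further below.

```python
import torch

batch_size, num_choices, seq_len, hidden = 2, 2, 3, 8
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

# Fold the choice dimension into the batch dimension, exactly as
# BertForMultipleChoice.forward does before calling BertModel.
flat_input_ids = input_ids.view(-1, input_ids.size(-1))   # [batch*choices, seq_len]

# Stand-in for the pooled [CLS] output of BertModel (illustrative only).
pooled_output = torch.randn(batch_size * num_choices, hidden)
classifier = torch.nn.Linear(hidden, 1)                    # one score per (example, choice)

logits = classifier(pooled_output)                         # [batch*choices, 1]
reshaped_logits = logits.view(-1, num_choices)             # [batch_size, num_choices]

labels = torch.LongTensor([0, 1])
loss = torch.nn.CrossEntropyLoss()(reshaped_logits, labels)
```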
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) + input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) + token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_choices = 2 + + model = BertForMultipleChoice(config, num_choices) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config, num_choices): + super(BertForMultipleChoice, self).__init__(config) + self.num_choices = num_choices + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) + _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, self.num_choices) + + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + return loss + else: + return reshaped_logits + + +class BertForTokenClassification(BertPreTrainedModel): + """BERT model for token-level classification. + This module is composed of the BERT model with a linear layer on top of + the full hidden state of the last layer. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_labels`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [0, ..., num_labels]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
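When `labels` are given together with an `attention_mask`, the token-classification forward pass below restricts the loss to non-padding positions. A standalone sketch of that masking with toy tensors (shapes and values are illustrative, not taken from a real run):

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, num_labels = 2, 3, 2
logits = torch.randn(batch_size, seq_len, num_labels)      # stand-in for the classifier output
labels = torch.LongTensor([[1, 0, 1], [0, 1, 0]])
attention_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])  # last token of the second example is padding

# Keep only positions where attention_mask == 1, mirroring
# BertForTokenClassification.forward.
active = attention_mask.view(-1) == 1
active_logits = logits.view(-1, num_labels)[active]
active_labels = labels.view(-1)[active]
loss = CrossEntropyLoss()(active_logits, active_labels)
```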
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_labels = 2 + + model = BertForTokenClassification(config, num_labels) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config, num_labels): + super(BertForTokenClassification, self).__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, num_labels) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): + sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + return loss + else: + return logits + + +class BertForQuestionAnswering(BertPreTrainedModel): + """BERT model for Question Answering (span extraction). + This module is composed of the BERT model with a linear layer on top of + the sequence output that computes start_logits and end_logits + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. + Positions are clamped to the length of the sequence and position outside of the sequence are not taken + into account for computing the loss. + `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. + Positions are clamped to the length of the sequence and position outside of the sequence are not taken + into account for computing the loss. + + Outputs: + if `start_positions` and `end_positions` are not `None`: + Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. 
+ if `start_positions` or `end_positions` is `None`: + Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end + position tokens of shape [batch_size, sequence_length]. + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForQuestionAnswering(config) + start_logits, end_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForQuestionAnswering, self).__init__(config) + self.bert = BertModel(config) + # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version + # self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.qa_outputs = nn.Linear(config.hidden_size, 2) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, checkpoint_activations=False): + sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + return total_loss + else: + return start_logits, end_logits diff --git a/PyTorch/LanguageModeling/BERT/optimization.py b/PyTorch/LanguageModeling/BERT/optimization.py new file mode 100644 index 00000000..1a9ade8f --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/optimization.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
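Before moving on to the optimizer, a brief aside on the question-answering head defined just above: gold start/end positions that fall outside the (possibly truncated) input are clamped to `sequence_length` and then excluded from the loss via `ignore_index`. A standalone sketch with random logits (not the model itself; values are illustrative):

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 3
start_logits = torch.randn(batch_size, seq_len)
end_logits = torch.randn(batch_size, seq_len)

# One gold span per example; the second span (position 7) lies outside the
# 3-token input, e.g. because the answer was truncated away.
start_positions = torch.LongTensor([1, 7])
end_positions = torch.LongTensor([2, 7])

# Out-of-range positions are clamped to seq_len and ignored by the loss,
# mirroring BertForQuestionAnswering.forward.
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) +
              loss_fct(end_logits, end_positions)) / 2
```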
+"""PyTorch optimization for BERT model.""" + +import math +import torch +from torch.optim import Optimizer +from torch.optim.optimizer import required +from torch.nn.utils import clip_grad_norm_ +#from fused_adam_local import FusedAdam +from apex.optimizers import FusedAdam + +def warmup_cosine(x, warmup=0.002): + if x < warmup: + return x/warmup + return 0.5 * (1.0 + torch.cos(math.pi * x)) + +def warmup_constant(x, warmup=0.002): + if x < warmup: + return x/warmup + return 1.0 + +def warmup_linear(x, warmup=0.002): + if x < warmup: + return x/warmup + # return (1.0 - x) + + return max((x - 1. )/ (warmup - 1.), 0.) + +SCHEDULES = { + 'warmup_cosine':warmup_cosine, + 'warmup_constant':warmup_constant, + 'warmup_linear':warmup_linear, +} + + +class BertAdam(Optimizer): + """Implements BERT version of Adam algorithm with weight decay fix. + Params: + lr: learning rate + warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 + t_total: total number of training steps for the learning + rate schedule, -1 means constant learning rate. Default: -1 + schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' + b1: Adams b1. Default: 0.9 + b2: Adams b2. Default: 0.999 + e: Adams epsilon. Default: 1e-6 + weight_decay: Weight decay. Default: 0.01 + max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0 + """ + def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', + b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, + max_grad_norm=1.0): + if lr is not required and lr < 0.0: + raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) + if schedule not in SCHEDULES: + raise ValueError("Invalid schedule parameter: {}".format(schedule)) + if not 0.0 <= warmup < 1.0 and not warmup == -1: + raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) + if not 0.0 <= b1 < 1.0: + raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) + if not 0.0 <= b2 < 1.0: + raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) + if not e >= 0.0: + raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) + defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, + b1=b1, b2=b2, e=e, weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + super(BertAdam, self).__init__(params, defaults) + + def get_lr(self): + lr = [] + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + if len(state) == 0: + return [0] + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) + else: + lr_scheduled = group['lr'] + lr.append(lr_scheduled) + return lr + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['next_m'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['next_v'] = torch.zeros_like(p.data) + + next_m, next_v = state['next_m'], state['next_v'] + beta1, beta2 = group['b1'], group['b2'] + + # Add grad clipping + if group['max_grad_norm'] > 0: + clip_grad_norm_(p, group['max_grad_norm']) + + # Decay the first and second moment running average coefficient + # In-place operations to update the averages at the same time + next_m.mul_(beta1).add_(1 - beta1, grad) + next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) + update = next_m / (next_v.sqrt() + group['e']) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. + if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) + else: + lr_scheduled = group['lr'] + + update_with_lr = lr_scheduled * update + p.data.add_(-update_with_lr) + + state['step'] += 1 + + # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 + # No bias correction + # bias_correction1 = 1 - beta1 ** state['step'] + # bias_correction2 = 1 - beta2 ** state['step'] + + return loss + +# ======================================================================= +class BertAdam_FP16(FusedAdam): + """Implements BERT version of Adam algorithm with weight decay fix. + Params: + lr: learning rate + warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 + t_total: total number of training steps for the learning + rate schedule, -1 means constant learning rate. Default: -1 + schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' + b1: Adams b1. Default: 0.9 + b2: Adams b2. Default: 0.999 + e: Adams epsilon. Default: 1e-6 + weight_decay: Weight decay. Default: 0.01 + max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0 + """ + def __init__(self, params, lr, warmup=-1, t_total=-1, bias_correction=False, schedule='warmup_linear', + b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, + max_grad_norm=1.0): + if not lr >= 0.0: + raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) + if schedule not in SCHEDULES: + raise ValueError("Invalid schedule parameter: {}".format(schedule)) + if not 0.0 <= warmup < 1.0 and not warmup == -1: + raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) + if not 0.0 <= b1 < 1.0: + raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) + if not 0.0 <= b2 < 1.0: + raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) + if not e >= 0.0: + raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) + # defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, + # b1=b1, b2=b2, e=e, weight_decay=weight_decay, + # max_grad_norm=max_grad_norm) + super(BertAdam_FP16, self).__init__(params, lr=lr, bias_correction=bias_correction, betas=(b1, b2), eps=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)#defaults) + + def get_lr(self): + lr = [] + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + if len(state) == 0: + print("returning", state) + return [0] + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) + else: + lr_scheduled = group['lr'] + lr.append(lr_scheduled) + print("LR {}".format(lr_scheduled)) + return lr diff --git a/PyTorch/LanguageModeling/BERT/requirements.txt b/PyTorch/LanguageModeling/BERT/requirements.txt new file mode 100644 index 00000000..2bfcc493 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/requirements.txt @@ -0,0 +1,13 @@ +# progress bars in model download and training scripts +tqdm +# Accessing files from S3 directly. +boto3 +# Used for downloading models over HTTP +requests +six +ipdb +#Data processing +h5py +html2text +nltk +progressbar \ No newline at end of file diff --git a/PyTorch/LanguageModeling/BERT/run_glue.py b/PyTorch/LanguageModeling/BERT/run_glue.py new file mode 100644 index 00000000..7c33a4a3 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/run_glue.py @@ -0,0 +1,649 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
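For orientation on the schedules defined in `optimization.py` above: `warmup_linear` (the default for `BertAdam` and the schedule applied manually on the fp16 path of the fine-tuning scripts) ramps the learning-rate multiplier from 0 to 1 over the first `warmup` fraction of training and then decays it linearly back to 0. A small self-contained sketch, re-stating the schedule locally so it runs on its own (the step counts and base learning rate are made up for illustration):

```python
def warmup_linear(x, warmup=0.002):
    # x is the fraction of total training steps completed (0.0 .. 1.0).
    if x < warmup:
        return x / warmup
    return max((x - 1.0) / (warmup - 1.0), 0.0)

base_lr = 5e-5
total_steps = 1000
warmup_proportion = 0.1

for step in (0, 50, 100, 500, 1000):
    lr = base_lr * warmup_linear(step / total_steps, warmup_proportion)
    print(f"step {step:4d}: lr = {lr:.2e}")
# step    0: lr = 0.00e+00
# step   50: lr = 2.50e-05   (half-way through warmup)
# step  100: lr = 5.00e-05   (warmup finished)
# step  500: lr = 2.78e-05   (linear decay towards 0)
# step 1000: lr = 0.00e+00
```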
+"""BERT finetuning runner.""" + +from __future__ import absolute_import, division, print_function + +import argparse +import csv +import logging +import os +import random +import sys + +import numpy as np +import torch +from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, + TensorDataset) +from torch.utils.data.distributed import DistributedSampler +from tqdm import tqdm, trange + +from file_utils import PYTORCH_PRETRAINED_BERT_CACHE +from modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME +from tokenization import BertTokenizer +from optimization import BertAdam, warmup_linear + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + + +class InputExample(object): + """A single training/test example for simple sequence classification.""" + + def __init__(self, guid, text_a, text_b=None, label=None): + """Constructs a InputExample. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. + """ + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, input_ids, input_mask, segment_ids, label_id): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id + + +class DataProcessor(object): + """Base class for data converters for sequence classification data sets.""" + + def get_train_examples(self, data_dir): + """Gets a collection of `InputExample`s for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of `InputExample`s for the dev set.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with open(input_file, "r") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + if sys.version_info[0] == 2: + line = list(unicode(cell, 'utf-8') for cell in line) + lines.append(line) + return lines + + +class MrpcProcessor(DataProcessor): + """Processor for the MRPC data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = line[3] + text_b = line[4] + label = line[0] + examples.append( + InputExample(guid=guid, 
text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliProcessor(DataProcessor): + """Processor for the MultiNLI data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), + "dev_matched") + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, line[0]) + text_a = line[8] + text_b = line[9] + label = line[-1] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class ColaProcessor(DataProcessor): + """Processor for the CoLA data set (GLUE version).""" + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples( + self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + guid = "%s-%s" % (set_type, i) + text_a = line[3] + label = line[1] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): + """Loads a data file into a list of `InputBatch`s.""" + + label_map = {label : i for i, label in enumerate(label_list)} + + features = [] + for (ex_index, example) in enumerate(examples): + tokens_a = tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:(max_seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . [SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). This is not *strictly* necessary + # since the [SEP] token unambigiously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as as the "sentence vector". Note that this only makes sense because + # the entire model is fine-tuned. 
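A concrete instance of the layout described in the comment above, using hypothetical WordPiece pieces and a `max_seq_length` of 16, to show how the code that follows assembles `tokens`, `segment_ids`, `input_mask`, and zero-padding:

```python
tokens_a = ["is", "this", "jack", "##son", "##ville", "?"]
tokens_b = ["no", "it", "is", "not", "."]
max_seq_length = 16

tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

# Pretend ids; the real script calls tokenizer.convert_tokens_to_ids(tokens).
input_ids = list(range(1, len(tokens) + 1))
input_mask = [1] * len(input_ids)          # 1 for real tokens, 0 for padding

padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding

assert len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length
# tokens:      [CLS] is this jack ##son ##ville ?  [SEP] no it is not .  [SEP]  (+ 2 padding slots)
# segment_ids:   0   0    0    0    0      0    0    0   1  1  1   1  1    1     0 0
```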
+ tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + segment_ids = [0] * len(tokens) + + if tokens_b: + tokens += tokens_b + ["[SEP]"] + segment_ids += [1] * (len(tokens_b) + 1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + padding = [0] * (max_seq_length - len(input_ids)) + input_ids += padding + input_mask += padding + segment_ids += padding + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + label_id = label_map[example.label] + if ex_index < 5: + logger.info("*** Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join( + [str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("label: %s (id = %d)" % (example.label, label_id)) + + features.append( + InputFeatures(input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id)) + return features + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + +def accuracy(out, labels): + outputs = np.argmax(out, axis=1) + return np.sum(outputs == labels) + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", + default=None, + type=str, + required=True, + help="The input data dir. Should contain the .tsv files (or other data files) for the task.") + parser.add_argument("--bert_model", default=None, type=str, required=True, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " + "bert-base-multilingual-cased, bert-base-chinese.") + parser.add_argument("--task_name", + default=None, + type=str, + required=True, + help="The name of the task to train.") + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--init_checkpoint", + default=None, + type=str, + required=True, + help="The checkpoint file from pretraining") + + ## Other parameters + parser.add_argument("--cache_dir", + default="", + type=str, + help="Where do you want to store the pre-trained models downloaded from s3") + parser.add_argument("--max_seq_length", + default=128, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. 
\n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--do_train", + action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", + action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_lower_case", + action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--train_batch_size", + default=32, + type=int, + help="Total batch size for training.") + parser.add_argument("--eval_batch_size", + default=8, + type=int, + help="Total batch size for eval.") + parser.add_argument("--learning_rate", + default=5e-5, + type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--num_train_epochs", + default=3.0, + type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1.0, type=float, + help="Total number of training steps to perform.") + parser.add_argument("--warmup_proportion", + default=0.1, + type=float, + help="Proportion of training to perform linear learning rate warmup for. " + "E.g., 0.1 = 10%% of training.") + parser.add_argument("--no_cuda", + action='store_true', + help="Whether not to use CUDA when available") + parser.add_argument("--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument('--fp16', + action='store_true', + help="Whether to use 16-bit float precision instead of 32-bit") + parser.add_argument('--loss_scale', + type=float, default=0, + help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" + "0 (default value): dynamic loss scaling.\n" + "Positive power of 2: static loss scaling value.\n") + parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") + args = parser.parse_args() + + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + processors = { + "cola": ColaProcessor, + "mnli": MnliProcessor, + "mrpc": MrpcProcessor, + } + + num_labels_task = { + "cola": 2, + "mnli": 3, + "mrpc": 2, + } + + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + n_gpu = torch.cuda.device_count() + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl') + logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( + device, n_gpu, bool(args.local_rank != -1), args.fp16)) + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + + args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + if not args.do_train and not args.do_eval: + raise ValueError("At least one of `do_train` or `do_eval` must be True.") + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: + print("WARNING: Output directory ({}) already exists and is not empty.".format(args.output_dir)) + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + task_name = args.task_name.lower() + + if task_name not in processors: + raise ValueError("Task not found: %s" % (task_name)) + + processor = processors[task_name]() + num_labels = num_labels_task[task_name] + label_list = processor.get_labels() + + tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) + + train_examples = None + num_train_optimization_steps = None + if args.do_train: + train_examples = processor.get_train_examples(args.data_dir) + num_train_optimization_steps = int( + len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs + if args.local_rank != -1: + num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() + + # Prepare model + cache_dir = args.cache_dir if args.cache_dir else os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank)) + model = BertForSequenceClassification.from_pretrained(args.bert_model, + cache_dir=cache_dir, + num_labels = num_labels) + model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False) + + if args.fp16: + model.half() + model.to(device) + if args.local_rank != -1: + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise 
ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + model = DDP(model) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Prepare optimizer + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + if args.fp16: + try: + from apex.optimizers import FP16_Optimizer + from apex.optimizers import FusedAdam + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + optimizer = FusedAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + bias_correction=False, + max_grad_norm=1.0) + if args.loss_scale == 0: + optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) + else: + optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) + + else: + optimizer = BertAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + warmup=args.warmup_proportion, + t_total=num_train_optimization_steps) + + global_step = 0 + nb_tr_steps = 0 + tr_loss = 0 + if args.do_train: + train_features = convert_examples_to_features( + train_examples, label_list, args.max_seq_length, tokenizer) + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_examples)) + logger.info(" Batch size = %d", args.train_batch_size) + logger.info(" Num steps = %d", num_train_optimization_steps) + all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) + all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + if args.local_rank == -1: + train_sampler = RandomSampler(train_data) + else: + train_sampler = DistributedSampler(train_data) + train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) + + model.train() + for _ in trange(int(args.num_train_epochs), desc="Epoch"): + tr_loss = 0 + nb_tr_examples, nb_tr_steps = 0, 0 + for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): + if args.max_steps > 0 and global_step > args.max_steps: + break + batch = tuple(t.to(device) for t in batch) + input_ids, input_mask, segment_ids, label_ids = batch + loss = model(input_ids, segment_ids, input_mask, label_ids) + if n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. 
+ if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + optimizer.backward(loss) + else: + loss.backward() + + tr_loss += loss.item() + nb_tr_examples += input_ids.size(0) + nb_tr_steps += 1 + if (step + 1) % args.gradient_accumulation_steps == 0: + if args.fp16: + # modify learning rate with special warm up BERT uses + # if args.fp16 is False, BertAdam is used that handles this automatically + lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion) + for param_group in optimizer.param_groups: + param_group['lr'] = lr_this_step + optimizer.step() + optimizer.zero_grad() + global_step += 1 + + if args.do_train: + # Save a trained model and the associated configuration + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) + torch.save(model_to_save.state_dict(), output_model_file) + output_config_file = os.path.join(args.output_dir, CONFIG_NAME) + with open(output_config_file, 'w') as f: + f.write(model_to_save.config.to_json_string()) + + # Load a trained model and config that you have fine-tuned + config = BertConfig(output_config_file) + model = BertForSequenceClassification(config, num_labels=num_labels) + model.load_state_dict(torch.load(output_model_file)) + else: + model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels) + model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False) + model.to(device) + + if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + eval_examples = processor.get_dev_examples(args.data_dir) + eval_features = convert_examples_to_features( + eval_examples, label_list, args.max_seq_length, tokenizer) + logger.info("***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_examples)) + logger.info(" Batch size = %d", args.eval_batch_size) + all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) + all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) + eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + # Run prediction for full data + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + + model.eval() + eval_loss, eval_accuracy = 0, 0 + nb_eval_steps, nb_eval_examples = 0, 0 + + for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"): + input_ids = input_ids.to(device) + input_mask = input_mask.to(device) + segment_ids = segment_ids.to(device) + label_ids = label_ids.to(device) + + with torch.no_grad(): + tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids) + logits = model(input_ids, segment_ids, input_mask) + + logits = logits.detach().cpu().numpy() + label_ids = label_ids.to('cpu').numpy() + tmp_eval_accuracy = accuracy(logits, label_ids) + + eval_loss += tmp_eval_loss.mean().item() + eval_accuracy += tmp_eval_accuracy + + nb_eval_examples += input_ids.size(0) + nb_eval_steps += 1 + + eval_loss = eval_loss / nb_eval_steps + eval_accuracy = eval_accuracy / nb_eval_examples + loss = tr_loss/nb_tr_steps if 
args.do_train else None + result = {'eval_loss': eval_loss, + 'eval_accuracy': eval_accuracy, + 'global_step': global_step, + 'loss': loss} + + output_eval_file = os.path.join(args.output_dir, "eval_results.txt") + with open(output_eval_file, "w") as writer: + logger.info("***** Eval results *****") + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(result[key])) + writer.write("%s = %s\n" % (key, str(result[key]))) + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/run_pretraining.py b/PyTorch/LanguageModeling/BERT/run_pretraining.py new file mode 100644 index 00000000..af73c4cd --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/run_pretraining.py @@ -0,0 +1,417 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""BERT finetuning runner.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +#================== +import csv +import os +import logging +import argparse +import random +import h5py +from tqdm import tqdm, trange +import os +import numpy as np +import torch +from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset +from torch.utils.data.distributed import DistributedSampler +import math +from apex import amp + + + +from tokenization import BertTokenizer +from modeling import BertForPreTraining, BertConfig +from optimization import BertAdam, BertAdam_FP16 + +# from fused_adam_local import FusedAdamBert +from file_utils import PYTORCH_PRETRAINED_BERT_CACHE + +from apex.optimizers import FusedAdam #, FP16_Optimizer +#from apex.optimizers import FusedAdam +from apex.parallel import DistributedDataParallel as DDP +from schedulers import LinearWarmUpScheduler + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + +class pretraining_dataset(Dataset): + + def __init__(self, input_file, max_pred_length): + self.input_file = input_file + self.max_pred_length = max_pred_length + f = h5py.File(input_file, "r") + self.input_ids = np.asarray(f["input_ids"][:]).astype(np.int64)#[num_instances x max_seq_length]) + self.input_masks = np.asarray(f["input_mask"][:]).astype(np.int64) #[num_instances x max_seq_length] + self.segment_ids = np.asarray(f["segment_ids"][:]).astype(np.int64) #[num_instances x max_seq_length] + self.masked_lm_positions = np.asarray(f["masked_lm_positions"][:]).astype(np.int64) #[num_instances x max_pred_length] + self.masked_lm_ids= np.asarray(f["masked_lm_ids"][:]).astype(np.int64) #[num_instances x max_pred_length] + self.next_sentence_labels = np.asarray(f["next_sentence_labels"][:]).astype(np.int64) # [num_instances] + f.close() + + def __len__(self): + 'Denotes the total number of samples' + return len(self.input_ids) + + def __getitem__(self, index): + + input_ids= 
torch.from_numpy(self.input_ids[index]) # [max_seq_length] + input_mask = torch.from_numpy(self.input_masks[index]) #[max_seq_length] + segment_ids = torch.from_numpy(self.segment_ids[index])# [max_seq_length] + masked_lm_positions = torch.from_numpy(self.masked_lm_positions[index]) #[max_pred_length] + masked_lm_ids = torch.from_numpy(self.masked_lm_ids[index]) #[max_pred_length] + next_sentence_labels = torch.from_numpy(np.asarray(self.next_sentence_labels[index])) #[1] + + masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1 + index = self.max_pred_length + # store number of masked tokens in index + if len((masked_lm_positions == 0).nonzero()) != 0: + index = (masked_lm_positions == 0).nonzero()[0].item() + masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index] + + return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels] + +def main(): + + print("IN NEW MAIN XD\n") + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--input_dir", + default=None, + type=str, + required=True, + help="The input data dir. Should contain .hdf5 files for the task.") + + parser.add_argument("--config_file", + default=None, + type=str, + required=True, + help="The BERT model config") + + parser.add_argument("--bert_model", default="bert-large-uncased", type=str, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") + + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="The output directory where the model checkpoints will be written.") + + ## Other parameters + parser.add_argument("--max_seq_length", + default=512, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. \n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--max_predictions_per_seq", + default=80, + type=int, + help="The maximum total of masked tokens in input sequence") + parser.add_argument("--train_batch_size", + default=32, + type=int, + help="Total batch size for training.") + parser.add_argument("--learning_rate", + default=5e-5, + type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--num_train_epochs", + default=3.0, + type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", + default=1000, + type=float, + help="Total number of training steps to perform.") + parser.add_argument("--warmup_proportion", + default=0.01, + type=float, + help="Proportion of training to perform linear learning rate warmup for. 
" + "E.g., 0.1 = 10%% of training.") + parser.add_argument("--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="Number of updates steps to accumualte before performing a backward/update pass.") + parser.add_argument('--fp16', + default=False, + action='store_true', + help="Whether to use 16-bit float precision instead of 32-bit") + parser.add_argument('--loss_scale', + type=float, default=0.0, + help='Loss scaling, positive power of 2 values can improve fp16 convergence.') + parser.add_argument('--log_freq', + type=float, default=10.0, + help='frequency of logging loss.') + parser.add_argument('--checkpoint_activations', + default=False, + action='store_true', + help="Whether to use gradient checkpointing") + parser.add_argument("--resume_from_checkpoint", + default=False, + action='store_true', + help="Whether to resume training from checkpoint.") + parser.add_argument('--resume_step', + type=int, + default=-1, + help="Step to resume training from.") + parser.add_argument('--num_steps_per_checkpoint', + type=int, + default=2000, + help="Number of update steps until a model checkpoint is saved to disk.") + + + args = parser.parse_args() + + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + + assert(torch.cuda.is_available()) + + if args.local_rank == -1: + device = torch.device("cuda") + n_gpu = torch.cuda.device_count() + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl', init_method='env://') + + logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1)) + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + if args.train_batch_size % args.gradient_accumulation_steps != 0: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format( + args.gradient_accumulation_steps, args.train_batch_size)) + + args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps + + + + if not args.resume_from_checkpoint and os.path.exists(args.output_dir) and (os.listdir(args.output_dir) and os.listdir(args.output_dir)!=['logfile.txt']): + raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) + + if not args.resume_from_checkpoint: + os.makedirs(args.output_dir, exist_ok=True) + + # Prepare model + config = BertConfig.from_json_file(args.config_file) + model = BertForPreTraining(config) + + + if not args.resume_from_checkpoint: + global_step = 0 + else: + if args.resume_step == -1: + model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")] + args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip()) for x in model_names]) + + global_step = args.resume_step + + checkpoint = torch.load(os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)), map_location="cpu") + model.load_state_dict(checkpoint['model'], strict=False) + + print("resume step from ", args.resume_step) + + model.to(device) + + # Prepare optimizer + param_optimizer = 
list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + + + + if args.fp16: + + optimizer = FusedAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + #warmup=args.warmup_proportion, + #t_total=args.max_steps, + bias_correction=False, + weight_decay=0.01, + max_grad_norm=1.0) + + if args.loss_scale == 0: + # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) + model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale="dynamic") + else: + # optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) + model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale=args.loss_scale) + + scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=args.max_steps) + + else: + optimizer = BertAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + warmup=args.warmup_proportion, + t_total=args.max_steps) + + + + if args.resume_from_checkpoint: + optimizer.load_state_dict(checkpoint['optimizer']) # , strict=False) + + + + if args.local_rank != -1: + model = DDP(model) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + + files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if os.path.isfile(os.path.join(args.input_dir, f))] + files.sort() + + num_files = len(files) + + + logger.info("***** Running training *****") + # logger.info(" Num examples = %d", len(train_data)) + logger.info(" Batch size = %d", args.train_batch_size) + print(" LR = ", args.learning_rate) + + + model.train() + print("Training. . .") + + most_recent_ckpts_paths = [] + + print("Training. . .") + tr_loss = 0.0 # total added training loss + average_loss = 0.0 # averaged loss every args.log_freq steps + epoch = 0 + training_steps = 0 + while True: + if not args.resume_from_checkpoint: + random.shuffle(files) + f_start_id = 0 + else: + f_start_id = checkpoint['files'][0] + files = checkpoint['files'][1:] + args.resume_from_checkpoint = False + for f_id in range(f_start_id, len(files)): + data_file = files[f_id] + logger.info("file no %s file %s" %(f_id, data_file)) + train_data = pretraining_dataset(input_file=data_file, max_pred_length=args.max_predictions_per_seq) + + if args.local_rank == -1: + train_sampler = RandomSampler(train_data) + train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size * n_gpu, num_workers=4, pin_memory=True) + else: + train_sampler = DistributedSampler(train_data) + + train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=4, pin_memory=True) + for step, batch in enumerate(tqdm(train_dataloader, desc="File Iteration")): + + training_steps += 1 + batch = [t.to(device) for t in batch] + input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch#\ + loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, next_sentence_label=next_sentence_labels, checkpoint_activations=args.checkpoint_activations) + if n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. 
+ + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + # optimizer.backward(loss) + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + tr_loss += loss + average_loss += loss.item() + + if training_steps % args.gradient_accumulation_steps == 0: + if args.fp16: + scheduler.step() + optimizer.step() + optimizer.zero_grad() + global_step += 1 + + + + if training_steps == 1 * args.gradient_accumulation_steps: + logger.info("Step:{} Average Loss = {} Step Loss = {} LR {}".format(global_step, average_loss, + loss.item(), optimizer.param_groups[0]['lr'])) + + if training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0: + logger.info("Step:{} Average Loss = {} Step Loss = {} LR {}".format(global_step, average_loss / args.log_freq, + loss.item(), optimizer.param_groups[0]['lr'])) + average_loss = 0 + + + if global_step >= args.max_steps or training_steps == 1 * args.gradient_accumulation_steps or training_steps % (args.num_steps_per_checkpoint * args.gradient_accumulation_steps) == 0: + if (not torch.distributed.is_initialized() or (torch.distributed.is_initialized() and torch.distributed.get_rank() == 0)): + # Save a trained model + logger.info("** ** * Saving fine - tuned model ** ** * ") + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)) + + torch.save({'model' : model_to_save.state_dict(), + 'optimizer' : optimizer.state_dict(), + 'files' : [f_id] + files }, output_save_file) + + most_recent_ckpts_paths.append(output_save_file) + if len(most_recent_ckpts_paths) > 3: + ckpt_to_be_removed = most_recent_ckpts_paths.pop(0) + os.remove(ckpt_to_be_removed) + + if global_step >= args.max_steps: + tr_loss = tr_loss * args.gradient_accumulation_steps / training_steps + if (torch.distributed.is_initialized()): + tr_loss /= torch.distributed.get_world_size() + torch.distributed.all_reduce(tr_loss) + logger.info("Total Steps:{} Final Loss = {}".format(training_steps, tr_loss.item())) + return + del train_dataloader + del train_sampler + del train_data + #for obj in gc.get_objects(): + # if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)): + # del obj + + torch.cuda.empty_cache() + epoch += 1 + + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/run_pretraining_inference.py b/PyTorch/LanguageModeling/BERT/run_pretraining_inference.py new file mode 100644 index 00000000..ecfde31f --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/run_pretraining_inference.py @@ -0,0 +1,300 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
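A minimal, self-contained sketch (toy sizes, illustration only) of the conversion performed by the pretraining_dataset class defined below: the sparse masked_lm_positions / masked_lm_ids arrays read from each HDF5 shard become a dense per-token label vector, with -1 at every position that is not masked.

    import torch

    max_seq_length, max_pred_length = 8, 3
    masked_lm_positions = torch.tensor([2, 5, 0])   # zero-padded after the real entries
    masked_lm_ids = torch.tensor([1037, 2154, 0])

    masked_lm_labels = torch.ones(max_seq_length, dtype=torch.long) * -1
    index = max_pred_length
    padded = (masked_lm_positions == 0).nonzero()
    if len(padded) != 0:
        index = padded[0].item()                    # number of real masked tokens
    masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
    print(masked_lm_labels)                         # tensor([-1, -1, 1037, -1, -1, 2154, -1, -1])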
+"""BERT finetuning runner.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +#================== +import csv +import os +import logging +import argparse +import random +import h5py +from tqdm import tqdm, trange +import os +import numpy as np +import torch +from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset +from torch.utils.data.distributed import DistributedSampler +import math +import time + +from tokenization import BertTokenizer +from modeling import BertForPreTraining, BertConfig + +# from fused_adam_local import FusedAdamBert +from file_utils import PYTORCH_PRETRAINED_BERT_CACHE + +from apex.parallel import DistributedDataParallel as DDP +import torch.distributed as dist + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + +class pretraining_dataset(Dataset): + + def __init__(self, input_file, max_pred_length): + self.input_file = input_file + self.max_pred_length = max_pred_length + f = h5py.File(input_file, "r") + self.input_ids = np.asarray(f["input_ids"][:]).astype(np.int64)#[num_instances x max_seq_length]) + self.input_masks = np.asarray(f["input_mask"][:]).astype(np.int64) #[num_instances x max_seq_length] + self.segment_ids = np.asarray(f["segment_ids"][:]).astype(np.int64) #[num_instances x max_seq_length] + self.masked_lm_positions = np.asarray(f["masked_lm_positions"][:]).astype(np.int64) #[num_instances x max_pred_length] + self.masked_lm_ids= np.asarray(f["masked_lm_ids"][:]).astype(np.int64) #[num_instances x max_pred_length] + self.next_sentence_labels = np.asarray(f["next_sentence_labels"][:]).astype(np.int64) # [num_instances] + f.close() + + def __len__(self): + 'Denotes the total number of samples' + return len(self.input_ids) + + def __getitem__(self, index): + + input_ids= torch.from_numpy(self.input_ids[index]) # [max_seq_length] + input_mask = torch.from_numpy(self.input_masks[index]) #[max_seq_length] + segment_ids = torch.from_numpy(self.segment_ids[index])# [max_seq_length] + masked_lm_positions = torch.from_numpy(self.masked_lm_positions[index]) #[max_pred_length] + masked_lm_ids = torch.from_numpy(self.masked_lm_ids[index]) #[max_pred_length] + next_sentence_labels = torch.from_numpy(np.asarray(self.next_sentence_labels[index])) #[1] + + masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1 + index = self.max_pred_length + # store number of masked tokens in index + if len((masked_lm_positions == 0).nonzero()) != 0: + index = (masked_lm_positions == 0).nonzero()[0].item() + masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index] + + return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels] + +def main(): + + print("IN NEW MAIN XD\n") + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--input_dir", + default=None, + type=str, + required=True, + help="The input data dir. Should contain .hdf5 files for the task.") + parser.add_argument("--config_file", + default="bert_config.json", + type=str, + required=False, + help="The BERT model config") + parser.add_argument("--ckpt_dir", + default=None, + type=str, + required=True, + help="The ckpt directory, e.g. 
/results") + + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--eval', dest='do_eval', action='store_true') + group.add_argument('--prediction', dest='do_eval', action='store_false') + ## Other parameters + parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") + parser.add_argument("--max_seq_length", + default=512, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. \n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--max_predictions_per_seq", + default=80, + type=int, + help="The maximum total of masked tokens in input sequence") + parser.add_argument("--ckpt_step", + default=-1, + type=int, + required=False, + help="The model checkpoint iteration, e.g. 1000") + + parser.add_argument("--eval_batch_size", + default=8, + type=int, + help="Total batch size for training.") + parser.add_argument("--max_steps", + default=-1, + type=int, + help="Total number of eval steps to perform, otherwise use full dataset") + parser.add_argument("--no_cuda", + default=False, + action='store_true', + help="Whether not to use CUDA when available") + parser.add_argument("--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument('--fp16', + default=False, + action='store_true', + help="Whether to use 16-bit float precision instead of 32-bit") + + + + args = parser.parse_args() + + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl', init_method='env://') + n_gpu = torch.cuda.device_count() + if n_gpu > 1: + assert(args.local_rank != -1) # only use torch.distributed for multi-gpu + logger.info("device %s n_gpu %d distributed inference %r", device, n_gpu, bool(args.local_rank != -1)) + + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + + + + # Prepare model + config = BertConfig.from_json_file(args.config_file) + model = BertForPreTraining(config) + + + if args.ckpt_step == -1: + #retrieve latest model + model_names = [f for f in os.listdir(args.ckpt_dir) if f.endswith(".model")] + args.ckpt_step = max([int(x.split('.model')[0].split('_')[1].strip()) for x in model_names]) + print("load model saved at iteraton", args.ckpt_step) + model_file = os.path.join(args.ckpt_dir, "ckpt_" + str(args.ckpt_step) + ".model") + state_dict = torch.load(model_file, map_location="cpu") + model.load_state_dict(state_dict, strict=False) + + if args.fp16: + model.half() # all parameters and buffers are converted to half precision + model.to(device) + + multi_gpu_training = args.local_rank != -1 and torch.distributed.is_initialized() + if multi_gpu_training: + model = DDP(model) + + files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if os.path.isfile(os.path.join(args.input_dir, f))] + 
files.sort() + + + + + logger.info("***** Running evaluation *****") + logger.info(" Batch size = %d", args.eval_batch_size) + + + model.eval() + print("Evaluation. . .") + + nb_instances = 0 + max_steps = args.max_steps if args.max_steps > 0 else np.inf + global_step = 0 + + + with torch.no_grad(): + if args.do_eval: + final_loss = 0.0 # + for data_file in files: + logger.info("file %s" %( data_file)) + dataset = pretraining_dataset(input_file=data_file, max_pred_length=args.max_predictions_per_seq) + if not multi_gpu_training: + train_sampler = RandomSampler(dataset) + datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True) + else: + train_sampler = DistributedSampler(dataset) + datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True) + for step, batch in enumerate(tqdm(datasetloader, desc="Iteration")): + if global_step > max_steps: + break + + + batch = [t.to(device) for t in batch] + input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch#\ + loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, next_sentence_label=next_sentence_labels) + final_loss += loss + + global_step += 1 + + torch.cuda.empty_cache() + if global_step > max_steps: + break + final_loss /= global_step + if multi_gpu_training: + final_loss /= torch.distributed.get_world_size() + dist.all_reduce(final_loss) + if (not multi_gpu_training or (multi_gpu_training and torch.distributed.get_rank() == 0)): + logger.info("Finished: Final Loss = {}".format(final_loss)) + + + else: # inference + # if multi_gpu_training: + # torch.distributed.barrier() + # start_t0 = time.time() + for data_file in files: + logger.info("file %s" %( data_file)) + dataset = pretraining_dataset(input_file=data_file, max_pred_length=args.max_predictions_per_seq) + if not multi_gpu_training: + train_sampler = RandomSampler(dataset) + datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True) + else: + train_sampler = DistributedSampler(dataset) + datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True) + for step, batch in enumerate(tqdm(datasetloader, desc="Iteration")): + if global_step > max_steps: + break + + + batch = [t.to(device) for t in batch] + input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch#\ + + lm_logits, nsp_logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=None, next_sentence_label=None) + + nb_instances += input_ids.size(0) + + + global_step += 1 + torch.cuda.empty_cache() + if global_step > max_steps: + break + # if multi_gpu_training: + # torch.distributed.barrier() + if (not multi_gpu_training or (multi_gpu_training and torch.distributed.get_rank() == 0)): + logger.info("Finished") + + + + + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/run_squad.py b/PyTorch/LanguageModeling/BERT/run_squad.py new file mode 100644 index 00000000..d5d499d1 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/run_squad.py @@ -0,0 +1,1143 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Run BERT on SQuAD.""" + +from __future__ import absolute_import, division, print_function + +import argparse +import collections +import json +import logging +import math +import os +import random +import sys +from io import open + +import numpy as np +import torch +from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, + TensorDataset) +from torch.utils.data.distributed import DistributedSampler +from tqdm import tqdm, trange + +from apex import amp +from schedulers import LinearWarmUpScheduler +from file_utils import PYTORCH_PRETRAINED_BERT_CACHE +from modeling import BertForQuestionAnswering, BertConfig, WEIGHTS_NAME, CONFIG_NAME +from optimization import BertAdam, warmup_linear +from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize) + +if sys.version_info[0] == 2: + import cPickle as pickle +else: + import pickle + +logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO) +logger = logging.getLogger(__name__) + + +class SquadExample(object): + """ + A single training/test example for the Squad dataset. + For examples without an answer, the start and end position are -1. + """ + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=None): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (self.qas_id) + s += ", question_text: %s" % ( + self.question_text) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.end_position: + s += ", end_position: %d" % (self.end_position) + if self.is_impossible: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + +def read_squad_examples(input_file, is_training, version_2_with_negative): + """Read a SQuAD json file into a list of SquadExample.""" + with open(input_file, 
"r", encoding='utf-8') as reader: + input_data = json.load(reader)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + end_position = None + orig_answer_text = None + is_impossible = False + if is_training: + if version_2_with_negative: + is_impossible = qa["is_impossible"] + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + answer_offset = answer["answer_start"] + answer_length = len(orig_answer_text) + start_position = char_to_word_offset[answer_offset] + end_position = char_to_word_offset[answer_offset + answer_length - 1] + # Only add answers where the text can be exactly recovered from the + # document. If this CAN'T happen it's likely due to weird Unicode + # stuff so we will just skip the example. + # + # Note that this means for training mode, every example is NOT + # guaranteed to be preserved. + actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join( + whitespace_tokenize(orig_answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + logger.warning("Could not find answer: '%s' vs. 
'%s'", + actual_text, cleaned_answer_text) + continue + else: + start_position = -1 + end_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + is_impossible=is_impossible) + examples.append(example) + return examples + + +def convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training): + """Loads a data file into a list of `InputBatch`s.""" + + unique_id = 1000000000 + + features = [] + for (example_index, example) in enumerate(examples): + query_tokens = tokenizer.tokenize(example.question_text) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. + _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. 
+ while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if is_training and not example.is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and + tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + start_position = 0 + end_position = 0 + else: + doc_offset = len(query_tokens) + 2 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + if is_training and example.is_impossible: + start_position = 0 + end_position = 0 + if example_index < 20: + logger.info("*** Example ***") + logger.info("unique_id: %s" % (unique_id)) + logger.info("example_index: %s" % (example_index)) + logger.info("doc_span_index: %s" % (doc_span_index)) + logger.info("tokens: %s" % " ".join(tokens)) + logger.info("token_to_orig_map: %s" % " ".join([ + "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) + logger.info("token_is_max_context: %s" % " ".join([ + "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() + ])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info( + "input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + if is_training and example.is_impossible: + logger.info("impossible example") + if is_training and not example.is_impossible: + answer_text = " ".join(tokens[start_position:(end_position + 1)]) + logger.info("start_position: %d" % (start_position)) + logger.info("end_position: %d" % (end_position)) + logger.info( + "answer: %s" % (answer_text)) + + features.append( + InputFeatures( + unique_id=unique_id, + example_index=example_index, + doc_span_index=doc_span_index, + tokens=tokens, + token_to_orig_map=token_to_orig_map, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + start_position=start_position, + end_position=end_position, + is_impossible=example.is_impossible)) + unique_id += 1 + + return features + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, + orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + + # The SQuAD annotations are character based. We first project them to + # whitespace-tokenized words. But then after WordPiece tokenization, we can + # often find a "better match". For example: + # + # Question: What year was John Smith born? + # Context: The leader was John Smith (1895-1943). + # Answer: 1895 + # + # The original whitespace-tokenized answer will be "(1895-1943).". However + # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match + # the exact answer, 1895. + # + # However, this is not always possible. Consider the following: + # + # Question: What country is the top exporter of electornics? + # Context: The Japanese electronics industry is the lagest in the world. + # Answer: Japan + # + # In this case, the annotator chose "Japan" as a character sub-span of + # the word "Japanese". 
Since our WordPiece tokenizer does not split + # "Japanese", we just use "Japanese" as the annotation. This is fairly rare + # in SQuAD, but does happen. + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +RawResult = collections.namedtuple("RawResult", + ["unique_id", "start_logits", "end_logits"]) + + +def write_predictions(all_examples, all_features, all_results, n_best_size, + max_answer_length, do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, verbose_logging, + version_2_with_negative, null_score_diff_threshold): + """Write final predictions to the json file and log-odds of null if needed.""" + logger.info("Writing predictions to: %s" % (output_prediction_file)) + logger.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min mull score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for 
(feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + null_end_logit = result.end_logits[0] + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=0, + end_index=0, + start_logit=null_start_logit, + end_logit=null_end_logit)) + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + # if we didn't include the empty option in the n-best, include it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append( + _NbestPrediction( + text="", + start_logit=null_start_logit, + end_logit=null_end_logit)) + + # In very rare edge cases we could only have single null prediction. + # So we just create a nonce prediction in this case to avoid failure. 
+ if len(nbest) == 1: + nbest.insert(0, + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + # predict "" iff the null score - the score of best non-null > threshold + score_diff = score_null - best_non_null_entry.start_logit - ( + best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > null_score_diff_threshold: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + all_nbest_json[example.qas_id] = nbest_json + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + +def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. + # + # However, `orig_text` may contain extra characters that we don't want in + # our prediction. + # + # For example, let's say: + # pred_text = steve smith + # orig_text = Steve Smith's + # + # We don't want to return `orig_text` because it contains the extra "'s". + # + # We don't want to return `pred_text` because it's already been normalized + # (the SQuAD eval script also does punctuation stripping/lower casing but + # our tokenizer does additional normalization like stripping accent + # characters). + # + # What we really want to return is "Steve Smith". + # + # Therefore, we have to apply a semi-complicated alignment heruistic between + # `pred_text` and `orig_text` to get a character-to-charcter alignment. This + # can fail in certain cases in which case we just return `orig_text`. + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for (i, c) in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. 
If they are the same + # length, we assume the characters are one-to-one aligned. + + tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + if verbose_logging: + logger.info( + "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + if verbose_logging: + logger.info("Length not equal after stripping spaces: '%s' vs '%s'", + orig_ns_text, tok_ns_text) + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. + tok_s_to_ns_map = {} + for (i, tok_index) in tok_ns_to_s_map.items(): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if verbose_logging: + logger.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if verbose_logging: + logger.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--bert_model", default=None, type=str, required=True, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " + "bert-base-multilingual-cased, bert-base-chinese.") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model checkpoints and predictions will be written.") + parser.add_argument("--init_checkpoint", + default=None, + type=str, + required=True, + help="The checkpoint file from pretraining") + + ## Other parameters + parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json") + parser.add_argument("--predict_file", default=None, type=str, + help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") + parser.add_argument("--max_seq_length", default=384, type=int, + help="The maximum total input sequence length after WordPiece tokenization. Sequences " + "longer than this will be truncated, and sequences shorter than this will be padded.") + parser.add_argument("--doc_stride", default=128, type=int, + help="When splitting up a long document into chunks, how much stride to take between chunks.") + parser.add_argument("--max_query_length", default=64, type=int, + help="The maximum number of tokens for the question. Questions longer than this will " + "be truncated to this length.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--old", action='store_true', help="use old fp16 optimizer") + parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.") + parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") + parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.") + parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") + parser.add_argument("--num_train_epochs", default=3.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1.0, type=float, + help="Total number of training steps to perform.") + parser.add_argument("--warmup_proportion", default=0.1, type=float, + help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% " + "of training.") + parser.add_argument("--n_best_size", default=20, type=int, + help="The total number of n-best predictions to generate in the nbest_predictions.json " + "output file.") + parser.add_argument("--max_answer_length", default=30, type=int, + help="The maximum length of an answer that can be generated. This is needed because the start " + "and end predictions are not conditioned on one another.") + parser.add_argument("--verbose_logging", action='store_true', + help="If true, all of the warnings related to data processing will be printed. " + "A number of warnings are expected for a normal SQuAD evaluation.") + parser.add_argument("--no_cuda", + action='store_true', + help="Whether not to use CUDA when available") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--do_lower_case", + action='store_true', + help="Whether to lower case the input text. True for uncased models, False for cased models.") + parser.add_argument("--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--fp16', + action='store_true', + help="Whether to use 16-bit float precision instead of 32-bit") + parser.add_argument('--loss_scale', + type=float, default=0, + help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" + "0 (default value): dynamic loss scaling.\n" + "Positive power of 2: static loss scaling value.\n") + parser.add_argument('--version_2_with_negative', + action='store_true', + help='If true, the SQuAD examples contain some that do not have an answer.') + parser.add_argument('--null_score_diff_threshold', + type=float, default=0.0, + help="If null_score - best_non_null is greater than the threshold predict null.") + parser.add_argument('--vocab_file', + type=str, default=None, required=True, + help="Vocabulary mapping/file BERT was pretrainined on") + parser.add_argument("--config_file", + default=None, + type=str, + required=True, + help="The BERT model config") + parser.add_argument('--log_freq', + type=int, default=50, + help='frequency of logging loss.') + args = parser.parse_args() + + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + n_gpu = torch.cuda.device_count() + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl', init_method='env://') + logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( + device, n_gpu, bool(args.local_rank != -1), args.fp16)) + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + + args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + if not args.do_train and not args.do_predict: + raise ValueError("At least one of `do_train` or `do_predict` must be True.") + + if args.do_train: + if not args.train_file: + raise ValueError( + "If `do_train` is True, then `train_file` must be specified.") + if args.do_predict: + if not args.predict_file: + raise ValueError( + "If `do_predict` is True, then `predict_file` must be specified.") + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and os.listdir(args.output_dir)!=['logfile.txt']: + print("WARNING: Output directory {} already exists and is not empty.".format(args.output_dir), os.listdir(args.output_dir)) + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large + # tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) + + train_examples = None + num_train_optimization_steps = None + if args.do_train: + train_examples = read_squad_examples( + input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative) + num_train_optimization_steps = int( + len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs + if args.local_rank != -1: + num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() + + # Prepare model + config = BertConfig.from_json_file(args.config_file) + model = BertForQuestionAnswering(config) + # model = BertForQuestionAnswering.from_pretrained(args.bert_model, + # cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 
'distributed_{}'.format(args.local_rank))) + model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False) + + model.to(device) + if args.fp16 and args.old: + model.half() + # Prepare optimizer + param_optimizer = list(model.named_parameters()) + + # hack to remove pooler, which is not used + # thus it produce None grad that break apex + param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] + + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + + if args.fp16: + try: + # from fused_adam_local import FusedAdamBert as FusedAdam + from apex.optimizers import FusedAdam + from apex.optimizers import FP16_Optimizer + except ImportError: + raise ImportError( + "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + # import ipdb; ipdb.set_trace() + optimizer = FusedAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + bias_correction=False, + max_grad_norm=1.0) + + if args.loss_scale == 0: + if args.old: + optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) + else: + model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, + loss_scale="dynamic") + else: + if args.old: + optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) + else: + model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale=args.loss_scale) + if not args.old and args.do_train: + scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps) + + else: + optimizer = BertAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + warmup=args.warmup_proportion, + t_total=num_train_optimization_steps) + + #print(model) + if args.local_rank != -1: + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise ImportError( + "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + model = DDP(model) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + global_step = 0 + if args.do_train: + cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format( + list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), + str(args.max_query_length)) + train_features = None + try: + with open(cached_train_features_file, "rb") as reader: + train_features = pickle.load(reader) + except: + train_features = convert_examples_to_features( + examples=train_examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=True) + if args.local_rank == -1 or torch.distributed.get_rank() == 0: + logger.info(" Saving train features into cached file %s", cached_train_features_file) + with open(cached_train_features_file, "wb") as writer: + pickle.dump(train_features, writer) + logger.info("***** Running training *****") + logger.info(" Num orig examples = %d", len(train_examples)) + logger.info(" Num split examples = %d", len(train_features)) + logger.info(" Batch size = %d", args.train_batch_size) + logger.info(" Num steps = %d", num_train_optimization_steps) + all_input_ids = torch.tensor([f.input_ids for f 
in train_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) + all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_start_positions, all_end_positions) + if args.local_rank == -1: + train_sampler = RandomSampler(train_data) + else: + train_sampler = DistributedSampler(train_data) + train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) + + model.train() + for _ in trange(int(args.num_train_epochs), desc="Epoch"): + for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): + # Terminate early for benchmarking + + if args.max_steps > 0 and global_step > args.max_steps: + break + + if n_gpu == 1: + batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self + input_ids, input_mask, segment_ids, start_positions, end_positions = batch + loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions) + if n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + if args.fp16: + if args.old: + optimizer.backward(loss) + else: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + # if args.fp16: + # optimizer.backward(loss) + # else: + # loss.backward() + if (step + 1) % args.gradient_accumulation_steps == 0: + if args.fp16 : + # modify learning rate with special warm up for BERT which FusedAdam doesn't do + if not args.old: + scheduler.step() + else: + lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion) + for param_group in optimizer.param_groups: + param_group['lr'] = lr_this_step + + optimizer.step() + optimizer.zero_grad() + global_step += 1 + if step % args.log_freq == 0: + # logger.info("Step {}: Loss {}, LR {} ".format(global_step, loss.item(), lr_this_step)) + logger.info( + "Step {}: Loss {}, LR {} ".format(global_step, loss.item(), optimizer.param_groups[0]['lr'])) + + if args.do_train: + # Save a trained model and the associated configuration + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) + torch.save(model_to_save.state_dict(), output_model_file) + output_config_file = os.path.join(args.output_dir, CONFIG_NAME) + with open(output_config_file, 'w') as f: + f.write(model_to_save.config.to_json_string()) + + # # Load a trained model and config that you have fine-tuned + # config = BertConfig(output_config_file) + # model = BertForQuestionAnswering(config) + # model.load_state_dict(torch.load(output_model_file)) + # else: + # model = BertForQuestionAnswering.from_pretrained(args.bert_model) + + + if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + eval_examples = read_squad_examples( + input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative) + eval_features = convert_examples_to_features( + examples=eval_examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + 
max_query_length=args.max_query_length, + is_training=False) + + logger.info("***** Running predictions *****") + logger.info(" Num orig examples = %d", len(eval_examples)) + logger.info(" Num split examples = %d", len(eval_features)) + logger.info(" Batch size = %d", args.predict_batch_size) + + all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) + all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index) + # Run prediction for full data + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size) + + model.eval() + all_results = [] + logger.info("Start evaluating") + for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"): + if len(all_results) % 1000 == 0: + logger.info("Processing example: %d" % (len(all_results))) + input_ids = input_ids.to(device) + input_mask = input_mask.to(device) + segment_ids = segment_ids.to(device) + with torch.no_grad(): + batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask) + for i, example_index in enumerate(example_indices): + start_logits = batch_start_logits[i].detach().cpu().tolist() + end_logits = batch_end_logits[i].detach().cpu().tolist() + eval_feature = eval_features[example_index.item()] + unique_id = int(eval_feature.unique_id) + all_results.append(RawResult(unique_id=unique_id, + start_logits=start_logits, + end_logits=end_logits)) + output_prediction_file = os.path.join(args.output_dir, "predictions.json") + output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json") + output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json") + write_predictions(eval_examples, eval_features, all_results, + args.n_best_size, args.max_answer_length, + args.do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, args.verbose_logging, + args.version_2_with_negative, args.null_score_diff_threshold) + + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/run_swag.py b/PyTorch/LanguageModeling/BERT/run_swag.py new file mode 100644 index 00000000..cb8ea149 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/run_swag.py @@ -0,0 +1,561 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
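A toy sketch (pre-split tokens instead of a real WordPiece tokenizer, two endings instead of SWAG's four) of the input packing that convert_examples_to_features performs below: one [CLS] context [SEP] ending [SEP] sequence per candidate ending, with segment id 0 over the context and 1 over the ending. The model scores each packed sequence and a softmax over those scores picks the answer.

    context_tokens = ["a", "man", "opens", "the", "door"]
    endings = [["and", "walks", "in"], ["and", "flies", "away"]]

    packed = []
    for ending_tokens in endings:
        tokens = ["[CLS]"] + context_tokens + ["[SEP]"] + ending_tokens + ["[SEP]"]
        segment_ids = [0] * (len(context_tokens) + 2) + [1] * (len(ending_tokens) + 1)
        input_mask = [1] * len(tokens)              # no padding needed in this toy example
        packed.append((tokens, segment_ids, input_mask))

    for tokens, segment_ids, _ in packed:
        print(tokens)
        print(segment_ids)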
+"""BERT finetuning runner.""" + +import argparse +import csv +import logging +import os +import random +import sys +from io import open + +import numpy as np +import torch +from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, + TensorDataset) +from torch.utils.data.distributed import DistributedSampler +from tqdm import tqdm, trange + +from file_utils import PYTORCH_PRETRAINED_BERT_CACHE +from modeling import BertForMultipleChoice, BertConfig, WEIGHTS_NAME, CONFIG_NAME +from optimization import BertAdam, warmup_linear +from tokenization import BertTokenizer + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + + +class SwagExample(object): + """A single training/test example for the SWAG dataset.""" + def __init__(self, + swag_id, + context_sentence, + start_ending, + ending_0, + ending_1, + ending_2, + ending_3, + label = None): + self.swag_id = swag_id + self.context_sentence = context_sentence + self.start_ending = start_ending + self.endings = [ + ending_0, + ending_1, + ending_2, + ending_3, + ] + self.label = label + + def __str__(self): + return self.__repr__() + + def __repr__(self): + l = [ + "swag_id: {}".format(self.swag_id), + "context_sentence: {}".format(self.context_sentence), + "start_ending: {}".format(self.start_ending), + "ending_0: {}".format(self.endings[0]), + "ending_1: {}".format(self.endings[1]), + "ending_2: {}".format(self.endings[2]), + "ending_3: {}".format(self.endings[3]), + ] + + if self.label is not None: + l.append("label: {}".format(self.label)) + + return ", ".join(l) + + +class InputFeatures(object): + def __init__(self, + example_id, + choices_features, + label + + ): + self.example_id = example_id + self.choices_features = [ + { + 'input_ids': input_ids, + 'input_mask': input_mask, + 'segment_ids': segment_ids + } + for _, input_ids, input_mask, segment_ids in choices_features + ] + self.label = label + + +def read_swag_examples(input_file, is_training): + with open(input_file, 'r', encoding='utf-8') as f: + reader = csv.reader(f) + lines = [] + for line in reader: + if sys.version_info[0] == 2: + line = list(unicode(cell, 'utf-8') for cell in line) + lines.append(line) + + if is_training and lines[0][-1] != 'label': + raise ValueError( + "For training, the input file must contain a label column." + ) + + examples = [ + SwagExample( + swag_id = line[2], + context_sentence = line[4], + start_ending = line[5], # in the swag dataset, the + # common beginning of each + # choice is stored in "sent2". + ending_0 = line[7], + ending_1 = line[8], + ending_2 = line[9], + ending_3 = line[10], + label = int(line[11]) if is_training else None + ) for line in lines[1:] # we skip the line with the column names + ] + + return examples + +def convert_examples_to_features(examples, tokenizer, max_seq_length, + is_training): + """Loads a data file into a list of `InputBatch`s.""" + + # Swag is a multiple choice task. To perform this task using Bert, + # we will use the formatting proposed in "Improving Language + # Understanding by Generative Pre-Training" and suggested by + # @jacobdevlin-google in this issue + # https://github.com/google-research/bert/issues/38. + # + # Each choice will correspond to a sample on which we run the + # inference. 
For a given Swag example, we will create the 4 + # following inputs: + # - [CLS] context [SEP] choice_1 [SEP] + # - [CLS] context [SEP] choice_2 [SEP] + # - [CLS] context [SEP] choice_3 [SEP] + # - [CLS] context [SEP] choice_4 [SEP] + # The model will output a single value for each input. To get the + # final decision of the model, we will run a softmax over these 4 + # outputs. + features = [] + for example_index, example in enumerate(examples): + context_tokens = tokenizer.tokenize(example.context_sentence) + start_ending_tokens = tokenizer.tokenize(example.start_ending) + + choices_features = [] + for ending_index, ending in enumerate(example.endings): + # We create a copy of the context tokens in order to be + # able to shrink it according to ending_tokens + context_tokens_choice = context_tokens[:] + ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) + # Modifies `context_tokens_choice` and `ending_tokens` in + # place so that the total length is less than the + # specified length. Account for [CLS], [SEP], [SEP] with + # "- 3" + _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) + + tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] + segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + padding = [0] * (max_seq_length - len(input_ids)) + input_ids += padding + input_mask += padding + segment_ids += padding + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + choices_features.append((tokens, input_ids, input_mask, segment_ids)) + + label = example.label + if example_index < 5: + logger.info("*** Example ***") + logger.info("swag_id: {}".format(example.swag_id)) + for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): + logger.info("choice: {}".format(choice_idx)) + logger.info("tokens: {}".format(' '.join(tokens))) + logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) + logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) + logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) + if is_training: + logger.info("label: {}".format(label)) + + features.append( + InputFeatures( + example_id = example.swag_id, + choices_features = choices_features, + label = label + ) + ) + + return features + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. 
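+    # For example, with max_length = 8 a pair of lengths (6, 5) is trimmed
+    # to (4, 4): each iteration pops one token from whichever sequence is
+    # currently longer.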
+ while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + +def accuracy(out, labels): + outputs = np.argmax(out, axis=1) + return np.sum(outputs == labels) + +def select_field(features, field): + return [ + [ + choice[field] + for choice in feature.choices_features + ] + for feature in features + ] + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", + default=None, + type=str, + required=True, + help="The input data dir. Should contain the .csv files (or other data files) for the task.") + parser.add_argument("--bert_model", default=None, type=str, required=True, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " + "bert-base-multilingual-cased, bert-base-chinese.") + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="The output directory where the model checkpoints will be written.") + parser.add_argument("--init_checkpoint", + default=None, + type=str, + required=True, + help="The checkpoint file from pretraining") + + ## Other parameters + parser.add_argument("--max_seq_length", + default=128, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. \n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--do_train", + action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", + action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_lower_case", + action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--train_batch_size", + default=32, + type=int, + help="Total batch size for training.") + parser.add_argument("--eval_batch_size", + default=8, + type=int, + help="Total batch size for eval.") + parser.add_argument("--learning_rate", + default=5e-5, + type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--num_train_epochs", + default=3.0, + type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1.0, type=float, + help="Total number of training steps to perform.") + parser.add_argument("--warmup_proportion", + default=0.1, + type=float, + help="Proportion of training to perform linear learning rate warmup for. " + "E.g., 0.1 = 10%% of training.") + parser.add_argument("--no_cuda", + action='store_true', + help="Whether not to use CUDA when available") + parser.add_argument("--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument('--fp16', + action='store_true', + help="Whether to use 16-bit float precision instead of 32-bit") + parser.add_argument('--loss_scale', + type=float, default=0, + help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" + "0 (default value): dynamic loss scaling.\n" + "Positive power of 2: static loss scaling value.\n") + + args = parser.parse_args() + + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + n_gpu = torch.cuda.device_count() + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl') + logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( + device, n_gpu, bool(args.local_rank != -1), args.fp16)) + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + + args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + if not args.do_train and not args.do_eval: + raise ValueError("At least one of `do_train` or `do_eval` must be True.") + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir): + print("WARNING: Output directory ({}) already exists and is not empty.".format(args.output_dir)) + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) + + train_examples = None + num_train_optimization_steps = None + if args.do_train: + train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True) + num_train_optimization_steps = int( + len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs + if args.local_rank != -1: + num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() + + # Prepare model + model = BertForMultipleChoice.from_pretrained(args.bert_model, + cache_dir=os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank)), + num_choices=4) + model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False) + + if args.fp16: + model.half() + model.to(device) + if args.local_rank != -1: + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + model = DDP(model) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Prepare optimizer + param_optimizer = list(model.named_parameters()) + + # hack to remove pooler, which is not used + # thus it produce None grad that break apex + param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] + + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + if args.fp16: + try: + from apex.optimizers import FP16_Optimizer + from apex.optimizers import FusedAdam + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + optimizer = 
FusedAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + bias_correction=False, + max_grad_norm=1.0) + if args.loss_scale == 0: + optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) + else: + optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) + else: + optimizer = BertAdam(optimizer_grouped_parameters, + lr=args.learning_rate, + warmup=args.warmup_proportion, + t_total=num_train_optimization_steps) + + global_step = 0 + if args.do_train: + train_features = convert_examples_to_features( + train_examples, tokenizer, args.max_seq_length, True) + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_examples)) + logger.info(" Batch size = %d", args.train_batch_size) + logger.info(" Num steps = %d", num_train_optimization_steps) + all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long) + all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long) + all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long) + all_label = torch.tensor([f.label for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) + if args.local_rank == -1: + train_sampler = RandomSampler(train_data) + else: + train_sampler = DistributedSampler(train_data) + train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) + + model.train() + for _ in trange(int(args.num_train_epochs), desc="Epoch"): + tr_loss = 0 + nb_tr_examples, nb_tr_steps = 0, 0 + for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): + # Terminate early for benchmarking + if args.max_steps > 0 and global_step > args.max_steps: + break + + batch = tuple(t.to(device) for t in batch) + input_ids, input_mask, segment_ids, label_ids = batch + loss = model(input_ids, segment_ids, input_mask, label_ids) + if n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. 
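Each training batch here packs all four candidate endings per example: select_field groups the per-choice input_ids / input_mask / segment_ids produced by convert_examples_to_features, so the tensors fed to BertForMultipleChoice have shape [batch, num_choices, max_seq_length] and the model emits one score per choice. A toy sketch of the resulting decision step, with made-up logits (this mirrors the accuracy() helper above, not the training loss):

import numpy as np

# One hypothetical batch of two SWAG examples, one score per candidate ending.
logits = np.array([[ 1.2, -0.3,  0.4, -1.1],
                   [-0.5,  2.0,  0.1,  0.3]])
labels = np.array([0, 1])

predictions = np.argmax(logits, axis=1)           # -> array([0, 1])
num_correct = int(np.sum(predictions == labels))  # -> 2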
+ if args.fp16 and args.loss_scale != 1.0: + # rescale loss for fp16 training + # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html + loss = loss * args.loss_scale + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + tr_loss += loss.item() + nb_tr_examples += input_ids.size(0) + nb_tr_steps += 1 + + if args.fp16: + optimizer.backward(loss) + else: + loss.backward() + if (step + 1) % args.gradient_accumulation_steps == 0: + if args.fp16: + # modify learning rate with special warm up BERT uses + # if args.fp16 is False, BertAdam is used that handles this automatically + lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion) + for param_group in optimizer.param_groups: + param_group['lr'] = lr_this_step + optimizer.step() + optimizer.zero_grad() + global_step += 1 + + + if args.do_train: + # Save a trained model and the associated configuration + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) + torch.save(model_to_save.state_dict(), output_model_file) + output_config_file = os.path.join(args.output_dir, CONFIG_NAME) + with open(output_config_file, 'w') as f: + f.write(model_to_save.config.to_json_string()) + + # Load a trained model and config that you have fine-tuned + config = BertConfig(output_config_file) + model = BertForMultipleChoice(config, num_choices=4) + model.load_state_dict(torch.load(output_model_file)) + else: + model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4) + model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False) + model.to(device) + + + if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True) + eval_features = convert_examples_to_features( + eval_examples, tokenizer, args.max_seq_length, True) + logger.info("***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_examples)) + logger.info(" Batch size = %d", args.eval_batch_size) + all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long) + all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long) + all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long) + all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long) + eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) + # Run prediction for full data + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + + model.eval() + eval_loss, eval_accuracy = 0, 0 + nb_eval_steps, nb_eval_examples = 0, 0 + for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"): + input_ids = input_ids.to(device) + input_mask = input_mask.to(device) + segment_ids = segment_ids.to(device) + label_ids = label_ids.to(device) + + with torch.no_grad(): + tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids) + logits = model(input_ids, segment_ids, input_mask) + + logits = logits.detach().cpu().numpy() + label_ids = label_ids.to('cpu').numpy() + tmp_eval_accuracy = accuracy(logits, label_ids) + + eval_loss += tmp_eval_loss.mean().item() + eval_accuracy += 
tmp_eval_accuracy + + nb_eval_examples += input_ids.size(0) + nb_eval_steps += 1 + + eval_loss = eval_loss / nb_eval_steps + eval_accuracy = eval_accuracy / nb_eval_examples + + result = {'eval_loss': eval_loss, + 'eval_accuracy': eval_accuracy, + 'global_step': global_step, + 'loss': tr_loss/nb_tr_steps} + + output_eval_file = os.path.join(args.output_dir, "eval_results.txt") + with open(output_eval_file, "w") as writer: + logger.info("***** Eval results *****") + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(result[key])) + writer.write("%s = %s\n" % (key, str(result[key]))) + + +if __name__ == "__main__": + main() diff --git a/PyTorch/LanguageModeling/BERT/schedulers.py b/PyTorch/LanguageModeling/BERT/schedulers.py new file mode 100644 index 00000000..0333bbd1 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/schedulers.py @@ -0,0 +1,92 @@ +import math +import torch +from torch.optim.optimizer import Optimizer +from apex.optimizers import FP16_Optimizer +from torch.optim.lr_scheduler import _LRScheduler + + +class LRScheduler(_LRScheduler): + def __init__(self, optimizer, last_epoch=-1): + # Check if using mixed precision training + self.mixed_training = False + base_optimizer = optimizer + if isinstance(optimizer, FP16_Optimizer): + self.mixed_training = True + self.fp16_optimizer = optimizer + base_optimizer = optimizer.optimizer + # Check that optimizer param is valid + elif not isinstance(optimizer, Optimizer): + raise TypeError('{} is not an Optimizer'.format( + type(optimizer).__name__)) + + super(LRScheduler, self).__init__(base_optimizer, last_epoch) + + def step(self, epoch=None): + # Set the current training step + # ('epoch' is used to be consistent with _LRScheduler) + if self.mixed_training: + # The assumption is that the step will be constant + state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]] + if 'step' in state_dict: + self.last_epoch = state_dict['step'] + 1 + else: + self.last_epoch = 1 + else: + self.last_epoch = epoch if epoch is not None else self.last_epoch + 1 + + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + + +class CosineWarmupScheduler(LRScheduler): + """ + Applies a warm up period to the learning rate. + """ + + def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): + self.warmup = warmup + self.total_steps = total_steps + super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch) + + def get_lr(self): + progress = self.last_epoch / self.total_steps + if progress < self.warmup: + return [base_lr * progress / self.warmup for base_lr in self.base_lrs] + else: + return [base_lr * (0.5 * (1.0 + torch.cos(math.pi + progress))) for base_lr in self.base_lrs] + + +class ConstantWarmupScheduler(LRScheduler): + """ + Applies a warm up period to the learning rate. + """ + + def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): + self.warmup = warmup + self.total_steps = total_steps + super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch) + + def get_lr(self): + progress = self.last_epoch / self.total_steps + if progress < self.warmup: + return [base_lr * progress / self.warmup for base_lr in self.base_lrs] + else: + return self.base_lrs + + +class LinearWarmUpScheduler(LRScheduler): + """ + Applies a warm up period to the learning rate. 
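+    After the first `warmup` fraction of `total_steps`, the rate is decayed
+    linearly from the base value down to zero at `total_steps`.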
+ """ + + def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): + self.warmup = warmup + self.total_steps = total_steps + super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch) + + def get_lr(self): + progress = self.last_epoch / self.total_steps + if progress < self.warmup: + return [base_lr * progress / self.warmup for base_lr in self.base_lrs] + else: + return [base_lr * max(( progress - 1.0)/(self.warmup - 1.0), 0.) for base_lr in self.base_lrs] diff --git a/PyTorch/LanguageModeling/BERT/scripts/data_download.sh b/PyTorch/LanguageModeling/BERT/scripts/data_download.sh new file mode 100755 index 00000000..a71f3110 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/data_download.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +DATA_DIR=${1:-/workspace/bert/data} + +# Check running from repository root +if [ ! -d .git ]; then + echo "Not running from repository root! Exiting." + exit 1 +fi + +# Download vocab files from pretrained model +cd vocab && python3 download_models.py && rm *.zip && rm ./*/*.ckpt.* + +# Download SQUAD +cd $DATA_DIR/squad && . squad_download.sh + +# Download SWAG +git clone https://github.com/rowanz/swagaf.git $DATA_DIR/swag + +# Download GLUE +cd $DATA_DIR/glue && . download_mrpc.sh + +# WIKI Download +cd $DATA_DIR/wikipedia_corpus && . download_wikipedia.sh + +# Bookcorpus Download +cd $DATA_DIR/bookcorpus && . download_bookcorpus.sh + +cd $DATA_DIR +# Create HDF5 files for WIKI +bash create_datasets_from_start.sh wikipedia_corpus ./wikipedia_corpus/wikipedia_corpus.txt \ + && rm -r ./wikipedia_corpus/final_* \ + +# Create HDF5 files for Bookcorpus +bash create_datasets_from_start.sh bookcorpus ./bookcorpus/bookcorpus.txt \ + && rm -r ./bookcorpus/final_* \ + +# Create HDF5 files for inter sequence-pair mixed Wikipedia and Bookcorpus +bash merge_datasets_after_creation.sh merged_wiki+books wikipedia_corpus/hdf5_shards,bookcorpus/hdf5_shards 1024 diff --git a/PyTorch/LanguageModeling/BERT/scripts/docker/build.sh b/PyTorch/LanguageModeling/BERT/scripts/docker/build.sh new file mode 100644 index 00000000..a8eb8350 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/docker/build.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Check running from repository root +if [ ! -d .git ]; then + echo "Not running from repository root! Exiting." + exit 1 +fi + +docker build . --rm -t bert diff --git a/PyTorch/LanguageModeling/BERT/scripts/docker/launch.sh b/PyTorch/LanguageModeling/BERT/scripts/docker/launch.sh new file mode 100644 index 00000000..9f93e107 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/docker/launch.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Check running from repository root +if [ ! -d .git ]; then + echo "Not running from repository root! Exiting." 
+ exit 1 +fi + +DATA_DIR=${1:-"/mnt/dldata/bert"} +VOCAB_DIR=${2:-"/mnt/dldata/bert/vocab"} +CHECKPOINT_DIR=${3:-"/mnt/dldata/bert/pretrained_models_nvidia_pytorch"} + +docker run -it --rm \ + --runtime=nvidia \ + -p 8888:8888 \ + --shm-size=1g \ + --ulimit memlock=-1 \ + --ulimit stack=67108864 \ + -v $DATA_DIR:/workspace/bert/data \ + -v $CHECKPOINT_DIR:/workspace/checkpoints \ + -v $VOCAB_DIR:/workspace/bert/vocab \ + -v $PWD/results:/results \ + bert bash \ No newline at end of file diff --git a/PyTorch/LanguageModeling/BERT/scripts/run.sh b/PyTorch/LanguageModeling/BERT/scripts/run.sh new file mode 100755 index 00000000..84dcd956 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/run.sh @@ -0,0 +1,184 @@ +#!/bin/bash +#SBATCH -p mlperf # partition +#SBATCH -N 1 # number of nodes +#SBATCH -t 12:00:00 # wall time +#SBATCH -J image_classification # job name +#SBATCH --exclusive # exclusive node access +#SBATCH --mem=0 # all mem avail +#SBATCH --mail-type=FAIL # only send email on failure +#SBATCH --ntasks-per-node=8 # n tasks per machine (one task per gpu) +#SBATCH --threads-per-core=2 # HT is on +#SBATCH --cores-per-socket=20 # 20 cores on each socket +#SBATCH --overcommit + +hostname +#DGXIBDEVICES=$(eval ls /dev/infiniband/ | tr " " "\n" | awk '{printf "--device=/dev/infiniband/%s ",$1}' | sed s'/.$//') +printf "DGXIBDEVICES=%s\n" "$DGXIBDEVICES" +printf "VOLS=%s\n" "$VOLS" +printf "EXTRA_PARAMS=%s\n" "$EXTRA_PARAMS" + +cd $CODEDIR + +VOLS+=" -v $CHKPTDIR/$SLURM_JOB_ID:/checkpoints" + +mkdir -p $CHKPTDIR/$SLURM_JOB_ID + +## DO NOT CHANGE ANYTHING BELOW -- DL params are in run_and_time.sh and config_.sh files + +DEBUG=1 # 1 = Print verbose messages for debugging + +## Pre-warming the containers ## +hosts=( `scontrol show hostname |tr "\n" " "` ) +pids=(); for hostn in ${hosts[@]}; do + timeout -k 600s 600s \ + srun -N 1 -n 1 -w $hostn \ + docker pull $CONT & + pids+=($!); + pids+=($!); rets+=($?); +done +wait "${pids[@]}" +success=0; for s in ${rets[@]}; do ((success+=s)); done ; if [ $success -ne 0 ]; then echo "ERR: Container pull failed"; exit $success ; fi + +IBDEVICES=${IBDEVICES:-$DGXIBDEVICES} + +## Check whether we are running in a slurm env +INSLURM=1 +if [[ -z "$SLURM_JOB_ID" ]]; then + INSLURM=0 + export SLURM_JOB_ID="${DATESTAMP}" + export SLURM_NNODES=1 +fi +if [[ -z "SLURM_JOB_ID" || $SLURM_NNODES -eq 1 ]]; then + # don't need IB if not multi-node + export IBDEVICES="" +fi + +# Create results directory +LOGFILE_BASE="${LOGDIR}/${DATESTAMP}" +mkdir -p $(dirname "${LOGFILE_BASE}") + +export CONTNAME="${SLURM_JOB_ID}" +export DOCKEREXEC="nvidia-docker run --rm --net=host --uts=host --ipc=host --ulimit stack=67108864 --ulimit memlock=-1 --security-opt seccomp=unconfined $IBDEVICES" +CMD="python -np $((SLURM_NNODES*DGXNGPU)) -x EXTRA_PARAMS=\"${EXTRA_PARAMS}\" -x NCCL_LL_THRESHOLD=0 -x NCCL_DEBUG=INFO -x NCCL_NET_GDR_READ=1 -x NCCL_SOCKET_IFNAME=^docker0,bond0,lo $BIND ./run_pretraining.sh" +echo $CMD + +mkdir -m 777 -p $LOGDIR +echo $CMD | tee -a $LOGDIR/$DATESTAMP.log +echo "slurm job id" $SLURM_JOB_ID &> $LOGDIR/$DATESTAMP.log + +MASTER_IP=`getent hosts \`hostname\` | cut -d ' ' -f1` +SSH='' +SRUN='' +if [[ $INSLURM -eq 0 ]]; then + export hosts=( `hostname` ) +else + export hosts=( `scontrol show hostname |tr "\n" " "` ) + SSH='ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $hostn' + SRUN='srun -N 1 -n 1 -w $hostn' +fi +unique_hosts=( $(echo "${hosts[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ' ) ) +export MASTER_HOST=${hosts[0]} + +VARS="-e 
OMPI_MCA_mca_base_param_files=/dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf -e EXTRA_PARAMS -e GPUS -e BATCHSIZE -e CONT -e DGXSYSTEM=$DGXSYSTEM -e MASTER_HOST -e MASTER_IP -e SLURM_JOB_NUM_NODES -e SLURM_NNODES -e SLURM_NTASKS_PER_NODE -w /workspace/bert" + +RUNSLEEPCMD="" + +[[ "${PULL}" -eq "1" ]] && docker pull $CONT + +## Setting up MPI +# MPI support files - in /dev/shm/mpi/ +# 1. Copy user keys to /dev/shm/mpi/ +# 2. Create mca_params.conf +# 3. Create sshentry.sh to support lauching into containers on worker nodes +# 4. Create mpi_hosts file +# 5. Copy standard ssh + +if [[ $SLURM_NNODES -ne "1" ]]; then + + # Make keys and copy + echo + + [[ $DEBUG == 1 ]] && echo "Setting up ssh keys and config" + + mkdir -p ${HOME}/.ssh/sbatch/${SLURM_JOB_ID} + ssh-keygen -t rsa -b 2048 -n "" -f "${HOME}/.ssh/sbatch/${SLURM_JOB_ID}/sshkey.rsa" -C "mxnet_${SLURM_JOB_ID}_" &>/dev/null + echo command=no-port-forwarding,no-agent-forwarding,no-X11-forwarding $(cat ${HOME}/.ssh/sbatch/${SLURM_JOB_ID}/sshkey.rsa.pub) >> ${HOME}/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + + [[ $DEBUG == 1 ]] && echo "Copy keys: srun -n $SLURM_JOB_NUM_NODES && cp -R ${HOME}/.ssh/sbatch/${SLURM_JOB_ID} /dev/shm/mpi && chmod 700 /dev/shm/mpi/${SLURM_JOB_ID}" + + srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 bash -c "mkdir -p /dev/shm/mpi/${SLURM_JOB_ID}; cp -R ${HOME}/.ssh/sbatch/${SLURM_JOB_ID} /dev/shm/mpi; chmod 700 /dev/shm/mpi/${SLURM_JOB_ID}" + + sleep 2 # Making copy + + [[ $DEBUG == 1 ]] && ls /dev/shm + + # Create mpi config file + srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 tee /dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf <> /dev/shm/mpi/${SLURM_JOB_ID}/mpi_hosts + done + + [[ $DEBUG == 1 ]] && echo '::mpi-host file=' && cat /dev/shm/mpi/${SLURM_JOB_ID}/mpi_hosts + + srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 bash -c "cp $(which ssh) /dev/shm/mpi/${SLURM_JOB_ID}/.; chmod 755 /dev/shm/mpi/${SLURM_JOB_ID}/mca_params.conf; chmod 755 /dev/shm/mpi/${SLURM_JOB_ID}/sshentry.sh" + + # Check that ssh/mpi dir has correct number of files + [[ $(ls /dev/shm/mpi/${SLURM_JOB_ID} | wc -w) -lt 5 ]] && echo "ERR: /dev/shm/mpi/${SLURM_JOB_ID} doesn't exist or missing ssh/mpi files" && exit $? + +fi + +# Container launch +if [[ $INSLURM -eq 1 ]]; then + + # Launch containers behind srun + + [[ $DEBUG == 1 ]] && echo "" && echo ":Launch containers: srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 $DOCKEREXEC --name $CONTNAME $VOLS $VARS $CONT bash -c 'sleep infinity'" + srun -n $SLURM_JOB_NUM_NODES --ntasks-per-node=1 $DOCKEREXEC --name $CONTNAME $VOLS $VARS $CONT bash -c 'sleep infinity' & rv=$? +else + $DOCKEREXEC --name $CONTNAME $VOLS $VARS $CONT bash -c 'sleep infinity' & rv=$? +fi +[[ $rv -ne 0 ]] && echo "ERR: Launch sleep containers failed." && exit $rv +echo "sleep 60 while we pull our container, good golly!" +sleep 60 + +# Run benchmarks +echo "sleep again for 20" +sleep 20 +export EXTRA_PARAMS + +( +# Launching app +echo +echo "Launching user script on master node:" + hostn=$MASTER_HOST + $(eval echo $SSH) docker exec $VARS $CONTNAME $MPICMD ; rv=$? + [[ $rv -ne 0 ]] && echo "ERR: User script failed." 
&& exit $rv +) |& tee ${LOGFILE_BASE}_$nrun.log + +# Clean up (note: on SLURM we skip this, as the epilogue will take care of it) +if [[ $INSLURM -eq 0 ]]; then + docker rm -f $CONTNAME +fi \ No newline at end of file diff --git a/PyTorch/LanguageModeling/BERT/scripts/run_glue.sh b/PyTorch/LanguageModeling/BERT/scripts/run_glue.sh new file mode 100755 index 00000000..5fe89e05 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/run_glue.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +MRPC_DIR=/workspace/bert/data/glue/MRPC +OUT_DIR=/results/MRPC + +mkdir -p $OUT_DIR + +echo "Container nvidia build = " $NVIDIA_BUILD_ID + +init_checkpoint=${1} +mode=${2:-"train"} +max_steps=${3:-"-1.0"} # if < 0, has no effect +batch_size=${4:-"12"} +learning_rate=${5:-"5e-6"} +precision=${6:-"fp32"} +num_gpu=${7:-"8"} +epochs=${8:-"2"} + +if [ "$mode" != "train" ] ; then + num_gpu=1 +fi + +use_fp16="" +if [ "$precision" = "fp16" ] ; then + echo "fp16 activated!" + use_fp16="--fp16" +fi + +if [ "$num_gpu" = "1" ] ; then + mpi_command="" +else + mpi_command="torch.distributed.launch --nproc_per_node=$num_gpu" +fi + +CMD="python -m $mpi_command run_glue.py " +CMD+="--task_name MRPC " +if [ "$mode" = "train" ] ; then + CMD+="--do_train " + CMD+="--train_batch_size=$batch_size " +else + CMD+="--do_eval " + CMD+="--eval_batch_size=$batch_size " +fi +CMD+="--do_lower_case " +CMD+="--data_dir $MRPC_DIR " +CMD+="--bert_model bert-large-uncased " +CMD+="--init_checkpoint $init_checkpoint " +CMD+="--max_seq_length 128 " +CMD+="--learning_rate $learning_rate " +CMD+="--num_train_epochs $epochs " +CMD+="--max_steps $max_steps " +CMD+="--output_dir $OUT_DIR " +CMD+="$use_fp16" + +LOGFILE=$OUT_DIR/logfile +$CMD |& tee $LOGFILE + +sed -r 's/ |(\[A)/\n/g' $LOGFILE > $LOGFILE.edit + +throughput=`cat $LOGFILE.edit | grep -E 'Iteration.*[0-9.]+(s/it|it/s)' | tail -1 | egrep -o '[0-9.]+(s/it|it/s)'` + +echo "throughput: $throughput" + diff --git a/PyTorch/LanguageModeling/BERT/scripts/run_pretraining.sh b/PyTorch/LanguageModeling/BERT/scripts/run_pretraining.sh new file mode 100644 index 00000000..aaa0b1d1 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/run_pretraining.sh @@ -0,0 +1,152 @@ +#!/bin/bash + +echo "Container nvidia build = " $NVIDIA_BUILD_ID + +DATASET=wikipedia_corpus # change this for other datasets + +DATA_DIR=data/${DATASET}/hdf5_shards/ +BERT_CONFIG=bert_config.json +RESULTS_DIR=/results +CHECKPOINTS_DIR=/results/checkpoints + +mkdir -p $CHECKPOINTS_DIR + + +if [ ! -d "$DATA_DIR" ] ; then + echo "Warning! $DATA_DIR directory missing. Training cannot start" +fi +if [ ! -d "$RESULTS_DIR" ] ; then + echo "Error! $RESULTS_DIR directory missing." + exit -1 +fi +if [ ! -d "$CHECKPOINTS_DIR" ] ; then + echo "Warning! $CHECKPOINTS_DIR directory missing." + echo "Checkpoints will be written to $RESULTS_DIR instead." + CHECKPOINTS_DIR=$RESULTS_DIR +fi +if [ ! -f "$BERT_CONFIG" ] ; then + echo "Error! 
BERT large configuration file not found at $BERT_CONFIG" + exit -1 +fi + +train_batch_size=${1:-14} +learning_rate=${2:-"0.4375e-4"} +precision=${3:-"fp16"} +num_gpus=${4:-8} +warmup_proportion=${5:-"0.01"} +train_steps=${6:-2285714} +save_checkpoint_steps=${7:-2000} +resume_training=${8:-"false"} +create_logfile=${9:-"true"} +checkpoint_activations=${10:-"false"} +seed=${11:-42} + +PREC="" +if [ "$precision" = "fp16" ] ; then + PREC="--fp16" +elif [ "$precision" = "fp32" ] ; then + PREC="" +else + echo "Unknown argument" + exit -2 +fi + +CHECKPOINT_ACTIVATIONS="" +if [ "$checkpoint_activations" == "true" ] ; then + CHECKPOINT_ACTIVATIONS="--checkpoint_activations" +fi + +CHECKPOINT="" +if [ "$resume_training" == "true" ] ; then + CHECKPOINT="--resume_from_checkpoint" +fi + +echo $DATA_DIR +INPUT_DIR=$DATA_DIR +CMD=" /workspace/bert/run_pretraining.py" +CMD+=" --input_dir=$DATA_DIR" +CMD+=" --output_dir=$CHECKPOINTS_DIR" +CMD+=" --config_file=$BERT_CONFIG" +CMD+=" --do_train" +CMD+=" --bert_model=bert-large-uncased" +CMD+=" --train_batch_size=$train_batch_size" +CMD+=" --max_seq_length=512" +CMD+=" --max_predictions_per_seq=80" +CMD+=" --max_steps=$train_steps" +CMD+=" --warmup_proportion=$warmup_proportion" +CMD+=" --num_steps_per_checkpoint=$save_checkpoint_steps" +CMD+=" --learning_rate=$learning_rate" +CMD+=" --seed=$seed" +CMD+=" $PREC" +CMD+=" $CHECKPOINT_ACTIVATIONS" +CMD+=" $CHECKPOINT" + + +if [ "$num_gpus" -gt 1 ] ; then + CMD="python3 -m torch.distributed.launch --nproc_per_node=$num_gpus $CMD" +else + CMD="python3 $CMD" +fi + + +if [ "$create_logfile" = "true" ] ; then + export GBS=$(expr $train_batch_size \* $num_gpus) + printf -v TAG "pyt_bert_pretraining_%s_gbs%d" "$precision" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log + printf "Logs written to %s\n" "$LOGFILE" +fi + +set -x +if [ -z "$LOGFILE" ] ; then + $CMD +else + ( + $CMD + ) |& tee $LOGFILE +fi + +set +x + +echo "finished pretraining, starting benchmarking" + +target_loss=15 +THROUGHPUT=10 +THRESHOLD=0.9 + +throughput=`cat $LOGFILE | grep Iteration | tail -1 | awk -F's/it' '{print $1}' | awk -F',' '{print $2}' | egrep -o [0-9.]+` +loss=`cat $LOGFILE | grep 'Average Loss' | tail -1 | awk -F'Average Loss =' '{print $2}' | awk -F' ' '{print $1}' | egrep -o [0-9.]+` +final_loss=`cat $LOGFILE | grep 'Total Steps' | tail -1 | awk -F'Final Loss =' '{print $2}' | awk -F' ' '{print $1}' | egrep -o [0-9.]+` + +echo "throughput: $throughput s/it" +echo "average loss: $loss" +echo "final loss: $final_loss" + +ACCURACY_TEST_RESULT=$(awk 'BEGIN {print ('${loss}' <= '${target_loss}')}') + +if [ $ACCURACY_TEST_RESULT == 1 ]; + then + echo "&&&& ACCURACY TEST PASSED" + else + echo "&&&& ACCURACY TEST FAILED" + fi + +PERFORMANCE_TEST_RESULT=$(awk 'BEGIN {print ('${throughput}' <= ('${THROUGHPUT}' * '${THRESHOLD}'))}') + +if [ $PERFORMANCE_TEST_RESULT == 1 ]; + then + echo "&&&& PERFORMANCE TEST PASSED" + else + echo "&&&& PERFORMANCE TEST FAILED" + fi + +if [ $ACCURACY_TEST_RESULT == 1 -a $PERFORMANCE_TEST_RESULT == 1 ]; + then + echo "&&&& PASSED" + exit 0 + else + echo "&&&& FAILED" + exit 1 + fi + + diff --git a/PyTorch/LanguageModeling/BERT/scripts/run_pretraining_inference.sh b/PyTorch/LanguageModeling/BERT/scripts/run_pretraining_inference.sh new file mode 100644 index 00000000..5e7519c4 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/run_pretraining_inference.sh @@ -0,0 +1,146 @@ +#!/bin/bash + +echo "Container nvidia build = " $NVIDIA_BUILD_ID + +DATASET=wikipedia_corpus # change 
this for other datasets + +DATA_DIR=data/${DATASET}/hdf5_shards/ +BERT_CONFIG=bert_config.json +RESULTS_DIR=/results +CHECKPOINTS_DIR=/results/checkpoints + + +if [ ! -d "$DATA_DIR" ] ; then + echo "Warning! $DATA_DIR directory missing. Inference cannot start" +fi +if [ ! -d "$RESULTS_DIR" ] ; then + echo "Error! $RESULTS_DIR directory missing." + exit -1 +fi +if [ ! -d "$CHECKPOINTS_DIR" ] ; then + echo "Warning! $CHECKPOINTS_DIR directory missing." + echo "Checkpoints will be loaded from $RESULTS_DIR instead." + CHECKPOINTS_DIR=$RESULTS_DIR +fi +if [ ! -f "$BERT_CONFIG" ] ; then + echo "Error! BERT large configuration file not found at $BERT_CONFIG" + exit -1 +fi + +eval_batch_size=${1:-14} +precision=${2:-"fp16"} +num_gpus=${3:-8} +inference_mode=${4:-"eval"} +model_checkpoint=${5:-"-1"} +inference_steps=${6:-"-1"} +create_logfile=${7:-"true"} +seed=${8:-42} + +PREC="" +if [ "$precision" = "fp16" ] ; then + PREC="--fp16" +elif [ "$precision" = "fp32" ] ; then + PREC="" +else + echo "Unknown argument" + exit -2 +fi + + +MODE="" +if [ "$inference_mode" = "eval" ] ; then + MODE="--eval" +elif [ "$inference_mode" = "prediction" ] ; then + MODE="--prediction" +else + echo "Unknown argument" + exit -2 +fi + +echo $DATA_DIR +CMD=" /workspace/bert/run_pretraining_inference.py" +CMD+=" --input_dir=$DATA_DIR" +CMD+=" --ckpt_dir=$CHECKPOINTS_DIR" +CMD+=" --config_file=$BERT_CONFIG" +CMD+=" --bert_model=bert-large-uncased" +CMD+=" --eval_batch_size=$eval_batch_size" +CMD+=" --max_seq_length=512" +CMD+=" --max_predictions_per_seq=80" +CMD+=" --max_steps=$inference_steps" +CMD+=" --ckpt_step=$model_checkpoint" +CMD+=" --seed=$seed" +CMD+=" $PREC" +CMD+=" $MODE" + +if [ "$num_gpus" -gt 1 ] ; then + CMD="python3 -m torch.distributed.launch --nproc_per_node=$num_gpus $CMD" +else + CMD="python3 $CMD" +fi + +if [ "$create_logfile" = "true" ] ; then + export GBS=$((eval_batch_size * num_gpus)) + printf -v TAG "pyt_bert_pretraining_inference_%s_gbs%d" "$precision" $GBS + DATESTAMP=`date +'%y%m%d%H%M%S'` + LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log + printf "Logs written to %s\n" "$LOGFILE" +fi + +set -x +if [ -z "$LOGFILE" ] ; then + $CMD +else + ( + $CMD + ) |& tee $LOGFILE +fi +set +x + +target_loss=15 +THROUGHPUT=1.0 +THRESHOLD=0.9 + +throughput=`cat $LOGFILE | grep Iteration | tail -1 | awk -F'it/s' '{print $1}' | awk -F',' '{print $2}' | egrep -o [0-9.]+` + + +echo "throughput: $throughput it/s" + + +PERFORMANCE_TEST_RESULT=$(awk 'BEGIN {print ('${throughput}' >= \ + ('${THROUGHPUT}' * '${THRESHOLD}'))}') + +if [ $PERFORMANCE_TEST_RESULT == 1 ]; + then + echo "&&&& PERFORMANCE TEST PASSED" + else + echo "&&&& PERFORMANCE TEST FAILED" + fi + + +if [ "$inference_mode" = "eval" ] ; then + loss=`cat $LOGFILE | grep Finished | tail -1 | awk -F'Final Loss =' '{print $2}' | awk -F' ' '{print $1}' | egrep -o [0-9.]+` + + + echo "final loss: $loss" + + + ACCURACY_TEST_RESULT=$(awk 'BEGIN {print ('${loss}' <= '${target_loss}')}') + + if [ $ACCURACY_TEST_RESULT == 1 ]; + then + echo "&&&& ACCURACY TEST PASSED" + else + echo "&&&& ACCURACY TEST FAILED" + fi + + + if [ $ACCURACY_TEST_RESULT == 1 -a $PERFORMANCE_TEST_RESULT == 1 ]; + then + echo "&&&& PASSED" + exit 0 + else + echo "&&&& FAILED" + exit 1 + fi +fi + diff --git a/PyTorch/LanguageModeling/BERT/scripts/run_squad.sh b/PyTorch/LanguageModeling/BERT/scripts/run_squad.sh new file mode 100755 index 00000000..8977f0ab --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/run_squad.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +#OUT_DIR=/results/SQuAD + + +echo 
"Container nvidia build = " $NVIDIA_BUILD_ID + +init_checkpoint=${1:-"/workspace/checkpoints/bert_uncased.pt"} +epochs=${2:-"2.0"} +batch_size=${3:-"24"} +learning_rate=${4:-"3e-5"} +precision=${5:-"fp16"} +num_gpu=${6:-"8"} +seed=${7:-"42"} +squad_dir=${8:-"/workspace/bert/data/squad/v1.1"} +vocab_file=${9:-"/workspace/bert/vocab/vocab"} +OUT_DIR=${10:-"/results/SQuAD"} +mode=${11:-"train eval"} +CONFIG_FILE=${12:-"/workspace/bert/bert_config.json"} +max_steps=${13:-"-1"} + +echo "out dir is $OUT_DIR" +mkdir -p $OUT_DIR +if [ ! -d "$OUT_DIR" ]; then + echo "ERROR: non existing $OUT_DIR" + exit 1 +fi + +use_fp16="" +if [ "$precision" = "fp16" ] ; then + echo "fp16 activated!" + use_fp16=" --fp16 " +fi + +if [ "$num_gpu" = "1" ] ; then + export CUDA_VISIBLE_DEVICES=0 + mpi_command="" +else + unset CUDA_VISIBLE_DEVICES + mpi_command=" -m torch.distributed.launch --nproc_per_node=$num_gpu" +fi + +CMD="python $mpi_command run_squad.py " +CMD+="--init_checkpoint=$init_checkpoint " +if [ "$mode" = "train" ] ; then + CMD+="--do_train " + CMD+="--train_file=$squad_dir/train-v1.1.json " + CMD+="--train_batch_size=$batch_size " +elif [ "$mode" = "eval" ] ; then + CMD+="--do_predict " + CMD+="--predict_file=$squad_dir/dev-v1.1.json " + CMD+="--predict_batch_size=$batch_size " +else + CMD+=" --do_train " + CMD+=" --train_file=$squad_dir/train-v1.1.json " + CMD+=" --train_batch_size=$batch_size " + CMD+="--do_predict " + CMD+="--predict_file=$squad_dir/dev-v1.1.json " + CMD+="--predict_batch_size=$batch_size " +fi +CMD+=" --do_lower_case " +# CMD+=" --old " +# CMD+=" --loss_scale=128 " +CMD+=" --bert_model=bert-large-uncased " +CMD+=" --learning_rate=$learning_rate " +CMD+=" --seed=$seed " +CMD+=" --num_train_epochs=$epochs " +CMD+=" --max_seq_length=384 " +CMD+=" --doc_stride=128 " +CMD+=" --output_dir=$OUT_DIR " +CMD+=" --vocab_file=$vocab_file " +CMD+=" --config_file=$CONFIG_FILE " +CMD+=" --max_steps=$max_steps " +CMD+=" $use_fp16" + +LOGFILE=$OUT_DIR/logfile.txt +echo "$CMD |& tee $LOGFILE" +time $CMD |& tee $LOGFILE + +#sed -r 's/ +#|([A)/\n/g' $LOGFILE > $LOGFILE.edit +throughput=`cat $LOGFILE | grep -E 'Iteration.*[0-9.]+(s/it|it/s)' | tail -1 | egrep -o '[0-9.]+(s/it|it/s)' | head -1 | egrep -o '[0-9.]+'` + +if [ "$mode" != "train" ]; then +python $squad_dir/evaluate-v1.1.py $squad_dir/dev-v1.1.json $OUT_DIR/predictions.json |& tee -a $LOGFILE +fi + +echo "throughput: $throughput" \ No newline at end of file diff --git a/PyTorch/LanguageModeling/BERT/scripts/run_swag.sh b/PyTorch/LanguageModeling/BERT/scripts/run_swag.sh new file mode 100755 index 00000000..8a854bb1 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/run_swag.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +SWAG_DIR=/workspace/bert/data/swag +OUT_DIR=/results/SWAG + +mkdir -p $OUT_DIR + +echo "Container nvidia build = " $NVIDIA_BUILD_ID + +init_checkpoint=${1} +mode=${2:-"train"} +max_steps=${3:-"-1.0"} # if < 0, has no effect +batch_size=${4:-"12"} +learning_rate=${5:-"5e-6"} +precision=${6:-"fp32"} +num_gpu=${7:-"8"} +epochs=${8:-"2"} + +if [ "$mode" != "train" ] ; then + num_gpu=1 +fi + +use_fp16="" +if [ "$precision" = "fp16" ] ; then + echo "fp16 activated!" 
+ use_fp16="--fp16" +fi + +if [ "$num_gpu" = "1" ] ; then + mpi_command="" +else + mpi_command="torch.distributed.launch --nproc_per_node=$num_gpu" +fi + +CMD="python -m $mpi_command run_swag.py " +CMD+="--init_checkpoint=$init_checkpoint " +if [ "$mode" = "train" ] ; then + CMD+="--do_train " + CMD+="--train_batch_size=$batch_size " +else + CMD+="--do_eval " + CMD+="--eval_batch_size=$batch_size " +fi +CMD+="--do_lower_case " +CMD+="--data_dir $SWAG_DIR/data/ " +CMD+="--bert_model bert-large-uncased " +CMD+="--max_seq_length 128 " +CMD+="--learning_rate $learning_rate " +CMD+="--num_train_epochs $epochs " +CMD+="--max_steps $max_steps " +CMD+="--output_dir $OUT_DIR " +CMD+="$use_fp16" + +LOGFILE=$OUT_DIR/logfile +$CMD |& tee $LOGFILE + +sed -r 's/ |(\[A)/\n/g' $LOGFILE > $LOGFILE.edit + +throughput=`cat $LOGFILE.edit | grep -E 'Iteration.*[0-9.]+(s/it|it/s)' | tail -1 | egrep -o '[0-9.]+(s/it|it/s)'` + +echo "throughput: $throughput" + diff --git a/PyTorch/LanguageModeling/BERT/scripts/start_pretraining.sh b/PyTorch/LanguageModeling/BERT/scripts/start_pretraining.sh new file mode 100644 index 00000000..7732ae90 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/scripts/start_pretraining.sh @@ -0,0 +1,89 @@ +#!/bin/bash +# purpose: for multinode training on slurm clusters +node_type=${1:-"dgx1"} +num_nodes=${2:-1} +partition=${3:-"default"} +wall_time=${4:-"12:00:00"} +job_name=${5:-"pyt_bert"} +root_dir=${6:-"$PWD"} +train_batch_size=${7:-4} +eval_batch_size=${8:-4} +train_steps=${9:-1000000} +warmup_proportion=${10:-0.01} +learning_rate=${11:-1e-4} +precision=${12:-"fp16"} +save_checkpoint_steps=${13:-5000} +results_dir=${14:-"$root_dir/results"} +checkpoints_dir=${15:-"$root_dir/checkpoints"} + +CONT=${CONT:-"gitlab-master.nvidia.com:5005/dl/dgx/pytorch:19.02-py3-devel"} + +BENCHMARK=${BENCHMARK:-"bert"} +BENCHMARK_NAME="bert" + +if [ "$node_type" = "dgx1" ] ; then + echo "Running on dgx1 systems" + DGXSYSTEM="DGX1" + DGXNGPU=8 + DGXSOCKETCORES=20 + DGXNSOCKET=2 + DGXHT=2 + DGXIBDEVICES='--device=/dev/infiniband --device=/dev/infiniband/rdma_cm --device=/dev/infiniband/ucm3 --device=/dev/infiniband/ucm2 --device=/dev/infiniband/ucm1 --device=/dev/infiniband/ucm0 --device=/dev/infiniband/uverbs3 --device=/dev/infiniband/uverbs2 --device=/dev/infiniband/uverbs1 --device=/dev/infiniband/uverbs0 --device=/dev/infiniband/issm3 --device=/dev/infiniband/umad3 --device=/dev/infiniband/issm2 --device=/dev/infiniband/umad2 --device=/dev/infiniband/issm1 --device=/dev/infiniband/umad1 --device=/dev/infiniband/issm0 --device=/dev/infiniband/umad0' +elif [ "$node_type" = "dgx2h" ] ; then + echo "Running on dgx2h systems" + DGXSYSTEM="DGX2H" + DGXNGPU=16 + DGXSOCKETCORES=24 + DGXNSOCKET=2 + DGXHT=2 # HT is on is 2, HT off is 1 + DGXIBDEVICES='--device=/dev/infiniband/rdma_cm --device=/dev/infiniband/ucm10 --device=/dev/infiniband/ucm9 --device=/dev/infiniband/ucm8 --device=/dev/infiniband/ucm7 --device=/dev/infiniband/ucm4 --device=/dev/infiniband/ucm3 --device=/dev/infiniband/ucm2 --device=/dev/infiniband/ucm1 --device=/dev/infiniband/uverbs10 --device=/dev/infiniband/uverbs9 --device=/dev/infiniband/uverbs8 --device=/dev/infiniband/uverbs7 --device=/dev/infiniband/uverbs4 --device=/dev/infiniband/uverbs3 --device=/dev/infiniband/uverbs2 --device=/dev/infiniband/uverbs1 --device=/dev/infiniband/issm10 --device=/dev/infiniband/umad10 --device=/dev/infiniband/issm9 --device=/dev/infiniband/umad9 --device=/dev/infiniband/issm8 --device=/dev/infiniband/umad8 --device=/dev/infiniband/issm7 
--device=/dev/infiniband/umad7 --device=/dev/infiniband/issm4 --device=/dev/infiniband/umad4 --device=/dev/infiniband/issm3 --device=/dev/infiniband/umad3 --device=/dev/infiniband/issm2 --device=/dev/infiniband/umad2 --device=/dev/infiniband/issm1 --device=/dev/infiniband/umad1' +else + echo "Unknown , must be either dgx1 or dgx2" + exit -1 +fi + +printf -v EXTRA_PARAMS "%d %d %e %s 1 %d %d %d false" $train_batch_size $eval_batch_size $learning_rate "$precision" $warmup_proportion $train_steps $save_checkpoint_steps + +export ROOTDIR=$root_dir +export DATA_DIR=${DATA_DIR:-$CODEDIR/data/wikipedia_corpus/pyt_hdf5_shards} + +VOLS="-v $ROOTDIR:/workspace/bert" +VOLS+=" -v $DATA_DIR:/workspace/bert/data/wikipedia_corpus/pyt_hdf5_shards" +# VOLS+=" -v $BOOKS_DIR:/workspace/bert/data/bookcorpus/final_tfrecord_sharded" +VOLS+=" -v $results_dir:/results" +VOLS+=" -v $checkpoints_dir:/checkpoints" + +export VOLS +export CONT +export DGXSYSTEM +export DGXNGPU +export DGXIBDEVICES +export EXTRA_PARAMS + +set -x +cd $CODEDIR +pwd + +PART="" +if [ "$partition" != "default" ] ; then + printf -v PART "%s" "-p $partition" +fi + +export GBS=$(expr $num_nodes \* $batch_size \* $DGXNGPU) +printf -v TAG "%s_%dn_%s_gbs%d" "$job_name" $num_nodes "$precision" $GBS +export DATESTAMP=`date +'%y%m%d%H%M%S'` + +sbatch $PART \ + -N $num_nodes \ + -t $wall_time \ + -J $job_name \ + --exclusive \ + --mem=0 \ + --mail-type=FAIL \ + --ntasks-per-node=$DGXNGPU \ + --threads-per-core=$DGXHT \ + --cores-per-socket=$DGXSOCKETCORES \ + --output=$LOGDIR/$TAG.$DATESTAMP.log \ + $CODEDIR/scripts/run.sub +set +x + diff --git a/PyTorch/LanguageModeling/BERT/tokenization.py b/PyTorch/LanguageModeling/BERT/tokenization.py new file mode 100644 index 00000000..5f364385 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/tokenization.py @@ -0,0 +1,391 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
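tokenization.py below carries the WordPiece tokenizer that the fine-tuning scripts point at the bundled vocabulary (run_squad.sh, for example, defaults --vocab_file to /workspace/bert/vocab/vocab). A minimal usage sketch, assuming a local uncased vocab file in that format (the exact sub-word split depends on the vocabulary):

from tokenization import BertTokenizer

# Load the vocabulary and tokenize a question into WordPiece units.
tokenizer = BertTokenizer("vocab/vocab", do_lower_case=True)
tokens = tokenizer.tokenize("Where was BERT pretrained?")
# Out-of-vocabulary words are split into '##' continuation pieces,
# e.g. roughly ['where', 'was', 'bert', 'pre', '##train', '##ed', '?'].
input_ids = tokenizer.convert_tokens_to_ids(tokens)
round_trip = tokenizer.convert_ids_to_tokens(input_ids)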
+"""Tokenization classes.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import collections +import logging +import os +import unicodedata +import six +from io import open + +from file_utils import cached_path + +logger = logging.getLogger(__name__) + +PRETRAINED_VOCAB_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", +} +PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { + 'bert-base-uncased': 512, + 'bert-large-uncased': 512, + 'bert-base-cased': 512, + 'bert-large-cased': 512, + 'bert-base-multilingual-uncased': 512, + 'bert-base-multilingual-cased': 512, + 'bert-base-chinese': 512, +} +VOCAB_NAME = 'vocab.txt' + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + with open(vocab_file, "r", encoding="utf-8") as reader: + while True: + token = reader.readline() + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class BertTokenizer(object): + """Runs end-to-end tokenization: punctuation splitting + wordpiece""" + + def __init__(self, vocab_file, do_lower_case=True, max_len=None, + never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): + if not os.path.isfile(vocab_file): + raise ValueError( + "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " + "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict( + [(ids, tok) for tok, ids in self.vocab.items()]) + self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, + never_split=never_split) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + self.max_len = max_len if max_len is not None else int(1e12) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + return split_tokens + + def convert_tokens_to_ids(self, tokens): + """Converts a sequence of tokens into ids using the vocab.""" + ids = [] + for token in tokens: + ids.append(self.vocab[token]) + if len(ids) > self.max_len: + raise ValueError( + "Token indices sequence length is longer than the specified maximum " + " sequence length for this BERT model ({} > {}). Running this" + " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) + ) + return ids + + def convert_ids_to_tokens(self, ids): + """Converts a sequence of ids in wordpiece tokens using the vocab.""" + tokens = [] + for i in ids: + tokens.append(self.ids_to_tokens[i]) + return tokens + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): + """ + Instantiate a PreTrainedBertModel from a pre-trained model file. + Download and cache the pre-trained model file if needed. + """ + if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: + vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] + else: + vocab_file = pretrained_model_name_or_path + if os.path.isdir(vocab_file): + vocab_file = os.path.join(vocab_file, VOCAB_NAME) + # redirect to the cache, if necessary + try: + resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) + except EnvironmentError: + logger.error( + "Model name '{}' was not found in model name list ({}). " + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), + vocab_file)) + return None + if resolved_vocab_file == vocab_file: + logger.info("loading vocabulary file {}".format(vocab_file)) + else: + logger.info("loading vocabulary file {} from cache at {}".format( + vocab_file, resolved_vocab_file)) + if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: + # if we're using a pretrained model, ensure the tokenizer wont index sequences longer + # than the number of positional embeddings + max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] + kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) + # Instantiate tokenizer. + tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) + return tokenizer + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, + do_lower_case=True, + never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): + """Constructs a BasicTokenizer. + + Args: + do_lower_case: Whether to lower case the input. 
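+          never_split: Tokens that are never lower-cased or split on
+            punctuation (defaults to the special tokens in the signature above).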
+        """
+        self.do_lower_case = do_lower_case
+        self.never_split = never_split
+
+    def tokenize(self, text):
+        """Tokenizes a piece of text."""
+        text = self._clean_text(text)
+        # This was added on November 1st, 2018 for the multilingual and Chinese
+        # models. This is also applied to the English models now, but it doesn't
+        # matter since the English models were not trained on any Chinese data
+        # and generally don't have any Chinese data in them (there are Chinese
+        # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia).
+        text = self._tokenize_chinese_chars(text)
+        orig_tokens = whitespace_tokenize(text)
+        split_tokens = []
+        for token in orig_tokens:
+            if self.do_lower_case and token not in self.never_split:
+                token = token.lower()
+                token = self._run_strip_accents(token)
+            split_tokens.extend(self._run_split_on_punc(token))
+
+        output_tokens = whitespace_tokenize(" ".join(split_tokens))
+        return output_tokens
+
+    def _run_strip_accents(self, text):
+        """Strips accents from a piece of text."""
+        text = unicodedata.normalize("NFD", text)
+        output = []
+        for char in text:
+            cat = unicodedata.category(char)
+            if cat == "Mn":
+                continue
+            output.append(char)
+        return "".join(output)
+
+    def _run_split_on_punc(self, text):
+        """Splits punctuation on a piece of text."""
+        if text in self.never_split:
+            return [text]
+        chars = list(text)
+        i = 0
+        start_new_word = True
+        output = []
+        while i < len(chars):
+            char = chars[i]
+            if _is_punctuation(char):
+                output.append([char])
+                start_new_word = True
+            else:
+                if start_new_word:
+                    output.append([])
+                start_new_word = False
+                output[-1].append(char)
+            i += 1
+
+        return ["".join(x) for x in output]
+
+    def _tokenize_chinese_chars(self, text):
+        """Adds whitespace around any CJK character."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if self._is_chinese_char(cp):
+                output.append(" ")
+                output.append(char)
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+    def _is_chinese_char(self, cp):
+        """Checks whether CP is the codepoint of a CJK character."""
+        # This defines a "chinese character" as anything in the CJK Unicode block:
+        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+        #
+        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+        # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
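+        # The ranges below cover the main CJK Unified Ideographs block, its
+        # extensions, and the CJK Compatibility Ideographs blocks.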
+        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
+                (cp >= 0x3400 and cp <= 0x4DBF) or  #
+                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
+                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
+                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
+                (cp >= 0x2B820 and cp <= 0x2CEAF) or
+                (cp >= 0xF900 and cp <= 0xFAFF) or  #
+                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
+            return True
+
+        return False
+
+    def _clean_text(self, text):
+        """Performs invalid character removal and whitespace cleanup on text."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if cp == 0 or cp == 0xfffd or _is_control(char):
+                continue
+            if _is_whitespace(char):
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+
+class WordpieceTokenizer(object):
+    """Runs WordPiece tokenization."""
+
+    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
+        self.vocab = vocab
+        self.unk_token = unk_token
+        self.max_input_chars_per_word = max_input_chars_per_word
+
+    def tokenize(self, text):
+        """Tokenizes a piece of text into its word pieces.
+
+        This uses a greedy longest-match-first algorithm to perform tokenization
+        using the given vocabulary.
+
+        For example:
+          input = "unaffable"
+          output = ["un", "##aff", "##able"]
+
+        Args:
+          text: A single token or whitespace separated tokens. This should have
+            already been passed through `BasicTokenizer`.
+
+        Returns:
+          A list of wordpiece tokens.
+        """
+
+        output_tokens = []
+        for token in whitespace_tokenize(text):
+            chars = list(token)
+            if len(chars) > self.max_input_chars_per_word:
+                output_tokens.append(self.unk_token)
+                continue
+
+            is_bad = False
+            start = 0
+            sub_tokens = []
+            while start < len(chars):
+                end = len(chars)
+                cur_substr = None
+                while start < end:
+                    substr = "".join(chars[start:end])
+                    if start > 0:
+                        substr = "##" + substr
+                    if substr in self.vocab:
+                        cur_substr = substr
+                        break
+                    end -= 1
+                if cur_substr is None:
+                    is_bad = True
+                    break
+                sub_tokens.append(cur_substr)
+                start = end
+
+            if is_bad:
+                output_tokens.append(self.unk_token)
+            else:
+                output_tokens.extend(sub_tokens)
+        return output_tokens
+
+
+def _is_whitespace(char):
+    """Checks whether `char` is a whitespace character."""
+    # \t, \n, and \r are technically control characters but we treat them
+    # as whitespace since they are generally considered as such.
+    if char == " " or char == "\t" or char == "\n" or char == "\r":
+        return True
+    cat = unicodedata.category(char)
+    if cat == "Zs":
+        return True
+    return False
+
+
+def _is_control(char):
+    """Checks whether `char` is a control character."""
+    # These are technically control characters but we count them as whitespace
+    # characters.
+    if char == "\t" or char == "\n" or char == "\r":
+        return False
+    cat = unicodedata.category(char)
+    if cat.startswith("C"):
+        return True
+    return False
+
+
+def _is_punctuation(char):
+    """Checks whether `char` is a punctuation character."""
+    cp = ord(char)
+    # We treat all non-letter/number ASCII as punctuation.
+    # Characters such as "^", "$", and "`" are not in the Unicode
+    # Punctuation class but we treat them as punctuation anyway, for
+    # consistency.
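+    # The ranges 33-47, 58-64, 91-96 and 123-126 are exactly the printable,
+    # non-alphanumeric, non-space ASCII characters.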
+ if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False diff --git a/PyTorch/LanguageModeling/BERT/vocab/download_models.py b/PyTorch/LanguageModeling/BERT/vocab/download_models.py new file mode 100644 index 00000000..e671c194 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/vocab/download_models.py @@ -0,0 +1,123 @@ +# NVIDIA + +import hashlib +import urllib.request +import zipfile + +# Download urls +model_urls = { + 'bert_base_uncased' : ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'), + 'bert_large_uncased' : ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'), + 'bert_base_cased' : ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'), + 'bert_large_cased' : ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'), + 'bert_base_multilingual_cased' : ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'), + 'bert_large_multilingual_uncased' : ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'), + 'bert_base_chinese' : ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip') +} + +# SHA256sum verification for file download integrity (and checking for changes from the download source over time) +bert_base_uncased_sha = { + 'bert_config.json' : '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc', + 'bert_model.ckpt.data-00000-of-00001' : '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84', + 'bert_model.ckpt.index' : '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b', + 'bert_model.ckpt.meta' : 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e', + 'vocab.txt' : '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', +} + +bert_large_uncased_sha = { + 'bert_config.json' : 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb', + 'bert_model.ckpt.data-00000-of-00001' : 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1', + 'bert_model.ckpt.index' : '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093', + 'bert_model.ckpt.meta' : '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1', + 'vocab.txt' : '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', +} + +bert_base_cased_sha = { + 'bert_config.json' : 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc', + 'bert_model.ckpt.data-00000-of-00001' : '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea', + 'bert_model.ckpt.index' : '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1', + 'bert_model.ckpt.meta' : '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98', + 'vocab.txt' : 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', +} + +bert_large_cased_sha = { + 'bert_config.json' : '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57', + 'bert_model.ckpt.data-00000-of-00001' : '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0', + 'bert_model.ckpt.index' : 
'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf', + 'bert_model.ckpt.meta' : 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1', + 'vocab.txt' : 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', +} + +bert_base_multilingual_cased_sha = { + 'bert_config.json' : 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0', + 'bert_model.ckpt.data-00000-of-00001' : '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5', + 'bert_model.ckpt.index' : '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37', + 'bert_model.ckpt.meta' : '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa', + 'vocab.txt' : 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c', +} + +bert_large_multilingual_uncased_sha = { + 'bert_config.json' : '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624', + 'bert_model.ckpt.data-00000-of-00001' : '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429', + 'bert_model.ckpt.index' : '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7', + 'bert_model.ckpt.meta' : '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29', + 'vocab.txt' : '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f', +} + +bert_base_chinese_sha = { + 'bert_config.json' : '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015', + 'bert_model.ckpt.data-00000-of-00001' : '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba', + 'bert_model.ckpt.index' : '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e', + 'bert_model.ckpt.meta' : 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047', + 'vocab.txt' : '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c', +} + +# Relate SHA to urls for loop below +model_sha = { + 'bert_base_uncased' : bert_base_uncased_sha, + 'bert_large_uncased' : bert_large_uncased_sha, + 'bert_base_cased' : bert_base_cased_sha, + 'bert_large_cased' : bert_large_cased_sha, + 'bert_base_multilingual_cased' : bert_base_multilingual_cased_sha, + 'bert_large_multilingual_uncased' : bert_large_multilingual_uncased_sha, + 'bert_base_chinese' : bert_base_chinese_sha +} + +# Helper to get sha256sum of a file +def sha256sum(filename): + h = hashlib.sha256() + b = bytearray(128*1024) + mv = memoryview(b) + with open(filename, 'rb', buffering=0) as f: + for n in iter(lambda : f.readinto(mv), 0): + h.update(mv[:n]) + return h.hexdigest() + +# Iterate over urls: download, unzip, verify sha256sum +found_mismatch_sha = False +for model in model_urls: + url = model_urls[model][0] + file = model_urls[model][1] + + print("Downloading", url) + response = urllib.request.urlopen(url) + with open(file, "wb") as handle: + handle.write(response.read()) + + print("Unzipping", file) + zip = zipfile.ZipFile(file, 'r') + zip.extractall() + zip.close() + + sha_dict = model_sha[model] + for extracted_file in sha_dict: + sha = sha_dict[extracted_file] + if sha != sha256sum(file[:-4] + "/" + extracted_file): + found_mismatch_sha = True + print("SHA256sum does not match on file:", extracted_file, "from download url:", url) + else: + print(file[:-4] + "/" + extracted_file, "\t", "verified") + +if not found_mismatch_sha: + print("All downloads pass sha256sum verification.") + diff --git a/PyTorch/LanguageModeling/BERT/vocab/vocab b/PyTorch/LanguageModeling/BERT/vocab/vocab new file mode 100644 index 00000000..fb140275 --- /dev/null +++ b/PyTorch/LanguageModeling/BERT/vocab/vocab @@ -0,0 +1,30522 
@@ +[PAD] +[unused0] +[unused1] +[unused2] +[unused3] +[unused4] +[unused5] +[unused6] +[unused7] +[unused8] +[unused9] +[unused10] +[unused11] +[unused12] +[unused13] +[unused14] +[unused15] +[unused16] +[unused17] +[unused18] +[unused19] +[unused20] +[unused21] +[unused22] +[unused23] +[unused24] +[unused25] +[unused26] +[unused27] +[unused28] +[unused29] +[unused30] +[unused31] +[unused32] +[unused33] +[unused34] +[unused35] +[unused36] +[unused37] +[unused38] +[unused39] +[unused40] +[unused41] +[unused42] +[unused43] +[unused44] +[unused45] +[unused46] +[unused47] +[unused48] +[unused49] +[unused50] +[unused51] +[unused52] +[unused53] +[unused54] +[unused55] +[unused56] +[unused57] +[unused58] +[unused59] +[unused60] +[unused61] +[unused62] +[unused63] +[unused64] +[unused65] +[unused66] +[unused67] +[unused68] +[unused69] +[unused70] +[unused71] +[unused72] +[unused73] +[unused74] +[unused75] +[unused76] +[unused77] +[unused78] +[unused79] +[unused80] +[unused81] +[unused82] +[unused83] +[unused84] +[unused85] +[unused86] +[unused87] +[unused88] +[unused89] +[unused90] +[unused91] +[unused92] +[unused93] +[unused94] +[unused95] +[unused96] +[unused97] +[unused98] +[UNK] +[CLS] +[SEP] +[MASK] +[unused99] +[unused100] +[unused101] +[unused102] +[unused103] +[unused104] +[unused105] +[unused106] +[unused107] +[unused108] +[unused109] +[unused110] +[unused111] +[unused112] +[unused113] +[unused114] +[unused115] +[unused116] +[unused117] +[unused118] +[unused119] +[unused120] +[unused121] +[unused122] +[unused123] +[unused124] +[unused125] +[unused126] +[unused127] +[unused128] +[unused129] +[unused130] +[unused131] +[unused132] +[unused133] +[unused134] +[unused135] +[unused136] +[unused137] +[unused138] +[unused139] +[unused140] +[unused141] +[unused142] +[unused143] +[unused144] +[unused145] +[unused146] +[unused147] +[unused148] +[unused149] +[unused150] +[unused151] +[unused152] +[unused153] +[unused154] +[unused155] +[unused156] +[unused157] +[unused158] +[unused159] +[unused160] +[unused161] +[unused162] +[unused163] +[unused164] +[unused165] +[unused166] +[unused167] +[unused168] +[unused169] +[unused170] +[unused171] +[unused172] +[unused173] +[unused174] +[unused175] +[unused176] +[unused177] +[unused178] +[unused179] +[unused180] +[unused181] +[unused182] +[unused183] +[unused184] +[unused185] +[unused186] +[unused187] +[unused188] +[unused189] +[unused190] +[unused191] +[unused192] +[unused193] +[unused194] +[unused195] +[unused196] +[unused197] +[unused198] +[unused199] +[unused200] +[unused201] +[unused202] +[unused203] +[unused204] +[unused205] +[unused206] +[unused207] +[unused208] +[unused209] +[unused210] +[unused211] +[unused212] +[unused213] +[unused214] +[unused215] +[unused216] +[unused217] +[unused218] +[unused219] +[unused220] +[unused221] +[unused222] +[unused223] +[unused224] +[unused225] +[unused226] +[unused227] +[unused228] +[unused229] +[unused230] +[unused231] +[unused232] +[unused233] +[unused234] +[unused235] +[unused236] +[unused237] +[unused238] +[unused239] +[unused240] +[unused241] +[unused242] +[unused243] +[unused244] +[unused245] +[unused246] +[unused247] +[unused248] +[unused249] +[unused250] +[unused251] +[unused252] +[unused253] +[unused254] +[unused255] +[unused256] +[unused257] +[unused258] +[unused259] +[unused260] +[unused261] +[unused262] +[unused263] +[unused264] +[unused265] +[unused266] +[unused267] +[unused268] +[unused269] +[unused270] +[unused271] +[unused272] +[unused273] +[unused274] +[unused275] +[unused276] +[unused277] 
+[unused278] +[unused279] +[unused280] +[unused281] +[unused282] +[unused283] +[unused284] +[unused285] +[unused286] +[unused287] +[unused288] +[unused289] +[unused290] +[unused291] +[unused292] +[unused293] +[unused294] +[unused295] +[unused296] +[unused297] +[unused298] +[unused299] +[unused300] +[unused301] +[unused302] +[unused303] +[unused304] +[unused305] +[unused306] +[unused307] +[unused308] +[unused309] +[unused310] +[unused311] +[unused312] +[unused313] +[unused314] +[unused315] +[unused316] +[unused317] +[unused318] +[unused319] +[unused320] +[unused321] +[unused322] +[unused323] +[unused324] +[unused325] +[unused326] +[unused327] +[unused328] +[unused329] +[unused330] +[unused331] +[unused332] +[unused333] +[unused334] +[unused335] +[unused336] +[unused337] +[unused338] +[unused339] +[unused340] +[unused341] +[unused342] +[unused343] +[unused344] +[unused345] +[unused346] +[unused347] +[unused348] +[unused349] +[unused350] +[unused351] +[unused352] +[unused353] +[unused354] +[unused355] +[unused356] +[unused357] +[unused358] +[unused359] +[unused360] +[unused361] +[unused362] +[unused363] +[unused364] +[unused365] +[unused366] +[unused367] +[unused368] +[unused369] +[unused370] +[unused371] +[unused372] +[unused373] +[unused374] +[unused375] +[unused376] +[unused377] +[unused378] +[unused379] +[unused380] +[unused381] +[unused382] +[unused383] +[unused384] +[unused385] +[unused386] +[unused387] +[unused388] +[unused389] +[unused390] +[unused391] +[unused392] +[unused393] +[unused394] +[unused395] +[unused396] +[unused397] +[unused398] +[unused399] +[unused400] +[unused401] +[unused402] +[unused403] +[unused404] +[unused405] +[unused406] +[unused407] +[unused408] +[unused409] +[unused410] +[unused411] +[unused412] +[unused413] +[unused414] +[unused415] +[unused416] +[unused417] +[unused418] +[unused419] +[unused420] +[unused421] +[unused422] +[unused423] +[unused424] +[unused425] +[unused426] +[unused427] +[unused428] +[unused429] +[unused430] +[unused431] +[unused432] +[unused433] +[unused434] +[unused435] +[unused436] +[unused437] +[unused438] +[unused439] +[unused440] +[unused441] +[unused442] +[unused443] +[unused444] +[unused445] +[unused446] +[unused447] +[unused448] +[unused449] +[unused450] +[unused451] +[unused452] +[unused453] +[unused454] +[unused455] +[unused456] +[unused457] +[unused458] +[unused459] +[unused460] +[unused461] +[unused462] +[unused463] +[unused464] +[unused465] +[unused466] +[unused467] +[unused468] +[unused469] +[unused470] +[unused471] +[unused472] +[unused473] +[unused474] +[unused475] +[unused476] +[unused477] +[unused478] +[unused479] +[unused480] +[unused481] +[unused482] +[unused483] +[unused484] +[unused485] +[unused486] +[unused487] +[unused488] +[unused489] +[unused490] +[unused491] +[unused492] +[unused493] +[unused494] +[unused495] +[unused496] +[unused497] +[unused498] +[unused499] +[unused500] +[unused501] +[unused502] +[unused503] +[unused504] +[unused505] +[unused506] +[unused507] +[unused508] +[unused509] +[unused510] +[unused511] +[unused512] +[unused513] +[unused514] +[unused515] +[unused516] +[unused517] +[unused518] +[unused519] +[unused520] +[unused521] +[unused522] +[unused523] +[unused524] +[unused525] +[unused526] +[unused527] +[unused528] +[unused529] +[unused530] +[unused531] +[unused532] +[unused533] +[unused534] +[unused535] +[unused536] +[unused537] +[unused538] +[unused539] +[unused540] +[unused541] +[unused542] +[unused543] +[unused544] +[unused545] +[unused546] +[unused547] +[unused548] +[unused549] +[unused550] 
+[unused551] +[unused552] +[unused553] +[unused554] +[unused555] +[unused556] +[unused557] +[unused558] +[unused559] +[unused560] +[unused561] +[unused562] +[unused563] +[unused564] +[unused565] +[unused566] +[unused567] +[unused568] +[unused569] +[unused570] +[unused571] +[unused572] +[unused573] +[unused574] +[unused575] +[unused576] +[unused577] +[unused578] +[unused579] +[unused580] +[unused581] +[unused582] +[unused583] +[unused584] +[unused585] +[unused586] +[unused587] +[unused588] +[unused589] +[unused590] +[unused591] +[unused592] +[unused593] +[unused594] +[unused595] +[unused596] +[unused597] +[unused598] +[unused599] +[unused600] +[unused601] +[unused602] +[unused603] +[unused604] +[unused605] +[unused606] +[unused607] +[unused608] +[unused609] +[unused610] +[unused611] +[unused612] +[unused613] +[unused614] +[unused615] +[unused616] +[unused617] +[unused618] +[unused619] +[unused620] +[unused621] +[unused622] +[unused623] +[unused624] +[unused625] +[unused626] +[unused627] +[unused628] +[unused629] +[unused630] +[unused631] +[unused632] +[unused633] +[unused634] +[unused635] +[unused636] +[unused637] +[unused638] +[unused639] +[unused640] +[unused641] +[unused642] +[unused643] +[unused644] +[unused645] +[unused646] +[unused647] +[unused648] +[unused649] +[unused650] +[unused651] +[unused652] +[unused653] +[unused654] +[unused655] +[unused656] +[unused657] +[unused658] +[unused659] +[unused660] +[unused661] +[unused662] +[unused663] +[unused664] +[unused665] +[unused666] +[unused667] +[unused668] +[unused669] +[unused670] +[unused671] +[unused672] +[unused673] +[unused674] +[unused675] +[unused676] +[unused677] +[unused678] +[unused679] +[unused680] +[unused681] +[unused682] +[unused683] +[unused684] +[unused685] +[unused686] +[unused687] +[unused688] +[unused689] +[unused690] +[unused691] +[unused692] +[unused693] +[unused694] +[unused695] +[unused696] +[unused697] +[unused698] +[unused699] +[unused700] +[unused701] +[unused702] +[unused703] +[unused704] +[unused705] +[unused706] +[unused707] +[unused708] +[unused709] +[unused710] +[unused711] +[unused712] +[unused713] +[unused714] +[unused715] +[unused716] +[unused717] +[unused718] +[unused719] +[unused720] +[unused721] +[unused722] +[unused723] +[unused724] +[unused725] +[unused726] +[unused727] +[unused728] +[unused729] +[unused730] +[unused731] +[unused732] +[unused733] +[unused734] +[unused735] +[unused736] +[unused737] +[unused738] +[unused739] +[unused740] +[unused741] +[unused742] +[unused743] +[unused744] +[unused745] +[unused746] +[unused747] +[unused748] +[unused749] +[unused750] +[unused751] +[unused752] +[unused753] +[unused754] +[unused755] +[unused756] +[unused757] +[unused758] +[unused759] +[unused760] +[unused761] +[unused762] +[unused763] +[unused764] +[unused765] +[unused766] +[unused767] +[unused768] +[unused769] +[unused770] +[unused771] +[unused772] +[unused773] +[unused774] +[unused775] +[unused776] +[unused777] +[unused778] +[unused779] +[unused780] +[unused781] +[unused782] +[unused783] +[unused784] +[unused785] +[unused786] +[unused787] +[unused788] +[unused789] +[unused790] +[unused791] +[unused792] +[unused793] +[unused794] +[unused795] +[unused796] +[unused797] +[unused798] +[unused799] +[unused800] +[unused801] +[unused802] +[unused803] +[unused804] +[unused805] +[unused806] +[unused807] +[unused808] +[unused809] +[unused810] +[unused811] +[unused812] +[unused813] +[unused814] +[unused815] +[unused816] +[unused817] +[unused818] +[unused819] +[unused820] +[unused821] +[unused822] +[unused823] 
+[unused824] +[unused825] +[unused826] +[unused827] +[unused828] +[unused829] +[unused830] +[unused831] +[unused832] +[unused833] +[unused834] +[unused835] +[unused836] +[unused837] +[unused838] +[unused839] +[unused840] +[unused841] +[unused842] +[unused843] +[unused844] +[unused845] +[unused846] +[unused847] +[unused848] +[unused849] +[unused850] +[unused851] +[unused852] +[unused853] +[unused854] +[unused855] +[unused856] +[unused857] +[unused858] +[unused859] +[unused860] +[unused861] +[unused862] +[unused863] +[unused864] +[unused865] +[unused866] +[unused867] +[unused868] +[unused869] +[unused870] +[unused871] +[unused872] +[unused873] +[unused874] +[unused875] +[unused876] +[unused877] +[unused878] +[unused879] +[unused880] +[unused881] +[unused882] +[unused883] +[unused884] +[unused885] +[unused886] +[unused887] +[unused888] +[unused889] +[unused890] +[unused891] +[unused892] +[unused893] +[unused894] +[unused895] +[unused896] +[unused897] +[unused898] +[unused899] +[unused900] +[unused901] +[unused902] +[unused903] +[unused904] +[unused905] +[unused906] +[unused907] +[unused908] +[unused909] +[unused910] +[unused911] +[unused912] +[unused913] +[unused914] +[unused915] +[unused916] +[unused917] +[unused918] +[unused919] +[unused920] +[unused921] +[unused922] +[unused923] +[unused924] +[unused925] +[unused926] +[unused927] +[unused928] +[unused929] +[unused930] +[unused931] +[unused932] +[unused933] +[unused934] +[unused935] +[unused936] +[unused937] +[unused938] +[unused939] +[unused940] +[unused941] +[unused942] +[unused943] +[unused944] +[unused945] +[unused946] +[unused947] +[unused948] +[unused949] +[unused950] +[unused951] +[unused952] +[unused953] +[unused954] +[unused955] +[unused956] +[unused957] +[unused958] +[unused959] +[unused960] +[unused961] +[unused962] +[unused963] +[unused964] +[unused965] +[unused966] +[unused967] +[unused968] +[unused969] +[unused970] +[unused971] +[unused972] +[unused973] +[unused974] +[unused975] +[unused976] +[unused977] +[unused978] +[unused979] +[unused980] +[unused981] +[unused982] +[unused983] +[unused984] +[unused985] +[unused986] +[unused987] +[unused988] +[unused989] +[unused990] +[unused991] +[unused992] +[unused993] +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? 
+@ +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +¡ +¢ +£ +¤ +¥ +¦ +§ +¨ +© +ª +« +¬ +® +° +± +² +³ +´ +µ +¶ +· +¹ +º +» +¼ +½ +¾ +¿ +× +ß +æ +ð +÷ +ø +þ +đ +ħ +ı +ł +ŋ +œ +ƒ +ɐ +ɑ +ɒ +ɔ +ɕ +ə +ɛ +ɡ +ɣ +ɨ +ɪ +ɫ +ɬ +ɯ +ɲ +ɴ +ɹ +ɾ +ʀ +ʁ +ʂ +ʃ +ʉ +ʊ +ʋ +ʌ +ʎ +ʐ +ʑ +ʒ +ʔ +ʰ +ʲ +ʳ +ʷ +ʸ +ʻ +ʼ +ʾ +ʿ +ˈ +ː +ˡ +ˢ +ˣ +ˤ +α +β +γ +δ +ε +ζ +η +θ +ι +κ +λ +μ +ν +ξ +ο +π +ρ +ς +σ +τ +υ +φ +χ +ψ +ω +а +б +в +г +д +е +ж +з +и +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +щ +ъ +ы +ь +э +ю +я +ђ +є +і +ј +љ +њ +ћ +ӏ +ա +բ +գ +դ +ե +թ +ի +լ +կ +հ +մ +յ +ն +ո +պ +ս +վ +տ +ր +ւ +ք +־ +א +ב +ג +ד +ה +ו +ז +ח +ט +י +ך +כ +ל +ם +מ +ן +נ +ס +ע +ף +פ +ץ +צ +ק +ר +ש +ת +، +ء +ا +ب +ة +ت +ث +ج +ح +خ +د +ذ +ر +ز +س +ش +ص +ض +ط +ظ +ع +غ +ـ +ف +ق +ك +ل +م +ن +ه +و +ى +ي +ٹ +پ +چ +ک +گ +ں +ھ +ہ +ی +ے +अ +आ +उ +ए +क +ख +ग +च +ज +ट +ड +ण +त +थ +द +ध +न +प +ब +भ +म +य +र +ल +व +श +ष +स +ह +ा +ि +ी +ो +। +॥ +ং +অ +আ +ই +উ +এ +ও +ক +খ +গ +চ +ছ +জ +ট +ড +ণ +ত +থ +দ +ধ +ন +প +ব +ভ +ম +য +র +ল +শ +ষ +স +হ +া +ি +ী +ে +க +ச +ட +த +ந +ன +ப +ம +ய +ர +ல +ள +வ +ா +ி +ு +ே +ை +ನ +ರ +ಾ +ක +ය +ර +ල +ව +ා +ก +ง +ต +ท +น +พ +ม +ย +ร +ล +ว +ส +อ +า +เ +་ +། +ག +ང +ད +ན +པ +བ +མ +འ +ར +ལ +ས +မ +ა +ბ +გ +დ +ე +ვ +თ +ი +კ +ლ +მ +ნ +ო +რ +ს +ტ +უ +ᄀ +ᄂ +ᄃ +ᄅ +ᄆ +ᄇ +ᄉ +ᄊ +ᄋ +ᄌ +ᄎ +ᄏ +ᄐ +ᄑ +ᄒ +ᅡ +ᅢ +ᅥ +ᅦ +ᅧ +ᅩ +ᅪ +ᅭ +ᅮ +ᅯ +ᅲ +ᅳ +ᅴ +ᅵ +ᆨ +ᆫ +ᆯ +ᆷ +ᆸ +ᆼ +ᴬ +ᴮ +ᴰ +ᴵ +ᴺ +ᵀ +ᵃ +ᵇ +ᵈ +ᵉ +ᵍ +ᵏ +ᵐ +ᵒ +ᵖ +ᵗ +ᵘ +ᵢ +ᵣ +ᵤ +ᵥ +ᶜ +ᶠ +‐ +‑ +‒ +– +— +― +‖ +‘ +’ +‚ +“ +” +„ +† +‡ +• +… +‰ +′ +″ +› +‿ +⁄ +⁰ +ⁱ +⁴ +⁵ +⁶ +⁷ +⁸ +⁹ +⁺ +⁻ +ⁿ +₀ +₁ +₂ +₃ +₄ +₅ +₆ +₇ +₈ +₉ +₊ +₍ +₎ +ₐ +ₑ +ₒ +ₓ +ₕ +ₖ +ₗ +ₘ +ₙ +ₚ +ₛ +ₜ +₤ +₩ +€ +₱ +₹ +ℓ +№ +ℝ +™ +⅓ +⅔ +← +↑ +→ +↓ +↔ +↦ +⇄ +⇌ +⇒ +∂ +∅ +∆ +∇ +∈ +− +∗ +∘ +√ +∞ +∧ +∨ +∩ +∪ +≈ +≡ +≤ +≥ +⊂ +⊆ +⊕ +⊗ +⋅ +─ +│ +■ +▪ +● +★ +☆ +☉ +♠ +♣ +♥ +♦ +♭ +♯ +⟨ +⟩ +ⱼ +⺩ +⺼ +⽥ +、 +。 +〈 +〉 +《 +》 +「 +」 +『 +』 +〜 +あ +い +う +え +お +か +き +く +け +こ +さ +し +す +せ +そ +た +ち +っ +つ +て +と +な +に +ぬ +ね +の +は +ひ +ふ +へ +ほ +ま +み +む +め +も +や +ゆ +よ +ら +り +る +れ +ろ +を +ん +ァ +ア +ィ +イ +ウ +ェ +エ +オ +カ +キ +ク +ケ +コ +サ +シ +ス +セ +タ +チ +ッ +ツ +テ +ト +ナ +ニ +ノ +ハ +ヒ +フ +ヘ +ホ +マ +ミ +ム +メ +モ +ャ +ュ +ョ +ラ +リ +ル +レ +ロ +ワ +ン +・ +ー +一 +三 +上 +下 +不 +世 +中 +主 +久 +之 +也 +事 +二 +五 +井 +京 +人 +亻 +仁 +介 +代 +仮 +伊 +会 +佐 +侍 +保 +信 +健 +元 +光 +八 +公 +内 +出 +分 +前 +劉 +力 +加 +勝 +北 +区 +十 +千 +南 +博 +原 +口 +古 +史 +司 +合 +吉 +同 +名 +和 +囗 +四 +国 +國 +土 +地 +坂 +城 +堂 +場 +士 +夏 +外 +大 +天 +太 +夫 +奈 +女 +子 +学 +宀 +宇 +安 +宗 +定 +宣 +宮 +家 +宿 +寺 +將 +小 +尚 +山 +岡 +島 +崎 +川 +州 +巿 +帝 +平 +年 +幸 +广 +弘 +張 +彳 +後 +御 +德 +心 +忄 +志 +忠 +愛 +成 +我 +戦 +戸 +手 +扌 +政 +文 +新 +方 +日 +明 +星 +春 +昭 +智 +曲 +書 +月 +有 +朝 +木 +本 +李 +村 +東 +松 +林 +森 +楊 +樹 +橋 +歌 +止 +正 +武 +比 +氏 +民 +水 +氵 +氷 +永 +江 +沢 +河 +治 +法 +海 +清 +漢 +瀬 +火 +版 +犬 +王 +生 +田 +男 +疒 +発 +白 +的 +皇 +目 +相 +省 +真 +石 +示 +社 +神 +福 +禾 +秀 +秋 +空 +立 +章 +竹 +糹 +美 +義 +耳 +良 +艹 +花 +英 +華 +葉 +藤 +行 +街 +西 +見 +訁 +語 +谷 +貝 +貴 +車 +軍 +辶 +道 +郎 +郡 +部 +都 +里 +野 +金 +鈴 +镇 +長 +門 +間 +阝 +阿 +陳 +陽 +雄 +青 +面 +風 +食 +香 +馬 +高 +龍 +龸 +fi +fl +! +( +) +, +- +. +/ +: +? 
+~ +the +of +and +in +to +was +he +is +as +for +on +with +that +it +his +by +at +from +her +##s +she +you +had +an +were +but +be +this +are +not +my +they +one +which +or +have +him +me +first +all +also +their +has +up +who +out +been +when +after +there +into +new +two +its +##a +time +would +no +what +about +said +we +over +then +other +so +more +##e +can +if +like +back +them +only +some +could +##i +where +just +##ing +during +before +##n +do +##o +made +school +through +than +now +years +most +world +may +between +down +well +three +##d +year +while +will +##ed +##r +##y +later +##t +city +under +around +did +such +being +used +state +people +part +know +against +your +many +second +university +both +national +##er +these +don +known +off +way +until +re +how +even +get +head +... +didn +##ly +team +american +because +de +##l +born +united +film +since +still +long +work +south +us +became +any +high +again +day +family +see +right +man +eyes +house +season +war +states +including +took +life +north +same +each +called +name +much +place +however +go +four +group +another +found +won +area +here +going +10 +away +series +left +home +music +best +make +hand +number +company +several +never +last +john +000 +very +album +take +end +good +too +following +released +game +played +little +began +district +##m +old +want +those +side +held +own +early +county +ll +league +use +west +##u +face +think +##es +2010 +government +##h +march +came +small +general +town +june +##on +line +based +something +##k +september +thought +looked +along +international +2011 +air +july +club +went +january +october +our +august +april +york +12 +few +2012 +2008 +east +show +member +college +2009 +father +public +##us +come +men +five +set +station +church +##c +next +former +november +room +party +located +december +2013 +age +got +2007 +##g +system +let +love +2006 +though +every +2014 +look +song +water +century +without +body +black +night +within +great +women +single +ve +building +large +population +river +named +band +white +started +##an +once +15 +20 +should +18 +2015 +service +top +built +british +open +death +king +moved +local +times +children +february +book +why +11 +door +need +president +order +final +road +wasn +although +due +major +died +village +third +knew +2016 +asked +turned +st +wanted +say +##p +together +received +main +son +served +different +##en +behind +himself +felt +members +power +football +law +voice +play +##in +near +park +history +30 +having +2005 +16 +##man +saw +mother +##al +army +point +front +help +english +street +art +late +hands +games +award +##ia +young +14 +put +published +country +division +across +told +13 +often +ever +french +london +center +six +red +2017 +led +days +include +light +25 +find +tell +among +species +really +according +central +half +2004 +form +original +gave +office +making +enough +lost +full +opened +must +included +live +given +german +player +run +business +woman +community +cup +might +million +land +2000 +court +development +17 +short +round +ii +km +seen +class +story +always +become +sure +research +almost +director +council +la +##2 +career +things +using +island +##z +couldn +car +##is +24 +close +force +##1 +better +free +support +control +field +students +2003 +education +married +##b +nothing +worked +others +record +big +inside +level +anything +continued +give +james +##3 +military +established +non +returned +feel +does +title +written +thing +feet +william +far +co +association +hard +already +2002 +##ra +championship 
+human +western +100 +##na +department +hall +role +various +production +21 +19 +heart +2001 +living +fire +version +##ers +##f +television +royal +##4 +produced +working +act +case +society +region +present +radio +period +looking +least +total +keep +england +wife +program +per +brother +mind +special +22 +##le +am +works +soon +##6 +political +george +services +taken +created +##7 +further +able +reached +david +union +joined +upon +done +important +social +information +either +##ic +##x +appeared +position +ground +lead +rock +dark +election +23 +board +france +hair +course +arms +site +police +girl +instead +real +sound +##v +words +moment +##te +someone +##8 +summer +project +announced +san +less +wrote +past +followed +##5 +blue +founded +al +finally +india +taking +records +america +##ne +1999 +design +considered +northern +god +stop +battle +toward +european +outside +described +track +today +playing +language +28 +call +26 +heard +professional +low +australia +miles +california +win +yet +green +##ie +trying +blood +##ton +southern +science +maybe +everything +match +square +27 +mouth +video +race +recorded +leave +above +##9 +daughter +points +space +1998 +museum +change +middle +common +##0 +move +tv +post +##ta +lake +seven +tried +elected +closed +ten +paul +minister +##th +months +start +chief +return +canada +person +sea +release +similar +modern +brought +rest +hit +formed +mr +##la +1997 +floor +event +doing +thomas +1996 +robert +care +killed +training +star +week +needed +turn +finished +railway +rather +news +health +sent +example +ran +term +michael +coming +currently +yes +forces +despite +gold +areas +50 +stage +fact +29 +dead +says +popular +2018 +originally +germany +probably +developed +result +pulled +friend +stood +money +running +mi +signed +word +songs +child +eventually +met +tour +average +teams +minutes +festival +current +deep +kind +1995 +decided +usually +eastern +seemed +##ness +episode +bed +added +table +indian +private +charles +route +available +idea +throughout +centre +addition +appointed +style +1994 +books +eight +construction +press +mean +wall +friends +remained +schools +study +##ch +##um +institute +oh +chinese +sometimes +events +possible +1992 +australian +type +brown +forward +talk +process +food +debut +seat +performance +committee +features +character +arts +herself +else +lot +strong +russian +range +hours +peter +arm +##da +morning +dr +sold +##ry +quickly +directed +1993 +guitar +china +##w +31 +list +##ma +performed +media +uk +players +smile +##rs +myself +40 +placed +coach +province +towards +wouldn +leading +whole +boy +official +designed +grand +census +##el +europe +attack +japanese +henry +1991 +##re +##os +cross +getting +alone +action +lower +network +wide +washington +japan +1990 +hospital +believe +changed +sister +##ar +hold +gone +sir +hadn +ship +##ka +studies +academy +shot +rights +below +base +bad +involved +kept +largest +##ist +bank +future +especially +beginning +mark +movement +section +female +magazine +plan +professor +lord +longer +##ian +sat +walked +hill +actually +civil +energy +model +families +size +thus +aircraft +completed +includes +data +captain +##or +fight +vocals +featured +richard +bridge +fourth +1989 +officer +stone +hear +##ism +means +medical +groups +management +self +lips +competition +entire +lived +technology +leaving +federal +tournament +bit +passed +hot +independent +awards +kingdom +mary +spent +fine +doesn +reported +##ling +jack +fall +raised +itself +stay +true +studio +1988 
+sports +replaced +paris +systems +saint +leader +theatre +whose +market +capital +parents +spanish +canadian +earth +##ity +cut +degree +writing +bay +christian +awarded +natural +higher +bill +##as +coast +provided +previous +senior +ft +valley +organization +stopped +onto +countries +parts +conference +queen +security +interest +saying +allowed +master +earlier +phone +matter +smith +winning +try +happened +moving +campaign +los +##ley +breath +nearly +mid +1987 +certain +girls +date +italian +african +standing +fell +artist +##ted +shows +deal +mine +industry +1986 +##ng +everyone +republic +provide +collection +library +student +##ville +primary +owned +older +via +heavy +1st +makes +##able +attention +anyone +africa +##ri +stated +length +ended +fingers +command +staff +skin +foreign +opening +governor +okay +medal +kill +sun +cover +job +1985 +introduced +chest +hell +feeling +##ies +success +meet +reason +standard +meeting +novel +1984 +trade +source +buildings +##land +rose +guy +goal +##ur +chapter +native +husband +previously +unit +limited +entered +weeks +producer +operations +mountain +takes +covered +forced +related +roman +complete +successful +key +texas +cold +##ya +channel +1980 +traditional +films +dance +clear +approximately +500 +nine +van +prince +question +active +tracks +ireland +regional +silver +author +personal +sense +operation +##ine +economic +1983 +holding +twenty +isbn +additional +speed +hour +edition +regular +historic +places +whom +shook +movie +km² +secretary +prior +report +chicago +read +foundation +view +engine +scored +1982 +units +ask +airport +property +ready +immediately +lady +month +listed +contract +##de +manager +themselves +lines +##ki +navy +writer +meant +##ts +runs +##ro +practice +championships +singer +glass +commission +required +forest +starting +culture +generally +giving +access +attended +test +couple +stand +catholic +martin +caught +executive +##less +eye +##ey +thinking +chair +quite +shoulder +1979 +hope +decision +plays +defeated +municipality +whether +structure +offered +slowly +pain +ice +direction +##ion +paper +mission +1981 +mostly +200 +noted +individual +managed +nature +lives +plant +##ha +helped +except +studied +computer +figure +relationship +issue +significant +loss +die +smiled +gun +ago +highest +1972 +##am +male +bring +goals +mexico +problem +distance +commercial +completely +location +annual +famous +drive +1976 +neck +1978 +surface +caused +italy +understand +greek +highway +wrong +hotel +comes +appearance +joseph +double +issues +musical +companies +castle +income +review +assembly +bass +initially +parliament +artists +experience +1974 +particular +walk +foot +engineering +talking +window +dropped +##ter +miss +baby +boys +break +1975 +stars +edge +remember +policy +carried +train +stadium +bar +sex +angeles +evidence +##ge +becoming +assistant +soviet +1977 +upper +step +wing +1970 +youth +financial +reach +##ll +actor +numerous +##se +##st +nodded +arrived +##ation +minute +##nt +believed +sorry +complex +beautiful +victory +associated +temple +1968 +1973 +chance +perhaps +metal +##son +1945 +bishop +##et +lee +launched +particularly +tree +le +retired +subject +prize +contains +yeah +theory +empire +##ce +suddenly +waiting +trust +recording +##to +happy +terms +camp +champion +1971 +religious +pass +zealand +names +2nd +port +ancient +tom +corner +represented +watch +legal +anti +justice +cause +watched +brothers +45 +material +changes +simply +response +louis +fast +##ting +answer +60 +historical 
+1969 +stories +straight +create +feature +increased +rate +administration +virginia +el +activities +cultural +overall +winner +programs +basketball +legs +guard +beyond +cast +doctor +mm +flight +results +remains +cost +effect +winter +##ble +larger +islands +problems +chairman +grew +commander +isn +1967 +pay +failed +selected +hurt +fort +box +regiment +majority +journal +35 +edward +plans +##ke +##ni +shown +pretty +irish +characters +directly +scene +likely +operated +allow +spring +##j +junior +matches +looks +mike +houses +fellow +##tion +beach +marriage +##ham +##ive +rules +oil +65 +florida +expected +nearby +congress +sam +peace +recent +iii +wait +subsequently +cell +##do +variety +serving +agreed +please +poor +joe +pacific +attempt +wood +democratic +piece +prime +##ca +rural +mile +touch +appears +township +1964 +1966 +soldiers +##men +##ized +1965 +pennsylvania +closer +fighting +claimed +score +jones +physical +editor +##ous +filled +genus +specific +sitting +super +mom +##va +therefore +supported +status +fear +cases +store +meaning +wales +minor +spain +tower +focus +vice +frank +follow +parish +separate +golden +horse +fifth +remaining +branch +32 +presented +stared +##id +uses +secret +forms +##co +baseball +exactly +##ck +choice +note +discovered +travel +composed +truth +russia +ball +color +kiss +dad +wind +continue +ring +referred +numbers +digital +greater +##ns +metres +slightly +direct +increase +1960 +responsible +crew +rule +trees +troops +##no +broke +goes +individuals +hundred +weight +creek +sleep +memory +defense +provides +ordered +code +value +jewish +windows +1944 +safe +judge +whatever +corps +realized +growing +pre +##ga +cities +alexander +gaze +lies +spread +scott +letter +showed +situation +mayor +transport +watching +workers +extended +##li +expression +normal +##ment +chart +multiple +border +##ba +host +##ner +daily +mrs +walls +piano +##ko +heat +cannot +##ate +earned +products +drama +era +authority +seasons +join +grade +##io +sign +difficult +machine +1963 +territory +mainly +##wood +stations +squadron +1962 +stepped +iron +19th +##led +serve +appear +sky +speak +broken +charge +knowledge +kilometres +removed +ships +article +campus +simple +##ty +pushed +britain +##ve +leaves +recently +cd +soft +boston +latter +easy +acquired +poland +##sa +quality +officers +presence +planned +nations +mass +broadcast +jean +share +image +influence +wild +offer +emperor +electric +reading +headed +ability +promoted +yellow +ministry +1942 +throat +smaller +politician +##by +latin +spoke +cars +williams +males +lack +pop +80 +##ier +acting +seeing +consists +##ti +estate +1961 +pressure +johnson +newspaper +jr +chris +olympics +online +conditions +beat +elements +walking +vote +##field +needs +carolina +text +featuring +global +block +shirt +levels +francisco +purpose +females +et +dutch +duke +ahead +gas +twice +safety +serious +turning +highly +lieutenant +firm +maria +amount +mixed +daniel +proposed +perfect +agreement +affairs +3rd +seconds +contemporary +paid +1943 +prison +save +kitchen +label +administrative +intended +constructed +academic +nice +teacher +races +1956 +formerly +corporation +ben +nation +issued +shut +1958 +drums +housing +victoria +seems +opera +1959 +graduated +function +von +mentioned +picked +build +recognized +shortly +protection +picture +notable +exchange +elections +1980s +loved +percent +racing +fish +elizabeth +garden +volume +hockey +1941 +beside +settled +##ford +1940 +competed +replied +drew +1948 +actress +marine 
+scotland +steel +glanced +farm +steve +1957 +risk +tonight +positive +magic +singles +effects +gray +screen +dog +##ja +residents +bus +sides +none +secondary +literature +polish +destroyed +flying +founder +households +1939 +lay +reserve +usa +gallery +##ler +1946 +industrial +younger +approach +appearances +urban +ones +1950 +finish +avenue +powerful +fully +growth +page +honor +jersey +projects +advanced +revealed +basic +90 +infantry +pair +equipment +visit +33 +evening +search +grant +effort +solo +treatment +buried +republican +primarily +bottom +owner +1970s +israel +gives +jim +dream +bob +remain +spot +70 +notes +produce +champions +contact +ed +soul +accepted +ways +del +##ally +losing +split +price +capacity +basis +trial +questions +##ina +1955 +20th +guess +officially +memorial +naval +initial +##ization +whispered +median +engineer +##ful +sydney +##go +columbia +strength +300 +1952 +tears +senate +00 +card +asian +agent +1947 +software +44 +draw +warm +supposed +com +pro +##il +transferred +leaned +##at +candidate +escape +mountains +asia +potential +activity +entertainment +seem +traffic +jackson +murder +36 +slow +product +orchestra +haven +agency +bbc +taught +website +comedy +unable +storm +planning +albums +rugby +environment +scientific +grabbed +protect +##hi +boat +typically +1954 +1953 +damage +principal +divided +dedicated +mount +ohio +##berg +pick +fought +driver +##der +empty +shoulders +sort +thank +berlin +prominent +account +freedom +necessary +efforts +alex +headquarters +follows +alongside +des +simon +andrew +suggested +operating +learning +steps +1949 +sweet +technical +begin +easily +34 +teeth +speaking +settlement +scale +##sh +renamed +ray +max +enemy +semi +joint +compared +##rd +scottish +leadership +analysis +offers +georgia +pieces +captured +animal +deputy +guest +organized +##lin +tony +combined +method +challenge +1960s +huge +wants +battalion +sons +rise +crime +types +facilities +telling +path +1951 +platform +sit +1990s +##lo +tells +assigned +rich +pull +##ot +commonly +alive +##za +letters +concept +conducted +wearing +happen +bought +becomes +holy +gets +ocean +defeat +languages +purchased +coffee +occurred +titled +##q +declared +applied +sciences +concert +sounds +jazz +brain +##me +painting +fleet +tax +nick +##ius +michigan +count +animals +leaders +episodes +##line +content +##den +birth +##it +clubs +64 +palace +critical +refused +fair +leg +laughed +returning +surrounding +participated +formation +lifted +pointed +connected +rome +medicine +laid +taylor +santa +powers +adam +tall +shared +focused +knowing +yards +entrance +falls +##wa +calling +##ad +sources +chosen +beneath +resources +yard +##ite +nominated +silence +zone +defined +##que +gained +thirty +38 +bodies +moon +##ard +adopted +christmas +widely +register +apart +iran +premier +serves +du +unknown +parties +##les +generation +##ff +continues +quick +fields +brigade +quiet +teaching +clothes +impact +weapons +partner +flat +theater +supreme +1938 +37 +relations +##tor +plants +suffered +1936 +wilson +kids +begins +##age +1918 +seats +armed +internet +models +worth +laws +400 +communities +classes +background +knows +thanks +quarter +reaching +humans +carry +killing +format +kong +hong +setting +75 +architecture +disease +railroad +inc +possibly +wish +arthur +thoughts +harry +doors +density +##di +crowd +illinois +stomach +tone +unique +reports +anyway +##ir +liberal +der +vehicle +thick +dry +drug +faced +largely +facility +theme +holds +creation +strange +colonel +##mi 
+revolution +bell +politics +turns +silent +rail +relief +independence +combat +shape +write +determined +sales +learned +4th +finger +oxford +providing +1937 +heritage +fiction +situated +designated +allowing +distribution +hosted +##est +sight +interview +estimated +reduced +##ria +toronto +footballer +keeping +guys +damn +claim +motion +sport +sixth +stayed +##ze +en +rear +receive +handed +twelve +dress +audience +granted +brazil +##well +spirit +##ated +noticed +etc +olympic +representative +eric +tight +trouble +reviews +drink +vampire +missing +roles +ranked +newly +household +finals +wave +critics +##ee +phase +massachusetts +pilot +unlike +philadelphia +bright +guns +crown +organizations +roof +42 +respectively +clearly +tongue +marked +circle +fox +korea +bronze +brian +expanded +sexual +supply +yourself +inspired +labour +fc +##ah +reference +vision +draft +connection +brand +reasons +1935 +classic +driving +trip +jesus +cells +entry +1920 +neither +trail +claims +atlantic +orders +labor +nose +afraid +identified +intelligence +calls +cancer +attacked +passing +stephen +positions +imperial +grey +jason +39 +sunday +48 +swedish +avoid +extra +uncle +message +covers +allows +surprise +materials +fame +hunter +##ji +1930 +citizens +figures +davis +environmental +confirmed +shit +titles +di +performing +difference +acts +attacks +##ov +existing +votes +opportunity +nor +shop +entirely +trains +opposite +pakistan +##pa +develop +resulted +representatives +actions +reality +pressed +##ish +barely +wine +conversation +faculty +northwest +ends +documentary +nuclear +stock +grace +sets +eat +alternative +##ps +bag +resulting +creating +surprised +cemetery +1919 +drop +finding +sarah +cricket +streets +tradition +ride +1933 +exhibition +target +ear +explained +rain +composer +injury +apartment +municipal +educational +occupied +netherlands +clean +billion +constitution +learn +1914 +maximum +classical +francis +lose +opposition +jose +ontario +bear +core +hills +rolled +ending +drawn +permanent +fun +##tes +##lla +lewis +sites +chamber +ryan +##way +scoring +height +1934 +##house +lyrics +staring +55 +officials +1917 +snow +oldest +##tic +orange +##ger +qualified +interior +apparently +succeeded +thousand +dinner +lights +existence +fans +heavily +41 +greatest +conservative +send +bowl +plus +enter +catch +##un +economy +duty +1929 +speech +authorities +princess +performances +versions +shall +graduate +pictures +effective +remembered +poetry +desk +crossed +starring +starts +passenger +sharp +##ant +acres +ass +weather +falling +rank +fund +supporting +check +adult +publishing +heads +cm +southeast +lane +##burg +application +bc +##ura +les +condition +transfer +prevent +display +ex +regions +earl +federation +cool +relatively +answered +besides +1928 +obtained +portion +##town +mix +##ding +reaction +liked +dean +express +peak +1932 +##tte +counter +religion +chain +rare +miller +convention +aid +lie +vehicles +mobile +perform +squad +wonder +lying +crazy +sword +##ping +attempted +centuries +weren +philosophy +category +##ize +anna +interested +47 +sweden +wolf +frequently +abandoned +kg +literary +alliance +task +entitled +##ay +threw +promotion +factory +tiny +soccer +visited +matt +fm +achieved +52 +defence +internal +persian +43 +methods +##ging +arrested +otherwise +cambridge +programming +villages +elementary +districts +rooms +criminal +conflict +worry +trained +1931 +attempts +waited +signal +bird +truck +subsequent +programme +##ol +ad +49 +communist +details +faith +sector 
+patrick +carrying +laugh +##ss +controlled +korean +showing +origin +fuel +evil +1927 +##ent +brief +identity +darkness +address +pool +missed +publication +web +planet +ian +anne +wings +invited +##tt +briefly +standards +kissed +##be +ideas +climate +causing +walter +worse +albert +articles +winners +desire +aged +northeast +dangerous +gate +doubt +1922 +wooden +multi +##ky +poet +rising +funding +46 +communications +communication +violence +copies +prepared +ford +investigation +skills +1924 +pulling +electronic +##ak +##ial +##han +containing +ultimately +offices +singing +understanding +restaurant +tomorrow +fashion +christ +ward +da +pope +stands +5th +flow +studios +aired +commissioned +contained +exist +fresh +americans +##per +wrestling +approved +kid +employed +respect +suit +1925 +angel +asking +increasing +frame +angry +selling +1950s +thin +finds +##nd +temperature +statement +ali +explain +inhabitants +towns +extensive +narrow +51 +jane +flowers +images +promise +somewhere +object +fly +closely +##ls +1912 +bureau +cape +1926 +weekly +presidential +legislative +1921 +##ai +##au +launch +founding +##ny +978 +##ring +artillery +strike +un +institutions +roll +writers +landing +chose +kevin +anymore +pp +##ut +attorney +fit +dan +billboard +receiving +agricultural +breaking +sought +dave +admitted +lands +mexican +##bury +charlie +specifically +hole +iv +howard +credit +moscow +roads +accident +1923 +proved +wear +struck +hey +guards +stuff +slid +expansion +1915 +cat +anthony +##kin +melbourne +opposed +sub +southwest +architect +failure +plane +1916 +##ron +map +camera +tank +listen +regarding +wet +introduction +metropolitan +link +ep +fighter +inch +grown +gene +anger +fixed +buy +dvd +khan +domestic +worldwide +chapel +mill +functions +examples +##head +developing +1910 +turkey +hits +pocket +antonio +papers +grow +unless +circuit +18th +concerned +attached +journalist +selection +journey +converted +provincial +painted +hearing +aren +bands +negative +aside +wondered +knight +lap +survey +ma +##ow +noise +billy +##ium +shooting +guide +bedroom +priest +resistance +motor +homes +sounded +giant +##mer +150 +scenes +equal +comic +patients +hidden +solid +actual +bringing +afternoon +touched +funds +wedding +consisted +marie +canal +sr +kim +treaty +turkish +recognition +residence +cathedral +broad +knees +incident +shaped +fired +norwegian +handle +cheek +contest +represent +##pe +representing +beauty +##sen +birds +advantage +emergency +wrapped +drawing +notice +pink +broadcasting +##ong +somehow +bachelor +seventh +collected +registered +establishment +alan +assumed +chemical +personnel +roger +retirement +jeff +portuguese +wore +tied +device +threat +progress +advance +##ised +banks +hired +manchester +nfl +teachers +structures +forever +##bo +tennis +helping +saturday +sale +applications +junction +hip +incorporated +neighborhood +dressed +ceremony +##ds +influenced +hers +visual +stairs +decades +inner +kansas +hung +hoped +gain +scheduled +downtown +engaged +austria +clock +norway +certainly +pale +protected +1913 +victor +employees +plate +putting +surrounded +##ists +finishing +blues +tropical +##ries +minnesota +consider +philippines +accept +54 +retrieved +1900 +concern +anderson +properties +institution +gordon +successfully +vietnam +##dy +backing +outstanding +muslim +crossing +folk +producing +usual +demand +occurs +observed +lawyer +educated +##ana +kelly +string +pleasure +budget +items +quietly +colorado +philip +typical +##worth +derived +600 +survived 
+[WordPiece vocabulary added as PyTorch/LanguageModeling/BERT/vocab/vocab: one token per line (whole words, numerals, and ##-prefixed subword continuations such as ##ide, ##ea, ##ing); this hunk is a contiguous block of the file's 30,522 entries.]
+recovering +nolan +ashe +hurts +geology +fashioned +disappearance +farewell +swollen +shrug +marquis +wimbledon +124 +rue +1792 +commemorate +reduces +experiencing +inevitable +calcutta +intel +##court +murderer +sticking +fisheries +imagery +bloom +280 +brake +##inus +gustav +hesitation +memorable +po +viral +beans +accidents +tunisia +antenna +spilled +consort +treatments +aye +perimeter +##gard +donation +hostage +migrated +banker +addiction +apex +lil +trout +##ously +conscience +##nova +rams +sands +genome +passionate +troubles +##lets +##set +amid +##ibility +##ret +higgins +exceed +vikings +##vie +payne +##zan +muscular +##ste +defendant +sucking +##wal +ibrahim +fuselage +claudia +vfl +europeans +snails +interval +##garh +preparatory +statewide +tasked +lacrosse +viktor +##lation +angola +##hra +flint +implications +employs +teens +patrons +stall +weekends +barriers +scrambled +nucleus +tehran +jenna +parsons +lifelong +robots +displacement +5000 +##bles +precipitation +##gt +knuckles +clutched +1802 +marrying +ecology +marx +accusations +declare +scars +kolkata +mat +meadows +bermuda +skeleton +finalists +vintage +crawl +coordinate +affects +subjected +orchestral +mistaken +##tc +mirrors +dipped +relied +260 +arches +candle +##nick +incorporating +wildly +fond +basilica +owl +fringe +rituals +whispering +stirred +feud +tertiary +slick +goat +honorable +whereby +skip +ricardo +stripes +parachute +adjoining +submerged +synthesizer +##gren +intend +positively +ninety +phi +beaver +partition +fellows +alexis +prohibition +carlisle +bizarre +fraternity +##bre +doubts +icy +cbc +aquatic +sneak +sonny +combines +airports +crude +supervised +spatial +merge +alfonso +##bic +corrupt +scan +undergo +##ams +disabilities +colombian +comparing +dolphins +perkins +##lish +reprinted +unanimous +bounced +hairs +underworld +midwest +semester +bucket +paperback +miniseries +coventry +demise +##leigh +demonstrations +sensor +rotating +yan +##hler +arrange +soils +##idge +hyderabad +labs +##dr +brakes +grandchildren +##nde +negotiated +rover +ferrari +continuation +directorate +augusta +stevenson +counterpart +gore +##rda +nursery +rican +ave +collectively +broadly +pastoral +repertoire +asserted +discovering +nordic +styled +fiba +cunningham +harley +middlesex +survives +tumor +tempo +zack +aiming +lok +urgent +##rade +##nto +devils +##ement +contractor +turin +##wl +##ool +bliss +repaired +simmons +moan +astronomical +cr +negotiate +lyric +1890s +lara +bred +clad +angus +pbs +##ience +engineered +posed +##lk +hernandez +possessions +elbows +psychiatric +strokes +confluence +electorate +lifts +campuses +lava +alps +##ep +##ution +##date +physicist +woody +##page +##ographic +##itis +juliet +reformation +sparhawk +320 +complement +suppressed +jewel +##½ +floated +##kas +continuity +sadly +##ische +inability +melting +scanning +paula +flour +judaism +safer +vague +##lm +solving +curb +##stown +financially +gable +bees +expired +miserable +cassidy +dominion +1789 +cupped +145 +robbery +facto +amos +warden +resume +tallest +marvin +ing +pounded +usd +declaring +gasoline +##aux +darkened +270 +650 +sophomore +##mere +erection +gossip +televised +risen +dial +##eu +pillars +##link +passages +profound +##tina +arabian +ashton +silicon +nail +##ead +##lated +##wer +##hardt +fleming +firearms +ducked +circuits +blows +waterloo +titans +##lina +atom +fireplace +cheshire +financed +activation +algorithms +##zzi +constituent +catcher +cherokee +partnerships +sexuality +platoon +tragic +vivian +guarded +whiskey 
+meditation +poetic +##late +##nga +##ake +porto +listeners +dominance +kendra +mona +chandler +factions +22nd +salisbury +attitudes +derivative +##ido +##haus +intake +paced +javier +illustrator +barrels +bias +cockpit +burnett +dreamed +ensuing +##anda +receptors +someday +hawkins +mattered +##lal +slavic +1799 +jesuit +cameroon +wasted +tai +wax +lowering +victorious +freaking +outright +hancock +librarian +sensing +bald +calcium +myers +tablet +announcing +barack +shipyard +pharmaceutical +##uan +greenwich +flush +medley +patches +wolfgang +pt +speeches +acquiring +exams +nikolai +##gg +hayden +kannada +##type +reilly +##pt +waitress +abdomen +devastated +capped +pseudonym +pharmacy +fulfill +paraguay +1796 +clicked +##trom +archipelago +syndicated +##hman +lumber +orgasm +rejection +clifford +lorraine +advent +mafia +rodney +brock +##ght +##used +##elia +cassette +chamberlain +despair +mongolia +sensors +developmental +upstream +##eg +##alis +spanning +165 +trombone +basque +seeded +interred +renewable +rhys +leapt +revision +molecule +##ages +chord +vicious +nord +shivered +23rd +arlington +debts +corpus +sunrise +bays +blackburn +centimetres +##uded +shuddered +gm +strangely +gripping +cartoons +isabelle +orbital +##ppa +seals +proving +##lton +refusal +strengthened +bust +assisting +baghdad +batsman +portrayal +mara +pushes +spears +og +##cock +reside +nathaniel +brennan +1776 +confirmation +caucus +##worthy +markings +yemen +nobles +ku +lazy +viewer +catalan +encompasses +sawyer +##fall +sparked +substances +patents +braves +arranger +evacuation +sergio +persuade +dover +tolerance +penguin +cum +jockey +insufficient +townships +occupying +declining +plural +processed +projection +puppet +flanders +introduces +liability +##yon +gymnastics +antwerp +taipei +hobart +candles +jeep +wes +observers +126 +chaplain +bundle +glorious +##hine +hazel +flung +sol +excavations +dumped +stares +sh +bangalore +triangular +icelandic +intervals +expressing +turbine +##vers +songwriting +crafts +##igo +jasmine +ditch +rite +##ways +entertaining +comply +sorrow +wrestlers +basel +emirates +marian +rivera +helpful +##some +caution +downward +networking +##atory +##tered +darted +genocide +emergence +replies +specializing +spokesman +convenient +unlocked +fading +augustine +concentrations +resemblance +elijah +investigator +andhra +##uda +promotes +bean +##rrell +fleeing +wan +simone +announcer +##ame +##bby +lydia +weaver +132 +residency +modification +##fest +stretches +##ast +alternatively +nat +lowe +lacks +##ented +pam +tile +concealed +inferior +abdullah +residences +tissues +vengeance +##ided +moisture +peculiar +groove +zip +bologna +jennings +ninja +oversaw +zombies +pumping +batch +livingston +emerald +installations +1797 +peel +nitrogen +rama +##fying +##star +schooling +strands +responding +werner +##ost +lime +casa +accurately +targeting +##rod +underway +##uru +hemisphere +lester +##yard +occupies +2d +griffith +angrily +reorganized +##owing +courtney +deposited +##dd +##30 +estadio +##ifies +dunn +exiled +##ying +checks +##combe +##о +##fly +successes +unexpectedly +blu +assessed +##flower +##ه +observing +sacked +spiders +kn +##tail +mu +nodes +prosperity +audrey +divisional +155 +broncos +tangled +adjust +feeds +erosion +paolo +surf +directory +snatched +humid +admiralty +screwed +gt +reddish +##nese +modules +trench +lamps +bind +leah +bucks +competes +##nz +##form +transcription +##uc +isles +violently +clutching +pga +cyclist +inflation +flats +ragged +unnecessary +##hian +stubborn 
+coordinated +harriet +baba +disqualified +330 +insect +wolfe +##fies +reinforcements +rocked +duel +winked +embraced +bricks +##raj +hiatus +defeats +pending +brightly +jealousy +##xton +##hm +##uki +lena +gdp +colorful +##dley +stein +kidney +##shu +underwear +wanderers +##haw +##icus +guardians +m³ +roared +habits +##wise +permits +gp +uranium +punished +disguise +bundesliga +elise +dundee +erotic +partisan +pi +collectors +float +individually +rendering +behavioral +bucharest +ser +hare +valerie +corporal +nutrition +proportional +##isa +immense +##kis +pavement +##zie +##eld +sutherland +crouched +1775 +##lp +suzuki +trades +endurance +operas +crosby +prayed +priory +rory +socially +##urn +gujarat +##pu +walton +cube +pasha +privilege +lennon +floods +thorne +waterfall +nipple +scouting +approve +##lov +minorities +voter +dwight +extensions +assure +ballroom +slap +dripping +privileges +rejoined +confessed +demonstrating +patriotic +yell +investor +##uth +pagan +slumped +squares +##cle +##kins +confront +bert +embarrassment +##aid +aston +urging +sweater +starr +yuri +brains +williamson +commuter +mortar +structured +selfish +exports +##jon +cds +##him +unfinished +##rre +mortgage +destinations +##nagar +canoe +solitary +buchanan +delays +magistrate +fk +##pling +motivation +##lier +##vier +recruiting +assess +##mouth +malik +antique +1791 +pius +rahman +reich +tub +zhou +smashed +airs +galway +xii +conditioning +honduras +discharged +dexter +##pf +lionel +129 +debates +lemon +tiffany +volunteered +dom +dioxide +procession +devi +sic +tremendous +advertisements +colts +transferring +verdict +hanover +decommissioned +utter +relate +pac +racism +##top +beacon +limp +similarity +terra +occurrence +ant +##how +becky +capt +updates +armament +richie +pal +##graph +halloween +mayo +##ssen +##bone +cara +serena +fcc +dolls +obligations +##dling +violated +lafayette +jakarta +exploitation +##ime +infamous +iconic +##lah +##park +kitty +moody +reginald +dread +spill +crystals +olivier +modeled +bluff +equilibrium +separating +notices +ordnance +extinction +onset +cosmic +attachment +sammy +expose +privy +anchored +##bil +abbott +admits +bending +baritone +emmanuel +policeman +vaughan +winged +climax +dresses +denny +polytechnic +mohamed +burmese +authentic +nikki +genetics +grandparents +homestead +gaza +postponed +metacritic +una +##sby +##bat +unstable +dissertation +##rial +##cian +curls +obscure +uncovered +bronx +praying +disappearing +##hoe +prehistoric +coke +turret +mutations +nonprofit +pits +monaco +##ي +##usion +prominently +dispatched +podium +##mir +uci +##uation +133 +fortifications +birthplace +kendall +##lby +##oll +preacher +rack +goodman +##rman +persistent +##ott +countless +jaime +recorder +lexington +persecution +jumps +renewal +wagons +##11 +crushing +##holder +decorations +##lake +abundance +wrath +laundry +£1 +garde +##rp +jeanne +beetles +peasant +##sl +splitting +caste +sergei +##rer +##ema +scripts +##ively +rub +satellites +##vor +inscribed +verlag +scrapped +gale +packages +chick +potato +slogan +kathleen +arabs +##culture +counterparts +reminiscent +choral +##tead +rand +retains +bushes +dane +accomplish +courtesy +closes +##oth +slaughter +hague +krakow +lawson +tailed +elias +ginger +##ttes +canopy +betrayal +rebuilding +turf +##hof +frowning +allegiance +brigades +kicks +rebuild +polls +alias +nationalism +td +rowan +audition +bowie +fortunately +recognizes +harp +dillon +horrified +##oro +renault +##tics +ropes +##α +presumed +rewarded +infrared +wiping 
+accelerated +illustration +##rid +presses +practitioners +badminton +##iard +detained +##tera +recognizing +relates +misery +##sies +##tly +reproduction +piercing +potatoes +thornton +esther +manners +hbo +##aan +ours +bullshit +ernie +perennial +sensitivity +illuminated +rupert +##jin +##iss +##ear +rfc +nassau +##dock +staggered +socialism +##haven +appointments +nonsense +prestige +sharma +haul +##tical +solidarity +gps +##ook +##rata +igor +pedestrian +##uit +baxter +tenants +wires +medication +unlimited +guiding +impacts +diabetes +##rama +sasha +pas +clive +extraction +131 +continually +constraints +##bilities +sonata +hunted +sixteenth +chu +planting +quote +mayer +pretended +abs +spat +##hua +ceramic +##cci +curtains +pigs +pitching +##dad +latvian +sore +dayton +##sted +##qi +patrols +slice +playground +##nted +shone +stool +apparatus +inadequate +mates +treason +##ija +desires +##liga +##croft +somalia +laurent +mir +leonardo +oracle +grape +obliged +chevrolet +thirteenth +stunning +enthusiastic +##ede +accounted +concludes +currents +basil +##kovic +drought +##rica +mai +##aire +shove +posting +##shed +pilgrimage +humorous +packing +fry +pencil +wines +smells +144 +marilyn +aching +newest +clung +bon +neighbours +sanctioned +##pie +mug +##stock +drowning +##mma +hydraulic +##vil +hiring +reminder +lilly +investigators +##ncies +sour +##eous +compulsory +packet +##rion +##graphic +##elle +cannes +##inate +depressed +##rit +heroic +importantly +theresa +##tled +conway +saturn +marginal +rae +##xia +corresponds +royce +pact +jasper +explosives +packaging +aluminium +##ttered +denotes +rhythmic +spans +assignments +hereditary +outlined +originating +sundays +lad +reissued +greeting +beatrice +##dic +pillar +marcos +plots +handbook +alcoholic +judiciary +avant +slides +extract +masculine +blur +##eum +##force +homage +trembled +owens +hymn +trey +omega +signaling +socks +accumulated +reacted +attic +theo +lining +angie +distraction +primera +talbot +##key +1200 +ti +creativity +billed +##hey +deacon +eduardo +identifies +proposition +dizzy +gunner +hogan +##yam +##pping +##hol +ja +##chan +jensen +reconstructed +##berger +clearance +darius +##nier +abe +harlem +plea +dei +circled +emotionally +notation +fascist +neville +exceeded +upwards +viable +ducks +##fo +workforce +racer +limiting +shri +##lson +possesses +1600 +kerr +moths +devastating +laden +disturbing +locking +##cture +gal +fearing +accreditation +flavor +aide +1870s +mountainous +##baum +melt +##ures +motel +texture +servers +soda +##mb +herd +##nium +erect +puzzled +hum +peggy +examinations +gould +testified +geoff +ren +devised +sacks +##law +denial +posters +grunted +cesar +tutor +ec +gerry +offerings +byrne +falcons +combinations +ct +incoming +pardon +rocking +26th +avengers +flared +mankind +seller +uttar +loch +nadia +stroking +exposing +##hd +fertile +ancestral +instituted +##has +noises +prophecy +taxation +eminent +vivid +pol +##bol +dart +indirect +multimedia +notebook +upside +displaying +adrenaline +referenced +geometric +##iving +progression +##ddy +blunt +announce +##far +implementing +##lav +aggression +liaison +cooler +cares +headache +plantations +gorge +dots +impulse +thickness +ashamed +averaging +kathy +obligation +precursor +137 +fowler +symmetry +thee +225 +hears +##rai +undergoing +ads +butcher +bowler +##lip +cigarettes +subscription +goodness +##ically +browne +##hos +##tech +kyoto +donor +##erty +damaging +friction +drifting +expeditions +hardened +prostitution +152 +fauna +blankets +claw 
+tossing +snarled +butterflies +recruits +investigative +coated +healed +138 +communal +hai +xiii +academics +boone +psychologist +restless +lahore +stephens +mba +brendan +foreigners +printer +##pc +ached +explode +27th +deed +scratched +dared +##pole +cardiac +1780 +okinawa +proto +commando +compelled +oddly +electrons +##base +replica +thanksgiving +##rist +sheila +deliberate +stafford +tidal +representations +hercules +ou +##path +##iated +kidnapping +lenses +##tling +deficit +samoa +mouths +consuming +computational +maze +granting +smirk +razor +fixture +ideals +inviting +aiden +nominal +##vs +issuing +julio +pitt +ramsey +docks +##oss +exhaust +##owed +bavarian +draped +anterior +mating +ethiopian +explores +noticing +##nton +discarded +convenience +hoffman +endowment +beasts +cartridge +mormon +paternal +probe +sleeves +interfere +lump +deadline +##rail +jenks +bulldogs +scrap +alternating +justified +reproductive +nam +seize +descending +secretariat +kirby +coupe +grouped +smash +panther +sedan +tapping +##18 +lola +cheer +germanic +unfortunate +##eter +unrelated +##fan +subordinate +##sdale +suzanne +advertisement +##ility +horsepower +##lda +cautiously +discourse +luigi +##mans +##fields +noun +prevalent +mao +schneider +everett +surround +governorate +kira +##avia +westward +##take +misty +rails +sustainability +134 +unused +##rating +packs +toast +unwilling +regulate +thy +suffrage +nile +awe +assam +definitions +travelers +affordable +##rb +conferred +sells +undefeated +beneficial +torso +basal +repeating +remixes +##pass +bahrain +cables +fang +##itated +excavated +numbering +statutory +##rey +deluxe +##lian +forested +ramirez +derbyshire +zeus +slamming +transfers +astronomer +banana +lottery +berg +histories +bamboo +##uchi +resurrection +posterior +bowls +vaguely +##thi +thou +preserving +tensed +offence +##inas +meyrick +callum +ridden +watt +langdon +tying +lowland +snorted +daring +truman +##hale +##girl +aura +overly +filing +weighing +goa +infections +philanthropist +saunders +eponymous +##owski +latitude +perspectives +reviewing +mets +commandant +radial +##kha +flashlight +reliability +koch +vowels +amazed +ada +elaine +supper +##rth +##encies +predator +debated +soviets +cola +##boards +##nah +compartment +crooked +arbitrary +fourteenth +##ctive +havana +majors +steelers +clips +profitable +ambush +exited +packers +##tile +nude +cracks +fungi +##е +limb +trousers +josie +shelby +tens +frederic +##ος +definite +smoothly +constellation +insult +baton +discs +lingering +##nco +conclusions +lent +staging +becker +grandpa +shaky +##tron +einstein +obstacles +sk +adverse +elle +economically +##moto +mccartney +thor +dismissal +motions +readings +nostrils +treatise +##pace +squeezing +evidently +prolonged +1783 +venezuelan +je +marguerite +beirut +takeover +shareholders +##vent +denise +digit +airplay +norse +##bbling +imaginary +pills +hubert +blaze +vacated +eliminating +##ello +vine +mansfield +##tty +retrospective +barrow +borne +clutch +bail +forensic +weaving +##nett +##witz +desktop +citadel +promotions +worrying +dorset +ieee +subdivided +##iating +manned +expeditionary +pickup +synod +chuckle +185 +barney +##rz +##ffin +functionality +karachi +litigation +meanings +uc +lick +turbo +anders +##ffed +execute +curl +oppose +ankles +typhoon +##د +##ache +##asia +linguistics +compassion +pressures +grazing +perfection +##iting +immunity +monopoly +muddy +backgrounds +136 +namibia +francesca +monitors +attracting +stunt +tuition +##ии +vegetable +##mates +##quent +mgm 
+jen +complexes +forts +##ond +cellar +bites +seventeenth +royals +flemish +failures +mast +charities +##cular +peruvian +capitals +macmillan +ipswich +outward +frigate +postgraduate +folds +employing +##ouse +concurrently +fiery +##tai +contingent +nightmares +monumental +nicaragua +##kowski +lizard +mal +fielding +gig +reject +##pad +harding +##ipe +coastline +##cin +##nos +beethoven +humphrey +innovations +##tam +##nge +norris +doris +solicitor +huang +obey +141 +##lc +niagara +##tton +shelves +aug +bourbon +curry +nightclub +specifications +hilton +##ndo +centennial +dispersed +worm +neglected +briggs +sm +font +kuala +uneasy +plc +##nstein +##bound +##aking +##burgh +awaiting +pronunciation +##bbed +##quest +eh +optimal +zhu +raped +greens +presided +brenda +worries +##life +venetian +marxist +turnout +##lius +refined +braced +sins +grasped +sunderland +nickel +speculated +lowell +cyrillic +communism +fundraising +resembling +colonists +mutant +freddie +usc +##mos +gratitude +##run +mural +##lous +chemist +wi +reminds +28th +steals +tess +pietro +##ingen +promoter +ri +microphone +honoured +rai +sant +##qui +feather +##nson +burlington +kurdish +terrorists +deborah +sickness +##wed +##eet +hazard +irritated +desperation +veil +clarity +##rik +jewels +xv +##gged +##ows +##cup +berkshire +unfair +mysteries +orchid +winced +exhaustion +renovations +stranded +obe +infinity +##nies +adapt +redevelopment +thanked +registry +olga +domingo +noir +tudor +ole +##atus +commenting +behaviors +##ais +crisp +pauline +probable +stirling +wigan +##bian +paralympics +panting +surpassed +##rew +luca +barred +pony +famed +##sters +cassandra +waiter +carolyn +exported +##orted +andres +destructive +deeds +jonah +castles +vacancy +suv +##glass +1788 +orchard +yep +famine +belarusian +sprang +##forth +skinny +##mis +administrators +rotterdam +zambia +zhao +boiler +discoveries +##ride +##physics +lucius +disappointing +outreach +spoon +##frame +qualifications +unanimously +enjoys +regency +##iidae +stade +realism +veterinary +rodgers +dump +alain +chestnut +castile +censorship +rumble +gibbs +##itor +communion +reggae +inactivated +logs +loads +##houses +homosexual +##iano +ale +informs +##cas +phrases +plaster +linebacker +ambrose +kaiser +fascinated +850 +limerick +recruitment +forge +mastered +##nding +leinster +rooted +threaten +##strom +borneo +##hes +suggestions +scholarships +propeller +documentaries +patronage +coats +constructing +invest +neurons +comet +entirety +shouts +identities +annoying +unchanged +wary +##antly +##ogy +neat +oversight +##kos +phillies +replay +constance +##kka +incarnation +humble +skies +minus +##acy +smithsonian +##chel +guerrilla +jar +cadets +##plate +surplus +audit +##aru +cracking +joanna +louisa +pacing +##lights +intentionally +##iri +diner +nwa +imprint +australians +tong +unprecedented +bunker +naive +specialists +ark +nichols +railing +leaked +pedal +##uka +shrub +longing +roofs +v8 +captains +neural +tuned +##ntal +##jet +emission +medina +frantic +codex +definitive +sid +abolition +intensified +stocks +enrique +sustain +genoa +oxide +##written +clues +cha +##gers +tributaries +fragment +venom +##rity +##ente +##sca +muffled +vain +sire +laos +##ingly +##hana +hastily +snapping +surfaced +sentiment +motive +##oft +contests +approximate +mesa +luckily +dinosaur +exchanges +propelled +accord +bourne +relieve +tow +masks +offended +##ues +cynthia +##mmer +rains +bartender +zinc +reviewers +lois +##sai +legged +arrogant +rafe +rosie +comprise +handicap +blockade 
+inlet +lagoon +copied +drilling +shelley +petals +##inian +mandarin +obsolete +##inated +onward +arguably +productivity +cindy +praising +seldom +busch +discusses +raleigh +shortage +ranged +stanton +encouragement +firstly +conceded +overs +temporal +##uke +cbe +##bos +woo +certainty +pumps +##pton +stalked +##uli +lizzie +periodic +thieves +weaker +##night +gases +shoving +chooses +wc +##chemical +prompting +weights +##kill +robust +flanked +sticky +hu +tuberculosis +##eb +##eal +christchurch +resembled +wallet +reese +inappropriate +pictured +distract +fixing +fiddle +giggled +burger +heirs +hairy +mechanic +torque +apache +obsessed +chiefly +cheng +logging +##tag +extracted +meaningful +numb +##vsky +gloucestershire +reminding +##bay +unite +##lit +breeds +diminished +clown +glove +1860s +##ن +##ug +archibald +focal +freelance +sliced +depiction +##yk +organism +switches +sights +stray +crawling +##ril +lever +leningrad +interpretations +loops +anytime +reel +alicia +delighted +##ech +inhaled +xiv +suitcase +bernie +vega +licenses +northampton +exclusion +induction +monasteries +racecourse +homosexuality +##right +##sfield +##rky +dimitri +michele +alternatives +ions +commentators +genuinely +objected +pork +hospitality +fencing +stephan +warships +peripheral +wit +drunken +wrinkled +quentin +spends +departing +chung +numerical +spokesperson +##zone +johannesburg +caliber +killers +##udge +assumes +neatly +demographic +abigail +bloc +##vel +mounting +##lain +bentley +slightest +xu +recipients +##jk +merlin +##writer +seniors +prisons +blinking +hindwings +flickered +kappa +##hel +80s +strengthening +appealing +brewing +gypsy +mali +lashes +hulk +unpleasant +harassment +bio +treaties +predict +instrumentation +pulp +troupe +boiling +mantle +##ffe +ins +##vn +dividing +handles +verbs +##onal +coconut +senegal +340 +thorough +gum +momentarily +##sto +cocaine +panicked +destined +##turing +teatro +denying +weary +captained +mans +##hawks +##code +wakefield +bollywood +thankfully +##16 +cyril +##wu +amendments +##bahn +consultation +stud +reflections +kindness +1787 +internally +##ovo +tex +mosaic +distribute +paddy +seeming +143 +##hic +piers +##15 +##mura +##verse +popularly +winger +kang +sentinel +mccoy +##anza +covenant +##bag +verge +fireworks +suppress +thrilled +dominate +##jar +swansea +##60 +142 +reconciliation +##ndi +stiffened +cue +dorian +##uf +damascus +amor +ida +foremost +##aga +porsche +unseen +dir +##had +##azi +stony +lexi +melodies +##nko +angular +integer +podcast +ants +inherent +jaws +justify +persona +##olved +josephine +##nr +##ressed +customary +flashes +gala +cyrus +glaring +backyard +ariel +physiology +greenland +html +stir +avon +atletico +finch +methodology +ked +##lent +mas +catholicism +townsend +branding +quincy +fits +containers +1777 +ashore +aragon +##19 +forearm +poisoning +##sd +adopting +conquer +grinding +amnesty +keller +finances +evaluate +forged +lankan +instincts +##uto +guam +bosnian +photographed +workplace +desirable +protector +##dog +allocation +intently +encourages +willy +##sten +bodyguard +electro +brighter +##ν +bihar +##chev +lasts +opener +amphibious +sal +verde +arte +##cope +captivity +vocabulary +yields +##tted +agreeing +desmond +pioneered +##chus +strap +campaigned +railroads +##ович +emblem +##dre +stormed +501 +##ulous +marijuana +northumberland +##gn +##nath +bowen +landmarks +beaumont +##qua +danube +##bler +attorneys +th +ge +flyers +critique +villains +cass +mutation +acc +##0s +colombo +mckay +motif +sampling +concluding 
+syndicate +##rell +neon +stables +ds +warnings +clint +mourning +wilkinson +##tated +merrill +leopard +evenings +exhaled +emil +sonia +ezra +discrete +stove +farrell +fifteenth +prescribed +superhero +##rier +worms +helm +wren +##duction +##hc +expo +##rator +hq +unfamiliar +antony +prevents +acceleration +fiercely +mari +painfully +calculations +cheaper +ign +clifton +irvine +davenport +mozambique +##np +pierced +##evich +wonders +##wig +##cate +##iling +crusade +ware +##uel +enzymes +reasonably +mls +##coe +mater +ambition +bunny +eliot +kernel +##fin +asphalt +headmaster +torah +aden +lush +pins +waived +##care +##yas +joao +substrate +enforce +##grad +##ules +alvarez +selections +epidemic +tempted +##bit +bremen +translates +ensured +waterfront +29th +forrest +manny +malone +kramer +reigning +cookies +simpler +absorption +205 +engraved +##ffy +evaluated +1778 +haze +146 +comforting +crossover +##abe +thorn +##rift +##imo +##pop +suppression +fatigue +cutter +##tr +201 +wurttemberg +##orf +enforced +hovering +proprietary +gb +samurai +syllable +ascent +lacey +tick +lars +tractor +merchandise +rep +bouncing +defendants +##yre +huntington +##ground +##oko +standardized +##hor +##hima +assassinated +nu +predecessors +rainy +liar +assurance +lyrical +##uga +secondly +flattened +ios +parameter +undercover +##mity +bordeaux +punish +ridges +markers +exodus +inactive +hesitate +debbie +nyc +pledge +savoy +nagar +offset +organist +##tium +hesse +marin +converting +##iver +diagram +propulsion +pu +validity +reverted +supportive +##dc +ministries +clans +responds +proclamation +##inae +##ø +##rea +ein +pleading +patriot +sf +birch +islanders +strauss +hates +##dh +brandenburg +concession +rd +##ob +1900s +killings +textbook +antiquity +cinematography +wharf +embarrassing +setup +creed +farmland +inequality +centred +signatures +fallon +370 +##ingham +##uts +ceylon +gazing +directive +laurie +##tern +globally +##uated +##dent +allah +excavation +threads +##cross +148 +frantically +icc +utilize +determines +respiratory +thoughtful +receptions +##dicate +merging +chandra +seine +147 +builders +builds +diagnostic +dev +visibility +goddamn +analyses +dhaka +cho +proves +chancel +concurrent +curiously +canadians +pumped +restoring +1850s +turtles +jaguar +sinister +spinal +traction +declan +vows +1784 +glowed +capitalism +swirling +install +universidad +##lder +##oat +soloist +##genic +##oor +coincidence +beginnings +nissan +dip +resorts +caucasus +combustion +infectious +##eno +pigeon +serpent +##itating +conclude +masked +salad +jew +##gr +surreal +toni +##wc +harmonica +151 +##gins +##etic +##coat +fishermen +intending +bravery +##wave +klaus +titan +wembley +taiwanese +ransom +40th +incorrect +hussein +eyelids +jp +cooke +dramas +utilities +##etta +##print +eisenhower +principally +granada +lana +##rak +openings +concord +##bl +bethany +connie +morality +sega +##mons +##nard +earnings +##kara +##cine +wii +communes +##rel +coma +composing +softened +severed +grapes +##17 +nguyen +analyzed +warlord +hubbard +heavenly +behave +slovenian +##hit +##ony +hailed +filmmakers +trance +caldwell +skye +unrest +coward +likelihood +##aging +bern +sci +taliban +honolulu +propose +##wang +1700 +browser +imagining +cobra +contributes +dukes +instinctively +conan +violinist +##ores +accessories +gradual +##amp +quotes +sioux +##dating +undertake +intercepted +sparkling +compressed +139 +fungus +tombs +haley +imposing +rests +degradation +lincolnshire +retailers +wetlands +tulsa +distributor +dungeon +nun 
+greenhouse +convey +atlantis +aft +exits +oman +dresser +lyons +##sti +joking +eddy +judgement +omitted +digits +##cts +##game +juniors +##rae +cents +stricken +une +##ngo +wizards +weir +breton +nan +technician +fibers +liking +royalty +##cca +154 +persia +terribly +magician +##rable +##unt +vance +cafeteria +booker +camille +warmer +##static +consume +cavern +gaps +compass +contemporaries +foyer +soothing +graveyard +maj +plunged +blush +##wear +cascade +demonstrates +ordinance +##nov +boyle +##lana +rockefeller +shaken +banjo +izzy +##ense +breathless +vines +##32 +##eman +alterations +chromosome +dwellings +feudal +mole +153 +catalonia +relics +tenant +mandated +##fm +fridge +hats +honesty +patented +raul +heap +cruisers +accusing +enlightenment +infants +wherein +chatham +contractors +zen +affinity +hc +osborne +piston +156 +traps +maturity +##rana +lagos +##zal +peering +##nay +attendant +dealers +protocols +subset +prospects +biographical +##cre +artery +##zers +insignia +nuns +endured +##eration +recommend +schwartz +serbs +berger +cromwell +crossroads +##ctor +enduring +clasped +grounded +##bine +marseille +twitched +abel +choke +https +catalyst +moldova +italians +##tist +disastrous +wee +##oured +##nti +wwf +nope +##piration +##asa +expresses +thumbs +167 +##nza +coca +1781 +cheating +##ption +skipped +sensory +heidelberg +spies +satan +dangers +semifinal +202 +bohemia +whitish +confusing +shipbuilding +relies +surgeons +landings +ravi +baku +moor +suffix +alejandro +##yana +litre +upheld +##unk +rajasthan +##rek +coaster +insists +posture +scenarios +etienne +favoured +appoint +transgender +elephants +poked +greenwood +defences +fulfilled +militant +somali +1758 +chalk +potent +##ucci +migrants +wink +assistants +nos +restriction +activism +niger +##ario +colon +shaun +##sat +daphne +##erated +swam +congregations +reprise +considerations +magnet +playable +xvi +##р +overthrow +tobias +knob +chavez +coding +##mers +propped +katrina +orient +newcomer +##suke +temperate +##pool +farmhouse +interrogation +##vd +committing +##vert +forthcoming +strawberry +joaquin +macau +ponds +shocking +siberia +##cellular +chant +contributors +##nant +##ologists +sped +absorb +hail +1782 +spared +##hore +barbados +karate +opus +originates +saul +##xie +evergreen +leaped +##rock +correlation +exaggerated +weekday +unification +bump +tracing +brig +afb +pathways +utilizing +##ners +mod +mb +disturbance +kneeling +##stad +##guchi +100th +pune +##thy +decreasing +168 +manipulation +miriam +academia +ecosystem +occupational +rbi +##lem +rift +##14 +rotary +stacked +incorporation +awakening +generators +guerrero +racist +##omy +cyber +derivatives +culminated +allie +annals +panzer +sainte +wikipedia +pops +zu +austro +##vate +algerian +politely +nicholson +mornings +educate +tastes +thrill +dartmouth +##gating +db +##jee +regan +differing +concentrating +choreography +divinity +##media +pledged +alexandre +routing +gregor +madeline +##idal +apocalypse +##hora +gunfire +culminating +elves +fined +liang +lam +programmed +tar +guessing +transparency +gabrielle +##gna +cancellation +flexibility +##lining +accession +shea +stronghold +nets +specializes +##rgan +abused +hasan +sgt +ling +exceeding +##₄ +admiration +supermarket +##ark +photographers +specialised +tilt +resonance +hmm +perfume +380 +sami +threatens +garland +botany +guarding +boiled +greet +puppy +russo +supplier +wilmington +vibrant +vijay +##bius +paralympic +grumbled +paige +faa +licking +margins +hurricanes +##gong +fest +grenade +ripping 
+##uz +counseling +weigh +##sian +needles +wiltshire +edison +costly +##not +fulton +tramway +redesigned +staffordshire +cache +gasping +watkins +sleepy +candidacy +##group +monkeys +timeline +throbbing +##bid +##sos +berth +uzbekistan +vanderbilt +bothering +overturned +ballots +gem +##iger +sunglasses +subscribers +hooker +compelling +ang +exceptionally +saloon +stab +##rdi +carla +terrifying +rom +##vision +coil +##oids +satisfying +vendors +31st +mackay +deities +overlooked +ambient +bahamas +felipe +olympia +whirled +botanist +advertised +tugging +##dden +disciples +morales +unionist +rites +foley +morse +motives +creepy +##₀ +soo +##sz +bargain +highness +frightening +turnpike +tory +reorganization +##cer +depict +biographer +##walk +unopposed +manifesto +##gles +institut +emile +accidental +kapoor +##dam +kilkenny +cortex +lively +##13 +romanesque +jain +shan +cannons +##ood +##ske +petrol +echoing +amalgamated +disappears +cautious +proposes +sanctions +trenton +##ر +flotilla +aus +contempt +tor +canary +cote +theirs +##hun +conceptual +deleted +fascinating +paso +blazing +elf +honourable +hutchinson +##eiro +##outh +##zin +surveyor +tee +amidst +wooded +reissue +intro +##ono +cobb +shelters +newsletter +hanson +brace +encoding +confiscated +dem +caravan +marino +scroll +melodic +cows +imam +##adi +##aneous +northward +searches +biodiversity +cora +310 +roaring +##bers +connell +theologian +halo +compose +pathetic +unmarried +dynamo +##oot +az +calculation +toulouse +deserves +humour +nr +forgiveness +tam +undergone +martyr +pamela +myths +whore +counselor +hicks +290 +heavens +battleship +electromagnetic +##bbs +stellar +establishments +presley +hopped +##chin +temptation +90s +wills +nas +##yuan +nhs +##nya +seminars +##yev +adaptations +gong +asher +lex +indicator +sikh +tobago +cites +goin +##yte +satirical +##gies +characterised +correspond +bubbles +lure +participates +##vid +eruption +skate +therapeutic +1785 +canals +wholesale +defaulted +sac +460 +petit +##zzled +virgil +leak +ravens +256 +portraying +##yx +ghetto +creators +dams +portray +vicente +##rington +fae +namesake +bounty +##arium +joachim +##ota +##iser +aforementioned +axle +snout +depended +dismantled +reuben +480 +##ibly +gallagher +##lau +##pd +earnest +##ieu +##iary +inflicted +objections +##llar +asa +gritted +##athy +jericho +##sea +##was +flick +underside +ceramics +undead +substituted +195 +eastward +undoubtedly +wheeled +chimney +##iche +guinness +cb +##ager +siding +##bell +traitor +baptiste +disguised +inauguration +149 +tipperary +choreographer +perched +warmed +stationary +eco +##ike +##ntes +bacterial +##aurus +flores +phosphate +##core +attacker +invaders +alvin +intersects +a1 +indirectly +immigrated +businessmen +cornelius +valves +narrated +pill +sober +ul +nationale +monastic +applicants +scenery +##jack +161 +motifs +constitutes +cpu +##osh +jurisdictions +sd +tuning +irritation +woven +##uddin +fertility +gao +##erie +antagonist +impatient +glacial +hides +boarded +denominations +interception +##jas +cookie +nicola +##tee +algebraic +marquess +bahn +parole +buyers +bait +turbines +paperwork +bestowed +natasha +renee +oceans +purchases +157 +vaccine +215 +##tock +fixtures +playhouse +integrate +jai +oswald +intellectuals +##cky +booked +nests +mortimer +##isi +obsession +sept +##gler +##sum +440 +scrutiny +simultaneous +squinted +##shin +collects +oven +shankar +penned +remarkably +##я +slips +luggage +spectral +1786 +collaborations +louie +consolidation +##ailed +##ivating +420 +hoover 
+blackpool +harness +ignition +vest +tails +belmont +mongol +skinner +##nae +visually +mage +derry +##tism +##unce +stevie +transitional +##rdy +redskins +drying +prep +prospective +##21 +annoyance +oversee +##loaded +fills +##books +##iki +announces +fda +scowled +respects +prasad +mystic +tucson +##vale +revue +springer +bankrupt +1772 +aristotle +salvatore +habsburg +##geny +dal +natal +nut +pod +chewing +darts +moroccan +walkover +rosario +lenin +punjabi +##ße +grossed +scattering +wired +invasive +hui +polynomial +corridors +wakes +gina +portrays +##cratic +arid +retreating +erich +irwin +sniper +##dha +linen +lindsey +maneuver +butch +shutting +socio +bounce +commemorative +postseason +jeremiah +pines +275 +mystical +beads +bp +abbas +furnace +bidding +consulted +assaulted +empirical +rubble +enclosure +sob +weakly +cancel +polly +yielded +##emann +curly +prediction +battered +70s +vhs +jacqueline +render +sails +barked +detailing +grayson +riga +sloane +raging +##yah +herbs +bravo +##athlon +alloy +giggle +imminent +suffers +assumptions +waltz +##itate +accomplishments +##ited +bathing +remixed +deception +prefix +##emia +deepest +##tier +##eis +balkan +frogs +##rong +slab +##pate +philosophers +peterborough +grains +imports +dickinson +rwanda +##atics +1774 +dirk +lan +tablets +##rove +clone +##rice +caretaker +hostilities +mclean +##gre +regimental +treasures +norms +impose +tsar +tango +diplomacy +variously +complain +192 +recognise +arrests +1779 +celestial +pulitzer +##dus +bing +libretto +##moor +adele +splash +##rite +expectation +lds +confronts +##izer +spontaneous +harmful +wedge +entrepreneurs +buyer +##ope +bilingual +translate +rugged +conner +circulated +uae +eaton +##gra +##zzle +lingered +lockheed +vishnu +reelection +alonso +##oom +joints +yankee +headline +cooperate +heinz +laureate +invading +##sford +echoes +scandinavian +##dham +hugging +vitamin +salute +micah +hind +trader +##sper +radioactive +##ndra +militants +poisoned +ratified +remark +campeonato +deprived +wander +prop +##dong +outlook +##tani +##rix +##eye +chiang +darcy +##oping +mandolin +spice +statesman +babylon +182 +walled +forgetting +afro +##cap +158 +giorgio +buffer +##polis +planetary +##gis +overlap +terminals +kinda +centenary +##bir +arising +manipulate +elm +ke +1770 +ak +##tad +chrysler +mapped +moose +pomeranian +quad +macarthur +assemblies +shoreline +recalls +stratford +##rted +noticeable +##evic +imp +##rita +##sque +accustomed +supplying +tents +disgusted +vogue +sipped +filters +khz +reno +selecting +luftwaffe +mcmahon +tyne +masterpiece +carriages +collided +dunes +exercised +flare +remembers +muzzle +##mobile +heck +##rson +burgess +lunged +middleton +boycott +bilateral +##sity +hazardous +lumpur +multiplayer +spotlight +jackets +goldman +liege +porcelain +rag +waterford +benz +attracts +hopeful +battling +ottomans +kensington +baked +hymns +cheyenne +lattice +levine +borrow +polymer +clashes +michaels +monitored +commitments +denounced +##25 +##von +cavity +##oney +hobby +akin +##holders +futures +intricate +cornish +patty +##oned +illegally +dolphin +##lag +barlow +yellowish +maddie +apologized +luton +plagued +##puram +nana +##rds +sway +fanny +łodz +##rino +psi +suspicions +hanged +##eding +initiate +charlton +##por +nak +competent +235 +analytical +annex +wardrobe +reservations +##rma +sect +162 +fairfax +hedge +piled +buckingham +uneven +bauer +simplicity +snyder +interpret +accountability +donors +moderately +byrd +continents +##cite +##max +disciple +hr +jamaican +ping 
+nominees +##uss +mongolian +diver +attackers +eagerly +ideological +pillows +miracles +apartheid +revolver +sulfur +clinics +moran +163 +##enko +ile +katy +rhetoric +##icated +chronology +recycling +##hrer +elongated +mughal +pascal +profiles +vibration +databases +domination +##fare +##rant +matthias +digest +rehearsal +polling +weiss +initiation +reeves +clinging +flourished +impress +ngo +##hoff +##ume +buckley +symposium +rhythms +weed +emphasize +transforming +##taking +##gence +##yman +accountant +analyze +flicker +foil +priesthood +voluntarily +decreases +##80 +##hya +slater +sv +charting +mcgill +##lde +moreno +##iu +besieged +zur +robes +##phic +admitting +api +deported +turmoil +peyton +earthquakes +##ares +nationalists +beau +clair +brethren +interrupt +welch +curated +galerie +requesting +164 +##ested +impending +steward +viper +##vina +complaining +beautifully +brandy +foam +nl +1660 +##cake +alessandro +punches +laced +explanations +##lim +attribute +clit +reggie +discomfort +##cards +smoothed +whales +##cene +adler +countered +duffy +disciplinary +widening +recipe +reliance +conducts +goats +gradient +preaching +##shaw +matilda +quasi +striped +meridian +cannabis +cordoba +certificates +##agh +##tering +graffiti +hangs +pilgrims +repeats +##ych +revive +urine +etat +##hawk +fueled +belts +fuzzy +susceptible +##hang +mauritius +salle +sincere +beers +hooks +##cki +arbitration +entrusted +advise +sniffed +seminar +junk +donnell +processors +principality +strapped +celia +mendoza +everton +fortunes +prejudice +starving +reassigned +steamer +##lund +tuck +evenly +foreman +##ffen +dans +375 +envisioned +slit +##xy +baseman +liberia +rosemary +##weed +electrified +periodically +potassium +stride +contexts +sperm +slade +mariners +influx +bianca +subcommittee +##rane +spilling +icao +estuary +##nock +delivers +iphone +##ulata +isa +mira +bohemian +dessert +##sbury +welcoming +proudly +slowing +##chs +musee +ascension +russ +##vian +waits +##psy +africans +exploit +##morphic +gov +eccentric +crab +peck +##ull +entrances +formidable +marketplace +groom +bolted +metabolism +patton +robbins +courier +payload +endure +##ifier +andes +refrigerator +##pr +ornate +##uca +ruthless +illegitimate +masonry +strasbourg +bikes +adobe +##³ +apples +quintet +willingly +niche +bakery +corpses +energetic +##cliffe +##sser +##ards +177 +centimeters +centro +fuscous +cretaceous +rancho +##yde +andrei +telecom +tottenham +oasis +ordination +vulnerability +presiding +corey +cp +penguins +sims +##pis +malawi +piss +##48 +correction +##cked +##ffle +##ryn +countdown +detectives +psychiatrist +psychedelic +dinosaurs +blouse +##get +choi +vowed +##oz +randomly +##pol +49ers +scrub +blanche +bruins +dusseldorf +##using +unwanted +##ums +212 +dominique +elevations +headlights +om +laguna +##oga +1750 +famously +ignorance +shrewsbury +##aine +ajax +breuning +che +confederacy +greco +overhaul +##screen +paz +skirts +disagreement +cruelty +jagged +phoebe +shifter +hovered +viruses +##wes +mandy +##lined +##gc +landlord +squirrel +dashed +##ι +ornamental +gag +wally +grange +literal +spurs +undisclosed +proceeding +yin +##text +billie +orphan +spanned +humidity +indy +weighted +presentations +explosions +lucian +##tary +vaughn +hindus +##anga +##hell +psycho +171 +daytona +protects +efficiently +rematch +sly +tandem +##oya +rebranded +impaired +hee +metropolis +peach +godfrey +diaspora +ethnicity +prosperous +gleaming +dar +grossing +playback +##rden +stripe +pistols +##tain +births +labelled +##cating +172 
+rudy +alba +##onne +aquarium +hostility +##gb +##tase +shudder +sumatra +hardest +lakers +consonant +creeping +demos +homicide +capsule +zeke +liberties +expulsion +pueblo +##comb +trait +transporting +##ddin +##neck +##yna +depart +gregg +mold +ledge +hangar +oldham +playboy +termination +analysts +gmbh +romero +##itic +insist +cradle +filthy +brightness +slash +shootout +deposed +bordering +##truct +isis +microwave +tumbled +sheltered +cathy +werewolves +messy +andersen +convex +clapped +clinched +satire +wasting +edo +vc +rufus +##jak +mont +##etti +poznan +##keeping +restructuring +transverse +##rland +azerbaijani +slovene +gestures +roommate +choking +shear +##quist +vanguard +oblivious +##hiro +disagreed +baptism +##lich +coliseum +##aceae +salvage +societe +cory +locke +relocation +relying +versailles +ahl +swelling +##elo +cheerful +##word +##edes +gin +sarajevo +obstacle +diverted +##nac +messed +thoroughbred +fluttered +utrecht +chewed +acquaintance +assassins +dispatch +mirza +##wart +nike +salzburg +swell +yen +##gee +idle +ligue +samson +##nds +##igh +playful +spawned +##cise +tease +##case +burgundy +##bot +stirring +skeptical +interceptions +marathi +##dies +bedrooms +aroused +pinch +##lik +preferences +tattoos +buster +digitally +projecting +rust +##ital +kitten +priorities +addison +pseudo +##guard +dusk +icons +sermon +##psis +##iba +bt +##lift +##xt +ju +truce +rink +##dah +##wy +defects +psychiatry +offences +calculate +glucose +##iful +##rized +##unda +francaise +##hari +richest +warwickshire +carly +1763 +purity +redemption +lending +##cious +muse +bruises +cerebral +aero +carving +##name +preface +terminology +invade +monty +##int +anarchist +blurred +##iled +rossi +treats +guts +shu +foothills +ballads +undertaking +premise +cecilia +affiliates +blasted +conditional +wilder +minors +drone +rudolph +buffy +swallowing +horton +attested +##hop +rutherford +howell +primetime +livery +penal +##bis +minimize +hydro +wrecked +wrought +palazzo +##gling +cans +vernacular +friedman +nobleman +shale +walnut +danielle +##ection +##tley +sears +##kumar +chords +lend +flipping +streamed +por +dracula +gallons +sacrifices +gamble +orphanage +##iman +mckenzie +##gible +boxers +daly +##balls +##ان +208 +##ific +##rative +##iq +exploited +slated +##uity +circling +hillary +pinched +goldberg +provost +campaigning +lim +piles +ironically +jong +mohan +successors +usaf +##tem +##ught +autobiographical +haute +preserves +##ending +acquitted +comparisons +203 +hydroelectric +gangs +cypriot +torpedoes +rushes +chrome +derive +bumps +instability +fiat +pets +##mbe +silas +dye +reckless +settler +##itation +info +heats +##writing +176 +canonical +maltese +fins +mushroom +stacy +aspen +avid +##kur +##loading +vickers +gaston +hillside +statutes +wilde +gail +kung +sabine +comfortably +motorcycles +##rgo +169 +pneumonia +fetch +##sonic +axel +faintly +parallels +##oop +mclaren +spouse +compton +interdisciplinary +miner +##eni +181 +clamped +##chal +##llah +separates +versa +##mler +scarborough +labrador +##lity +##osing +rutgers +hurdles +como +166 +burt +divers +##100 +wichita +cade +coincided +##erson +bruised +mla +##pper +vineyard +##ili +##brush +notch +mentioning +jase +hearted +kits +doe +##acle +pomerania +##ady +ronan +seizure +pavel +problematic +##zaki +domenico +##ulin +catering +penelope +dependence +parental +emilio +ministerial +atkinson +##bolic +clarkson +chargers +colby +grill +peeked +arises +summon +##aged +fools +##grapher +faculties +qaeda +##vial +garner +refurbished 
+##hwa +geelong +disasters +nudged +bs +shareholder +lori +algae +reinstated +rot +##ades +##nous +invites +stainless +183 +inclusive +##itude +diocesan +til +##icz +denomination +##xa +benton +floral +registers +##ider +##erman +##kell +absurd +brunei +guangzhou +hitter +retaliation +##uled +##eve +blanc +nh +consistency +contamination +##eres +##rner +dire +palermo +broadcasters +diaries +inspire +vols +brewer +tightening +ky +mixtape +hormone +##tok +stokes +##color +##dly +##ssi +pg +##ometer +##lington +sanitation +##tility +intercontinental +apps +##adt +¹⁄₂ +cylinders +economies +favourable +unison +croix +gertrude +odyssey +vanity +dangling +##logists +upgrades +dice +middleweight +practitioner +##ight +206 +henrik +parlor +orion +angered +lac +python +blurted +##rri +sensual +intends +swings +angled +##phs +husky +attain +peerage +precinct +textiles +cheltenham +shuffled +dai +confess +tasting +bhutan +##riation +tyrone +segregation +abrupt +ruiz +##rish +smirked +blackwell +confidential +browning +amounted +##put +vase +scarce +fabulous +raided +staple +guyana +unemployed +glider +shay +##tow +carmine +troll +intervene +squash +superstar +##uce +cylindrical +len +roadway +researched +handy +##rium +##jana +meta +lao +declares +##rring +##tadt +##elin +##kova +willem +shrubs +napoleonic +realms +skater +qi +volkswagen +##ł +tad +hara +archaeologist +awkwardly +eerie +##kind +wiley +##heimer +##24 +titus +organizers +cfl +crusaders +lama +usb +vent +enraged +thankful +occupants +maximilian +##gaard +possessing +textbooks +##oran +collaborator +quaker +##ulo +avalanche +mono +silky +straits +isaiah +mustang +surged +resolutions +potomac +descend +cl +kilograms +plato +strains +saturdays +##olin +bernstein +##ype +holstein +ponytail +##watch +belize +conversely +heroine +perpetual +##ylus +charcoal +piedmont +glee +negotiating +backdrop +prologue +##jah +##mmy +pasadena +climbs +ramos +sunni +##holm +##tner +##tri +anand +deficiency +hertfordshire +stout +##avi +aperture +orioles +##irs +doncaster +intrigued +bombed +coating +otis +##mat +cocktail +##jit +##eto +amir +arousal +sar +##proof +##act +##ories +dixie +pots +##bow +whereabouts +159 +##fted +drains +bullying +cottages +scripture +coherent +fore +poe +appetite +##uration +sampled +##ators +##dp +derrick +rotor +jays +peacock +installment +##rro +advisors +##coming +rodeo +scotch +##mot +##db +##fen +##vant +ensued +rodrigo +dictatorship +martyrs +twenties +##н +towed +incidence +marta +rainforest +sai +scaled +##cles +oceanic +qualifiers +symphonic +mcbride +dislike +generalized +aubrey +colonization +##iation +##lion +##ssing +disliked +lublin +salesman +##ulates +spherical +whatsoever +sweating +avalon +contention +punt +severity +alderman +atari +##dina +##grant +##rop +scarf +seville +vertices +annexation +fairfield +fascination +inspiring +launches +palatinate +regretted +##rca +feral +##iom +elk +nap +olsen +reddy +yong +##leader +##iae +garment +transports +feng +gracie +outrage +viceroy +insides +##esis +breakup +grady +organizer +softer +grimaced +222 +murals +galicia +arranging +vectors +##rsten +bas +##sb +##cens +sloan +##eka +bitten +ara +fender +nausea +bumped +kris +banquet +comrades +detector +persisted +##llan +adjustment +endowed +cinemas +##shot +sellers +##uman +peek +epa +kindly +neglect +simpsons +talon +mausoleum +runaway +hangul +lookout +##cic +rewards +coughed +acquainted +chloride +##ald +quicker +accordion +neolithic +##qa +artemis +coefficient +lenny +pandora +tx +##xed +ecstasy +litter +segunda 
+chairperson +gemma +hiss +rumor +vow +nasal +antioch +compensate +patiently +transformers +##eded +judo +morrow +penis +posthumous +philips +bandits +husbands +denote +flaming +##any +##phones +langley +yorker +1760 +walters +##uo +##kle +gubernatorial +fatty +samsung +leroy +outlaw +##nine +unpublished +poole +jakob +##ᵢ +##ₙ +crete +distorted +superiority +##dhi +intercept +crust +mig +claus +crashes +positioning +188 +stallion +301 +frontal +armistice +##estinal +elton +aj +encompassing +camel +commemorated +malaria +woodward +calf +cigar +penetrate +##oso +willard +##rno +##uche +illustrate +amusing +convergence +noteworthy +##lma +##rva +journeys +realise +manfred +##sable +410 +##vocation +hearings +fiance +##posed +educators +provoked +adjusting +##cturing +modular +stockton +paterson +vlad +rejects +electors +selena +maureen +##tres +uber +##rce +swirled +##num +proportions +nanny +pawn +naturalist +parma +apostles +awoke +ethel +wen +##bey +monsoon +overview +##inating +mccain +rendition +risky +adorned +##ih +equestrian +germain +nj +conspicuous +confirming +##yoshi +shivering +##imeter +milestone +rumours +flinched +bounds +smacked +token +##bei +lectured +automobiles +##shore +impacted +##iable +nouns +nero +##leaf +ismail +prostitute +trams +##lace +bridget +sud +stimulus +impressions +reins +revolves +##oud +##gned +giro +honeymoon +##swell +criterion +##sms +##uil +libyan +prefers +##osition +211 +preview +sucks +accusation +bursts +metaphor +diffusion +tolerate +faye +betting +cinematographer +liturgical +specials +bitterly +humboldt +##ckle +flux +rattled +##itzer +archaeologists +odor +authorised +marshes +discretion +##ов +alarmed +archaic +inverse +##leton +explorers +##pine +drummond +tsunami +woodlands +##minate +##tland +booklet +insanity +owning +insert +crafted +calculus +##tore +receivers +##bt +stung +##eca +##nched +prevailing +travellers +eyeing +lila +graphs +##borne +178 +julien +##won +morale +adaptive +therapist +erica +cw +libertarian +bowman +pitches +vita +##ional +crook +##ads +##entation +caledonia +mutiny +##sible +1840s +automation +##ß +flock +##pia +ironic +pathology +##imus +remarried +##22 +joker +withstand +energies +##att +shropshire +hostages +madeleine +tentatively +conflicting +mateo +recipes +euros +ol +mercenaries +nico +##ndon +albuquerque +augmented +mythical +bel +freud +##child +cough +##lica +365 +freddy +lillian +genetically +nuremberg +calder +209 +bonn +outdoors +paste +suns +urgency +vin +restraint +tyson +##cera +##selle +barrage +bethlehem +kahn +##par +mounts +nippon +barony +happier +ryu +makeshift +sheldon +blushed +castillo +barking +listener +taped +bethel +fluent +headlines +pornography +rum +disclosure +sighing +mace +doubling +gunther +manly +##plex +rt +interventions +physiological +forwards +emerges +##tooth +##gny +compliment +rib +recession +visibly +barge +faults +connector +exquisite +prefect +##rlin +patio +##cured +elevators +brandt +italics +pena +173 +wasp +satin +ea +botswana +graceful +respectable +##jima +##rter +##oic +franciscan +generates +##dl +alfredo +disgusting +##olate +##iously +sherwood +warns +cod +promo +cheryl +sino +##ة +##escu +twitch +##zhi +brownish +thom +ortiz +##dron +densely +##beat +carmel +reinforce +##bana +187 +anastasia +downhill +vertex +contaminated +remembrance +harmonic +homework +##sol +fiancee +gears +olds +angelica +loft +ramsay +quiz +colliery +sevens +##cape +autism +##hil +walkway +##boats +ruben +abnormal +ounce +khmer +##bbe +zachary +bedside +morphology +punching 
+##olar +sparrow +convinces +##35 +hewitt +queer +remastered +rods +mabel +solemn +notified +lyricist +symmetric +##xide +174 +encore +passports +wildcats +##uni +baja +##pac +mildly +##ease +bleed +commodity +mounds +glossy +orchestras +##omo +damian +prelude +ambitions +##vet +awhile +remotely +##aud +asserts +imply +##iques +distinctly +modelling +remedy +##dded +windshield +dani +xiao +##endra +audible +powerplant +1300 +invalid +elemental +acquisitions +##hala +immaculate +libby +plata +smuggling +ventilation +denoted +minh +##morphism +430 +differed +dion +kelley +lore +mocking +sabbath +spikes +hygiene +drown +runoff +stylized +tally +liberated +aux +interpreter +righteous +aba +siren +reaper +pearce +millie +##cier +##yra +gaius +##iso +captures +##ttering +dorm +claudio +##sic +benches +knighted +blackness +##ored +discount +fumble +oxidation +routed +##ς +novak +perpendicular +spoiled +fracture +splits +##urt +pads +topology +##cats +axes +fortunate +offenders +protestants +esteem +221 +broadband +convened +frankly +hound +prototypes +isil +facilitated +keel +##sher +sahara +awaited +bubba +orb +prosecutors +186 +hem +520 +##xing +relaxing +remnant +romney +sorted +slalom +stefano +ulrich +##active +exemption +folder +pauses +foliage +hitchcock +epithet +204 +criticisms +##aca +ballistic +brody +hinduism +chaotic +youths +equals +##pala +pts +thicker +analogous +capitalist +improvised +overseeing +sinatra +ascended +beverage +##tl +straightforward +##kon +curran +##west +bois +325 +induce +surveying +emperors +sax +unpopular +##kk +cartoonist +fused +##mble +unto +##yuki +localities +##cko +##ln +darlington +slain +academie +lobbying +sediment +puzzles +##grass +defiance +dickens +manifest +tongues +alumnus +arbor +coincide +184 +appalachian +mustafa +examiner +cabaret +traumatic +yves +bracelet +draining +heroin +magnum +baths +odessa +consonants +mitsubishi +##gua +kellan +vaudeville +##fr +joked +null +straps +probation +##ław +ceded +interfaces +##pas +##zawa +blinding +viet +224 +rothschild +museo +640 +huddersfield +##vr +tactic +##storm +brackets +dazed +incorrectly +##vu +reg +glazed +fearful +manifold +benefited +irony +##sun +stumbling +##rte +willingness +balkans +mei +wraps +##aba +injected +##lea +gu +syed +harmless +##hammer +bray +takeoff +poppy +timor +cardboard +astronaut +purdue +weeping +southbound +cursing +stalls +diagonal +##neer +lamar +bryce +comte +weekdays +harrington +##uba +negatively +##see +lays +grouping +##cken +##henko +affirmed +halle +modernist +##lai +hodges +smelling +aristocratic +baptized +dismiss +justification +oilers +##now +coupling +qin +snack +healer +##qing +gardener +layla +battled +formulated +stephenson +gravitational +##gill +##jun +1768 +granny +coordinating +suites +##cd +##ioned +monarchs +##cote +##hips +sep +blended +apr +barrister +deposition +fia +mina +policemen +paranoid +##pressed +churchyard +covert +crumpled +creep +abandoning +tr +transmit +conceal +barr +understands +readiness +spire +##cology +##enia +##erry +610 +startling +unlock +vida +bowled +slots +##nat +##islav +spaced +trusting +admire +rig +##ink +slack +##70 +mv +207 +casualty +##wei +classmates +##odes +##rar +##rked +amherst +furnished +evolve +foundry +menace +mead +##lein +flu +wesleyan +##kled +monterey +webber +##vos +wil +##mith +##на +bartholomew +justices +restrained +##cke +amenities +191 +mediated +sewage +trenches +ml +mainz +##thus +1800s +##cula +##inski +caine +bonding +213 +converts +spheres +superseded +marianne +crypt +sweaty +ensign 
+historia +##br +spruce +##post +##ask +forks +thoughtfully +yukon +pamphlet +ames +##uter +karma +##yya +bryn +negotiation +sighs +incapable +##mbre +##ntial +actresses +taft +##mill +luce +prevailed +##amine +1773 +motionless +envoy +testify +investing +sculpted +instructors +provence +kali +cullen +horseback +##while +goodwin +##jos +gaa +norte +##ldon +modify +wavelength +abd +214 +skinned +sprinter +forecast +scheduling +marries +squared +tentative +##chman +boer +##isch +bolts +swap +fisherman +assyrian +impatiently +guthrie +martins +murdoch +194 +tanya +nicely +dolly +lacy +med +##45 +syn +decks +fashionable +millionaire +##ust +surfing +##ml +##ision +heaved +tammy +consulate +attendees +routinely +197 +fuse +saxophonist +backseat +malaya +##lord +scowl +tau +##ishly +193 +sighted +steaming +##rks +303 +911 +##holes +##hong +ching +##wife +bless +conserved +jurassic +stacey +unix +zion +chunk +rigorous +blaine +198 +peabody +slayer +dismay +brewers +nz +##jer +det +##glia +glover +postwar +int +penetration +sylvester +imitation +vertically +airlift +heiress +knoxville +viva +##uin +390 +macon +##rim +##fighter +##gonal +janice +##orescence +##wari +marius +belongings +leicestershire +196 +blanco +inverted +preseason +sanity +sobbing +##due +##elt +##dled +collingwood +regeneration +flickering +shortest +##mount +##osi +feminism +##lat +sherlock +cabinets +fumbled +northbound +precedent +snaps +##mme +researching +##akes +guillaume +insights +manipulated +vapor +neighbour +sap +gangster +frey +f1 +stalking +scarcely +callie +barnett +tendencies +audi +doomed +assessing +slung +panchayat +ambiguous +bartlett +##etto +distributing +violating +wolverhampton +##hetic +swami +histoire +##urus +liable +pounder +groin +hussain +larsen +popping +surprises +##atter +vie +curt +##station +mute +relocate +musicals +authorization +richter +##sef +immortality +tna +bombings +##press +deteriorated +yiddish +##acious +robbed +colchester +cs +pmid +ao +verified +balancing +apostle +swayed +recognizable +oxfordshire +retention +nottinghamshire +contender +judd +invitational +shrimp +uhf +##icient +cleaner +longitudinal +tanker +##mur +acronym +broker +koppen +sundance +suppliers +##gil +4000 +clipped +fuels +petite +##anne +landslide +helene +diversion +populous +landowners +auspices +melville +quantitative +##xes +ferries +nicky +##llus +doo +haunting +roche +carver +downed +unavailable +##pathy +approximation +hiroshima +##hue +garfield +valle +comparatively +keyboardist +traveler +##eit +congestion +calculating +subsidiaries +##bate +serb +modernization +fairies +deepened +ville +averages +##lore +inflammatory +tonga +##itch +co₂ +squads +##hea +gigantic +serum +enjoyment +retailer +verona +35th +cis +##phobic +magna +technicians +##vati +arithmetic +##sport +levin +##dation +amtrak +chow +sienna +##eyer +backstage +entrepreneurship +##otic +learnt +tao +##udy +worcestershire +formulation +baggage +hesitant +bali +sabotage +##kari +barren +enhancing +murmur +pl +freshly +putnam +syntax +aces +medicines +resentment +bandwidth +##sier +grins +chili +guido +##sei +framing +implying +gareth +lissa +genevieve +pertaining +admissions +geo +thorpe +proliferation +sato +bela +analyzing +parting +##gor +awakened +##isman +huddled +secrecy +##kling +hush +gentry +540 +dungeons +##ego +coasts +##utz +sacrificed +##chule +landowner +mutually +prevalence +programmer +adolescent +disrupted +seaside +gee +trusts +vamp +georgie +##nesian +##iol +schedules +sindh +##market +etched +hm +sparse +bey +beaux 
+scratching +gliding +unidentified +216 +collaborating +gems +jesuits +oro +accumulation +shaping +mbe +anal +##xin +231 +enthusiasts +newscast +##egan +janata +dewey +parkinson +179 +ankara +biennial +towering +dd +inconsistent +950 +##chet +thriving +terminate +cabins +furiously +eats +advocating +donkey +marley +muster +phyllis +leiden +##user +grassland +glittering +iucn +loneliness +217 +memorandum +armenians +##ddle +popularized +rhodesia +60s +lame +##illon +sans +bikini +header +orbits +##xx +##finger +##ulator +sharif +spines +biotechnology +strolled +naughty +yates +##wire +fremantle +milo +##mour +abducted +removes +##atin +humming +wonderland +##chrome +##ester +hume +pivotal +##rates +armand +grams +believers +elector +rte +apron +bis +scraped +##yria +endorsement +initials +##llation +eps +dotted +hints +buzzing +emigration +nearer +##tom +indicators +##ulu +coarse +neutron +protectorate +##uze +directional +exploits +pains +loire +1830s +proponents +guggenheim +rabbits +ritchie +305 +hectare +inputs +hutton +##raz +verify +##ako +boilers +longitude +##lev +skeletal +yer +emilia +citrus +compromised +##gau +pokemon +prescription +paragraph +eduard +cadillac +attire +categorized +kenyan +weddings +charley +##bourg +entertain +monmouth +##lles +nutrients +davey +mesh +incentive +practised +ecosystems +kemp +subdued +overheard +##rya +bodily +maxim +##nius +apprenticeship +ursula +##fight +lodged +rug +silesian +unconstitutional +patel +inspected +coyote +unbeaten +##hak +34th +disruption +convict +parcel +##cl +##nham +collier +implicated +mallory +##iac +##lab +susannah +winkler +##rber +shia +phelps +sediments +graphical +robotic +##sner +adulthood +mart +smoked +##isto +kathryn +clarified +##aran +divides +convictions +oppression +pausing +burying +##mt +federico +mathias +eileen +##tana +kite +hunched +##acies +189 +##atz +disadvantage +liza +kinetic +greedy +paradox +yokohama +dowager +trunks +ventured +##gement +gupta +vilnius +olaf +##thest +crimean +hopper +##ej +progressively +arturo +mouthed +arrondissement +##fusion +rubin +simulcast +oceania +##orum +##stra +##rred +busiest +intensely +navigator +cary +##vine +##hini +##bies +fife +rowe +rowland +posing +insurgents +shafts +lawsuits +activate +conor +inward +culturally +garlic +265 +##eering +eclectic +##hui +##kee +##nl +furrowed +vargas +meteorological +rendezvous +##aus +culinary +commencement +##dition +quota +##notes +mommy +salaries +overlapping +mule +##iology +##mology +sums +wentworth +##isk +##zione +mainline +subgroup +##illy +hack +plaintiff +verdi +bulb +differentiation +engagements +multinational +supplemented +bertrand +caller +regis +##naire +##sler +##arts +##imated +blossom +propagation +kilometer +viaduct +vineyards +##uate +beckett +optimization +golfer +songwriters +seminal +semitic +thud +volatile +evolving +ridley +##wley +trivial +distributions +scandinavia +jiang +##ject +wrestled +insistence +##dio +emphasizes +napkin +##ods +adjunct +rhyme +##ricted +##eti +hopeless +surrounds +tremble +32nd +smoky +##ntly +oils +medicinal +padded +steer +wilkes +219 +255 +concessions +hue +uniquely +blinded +landon +yahoo +##lane +hendrix +commemorating +dex +specify +chicks +##ggio +intercity +1400 +morley +##torm +highlighting +##oting +pang +oblique +stalled +##liner +flirting +newborn +1769 +bishopric +shaved +232 +currie +##ush +dharma +spartan +##ooped +favorites +smug +novella +sirens +abusive +creations +espana +##lage +paradigm +semiconductor +sheen +##rdo +##yen +##zak +nrl +renew +##pose +##tur 
+adjutant +marches +norma +##enity +ineffective +weimar +grunt +##gat +lordship +plotting +expenditure +infringement +lbs +refrain +av +mimi +mistakenly +postmaster +1771 +##bara +ras +motorsports +tito +199 +subjective +##zza +bully +stew +##kaya +prescott +1a +##raphic +##zam +bids +styling +paranormal +reeve +sneaking +exploding +katz +akbar +migrant +syllables +indefinitely +##ogical +destroys +replaces +applause +##phine +pest +##fide +218 +articulated +bertie +##thing +##cars +##ptic +courtroom +crowley +aesthetics +cummings +tehsil +hormones +titanic +dangerously +##ibe +stadion +jaenelle +auguste +ciudad +##chu +mysore +partisans +##sio +lucan +philipp +##aly +debating +henley +interiors +##rano +##tious +homecoming +beyonce +usher +henrietta +prepares +weeds +##oman +ely +plucked +##pire +##dable +luxurious +##aq +artifact +password +pasture +juno +maddy +minsk +##dder +##ologies +##rone +assessments +martian +royalist +1765 +examines +##mani +##rge +nino +223 +parry +scooped +relativity +##eli +##uting +##cao +congregational +noisy +traverse +##agawa +strikeouts +nickelodeon +obituary +transylvania +binds +depictions +polk +trolley +##yed +##lard +breeders +##under +dryly +hokkaido +1762 +strengths +stacks +bonaparte +connectivity +neared +prostitutes +stamped +anaheim +gutierrez +sinai +##zzling +bram +fresno +madhya +##86 +proton +##lena +##llum +##phon +reelected +wanda +##anus +##lb +ample +distinguishing +##yler +grasping +sermons +tomato +bland +stimulation +avenues +##eux +spreads +scarlett +fern +pentagon +assert +baird +chesapeake +ir +calmed +distortion +fatalities +##olis +correctional +pricing +##astic +##gina +prom +dammit +ying +collaborate +##chia +welterweight +33rd +pointer +substitution +bonded +umpire +communicating +multitude +paddle +##obe +federally +intimacy +##insky +betray +ssr +##lett +##lean +##lves +##therapy +airbus +##tery +functioned +ud +bearer +biomedical +netflix +##hire +##nca +condom +brink +ik +##nical +macy +##bet +flap +gma +experimented +jelly +lavender +##icles +##ulia +munro +##mian +##tial +rye +##rle +60th +gigs +hottest +rotated +predictions +fuji +bu +##erence +##omi +barangay +##fulness +##sas +clocks +##rwood +##liness +cereal +roe +wight +decker +uttered +babu +onion +xml +forcibly +##df +petra +sarcasm +hartley +peeled +storytelling +##42 +##xley +##ysis +##ffa +fibre +kiel +auditor +fig +harald +greenville +##berries +geographically +nell +quartz +##athic +cemeteries +##lr +crossings +nah +holloway +reptiles +chun +sichuan +snowy +660 +corrections +##ivo +zheng +ambassadors +blacksmith +fielded +fluids +hardcover +turnover +medications +melvin +academies +##erton +ro +roach +absorbing +spaniards +colton +##founded +outsider +espionage +kelsey +245 +edible +##ulf +dora +establishes +##sham +##tries +contracting +##tania +cinematic +costello +nesting +##uron +connolly +duff +##nology +mma +##mata +fergus +sexes +gi +optics +spectator +woodstock +banning +##hee +##fle +differentiate +outfielder +refinery +226 +312 +gerhard +horde +lair +drastically +##udi +landfall +##cheng +motorsport +odi +##achi +predominant +quay +skins +##ental +edna +harshly +complementary +murdering +##aves +wreckage +##90 +ono +outstretched +lennox +munitions +galen +reconcile +470 +scalp +bicycles +gillespie +questionable +rosenberg +guillermo +hostel +jarvis +kabul +volvo +opium +yd +##twined +abuses +decca +outpost +##cino +sensible +neutrality +##64 +ponce +anchorage +atkins +turrets +inadvertently +disagree +libre +vodka +reassuring +weighs +##yal 
+glide +jumper +ceilings +repertory +outs +stain +##bial +envy +##ucible +smashing +heightened +policing +hyun +mixes +lai +prima +##ples +celeste +##bina +lucrative +intervened +kc +manually +##rned +stature +staffed +bun +bastards +nairobi +priced +##auer +thatcher +##kia +tripped +comune +##ogan +##pled +brasil +incentives +emanuel +hereford +musica +##kim +benedictine +biennale +##lani +eureka +gardiner +rb +knocks +sha +##ael +##elled +##onate +efficacy +ventura +masonic +sanford +maize +leverage +##feit +capacities +santana +##aur +novelty +vanilla +##cter +##tour +benin +##oir +##rain +neptune +drafting +tallinn +##cable +humiliation +##boarding +schleswig +fabian +bernardo +liturgy +spectacle +sweeney +pont +routledge +##tment +cosmos +ut +hilt +sleek +universally +##eville +##gawa +typed +##dry +favors +allegheny +glaciers +##rly +recalling +aziz +##log +parasite +requiem +auf +##berto +##llin +illumination +##breaker +##issa +festivities +bows +govern +vibe +vp +333 +sprawled +larson +pilgrim +bwf +leaping +##rts +##ssel +alexei +greyhound +hoarse +##dler +##oration +seneca +##cule +gaping +##ulously +##pura +cinnamon +##gens +##rricular +craven +fantasies +houghton +engined +reigned +dictator +supervising +##oris +bogota +commentaries +unnatural +fingernails +spirituality +tighten +##tm +canadiens +protesting +intentional +cheers +sparta +##ytic +##iere +##zine +widen +belgarath +controllers +dodd +iaaf +navarre +##ication +defect +squire +steiner +whisky +##mins +560 +inevitably +tome +##gold +chew +##uid +##lid +elastic +##aby +streaked +alliances +jailed +regal +##ined +##phy +czechoslovak +narration +absently +##uld +bluegrass +guangdong +quran +criticizing +hose +hari +##liest +##owa +skier +streaks +deploy +##lom +raft +bose +dialed +huff +##eira +haifa +simplest +bursting +endings +ib +sultanate +##titled +franks +whitman +ensures +sven +##ggs +collaborators +forster +organising +ui +banished +napier +injustice +teller +layered +thump +##otti +roc +battleships +evidenced +fugitive +sadie +robotics +##roud +equatorial +geologist +##iza +yielding +##bron +##sr +internationale +mecca +##diment +sbs +skyline +toad +uploaded +reflective +undrafted +lal +leafs +bayern +##dai +lakshmi +shortlisted +##stick +##wicz +camouflage +donate +af +christi +lau +##acio +disclosed +nemesis +1761 +assemble +straining +northamptonshire +tal +##asi +bernardino +premature +heidi +42nd +coefficients +galactic +reproduce +buzzed +sensations +zionist +monsieur +myrtle +##eme +archery +strangled +musically +viewpoint +antiquities +bei +trailers +seahawks +cured +pee +preferring +tasmanian +lange +sul +##mail +##working +colder +overland +lucivar +massey +gatherings +haitian +##smith +disapproval +flaws +##cco +##enbach +1766 +npr +##icular +boroughs +creole +forums +techno +1755 +dent +abdominal +streetcar +##eson +##stream +procurement +gemini +predictable +##tya +acheron +christoph +feeder +fronts +vendor +bernhard +jammu +tumors +slang +##uber +goaltender +twists +curving +manson +vuelta +mer +peanut +confessions +pouch +unpredictable +allowance +theodor +vascular +##factory +bala +authenticity +metabolic +coughing +nanjing +##cea +pembroke +##bard +splendid +36th +ff +hourly +##ahu +elmer +handel +##ivate +awarding +thrusting +dl +experimentation +##hesion +##46 +caressed +entertained +steak +##rangle +biologist +orphans +baroness +oyster +stepfather +##dridge +mirage +reefs +speeding +##31 +barons +1764 +227 +inhabit +preached +repealed +##tral +honoring +boogie +captives +administer +johanna 
+##imate +gel +suspiciously +1767 +sobs +##dington +backbone +hayward +garry +##folding +##nesia +maxi +##oof +##ppe +ellison +galileo +##stand +crimea +frenzy +amour +bumper +matrices +natalia +baking +garth +palestinians +##grove +smack +conveyed +ensembles +gardening +##manship +##rup +##stituting +1640 +harvesting +topography +jing +shifters +dormitory +##carriage +##lston +ist +skulls +##stadt +dolores +jewellery +sarawak +##wai +##zier +fences +christy +confinement +tumbling +credibility +fir +stench +##bria +##plication +##nged +##sam +virtues +##belt +marjorie +pba +##eem +##made +celebrates +schooner +agitated +barley +fulfilling +anthropologist +##pro +restrict +novi +regulating +##nent +padres +##rani +##hesive +loyola +tabitha +milky +olson +proprietor +crambidae +guarantees +intercollegiate +ljubljana +hilda +##sko +ignorant +hooded +##lts +sardinia +##lidae +##vation +frontman +privileged +witchcraft +##gp +jammed +laude +poking +##than +bracket +amazement +yunnan +##erus +maharaja +linnaeus +264 +commissioning +milano +peacefully +##logies +akira +rani +regulator +##36 +grasses +##rance +luzon +crows +compiler +gretchen +seaman +edouard +tab +buccaneers +ellington +hamlets +whig +socialists +##anto +directorial +easton +mythological +##kr +##vary +rhineland +semantic +taut +dune +inventions +succeeds +##iter +replication +branched +##pired +jul +prosecuted +kangaroo +penetrated +##avian +middlesbrough +doses +bleak +madam +predatory +relentless +##vili +reluctance +##vir +hailey +crore +silvery +1759 +monstrous +swimmers +transmissions +hawthorn +informing +##eral +toilets +caracas +crouch +kb +##sett +295 +cartel +hadley +##aling +alexia +yvonne +##biology +cinderella +eton +superb +blizzard +stabbing +industrialist +maximus +##gm +##orus +groves +maud +clade +oversized +comedic +##bella +rosen +nomadic +fulham +montane +beverages +galaxies +redundant +swarm +##rot +##folia +##llis +buckinghamshire +fen +bearings +bahadur +##rom +gilles +phased +dynamite +faber +benoit +vip +##ount +##wd +booking +fractured +tailored +anya +spices +westwood +cairns +auditions +inflammation +steamed +##rocity +##acion +##urne +skyla +thereof +watford +torment +archdeacon +transforms +lulu +demeanor +fucked +serge +##sor +mckenna +minas +entertainer +##icide +caress +originate +residue +##sty +1740 +##ilised +##org +beech +##wana +subsidies +##ghton +emptied +gladstone +ru +firefighters +voodoo +##rcle +het +nightingale +tamara +edmond +ingredient +weaknesses +silhouette +285 +compatibility +withdrawing +hampson +##mona +anguish +giggling +##mber +bookstore +##jiang +southernmost +tilting +##vance +bai +economical +rf +briefcase +dreadful +hinted +projections +shattering +totaling +##rogate +analogue +indicted +periodical +fullback +##dman +haynes +##tenberg +##ffs +##ishment +1745 +thirst +stumble +penang +vigorous +##ddling +##kor +##lium +octave +##ove +##enstein +##inen +##ones +siberian +##uti +cbn +repeal +swaying +##vington +khalid +tanaka +unicorn +otago +plastered +lobe +riddle +##rella +perch +##ishing +croydon +filtered +graeme +tripoli +##ossa +crocodile +##chers +sufi +mined +##tung +inferno +lsu +##phi +swelled +utilizes +£2 +cale +periodicals +styx +hike +informally +coop +lund +##tidae +ala +hen +qui +transformations +disposed +sheath +chickens +##cade +fitzroy +sas +silesia +unacceptable +odisha +1650 +sabrina +pe +spokane +ratios +athena +massage +shen +dilemma +##drum +##riz +##hul +corona +doubtful +niall +##pha +##bino +fines +cite +acknowledging +bangor +ballard 
+bathurst +##resh +huron +mustered +alzheimer +garments +kinase +tyre +warship +##cp +flashback +pulmonary +braun +cheat +kamal +cyclists +constructions +grenades +ndp +traveller +excuses +stomped +signalling +trimmed +futsal +mosques +relevance +##wine +wta +##23 +##vah +##lter +hoc +##riding +optimistic +##´s +deco +sim +interacting +rejecting +moniker +waterways +##ieri +##oku +mayors +gdansk +outnumbered +pearls +##ended +##hampton +fairs +totals +dominating +262 +notions +stairway +compiling +pursed +commodities +grease +yeast +##jong +carthage +griffiths +residual +amc +contraction +laird +sapphire +##marine +##ivated +amalgamation +dissolve +inclination +lyle +packaged +altitudes +suez +canons +graded +lurched +narrowing +boasts +guise +wed +enrico +##ovsky +rower +scarred +bree +cub +iberian +protagonists +bargaining +proposing +trainers +voyages +vans +fishes +##aea +##ivist +##verance +encryption +artworks +kazan +sabre +cleopatra +hepburn +rotting +supremacy +mecklenburg +##brate +burrows +hazards +outgoing +flair +organizes +##ctions +scorpion +##usions +boo +234 +chevalier +dunedin +slapping +##34 +ineligible +pensions +##38 +##omic +manufactures +emails +bismarck +238 +weakening +blackish +ding +mcgee +quo +##rling +northernmost +xx +manpower +greed +sampson +clicking +##ange +##horpe +##inations +##roving +torre +##eptive +##moral +symbolism +38th +asshole +meritorious +outfits +splashed +biographies +sprung +astros +##tale +302 +737 +filly +raoul +nw +tokugawa +linden +clubhouse +##apa +tracts +romano +##pio +putin +tags +##note +chained +dickson +gunshot +moe +gunn +rashid +##tails +zipper +##bas +##nea +contrasted +##ply +##udes +plum +pharaoh +##pile +aw +comedies +ingrid +sandwiches +subdivisions +1100 +mariana +nokia +kamen +hz +delaney +veto +herring +##words +possessive +outlines +##roup +siemens +stairwell +rc +gallantry +messiah +palais +yells +233 +zeppelin +##dm +bolivar +##cede +smackdown +mckinley +##mora +##yt +muted +geologic +finely +unitary +avatar +hamas +maynard +rees +bog +contrasting +##rut +liv +chico +disposition +pixel +##erate +becca +dmitry +yeshiva +narratives +##lva +##ulton +mercenary +sharpe +tempered +navigate +stealth +amassed +keynes +##lini +untouched +##rrie +havoc +lithium +##fighting +abyss +graf +southward +wolverine +balloons +implements +ngos +transitions +##icum +ambushed +concacaf +dormant +economists +##dim +costing +csi +rana +universite +boulders +verity +##llon +collin +mellon +misses +cypress +fluorescent +lifeless +spence +##ulla +crewe +shepard +pak +revelations +##م +jolly +gibbons +paw +##dro +##quel +freeing +##test +shack +fries +palatine +##51 +##hiko +accompaniment +cruising +recycled +##aver +erwin +sorting +synthesizers +dyke +realities +sg +strides +enslaved +wetland +##ghan +competence +gunpowder +grassy +maroon +reactors +objection +##oms +carlson +gearbox +macintosh +radios +shelton +##sho +clergyman +prakash +254 +mongols +trophies +oricon +228 +stimuli +twenty20 +cantonese +cortes +mirrored +##saurus +bhp +cristina +melancholy +##lating +enjoyable +nuevo +##wny +downfall +schumacher +##ind +banging +lausanne +rumbled +paramilitary +reflex +ax +amplitude +migratory +##gall +##ups +midi +barnard +lastly +sherry +##hp +##nall +keystone +##kra +carleton +slippery +##53 +coloring +foe +socket +otter +##rgos +mats +##tose +consultants +bafta +bison +topping +##km +490 +primal +abandonment +transplant +atoll +hideous +mort +pained +reproduced +tae +howling +##turn +unlawful +billionaire +hotter +poised +lansing 
+##chang +dinamo +retro +messing +nfc +domesday +##mina +blitz +timed +##athing +##kley +ascending +gesturing +##izations +signaled +tis +chinatown +mermaid +savanna +jameson +##aint +catalina +##pet +##hers +cochrane +cy +chatting +##kus +alerted +computation +mused +noelle +majestic +mohawk +campo +octagonal +##sant +##hend +241 +aspiring +##mart +comprehend +iona +paralyzed +shimmering +swindon +rhone +##eley +reputed +configurations +pitchfork +agitation +francais +gillian +lipstick +##ilo +outsiders +pontifical +resisting +bitterness +sewer +rockies +##edd +##ucher +misleading +1756 +exiting +galloway +##nging +risked +##heart +246 +commemoration +schultz +##rka +integrating +##rsa +poses +shrieked +##weiler +guineas +gladys +jerking +owls +goldsmith +nightly +penetrating +##unced +lia +##33 +ignited +betsy +##aring +##thorpe +follower +vigorously +##rave +coded +kiran +knit +zoology +tbilisi +##28 +##bered +repository +govt +deciduous +dino +growling +##bba +enhancement +unleashed +chanting +pussy +biochemistry +##eric +kettle +repression +toxicity +nrhp +##arth +##kko +##bush +ernesto +commended +outspoken +242 +mca +parchment +sms +kristen +##aton +bisexual +raked +glamour +navajo +a2 +conditioned +showcased +##hma +spacious +youthful +##esa +usl +appliances +junta +brest +layne +conglomerate +enchanted +chao +loosened +picasso +circulating +inspect +montevideo +##centric +##kti +piazza +spurred +##aith +bari +freedoms +poultry +stamford +lieu +##ect +indigo +sarcastic +bahia +stump +attach +dvds +frankenstein +lille +approx +scriptures +pollen +##script +nmi +overseen +##ivism +tides +proponent +newmarket +inherit +milling +##erland +centralized +##rou +distributors +credentials +drawers +abbreviation +##lco +##xon +downing +uncomfortably +ripe +##oes +erase +franchises +##ever +populace +##bery +##khar +decomposition +pleas +##tet +daryl +sabah +##stle +##wide +fearless +genie +lesions +annette +##ogist +oboe +appendix +nair +dripped +petitioned +maclean +mosquito +parrot +rpg +hampered +1648 +operatic +reservoirs +##tham +irrelevant +jolt +summarized +##fp +medallion +##taff +##− +clawed +harlow +narrower +goddard +marcia +bodied +fremont +suarez +altering +tempest +mussolini +porn +##isms +sweetly +oversees +walkers +solitude +grimly +shrines +hk +ich +supervisors +hostess +dietrich +legitimacy +brushes +expressive +##yp +dissipated +##rse +localized +systemic +##nikov +gettysburg +##js +##uaries +dialogues +muttering +251 +housekeeper +sicilian +discouraged +##frey +beamed +kaladin +halftime +kidnap +##amo +##llet +1754 +synonymous +depleted +instituto +insulin +reprised +##opsis +clashed +##ctric +interrupting +radcliffe +insisting +medici +1715 +ejected +playfully +turbulent +##47 +starvation +##rini +shipment +rebellious +petersen +verification +merits +##rified +cakes +##charged +1757 +milford +shortages +spying +fidelity +##aker +emitted +storylines +harvested +seismic +##iform +cheung +kilda +theoretically +barbie +lynx +##rgy +##tius +goblin +mata +poisonous +##nburg +reactive +residues +obedience +##евич +conjecture +##rac +401 +hating +sixties +kicker +moaning +motown +##bha +emancipation +neoclassical +##hering +consoles +ebert +professorship +##tures +sustaining +assaults +obeyed +affluent +incurred +tornadoes +##eber +##zow +emphasizing +highlanders +cheated +helmets +##ctus +internship +terence +bony +executions +legislators +berries +peninsular +tinged +##aco +1689 +amplifier +corvette +ribbons +lavish +pennant +##lander +worthless +##chfield +##forms +mariano 
+pyrenees +expenditures +##icides +chesterfield +mandir +tailor +39th +sergey +nestled +willed +aristocracy +devotees +goodnight +raaf +rumored +weaponry +remy +appropriations +harcourt +burr +riaa +##lence +limitation +unnoticed +guo +soaking +swamps +##tica +collapsing +tatiana +descriptive +brigham +psalm +##chment +maddox +##lization +patti +caliph +##aja +akron +injuring +serra +##ganj +basins +##sari +astonished +launcher +##church +hilary +wilkins +sewing +##sf +stinging +##fia +##ncia +underwood +startup +##ition +compilations +vibrations +embankment +jurist +##nity +bard +juventus +groundwater +kern +palaces +helium +boca +cramped +marissa +soto +##worm +jae +princely +##ggy +faso +bazaar +warmly +##voking +229 +pairing +##lite +##grate +##nets +wien +freaked +ulysses +rebirth +##alia +##rent +mummy +guzman +jimenez +stilled +##nitz +trajectory +tha +woken +archival +professions +##pts +##pta +hilly +shadowy +shrink +##bolt +norwood +glued +migrate +stereotypes +devoid +##pheus +625 +evacuate +horrors +infancy +gotham +knowles +optic +downloaded +sachs +kingsley +parramatta +darryl +mor +##onale +shady +commence +confesses +kan +##meter +##placed +marlborough +roundabout +regents +frigates +io +##imating +gothenburg +revoked +carvings +clockwise +convertible +intruder +##sche +banged +##ogo +vicky +bourgeois +##mony +dupont +footing +##gum +pd +##real +buckle +yun +penthouse +sane +720 +serviced +stakeholders +neumann +bb +##eers +comb +##gam +catchment +pinning +rallies +typing +##elles +forefront +freiburg +sweetie +giacomo +widowed +goodwill +worshipped +aspirations +midday +##vat +fishery +##trick +bournemouth +turk +243 +hearth +ethanol +guadalajara +murmurs +sl +##uge +afforded +scripted +##hta +wah +##jn +coroner +translucent +252 +memorials +puck +progresses +clumsy +##race +315 +candace +recounted +##27 +##slin +##uve +filtering +##mac +howl +strata +heron +leveled +##ays +dubious +##oja +##т +##wheel +citations +exhibiting +##laya +##mics +##pods +turkic +##lberg +injunction +##ennial +##mit +antibodies +##44 +organise +##rigues +cardiovascular +cushion +inverness +##zquez +dia +cocoa +sibling +##tman +##roid +expanse +feasible +tunisian +algiers +##relli +rus +bloomberg +dso +westphalia +bro +tacoma +281 +downloads +##ours +konrad +duran +##hdi +continuum +jett +compares +legislator +secession +##nable +##gues +##zuka +translating +reacher +##gley +##ła +aleppo +##agi +tc +orchards +trapping +linguist +versatile +drumming +postage +calhoun +superiors +##mx +barefoot +leary +##cis +ignacio +alfa +kaplan +##rogen +bratislava +mori +##vot +disturb +haas +313 +cartridges +gilmore +radiated +salford +tunic +hades +##ulsive +archeological +delilah +magistrates +auditioned +brewster +charters +empowerment +blogs +cappella +dynasties +iroquois +whipping +##krishna +raceway +truths +myra +weaken +judah +mcgregor +##horse +mic +refueling +37th +burnley +bosses +markus +premio +query +##gga +dunbar +##economic +darkest +lyndon +sealing +commendation +reappeared +##mun +addicted +ezio +slaughtered +satisfactory +shuffle +##eves +##thic +##uj +fortification +warrington +##otto +resurrected +fargo +mane +##utable +##lei +##space +foreword +ox +##aris +##vern +abrams +hua +##mento +sakura +##alo +uv +sentimental +##skaya +midfield +##eses +sturdy +scrolls +macleod +##kyu +entropy +##lance +mitochondrial +cicero +excelled +thinner +convoys +perceive +##oslav +##urable +systematically +grind +burkina +287 +##tagram +ops +##aman +guantanamo +##cloth +##tite +forcefully +wavy +##jou 
+pointless +##linger +##tze +layton +portico +superficial +clerical +outlaws +##hism +burials +muir +##inn +creditors +hauling +rattle +##leg +calais +monde +archers +reclaimed +dwell +wexford +hellenic +falsely +remorse +##tek +dough +furnishings +##uttered +gabon +neurological +novice +##igraphy +contemplated +pulpit +nightstand +saratoga +##istan +documenting +pulsing +taluk +##firmed +busted +marital +##rien +disagreements +wasps +##yes +hodge +mcdonnell +mimic +fran +pendant +dhabi +musa +##nington +congratulations +argent +darrell +concussion +losers +regrets +thessaloniki +reversal +donaldson +hardwood +thence +achilles +ritter +##eran +demonic +jurgen +prophets +goethe +eki +classmate +buff +##cking +yank +irrational +##inging +perished +seductive +qur +sourced +##crat +##typic +mustard +ravine +barre +horizontally +characterization +phylogenetic +boise +##dit +##runner +##tower +brutally +intercourse +seduce +##bbing +fay +ferris +ogden +amar +nik +unarmed +##inator +evaluating +kyrgyzstan +sweetness +##lford +##oki +mccormick +meiji +notoriety +stimulate +disrupt +figuring +instructional +mcgrath +##zoo +groundbreaking +##lto +flinch +khorasan +agrarian +bengals +mixer +radiating +##sov +ingram +pitchers +nad +tariff +##cript +tata +##codes +##emi +##ungen +appellate +lehigh +##bled +##giri +brawl +duct +texans +##ciation +##ropolis +skipper +speculative +vomit +doctrines +stresses +253 +davy +graders +whitehead +jozef +timely +cumulative +haryana +paints +appropriately +boon +cactus +##ales +##pid +dow +legions +##pit +perceptions +1730 +picturesque +##yse +periphery +rune +wr +##aha +celtics +sentencing +whoa +##erin +confirms +variance +425 +moines +mathews +spade +rave +m1 +fronted +fx +blending +alleging +reared +##gl +237 +##paper +grassroots +eroded +##free +##physical +directs +ordeal +##sław +accelerate +hacker +rooftop +##inia +lev +buys +cebu +devote +##lce +specialising +##ulsion +choreographed +repetition +warehouses +##ryl +paisley +tuscany +analogy +sorcerer +hash +huts +shards +descends +exclude +nix +chaplin +gaga +ito +vane +##drich +causeway +misconduct +limo +orchestrated +glands +jana +##kot +u2 +##mple +##sons +branching +contrasts +scoop +longed +##virus +chattanooga +##75 +syrup +cornerstone +##tized +##mind +##iaceae +careless +precedence +frescoes +##uet +chilled +consult +modelled +snatch +peat +##thermal +caucasian +humane +relaxation +spins +temperance +##lbert +occupations +lambda +hybrids +moons +mp3 +##oese +247 +rolf +societal +yerevan +ness +##ssler +befriended +mechanized +nominate +trough +boasted +cues +seater +##hom +bends +##tangle +conductors +emptiness +##lmer +eurasian +adriatic +tian +##cie +anxiously +lark +propellers +chichester +jock +ev +2a +##holding +credible +recounts +tori +loyalist +abduction +##hoot +##redo +nepali +##mite +ventral +tempting +##ango +##crats +steered +##wice +javelin +dipping +laborers +prentice +looming +titanium +##ː +badges +emir +tensor +##ntation +egyptians +rash +denies +hawthorne +lombard +showers +wehrmacht +dietary +trojan +##reus +welles +executing +horseshoe +lifeboat +##lak +elsa +infirmary +nearing +roberta +boyer +mutter +trillion +joanne +##fine +##oked +sinks +vortex +uruguayan +clasp +sirius +##block +accelerator +prohibit +sunken +byu +chronological +diplomats +ochreous +510 +symmetrical +1644 +maia +##tology +salts +reigns +atrocities +##ия +hess +bared +issn +##vyn +cater +saturated +##cycle +##isse +sable +voyager +dyer +yusuf +##inge +fountains +wolff +##39 +##nni +engraving +rollins 
+atheist +ominous +##ault +herr +chariot +martina +strung +##fell +##farlane +horrific +sahib +gazes +saetan +erased +ptolemy +##olic +flushing +lauderdale +analytic +##ices +530 +navarro +beak +gorilla +herrera +broom +guadalupe +raiding +sykes +311 +bsc +deliveries +1720 +invasions +carmichael +tajikistan +thematic +ecumenical +sentiments +onstage +##rians +##brand +##sume +catastrophic +flanks +molten +##arns +waller +aimee +terminating +##icing +alternately +##oche +nehru +printers +outraged +##eving +empires +template +banners +repetitive +za +##oise +vegetarian +##tell +guiana +opt +cavendish +lucknow +synthesized +##hani +##mada +finalized +##ctable +fictitious +mayoral +unreliable +##enham +embracing +peppers +rbis +##chio +##neo +inhibition +slashed +togo +orderly +embroidered +safari +salty +236 +barron +benito +totaled +##dak +pubs +simulated +caden +devin +tolkien +momma +welding +sesame +##ept +gottingen +hardness +630 +shaman +temeraire +620 +adequately +pediatric +##kit +ck +assertion +radicals +composure +cadence +seafood +beaufort +lazarus +mani +warily +cunning +kurdistan +249 +cantata +##kir +ares +##41 +##clusive +nape +townland +geared +insulted +flutter +boating +violate +draper +dumping +malmo +##hh +##romatic +firearm +alta +bono +obscured +##clave +exceeds +panorama +unbelievable +##train +preschool +##essed +disconnected +installing +rescuing +secretaries +accessibility +##castle +##drive +##ifice +##film +bouts +slug +waterway +mindanao +##buro +##ratic +halves +##ل +calming +liter +maternity +adorable +bragg +electrification +mcc +##dote +roxy +schizophrenia +##body +munoz +kaye +whaling +239 +mil +tingling +tolerant +##ago +unconventional +volcanoes +##finder +deportivo +##llie +robson +kaufman +neuroscience +wai +deportation +masovian +scraping +converse +##bh +hacking +bulge +##oun +administratively +yao +580 +amp +mammoth +booster +claremont +hooper +nomenclature +pursuits +mclaughlin +melinda +##sul +catfish +barclay +substrates +taxa +zee +originals +kimberly +packets +padma +##ality +borrowing +ostensibly +solvent +##bri +##genesis +##mist +lukas +shreveport +veracruz +##ь +##lou +##wives +cheney +tt +anatolia +hobbs +##zyn +cyclic +radiant +alistair +greenish +siena +dat +independents +##bation +conform +pieter +hyper +applicant +bradshaw +spores +telangana +vinci +inexpensive +nuclei +322 +jang +nme +soho +spd +##ign +cradled +receptionist +pow +##43 +##rika +fascism +##ifer +experimenting +##ading +##iec +##region +345 +jocelyn +maris +stair +nocturnal +toro +constabulary +elgin +##kker +msc +##giving +##schen +##rase +doherty +doping +sarcastically +batter +maneuvers +##cano +##apple +##gai +##git +intrinsic +##nst +##stor +1753 +showtime +cafes +gasps +lviv +ushered +##thed +fours +restart +astonishment +transmitting +flyer +shrugs +##sau +intriguing +cones +dictated +mushrooms +medial +##kovsky +##elman +escorting +gaped +##26 +godfather +##door +##sell +djs +recaptured +timetable +vila +1710 +3a +aerodrome +mortals +scientology +##orne +angelina +mag +convection +unpaid +insertion +intermittent +lego +##nated +endeavor +kota +pereira +##lz +304 +bwv +glamorgan +insults +agatha +fey +##cend +fleetwood +mahogany +protruding +steamship +zeta +##arty +mcguire +suspense +##sphere +advising +urges +##wala +hurriedly +meteor +gilded +inline +arroyo +stalker +##oge +excitedly +revered +##cure +earle +introductory +##break +##ilde +mutants +puff +pulses +reinforcement +##haling +curses +lizards +stalk +correlated +##fixed +fallout +macquarie +##unas 
+bearded +denton +heaving +802 +##ocation +winery +assign +dortmund +##lkirk +everest +invariant +charismatic +susie +##elling +bled +lesley +telegram +sumner +bk +##ogen +##к +wilcox +needy +colbert +duval +##iferous +##mbled +allotted +attends +imperative +##hita +replacements +hawker +##inda +insurgency +##zee +##eke +casts +##yla +680 +ives +transitioned +##pack +##powering +authoritative +baylor +flex +cringed +plaintiffs +woodrow +##skie +drastic +ape +aroma +unfolded +commotion +nt +preoccupied +theta +routines +lasers +privatization +wand +domino +ek +clenching +nsa +strategically +showered +bile +handkerchief +pere +storing +christophe +insulting +316 +nakamura +romani +asiatic +magdalena +palma +cruises +stripping +405 +konstantin +soaring +##berman +colloquially +forerunner +havilland +incarcerated +parasites +sincerity +##utus +disks +plank +saigon +##ining +corbin +homo +ornaments +powerhouse +##tlement +chong +fastened +feasibility +idf +morphological +usable +##nish +##zuki +aqueduct +jaguars +keepers +##flies +aleksandr +faust +assigns +ewing +bacterium +hurled +tricky +hungarians +integers +wallis +321 +yamaha +##isha +hushed +oblivion +aviator +evangelist +friars +##eller +monograph +ode +##nary +airplanes +labourers +charms +##nee +1661 +hagen +tnt +rudder +fiesta +transcript +dorothea +ska +inhibitor +maccabi +retorted +raining +encompassed +clauses +menacing +1642 +lineman +##gist +vamps +##ape +##dick +gloom +##rera +dealings +easing +seekers +##nut +##pment +helens +unmanned +##anu +##isson +basics +##amy +##ckman +adjustments +1688 +brutality +horne +##zell +sui +##55 +##mable +aggregator +##thal +rhino +##drick +##vira +counters +zoom +##01 +##rting +mn +montenegrin +packard +##unciation +##♭ +##kki +reclaim +scholastic +thugs +pulsed +##icia +syriac +quan +saddam +banda +kobe +blaming +buddies +dissent +##lusion +##usia +corbett +jaya +delle +erratic +lexie +##hesis +435 +amiga +hermes +##pressing +##leen +chapels +gospels +jamal +##uating +compute +revolving +warp +##sso +##thes +armory +##eras +##gol +antrim +loki +##kow +##asian +##good +##zano +braid +handwriting +subdistrict +funky +pantheon +##iculate +concurrency +estimation +improper +juliana +##his +newcomers +johnstone +staten +communicated +##oco +##alle +sausage +stormy +##stered +##tters +superfamily +##grade +acidic +collateral +tabloid +##oped +##rza +bladder +austen +##ellant +mcgraw +##hay +hannibal +mein +aquino +lucifer +wo +badger +boar +cher +christensen +greenberg +interruption +##kken +jem +244 +mocked +bottoms +cambridgeshire +##lide +sprawling +##bbly +eastwood +ghent +synth +##buck +advisers +##bah +nominally +hapoel +qu +daggers +estranged +fabricated +towels +vinnie +wcw +misunderstanding +anglia +nothin +unmistakable +##dust +##lova +chilly +marquette +truss +##edge +##erine +reece +##lty +##chemist +##connected +272 +308 +41st +bash +raion +waterfalls +##ump +##main +labyrinth +queue +theorist +##istle +bharatiya +flexed +soundtracks +rooney +leftist +patrolling +wharton +plainly +alleviate +eastman +schuster +topographic +engages +immensely +unbearable +fairchild +1620 +dona +lurking +parisian +oliveira +ia +indictment +hahn +bangladeshi +##aster +vivo +##uming +##ential +antonia +expects +indoors +kildare +harlan +##logue +##ogenic +##sities +forgiven +##wat +childish +tavi +##mide +##orra +plausible +grimm +successively +scooted +##bola +##dget +##rith +spartans +emery +flatly +azure +epilogue +##wark +flourish +##iny +##tracted +##overs +##oshi +bestseller +distressed +receipt 
+spitting +hermit +topological +##cot +drilled +subunit +francs +##layer +eel +##fk +##itas +octopus +footprint +petitions +ufo +##say +##foil +interfering +leaking +palo +##metry +thistle +valiant +##pic +narayan +mcpherson +##fast +gonzales +##ym +##enne +dustin +novgorod +solos +##zman +doin +##raph +##patient +##meyer +soluble +ashland +cuffs +carole +pendleton +whistling +vassal +##river +deviation +revisited +constituents +rallied +rotate +loomed +##eil +##nting +amateurs +augsburg +auschwitz +crowns +skeletons +##cona +bonnet +257 +dummy +globalization +simeon +sleeper +mandal +differentiated +##crow +##mare +milne +bundled +exasperated +talmud +owes +segregated +##feng +##uary +dentist +piracy +props +##rang +devlin +##torium +malicious +paws +##laid +dependency +##ergy +##fers +##enna +258 +pistons +rourke +jed +grammatical +tres +maha +wig +512 +ghostly +jayne +##achal +##creen +##ilis +##lins +##rence +designate +##with +arrogance +cambodian +clones +showdown +throttle +twain +##ception +lobes +metz +nagoya +335 +braking +##furt +385 +roaming +##minster +amin +crippled +##37 +##llary +indifferent +hoffmann +idols +intimidating +1751 +261 +influenza +memo +onions +1748 +bandage +consciously +##landa +##rage +clandestine +observes +swiped +tangle +##ener +##jected +##trum +##bill +##lta +hugs +congresses +josiah +spirited +##dek +humanist +managerial +filmmaking +inmate +rhymes +debuting +grimsby +ur +##laze +duplicate +vigor +##tf +republished +bolshevik +refurbishment +antibiotics +martini +methane +newscasts +royale +horizons +levant +iain +visas +##ischen +paler +##around +manifestation +snuck +alf +chop +futile +pedestal +rehab +##kat +bmg +kerman +res +fairbanks +jarrett +abstraction +saharan +##zek +1746 +procedural +clearer +kincaid +sash +luciano +##ffey +crunch +helmut +##vara +revolutionaries +##tute +creamy +leach +##mmon +1747 +permitting +nes +plight +wendell +##lese +contra +ts +clancy +ipa +mach +staples +autopsy +disturbances +nueva +karin +pontiac +##uding +proxy +venerable +haunt +leto +bergman +expands +##helm +wal +##pipe +canning +celine +cords +obesity +##enary +intrusion +planner +##phate +reasoned +sequencing +307 +harrow +##chon +##dora +marred +mcintyre +repay +tarzan +darting +248 +harrisburg +margarita +repulsed +##hur +##lding +belinda +hamburger +novo +compliant +runways +bingham +registrar +skyscraper +ic +cuthbert +improvisation +livelihood +##corp +##elial +admiring +##dened +sporadic +believer +casablanca +popcorn +##29 +asha +shovel +##bek +##dice +coiled +tangible +##dez +casper +elsie +resin +tenderness +rectory +##ivision +avail +sonar +##mori +boutique +##dier +guerre +bathed +upbringing +vaulted +sandals +blessings +##naut +##utnant +1680 +306 +foxes +pia +corrosion +hesitantly +confederates +crystalline +footprints +shapiro +tirana +valentin +drones +45th +microscope +shipments +texted +inquisition +wry +guernsey +unauthorized +resigning +760 +ripple +schubert +stu +reassure +felony +##ardo +brittle +koreans +##havan +##ives +dun +implicit +tyres +##aldi +##lth +magnolia +##ehan +##puri +##poulos +aggressively +fei +gr +familiarity +##poo +indicative +##trust +fundamentally +jimmie +overrun +395 +anchors +moans +##opus +britannia +armagh +##ggle +purposely +seizing +##vao +bewildered +mundane +avoidance +cosmopolitan +geometridae +quartermaster +caf +415 +chatter +engulfed +gleam +purge +##icate +juliette +jurisprudence +guerra +revisions +##bn +casimir +brew +##jm +1749 +clapton +cloudy +conde +hermitage +278 +simulations +torches 
+vincenzo +matteo +##rill +hidalgo +booming +westbound +accomplishment +tentacles +unaffected +##sius +annabelle +flopped +sloping +##litz +dreamer +interceptor +vu +##loh +consecration +copying +messaging +breaker +climates +hospitalized +1752 +torino +afternoons +winfield +witnessing +##teacher +breakers +choirs +sawmill +coldly +##ege +sipping +haste +uninhabited +conical +bibliography +pamphlets +severn +edict +##oca +deux +illnesses +grips +##pl +rehearsals +sis +thinkers +tame +##keepers +1690 +acacia +reformer +##osed +##rys +shuffling +##iring +##shima +eastbound +ionic +rhea +flees +littered +##oum +rocker +vomiting +groaning +champ +overwhelmingly +civilizations +paces +sloop +adoptive +##tish +skaters +##vres +aiding +mango +##joy +nikola +shriek +##ignon +pharmaceuticals +##mg +tuna +calvert +gustavo +stocked +yearbook +##urai +##mana +computed +subsp +riff +hanoi +kelvin +hamid +moors +pastures +summons +jihad +nectar +##ctors +bayou +untitled +pleasing +vastly +republics +intellect +##η +##ulio +##tou +crumbling +stylistic +sb +##ی +consolation +frequented +h₂o +walden +widows +##iens +404 +##ignment +chunks +improves +288 +grit +recited +##dev +snarl +sociological +##arte +##gul +inquired +##held +bruise +clube +consultancy +homogeneous +hornets +multiplication +pasta +prick +savior +##grin +##kou +##phile +yoon +##gara +grimes +vanishing +cheering +reacting +bn +distillery +##quisite +##vity +coe +dockyard +massif +##jord +escorts +voss +##valent +byte +chopped +hawke +illusions +workings +floats +##koto +##vac +kv +annapolis +madden +##onus +alvaro +noctuidae +##cum +##scopic +avenge +steamboat +forte +illustrates +erika +##trip +570 +dew +nationalities +bran +manifested +thirsty +diversified +muscled +reborn +##standing +arson +##lessness +##dran +##logram +##boys +##kushima +##vious +willoughby +##phobia +286 +alsace +dashboard +yuki +##chai +granville +myspace +publicized +tricked +##gang +adjective +##ater +relic +reorganisation +enthusiastically +indications +saxe +##lassified +consolidate +iec +padua +helplessly +ramps +renaming +regulars +pedestrians +accents +convicts +inaccurate +lowers +mana +##pati +barrie +bjp +outta +someplace +berwick +flanking +invoked +marrow +sparsely +excerpts +clothed +rei +##ginal +wept +##straße +##vish +alexa +excel +##ptive +membranes +aquitaine +creeks +cutler +sheppard +implementations +ns +##dur +fragrance +budge +concordia +magnesium +marcelo +##antes +gladly +vibrating +##rral +##ggles +montrose +##omba +lew +seamus +1630 +cocky +##ament +##uen +bjorn +##rrick +fielder +fluttering +##lase +methyl +kimberley +mcdowell +reductions +barbed +##jic +##tonic +aeronautical +condensed +distracting +##promising +huffed +##cala +##sle +claudius +invincible +missy +pious +balthazar +ci +##lang +butte +combo +orson +##dication +myriad +1707 +silenced +##fed +##rh +coco +netball +yourselves +##oza +clarify +heller +peg +durban +etudes +offender +roast +blackmail +curvature +##woods +vile +309 +illicit +suriname +##linson +overture +1685 +bubbling +gymnast +tucking +##mming +##ouin +maldives +##bala +gurney +##dda +##eased +##oides +backside +pinto +jars +racehorse +tending +##rdial +baronetcy +wiener +duly +##rke +barbarian +cupping +flawed +##thesis +bertha +pleistocene +puddle +swearing +##nob +##tically +fleeting +prostate +amulet +educating +##mined +##iti +##tler +75th +jens +respondents +analytics +cavaliers +papacy +raju +##iente +##ulum +##tip +funnel +271 +disneyland +##lley +sociologist +##iam +2500 +faulkner +louvre +menon +##dson 
+276 +##ower +afterlife +mannheim +peptide +referees +comedians +meaningless +##anger +##laise +fabrics +hurley +renal +sleeps +##bour +##icle +breakout +kristin +roadside +animator +clover +disdain +unsafe +redesign +##urity +firth +barnsley +portage +reset +narrows +268 +commandos +expansive +speechless +tubular +##lux +essendon +eyelashes +smashwords +##yad +##bang +##claim +craved +sprinted +chet +somme +astor +wrocław +orton +266 +bane +##erving +##uing +mischief +##amps +##sund +scaling +terre +##xious +impairment +offenses +undermine +moi +soy +contiguous +arcadia +inuit +seam +##tops +macbeth +rebelled +##icative +##iot +590 +elaborated +frs +uniformed +##dberg +259 +powerless +priscilla +stimulated +980 +qc +arboretum +frustrating +trieste +bullock +##nified +enriched +glistening +intern +##adia +locus +nouvelle +ollie +ike +lash +starboard +ee +tapestry +headlined +hove +rigged +##vite +pollock +##yme +thrive +clustered +cas +roi +gleamed +olympiad +##lino +pressured +regimes +##hosis +##lick +ripley +##ophone +kickoff +gallon +rockwell +##arable +crusader +glue +revolutions +scrambling +1714 +grover +##jure +englishman +aztec +263 +contemplating +coven +ipad +preach +triumphant +tufts +##esian +rotational +##phus +328 +falkland +##brates +strewn +clarissa +rejoin +environmentally +glint +banded +drenched +moat +albanians +johor +rr +maestro +malley +nouveau +shaded +taxonomy +v6 +adhere +bunk +airfields +##ritan +1741 +encompass +remington +tran +##erative +amelie +mazda +friar +morals +passions +##zai +breadth +vis +##hae +argus +burnham +caressing +insider +rudd +##imov +##mini +##rso +italianate +murderous +textual +wainwright +armada +bam +weave +timer +##taken +##nh +fra +##crest +ardent +salazar +taps +tunis +##ntino +allegro +gland +philanthropic +##chester +implication +##optera +esq +judas +noticeably +wynn +##dara +inched +indexed +crises +villiers +bandit +royalties +patterned +cupboard +interspersed +accessory +isla +kendrick +entourage +stitches +##esthesia +headwaters +##ior +interlude +distraught +draught +1727 +##basket +biased +sy +transient +triad +subgenus +adapting +kidd +shortstop +##umatic +dimly +spiked +mcleod +reprint +nellie +pretoria +windmill +##cek +singled +##mps +273 +reunite +##orous +747 +bankers +outlying +##omp +##ports +##tream +apologies +cosmetics +patsy +##deh +##ocks +##yson +bender +nantes +serene +##nad +lucha +mmm +323 +##cius +##gli +cmll +coinage +nestor +juarez +##rook +smeared +sprayed +twitching +sterile +irina +embodied +juveniles +enveloped +miscellaneous +cancers +dq +gulped +luisa +crested +swat +donegal +ref +##anov +##acker +hearst +mercantile +##lika +doorbell +ua +vicki +##alla +##som +bilbao +psychologists +stryker +sw +horsemen +turkmenistan +wits +##national +anson +mathew +screenings +##umb +rihanna +##agne +##nessy +aisles +##iani +##osphere +hines +kenton +saskatoon +tasha +truncated +##champ +##itan +mildred +advises +fredrik +interpreting +inhibitors +##athi +spectroscopy +##hab +##kong +karim +panda +##oia +##nail +##vc +conqueror +kgb +leukemia +##dity +arrivals +cheered +pisa +phosphorus +shielded +##riated +mammal +unitarian +urgently +chopin +sanitary +##mission +spicy +drugged +hinges +##tort +tipping +trier +impoverished +westchester +##caster +267 +epoch +nonstop +##gman +##khov +aromatic +centrally +cerro +##tively +##vio +billions +modulation +sedimentary +283 +facilitating +outrageous +goldstein +##eak +##kt +ld +maitland +penultimate +pollard +##dance +fleets +spaceship +vertebrae +##nig +alcoholism +als 
+recital +##bham +##ference +##omics +m2 +##bm +trois +##tropical +##в +commemorates +##meric +marge +##raction +1643 +670 +cosmetic +ravaged +##ige +catastrophe +eng +##shida +albrecht +arterial +bellamy +decor +harmon +##rde +bulbs +synchronized +vito +easiest +shetland +shielding +wnba +##glers +##ssar +##riam +brianna +cumbria +##aceous +##rard +cores +thayer +##nsk +brood +hilltop +luminous +carts +keynote +larkin +logos +##cta +##ا +##mund +##quay +lilith +tinted +277 +wrestle +mobilization +##uses +sequential +siam +bloomfield +takahashi +274 +##ieving +presenters +ringo +blazed +witty +##oven +##ignant +devastation +haydn +harmed +newt +therese +##peed +gershwin +molina +rabbis +sudanese +001 +innate +restarted +##sack +##fus +slices +wb +##shah +enroll +hypothetical +hysterical +1743 +fabio +indefinite +warped +##hg +exchanging +525 +unsuitable +##sboro +gallo +1603 +bret +cobalt +homemade +##hunter +mx +operatives +##dhar +terraces +durable +latch +pens +whorls +##ctuated +##eaux +billing +ligament +succumbed +##gly +regulators +spawn +##brick +##stead +filmfare +rochelle +##nzo +1725 +circumstance +saber +supplements +##nsky +##tson +crowe +wellesley +carrot +##9th +##movable +primate +drury +sincerely +topical +##mad +##rao +callahan +kyiv +smarter +tits +undo +##yeh +announcements +anthologies +barrio +nebula +##islaus +##shaft +##tyn +bodyguards +2021 +assassinate +barns +emmett +scully +##mah +##yd +##eland +##tino +##itarian +demoted +gorman +lashed +prized +adventist +writ +##gui +alla +invertebrates +##ausen +1641 +amman +1742 +align +healy +redistribution +##gf +##rize +insulation +##drop +adherents +hezbollah +vitro +ferns +yanking +269 +php +registering +uppsala +cheerleading +confines +mischievous +tully +##ross +49th +docked +roam +stipulated +pumpkin +##bry +prompt +##ezer +blindly +shuddering +craftsmen +frail +scented +katharine +scramble +shaggy +sponge +helix +zaragoza +279 +##52 +43rd +backlash +fontaine +seizures +posse +cowan +nonfiction +telenovela +wwii +hammered +undone +##gpur +encircled +irs +##ivation +artefacts +oneself +searing +smallpox +##belle +##osaurus +shandong +breached +upland +blushing +rankin +infinitely +psyche +tolerated +docking +evicted +##col +unmarked +##lving +gnome +lettering +litres +musique +##oint +benevolent +##jal +blackened +##anna +mccall +racers +tingle +##ocene +##orestation +introductions +radically +292 +##hiff +##باد +1610 +1739 +munchen +plead +##nka +condo +scissors +##sight +##tens +apprehension +##cey +##yin +hallmark +watering +formulas +sequels +##llas +aggravated +bae +commencing +##building +enfield +prohibits +marne +vedic +civilized +euclidean +jagger +beforehand +blasts +dumont +##arney +##nem +740 +conversions +hierarchical +rios +simulator +##dya +##lellan +hedges +oleg +thrusts +shadowed +darby +maximize +1744 +gregorian +##nded +##routed +sham +unspecified +##hog +emory +factual +##smo +##tp +fooled +##rger +ortega +wellness +marlon +##oton +##urance +casket +keating +ley +enclave +##ayan +char +influencing +jia +##chenko +412 +ammonia +erebidae +incompatible +violins +cornered +##arat +grooves +astronauts +columbian +rampant +fabrication +kyushu +mahmud +vanish +##dern +mesopotamia +##lete +ict +##rgen +caspian +kenji +pitted +##vered +999 +grimace +roanoke +tchaikovsky +twinned +##analysis +##awan +xinjiang +arias +clemson +kazakh +sizable +1662 +##khand +##vard +plunge +tatum +vittorio +##nden +cholera +##dana +##oper +bracing +indifference +projectile +superliga +##chee +realises +upgrading +299 +porte 
+retribution +##vies +nk +stil +##resses +ama +bureaucracy +blackberry +bosch +testosterone +collapses +greer +##pathic +ioc +fifties +malls +##erved +bao +baskets +adolescents +siegfried +##osity +##tosis +mantra +detecting +existent +fledgling +##cchi +dissatisfied +gan +telecommunication +mingled +sobbed +6000 +controversies +outdated +taxis +##raus +fright +slams +##lham +##fect +##tten +detectors +fetal +tanned +##uw +fray +goth +olympian +skipping +mandates +scratches +sheng +unspoken +hyundai +tracey +hotspur +restrictive +##buch +americana +mundo +##bari +burroughs +diva +vulcan +##6th +distinctions +thumping +##ngen +mikey +sheds +fide +rescues +springsteen +vested +valuation +##ece +##ely +pinnacle +rake +sylvie +##edo +almond +quivering +##irus +alteration +faltered +##wad +51st +hydra +ticked +##kato +recommends +##dicated +antigua +arjun +stagecoach +wilfred +trickle +pronouns +##pon +aryan +nighttime +##anian +gall +pea +stitch +##hei +leung +milos +##dini +eritrea +nexus +starved +snowfall +kant +parasitic +cot +discus +hana +strikers +appleton +kitchens +##erina +##partisan +##itha +##vius +disclose +metis +##channel +1701 +tesla +##vera +fitch +1735 +blooded +##tila +decimal +##tang +##bai +cyclones +eun +bottled +peas +pensacola +basha +bolivian +crabs +boil +lanterns +partridge +roofed +1645 +necks +##phila +opined +patting +##kla +##lland +chuckles +volta +whereupon +##nche +devout +euroleague +suicidal +##dee +inherently +involuntary +knitting +nasser +##hide +puppets +colourful +courageous +southend +stills +miraculous +hodgson +richer +rochdale +ethernet +greta +uniting +prism +umm +##haya +##itical +##utation +deterioration +pointe +prowess +##ropriation +lids +scranton +billings +subcontinent +##koff +##scope +brute +kellogg +psalms +degraded +##vez +stanisław +##ructured +ferreira +pun +astonishing +gunnar +##yat +arya +prc +gottfried +##tight +excursion +##ographer +dina +##quil +##nare +huffington +illustrious +wilbur +gundam +verandah +##zard +naacp +##odle +constructive +fjord +kade +##naud +generosity +thrilling +baseline +cayman +frankish +plastics +accommodations +zoological +##fting +cedric +qb +motorized +##dome +##otted +squealed +tackled +canucks +budgets +situ +asthma +dail +gabled +grasslands +whimpered +writhing +judgments +##65 +minnie +pv +##carbon +bananas +grille +domes +monique +odin +maguire +markham +tierney +##estra +##chua +libel +poke +speedy +atrium +laval +notwithstanding +##edly +fai +kala +##sur +robb +##sma +listings +luz +supplementary +tianjin +##acing +enzo +jd +ric +scanner +croats +transcribed +##49 +arden +cv +##hair +##raphy +##lver +##uy +357 +seventies +staggering +alam +horticultural +hs +regression +timbers +blasting +##ounded +montagu +manipulating +##cit +catalytic +1550 +troopers +##meo +condemnation +fitzpatrick +##oire +##roved +inexperienced +1670 +castes +##lative +outing +314 +dubois +flicking +quarrel +ste +learners +1625 +iq +whistled +##class +282 +classify +tariffs +temperament +355 +folly +liszt +##yles +immersed +jordanian +ceasefire +apparel +extras +maru +fished +##bio +harta +stockport +assortment +craftsman +paralysis +transmitters +##cola +blindness +##wk +fatally +proficiency +solemnly +##orno +repairing +amore +groceries +ultraviolet +##chase +schoolhouse +##tua +resurgence +nailed +##otype +##× +ruse +saliva +diagrams +##tructing +albans +rann +thirties +1b +antennas +hilarious +cougars +paddington +stats +##eger +breakaway +ipod +reza +authorship +prohibiting +scoffed +##etz +##ttle +conscription 
+defected +trondheim +##fires +ivanov +keenan +##adan +##ciful +##fb +##slow +locating +##ials +##tford +cadiz +basalt +blankly +interned +rags +rattling +##tick +carpathian +reassured +sync +bum +guildford +iss +staunch +##onga +astronomers +sera +sofie +emergencies +susquehanna +##heard +duc +mastery +vh1 +williamsburg +bayer +buckled +craving +##khan +##rdes +bloomington +##write +alton +barbecue +##bians +justine +##hri +##ndt +delightful +smartphone +newtown +photon +retrieval +peugeot +hissing +##monium +##orough +flavors +lighted +relaunched +tainted +##games +##lysis +anarchy +microscopic +hopping +adept +evade +evie +##beau +inhibit +sinn +adjustable +hurst +intuition +wilton +cisco +44th +lawful +lowlands +stockings +thierry +##dalen +##hila +##nai +fates +prank +tb +maison +lobbied +provocative +1724 +4a +utopia +##qual +carbonate +gujarati +purcell +##rford +curtiss +##mei +overgrown +arenas +mediation +swallows +##rnik +respectful +turnbull +##hedron +##hope +alyssa +ozone +##ʻi +ami +gestapo +johansson +snooker +canteen +cuff +declines +empathy +stigma +##ags +##iner +##raine +taxpayers +gui +volga +##wright +##copic +lifespan +overcame +tattooed +enactment +giggles +##ador +##camp +barrington +bribe +obligatory +orbiting +peng +##enas +elusive +sucker +##vating +cong +hardship +empowered +anticipating +estrada +cryptic +greasy +detainees +planck +sudbury +plaid +dod +marriott +kayla +##ears +##vb +##zd +mortally +##hein +cognition +radha +319 +liechtenstein +meade +richly +argyle +harpsichord +liberalism +trumpets +lauded +tyrant +salsa +tiled +lear +promoters +reused +slicing +trident +##chuk +##gami +##lka +cantor +checkpoint +##points +gaul +leger +mammalian +##tov +##aar +##schaft +doha +frenchman +nirvana +##vino +delgado +headlining +##eron +##iography +jug +tko +1649 +naga +intersections +##jia +benfica +nawab +##suka +ashford +gulp +##deck +##vill +##rug +brentford +frazier +pleasures +dunne +potsdam +shenzhen +dentistry +##tec +flanagan +##dorff +##hear +chorale +dinah +prem +quezon +##rogated +relinquished +sutra +terri +##pani +flaps +##rissa +poly +##rnet +homme +aback +##eki +linger +womb +##kson +##lewood +doorstep +orthodoxy +threaded +westfield +##rval +dioceses +fridays +subsided +##gata +loyalists +##biotic +##ettes +letterman +lunatic +prelate +tenderly +invariably +souza +thug +winslow +##otide +furlongs +gogh +jeopardy +##runa +pegasus +##umble +humiliated +standalone +tagged +##roller +freshmen +klan +##bright +attaining +initiating +transatlantic +logged +viz +##uance +1723 +combatants +intervening +stephane +chieftain +despised +grazed +317 +cdc +galveston +godzilla +macro +simulate +##planes +parades +##esses +960 +##ductive +##unes +equator +overdose +##cans +##hosh +##lifting +joshi +epstein +sonora +treacherous +aquatics +manchu +responsive +##sation +supervisory +##christ +##llins +##ibar +##balance +##uso +kimball +karlsruhe +mab +##emy +ignores +phonetic +reuters +spaghetti +820 +almighty +danzig +rumbling +tombstone +designations +lured +outset +##felt +supermarkets +##wt +grupo +kei +kraft +susanna +##blood +comprehension +genealogy +##aghan +##verted +redding +##ythe +1722 +bowing +##pore +##roi +lest +sharpened +fulbright +valkyrie +sikhs +##unds +swans +bouquet +merritt +##tage +##venting +commuted +redhead +clerks +leasing +cesare +dea +hazy +##vances +fledged +greenfield +servicemen +##gical +armando +blackout +dt +sagged +downloadable +intra +potion +pods +##4th +##mism +xp +attendants +gambia +stale +##ntine +plump +asteroids 
+rediscovered +buds +flea +hive +##neas +1737 +classifications +debuts +##eles +olympus +scala +##eurs +##gno +##mute +hummed +sigismund +visuals +wiggled +await +pilasters +clench +sulfate +##ances +bellevue +enigma +trainee +snort +##sw +clouded +denim +##rank +##rder +churning +hartman +lodges +riches +sima +##missible +accountable +socrates +regulates +mueller +##cr +1702 +avoids +solids +himalayas +nutrient +pup +##jevic +squat +fades +nec +##lates +##pina +##rona +##ου +privateer +tequila +##gative +##mpton +apt +hornet +immortals +##dou +asturias +cleansing +dario +##rries +##anta +etymology +servicing +zhejiang +##venor +##nx +horned +erasmus +rayon +relocating +£10 +##bags +escalated +promenade +stubble +2010s +artisans +axial +liquids +mora +sho +yoo +##tsky +bundles +oldies +##nally +notification +bastion +##ths +sparkle +##lved +1728 +leash +pathogen +highs +##hmi +immature +880 +gonzaga +ignatius +mansions +monterrey +sweets +bryson +##loe +polled +regatta +brightest +pei +rosy +squid +hatfield +payroll +addict +meath +cornerback +heaviest +lodging +##mage +capcom +rippled +##sily +barnet +mayhem +ymca +snuggled +rousseau +##cute +blanchard +284 +fragmented +leighton +chromosomes +risking +##md +##strel +##utter +corinne +coyotes +cynical +hiroshi +yeomanry +##ractive +ebook +grading +mandela +plume +agustin +magdalene +##rkin +bea +femme +trafford +##coll +##lun +##tance +52nd +fourier +upton +##mental +camilla +gust +iihf +islamabad +longevity +##kala +feldman +netting +##rization +endeavour +foraging +mfa +orr +##open +greyish +contradiction +graz +##ruff +handicapped +marlene +tweed +oaxaca +spp +campos +miocene +pri +configured +cooks +pluto +cozy +pornographic +##entes +70th +fairness +glided +jonny +lynne +rounding +sired +##emon +##nist +remade +uncover +##mack +complied +lei +newsweek +##jured +##parts +##enting +##pg +293 +finer +guerrillas +athenian +deng +disused +stepmother +accuse +gingerly +seduction +521 +confronting +##walker +##going +gora +nostalgia +sabres +virginity +wrenched +##minated +syndication +wielding +eyre +##56 +##gnon +##igny +behaved +taxpayer +sweeps +##growth +childless +gallant +##ywood +amplified +geraldine +scrape +##ffi +babylonian +fresco +##rdan +##kney +##position +1718 +restricting +tack +fukuoka +osborn +selector +partnering +##dlow +318 +gnu +kia +tak +whitley +gables +##54 +##mania +mri +softness +immersion +##bots +##evsky +1713 +chilling +insignificant +pcs +##uis +elites +lina +purported +supplemental +teaming +##americana +##dding +##inton +proficient +rouen +##nage +##rret +niccolo +selects +##bread +fluffy +1621 +gruff +knotted +mukherjee +polgara +thrash +nicholls +secluded +smoothing +thru +corsica +loaf +whitaker +inquiries +##rrier +##kam +indochina +289 +marlins +myles +peking +##tea +extracts +pastry +superhuman +connacht +vogel +##ditional +##het +##udged +##lash +gloss +quarries +refit +teaser +##alic +##gaon +20s +materialized +sling +camped +pickering +tung +tracker +pursuant +##cide +cranes +soc +##cini +##typical +##viere +anhalt +overboard +workout +chores +fares +orphaned +stains +##logie +fenton +surpassing +joyah +triggers +##itte +grandmaster +##lass +##lists +clapping +fraudulent +ledger +nagasaki +##cor +##nosis +##tsa +eucalyptus +tun +##icio +##rney +##tara +dax +heroism +ina +wrexham +onboard +unsigned +##dates +moshe +galley +winnie +droplets +exiles +praises +watered +noodles +##aia +fein +adi +leland +multicultural +stink +bingo +comets +erskine +modernized +canned +constraint +domestically 
+chemotherapy +featherweight +stifled +##mum +darkly +irresistible +refreshing +hasty +isolate +##oys +kitchener +planners +##wehr +cages +yarn +implant +toulon +elects +childbirth +yue +##lind +##lone +cn +rightful +sportsman +junctions +remodeled +specifies +##rgh +291 +##oons +complimented +##urgent +lister +ot +##logic +bequeathed +cheekbones +fontana +gabby +##dial +amadeus +corrugated +maverick +resented +triangles +##hered +##usly +nazareth +tyrol +1675 +assent +poorer +sectional +aegean +##cous +296 +nylon +ghanaian +##egorical +##weig +cushions +forbid +fusiliers +obstruction +somerville +##scia +dime +earrings +elliptical +leyte +oder +polymers +timmy +atm +midtown +piloted +settles +continual +externally +mayfield +##uh +enrichment +henson +keane +persians +1733 +benji +braden +pep +324 +##efe +contenders +pepsi +valet +##isches +298 +##asse +##earing +goofy +stroll +##amen +authoritarian +occurrences +adversary +ahmedabad +tangent +toppled +dorchester +1672 +modernism +marxism +islamist +charlemagne +exponential +racks +unicode +brunette +mbc +pic +skirmish +##bund +##lad +##powered +##yst +hoisted +messina +shatter +##ctum +jedi +vantage +##music +##neil +clemens +mahmoud +corrupted +authentication +lowry +nils +##washed +omnibus +wounding +jillian +##itors +##opped +serialized +narcotics +handheld +##arm +##plicity +intersecting +stimulating +##onis +crate +fellowships +hemingway +casinos +climatic +fordham +copeland +drip +beatty +leaflets +robber +brothel +madeira +##hedral +sphinx +ultrasound +##vana +valor +forbade +leonid +villas +##aldo +duane +marquez +##cytes +disadvantaged +forearms +kawasaki +reacts +consular +lax +uncles +uphold +##hopper +concepcion +dorsey +lass +##izan +arching +passageway +1708 +researches +tia +internationals +##graphs +##opers +distinguishes +javanese +divert +##uven +plotted +##listic +##rwin +##erik +##tify +affirmative +signifies +validation +##bson +kari +felicity +georgina +zulu +##eros +##rained +##rath +overcoming +##dot +argyll +##rbin +1734 +chiba +ratification +windy +earls +parapet +##marks +hunan +pristine +astrid +punta +##gart +brodie +##kota +##oder +malaga +minerva +rouse +##phonic +bellowed +pagoda +portals +reclamation +##gur +##odies +##⁄₄ +parentheses +quoting +allergic +palette +showcases +benefactor +heartland +nonlinear +##tness +bladed +cheerfully +scans +##ety +##hone +1666 +girlfriends +pedersen +hiram +sous +##liche +##nator +1683 +##nery +##orio +##umen +bobo +primaries +smiley +##cb +unearthed +uniformly +fis +metadata +1635 +ind +##oted +recoil +##titles +##tura +##ια +406 +hilbert +jamestown +mcmillan +tulane +seychelles +##frid +antics +coli +fated +stucco +##grants +1654 +bulky +accolades +arrays +caledonian +carnage +optimism +puebla +##tative +##cave +enforcing +rotherham +seo +dunlop +aeronautics +chimed +incline +zoning +archduke +hellenistic +##oses +##sions +candi +thong +##ople +magnate +rustic +##rsk +projective +slant +##offs +danes +hollis +vocalists +##ammed +congenital +contend +gesellschaft +##ocating +##pressive +douglass +quieter +##cm +##kshi +howled +salim +spontaneously +townsville +buena +southport +##bold +kato +1638 +faerie +stiffly +##vus +##rled +297 +flawless +realising +taboo +##7th +bytes +straightening +356 +jena +##hid +##rmin +cartwright +berber +bertram +soloists +411 +noses +417 +coping +fission +hardin +inca +##cen +1717 +mobilized +vhf +##raf +biscuits +curate +##85 +##anial +331 +gaunt +neighbourhoods +1540 +##abas +blanca +bypassed +sockets +behold +coincidentally +##bane 
+nara +shave +splinter +terrific +##arion +##erian +commonplace +juris +redwood +waistband +boxed +caitlin +fingerprints +jennie +naturalized +##ired +balfour +craters +jody +bungalow +hugely +quilt +glitter +pigeons +undertaker +bulging +constrained +goo +##sil +##akh +assimilation +reworked +##person +persuasion +##pants +felicia +##cliff +##ulent +1732 +explodes +##dun +##inium +##zic +lyman +vulture +hog +overlook +begs +northwards +ow +spoil +##urer +fatima +favorably +accumulate +sargent +sorority +corresponded +dispersal +kochi +toned +##imi +##lita +internacional +newfound +##agger +##lynn +##rigue +booths +peanuts +##eborg +medicare +muriel +nur +##uram +crates +millennia +pajamas +worsened +##breakers +jimi +vanuatu +yawned +##udeau +carousel +##hony +hurdle +##ccus +##mounted +##pod +rv +##eche +airship +ambiguity +compulsion +recapture +##claiming +arthritis +##osomal +1667 +asserting +ngc +sniffing +dade +discontent +glendale +ported +##amina +defamation +rammed +##scent +fling +livingstone +##fleet +875 +##ppy +apocalyptic +comrade +lcd +##lowe +cessna +eine +persecuted +subsistence +demi +hoop +reliefs +710 +coptic +progressing +stemmed +perpetrators +1665 +priestess +##nio +dobson +ebony +rooster +itf +tortricidae +##bbon +##jian +cleanup +##jean +##øy +1721 +eighties +taxonomic +holiness +##hearted +##spar +antilles +showcasing +stabilized +##nb +gia +mascara +michelangelo +dawned +##uria +##vinsky +extinguished +fitz +grotesque +£100 +##fera +##loid +##mous +barges +neue +throbbed +cipher +johnnie +##a1 +##mpt +outburst +##swick +spearheaded +administrations +c1 +heartbreak +pixels +pleasantly +##enay +lombardy +plush +##nsed +bobbie +##hly +reapers +tremor +xiang +minogue +substantive +hitch +barak +##wyl +kwan +##encia +910 +obscene +elegance +indus +surfer +bribery +conserve +##hyllum +##masters +horatio +##fat +apes +rebound +psychotic +##pour +iteration +##mium +##vani +botanic +horribly +antiques +dispose +paxton +##hli +##wg +timeless +1704 +disregard +engraver +hounds +##bau +##version +looted +uno +facilitates +groans +masjid +rutland +antibody +disqualification +decatur +footballers +quake +slacks +48th +rein +scribe +stabilize +commits +exemplary +tho +##hort +##chison +pantry +traversed +##hiti +disrepair +identifiable +vibrated +baccalaureate +##nnis +csa +interviewing +##iensis +##raße +greaves +wealthiest +343 +classed +jogged +£5 +##58 +##atal +illuminating +knicks +respecting +##uno +scrubbed +##iji +##dles +kruger +moods +growls +raider +silvia +chefs +kam +vr +cree +percival +##terol +gunter +counterattack +defiant +henan +ze +##rasia +##riety +equivalence +submissions +##fra +##thor +bautista +mechanically +##heater +cornice +herbal +templar +##mering +outputs +ruining +ligand +renumbered +extravagant +mika +blockbuster +eta +insurrection +##ilia +darkening +ferocious +pianos +strife +kinship +##aer +melee +##anor +##iste +##may +##oue +decidedly +weep +##jad +##missive +##ppel +354 +puget +unease +##gnant +1629 +hammering +kassel +ob +wessex +##lga +bromwich +egan +paranoia +utilization +##atable +##idad +contradictory +provoke +##ols +##ouring +##tangled +knesset +##very +##lette +plumbing +##sden +##¹ +greensboro +occult +sniff +338 +zev +beaming +gamer +haggard +mahal +##olt +##pins +mendes +utmost +briefing +gunnery +##gut +##pher +##zh +##rok +1679 +khalifa +sonya +##boot +principals +urbana +wiring +##liffe +##minating +##rrado +dahl +nyu +skepticism +np +townspeople +ithaca +lobster +somethin +##fur +##arina +##−1 +freighter +zimmerman +biceps 
+contractual +##herton +amend +hurrying +subconscious +##anal +336 +meng +clermont +spawning +##eia +##lub +dignitaries +impetus +snacks +spotting +twigs +##bilis +##cz +##ouk +libertadores +nic +skylar +##aina +##firm +gustave +asean +##anum +dieter +legislatures +flirt +bromley +trolls +umar +##bbies +##tyle +blah +parc +bridgeport +crank +negligence +##nction +46th +constantin +molded +bandages +seriousness +00pm +siegel +carpets +compartments +upbeat +statehood +##dner +##edging +marko +730 +platt +##hane +paving +##iy +1738 +abbess +impatience +limousine +nbl +##talk +441 +lucille +mojo +nightfall +robbers +##nais +karel +brisk +calves +replicate +ascribed +telescopes +##olf +intimidated +##reen +ballast +specialization +##sit +aerodynamic +caliphate +rainer +visionary +##arded +epsilon +##aday +##onte +aggregation +auditory +boosted +reunification +kathmandu +loco +robyn +402 +acknowledges +appointing +humanoid +newell +redeveloped +restraints +##tained +barbarians +chopper +1609 +italiana +##lez +##lho +investigates +wrestlemania +##anies +##bib +690 +##falls +creaked +dragoons +gravely +minions +stupidity +volley +##harat +##week +musik +##eries +##uously +fungal +massimo +semantics +malvern +##ahl +##pee +discourage +embryo +imperialism +1910s +profoundly +##ddled +jiangsu +sparkled +stat +##holz +sweatshirt +tobin +##iction +sneered +##cheon +##oit +brit +causal +smyth +##neuve +diffuse +perrin +silvio +##ipes +##recht +detonated +iqbal +selma +##nism +##zumi +roasted +##riders +tay +##ados +##mament +##mut +##rud +840 +completes +nipples +cfa +flavour +hirsch +##laus +calderon +sneakers +moravian +##ksha +1622 +rq +294 +##imeters +bodo +##isance +##pre +##ronia +anatomical +excerpt +##lke +dh +kunst +##tablished +##scoe +biomass +panted +unharmed +gael +housemates +montpellier +##59 +coa +rodents +tonic +hickory +singleton +##taro +451 +1719 +aldo +breaststroke +dempsey +och +rocco +##cuit +merton +dissemination +midsummer +serials +##idi +haji +polynomials +##rdon +gs +enoch +prematurely +shutter +taunton +£3 +##grating +##inates +archangel +harassed +##asco +326 +archway +dazzling +##ecin +1736 +sumo +wat +##kovich +1086 +honneur +##ently +##nostic +##ttal +##idon +1605 +403 +1716 +blogger +rents +##gnan +hires +##ikh +##dant +howie +##rons +handler +retracted +shocks +1632 +arun +duluth +kepler +trumpeter +##lary +peeking +seasoned +trooper +##mara +laszlo +##iciencies +##rti +heterosexual +##inatory +##ssion +indira +jogging +##inga +##lism +beit +dissatisfaction +malice +##ately +nedra +peeling +##rgeon +47th +stadiums +475 +vertigo +##ains +iced +restroom +##plify +##tub +illustrating +pear +##chner +##sibility +inorganic +rappers +receipts +watery +##kura +lucinda +##oulos +reintroduced +##8th +##tched +gracefully +saxons +nutritional +wastewater +rained +favourites +bedrock +fisted +hallways +likeness +upscale +##lateral +1580 +blinds +prequel +##pps +##tama +deter +humiliating +restraining +tn +vents +1659 +laundering +recess +rosary +tractors +coulter +federer +##ifiers +##plin +persistence +##quitable +geschichte +pendulum +quakers +##beam +bassett +pictorial +buffet +koln +##sitor +drills +reciprocal +shooters +##57 +##cton +##tees +converge +pip +dmitri +donnelly +yamamoto +aqua +azores +demographics +hypnotic +spitfire +suspend +wryly +roderick +##rran +sebastien +##asurable +mavericks +##fles +##200 +himalayan +prodigy +##iance +transvaal +demonstrators +handcuffs +dodged +mcnamara +sublime +1726 +crazed +##efined +##till +ivo +pondered +reconciled +shrill +sava 
+##duk +bal +cad +heresy +jaipur +goran +##nished +341 +lux +shelly +whitehall +##hre +israelis +peacekeeping +##wled +1703 +demetrius +ousted +##arians +##zos +beale +anwar +backstroke +raged +shrinking +cremated +##yck +benign +towing +wadi +darmstadt +landfill +parana +soothe +colleen +sidewalks +mayfair +tumble +hepatitis +ferrer +superstructure +##gingly +##urse +##wee +anthropological +translators +##mies +closeness +hooves +##pw +mondays +##roll +##vita +landscaping +##urized +purification +sock +thorns +thwarted +jalan +tiberius +##taka +saline +##rito +confidently +khyber +sculptors +##ij +brahms +hammersmith +inspectors +battista +fivb +fragmentation +hackney +##uls +arresting +exercising +antoinette +bedfordshire +##zily +dyed +##hema +1656 +racetrack +variability +##tique +1655 +austrians +deteriorating +madman +theorists +aix +lehman +weathered +1731 +decreed +eruptions +1729 +flaw +quinlan +sorbonne +flutes +nunez +1711 +adored +downwards +fable +rasped +1712 +moritz +mouthful +renegade +shivers +stunts +dysfunction +restrain +translit +327 +pancakes +##avio +##cision +##tray +351 +vial +##lden +bain +##maid +##oxide +chihuahua +malacca +vimes +##rba +##rnier +1664 +donnie +plaques +##ually +337 +bangs +floppy +huntsville +loretta +nikolay +##otte +eater +handgun +ubiquitous +##hett +eras +zodiac +1634 +##omorphic +1820s +##zog +cochran +##bula +##lithic +warring +##rada +dalai +excused +blazers +mcconnell +reeling +bot +este +##abi +geese +hoax +taxon +##bla +guitarists +##icon +condemning +hunts +inversion +moffat +taekwondo +##lvis +1624 +stammered +##rest +##rzy +sousa +fundraiser +marylebone +navigable +uptown +cabbage +daniela +salman +shitty +whimper +##kian +##utive +programmers +protections +rm +##rmi +##rued +forceful +##enes +fuss +##tao +##wash +brat +oppressive +reykjavik +spartak +ticking +##inkles +##kiewicz +adolph +horst +maui +protege +straighten +cpc +landau +concourse +clements +resultant +##ando +imaginative +joo +reactivated +##rem +##ffled +##uising +consultative +##guide +flop +kaitlyn +mergers +parenting +somber +##vron +supervise +vidhan +##imum +courtship +exemplified +harmonies +medallist +refining +##rrow +##ка +amara +##hum +780 +goalscorer +sited +overshadowed +rohan +displeasure +secretive +multiplied +osman +##orth +engravings +padre +##kali +##veda +miniatures +mis +##yala +clap +pali +rook +##cana +1692 +57th +antennae +astro +oskar +1628 +bulldog +crotch +hackett +yucatan +##sure +amplifiers +brno +ferrara +migrating +##gree +thanking +turing +##eza +mccann +ting +andersson +onslaught +gaines +ganga +incense +standardization +##mation +sentai +scuba +stuffing +turquoise +waivers +alloys +##vitt +regaining +vaults +##clops +##gizing +digger +furry +memorabilia +probing +##iad +payton +rec +deutschland +filippo +opaque +seamen +zenith +afrikaans +##filtration +disciplined +inspirational +##merie +banco +confuse +grafton +tod +##dgets +championed +simi +anomaly +biplane +##ceptive +electrode +##para +1697 +cleavage +crossbow +swirl +informant +##lars +##osta +afi +bonfire +spec +##oux +lakeside +slump +##culus +##lais +##qvist +##rrigan +1016 +facades +borg +inwardly +cervical +xl +pointedly +050 +stabilization +##odon +chests +1699 +hacked +ctv +orthogonal +suzy +##lastic +gaulle +jacobite +rearview +##cam +##erted +ashby +##drik +##igate +##mise +##zbek +affectionately +canine +disperse +latham +##istles +##ivar +spielberg +##orin +##idium +ezekiel +cid +##sg +durga +middletown +##cina +customized +frontiers +harden +##etano +##zzy +1604 
+bolsheviks +##66 +coloration +yoko +##bedo +briefs +slabs +debra +liquidation +plumage +##oin +blossoms +dementia +subsidy +1611 +proctor +relational +jerseys +parochial +ter +##ici +esa +peshawar +cavalier +loren +cpi +idiots +shamrock +1646 +dutton +malabar +mustache +##endez +##ocytes +referencing +terminates +marche +yarmouth +##sop +acton +mated +seton +subtly +baptised +beige +extremes +jolted +kristina +telecast +##actic +safeguard +waldo +##baldi +##bular +endeavors +sloppy +subterranean +##ensburg +##itung +delicately +pigment +tq +##scu +1626 +##ound +collisions +coveted +herds +##personal +##meister +##nberger +chopra +##ricting +abnormalities +defective +galician +lucie +##dilly +alligator +likened +##genase +burundi +clears +complexion +derelict +deafening +diablo +fingered +champaign +dogg +enlist +isotope +labeling +mrna +##erre +brilliance +marvelous +##ayo +1652 +crawley +ether +footed +dwellers +deserts +hamish +rubs +warlock +skimmed +##lizer +870 +buick +embark +heraldic +irregularities +##ajan +kiara +##kulam +##ieg +antigen +kowalski +##lge +oakley +visitation +##mbit +vt +##suit +1570 +murderers +##miento +##rites +chimneys +##sling +condemn +custer +exchequer +havre +##ghi +fluctuations +##rations +dfb +hendricks +vaccines +##tarian +nietzsche +biking +juicy +##duced +brooding +scrolling +selangor +##ragan +352 +annum +boomed +seminole +sugarcane +##dna +departmental +dismissing +innsbruck +arteries +ashok +batavia +daze +kun +overtook +##rga +##tlan +beheaded +gaddafi +holm +electronically +faulty +galilee +fractures +kobayashi +##lized +gunmen +magma +aramaic +mala +eastenders +inference +messengers +bf +##qu +407 +bathrooms +##vere +1658 +flashbacks +ideally +misunderstood +##jali +##weather +mendez +##grounds +505 +uncanny +##iii +1709 +friendships +##nbc +sacrament +accommodated +reiterated +logistical +pebbles +thumped +##escence +administering +decrees +drafts +##flight +##cased +##tula +futuristic +picket +intimidation +winthrop +##fahan +interfered +339 +afar +francoise +morally +uta +cochin +croft +dwarfs +##bruck +##dents +##nami +biker +##hner +##meral +nano +##isen +##ometric +##pres +##ан +brightened +meek +parcels +securely +gunners +##jhl +##zko +agile +hysteria +##lten +##rcus +bukit +champs +chevy +cuckoo +leith +sadler +theologians +welded +##section +1663 +jj +plurality +xander +##rooms +##formed +shredded +temps +intimately +pau +tormented +##lok +##stellar +1618 +charred +ems +essen +##mmel +alarms +spraying +ascot +blooms +twinkle +##abia +##apes +internment +obsidian +##chaft +snoop +##dav +##ooping +malibu +##tension +quiver +##itia +hays +mcintosh +travers +walsall +##ffie +1623 +beverley +schwarz +plunging +structurally +m3 +rosenthal +vikram +##tsk +770 +ghz +##onda +##tiv +chalmers +groningen +pew +reckon +unicef +##rvis +55th +##gni +1651 +sulawesi +avila +cai +metaphysical +screwing +turbulence +##mberg +augusto +samba +56th +baffled +momentary +toxin +##urian +##wani +aachen +condoms +dali +steppe +##3d +##app +##oed +##year +adolescence +dauphin +electrically +inaccessible +microscopy +nikita +##ega +atv +##cel +##enter +##oles +##oteric +##ы +accountants +punishments +wrongly +bribes +adventurous +clinch +flinders +southland +##hem +##kata +gough +##ciency +lads +soared +##ה +undergoes +deformation +outlawed +rubbish +##arus +##mussen +##nidae +##rzburg +arcs +##ingdon +##tituted +1695 +wheelbase +wheeling +bombardier +campground +zebra +##lices +##oj +##bain +lullaby +##ecure +donetsk +wylie +grenada +##arding +##ης +squinting 
+eireann +opposes +##andra +maximal +runes +##broken +##cuting +##iface +##ror +##rosis +additive +britney +adultery +triggering +##drome +detrimental +aarhus +containment +jc +swapped +vichy +##ioms +madly +##oric +##rag +brant +##ckey +##trix +1560 +1612 +broughton +rustling +##stems +##uder +asbestos +mentoring +##nivorous +finley +leaps +##isan +apical +pry +slits +substitutes +##dict +intuitive +fantasia +insistent +unreasonable +##igen +##vna +domed +hannover +margot +ponder +##zziness +impromptu +jian +lc +rampage +stemming +##eft +andrey +gerais +whichever +amnesia +appropriated +anzac +clicks +modifying +ultimatum +cambrian +maids +verve +yellowstone +##mbs +conservatoire +##scribe +adherence +dinners +spectra +imperfect +mysteriously +sidekick +tatar +tuba +##aks +##ifolia +distrust +##athan +##zle +c2 +ronin +zac +##pse +celaena +instrumentalist +scents +skopje +##mbling +comical +compensated +vidal +condor +intersect +jingle +wavelengths +##urrent +mcqueen +##izzly +carp +weasel +422 +kanye +militias +postdoctoral +eugen +gunslinger +##ɛ +faux +hospice +##for +appalled +derivation +dwarves +##elis +dilapidated +##folk +astoria +philology +##lwyn +##otho +##saka +inducing +philanthropy +##bf +##itative +geek +markedly +sql +##yce +bessie +indices +rn +##flict +495 +frowns +resolving +weightlifting +tugs +cleric +contentious +1653 +mania +rms +##miya +##reate +##ruck +##tucket +bien +eels +marek +##ayton +##cence +discreet +unofficially +##ife +leaks +##bber +1705 +332 +dung +compressor +hillsborough +pandit +shillings +distal +##skin +381 +##tat +##you +nosed +##nir +mangrove +undeveloped +##idia +textures +##inho +##500 +##rise +ae +irritating +nay +amazingly +bancroft +apologetic +compassionate +kata +symphonies +##lovic +airspace +##lch +930 +gifford +precautions +fulfillment +sevilla +vulgar +martinique +##urities +looting +piccolo +tidy +##dermott +quadrant +armchair +incomes +mathematicians +stampede +nilsson +##inking +##scan +foo +quarterfinal +##ostal +shang +shouldered +squirrels +##owe +344 +vinegar +##bner +##rchy +##systems +delaying +##trics +ars +dwyer +rhapsody +sponsoring +##gration +bipolar +cinder +starters +##olio +##urst +421 +signage +##nty +aground +figurative +mons +acquaintances +duets +erroneously +soyuz +elliptic +recreated +##cultural +##quette +##ssed +##tma +##zcz +moderator +scares +##itaire +##stones +##udence +juniper +sighting +##just +##nsen +britten +calabria +ry +bop +cramer +forsyth +stillness +##л +airmen +gathers +unfit +##umber +##upt +taunting +##rip +seeker +streamlined +##bution +holster +schumann +tread +vox +##gano +##onzo +strive +dil +reforming +covent +newbury +predicting +##orro +decorate +tre +##puted +andover +ie +asahi +dept +dunkirk +gills +##tori +buren +huskies +##stis +##stov +abstracts +bets +loosen +##opa +1682 +yearning +##glio +##sir +berman +effortlessly +enamel +napoli +persist +##peration +##uez +attache +elisa +b1 +invitations +##kic +accelerating +reindeer +boardwalk +clutches +nelly +polka +starbucks +##kei +adamant +huey +lough +unbroken +adventurer +embroidery +inspecting +stanza +##ducted +naia +taluka +##pone +##roids +chases +deprivation +florian +##jing +##ppet +earthly +##lib +##ssee +colossal +foreigner +vet +freaks +patrice +rosewood +triassic +upstate +##pkins +dominates +ata +chants +ks +vo +##400 +##bley +##raya +##rmed +555 +agra +infiltrate +##ailing +##ilation +##tzer +##uppe +##werk +binoculars +enthusiast +fujian +squeak +##avs +abolitionist +almeida +boredom +hampstead +marsden +rations +##ands 
+inflated +334 +bonuses +rosalie +patna +##rco +329 +detachments +penitentiary +54th +flourishing +woolf +##dion +##etched +papyrus +##lster +##nsor +##toy +bobbed +dismounted +endelle +inhuman +motorola +tbs +wince +wreath +##ticus +hideout +inspections +sanjay +disgrace +infused +pudding +stalks +##urbed +arsenic +leases +##hyl +##rrard +collarbone +##waite +##wil +dowry +##bant +##edance +genealogical +nitrate +salamanca +scandals +thyroid +necessitated +##! +##" +### +##$ +##% +##& +##' +##( +##) +##* +##+ +##, +##- +##. +##/ +##: +##; +##< +##= +##> +##? +##@ +##[ +##\ +##] +##^ +##_ +##` +##{ +##| +##} +##~ +##¡ +##¢ +##£ +##¤ +##¥ +##¦ +##§ +##¨ +##© +##ª +##« +##¬ +##® +##± +##´ +##µ +##¶ +##· +##º +##» +##¼ +##¾ +##¿ +##æ +##ð +##÷ +##þ +##đ +##ħ +##ŋ +##œ +##ƒ +##ɐ +##ɑ +##ɒ +##ɔ +##ɕ +##ə +##ɡ +##ɣ +##ɨ +##ɪ +##ɫ +##ɬ +##ɯ +##ɲ +##ɴ +##ɹ +##ɾ +##ʀ +##ʁ +##ʂ +##ʃ +##ʉ +##ʊ +##ʋ +##ʌ +##ʎ +##ʐ +##ʑ +##ʒ +##ʔ +##ʰ +##ʲ +##ʳ +##ʷ +##ʸ +##ʻ +##ʼ +##ʾ +##ʿ +##ˈ +##ˡ +##ˢ +##ˣ +##ˤ +##β +##γ +##δ +##ε +##ζ +##θ +##κ +##λ +##μ +##ξ +##ο +##π +##ρ +##σ +##τ +##υ +##φ +##χ +##ψ +##ω +##б +##г +##д +##ж +##з +##м +##п +##с +##у +##ф +##х +##ц +##ч +##ш +##щ +##ъ +##э +##ю +##ђ +##є +##і +##ј +##љ +##њ +##ћ +##ӏ +##ա +##բ +##գ +##դ +##ե +##թ +##ի +##լ +##կ +##հ +##մ +##յ +##ն +##ո +##պ +##ս +##վ +##տ +##ր +##ւ +##ք +##־ +##א +##ב +##ג +##ד +##ו +##ז +##ח +##ט +##י +##ך +##כ +##ל +##ם +##מ +##ן +##נ +##ס +##ע +##ף +##פ +##ץ +##צ +##ק +##ר +##ש +##ת +##، +##ء +##ب +##ت +##ث +##ج +##ح +##خ +##ذ +##ز +##س +##ش +##ص +##ض +##ط +##ظ +##ع +##غ +##ـ +##ف +##ق +##ك +##و +##ى +##ٹ +##پ +##چ +##ک +##گ +##ں +##ھ +##ہ +##ے +##अ +##आ +##उ +##ए +##क +##ख +##ग +##च +##ज +##ट +##ड +##ण +##त +##थ +##द +##ध +##न +##प +##ब +##भ +##म +##य +##र +##ल +##व +##श +##ष +##स +##ह +##ा +##ि +##ी +##ो +##। +##॥ +##ং +##অ +##আ +##ই +##উ +##এ +##ও +##ক +##খ +##গ +##চ +##ছ +##জ +##ট +##ড +##ণ +##ত +##থ +##দ +##ধ +##ন +##প +##ব +##ভ +##ম +##য +##র +##ল +##শ +##ষ +##স +##হ +##া +##ি +##ী +##ে +##க +##ச +##ட +##த +##ந +##ன +##ப +##ம +##ய +##ர +##ல +##ள +##வ +##ா +##ி +##ு +##ே +##ை +##ನ +##ರ +##ಾ +##ක +##ය +##ර +##ල +##ව +##ා +##ก +##ง +##ต +##ท +##น +##พ +##ม +##ย +##ร +##ล +##ว +##ส +##อ +##า +##เ +##་ +##། +##ག +##ང +##ད +##ན +##པ +##བ +##མ +##འ +##ར +##ལ +##ས +##မ +##ა +##ბ +##გ +##დ +##ე +##ვ +##თ +##ი +##კ +##ლ +##მ +##ნ +##ო +##რ +##ს +##ტ +##უ +##ᄀ +##ᄂ +##ᄃ +##ᄅ +##ᄆ +##ᄇ +##ᄉ +##ᄊ +##ᄋ +##ᄌ +##ᄎ +##ᄏ +##ᄐ +##ᄑ +##ᄒ +##ᅡ +##ᅢ +##ᅥ +##ᅦ +##ᅧ +##ᅩ +##ᅪ +##ᅭ +##ᅮ +##ᅯ +##ᅲ +##ᅳ +##ᅴ +##ᅵ +##ᆨ +##ᆫ +##ᆯ +##ᆷ +##ᆸ +##ᆼ +##ᴬ +##ᴮ +##ᴰ +##ᴵ +##ᴺ +##ᵀ +##ᵃ +##ᵇ +##ᵈ +##ᵉ +##ᵍ +##ᵏ +##ᵐ +##ᵒ +##ᵖ +##ᵗ +##ᵘ +##ᵣ +##ᵤ +##ᵥ +##ᶜ +##ᶠ +##‐ +##‑ +##‒ +##– +##— +##― +##‖ +##‘ +##’ +##‚ +##“ +##” +##„ +##† +##‡ +##• +##… +##‰ +##′ +##″ +##› +##‿ +##⁄ +##⁰ +##ⁱ +##⁴ +##⁵ +##⁶ +##⁷ +##⁸ +##⁹ +##⁻ +##ⁿ +##₅ +##₆ +##₇ +##₈ +##₉ +##₊ +##₍ +##₎ +##ₐ +##ₑ +##ₒ +##ₓ +##ₕ +##ₖ +##ₗ +##ₘ +##ₚ +##ₛ +##ₜ +##₤ +##₩ +##€ +##₱ +##₹ +##ℓ +##№ +##ℝ +##™ +##⅓ +##⅔ +##← +##↑ +##→ +##↓ +##↔ +##↦ +##⇄ +##⇌ +##⇒ +##∂ +##∅ +##∆ +##∇ +##∈ +##∗ +##∘ +##√ +##∞ +##∧ +##∨ +##∩ +##∪ +##≈ +##≡ +##≤ +##≥ +##⊂ +##⊆ +##⊕ +##⊗ +##⋅ +##─ +##│ +##■ +##▪ +##● +##★ +##☆ +##☉ +##♠ +##♣ +##♥ +##♦ +##♯ +##⟨ +##⟩ +##ⱼ +##⺩ +##⺼ +##⽥ +##、 +##。 +##〈 +##〉 +##《 +##》 +##「 +##」 +##『 +##』 +##〜 +##あ +##い +##う +##え +##お +##か +##き +##く +##け +##こ +##さ +##し +##す +##せ +##そ +##た +##ち +##っ +##つ +##て +##と +##な +##に +##ぬ +##ね +##の +##は +##ひ +##ふ +##へ +##ほ +##ま +##み +##む +##め +##も +##や +##ゆ +##よ +##ら +##り +##る +##れ +##ろ +##を +##ん +##ァ +##ア +##ィ +##イ +##ウ +##ェ +##エ +##オ +##カ +##キ +##ク +##ケ 
+##コ +##サ +##シ +##ス +##セ +##タ +##チ +##ッ +##ツ +##テ +##ト +##ナ +##ニ +##ノ +##ハ +##ヒ +##フ +##ヘ +##ホ +##マ +##ミ +##ム +##メ +##モ +##ャ +##ュ +##ョ +##ラ +##リ +##ル +##レ +##ロ +##ワ +##ン +##・ +##ー +##一 +##三 +##上 +##下 +##不 +##世 +##中 +##主 +##久 +##之 +##也 +##事 +##二 +##五 +##井 +##京 +##人 +##亻 +##仁 +##介 +##代 +##仮 +##伊 +##会 +##佐 +##侍 +##保 +##信 +##健 +##元 +##光 +##八 +##公 +##内 +##出 +##分 +##前 +##劉 +##力 +##加 +##勝 +##北 +##区 +##十 +##千 +##南 +##博 +##原 +##口 +##古 +##史 +##司 +##合 +##吉 +##同 +##名 +##和 +##囗 +##四 +##国 +##國 +##土 +##地 +##坂 +##城 +##堂 +##場 +##士 +##夏 +##外 +##大 +##天 +##太 +##夫 +##奈 +##女 +##子 +##学 +##宀 +##宇 +##安 +##宗 +##定 +##宣 +##宮 +##家 +##宿 +##寺 +##將 +##小 +##尚 +##山 +##岡 +##島 +##崎 +##川 +##州 +##巿 +##帝 +##平 +##年 +##幸 +##广 +##弘 +##張 +##彳 +##後 +##御 +##德 +##心 +##忄 +##志 +##忠 +##愛 +##成 +##我 +##戦 +##戸 +##手 +##扌 +##政 +##文 +##新 +##方 +##日 +##明 +##星 +##春 +##昭 +##智 +##曲 +##書 +##月 +##有 +##朝 +##木 +##本 +##李 +##村 +##東 +##松 +##林 +##森 +##楊 +##樹 +##橋 +##歌 +##止 +##正 +##武 +##比 +##氏 +##民 +##水 +##氵 +##氷 +##永 +##江 +##沢 +##河 +##治 +##法 +##海 +##清 +##漢 +##瀬 +##火 +##版 +##犬 +##王 +##生 +##田 +##男 +##疒 +##発 +##白 +##的 +##皇 +##目 +##相 +##省 +##真 +##石 +##示 +##社 +##神 +##福 +##禾 +##秀 +##秋 +##空 +##立 +##章 +##竹 +##糹 +##美 +##義 +##耳 +##良 +##艹 +##花 +##英 +##華 +##葉 +##藤 +##行 +##街 +##西 +##見 +##訁 +##語 +##谷 +##貝 +##貴 +##車 +##軍 +##辶 +##道 +##郎 +##郡 +##部 +##都 +##里 +##野 +##金 +##鈴 +##镇 +##長 +##門 +##間 +##阝 +##阿 +##陳 +##陽 +##雄 +##青 +##面 +##風 +##食 +##香 +##馬 +##高 +##龍 +##龸 +##fi +##fl +##! +##( +##) +##, +##- +##. +##/ +##: +##? +##~ diff --git a/PyTorch/Recommendation/NCF/Dockerfile b/PyTorch/Recommendation/NCF/Dockerfile index 2cee939d..94acf1e8 100644 --- a/PyTorch/Recommendation/NCF/Dockerfile +++ b/PyTorch/Recommendation/NCF/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM nvcr.io/nvidia/pytorch:18.12.1-py3 +FROM nvcr.io/nvidia/pytorch:19.05-py3 RUN apt-get update && \ apt-get install -y unzip diff --git a/PyTorch/Recommendation/NCF/README.md b/PyTorch/Recommendation/NCF/README.md index cf2a8174..0b83cf78 100644 --- a/PyTorch/Recommendation/NCF/README.md +++ b/PyTorch/Recommendation/NCF/README.md @@ -1,6 +1,52 @@ -# Neural Collaborative Filtering (NCF) +# Neural Collaborative Filtering (NCF) for PyTorch + +This repository provides a script and recipe to train the Neural Collaborative Filtering (NCF) +model to achieve state of the art accuracy, and is tested and maintained by NVIDIA. 
+ +Table of Contents +================= + + * [The model](#the-model) + * [Model architecture](#model-architecture) + * [Default configuration](#default-configuration) + * [Feature support matrix](#feature-support-matrix) + * [Features](#features) + * [Setup](#setup) + * [Requirements](#requirements) + * [Quick Start Guide](#quick-start-guide) + * [Details](#details) + * [Scripts and sample code](#scripts-and-sample-code) + * [Command-line options](#command-line-options) + * [Getting the data](#getting-the-data) + * [Dataset guidelines](#dataset-guidelines) + * [Multi-dataset](#multi-dataset) + * [ML-1m](#ml-1m) + * [Training process](#training-process) + * [Inference process](#inference-process) + * [Mixed precision training](#mixed-precision-training) + * [Enabling mixed precision](#enabling-mixed-precision) + * [Benchmarking](#benchmarking) + * [Training performance benchmark](#training-performance-benchmark) + * [Inference performance benchmark](#inference-performance-benchmark) + * [Results](#results) + * [Training accuracy results](#training-accuracy-results) + * [NVIDIA DGX-1 (8x V100 32G)](#nvidia-dgx-1-8x-v100-32g) + * [Training stability test](#training-stability-test) + * [Training performance results](#training-performance-results) + * [NVIDIA DGX-1 (8x V100 16G)](#nvidia-dgx-1-(8x-v100-16g)) + * [NVIDIA DGX-1 (8x V100 32G)](#nvidia-dgx-1-(8x-v100-32g)) + * [NVIDIA DGX-2 (16x V100 32G)](#nvidia-dgx-2-(16x-v100-32g)) + * [Inference performance results](#inference-performance-results) + * [NVIDIA DGX-1 (8x V100 16G)](#nvidia-dgx-1-(8x-v100-16g)) + * [NVIDIA DGX-1 (8x V100 32G)](#nvidia-dgx-1-(8x-v100-32g)) + * [NVIDIA DGX-2 (16x V100 32G)](#nvidia-dgx-2-(16x-v100-32g)) + * [Changelog](#changelog) + * [Known issues](#known-issues) + * [Scaling beyond 8 GPUs](#scaling-beyond-8-gpus) + * [Memory usage](#memory-usage) ## The model + The NCF model focuses on providing recommendations, also known as collaborative filtering; with implicit feedback. The training data for this model should contain binary information about whether a user interacted with a specific item. NCF was first described by Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu and Tat-Seng Chua in the [Neural Collaborative Filtering paper](https://arxiv.org/abs/1708.05031). @@ -8,6 +54,23 @@ The implementation in this repository focuses on the NeuMF instantiation of the We modified it to use dropout in the FullyConnected layers. This reduces overfitting and increases the final accuracy. Training the other two instantiations of NCF (GMF and MLP) is not supported. +Contrary to the original paper, we benchmark the model on the larger [ML-20m dataset](https://grouplens.org/datasets/movielens/20m/) +instead of using the smaller [ML-1m](https://grouplens.org/datasets/movielens/1m/) dataset as we think this is more realistic of production type environments. +However, using the ML-1m dataset is also supported. + +This model is trained with mixed precision using Tensor Cores on NVIDIA Volta and Turing GPUs. Therefore, researchers can get results 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. Multi-GPU training is also supported. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. + + + +### Model architecture + +This model is based mainly on Embedding and FullyConnected layers. 
The control flow is divided into two branches: +* Multi Layer Perceptron (MLP) branch, which transforms the input through FullyConnected layers with ReLU activations and dropout. +* Matrix Factorization (MF) branch, which performs collaborative filtering factorization. +Each user and each item has two embedding vectors associated with it -- one for the MLP branch and the other for the MF branch. + +The outputs from those branches are concatenated and fed to the final FullyConnected layer with sigmoid activation. +This can be interpreted as a probability of a user interacting with a given item.
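As a rough illustration of the layout described above, the snippet below sketches the two branches in plain PyTorch. This is not the code used in this repository -- the actual model is defined in `neumf.py` -- and the class name, layer sizes and dropout placement here are assumptions made only for readability.

```python
import torch
import torch.nn as nn

class NeuMFSketch(nn.Module):
    """Illustrative sketch only; see neumf.py for the real implementation."""
    def __init__(self, nb_users, nb_items, mf_dim=64,
                 mlp_sizes=(256, 256, 128, 64), dropout=0.5):
        super().__init__()
        # Two embedding tables per user and per item: one pair for the MF
        # branch and one pair for the MLP branch.
        self.mf_user = nn.Embedding(nb_users, mf_dim)
        self.mf_item = nn.Embedding(nb_items, mf_dim)
        self.mlp_user = nn.Embedding(nb_users, mlp_sizes[0] // 2)
        self.mlp_item = nn.Embedding(nb_items, mlp_sizes[0] // 2)
        # MLP branch: FullyConnected layers with ReLU activations and dropout.
        layers = []
        for in_features, out_features in zip(mlp_sizes[:-1], mlp_sizes[1:]):
            layers += [nn.Linear(in_features, out_features), nn.ReLU(), nn.Dropout(p=dropout)]
        self.mlp = nn.Sequential(*layers)
        # Final FullyConnected layer over the concatenated branch outputs.
        self.final = nn.Linear(mf_dim + mlp_sizes[-1], 1)

    def forward(self, users, items):
        # MF branch: element-wise product of the user and item embeddings.
        mf = self.mf_user(users) * self.mf_item(items)
        # MLP branch: concatenated embeddings pushed through the MLP stack.
        mlp = self.mlp(torch.cat([self.mlp_user(users), self.mlp_item(items)], dim=-1))
        # Concatenate both branches; sigmoid yields an interaction probability.
        return torch.sigmoid(self.final(torch.cat([mf, mlp], dim=-1)))
```

For example, `NeuMFSketch(nb_users=100, nb_items=50)(torch.tensor([3]), torch.tensor([7]))` returns one probability per (user, item) pair.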

@@ -16,252 +79,483 @@ Figure 1. The architecture of a Neural Collaborative Filtering model. Taken from

-Contrary to the original paper, we benchmark the model on the larger [ml-20m dataset](https://grouplens.org/datasets/movielens/20m/) -instead of using the smaller [ml-1m](https://grouplens.org/datasets/movielens/1m/) dataset as we think this is more realistic of production type environments. -However, using the ml-1m dataset is also supported. +### Default configuration -## Requirements +The following features were implemented in this model: + * Automatic Mixed Precision (AMP) + * Data-parallel multi-GPU training and evaluation + * Dropout + * Gradient accumulation -The easiest way to train the model is to use a Docker container. This would require: -* [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) -* [PyTorch 18.12.1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) or newer - -For more information about how to get started with NGC containers, see the -following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning -Frameworks Documentation: -* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) -* [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) -* [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running) +The following performance optimizations were implemented in this model: + * FusedAdam optimizer + * Approximate train negative sampling + * Caching all the positive training samples in the device memory -## Training using mixed precision with Tensor Cores -### Supported hardware -Before you can train using mixed precision with Tensor Cores, ensure that you have an - NVIDIA Volta based GPU. Other platforms may work, however, are not officially - supported. - -### Software changes - For detailed information about how to train using mixed precision, see the [Mixed - Precision Training paper](https://arxiv.org/abs/1710.03740) - and [Training With Mixed Precision documentation](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html). +### Feature support matrix +The following features are supported by this model: -Another option for adding mixed-precision support is available from NVIDIA’s -[APEX](https://github.com/NVIDIA/apex), a PyTorch extension, that contains utility libraries, such as AMP, which require minimal network code changes to leverage Tensor Core performance. +| **Feature** | **NCF PyTorch** | +|:---:|:--------:| +| Automatic Mixed Precision (AMP) | Yes | +| Multi-GPU training with Distributed Data Parallel (DDP) | Yes | +| Fused Adam | Yes | -This implementation of the NCF model uses a custom FP16 optimizer to implement mixed precision with static loss scaling. -The custom FP16 Optimizer was used to take advantage of the performance gains provided by the FusedOptimizer. +#### Features + +* Automatic Mixed Precision - This implementation of NCF uses AMP to implement mixed precision training. +It allows us to use FP16 training with FP32 master weights by modifying just 3 lines of code. +* Multi-GPU training with Distributed Data Parallel - uses Apex's DDP to implement efficient multi-GPU training with NCCL. +* Fused Adam - We use a special implementation of the Adam implementation provided by the Apex package. It fuses some operations for faster weight updates. +Since NCF is a relatively lightweight model with a large number of parameters, we’ve observed significant performance improvements from using FusedAdam. 
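To show how the features above typically fit together, here is a minimal sketch of the usual Apex wiring for FusedAdam, AMP and DDP. It is not the exact code from `ncf.py`; the model is a stand-in, the hyperparameter values are only examples, and the AMP calls are covered in more detail in the Mixed precision training section below.

```python
import torch
from apex import amp
from apex.optimizers import FusedAdam
from apex.parallel import DistributedDataParallel as DDP

# Assumes the script was started with torch.distributed.launch, which provides
# the process group configuration; in the real script each process also
# selects its GPU based on the --local_rank argument.
torch.distributed.init_process_group(backend='nccl')

model = torch.nn.Linear(128, 1).cuda()   # stand-in for the real NCF model
optimizer = FusedAdam(model.parameters(), lr=0.0045, betas=(0.25, 0.5), eps=1e-8)

# AMP: FP16 compute with FP32 master weights and dynamic loss scaling.
model, optimizer = amp.initialize(model, optimizer, opt_level='O2',
                                  keep_batchnorm_fp32=False, loss_scale='dynamic')

# Apex DDP handles the NCCL gradient all-reduce across GPUs.
model = DDP(model)

# Inside the training loop, the loss is scaled before the backward pass:
#     with amp.scale_loss(loss, optimizer) as scaled_loss:
#         scaled_loss.backward()
#     optimizer.step()
```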
-## Quick start guide
+## Setup
+The following section lists the requirements in order to start training the Neural Collaborative Filtering model.
-### 1. Build and launch an NCF PyTorch Docker container
+### Requirements
+This repository contains a Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies.
+Aside from these dependencies, ensure you have the following components:
+NVIDIA Docker
+PyTorch 19.05-py3 NGC container
+NVIDIA Volta or Turing based GPU
-After Docker is correctly set up, you can build the NCF image with:
+For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
+Getting Started Using NVIDIA GPU Cloud
+Accessing And Pulling From The NGC Container Registry
+Running PyTorch
+
+For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned NVIDIA Container Support Matrix.
+
+### Quick Start Guide
+
+1. Clone the repository.
+```bash
+git clone https://github.com/NVIDIA/DeepLearningExamples
+cd DeepLearningExamples/PyTorch/Recommendation/NCF
+```
+
+2. Build an NCF PyTorch Docker container.
+
+After Docker is set up, you can build the NCF image with:
```bash
docker build . -t nvidia_ncf
```
-After that the NVIDIA NCF container can be launched with:
+3. Start an interactive session in the NGC container to run preprocessing/training and inference.
+
+The NCF PyTorch container can be launched with:
```bash
mkdir data
docker run --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data nvidia_ncf bash
```
-This will launch the container and mount the ./data directory as a volume to the /data directory inside the container.
-Any datasets and experiment results (logs, checkpoints etc.) saved to /data will be accessible
-in the './data' directory on the host.
+This will launch the container and mount the `./data` directory as a volume to the `/data` directory inside the container.
+Any datasets and experiment results (logs, checkpoints etc.) saved to `/data` will be accessible
+in the `./data` directory on the host.
-### 2. Data preparation
+4. Download and preprocess the data.
-Preprocessing consists of downloading the data, filtering out users that have less than 20 ratings (by default), sorting the data and dropping the duplicates.
+Preprocessing consists of downloading the data, filtering out users that have less than 20 ratings (by default), sorting the data and dropping the duplicates. The preprocessed train and test data is then saved in PyTorch binary format to be loaded just before training.
+Note: Preprocessing requires PyTorch and should therefore be run inside the Docker container.
+
No data augmentation techniques are used.
-To download and preprocess the ml-20m dataset you can run:
+To download and preprocess the ML-20m dataset you can run:
```bash
./prepare_dataset.sh
```
-Please note that this command will return immediately without downloading anything if the data is already present in the /data directory.
+Note: This command will return immediately without downloading anything if the data is already present in the `./data` directory.
-#### Other datasets
+This will store the preprocessed training and evaluation data in the `./data` directory so that it can be later
+used to train the model (by passing the appropriate `--data` argument to the `ncf.py` script).
-This implementation is tuned for the ml-20m and ml-1m datasets.
-Using other datasets might require tuning some hyperparameters (e.g., learning rate, beta1, beta2) +5. Start training. -If you'd like to use your custom dataset you can do it by adding support for it in the prepare_dataset.sh and download_dataset.sh scripts. -The required format of the data is a CSV file in which the first column contains the userID and the second column contains -the itemID. - -The performance of the model depends on the dataset size. -Generally, the model should scale better for datasets containing more data points. -For a smaller dataset the you might experience slower performance. - - -##### ml-1m -To download and preprocess the ml-1m dataset run: -```bash -./prepare_dataset.sh ml-1m -``` - -This will store the preprocessed training and evaluation data in the /data directory so that it can be later -used to train the model (by passing the appropriate --data argument to the ncf.py script). - -### 3. Run the training -After the docker container is launched, the training with the [default hyperparameters](#5-hyperparameters) can be started with: +After the Docker container is launched, the training with the default hyperparameters can be started with: ```bash ./prepare_dataset.sh python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-20m ``` -This will result in a checkpoint file being written to /data/checkpoints/model.pth. +This will result in a checkpoint file being written to `/data/checkpoints/model.pth`. -### 4. Test a trained model +6. Start validation/evaluation. -The trained model can be evaluated by passing the --mode test flag to the run.sh script: +The trained model can be evaluated by passing the `--mode test` flag to the `run.sh` script: ```bash python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-20m --mode test --checkpoint-path /data/checkpoints/model.pth ``` -### 5. Hyperparameters and command line arguments -The default hyperparameters used are: +## Details -* learning rate: 0.0045 -* beta1: 0.25 -* beta2: 0.5 -* training batch size: 1048576 -* epsilon: 1e-8 -* loss scale: 8192 -* negatives sampled for training: 4 -* use mixed precision training: Yes -* number of GPUs used: 8 +The following sections provide greater details of the dataset, running training and inference, and the training results. + +### Scripts and sample code + +The `ncf.py` script contains most of the training and validation logic. Data loading and preprocessing code is located in `dataloading.py`. +The model architecture is defined in `neumf.py`. Some initial data preprocessing is located in `convert.py`. +The logger directory contains simple bookkeeping utilities for storing training results. 
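If you want to inspect the artifacts these scripts produce, the hypothetical snippet below lists the tensors stored in a trained checkpoint. It assumes the file written to `/data/checkpoints/model.pth` is a plain PyTorch `state_dict`; the actual checkpoint layout is defined by `ncf.py` and may differ.

```python
import torch

# Assumption: the checkpoint is a state_dict saved with torch.save().
state_dict = torch.load('/data/checkpoints/model.pth', map_location='cpu')
for name, tensor in state_dict.items():
    print(f'{name}: {tuple(tensor.shape)}')
```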
+ +### Command-line options + +To see the full list of available options and their descriptions, use the `-h` or `--help` command line option, for example: +`python ncf.py --help` + +The following example output is printed when running the sample: +``` +usage: ncf.py [-h] [--data DATA] [-e EPOCHS] [-b BATCH_SIZE] + [--valid_batch_size VALID_BATCH_SIZE] [-f FACTORS] + [--layers LAYERS [LAYERS ...]] [-n NEGATIVE_SAMPLES] + [-l LEARNING_RATE] [-k TOPK] [--seed SEED] + [--threshold THRESHOLD] [--valid_negative VALID_NEGATIVE] + [--beta1 BETA1] [--beta2 BETA2] [--eps EPS] [--dropout DROPOUT] + [--checkpoint_dir CHECKPOINT_DIR] [--mode {train,test}] + [--grads_accumulated GRADS_ACCUMULATED] [--opt_level {O0,O2}] + [--local_rank LOCAL_RANK] + +Train a Neural Collaborative Filtering model: + + +optional arguments: + -h, --help show this help message and exit + --data DATA Path to test and training data files + -e EPOCHS, --epochs EPOCHS + Number of epochs for training + -b BATCH_SIZE, --batch_size BATCH_SIZE + Number of examples for each iteration + --valid_batch_size VALID_BATCH_SIZE + Number of examples in each validation chunk + -f FACTORS, --factors FACTORS + Number of predictive factors + --layers LAYERS [LAYERS ...] + Sizes of hidden layers for MLP + -n NEGATIVE_SAMPLES, --negative_samples NEGATIVE_SAMPLES + Number of negative examples per interaction + -l LEARNING_RATE, --learning_rate LEARNING_RATE + Learning rate for optimizer + -k TOPK, --topk TOPK Rank for test examples to be considered a hit + --seed SEED, -s SEED Manually set random seed for torch + --threshold THRESHOLD, -t THRESHOLD + Stop training early at threshold + --valid_negative VALID_NEGATIVE + Number of negative samples for each positive test + example + --beta1 BETA1, -b1 BETA1 + Beta1 for Adam + --beta2 BETA2, -b2 BETA2 + Beta1 for Adam + --eps EPS Epsilon for Adam + --dropout DROPOUT Dropout probability, if equal to 0 will not use + dropout at all + --checkpoint_dir CHECKPOINT_DIR + Path to the directory storing the checkpoint file + --mode {train,test} Passing "test" will only run a single evaluation, + otherwise full training will be performed + --grads_accumulated GRADS_ACCUMULATED + Number of gradients to accumulate before performing an + optimization step + --opt_level {O0,O2} Optimization level for Automatic Mixed Precision + --local_rank LOCAL_RANK + Necessary for multi-GPU training -All these parameters can be controlled by passing command line arguments to the ncf.py script. -To get a complete list of all command line arguments with descriptions and default values you can run: -```bash -python ncf.py --help ``` +### Getting the data -## Training accuracy results +The NCF model was trained on the ML-20m dataset. +For each user, the interaction with the latest timestamp was included in the test set and the rest of the examples are used as the training data. + +This repository contains the `./prepare_dataset.sh` script which will automatically download and preprocess the training and validation datasets. +By default, data will be downloaded to the `/data` directory. The preprocessed data will be placed in `/data/cache`. + +#### Dataset guidelines + +The required format of the data is a CSV file with three columns: `user_id`, `item_id` and `timestamp`. This CSV should contain only the positive examples, in other words, +the ones for which an interaction between a user and an item occurred. The negatives will be sampled during the training and validation. 
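To make that layout concrete, the snippet below writes a tiny, made-up interactions file with the three columns described above. The IDs and timestamps are purely illustrative, and the presence of a header row is an assumption here -- check `prepare_dataset.sh` and `download_dataset.sh` (see the next section) for the exact conventions they expect.

```python
import pandas as pd

# Illustrative positive interactions only; negatives are sampled later
# during training and validation, as described above.
interactions = pd.DataFrame({
    "user_id":   [0, 0, 1, 2],
    "item_id":   [10, 42, 10, 7],
    "timestamp": [1112484027, 1112484580, 1094785740, 1112485573],
})
interactions.to_csv("custom_dataset.csv", index=False)
```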
+
+#### Multi-dataset
+
+This implementation is tuned for the ML-20m and ML-1m datasets.
+Using other datasets might require tuning some hyperparameters (for example, learning rate, beta1 and beta2).
+
+If you'd like to use your custom dataset you can do it by adding support for it in the `prepare_dataset.sh` and `download_dataset.sh` scripts.
+
+The performance of the model depends on the dataset size.
+Generally, the model should scale better for datasets containing more data points.
+For a smaller dataset you might experience slower performance.
+
+
+#### ML-1m
+
+To download, preprocess and train on the ML-1m dataset run:
+```bash
+./prepare_dataset.sh ml-1m
+python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-1m
+```
+
+### Training process
+The name of the training script is `ncf.py`. Because of the multi-GPU support, it should always be run with the torch distributed launcher like this:
+```bash
+python -m torch.distributed.launch --nproc_per_node=<number_of_gpus> ncf.py --data <path_to_dataset> [other_parameters]
+```
+
+The main results of the training are the checkpoints stored by default in `/data/checkpoints/`. This location can be controlled
+by the `--checkpoint_dir` command-line argument.
+
+The validation metric is Hit Rate at 10 (HR@10) with 100 test negative samples. This means that for each positive sample in
+the test set 100 negatives are sampled. All resulting 101 samples are then scored by the model. If the true positive sample is
+among the 10 samples with highest scores we have a "hit" and the metric is equal to 1, otherwise it's equal to 0.
+The HR@10 metric is the number of hits in the entire test set divided by the number of samples in the test set.
+
+### Inference process
+
+Inference can be launched with the same script used for training by passing the `--mode test` flag:
+```bash
+python -m torch.distributed.launch --nproc_per_node=<number_of_gpus> ncf.py --data <path_to_dataset> --mode test [other_parameters]
+```
+
+The script will then:
+* Load the checkpoint from the directory specified by the `--checkpoint_dir` argument
+* Run inference on the test dataset
+* Compute and print the validation metric
+
+## Mixed precision training
+
+Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [tensor cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing architecture, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
+1. Porting the model to use the FP16 data type where appropriate.
+2. Adding loss scaling to preserve small gradient values.
+
+The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK.
+
+For information about:
+- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation.
+- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. +- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. +- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). + + +### Enabling mixed precision + +Using the Automatic Mixed Precision (AMP) package requires two modifications in the source code. +The first one is to initialize the model and the optimizer using the `amp.initialize` function: +```python +model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, + keep_batchnorm_fp32=False, loss_scale='dynamic') +``` + +The second one is to use the AMP's loss scaling context manager: +```python +with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() +``` + +## Benchmarking + +### Training performance benchmark + +NCF training on NVIDIA DGX systems is very fast, therefore, in order to measure train and validation throughput, you can simply run the full training job with: +```bash +./prepare_dataset.sh +python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-20m --epochs 5 +``` + +At the end of the script, a line reporting the best train throughput is printed. + + +### Inference performance benchmark + +Validation throughput can be measured by running the full training job with: +```bash +./prepare_dataset.sh +python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-20m --epochs 5 +``` + +The best validation throughput is reported to the standard output. + +## Results + +The following sections provide details on how we achieved our performance and accuracy in training and inference. + +### Training accuracy results + +#### NVIDIA DGX-1 (8x V100 32G) + +Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-1 with 8x V100 32G GPUs. The following table lists the best hit rate at 10 for DGX-1 with 8 V100 32G GPUs: -| **Number of GPUs** | **Full precision HR@10** | **Mixed precision HR@10** | +| **Number of GPUs** | **Single precision HR@10** | **Mixed precision HR@10** | |:---:|:--------:|:-------:| -|1| 0.959015 |0.959485| -|4| 0.959389 |0.959274| -|8| 0.959015 |0.96| +|1| 0.95847 | 0.95845 | +|4| 0.95887 | 0.95841 | +|8| 0.95850 | 0.95885 | -Here's an example validation accuracy curve for mixed precision vs full precision on DGX-1 with 8 V100 32G GPUs: +Here's an example validation accuracy curve for mixed precision vs single precision on DGX-1 with 8 V100 32G GPUs: ![ValidationAccuracy](./img/dgx1v_32_curve.png) +To reproduce this result, start the NCF Docker container interactively and run: +```bash +./prepare_dataset.sh +python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-20m +``` + +Training accuracy results on a DGX-1 with 8 V100 16G GPUs and on DGX-2 should be the same. + +#### Training stability test The histogram below shows the best HR@10 achieved for 400 experiments using mixed precision and 400 experiments using single precision. -Mean HR@10 for mixed precision was equal to 0.95917 and for single precision it was equal to -0.95915. 
+Mean HR@10 for mixed precision was equal to 0.95868 and for single precision it was equal to +0.95867. ![hr_histogram](./img/hr_histogram.png) -## Training performance results +### Training performance results -This example is based on [our submission for the MLPerf v0.5 benchmark](https://github.com/mlperf/results/tree/master/v0.5.0/nvidia/submission/code/recommendation/pytorch). Please note that we've introduced some improvements to this version that make time-to-train not directly comparable between it and our MLPerf submission: -- This version uses a more efficient multi-gpu sharding algorithm -- We added dropout operations here to achieve better accuracy -- This version uses 100 negatives by default during the evaluation phase as was done in the original NCF paper. MLPerf version used 999 -- We save the model checkpoints in this version. This might make the training a few seconds slower depending on the speed of your storage -### NVIDIA DGX-1 with 8 V100 16G GPUs +#### NVIDIA DGX-1 (8x V100 16G) + +Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-1 with 8x V100 16G GPUs. The following table shows the best training throughput: -| **Number of GPUs (samples/sec)** | **Mixed precision (samples/sec)** | **Full precision (samples/sec)** | **Speedup** | -|:---:|:-------------:|:-----------:|:-----:| -| 1 | 20,027,840 | 9,529,271 | 2.10 | -| 4 | 62,633,260| 32,719,700 | 1.91 | -| 8 | 99,332,230| 55,004,590 | 1.81 | +| **Number of GPUs** | **Batch size per GPU**| **Mixed precision throughput (samples/sec)** | **Single precision throughput (samples/sec)** | **Speed-up with mixed precision** | **Multi-GPU strong scaling with mixed precision** | **Multi-GPU strong scaling with FP32** | +|:---:|:--------:|:-----:|:-----------:|:-----:|:----:|:---| +| 1 |1048576| 20,459,365| 9,777,551 | 2.09 | 1 | 1 | +| 4 |262144 | 61,782,125| 32,583,924 | 1.90 | 3.02 |3.33| +| 8 |131072 | 98,464,084| 55,365,147 | 1.78 |4.81 |5.66| + +The following table shows the average time to reach HR@10 of 0.9562 across 5 random seeds. The training time was measured excluding data downloading, preprocessing, validation data generation and library initialization times. -The following table shows mean time to reach HR@10 of 0.9562 across 5 random seeds. The training time was measured excluding data downloading, preprocessing and library initialization times. +| **Number of GPUs** | **Batch size per GPU** | **Mixed precision (seconds)** | **Single precision (seconds)** | **Speed-up with mixed precision** | +|:---:|:----:|:---------:|:-----------:|:-----:| +| 1 | 1048576| 67.03 | 142.31 | 2.12 | +| 4 | 262144| 23.92 | 47.57 | 1.99 | +| 8 | 131072| 18.82 | 31.48 | 1.67 | -| **Number of GPUs (samples/sec)** | **Mixed precision (seconds)** | **Full precision (seconds)** | **Speedup** | -|:---:|:-------------:|:-----------:|:-----:| -| 1 | 78.73 | 153.90 | 1.95 | -| 4 | 25.80 | 49.41 | 1.92 | -| 8 | 20.42 | 32.68 | 1.60 | -### NVIDIA DGX-1 with 8 V100 32G GPUs + +#### NVIDIA DGX-1 (8x V100 32G) + +Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-1 with 8x V100 32G GPUs. 
The following table shows the best training throughput: -| **Number of GPUs (samples/sec)** | **Mixed precision (samples/sec)** | **Full precision (samples/sec)** | **Speedup** | +| **Number of GPUs** | **Batch size per GPU** | **Mixed precision throughput (samples/sec)** | **Single precision throughput (samples/sec)** | **Speed-up with mixed precision** | **Multi-GPU strong scaling with mixed precision** | **Multi-GPU strong scaling with FP32** | +|:---:|:----:|:---------:|:-----------:|:-----:|:---:|:---:| +| 1 | 1048576| 19,314,944 | 9,464,431 | 2.04 | 1 | 1 | +| 4 | 262144| 58,579,745 |31,577,085 | 1.86 | 3.03 | 3.34 | +| 8 | 131072| 92,964,306 | 53,972,811 | 1.72 | 4.81 | 5.70 | + +The following table shows the average time to reach HR@10 of 0.9562 across 5 random seeds. The training time was measured excluding data downloading, preprocessing, validation data generation and library initialization times. + +| **Number of GPUs** | **Mixed precision (seconds)** | **Single precision (seconds)** | **Speed-up with mixed precision** | |:---:|:-------------:|:-----------:|:-----:| -| 1 | 18,871,650 | 9,206,424 | 2.05 | -| 4 | 59,413,640 | 31,898,870 | 1.86 | -| 8 | 94,752,770 | 53,645,640 | 1.77 | +| 1 | 70.49 | 146.68 | 2.08 | +| 4 | 24.61 | 49.01 | 1.99 | +| 8 | 19.72 | 32.25 | 1.64 | -The following table shows mean time to reach HR@10 of 0.9562 across 5 random seeds. The training time was measured excluding data downloading, preprocessing and library initialization times. -| **Number of GPUs (samples/sec)** | **Mixed precision (seconds)** | **Full precision (seconds)** | **Speedup** | + +#### NVIDIA DGX-2 (16x V100 32G) + +Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-2 with 16x V100 32G GPUs. + +The following table shows the best training throughput: + +| **Number of GPUs ** | **Batch size per GPU** | **Mixed precision throughput (samples/sec)** | **Single precision throughput (samples/sec)** | **Speed-up with mixed precision** | **Multi-GPU strong scaling with mixed precision** | **Multi-GPU strong scaling with FP32** | +|:---:|:-----:|:-------:|:-----------:|:-----:|:---:|:---:| +| 1 | 1048576| 20,645,544 | 10,145,873 | 2.03 | 1 | 1 | +| 4 | 262144 | 63,608,950 | 34,758,369 | 1.83 | 3.08 | 3.43 | +| 8 | 131072| 98,887,103 | 57,251,418 | 1.73 | 4.79 | 5.64 | +| 16 | 65536| 128,976,394 | 82,932,545 | 1.56 | 6.25 | 8.17 | + +The following table shows the average time to reach HR@10 of 0.9562 across 5 random seeds. The training time was measured excluding data downloading, preprocessing, validation data generation and library initialization times. + +| **Number of GPUs ** | **Mixed precision (seconds)** | **Single precision (seconds)** | **Speed-up with mixed precision** | |:---:|:-------------:|:-----------:|:-----:| -| 1 | 79.80 | 147.92 | 1.85 | -| 4 | 27.67 | 47.64 | 1.72 | -| 8 | 22.61 | 31.62 | 1.40 | +| 1 | 65.99 |134.93 |2.04| +| 4 | 26.21 |41.12 |1.57| +| 8 | 21.96 |29.71 |1.35| +| 16| 22.15 |28.99 |1.31| -## Inference performance results -### NVIDIA DGX-1 with 8 V100 16G GPUs +### Inference performance results + + +#### NVIDIA DGX-1 (8x V100 16G) + +Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-1 with 8x V100 16G GPUs. 
 The following table shows the best inference throughput:
-| **Number of GPUs (samples/sec)** | **Mixed precision (samples/sec)** | **Full precision (samples/sec)** | **Speedup** |
+| **Number of GPUs** | **Mixed precision (samples/sec)** | **Single precision (samples/sec)** | **Speed-up with mixed precision** |
 |:---:|:-------------:|:-----------:|:-----:|
-| 1 | 58,836,420 | 28,964,964 | 2.03 |
+| 1 | 57,163,273 | 28,877,257 | 1.98 |
-### NVIDIA DGX-1 with 8 V100 32G GPUs
+#### NVIDIA DGX-1 (8x V100 32G)
+
+Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-1 with 8x V100 32G GPUs.
 The following table shows the best inference throughput:
-| **Number of GPUs (samples/sec)** | **Mixed precision (samples/sec)** | **Full precision (samples/sec)** | **Speedup** |
+| **Number of GPUs** | **Mixed precision (samples/sec)** | **Single precision (samples/sec)** | **Speed-up with mixed precision** |
 |:---:|:-------------:|:-----------:|:-----:|
-| 1 | 55,317,010 | 28,470,920 | 1.94 |
+| 1 | 54,570,476 | 28,085,521 | 1.94 |
+
+
+#### NVIDIA DGX-2 (16x V100 32G)
+
+Our results were obtained by following the steps in the Quick Start Guide in the PyTorch 19.05-py3 NGC container on NVIDIA DGX-2 with 16x V100 32G GPUs.
+
+The following table shows the best inference throughput:
+
+| **Number of GPUs** | **Mixed precision (samples/sec)** | **Single precision (samples/sec)** | **Speed-up with mixed precision** |
+|:---:|:-------------:|:-----------:|:-----:|
+| 1 | 58,383,216 | 30,018,043 | 1.94 |
 ## Changelog
 1. January 22, 2018
  * Initial release
+2. May, 2019
+ * Lower memory consumption (down from about 18GB to 10GB for batch size 1M on a single NVIDIA Tesla V100). Achieved by using an approximate method for generating negatives for training.
+ * Automatic Mixed Precision (AMP) with dynamic loss scaling instead of a custom mixed-precision optimizer.
+ * Performance numbers for NVIDIA DGX-2.
+ * Data loading code cleanup.
+ * Default container updated to PyTorch 19.05-py3.
+ * Updated README.md.
-## Known issues
+## Known issues
+
 ### Scaling beyond 8 GPUs
-Neural Collaborative Filtering is a relatively lightweight model that trains quickly with this relatively smaller dataset, ml-20m.
-Because of that the high ratio of communication to computation makes it difficult to
-efficiently use more than 8 GPUs. Normally this is not an issue because when using 8
-GPUs with fp16 precision the training is sufficiently fast. However, if you’d like to
- scale the training to 16 GPUs and beyond you might try modifying the model so that
- the communication-computation ratio facilitates better scaling. This could be done e.g.,
+Neural Collaborative Filtering is a relatively lightweight model that trains quickly on the relatively small ML-20m dataset.
+Because of that, the high ratio of communication to computation makes it difficult to
+efficiently use more than 8 GPUs. Typically, this is not an issue because when using 8
+GPUs with FP16 precision, the training is sufficiently fast. However, if you’d like to
+scale the training to 16 GPUs and beyond, you might try modifying the model so that
+the communication-computation ratio facilitates better scaling. This could be done, for example,
 by finding hyperparameters that enable using a larger batch size or by reducing the
 number of trainable parameters.
 ### Memory usage
-Training on a single GPU with less than 16GB of memory or switching off FP16 mode might result in out-of-memory errors.
 To reduce memory usage you can use a smaller batch size.
-However, since we’re using the Adam optimizer, this might require changing the hyperparameters such as learning rate, beta1 and beta2.
-To circumvent this you can use gradient accumulation to combine multiple gradients computed from smaller batches into a single weight update.
-This should keep the “effective” batch size the same as original and enable using the default hyperparameters with much lower memory usage:
-```bash
-python -m torch.distributed.launch --nproc_per_node=8 ncf.py --data /data/cache/ml-20m --grads_accumulated 2 --batch-size 524288
-```
+In the default settings, the additional memory beyond 16G may not be fully utilized.
+This is because we set the default batch size for the ML-20m dataset to 1M,
+which is too small to completely fill up multiple 32G GPUs.
+1M is the batch size for which we experienced the best convergence on the ML-20m dataset.
+However, on other datasets, even faster performance may be possible by finding hyperparameters that work well for larger batches and leverage additional GPU memory.
+
-In the default settings the additional memory beyond 16G may not be fully utilized.
-This is because we set the default batch size for ml-20m dataset to 1M,
-which is too small to completely fill up multiple 32G GPUs.
-1M is the batch size for which we experienced the best convergence on the ml-20m dataset.
-However, on other datasets even faster performance can be possible by finding hyperparameters that work well for larger batches and leverage additional GPU memory.
diff --git a/PyTorch/Recommendation/NCF/dataloading.py b/PyTorch/Recommendation/NCF/dataloading.py
new file mode 100644
index 00000000..31b3b201
--- /dev/null
+++ b/PyTorch/Recommendation/NCF/dataloading.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -----------------------------------------------------------------------
+#
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
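+
+# Data loading utilities for NCF:
+#  * _TestNegSampler - draws random negative items for each user, rejecting items
+#    the user already interacted with in the training set.
+#  * create_test_data - builds the per-user evaluation lists (1 true positive plus
+#    `--valid_negative` sampled negatives), a duplicate mask and the index of the
+#    true positive, used later for HR@10 evaluation.
+#  * prepare_epoch_train_data - generates shuffled training batches of positive
+#    interactions plus randomly drawn negatives for a single epoch.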
+ +import time +import torch +import tqdm + +class _TestNegSampler: + def __init__(self, train_ratings, nb_neg): + self.nb_neg = nb_neg + self.nb_users = int(train_ratings[:, 0].max()) + 1 + self.nb_items = int(train_ratings[:, 1].max()) + 1 + + # compute unique ids for quickly created hash set and fast lookup + ids = (train_ratings[:, 0] * self.nb_items) + train_ratings[:, 1] + self.set = set(ids) + + def generate(self, batch_size=128*1024): + users = torch.arange(0, self.nb_users).reshape([1, -1]).repeat([self.nb_neg, 1]).transpose(0, 1).reshape(-1) + + items = [-1] * len(users) + + random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist() + print('Generating validation negatives...') + for idx, u in enumerate(tqdm.tqdm(users.tolist())): + if not random_items: + random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist() + j = random_items.pop() + while u * self.nb_items + j in self.set: + if not random_items: + random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist() + j = random_items.pop() + + items[idx] = j + items = torch.LongTensor(items) + return items + + +def create_test_data(train_ratings, test_ratings, args): + test_users = test_ratings[:,0] + test_pos = test_ratings[:,1].reshape(-1,1) + + begin = time.time() + sampler = _TestNegSampler(train_ratings.cpu().numpy(), args.valid_negative) + test_negs = sampler.generate().cuda() + end = time.time() + print('Generating validation negatives took: ', end - begin) + del train_ratings + + # create items with real sample at last position + test_users = test_users.reshape(-1,1).repeat(1, 1 + args.valid_negative) + test_items = torch.cat((test_negs.reshape(-1, args.valid_negative), test_pos), dim=1) + del test_ratings, test_negs + + # generate dup mask and real indices for exact same behavior on duplication compare to reference + # here we need a sort that is stable(keep order of duplicates) + sorted_items, indices = torch.sort(test_items) # [1,1,1,2], [3,1,0,2] + sum_item_indices = sorted_items.float()+indices.float()/len(indices[0]) #[1.75,1.25,1.0,2.5] + indices_order = torch.sort(sum_item_indices)[1] #[2,1,0,3] + stable_indices = torch.gather(indices, 1, indices_order) #[0,1,3,2] + # produce -1 mask + dup_mask = (sorted_items[:,0:-1] == sorted_items[:,1:]) + dup_mask = torch.cat((torch.zeros_like(test_pos, dtype=torch.uint8), dup_mask),dim=1) + dup_mask = torch.gather(dup_mask,1,stable_indices.sort()[1]) + # produce real sample indices to later check in topk + sorted_items, indices = (test_items != test_pos).sort() + sum_item_indices = sorted_items.float()+indices.float()/len(indices[0]) + indices_order = torch.sort(sum_item_indices)[1] + stable_indices = torch.gather(indices, 1, indices_order) + real_indices = stable_indices[:,0] + + if args.distributed: + test_users = torch.chunk(test_users, args.world_size)[args.local_rank] + test_items = torch.chunk(test_items, args.world_size)[args.local_rank] + dup_mask = torch.chunk(dup_mask, args.world_size)[args.local_rank] + real_indices = torch.chunk(real_indices, args.world_size)[args.local_rank] + + test_users = test_users.view(-1).split(args.valid_batch_size) + test_items = test_items.view(-1).split(args.valid_batch_size) + + return test_users, test_items, dup_mask, real_indices + + +def prepare_epoch_train_data(train_ratings, nb_items, args): + # create label + train_label = torch.ones_like(train_ratings[:,0], dtype=torch.float32) + neg_label = torch.zeros_like(train_label, dtype=torch.float32) + neg_label = 
neg_label.repeat(args.negative_samples) + train_label = torch.cat((train_label,neg_label)) + del neg_label + + train_users = train_ratings[:,0] + train_items = train_ratings[:,1] + + train_users_per_worker = len(train_label) / args.world_size + train_users_begin = int(train_users_per_worker * args.local_rank) + train_users_end = int(train_users_per_worker * (args.local_rank + 1)) + + # prepare data for epoch + neg_users = train_users.repeat(args.negative_samples) + neg_items = torch.empty_like(neg_users, dtype=torch.int64).random_(0, nb_items) + + epoch_users = torch.cat((train_users, neg_users)) + epoch_items = torch.cat((train_items, neg_items)) + + del neg_users, neg_items + + # shuffle prepared data and split into batches + epoch_indices = torch.randperm(train_users_end - train_users_begin, device='cuda:{}'.format(args.local_rank)) + epoch_indices += train_users_begin + + epoch_users = epoch_users[epoch_indices] + epoch_items = epoch_items[epoch_indices] + epoch_label = train_label[epoch_indices] + + if args.distributed: + local_batch = args.batch_size // args.world_size + else: + local_batch = args.batch_size + + epoch_users = epoch_users.split(local_batch) + epoch_items = epoch_items.split(local_batch) + epoch_label = epoch_label.split(local_batch) + + # the last batch will almost certainly be smaller, drop it + epoch_users = epoch_users[:-1] + epoch_items = epoch_items[:-1] + epoch_label = epoch_label[:-1] + + return epoch_users, epoch_items, epoch_label + diff --git a/PyTorch/Recommendation/NCF/download_dataset.sh b/PyTorch/Recommendation/NCF/download_dataset.sh index 526f6ac7..6ca3bc97 100755 --- a/PyTorch/Recommendation/NCF/download_dataset.sh +++ b/PyTorch/Recommendation/NCF/download_dataset.sh @@ -3,16 +3,19 @@ RAW_DATADIR=$2 function download_20m { echo "Download ml-20m" + cd ${RAW_DATADIR} curl -O http://files.grouplens.org/datasets/movielens/ml-20m.zip - mv ml-20m.zip ${RAW_DATADIR} + cd - } function download_1m { echo "Downloading ml-1m" + cd ${RAW_DATADIR} curl -O http://files.grouplens.org/datasets/movielens/ml-1m.zip - mv ml-1m.zip ${RAW_DATADIR} + cd - } + if [[ ${DATASET_NAME} == "ml-1m" ]] then download_1m diff --git a/PyTorch/Recommendation/NCF/img/dgx1v_32_curve.png b/PyTorch/Recommendation/NCF/img/dgx1v_32_curve.png index 49bcab0238829d64e2ff6fa863089840c0cbb2e0..e770b29da1d879d07b224df89f2e033fc1bf9cc9 100644 GIT binary patch literal 41910 zcmbrmby$^M*Dnf)(x9YtcXx*%9Rea9(nxo=bPLiY-Q8W%E#2MS4QI0Ve&6q0=bZlz z*Lto;*IK~6<~`>azZwS1%ZVew9CA#5iXwztW`KEP;Kbme3)|cqN4|@tx z6;7e|(4YR#!%vXlL)8EL6*^-g*xT=a-VU282Tk_hH$#8gwI?AW{_hWi70yG0g!|8r z0&n9H5iV60&rVq7eQ)a6++Su&`?s%89jWtgBNOncQVmv$OAl(rKnDN$LxhTk zrqQx|ywrrvX{q_ZDe*puID$8$Dp+4`7k<4SjVGCF+pm=9^(n2Q-V`!P>^M5gM)Ut4 z*Zu!kvg0H?!z^ysmbA`JVJRu8u|0dSiaMl!|6UQibNzH@drP@2#Zs^4PoMmW&Q~Gw74fAlrm;3enS7sp%S{QZ+12y8BIp}o$B-eqssHGrHLN9nUGy~wdwc2IYY4CYV88wYse&b$R&Bm zCI8ulZzKHoTYarlqYcz7Js~B}A#J^J!75Ie+*h^psl{~-7hkZHL)2qSWDESMczX)% zZlE?2L$-%_bw%C^wcK?1;MR^+NhkQjC>aF`1rb4j7liie7ci z>#ca5?Lb0LA_0L`kp=tfLp3RBX~_Ki{PvEH43Qg3*zo`Fow|5?wt+HG$yc1S#bf$A z91D{1yC4zdUAyb*4NorU5I@vY^fvkBTYqRIHlbv{u4GQ1;QCH2>0hlkg=^$?$s5Y*Db@95}A&(NgT z4WH(I-jD2s&%wcgh|L1+z0~A1F?mJbJRzEz7W zGE{3NEYA|SAojk$f4?Qvk<)$dy(!%h^e&IFWW!SJ;w>X%88VWXefIqPd>mG#cVJ+k zXa{UQO5x@1c$U^R?SJ+xQMFbmWU%yGN))J=vIO?Td&Hpa>5>>j?gz!OHM#Ghbc0CP zs1M%nC8Kfbh!OKM1z^tuSu7X1FzMC*qJ81>NlwNkARuUdx?Y%^oILF#ja*${7ZwtN zNcVbf7dBhXd%3|!D4}fgoLELqC$5lftiewvTpVnRG>7sWA5YD2TQP`r|0K-TLZBa79J-p zHwpZ19M{*^j~j$95UY~~U%~gh5f>MCy*}*Ma$4%XyXdJXy*ij-{oW2yTvBp&dAV8U z^;9(8ihE)7w1}c$=43S0yMmSUT<>yu7@?_hB=A 
zhY)-`{i*Be>dF&MR`Fwkg5pbBSZXT1$HTQ@Zv;MlE9HL=i0}m6QRNxMX#<%gOg;)3 zAM`B*A(TXPHKb77mKy^<${R5#8*kdHEc;8C=XsN4LWoeGI&w+Z_8&P3C#}YdK0?ufq`<-o?$F-W{(R_DS zq=1yQHB*)GC2$%WEzv?mPmd&$?;Q4Erpy>@(BHp*vo+AQgO`SJ zt(rT)x$Z01?YdnLWqiHaFen5Dr>h+STgt_%9j~uHoV1NfGq(Iqby}7b6YF80OPBsn zgUsM9ry?RAJgh( zC&#LV+tr^auWJ~J#kl>t{BxCtVT6JN-jkC`t@QhA*!jfQcS zyAdnIh`*5zfPf!st z8;{^&GH9AOKZ6TkF&QJ$b~|G4sz$=)Sl!+h1FJ|X7Pi`rrDL%7FGtMhSB6Mnl7)zo z5d}CPi>?PHTs%CeA3yf}`Sdf%{9+)M*5$mPGLhGb0fMpl6w2-TP#2sk_p@%SfGq;A zN1NB3@u!t(I{*on%XSksOFYu@%;UpfW){W0iasuaZ$WERQF{U|dL~bDR zQx`Uu^`A8-$>`m=D#|zS5Se7Y7ERSRHrlRr`cu@c9UczdUmXA>f%g*zWjiOHq1Oo+ zpU01G@bYM3#w0Z<35P-^0b&yCZFoqCa5RH)To?}fK<2eh!(zTd)@-v&)p)gTR~SMG z0iSaezuR@fFq?#|EIAcbP)Z6Og0xme4GT*-wwYXmC?N1iSB(nI=RT(GK>!~R# zi&E=hvh~R@F$gM)ZxgQJ=UF_ok5yhpZpI>V=K!Nn|nVmRHBT_A5YeQ$kw9$ z#qRoW?x$kGW~V>G@7P%E5U%|zSFq=ai8~CMYHMqcPT|$3ne@cpv}lipVYABY%R+z- zr>Ld1RCvV8!($7282ABQ5%|2$Td^v)y@Jo5goS;HqrM0CKw>jJFR%@n?Y&;Xn3`$z zmXevYb;a4qelF|`wOZWxKKtYzyU7(!H4hR`17^)XWQ|w^Wht1L6S-HYXwvUM7-gc_ zwdRU5-9NsZ8Mam|odWl{Y@1iQ2Z7V4I?AU}Wgxanj&BamP{PQ#Lzgv6Iv zB;a%}Fa5Q0b#`_pa9R=EEW0rlDVLD2utaBOk~p3$TTltyjftRCm`>5eF=!2Rp$HBE zJaTv1g+f2?rKyQm)_e|Mx8lj$R{U`31?Y*VXT5~1;N-?J>O=$v!t9Ks z>I2|VtX=~T8r5qR+lUm5$jhT-H5&R;SQxUqYm(-?4i|G|F<0pi>PJrmekka(O^!#I z&CSjI?R{Ur@OM{@3P_unP>BR0Z-p|pem_}m!Q-*-%bcMD>y5!S&vKnzrf1bdW9kqa zuvZ}3cIJ5a{TcoAV~pzyAqPz!gL%#vrmNbQZ%d*Brnb}*o0Bi*kM(RNnH+?Pjzy53 zQwEn{1LjZ1dFNUz$V(}Q`=ZCc!Qj{*^g&z@n;5vn`n82CYScq0Br5tAm&2Thni|d< zEVIpu#|=2$i{12S&}*i_-hUko`lHEJ)zr@J56a%W$H5tRt?AL^()DLOxNvZAx=W1? zj}I#^+hFA>czJO-IXT6`KI--*0J`}7VtbfQqc)7)Y?{e_S6KkbP$Cx*5peKPd z)YJCzWCgg4%bGt{5?$-uA7YWUMB_Bw`RX4gY&f8(Ca@U710V)Lr(O+9OG~R3 z-JM@xVAYR_Tq6WscwZt-$+DTOaT5Sf_QskV)jg2T)XaheZb)Jsw#ECV2i32Qn)`n> zm(^1rA_X{)qG6dYdD9J=ROjSE2GByGY_YrSZZ?)$TE>s{*C5mxBouqR|<+t_k4{RH4P1mjI^}0KB&rzZYM35fPNup%y665#BM6%iRP5|%rGmu z=OWBt6yS@`9ZA;gMYcZn9b~ z)|XmDod%LbG+gIR>`_ruXVljxfPLOI8Xzty38!9y2{76H_0ga=PQ z3z5;Wp*3WBmpC((=^IES$fU=WaI^rHB zmmzPxU{S|xG?XCMByqAk%F|MuS5A{S6wSpVz0pS#zT@6)b6JV(t<9ic&;2O-m=ai1 zPQK=9Q{ves6>Tpcw}pwK^o#hFS3j;zO;0OoYMP^(m^f>ztA~(7J{A_z07fe*BlEdH z36+3g1boQdnKGSZ#ZW^vio113Psux8mo4#O2K_QeW4+Th>*{MG>2tK-rj=padGUWw z<;=;lpa^uOPO!?qHB~p_`xX>7>N(~L^nyEIP)xfU%`scVUtVV`UQ#XMK`s9s_9K;r zJheIF7nOO<`=@_wh9ciHbX_zyu8Vam^cQ0$xFEL%gl?N3LV7{2dL`l%-MMXtHpGC#Q z6f-f&uV0%QsaB_7@p|ERyE(RSi$5N*!`<*Ud`cxbo|R*mNS`N01zM;r+=9#Bsi=vr<0 z#QrNSO4iSAY@8?S8{@i_S5O!~ewf^|PvLP00jL`CJ%AiwfjjoPWyDm>%IfVflBuk!8eYzc3=eac=4L)jI#utcCO2dDnkoBxp7Vi7oVSGVMT$vU;?uKVo!p`B z5nA>s^G)S1`_{dCi6e563^i8*KVn_7DbqX{)sez3#e&s7BH`z6DM}`S6~-!BQnRK}e;AF;l!2SBDQ6moZWcL{-TJ^gPfa-P)E{yS`JYy$&> za>GGPK0dzJoAC5-!&!i-BcJ%U_BTdKeR=<(BYJemFHf(g-xzB@*hZxcM<>VPKkmCR z3Z|-cCU%yHUCJBgkh}I=IEb4-#p!4WjWO|Vi%TixvzZ{(OIKKI&X}jCT3i>#QL}HXsEVr-+}2V zr7o;cvzICc7yQ8!g@t`{#iF%nNma#t`rEf}HjlS9g+)c!ECv#R-xkZpmCISLT=ZC0 zSRU#WBxvwTS{Pw)ag14JZ$IWP2+%K`Z?G_iwq&h0&#J%QzqC_pc`;J>6!;@A1)nx>Rcr@O3Xh z91?gOXh7{aiX!Qf^_ZSArR`^sW-|~pi0e_%j+x2zP@THX5af;xX;ywmNzN|-Nr1RV zDC+F&oE*WP@z9`LMow2kObnFKF-Rbn?n>c%3~l|!2Y@IUkN<_FhN>`T{5ah=3)XHf^Ds&(ueuP5Y_N?7T&Z8UL+}g4McO`;QP|nCG*B_}sWbIdQ z=LOzC+MdMpmD3LqHyK^mUD{h7(c1f(slCEG{wE~IoEE+7k(AW=@B(5+)=n$kk%)DPL(e)rhUfZ&c7 z1M{(%RM))5+}wbKgpb!JD_;~8K8%cv82vOr8vRyyz)Du5fjnwd!h z{D=ez&&bGF1^Kq6n6$te*G7#nN~yl4UswAnC4iOGoV>$Qf8lYyQPIMJ{*@fOvL5Zv zpP_FS?NF|-w;6}bOige1i^>314C-%g^>|?MdUiiNoafa4Bk-Q1cCIs~BqQp#GdChx zy9?Q~9BG4fkB`>EMnOplyP9%|GN;<=1RhY#Kyd|dgNTF#3M<~MC?HBqqX+ahO4MhjD1zMHy`T{0bsWu8YG^u=$~$vtYg&V&qB2AA1{tg>RjftZM=ME5 z$zWoxU(wP3%x5A7FK3;9v)Z3h^+cwG2L|?jtI5cx38(KLWd3z{0w&1*Mfa 
zU;PyETue(%z1HsY=5KE=XR^m@JE*tW_$cVf2l%1IvX1VQnkUy2RX4?=cL@P#ebX7AsRt>@L7VLYv#<8Ac%4t!JysT2S8{42nW)wEFjE#6M1#cfjV)9 zuZj-<=VD?9mC^EJ+}auAhE>y%Y|qE<0OhMAi02Q{S`Ayi5htaU`^ ziVK;ioJfV@oZVXI+gAmoYQr$d@@)miGXBm>m5hBYgA>DX4ajlKfTxS4V*~xPOWqk z4p{jMz{7B4L&L)Uj*gD*t`ClnS1e?+seDtZba)Xb!&?reD9f{1Sv7KYae0*p-ywb! z0$<4!)-mvPXB&yVNp#s9LpPqefvq7wza2mk;h(7iY|Uk}4sBy&lOo{J_{}Nf=Xp z+e3CGvt)rp$2tUknbnQY_lx=;)v}tm`E>AFy0W1J+7sFhWl%gywDzUIVJinLKPxl} z)Uk7*>^UZ{is)al<9q693<&m>@u>;QmI>)>hCaJ9H9meHQ@zID0_zJa2jt`$9&(+c zGF_|t8oXE*ssdIfvrk*I`JmAr(GfzL%J$ zGsql?f?{yI*gq=xVt#&eb$Myf;rkXSTpSgS7hr3tHaXG*tVwYJ^oqe^wJPW89?^jVuHxY+&lJcXlQVMx|%U+x>D27U^Sbj6AQ;N z9CuO|lcY;Vp{IP%{BZ9Jaq0j3r>2?rXG{#*+S*z`ASjRB2eq-<)4w!tY2mojZ3|Ze zF`Im9v0#eQ#OBZmuCLmf=H59KjfdiB7R{iN5a|YhzK|*kP3Bi>&)%!)_=Ciab$MO& zxyR@WqHB}vcd#G)npO-r$Q6sq45k0E=v6JVHXSeetW@74H;-S}r~~yt)be;s1`)I< zAmc3S`XiNzxt>V*1r8o~XAp`tt~B z`B#m&j6V~5vki8X3w2hU$#cMXBLwEc&!VCt(FHT2K^F1RY}50d|6~EYx)`aw^6F_2 z8Ya;^bRimjAvRGNqv%KP&#tc64Zc9bz|@)1CuQ#2quNdXN~oGP_ekqWv3SHjwiAS8 zP|z&qk^W?e`u1Btx-x}AX1{r5t&|288M$R`NBn`(Y~aj`C00P{w@3!uscrR^xc887 ztc`X;!euj6CgUVPsp1U@37M@iix-7iZn;^O1{f3Qq|6V^ymKvXoB-?R{rS_L$(;Gq z*H^XKnfZ&rodIQasR!`A$70pphV~5c^B5i_WO_7)k&W|zPa0OO(GzFj*xSy>O>oC_U)wBA-= zP)g(A^$NOKT13Bgt?FW~-Kvs{Gh|_52?`C}7|Rf$$E3KrzBZc7S3tz&7&8CG8!py$ z()Kc9)%Fx1rtfLelE&}G0tBgdCK>u}+nhsYk zA_w|2KCdHN;KDMA|vuHPOjXFI!Il0)UkRG4V=xEf?$dcoc;o;%@1wc63 z#=9*HEr@wXTZA;HVT|6C0urhZ70=u{vr?g4^Kz#bUhCWCqJx_LMBK1-CHm2@1q#68 zCs8kZ|L4ALZ6XcrQ3{~`j%SHyIepP{+h>(^EIIhG5}(|uP1@-p7H=4}wX<^pVuMvl=n|_?PWyp#k*Xm!}i2antaZX*oH$MX#5q%l)YX#TMU>p#Ke3JOac*mgnaS z8;PACa?irPFW@@sUt1vdNm&_F$LpzP?oSmk4}pbdG+VB#a-|KF@8JGhl(w7}^IP+n zgW@rrOA(#-L9Ij(4fE%#Eg%m-OiT=R?8eDSB;Z*w{$k0QQ#01*ohYx`#at+F5)v5| zIlG73h(Y~{u&|qqY2~VIYH3oZ^rw){LSPMtkipZ&Bn=;%)8-iX*9?5XjYXTLp)~(3 zr&@$p!Ev!Dw&iz~vlFoOO-_{-ze1V2wlL`O|8!IP9q+$!^ct_`8_>2XKxbYj`8uO0 zlv!IF2P8v2et!DQs+jp_J+BRzrZ)+xSKtn?#At6D;z%XLa*GfA# zJNq6RTT)309TE}})5A+iSs4`{e;DlGq+*GawLM?-#z4iqCCvs&xL~T6z(kyL)Y|=s zGO4mD)8NR+of#c33y$~9u9qgjg_^a@3W5kYW{@>hMaRObYv2%Z10j|CDbuRR{e4q# z8q8sZR9z!ffDALenU{=+&ZFwWLcS>)@UZOljdYZ)k5*6)Y-zla2VWem33j31(MdrB zRc8}RVlJgf)YR0Nk*jO_`vdv7neuN_C@4-=l%}-Unl#G~xQzT-eA4X>4yZN5LA?id zo=yTZKER!~2RbiOpFe*NJmjRM4XrVoQB+bgVAINwXF3(bf?sOODBqWXInjZ$H&KwetN%wLEenuJvNysMcrcGxw|^70^85@Qj2VmNQFDA z{$q&r7FUSfpWL4F6AW(xe%Ft_XhW2g59iWhYz8~V83^XYUNkVY9IYj;!a+5Ih@UE`k?$i;9Yw2pS?921fTPsN-Bj9b9*(TBg>G0!-i~SJ?yu>xHVGMpJIO3ZOO;JPvaZo?uoAHxyxJgLjN%^#XaLz!{IboWwSiVZuw zlA6I-SyL^lsmaB#vR2DC`xPk;?Uq6Y1J@-U1T1jVVBWopPD>jBWPW$0P4J}kp>(`* z_$`q-yabzl@-65Ow>yF_{UG21Yy?6=kIN50?R&clOgjCu^=|XU`d@kJo=Lz1wBGFd zaDQ_$+URiLyb;dH<#db(N)526fw|?vJ4aoa|(EM@6UP7JU0g z(@E<_`pY9F2xF`Q^I9f_>leTqe=`|KjwD|(@#G|P{5Mzc7HlgUzL&P)h**Mkj$me= zNVIK%60N|Ye|rbB(T~tTs7)S1aK!Iz_F1WQ7T-*^Y9;2Ft}nX;PN)pf-kNHv=z#-t zIOra@^rMUedlSw&LJtTG&`seMbx*)6XJKXK_I%_5{wFCdZP7RVNlp;3qfkTlpC^5^ zHe%kPjY!rq{xgGT&RsX@tAOII zw@_<=$Mycp*F1ij8@?XdV7;Coa(vHDz`H?RUj;{)9ln%37qHZ|0Nez&ABuVC@lBXD zy*EYL(>DK+H%so!Q-SkPPi>wf+VuldIrn{hOTf>DgZx5PLIdByy%z;fln>hiQ|tPr z)z!a&EcpQ!cVS(Y?u!))o~v=Rej9;Ou2WlVGzKR$-@q%tD$r0VYDWcq$$Yllht*hy z_5EwQN$2@IUJl!_GPBnE@8Rfd!;{m%=p{!YBR9q$f_ql@CKgQ#-;ZlI5MA!a-Tghi zhu7+Mi@>@%zZey~02!C$mUcoOdny1KEN6=Ia{DTWfHeP@a``zXOZApFe0BSj(?$h_ zgqs`e!Vzl#iz&?JRD~-U#cW5!*W4lfFW!>vK#_gJ_mu0xUaO1xr0t}awNH?4!#*D( zjg8SCP)hg;byf1R#^W5-$7SsR{cGkLKVM60dL#V?bw+MP9G70LGF$rQ<$A$Nqxv;p zVK!4rPE4HfmE{9Sv`H>|K3ITS=K@jz4R_nglCrYwi@K90Y_DMizz5%{{<+p zBLIb*US5&`vE0`es%jAIDWj2;;F1!0V2(q52S@ESzvSl!EyBhQN(X?Tajzf^v~gRI zu&D&eyXj)J3CNR7B73i^D`L;@KoFM2RPwt=o9I}!!$!MxPCi+z`&=N;%DRsKh!i 
zbz{xsoFNf4L%KX6?+d*#d@U36txg!S`=WtcZzLfl2S*$TMj4${Q=`Mb8nYk^vkM4N zpgW9m5ewD+6S8SCSv583_)Zc5>%6d)qi5`=1C`5HzrCt?vS^M2&h zC@v=KW~NHF9W9~4V}y~kx21PuRR)AB4z7c?p7^^>6UGsU+!`It*HA!m`U`g)&Q)Ra zyV?gX!_5r3j$j;-2<+&~ZCjOo89zZXefYi~AqRQ1B^0E&?PIyZFLQSxK@pL%VCi%F z`!{;)-8OF3%K?ybAGa92GQ3`%k183ZC89qQMq>M2B?!A;<2YtNr%R5}6&*qll7$)l z+0jWLIHURX%kGz9BJdzy)l)~uTHORKX|vJ3*i!J=wZm=P>bpF(?j~pQGYuDPsPVtO zSFba5-j2{Kv2CBB{%7t%``xRqAz>V(hwWY*}<_$yt|ucF}081nc1gN z*9x0xOoq1L=>#d1dQ2`}-i_VeAjN{egGF{dVb37<)eQoSkg9c-_@17g?jWN_r&<3# zU+{-3i0vV%p+PXV-jV>Lg6B150`b}#K*Wa^kSzfKQ;a}a1t~k*^G!(+Mmc}y4r~2F}lR?QowZYb*I_g48*S}D1HBA`;pN+%S1SfCGoO!N|yM8TDO&MV4 z#+z2&66vy4JrwKapXh{7zb0%ze_&49-rMV^uUq^_ZPBXe+5O`+9kJFtntYhFW^gpP zxjf1|xw9;Ha8aOG0E>vocF#voP0b8V37pLSfq}|=iuUeMEL<&_#Smd~Qr@jU*25xe zt_1#O1U1UBkJ0B&sJ7GVS;@m|2KdzC2wd*FRNubkb_H{6udRIoAigUQ<)j?pWWQ0- zF~EV*6I$wUGWbwMb~g21z!xkcLw_A?uX zvkFaYfRJAU4xw;wr7|}Y`KF85Exm!;06g-Rhb5a1<3SKO`hZdr zUXQ;#9)?z+gF-^|K&#N29+oeVw-agCd*?Q~QD?_kA!t`4DeJF)hx=;1h`}%zx|eya zZfzObhbw;75a~=L>AyT)=)6)}> zT3YrIaayz^#j-MTbKixF={r|ny8yR?CKkR%-QsrcwEl4szGfb`h6kcXJv>}JybEKn z(Bi<0=9?6mKXOavJGH`3W%-{Nzh%V~jwuE!7k-xJW^cUzBW850_Q)?&ra-UCwT9oO6cT zQlXgi>!6?Q0IV&O#+RsJ+29W{rQ?T{z=-JS?d^RZ=6(M?FftM)PcD@OnhfgAn{YXE z4|n%^>os4|qYr)upKKf`jH?Mx7t$;yO_EFL$0k})vfdC<{(G>TlvR<_P6V9LYk<#$Kgn8jz6sYY>DiJmLnWaH|YLwRQEexhv8kGJQc{EY3F4>a6}m< z+I6>8QF@RY!+TlUaswTC!2XpkG%g6#&OiX%@kK%7T_|=`S!Y3}4d=0v!^hP<8(XeUM4!K;q;a47f0pDVwnaLB?9EmE}#R zUy21}G&CV#W7W9^31&Ps%-*sZx4(N0nrTGGC5G{3kH=NA-%*1wT6Se^yZQM1yIL9* zKrMSE+S=B138!@}YU}EZA8*eV0U8ETZxCPV0@~@25ycON`cD63`B^2>J3^5yFVy~8 z0iQk?64S?m*H2&NgU)QVsZjYO#8R8D{mf{_A1vYRf z);Il6l4Xljq)gvkrwv8rHw7=mb&Y4m9|Qsf1qez%0M5(JE*9_~fkTt@3K>9>BE|*S z^T7Sc>wL98nhQ~t<1q4Ih)O!U=SP>3biq-;*}U&hT!8B; z;@vqn*U;VG5j&y&6`~PLeK;tosMIxo=T)SMRaN88nnB5!1Pthz+-Z)A8y|r7MrMCu z-E+f#ajhK|x)v4|zd077;;z;L32nQOII9N*a=)a@f%s$F<2eFg?m+YX$$MO%HHD27 z{=z(ETGQWrV93S#$s~eq@D;C@^nb~UG5fKP$6eiP0MbRI)xEzROHjr!k(CMP@EU)X zP=7&-Ig1-_eo{M$R`LAp^7DBxQ9qGFfBH)zOU_(z;dS=J4B!sD*9OHTd57O)2Iulq zu(w{A^na%9M?KFTiIyl&6sa(R8}xRPkQYQ$#MIE|=H@Ux9B~wFG*snm1tR~wpwq0_ zZSvez)X}9(^aF!05WC0CsruYB9<9W8fUH&UNg}3u;r^;QT%+?vUn1UX^VfStMa9`l zLxjD(y`Hi!aX^Hj1d(`L%X%NMCpGLp<;)y#gXl6K31*eB8NwM%&?NJyhwpjf(l3~v zIUb$lv~LMDV$wMp7PJ)>>}~oXb`(C}3H}ut<>g_gDv+o8-&V9Q*b%SJySaI>0#luj zwmt%YS*iFDuPK0beJWQ2QpqT-xIXdVjlB&|)`>3du89n*&<6%u+6}+}N!h2^XkSMe zUPoEF*8(+)iYX%hro4Kw|I8_fyqmPBNFNCd@^AP9)I~jxW9DkiumqoPv&QeP!BJ4H zF~vZ@q-*c(b;l2;1(A+3;J~605?VPvCot>R1DqWmka5JYcqE)4=!~`#;%N-*3)=w{ z?BTLv+EuSStPdW?!8JIr`f=-0_ilo z5_+BnNN7xW6Rk_T1j(yilXdU1I1U|@beGAXNoxju1f>~|3Iz9LK5DH5)kV2{=(+UB>r2=LV#5fk0!f! z8=>gjFamBBDbLm0!Wvn`!9sf?(0Rn}1^ps7{>WJtP^+k@fDmN|0G8a2haUknH~RUu z5B_pTWGo%!QzfG?wliMN$`3LJ`aFa>>s)JEU;#66Z{AS^ddPw3DG2CNBr4OyhJY;9 z$Hwhi7!VF=I8_1Ro}oQo!esifVsI~3?~KK6`6bE^!YQ!qqRE(bJE0#RA8)~kLVj^E z+Yl`s-DXMM@_K30F^f9`#}=5m0*c7^VURf335yOhT7G_h)#tH}t>5c#)$n55fkZYN zcOKRsgl5RjEn5u7Z|=srXhkgI@oZ|JLRIn&sNlE`6XPaqAife48~e&QKq(Lv6d10EF!5l6+wPBB0R$7{R70V3+|?(PhN!{wEgdSFln z#I#?>Sa!1_*VorOWjU8VgUOXZWPB1z%0S~ZmjJM(S!_2Yo*#DvFQzrDK44<@fO&#~ z;$j29Cn*-(0G$mE3(MAN^E9bZ=dfCqv9>Os%jAfmkkwml_Zdp&9DQ%#dO0o*-el0} z2dDDn3ENq-@7^uh%5`pBg;8}hMG$uGl|c0(N+HtkzzH0Y@rhQ(bi#&kJz0+}XSINJ zKgeA(1|7}9Dbv>wG&23OMrOaBP7m+j9rsD|IxNAwHDJ|7Kl2;weBJQX=o}ab0ow^! 
zjvndX)Z!YP&oUJ-MJ+8$6Y1cw{lzh*+G4IeCS&Pm3QgpTiiq+aqxzS)^BJLcm#LC7 ziRIUfaKof@yZ3zQAFc1l@6p$3smN)2uGB|vwMW_9>eRu;yE+h07x3V6y`<8pvt;L% zEG;S`<>5&J!%)m%I;5acD1zy#In3=x>;iou0LLWh3~0J$+ZiKznTG1d-B@U%V9 z0&>|m{J<#p0Sp>x391?Y9V%$E#LE|rhC+%tiXVU)4~7vyAvj#9iv+~p)Ktc71w;f) zzAOcC?7jOfAW8KmGRvA_RcNFakNX8f|4sU9VoJ4WU)&D>C~!R$AZ!-X$W~ffN~c;5 z0aU>ak3JUlmUrQ`lcylyo>@T4o@8{HPs{wMt)?=2XgwvbBjKFA^7FInXzz02)=LdY ze5SeXQv;}A%w{VIY66&2q9i5#sbSSJwdt0ZndvK=%we+|wPy?lh``KG8c5DKna*?; ztYd9V>36yryNWM;sx}|A`mp`wwOJDfTph2|h@?a*CUZXalWnp3# zh=CP#bvxml%c~$e-!VNM=lOJB;e4j|=h@%@j4`bhRSQ+M zwMN|`7}+^FV02CdAW6Ze%a}Psa>;ctskK_sOP~(`osdVA-qrrpjKzI4QT3+vvWhv+ zop*n~FT9)O(F4V=<(ogsj%}A?xwkw>YvcK!TX`HQjYBiXb5Ml6#%~QAdxkk{qVWJXsmW#67|FyP<55<+lz$tWl)!3GWq38Ni1W5WTZxCfl9S2dB#VgI+JvgjMh z=#Rjt0*s@(tE=mfrL(4Exe%sKta%u?4B40`or4*IKyoWf9EAkU@VI^h`TtDZ1qBB) zr}t-l%^KW~WGE7hmL@1{*PnDwH{5C12_Afo>TgM>9J685XkF- zy#1JTkqi0G4qHY=I;Y&odj~)4(yd%JeHd(UzUx_i^ZDvv5b?zwUI0Eb=wGj+1As3Q z53+!Ywhp9V)`3D3K;^o@c;plV=liCR*nzbEmY2v6tq60TjvzKQgS+j*i>&8&@&>rh z3zkc00AIo(A(6AOO)`}f77mOJlk)N=&(Eue`oe@0f>f8vw?Eai{gvjhGC;b?13kA9 zw?A5dZ)q5e?QI4W7OR++HnrDG8)lInF1I9up_#Ap@Wxu(a=h<;wwcv^_++3) z0iud-EXvX9ml`pHuSe^mA%5H^(#gc8(JWKi{LO!|0Ey)34mcLr{@(Ptzr)pMi_amm47g3)HfdXbTg%zq^@*2?ee7aJpqMnoeMHbxGCy^_cITb$FQ63l0z80=GbCE(K?aoBVSn;~p+%r=9et1d;nWT% zByuCli~`oFrCwR^fJ~AHOyu8HEaEn_K5Kv}yi&XFsYG{|OXSJ{avwLW8!-vrHvLBV3#UsVVT zDp-e0Fg(VVMULha9%qi*$95(qemG%Sbn0-hnbCW=J_4{8TlVYCip9sI(qf8ub~t@7 z(83R7&#^)am1%7mF!&(tmEAbsrlqTqz|{<))Xp)ZEYxjrbm` z@S88q<#(F{XJ09ro4=(tc$MWMJnOqtlm`*{Il+3LO7Gi-Dleq5A4}1>mtmqp$ z&;1>ZgBy#$VQOrygvmEKGRxs=ndf+N+Q<*AB)+T3uOPFxd|2KdFq5go0v7+(jLxVD zhhB4~fI{_R0;6YDCi(cE-Z!GO6jR>e1e)G`jJ&{7+=x$pab1>_^{|OXjTeH6l`RKXb*0@cou>;FtbrM5rGX_2k-rr|% zfsPLSfq=EqT~#R~O);Z$NdVpHLB8GDFARh-v5NLj`doRD_kVk*0EhqtY-50UAAtk| z=r<0ikYKj(``LRR-!U!fSgF^*5J=NDSg%2Wn?))hu*`J18jJTdul-8_HtDL9huomCpb)wc+)R>$1QX3@6PB#t;L#zz z7@IU^ibSr&zn^1F(4h?xK48heSAvF~%b5G&XfnM#Q+V+>E^qDTW4U{*vGnB9rro;x z(6pocM5FDJ)9NwG?PO*jm|h@hz+1P_Xxbo}$L2ODGVkDO(_qJsO1a-EU^O+Hsc1<{ zL-P-ekK$bWzcH5R}kQg>}Zs1>BQ=qVQf z#c3jmI%Yk&{X8nxV+x$-yVE0W@}`059QiBjFB^m}RCUAaZ&3K&&qA7L&b%ld)cYh= zT`ff9;UOE9(rBqNs)bVOSD6Yu(%MOY9llpQV<~aZ5bx)H$kC)Y-G%b70 zvJ%-T2aEKvh!{eykys3@v|`-(C;6^OKA*vG7x*VgFfcH-9BECmU;l`Mv+sx-KC;Z3zISn{^UbyOuTFj} zjL!H>pw7gVMf$&dv$x-MA9{a2pYQkk_&k0+{L_7(`*pwOHJ;b=x^C&!O*~E%H}|L$ zzd9h>ZSGH;?c35_mCm}eW_fqbOS?Br=S2U(3`Zs?;WMW2*`;l%6beI(e0{<3y|#8h ztTk1q4Kp}6I0_fAu-Sf1itxHrnJ-L*jRuMCzVq224JD^s^uB7--NUm_-*wdg=&Hri z+;@HnBbxvN^`R-!R`w~q+YCZ)H-hKOKGvQ#WH^Iz7^>4UGc)V!U`Tkz;;Qufcc`H5 zDY{O%UY0_o1j+S@=Q+K^pVq5WZ#3rT@ux-JEr@(NKtbWn*3=gp;_J`X?Ys^JFn9%= zq{j?3HQ&J0igjM&pkZW82U={Uhi|8OR_nvbV7|N=yEkTc=VjxZNz%6q%f(ld!=~1# z@149{@Z$XJAa-SQt2zBqMZ51}qlMfWYf<3vj5AX)z^r1{* zn$y-EH{{E^D#ddSwtDMPk5(_r=+$=gi1vuVWM>;aI?S2atoin>;A1GE(GKZ#h9d`L zt#5iwSX)c~jZL_8aquP{8_A1eY#S1y!fw1PmtHsL|F!it_i(Uy^~u#-qGPXMKCI>l zwA)5C8LM~K!rh1{7*}zFb;?!Ugk{~j$N|+ENA%Wq8j2x$`j!`Z(flFiS3a^VokSH7K^cEqdBelQQ=EG}^fUBx z+Wvfd`N)j11`0laggUBd4QmyMJuME$qB;m9ci_y5JEqt%4dU;az^|++g5-?Bp&{eH zhw+d&$d*_rl6vm6F(g6bTvok8-Ewc;X*c2HNv@%DqdM%@mio%84R6iTuD{F=k zui{T;-f+Lps@;;q>Uw2PXy@+~=;hLhvB1p!EymC}g9^O~FO>gMDH~tKfT8;X$?mQR zbWHbDD-`tcB7$AG;$F^M;bdqSUF@dX|HL%qgsKwz%P|d`$!O zd+F=r;qP*Pc+)Hmo`{@!8?z!wxl7^h-NXQ)&~o($N&yPa?#Ar}ZptPs59CdlmWhXy zFEX3nf39xP)WB*fzbc5=c@}4B5v54ipXR>wto9q9bjH#LRsGI1c>*VnD~eulE6wTo zcgD=Rm-V8;UiP6Tf}oI)UKZGNld=rNh}c%;Y6gCzVqMP)6A0Zpou9&GwIa)7KcLWU zU&G8O;b;+~F#zj=C|uJr|Ct#(wHOJ})Z|xi_}?tjC{Yw*A+UwXw|G&$K3jXJ;3t&I z(1xIKpzTKkLtS<{E~TP_K7rD~Q{iUr!k>j2#uosfRfognBO|!(FlrCeTkYt6_46+e z6F4@u9xiVs0BKZa2Kd-0Ss};;&xm 
zS?ejC--p5l-rOu^l>2zk;M6IjEo7UVWDUb4E8RW1mFe0bbvk2hA_ zc`6$6YV@ZHrs$6_OXvb-sI96xdX&yMM<1w9&x0%0^&(-L!1M*ZduwH3KQcwyBn4Q$V7MQR2q;s z&so2BLB05w{J;61eHCYbzI{pRNlD`fOIL4Y?6VE_A06x;f`8BS%F9q zO9C)f4f%AqpZoiZnpPiC9653Pcmj`3vY``(3I@X{u=^kdx_@bn2z7w?G5Jb+_YS6< z1n^`G;7T3X_t|3$S6DP|9g>&NJrp!np}&BB&}5l*+3>>!%-^x>D1wQ$vHH~~yQ-7M zPms=;ytXoa{VM#;!@TAO4sN0jtNz91S(6XfuU#X(H&w(>cA`O(>cbw$+Or#I(ZvgG z29zzAKUn?jza01SWdabOAQ7>=8RTAQbhKf@bOkW-nPoh?v`V%%*Z4M;pvT@%bH4jL z78*jK8kMnJmQji3nM4yqvvnOpxgd{x^Hr1}2G?8sXT5;4OzUIPi?+^Mjl_MLDS)jd z1N0jnwI&$4@1R(><4eeP$$0lJJts|**>k+z?N@nLX@AQ_1??O0rN5;;a2;EzYF%%X za{vU_h5BE&l)pZa;ae@5Xka7qh>MHQ?2!Y3jhpl;crJo#Q|;ZDawY=Y1j;0*?)aHz z$QxOJ;!5Myr)^09o70_gnUwSg48;JLe0C(nzJ-Dg4ztMs~ z%KtQpq*mm>!jhJ4TWT@*88716Jgwp+Xy8IChBKj4J;zvBvLT7LHYl$x9a0!*v<(mN z@}{HRviyCmN1J6oVQ|6WC5+zVh;@aiJ8q5G^UeZNQii|=C&3xSdSp}&{4`kjJvGck zJgTOE)%~6)y2w(^Ag`dnd-0++;GyBn<4bFyad9~l6IRUPMJl+(dQ$no_#Z*v5dP?=w^TX~GXtQMCfVnhd-u9OOcE7UMGp+(8sU zz$G4?p*Py&HjN%{%gWebfeH=I=XtRKP2h*n(a_k}Zy-zNS#k%K?BGigrRJRP(HM5i z2VCJ#GS)ZoD}<+vrq{BF^qzF|mI2~y-}o#O61PmVx;0yec9BX!LGv3HYJ#{-n{kKq zKRjg5qyvIP>{0Qn(9A1PMf+7;(DXmx*Z!)EudAl}!=7H4npddZS5y5=0O1tWz zFs9qX!r#4i7<_^2dh~IzGp~=SjjZx@|CoAqwam`+&Yia`9`jn(-Nk6=yr5Vhl4s}& zHEl#85)yb?Ec^TIPawD!fOIU0g_2@pAOZsyj>5@W&EVBg;!GF0a9Jza^ICI#sA1lX zX6FE*Ij$@n%Qt_GQ1Qbi)2|a_2rMu1QQDb_DO_11)$@66Q`tkq6n24`L`;Qe*-&F# zPIPqi{b6u|9g*Py{TaVFxH`}+)AF8a*oMV<;Q<;vzSeJR`sh2l=4zvJVu4f3rD0PW z&Pg$=vVJdO@QT5`JY4a)U6+<2qtpP*y;fl1rYEU{OSHAltZiV*-HQ+t9NZoq5RIi1 z9uOl+g3~RD1$>+YRt7f93)zu^L(MK_bVn~F6B9WI3O8{w6^WaIgk%h^mOsrl!0*+( zTc~>X1zsvaGbLAbZ55pLPrK{YuMcaW+|9B!ZtopteL4RcN;z^-@veC}WB@dFyWV(6hj6n2N#A<|x8KCl5WU;)La2cBiZ{`zfoq{xWz0H@86SbW&c&5B z@8?^iTm0kRL*j=|biV_HY*R(vF*p~+BB=}QSq9EO%lVL^a7oF@dTSDWm_2hI!*eV7 zz_IjzNEe;W3#fpm0*S=(z+KO5DZlTpK3)LT6*6Wl-h>@%Qg1Y@K)r}}k1r!c77u3< zeJq*G8=`rfdj|djjE@FK8m~HVqs*0PRv$UYAPEu0Zm3_@-gW$;{)H{0XHWgyj@tB< zUpLCgS&9})R`&Tqmtu7T=N}-{t5huC9Ic|>&2HI4m(3sMxa@v?Nd@B=ap9BFSVl7s z69x7sltmM9TeM=9b}#G_&uPzq!W~!uFTwAaovIZ5gvnU>RWW>kBOoAo-ZSdL+aPep zA<+y!-lhf>f>1@>dSHMcO47D`MZvkpB?S_J-vH;?un3 zY*`LWEC>`bQNi$x+M}|#Hna#ZnJ5C$X?0~FoA4Bc;ZO5Zb{-w!AN_q;&>~=^eQwxU z<@w9`>!JH-H4Mj}?ba-Us;3^yzn@{=-+998dMzE`)nbH$UieGSdSz&&H(&H~1U}hm zIZfgc^Qf{+2oPsQV9KaYU6g}O(gI40X;@fTcmYUIY9<~Fp&;0F zdm{ApfHC?IXC`r)(KHg}dR3bbWxy3!(8h9ECUN4BgK_B7W8Iu|Dyyo^b~x)*E>l|0 zva@MPiK?j6oPqgm65)6EYnnoEBpK{Az2#*OUENnukuVWzWEOqc{5V=#hAAA_AB4Hz z#n#<5+G{wvUmc8uACPaI4XO?Y6DI+7(7^gs_h36mELa?5XQvrVl^2D>M1J;JAd;nlE^F+IG}ySb^ZsHSeIrEGxFN< z)L-9zQ;dX;P6vKZNLuMd~vaWqXbNlvfMlqXc z=r5hFB+I&KO^d|Kibk!du5qfW$M28b@i-1xihxmZVK<`8#||P-ul#wb@qO|wFh!`< zVx?|E*-eMEjOW(kLK@>i<-b#ndKY={w$cqrLGT0xi4Gpw_3i7|A5iC#nF!?75tt~d z;!)~|y^12XzY>Bl{v#}rKNjwVOTa==XogGKh*7*TyMYt6?UAZE831WCk(cf7{CGlx z*kcA2N$Z3>pUC#dV?NT;=SZhV@0cU689U^&=Z7rt5VRznChSPaX8roLZ|z~lY#(2z z!;p@mwbd|vt3-B%3NyHs6sfp9w6c_%02zTpa~W$??AQcV6b;8x@7LstjVBn%;7Jmq zr?O7#Y<}AwMxhU^1qI?#*VWZkzHc9=`i|##?y)zr>|VP2hjW_#M^HaJJT}h;|IXon z_TLEoK24$7$b7|B7w{Zs5|CzEL4GHxItf%-jS`N0?-a$P-mrHOKZ-chX8&LJW#0yL%8w*c=g}_ zv}=_>q9`2;%apbvT`j*v($WX%iBwTPrKUud3WqeLOB6O7Arxb2P{n8iIP}gWNZyHu zbogSDR#hf3vS35&9z8QPDL@Lqj3e@ozZ7+xLLL$W-*kZlf#6io-j(){grO;Lw`lRJ z36cNvspx>IW#J@|qL15iw1*Owngx_z2sp7Sijz=gR1eG!6=w8=?vzbg#hj&LmUWkA zch_GN!!bbdC}iJ(UpRKEqBjTVC|@zB5wH@G+yYMM*hZ7gp9U=2F9gLj56>x#Z%PaQ zS$JGb?v(c}S+-pyVFU^R7=elCHdUku`&a<4`#x#SGyeKbuwV+D*Rp66N(#x7>wW7Q zJir0=x$D-(5;GBoosJxOcwjz{k7(8FAB4#{>5NsHb^h0}GtjXYKrrP2HlH2S$&<+? zQ^mDaLEmfnq!XZDgiskxe=6G~H`j6&$HO+r#()H9N9JH=oE(x1lqgKo^T3=x7 zT?pUaBW$l>`K|(SO@-Bc#wXztlK=__I-#<9{(jhsI7?ne(dn>gKSFtCgp~JTRM}VBIx;? 
z_Hk`R^%8adrv7djXA-7nvAz4s7N==|iV^vY3m)L~W(u)=(MzLyYD~T;L=*l9!ird-2tIuIs*lWZ1 zTOgviyQZ1}k-RLyYg=hkGHXwIdRG9B093ZSeNFA;dqg~FT2Fb^?qWkp0FEKO^5?js z_43SbYWX6Uv8?(Rt{bEpI=ON3)OM_q!rgZ%nwNNkhVVZ&{)Z_g3}a?>2C>!G2ee>U2|J} zZVD(t`sA+yNi2eaKy6@aqW-<<=?4lB=n=qCuA(V10lp4r_g z;TFnq1);~9+Uz|c?M}jBNVTtH;IGB(pKsZMCtNN&#B9bcK2B&8$xUDxk<9ya3y^LK ziu6aayHn+i|K3!bu{-aUp#H}yyz!(_>-eN($+e1&^th?X1@i^+YIF0xjz9w) zQ)zO$=*3yrPw0NZ1Eg7%hOtStB@2t$gvgEsG2{%nOPllo4XzaGsynMqyJfj=(w60| zc-}k7be}Xmx?oNWnjGFcJI8$enVv8l_uw2A+oiOx;SOp!r^o!XYdfT| z%)d+^D3pe`@*)O|CR{W5u(`a#EMeU)UhU=a2XFnWR3*h(yrQocD!x4T^({;`&Tj3$ zBGb|%+>mk!Vs{FZm@6aO=a!KjUSeUGx5$Eos;)HOQPFcKb#^Y1Y^rU8l^I-97N z$e=dKDApforAE9zMOAqNiY`~1icSUT>eJ^+#g?HR3a<30Rz2ayVSASlr9+~Fwcc)? z(7U?uvB4{QcKK__*`OcsXe$}6s(D>s?$T;k<)G84DLDo?H#0mF7e*V}7aVV-oN_b6 z-T!iWtjYaaV3|33kUp-(AZm)N)#f5EKqW(s_rFp29z^nbKMjWdYj=KI!e6vCe=-~? z?^JxZSbj9_AdU6-pV7yfwyzqprn!E4(EGo*d1y3Ouc(R(Mk+^TyXFx%y_$Qi44)VF z82UFAW+@Ruqc&6 zpSrt^YHjgYdMD;^dfW}L)c>~3%~{4d+RL>-n9m`2+6M55#rHC_Xf@Xoc&Ws^$2z9!+H5WEn?S!XT0 za=QOQx_iww2oqI!CX%$_Ok9jbCW8$v6cRFTin7JvObXhPZ{obzMqej#^PZAXYaVaw zyxG?2(boBsVEl6arON(Orq{UY3G)u9weJ;2G(G69FgNjj$WiGwoQERG=eURa7vDnh zqhjDlizK%kc6I+6j%ED$Q3xWoZ=Oh&=Tul1UKf|=#+AnD6qSeqMIZ=?GOLZt3|s#KVR?e$kW4 zk}Ks2VV%FZLj$rUp?W$AA`Q$$$aInS^GlL+%GW4V*D$HjqBKzBbGC*c#wr{J;`ltb zXq(1uR8UWkV*vy}C`E(+VzCuVuZ}f+sP5U1^n+I?l$(>5%Y`fEoQ}+yw0m6b=fmcG zh0J_;L;>&NdJ^k(<&>#SsAF8>w2|6?th`=g#l&Ts<6?5mQ33bUqO{vE^%24~LJRCd zLRtutYHBE_6rDWpqO!CU5pS2{fJHeq*>4O+8NdlL2PsxO$Oe;hCm?j_Nogvva}`>=JUcJWz+=PWbNW z2DNNU-yY5+uJ8gV>NZ@%tYi5)gnu+K@da;pmgy*$iMJHtZUD~Adb z@?hpxRAeOPNm~q*D$33%I=b+AzhqqK6+58-i<7N4b!;BQcRhT8=i%m7^gk)?{0>su zD}YvhzY15g%8eT>H?jCxPJeHZnv^$TleKsyUb1*A_$jB1cGj`mjidKgm!o9FvHYg1 zBj4h;G;b(5hMMgE7+5jhw{J?4BswKt=(BbdpB3NhpdX!kJByz%ADC$HJ-S#vsl?iF zs`CzR@-{9@qj`gH@+NMB(55`bk+#LPwSGNG(t18Tzalb@GSJZTuXvXD#QNJSxf?Pi zj0#H4;LlRi(bMk_`uypWBjVtXVsf~33!FBcuUuLdt_r`|9^Vm{;_|B^BO+>A$N2A0 z-m;#b>yMDwuVNw1DZ-o44fMo>)1Tb*G=LqwOU<}vZd zo3<9C(W>bss5GqtI6*VSx&WmtnY_fs$*DWO0KiO|d3J9i1jEB53kJU!ZaAT3CMD-w z_Uaq!PVoH*P+K}zu&fK514l359Gp+rA+93UrFW!eD9`EKwsKs6<=N4_q3XmAUdvMA zV&%%2d)#?$=wh^m7^;dO{Zgb06J>l{jWV2fk!VKp7;xSTE`7#gKxHCb=3MOlCX%byxKjd{IDNw+q)d-&qvqoYK8 z8e{k0;o_o!im&4b@rDqp>}`o9{m#S(t=7kE<`j#uI-@s`xveBZ2;vOte}XldF$$T) z-t>kCeGjQ>%+8gJ^xr>wyT`()eeoN%LjCGhAC4QT=d8XqG?dRCM&AMZY1TEz^N^pA z!gT_xmhva6NaD&>=5NNVm(A~ihBR%Wtvt-#r!|MPw{|Pv0!g7|vk)flUWMCdq4JIo?$DjxIge%=|8@ zXmcxMfn@7BLS)N604GU(aPkn7o2oBj&JbS3#AJaVqzHyO5@Wb0sKv+5!9mB$ngb;E zQ>N+p84CEv4?c;Mj|;XY{mgQ89(TM)&vH1LZ~h)`G3;6Xz@Pn{&Q4F<7YXvBWlfQ7 zUOw#C2XOhU$ly{T)>Y@)a}pQpqID7&b87=-nvAtIWhOP0;Y6<4)sA`w)=9i$KBqE4U}BA!M~ z5m4;G_02bgq)MJ=FhTN)inRIuD@03 zCeOdi9@^CKAah$ZXs)2n)K|kuTo@9h4ca>q#_D{>#aaGUc9llerb>n{}MhqrfWPJ7Tcy=6=f6J zyJGhk%E2YW+S3=4w~H>lh7#MzO&rmEOU1{uxBTjCoYgb~jFkbna0%H402bC*N!`o>Wbyu+G zgO-+-D?sWu#Skn8KqY92X!_G2e}MFCxIM5?p4S&29S7(t#J>Brosr*oXUMbpym7j1 zPSv})Yu4P+F8cHp*L(m?Clw6H_i=J?AAf1<%_SBu#mzhv?@ysF`)r_1BD71RF$DOa zu**8Yc`UzNYT01rc`0w_1@&C|c3)q}3))$eVYvNj_IV?=KE7SIbkvMTrony+>_*)@ zvwFkrCNz&DztLk|eKfn|6t*SDt>^(>;@Gc(oNXL~0t?{6L-qH9CbeQ^S%iF=AXgxq zi2|!QyqG*#5+@8c2EEp++1B1J-WGTu_&|F>`2zow$BzrQQM@f3Ex(GH$MoK8KjPQo{Y8ucAKSa=6uY)pw$R>F{Vo?6c(NfF*b!C8jfi^wl5 zzAm>tlW>OO+x1XlSW8$ag{~R*1PG6ovKeUtc=E1H<%0vG10*_Er~?FOhvWMZ9=(yu z;Jeq9Mc#G`ANu>aq@4|E?2S4cd39(=pA2MObIl3iUJUqjyZ-3}94)y^V%FIIL zU8ZyNP^>o03>=#=WOgP`k)1&(o#yT;)g#DmB<;Osr<=K ztdlG|3eP3-J_-=h;VfexMWIY$x8^uZRW)#v9}iW_oEhap1uJ7XEK?KP1BJ}9h|_Vb zPMBsdrOq>HH=5;8UP9H;`E8yLtXU6#yafUNpn;F=3$0;tqZY#U~pB~hT2C7$UR3re;OC@p=rA#1o zV%#Ry3zz3|m<7IZkMiIqO}>}+th^6KxvCQ2-##dqSK=C{s|}g0ckbLt_h3%91t79B 
z&s?ML@|mI=dpAs|n4a-zrNvKw1q&2wS2(TNUO4rZrlZyfd+FmFjLbcLrpwQWFOiBL4VQ|oOM)Rqupfo}oSA*+%fe^bgW2}Z^LZe6Zo6z zxVmrbeQpWv@!42Ms|6RMU)(2XX!Umv*h3|Biq*P8^RMdHIb2z<<_iYM6p0kWCEi^HUP;G#tj%IDnuH*W@0>O7U%XrfQ zm7V)K&R8P!oK}w$dmaHnWwN)TS8I;bNQ$MywYdM2K`VY+NwVn-%JXLty+Y=nDK{jA ziL1X9={uCORGK)PvAUVc3J3Skl1X%^=^gdjpr4up;A;#7AAeALtb z$vdft$HxhY9W#1komQDDWEh{~A(hV_tm@BE#-5XF_X0vW5z=&NEU(~A@|;-oK?@w2 zM7m5?d;8+*khWkFUWME=n*y5;qy0pOPz=PR?tIi?2bK^@``P2up$y$sNe#$vC_ehi z?iNK#l?9n<_3z)C zk%*P3t-m8myCQ6f?>uMc6LL6&;v)om+pR!2!^+iqqi5S!p z{b3sMi}u`uLT4B|u#s+oy`PQxIVoMnzxa(IO>xfeIhDr!9kuulx{S_i(<}D; z(${w0j4W4Dnc#s=$c6}wh;Vt|#~+sR^LEu!qHe(#`zcYfkU!wMhqe$q7#pM{b#7q6 zQgi&-;KJiVGK5O;MjFO}&TuSNm@VkGAcIbt!N-*P(xG;i`ZH?Q^29rX*6&St4swvB zilcBI^cnZ4Bn?0YnK=C=t+#RD~?D+13 zx(2sz#{-9TFx7>ym}XMT0|5^Eoq>5=WAnlODso7HD%ZPf&R*0xJ9K1cqS3JW*bEC~ zqPt1bo37i@0}7HDY{iomy(Tsr2Q->JsZg6i0YUu3)>P^_+^ITHaT^u00jretc{jW%ownL+B5; zTQD{6#Vq6Yam(~;BZ&pT-hzFFCIB67e&ZPv@k7jU5_@sUzljotiTVbgce}emAFMWi zifUTAwd^VK7{vnYjP$#$s~XHT@%+>oCbesZW90SK?1fGB99X%FlM?-@`0=Kjzt6ni zjy6)HP9Na3lmF#x{VHdKV8A6}lCWRZA?FOzo@|*Sn-3r6{o-`cKx&&g>HW_@0X$i8 zTO5FgZnj}2dtJJ;#=A2LkeMZ$Cfj!m7Ws8h>ZGcNe@DZ{%Lg$sNORs#!7mG6_!+qp zG|-_ZkEMz0S0_iWy!Sy&CX{z}aI`X3CkPYQ<6*Sj?;jzs4%)p;4>ZbK_?d`NPt}~# zPLq3f9kzqA$NNRr;`x()mRORM(6^mQpxWoNhu2ZUHZEvV(2lga$lSLxsZXEE?xtH^ z{b4dF^Rq|m^rc&`RJxJ2C>SGo_tRFt<_;8E3l!R0{n|D_NX+>5x=!_L5mVK2A+r=A zGuMc@U`EsO3)Ru0)zQM$?q&g}?1`*LUefm zxmSHL|2%Q)m56M!K0*3oweB)iE5qF5#Bk1yl0;*|Cc1e#P^iG<;(2s)3BiUSqS~yE z@(&a;${u_9rg}npBQ2M@Mgr^My7aQ^VtW=buYq-1xcbP8`QxZ*6`(hbCDBJy3r)Rpj7scoT|JSiWx~4Y?|zDziRhWh zNiCS%)je==g{)uUAoi=oab8&=n4$HTR|J`=Ue26m+oA*wP zZR^ZD)F5+qR0stJn-M5s6X~AS-tZC0sxM!3*NUq-yTvpNoHF&kW&7XM(bgtBrOr9; z_9+x$(b+ftZ}=_#y8?dWnzl-4w&?Bum@DDTRsk-U<|WQAbm$4;-y~9ns4Gc6c3>jr5yf>fAmk1X`oP}(keA-Ad6_CpWsY-$MJI<4|&Qz z9r&3SfNNN^%_eyhUkp^#|Blvb$o`*bEu{uT9XvCeQ+Uqaz*S`0%%$EO4_Zf;2b=(! 
z#;*@Z06%5>>^?%b@|>=CG5dy1}@qE(?F5p z8$@eV2u#b}eIR&lpZhj5Ln%wVXq0=25wDzh8=sV!jd zcfq^J&;ju>R5AaRT z<=C2(n7?6i5`Lb(r*dsjrn}8PXi}PdfQf;!Z(m^CevkUToaC$!)3j~Ni~tXy3ws!O z^oPn^+g=&`AnY`Y z_mO=|Q+&bbbF3QSIXmS7WZ*+?{I@JJ!bTBz07mX`R>sJ!@q8%f%0&r@1|&byIH%xk zEz&MUsuql*PE4@}KBV}?DUN(?3A(}#gs|cS+gJ4*r?dWX0XCyg3r4k^w`&;qi%gOY zaeI>P!X&*YGY=az{CE{L=-dv@E-*XZc3Er>>;Mh4h6`TjnsES&aUy$LnyJ8RA-aDG zhQx--t-3uMXE}LUgYCa=eyKLi)9n|0#kr#KA$+2$TLk8cRO;eTfFWx(#DER_lGGyV zuDZRLg#j=RmkGqHig{v6gv?$JFNNm=QolUNTXu{Qrs3VxR})H^wZ;zwW%k`Qkl{N9 zCHdvU$*Yk!axeH`E>d0OF?ttQi8bxBS^M_nyP!B9DpxP} zR1DMbO&N0IefxI%?Xz*K{#4m`slac&$lk{kh_b-fDcLp^-@M=mvFHN=!{PM_!MqX$ zBq@f5Mr)ClJlR-gA34gQ0_m8#Uv!tvN;Hs*2zSe(vSEl-)2~97a2H7RjUVnun{VIr z22_ag-OD1$-7;&!+K!_YQ}8(z1QOnHNo4oXQehNqFn#7G5tGs`+xAc&Z1sKT2iA|% zwhf1F-Ic5nZwn^BzR*wh?Jh}|&4UNGBJO^pKbcxnRh0%3%7#eR7Zg5n*PVVJ^vnI_ zu{hSM!a}_@MxL*!-v*YHz*W(9sY0n0R+P?|9OBq3jhA5nbSQ z;xovbn1U0w!VP+-j^(Y%ZV$U|eUIV4OZvGE4e%Hi9qPjf6095 zyus>)!%5d~Nm#SRTNUK%gb5_^X#5JZws^{=^0L9|n(}G0pVrRt_g*@Eyr*)2C;3(K zR~I&W+8P4a&%clEuAo+@SeH86$_VT?!#pe2W<2X3NDX(*t*y8v2v>B(iB~v`nY0)7 zKj#}68;hKxMG-hrghQrmO>fnwS^r+MRja7+P)4pX`uNECGcu)Rhg{CjPTp!(5#*0& zB>x(FN+~}zzA5I<-?_ey=Pe#FF){P=^RI4%!c&|mx_z|#Bg@M)N0~gH`hCXXR^41( zMZnzL)arJsa@C#0t)Jq{Flan#O50P&?zsJ_}5F4}}eHXr3e(x0ra-X*3kSH2HSW(6rZI+`K~5L{DQ%^Y?~am+sOEn&$5l z-v_+x7ujv1hMhHIJ~qYNy~a}hF4+s}8gQY>&1h$d@1>>rd$R9n-#}$+<@L&NjyttS z25fhE(l1}Iqnk0POyTHPd-;@~(rFj#p4eFPjmYXB7OXCJMFX5;rr;|br+_Lb@P21Z zug4GhE$v#mJJcQVGdyB?@2Zc18N-%S-&EatOPxEf@bvzv-927dS+h+evrUZI=0UqZ zOy!TxMtrq@s=&%UuHMm6XwN~TrHw`=p0hPlG{o0eppUWdV{tIcp6r<4xi)>{50$Xt9L3BW&EL;j@9CEJ z-m+Zq#Biplcf7tVf`9UOXBxk;cQ0}1etQ1Q`wgve*Wq=E3qoB-$a>c5* zd?RyO2++-(@yKsF117tZSysg1YtOU2Wm?3~r8=he4QWRjBu3UvyTz1j73MlY+a_H24;uO(HMcvtND5Yt+4jDn+P|0&6p-7d?aq8s~Y zkuD}G2oJ0A3(u+dHJp95CGR(O!tVHrU* z+}i5U2^c;&8?qxD>Oac9-FqZ(ic>>?t41$w%Kw0?tE+M*S)L_EfY|c>*d(26v*imZ z^)0QfXl=;Gg~rM*Lmw|(E`E7X1HRybbeaaa(0%COQtIi3K*Y5#JrYrVek6%KbL8(>AzWqiOH2<-Q&Th7}#RE36q20&5TzXu{-DZCn>3x`< zh}2&)Zwm^-gN}OTBTXe%4_08Jy81x4eBGJcdU8wZdDlM4UL(J41m0#i__eW>;oszwA?V*8Ai;A=O=AR?i5TD0+9cg-d;d z>{~yoz^Qrs_D<4)6}|)eNR{dSbrqze+H*q4m6ptq3p*=F34_0MxwGc64_S7k9XzEZ zlJ?i{_z~6zM}x92cegh+eS~UnDm(LB1kY=)RhKs|$BT-3fGjx=CE*jFljkT>650~I z++6eJwpjiv>Km%0E0E_X4-{15VvwrQnfLIOZ9@7B zj55k<>?YEnJ8n$y1&F-)@J%(-g^*WJ{Q)*|~RiSr3DnHhIZGxfw z_aCnAU%arQSTOVu9St%4K|1nin{UG8UNq%CdNDWWgY8_he=X1Ua9oh<$Wh@rjj`cP z+g7VXHL!nlcOML1-=3HP@%^+k_Bc5yW*S9~JBo6KVztXla&jhe0rFo=Y3y4>Og$yo z%js!q(kr(9xR=XG!r3+L4^i|aZ}-Sw(YyV89~`#ZKV#pA?K{&~OY{ET)4v526^Gp! 
z3J)#w_qEXcO-7j-A9iP1W|~PKo8G#@ZY(@)+p;f~)srbmP_lc+Oua7#i zZ9LSQZ)?4WPY|sq^1o_5#;VFMy%)5cKUM$up{e7Q;`C!;PQAWnTiuBaM}O}t)oH?b zG}Lp2Fzl=Li7nn_xpiyI?rWV8FCS+Qnz>zrOZD=RBj{&T(Sui}-{w;9g))fsU}DzC z&to8g)?B(>^!e7>pI&wG**PlP2b4hZ=S^d;m(OdI6lL9MKiYx!(VV(GZb4jZobPmm z3sc9gF3qJTDqTI<(hef`MF(fIAWGZ4<*y=A^pxC9rI)PC8y97c9yh<3ICU@P>9#;k z7HQYNWV3(D-&fH)hjMz~rQLD6e0>{t-yKqGT^y{$c|Y?xoEqKySGkY*<&?|*8KUnp zb!8l-%t1ry(y6X;?eVecUX?)^^QzU@!{#jK$wGoMN{8)5L|m{hv2wqUai3OSb)9>% z#9ftct{B~GzI@)tEA@SF!Vc|;Zqg@cZ1+kZa%WhUo(Zd4A6lT)6FqkHlGL;FfmjH~x+_p(wx5&e-`g-n7wS_SWS1sc@_E%tS2t zbV-Ogh%f3;-LgvzmMYCowgt2CpJYTt5Urc>GK$P zW0`X!?-i88`zK2fVe03zSte@zPX`EtU=mt*9LRbuZQDw2KqSiL`~7=^m~ZH{ED`u~ z&h6|-@}1P^cke$WBgg#TE_VPyyvJ#BLm>L{v}1|sw>|X6BP`o8dI*%-SDzsSXO zyvpdvj3PHOuUO;Fj1LEoO#I!^^EepDZown0S(di{F1-XjV#i$_e1HDQRAhOrz)Jki z5G-%Q5DGtD4}QTxPnx&>xlZ^WH@IWh+`cxnOeWQ;BVQ__yAJ;kV))uDgN1p1#AeugwUHEhGG+7$Afi$i>d;ejApkwR2k>_ZJ@{X8( zg521*D07v?$Ud3lY~kIO+FCxc?UlF}#Azjqo0+0o39eNuOaQO5qix`Qw@PB&a+AwC zcN8iAC)u|?lasv$EyfkMjZmTF2J+t>;biRCwX-@u?|6vHXdT`HWFETVjzBe>M8qn}6N$&Re$PmM<@}GXgYP@lp%$B@u zn#FBJZV2IQC4bR)Q> zlbsuKYHka~Q~%yzkNmuBnN*qo!|Qg zVP<+67K*aR2Kgq?yysxgj=H!0i@jfx#-5XXAA5f=$DPlK zjo}Jkp0W3@wYqIymn;2kSPmaAle}QqGWF9=>)*aA96F&S_r<_l(mR+^pOSi)Nsuq7 zwJn-H=|1B=-oEfSGq5@u2fQZ?c!XStW2ovZHb({PUb`0!gHf_AO}y(M50W4j;^blZOS8h zbBk_SrB8o&2_0zwwP)2N!&u)q2cE`|)i|al6Wm?LUF{UqZ65P%c9TcKJ(x!VeZ}Of zB>J*+cR$l9_DR6E2ky#=Qu&>gy!klrMp5uWt%>o!Wyt)MR-o2Prw_3SwoOD+)$jo?JYi^$*R~=P;(pXZFcNP}rodjqKx1u~s!d6?)7xS7$O(Mp!u5Y-mnb54SE7 z+J33?J5+fPcbfkb5!5hX)!Fy&T#Eij|E5~qSh2aq5t<<3M^?aT?Ly@Tx|nGS?1n-E zWhlyHA{zB_T&Uh1TvlIL`jz^auEM#YH~pKc_-)fcPglLV)#7bAp@#b}uQQQWKxdMm+g;jXo3Xw(4zAPAtW{p^-+TI43%JL%dFY$KXAADO`=8_%-%dSw?c% z#6$Zq=Pdpd2C|qrbc;{P$BcNwm-m%+F`0aIC-ImgiAhv;12B&X;*;4sQE0yv+~sGQWrB*qe+)QFWgVL+Jz;9!MRAJQ`f@x-% z-4zvy;#$6K-}51I`}@Jue9uO2{_}GeVXg=~Ik?QO;T_NS1H7Ct8ochf4br4tL7sxn z?94mzz(Y_p7XV#sp)b!RD{L$Wbg-kuSrER9VJ^`i^Cf+d{{CjO=pY^0wbAxD!l`^= z0Uvl<;68raeG7zEZ+(9nHpRN#fG{*LGCiGR>72>vwBz?12!)*c=*Y(@fcd@S-LG8; zIoI*^>j9`iC(+U*q<-G(Nvpc}l( zuh{bGe_Fptn>vPx07K8(piA7tgh`zXQrgYiSz$KA3tN9fdXpL4C~EFi^o- z?_${?o-A-lA zOAH^Nsn8lE5*aRgd&b6d+v!5BvrsF%jl%1u*1W$CVpG`>tq%mX{Hz3vIKN0~0V{3rO%o&k^(9Hy4A=Sa6&pEAvrvR0(}<#bSgL? zeHJotRl)cx>FM^)&X2#fQ`>Jeth`S85Stw`isA>KwX=qlX&kEfN@B_Ayv&(P*&7ij zU954(Jc(9v*k z@(-x}M@`ptHQf-F87|vc_F-_RXp?dV>~q>eaZ>K@^+S(jmXW6I!E7YW2(XZdzlr*^ zXXT4$ZzQ>+brE*xE1Fmn98zWdPP#qS{pfV|{)P4%+}ah_B6dG1`09{_cf$M}xu@k5 zGyP@SB-jU7tqQpJA8KtA4XAMgV}BG|ucE<{-O%g;2U!X*gpxGeg1W}{<{ekW;kY}M z#dlL{wRwA14iT%9u@$-)@AV-GMm6?mHg8GSUjXLhrYwlUw&24I#zCcpe2ToX?Jy(- zY7lHKRiMBq#6&psQ5#nOWEdZ*UA;Sm>&=kDP#$cs3c28}HDILbXN9X#_0 zjk&a>b`MC_wTD#7%Y??dfw^qzV^sjv6iksu^1D7}+5$|>`O^e4CtQYue&*b~?TdS0UofjyFyPu21}zC_ zV@2Y^yq{QdLoks)!kp7Kui~ANbJFxxe9f7~mI|wSm|*>HO9|TdP#*H`XhY3|ZKmDi z`4y6AmHtntvF^kI z(qP4;dl}`V->ghG%BU&hoN7nNHp>+(S(hr#r@u@WE7Ax12Eo34*w1hxlXZQ}cv5o5 zvF$AsyMWdI47wK7 zMcOvVA$kVJxi8ZfF`=U3_P|LD_WrRWhMFv%!KB>jp4GU zvK0Bjaf=s~5i*Nx|MH-}Pvw`7_8=>sh-sk#q0wZZ3gNt>H6<6XR~$$>6olIcJi(*sJE_fhykUCj-1e}pypjxG-^sw_voP#u{iQ&spQcQ)oU8xn$!zSfa0Y=Dp9tVRZk7=gi&`!78_S_*?9I(@L) z@zK^Lg=CBh&zV8K(os7zx!g*B%*T;`YP;PcFWmJFE2%W5t^c;4+!DkrRNE*?tnoHZCN%1vH#~00dmt~6_(z&Q z6>mgSyy7z-4#FCDta6tJVSb;hhr=@Nl!ATkz5Uy0U4l}w`<{Ne5pQEbU}?5q1MnvtSn@&z=key|ydtsBA&-g)LqkL~Vt>9nnkd9~z~36KdR>Prn` zIK!zKcdmndo=Bojp>7AVaiNXA0(IL~ko}JTI>$fHOqk6`F#gUH%}!@{!cy0PXAqf4 z3!R;G&(FDs9q*dtQQC>$!)Ly1CO)>)onFQa70y(=32I*n_O%LMra2+)up*v{>$Qbq ziEf=8@6;hsh%hl zC*P^DYTp$WBY}Spqd^f{Q8b+MvGPFTs0RU`9*qL*TZso~Zs-%`8AFFkbRAzdl$+PS zfnu-t?3IB}_^mwm1<593*htJiP4F$#%;-3eo)6DY?l_M~&_|TB5U?off4fl4z+n#; VaYMSv(%F3xKDfPD{*J@vegR`^=eYm? 
literal 42574 zcmeFZWmr{F*ENiwprnG7l!$Z+(j_7y-CY9G-QC?H-QC^Y!jWzc-Q9g?-o^cV_w)Y$ ze!pH9;6A|K`>ZwRm}87NH$l>p!f43vkm2Cq&_sTGlZAtO&H)Dp|K%kjc*U?{3LkuV zZu3<{{w4V5`tnyGIDTdML&XLT4h8e+6*3#6% z*3?Mny}h2bjgf`Cpw{BX{tugAHw_jh9`o8|pC-Qg^ z8TK##d$?*P(*ECXeGSBvf=B<~qi9D=%m4kk%)e2dg8zF&p8A~de@{tu@^Sfp&JAAr z7DV|h;D3)qd0uh+f7ku`QfBr4UFyG!`~Md3|94nWt#o#Eot&T3?dnGxqTn)QD-3>uA=C#&7?2jlY%HlJ8oyL);N2?z*i z@T8Qh8k}-S2NQ&3mv|j3@Hd`5$3b`h)LoE)Q)lgtgOVx$K&x^ zu(7f6Jl>tbg%WY&*x1;Znwo~T<=G$3ziQtcOe#>Vp+QGS=iufZuh11s#GH+Apt)Cg9LH;0;&b+=!NB0)4_sXD)YQ~&U`JZ%Zt(ePbqv(h)Y;kD<+eMr zi!MjJ=)^o%`2jclTlS9R#A$?L$H&J^?u)XrvdRtCIaVrl<_kYWL{@eui#{8UcuJ;l zWki2qV)|{jHx(Hj{ms>t$KBn%`DVAMXG=v*t;%{s)O^0aquJ>UkIfX1+kT%acmeC{ z%ai#3pkAcK<>h6y#Zs!4`?a6b+2)%!Z=SWg9JL5PKHOg~`(Ri)I3Tsp%_T|kUHei< zB*OjF(dpcusk*+|E8}!Nr}9C<*sOtS_G9ZeKiuqnwpb)YK?zZxt0^x}H-VPI)KyaW zJ@|N?&zTTEI-npUXWH(J!EM`efu%ASPJK~bdZp25+c7kRPD@Mck51GFmP^;w-2HOL ze6mQ1^8I^HEK13F7+$GrP4LKw^qRsZnQ%B9lhx`sE{DTplLhTA|C7xm3wS1jAwqa~ z_@_Ad>W6%H+=T&K=vnYbBk+%l!?m}!_x1CuFq>mrV{XwL_I-twuUzq=wzd|$`UVRN zp}j&E6566Zzp(I>e{lCV7i(P@{GVHU7P^=~N(}!E!YWWKc{epR1rq1@dMk~f?hPK^ zztfF=n;NUBVwIkI6jappb}t0i>y5bPtJRQ(DVMWBc6>I|X+an#4GnUNBqW*B=J@oq zb96NFsO>Se#RVo?qRI*~6_3j{-|D`>W@{{60O1Qtow~YuC}ib$rHg}$%ldR(1a5V0 z&FQ2Y*UHYW8w!mx8qdYoqU7aG0g=11z5V0o&-X5{L${yYc3ec`xe{66&DYo0GwJ+& zse6GjQ^;MmrOb7GY%$Z7|dQ>5B8+Nfgh} zP*ea9epOX0AkQWW6+-&s7!gLYMX+?GFnl`NuTIuZPEI-o2gCT>ZYq3Ph#CDMT`6^WuU>E9}n$Cu}k9dybc|#i- zINfe9H^y?rK7RbTe0Ufh5D;M1P*6Y}85!Ago924@N}*7`}ZBBFbFx))ipFGOEsG0B_+cX zxgAPRPfld;@$qSBXhc<1RST2YEoi_QdV6K``l4mH9S-G10V$fK!N$igKt=RH#=uaD zjg2)1`6f!8!)CEq+!sUZuw>7mT>eF)$zDRwz@SJZih|m~!C|f9aW#Zd-qNz1%jqxr5mnG3p$7P#7k+N5MvWz%JcfUFCoL z2yC=m@jd}z4%JdnS5F?>yD;wkL*CcdC)y2x5X3n-I_C4bTvGXTqz@%?W@Tn(%DcKY zS5{Waf&~V5SXosSQ&dzmTW7I^u!Nfv&E@gX3_sD&*j79?K0euKwU5^%zE=axd5EdQJJC@Fb;{ah}W z%i`0E3k3il3Zm4k*6TkeL2ktS{+-UCUZ3vm?M-~O2v-PJgoczK{kw5%wr`TTVK{Y;fXQ7|@bVU@vfA*dyjp!NW4 zDg&`Uk}g0)PcK2lW&0zBMnRN_-<@apzF0cLTeZ!-<-~n!C}q+h%`G=twh%zH@x$FU zxZ0nns4)IB2bYoI>y+EL$jA(T43g63=H}4;tgNh=rjf5d;k~$@XA5;iIgm7+X+9zFq7P zZDN6o=XI$qRI4lHa=kj{-CMx7{A@BMJ{bS0tA2mIHwx^QILJ|g2n%uXP%9^}s1vK* zA!qXsAnO!BQpd=uzSaeM0ut#drc9@$-R@2xyP$Z(I7Jf&lh|ofB=hzU4#Mg{1@5l* z*xcNFLYIcd#==~Qq)mKZ@{b?$he2U%B^6EMbF*$)0uU&QgP-}q_w{Pz-|p^}^>yE@ zEHVlTiUwp`cTg-ol9P{unx<}t0J7E*6eXv%F!p#pH!cv7v?FY{=R3NDxs^XV;BUbH z)Gzm|hToxJfaGF=o`DoH8cBavQBeVV`z$OpbbZDkO>cLi;IrxU>+~_`^L<}_O>B~RBEV?kIxAX7M37jAaJ1O{{*xUj!v~YkeJsQFRcj> zK5!NTQyTY;flXb8%@N5BQ$oQ^I143vND9T>$Pgz>XJf_{qu1hqn@v zlE%xl_&^{K(?GQD8=J{60W`>AxSe?0+2{N+tUPXyFaspRnEp|6bhw_6iaS#&=5-y zg=J;`oJ`f2D6Lh3>hT9qNf7DN)kav_+S)LlrQY5kfJI{<4UH=p*=Hs780 zznC>YT5P!nl3}L7hO){2AQJ#!kDkd?@h2SO=Cc#-jrDar6H8C~%IQoY6uz77EOpAE z7c+U>4WMp=y0b~+>PX4R$oM^iIA!lSJw1I`oTl?DPr(0Q6BA3Cn$iKbz>Y`=-u4p- zLM7o#1}{VbLJDx`C_#aO5qw<>VQgK7YI%@<`VeAnSpoYezg*yJ|`t5m8aR_jNv+b_{-dUDE^Z!hx;7|;LAUsl+Y02>dH!FQ7gm` z1(%71r5_t}LkZ9?K0dx;m8#9u zEzdS3)^v4yLbV4IS&OyWh`G6&C~M6Csga8PkGeZO07V?Uo8ZHRrtu;r#zMssWWyE$ z_nRF`zQ;S8QuPM-FJO_khSS*jV1OZtC$WX8ROx4o?*L9u2js>;0t=*GQc)2Ha1W}S ze?vnQWMpJe+iFmT^8|blo6pC@s!eB@p_6M(8V%Nd06u^J`QxowX0Xs`N2l2w4=#@| z`^ppuGr&rz%0Srg4i69WEzpyadVzCby?q`uh4jZ~7xZ?F16a($0~I+w8BdS_VBBVBj67F7;k2C% zTt0Jr2b}RmwRb@s%EY31SGGvhbejjiDD?+2vM|ai92^|m!}$b&KgR+OGYZM%AI{EP&Mm1R>2yDhg8V!M zP#75>{{!s7lIz)^U>$5DPU{bNL0lYFE>AK?t z+3RQ~1rWP|(JbN7y^HCY8Etd(@Sxt7N6c2RDXyS=CG&fvMn}H|x>5*;yF;}Jn6~rz zcE?14+)Se#El7q>d=F>C{5BW63gY78mX?-q|EhQ+j6td=bJ-ySZVo7k(^iT@zGe#- z053S#%Nc_w)f=o@tq7kr_1C{(FUDDD>0AEL}PW=qXNHn8XD)?dEV_(AR zBG^&uEd!9)-3tpTS;7%d(rWD#fFG0dxgppquaXja#^z&qXqgr}!_izFqi_Uqi-Dsk 
z*0|z2(Y;jAJ@U{oL5KoY&ew{F*qt;UJ%?*Rf`_dHCpZGjWizQCVT~)QbKX0Y)p5#F zOmrBBr^P^5BxkAJ`fdADXyg+{1_neDCR~e~o7;Zf;lOa~jdB(;o#i2kq}zgEJ@TI^ zsHl)5XHQiISkB(@^h~|}e{^?lF-_iK82_eQmTX41I2CKA7l01T@G!I@C~OK+%$xzl z2~;pHR3(C(GztRQh=&;2sHf=^95AMWMFU%;iWoHmQG|l^@J*~z`!_B zqa=d@*6X5ux3fQ?sMmNsa9)!;Obi3OM`ek$ZLqkP@8e0oJf?E*5ek?Z_!}RHl=P?Jn1Shs3v!bZ7w2VAL zXy}JxYZc1N&s9@2u(kudAv&5yFPW1}pZ(gH#TH1di9K`rw1nP84=+fuX0^Bn+IBNc z`V8U99Aj&1n{al}(RD1*(8yTwbd5x$T_#*-qUr5F`!e`q9as5*Bt(pIC0=uVQcdSX zL1PHp?A60C_(4HIC3Ciae$V*jr@`YH_!a4S&6=YqHn?8QPYm1c9X+{S5{(Wur54p)zPTnI7$yprV%!h$%RY&N$U_yhSBuc%niAsXz}fw{&#~C2Q*MLkr5G= z?1ElY8*l6)`h$qr;0=xL#RjbQ;HkxaQOBpm*&9rip0Tjfnf?{!zhs+VTBbR_OYd6o zgT%ZsRp`3oM3&sOZUbZs14B{Zxi9i3bl-VZLIy%PfX@tCZ+^s(X~||p!#OZpQiI*Syly!H%EJ(5Q)E^POJfMz!yFL0jzciL31JG*B&@|rv+m8JE(PjQ!e3S^U& z!tH+Lh8E|`m!MzPG2NhAt<5-8P*`Zq9$Tr2K5~2Zg-(%8lq*@U8hh7^rf z;-`;vyrW#$a2ysMPrJT!X62=D3P93S9mmuq)4%^q*Yd{L}_!SYJBxaXZ>I3Qwr1r(pJwOK}oE^XCz97|0$PsEQ zl)AT%4{VrBdoYjXsUe%8z{4dv5|Y;Z^D~}J(_%Rz8H>E;H zX|>v__3+voVc}$+SLiE$=CUdr>KRyjxu*`7PB+;Y2X|OVc^9Vq1ttjzj%CcTFr%54 zZ4>IA$}hDPKaNRN{f5%(DaMN{-yA!pl4q9iN!0U3?l6q2h2u(h}34hagkf zcNvH9)PQqXs9*|-0T)=9TGalS+h*A29I5mWU%c+@EMg7B`zFUX_Cps05tf_L9koi$ zZ;OXZWa|aAYw=!UAr5f!-&z6q06QLpGeq1tk>SKMO19@xs>lPebeLausr98SQvxI( zV01XiKZwr>=1IC2(4+uUUX&zDy@bIfqWh!wtrVW>gpKnIGN=&95g8udzE;bIs}S;s ztW-`NCN#~%$B{j?vUGO7$$oRRlH1ff{}V-t&6CrX3nol&!jVx*?vy6Xq+1GVVCs2l z%wlN3hku1bDCAAx>(GOG{$s~Q@B0t 0: # generate then filter loop - neg_items = torch.empty_like(neg_users, dtype=torch.int64).random_(0, item_range) - neg_mask = true_mat[neg_users, neg_items] - neg_u.append(neg_users.masked_select(neg_mask)) - neg_i.append(neg_items.masked_select(neg_mask)) - - neg_users = neg_users.masked_select(1-neg_mask) - - neg_users = torch.cat(neg_u) - neg_items = torch.cat(neg_i) - if sort == False: - return neg_users, neg_items - - sorted_users, sort_indices = torch.sort(neg_users) - return sorted_users, neg_items[sort_indices] + return hr, ndcg def main(): log_hardware() - args = parse_args() args.distributed, args.world_size = init_distributed(args.local_rank) log_args(args) @@ -229,90 +178,35 @@ def main(): if args.seed is not None: torch.manual_seed(args.seed) - # Save configuration to file print("Saving results to {}".format(args.checkpoint_dir)) if not os.path.exists(args.checkpoint_dir) and args.checkpoint_dir != '': os.makedirs(args.checkpoint_dir, exist_ok=True) checkpoint_path = os.path.join(args.checkpoint_dir, 'model.pth') - # more like load trigger timer now LOGGER.log(key=tags.PREPROC_HP_NUM_EVAL, value=args.valid_negative) # The default of np.random.choice is replace=True, so does pytorch random_() LOGGER.log(key=tags.PREPROC_HP_SAMPLE_EVAL_REPLACEMENT, value=True) LOGGER.log(key=tags.INPUT_HP_SAMPLE_TRAIN_REPLACEMENT, value=True) LOGGER.log(key=tags.INPUT_STEP_EVAL_NEG_GEN) - # sync worker before timing. 
+ # sync workers before timing if args.distributed: torch.distributed.broadcast(torch.tensor([1], device="cuda"), 0) torch.cuda.synchronize() LOGGER.log(key=tags.RUN_START) - run_start_time = time.time() - # load not converted data, just seperate one for test train_ratings = torch.load(args.data+'/train_ratings.pt', map_location=torch.device('cuda:{}'.format(args.local_rank))) test_ratings = torch.load(args.data+'/test_ratings.pt', map_location=torch.device('cuda:{}'.format(args.local_rank))) - # get input data - # get dims nb_maxs = torch.max(train_ratings, 0)[0] - nb_users = nb_maxs[0].item()+1 - nb_items = nb_maxs[1].item()+1 - train_users = train_ratings[:,0] - train_items = train_ratings[:,1] - del nb_maxs, train_ratings - LOGGER.log(key=tags.INPUT_SIZE, value=len(train_users)) - # produce things not change between epoch - # mask for filtering duplicates with real sample - # note: test data is removed before create mask, same as reference - mat = torch.cuda.ByteTensor(nb_users, nb_items).fill_(1) - mat[train_users, train_items] = 0 - # create label - train_label = torch.ones_like(train_users, dtype=torch.float32) - neg_label = torch.zeros_like(train_label, dtype=torch.float32) - neg_label = neg_label.repeat(args.negative_samples) - train_label = torch.cat((train_label,neg_label)) - del neg_label - if args.fp16: - train_label = train_label.half() + nb_users = nb_maxs[0].item() + 1 + nb_items = nb_maxs[1].item() + 1 + LOGGER.log(key=tags.INPUT_SIZE, value=len(train_ratings)) - # produce validation negative sample on GPU all_test_users = test_ratings.shape[0] - test_users = test_ratings[:,0] - test_pos = test_ratings[:,1].reshape(-1,1) - test_negs = generate_neg(test_users, mat, nb_items, args.valid_negative, True)[1] - - # create items with real sample at last position - test_users = test_users.reshape(-1,1).repeat(1,1+args.valid_negative) - test_items = torch.cat((test_negs.reshape(-1,args.valid_negative), test_pos), dim=1) - del test_ratings, test_negs - - # generate dup mask and real indice for exact same behavior on duplication compare to reference - # here we need a sort that is stable(keep order of duplicates) - # this is a version works on integer - sorted_items, indices = torch.sort(test_items) # [1,1,1,2], [3,1,0,2] - sum_item_indices = sorted_items.float()+indices.float()/len(indices[0]) #[1.75,1.25,1.0,2.5] - indices_order = torch.sort(sum_item_indices)[1] #[2,1,0,3] - stable_indices = torch.gather(indices, 1, indices_order) #[0,1,3,2] - # produce -1 mask - dup_mask = (sorted_items[:,0:-1] == sorted_items[:,1:]) - dup_mask = torch.cat((torch.zeros_like(test_pos, dtype=torch.uint8), dup_mask),dim=1) - dup_mask = torch.gather(dup_mask,1,stable_indices.sort()[1]) - # produce real sample indices to later check in topk - sorted_items, indices = (test_items != test_pos).sort() - sum_item_indices = sorted_items.float()+indices.float()/len(indices[0]) - indices_order = torch.sort(sum_item_indices)[1] - stable_indices = torch.gather(indices, 1, indices_order) - real_indices = stable_indices[:,0] - del sorted_items, indices, sum_item_indices, indices_order, stable_indices, test_pos - - if args.distributed: - test_users = torch.chunk(test_users, args.world_size)[args.local_rank] - test_items = torch.chunk(test_items, args.world_size)[args.local_rank] - dup_mask = torch.chunk(dup_mask, args.world_size)[args.local_rank] - real_indices = torch.chunk(real_indices, args.world_size)[args.local_rank] + test_users, test_items, dup_mask, real_indices = dataloading.create_test_data(train_ratings, 
test_ratings, args) # make pytorch memory behavior more consistent later torch.cuda.empty_cache() @@ -320,36 +214,33 @@ def main(): LOGGER.log(key=tags.INPUT_BATCH_SIZE, value=args.batch_size) LOGGER.log(key=tags.INPUT_ORDER) # we shuffled later with randperm - print('Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d' - % (time.time()-run_start_time, nb_users, nb_items, len(train_users), - nb_users)) - # Create model model = NeuMF(nb_users, nb_items, - mf_dim=args.factors, mf_reg=0., + mf_dim=args.factors, mlp_layer_sizes=args.layers, - mlp_layer_regs=[0. for i in args.layers], dropout=args.dropout) - if args.fp16: - model = model.half() + optimizer = FusedAdam(model.parameters(), lr=args.learning_rate, + betas=(args.beta1, args.beta2), eps=args.eps, eps_inside_sqrt=False) + + criterion = nn.BCEWithLogitsLoss(reduction='none') # use torch.mean() with dim later to avoid copy to host + # Move model and loss to GPU + model = model.cuda() + criterion = criterion.cuda() + + if args.opt_level == "O2": + model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, + keep_batchnorm_fp32=False, loss_scale='dynamic') + + if args.distributed: + model = DDP(model) + + local_batch = args.batch_size // args.world_size + traced_criterion = torch.jit.trace(criterion.forward, + (torch.rand(local_batch,1),torch.rand(local_batch,1))) print(model) print("{} parameters".format(utils.count_parameters(model))) - - # Save model text description - with open(os.path.join(args.checkpoint_dir, 'model.txt'), 'w') as file: - file.write(str(model)) - - # Add optimizer and loss to graph - if args.fp16: - fp_optimizer = Fp16Optimizer(model, args.loss_scale) - params = fp_optimizer.fp32_params - else: - params = model.parameters() - - optimizer = FusedAdam(params, lr=args.learning_rate, betas=(args.beta1, args.beta2), eps=args.eps, eps_inside_sqrt=False) - criterion = nn.BCEWithLogitsLoss(reduction='none') # use torch.mean() with dim later to avoid copy to host LOGGER.log(key=tags.OPT_LR, value=args.learning_rate) LOGGER.log(key=tags.OPT_NAME, value="Adam") LOGGER.log(key=tags.OPT_HP_ADAM_BETA1, value=args.beta1) @@ -357,53 +248,22 @@ def main(): LOGGER.log(key=tags.OPT_HP_ADAM_EPSILON, value=args.eps) LOGGER.log(key=tags.MODEL_HP_LOSS_FN, value=tags.VALUE_BCE) - # Move model and loss to GPU - model = model.cuda() - criterion = criterion.cuda() - - if args.distributed: - model = DDP(model) - local_batch = args.batch_size // int(os.environ['WORLD_SIZE']) - else: - local_batch = args.batch_size - traced_criterion = torch.jit.trace(criterion.forward, (torch.rand(local_batch,1),torch.rand(local_batch,1))) - - train_users_per_worker = len(train_label) / int(os.environ['WORLD_SIZE']) - train_users_begin = int(train_users_per_worker * args.local_rank) - train_users_end = int(train_users_per_worker * (args.local_rank + 1)) - - # Create files for tracking training - valid_results_file = os.path.join(args.checkpoint_dir, 'valid_results.csv') - # Calculate initial Hit Ratio and NDCG - test_x = test_users.view(-1).split(args.valid_batch_size) - test_y = test_items.view(-1).split(args.valid_batch_size) if args.mode == 'test': state_dict = torch.load(checkpoint_path) model.load_state_dict(state_dict) - - begin = time.time() - LOGGER.log(key=tags.EVAL_START, value=-1) - - hr, ndcg = val_epoch(model, test_x, test_y, dup_mask, real_indices, args.topk, samples_per_user=test_items.size(1), - num_user=all_test_users, distributed=args.distributed) - val_time = time.time() - begin - print('Initial HR@{K} = 
{hit_rate:.4f}, NDCG@{K} = {ndcg:.4f}, valid_time: {val_time:.4f}' - .format(K=args.topk, hit_rate=hr, ndcg=ndcg, val_time=val_time)) - - LOGGER.log(key=tags.EVAL_ACCURACY, value={"epoch": -1, "value": hr}) - LOGGER.log(key=tags.EVAL_TARGET, value=args.threshold) - LOGGER.log(key=tags.EVAL_STOP, value=-1) - - if args.mode == 'test': + hr, ndcg = val_epoch(model, test_users, test_items, dup_mask, real_indices, args.topk, + samples_per_user=args.valid_negative + 1, + num_user=all_test_users, distributed=args.distributed) + print('HR@{K} = {hit_rate:.4f}, NDCG@{K} = {ndcg:.4f}' + .format(K=args.topk, hit_rate=hr, ndcg=ndcg)) return success = False max_hr = 0 - LOGGER.log(key=tags.TRAIN_LOOP) - train_throughputs = [] - eval_throughputs = [] + train_throughputs, eval_throughputs = [], [] + LOGGER.log(key=tags.TRAIN_LOOP) for epoch in range(args.epochs): LOGGER.log(key=tags.TRAIN_EPOCH_START, value=epoch) @@ -412,68 +272,43 @@ def main(): begin = time.time() - # prepare data for epoch - neg_users, neg_items = generate_neg(train_users, mat, nb_items, args.negative_samples) - epoch_users = torch.cat((train_users,neg_users)) - epoch_items = torch.cat((train_items,neg_items)) - - del neg_users, neg_items - - # shuffle prepared data and split into batches - epoch_indices = torch.randperm(train_users_end - train_users_begin, device='cuda:{}'.format(args.local_rank)) - epoch_indices += train_users_begin - - epoch_users = epoch_users[epoch_indices] - epoch_items = epoch_items[epoch_indices] - epoch_label = train_label[epoch_indices] - - epoch_users_list = epoch_users.split(local_batch) - epoch_items_list = epoch_items.split(local_batch) - epoch_label_list = epoch_label.split(local_batch) - - # only print progress bar on rank 0 - num_batches = len(epoch_users_list) - # handle extremely rare case where last batch size < number of worker - if len(epoch_users) % args.batch_size < args.world_size: - print("epoch_size % batch_size < number of worker!") - exit(1) - + epoch_users, epoch_items, epoch_label = dataloading.prepare_epoch_train_data(train_ratings, nb_items, args) + num_batches = len(epoch_users) for i in range(num_batches // args.grads_accumulated): for j in range(args.grads_accumulated): batch_idx = (args.grads_accumulated * i) + j - user = epoch_users_list[batch_idx] - item = epoch_items_list[batch_idx] - label = epoch_label_list[batch_idx].view(-1,1) + user = epoch_users[batch_idx] + item = epoch_items[batch_idx] + label = epoch_label[batch_idx].view(-1,1) outputs = model(user, item) loss = traced_criterion(outputs, label).float() loss = torch.mean(loss.view(-1), 0) - if args.fp16: - fp_optimizer.backward(loss) + + if args.opt_level == "O2": + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() else: loss.backward() - - if args.fp16: - fp_optimizer.step(optimizer) - else: - optimizer.step() + optimizer.step() for p in model.parameters(): - p.grad = None + p.grad = None - del epoch_users, epoch_items, epoch_label, epoch_users_list, epoch_items_list, epoch_label_list, user, item, label + del epoch_users, epoch_items, epoch_label train_time = time.time() - begin begin = time.time() - epoch_samples = len(train_users) * (args.negative_samples + 1) + epoch_samples = len(train_ratings) * (args.negative_samples + 1) train_throughput = epoch_samples / train_time train_throughputs.append(train_throughput) LOGGER.log(key='train_throughput', value=train_throughput) LOGGER.log(key=tags.TRAIN_EPOCH_STOP, value=epoch) LOGGER.log(key=tags.EVAL_START, value=epoch) - hr, ndcg = 
val_epoch(model, test_x, test_y, dup_mask, real_indices, args.topk, samples_per_user=test_items.size(1), - num_user=all_test_users, output=valid_results_file, epoch=epoch, distributed=args.distributed) + hr, ndcg = val_epoch(model, test_users, test_items, dup_mask, real_indices, args.topk, + samples_per_user=args.valid_negative + 1, + num_user=all_test_users, epoch=epoch, distributed=args.distributed) val_time = time.time() - begin print('Epoch {epoch}: HR@{K} = {hit_rate:.4f}, NDCG@{K} = {ndcg:.4f},' @@ -486,7 +321,7 @@ def main(): LOGGER.log(key=tags.EVAL_TARGET, value=args.threshold) LOGGER.log(key=tags.EVAL_STOP, value=epoch) - eval_size = all_test_users * test_items.size(1) + eval_size = all_test_users * (args.valid_negative + 1) eval_throughput = eval_size / val_time eval_throughputs.append(eval_throughput) LOGGER.log(key='eval_throughput', value=eval_throughput) diff --git a/PyTorch/Recommendation/NCF/neumf.py b/PyTorch/Recommendation/NCF/neumf.py index 60dbfb54..6485b8ff 100644 --- a/PyTorch/Recommendation/NCF/neumf.py +++ b/PyTorch/Recommendation/NCF/neumf.py @@ -34,8 +34,8 @@ import torch.nn as nn import sys from os.path import abspath, join, dirname -# enabling modules discovery from global entrypoint -sys.path.append(abspath(dirname(__file__)+'/')) +# enabling modules discovery from the global entrypoint +sys.path.append(abspath(dirname(__file__) + '/')) from logger.logger import LOGGER from logger import tags @@ -44,12 +44,8 @@ LOGGER.model = 'ncf' class NeuMF(nn.Module): def __init__(self, nb_users, nb_items, - mf_dim, mf_reg, - mlp_layer_sizes, mlp_layer_regs, - dropout=0): + mf_dim, mlp_layer_sizes, dropout=0): - if len(mlp_layer_sizes) != len(mlp_layer_regs): - raise RuntimeError('u dummy, layer_sizes != layer_regs!') if mlp_layer_sizes[0] % 2 != 0: raise RuntimeError('u dummy, mlp_layer_sizes[0] % 2 != 0') super(NeuMF, self).__init__() diff --git a/PyTorch/Recommendation/NCF/prepare_dataset.sh b/PyTorch/Recommendation/NCF/prepare_dataset.sh index 8cad052b..1abca111 100755 --- a/PyTorch/Recommendation/NCF/prepare_dataset.sh +++ b/PyTorch/Recommendation/NCF/prepare_dataset.sh @@ -31,10 +31,11 @@ #!/bin/bash set -e +set -x DATASET_NAME=${1:-'ml-20m'} -RAW_DATADIR='/data' -CACHED_DATADIR='/data/cache/'${DATASET_NAME} +RAW_DATADIR=${2:-'/data'} +CACHED_DATADIR="${RAW_DATADIR}/cache/${DATASET_NAME}" # you can add another option to this case in order to support other datasets case ${DATASET_NAME} in @@ -51,9 +52,17 @@ case ${DATASET_NAME} in exit 1 esac -mkdir -p ${RAW_DATADIR} -mkdir -p ${CACHED_DATADIR} -rm -f log +if [ ! -d ${RAW_DATADIR} ]; then + mkdir -p ${RAW_DATADIR} +fi + +if [ ! -d ${CACHED_DATADIR} ]; then + mkdir -p ${CACHED_DATADIR} +fi + +if [ -f log ]; then + rm -f log +fi if [ ! -f ${ZIP_PATH} ]; then echo 'Dataset not found, downloading...' 
@@ -76,6 +85,6 @@ else fi echo "Dataset $DATASET_NAME successfully prepared at: $CACHED_DATADIR\n" -echo 'You can now run the training with: python -m torch.distributed.launch --nproc_per_node= ncf.py --data /data/cache/ml-20m' +echo "You can now run the training with: python -m torch.distributed.launch --nproc_per_node= ncf.py --data ${CACHED_DATADIR}" diff --git a/PyTorch/Recommendation/NCF/requirements.txt b/PyTorch/Recommendation/NCF/requirements.txt index fb6c7ed7..8c822a6e 100644 --- a/PyTorch/Recommendation/NCF/requirements.txt +++ b/PyTorch/Recommendation/NCF/requirements.txt @@ -1 +1,2 @@ pandas +tqdm diff --git a/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/trainer.py b/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/trainer.py index 6b144e26..1ef0d04f 100755 --- a/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/trainer.py +++ b/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/trainer.py @@ -10,6 +10,12 @@ import torch.distributed as dist from maskrcnn_benchmark.utils.comm import get_world_size from maskrcnn_benchmark.utils.metric_logger import MetricLogger +try: + from apex import amp + use_amp = True +except ImportError: + print('Use APEX for multi-precision via apex.amp') + use_amp = False def reduce_loss_dict(loss_dict): """ @@ -80,7 +86,7 @@ def do_train( # Note: If mixed precision is not used, this ends up doing nothing # Otherwise apply loss scaling for mixed-precision recipe if use_amp: - with optimizer.scale_loss(losses) as scaled_losses: + with amp.scale_loss(losses, optimizer) as scaled_losses: scaled_losses.backward() else: losses.backward() diff --git a/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils/model_zoo.py b/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils/model_zoo.py index 7a0ebb34..bd644d7c 100755 --- a/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils/model_zoo.py +++ b/PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils/model_zoo.py @@ -2,9 +2,14 @@ import os import sys -from torch.utils.model_zoo import _download_url_to_file -from torch.utils.model_zoo import urlparse -from torch.utils.model_zoo import HASH_REGEX +try: + from torch.utils.model_zoo import _download_url_to_file + from torch.utils.model_zoo import urlparse + from torch.utils.model_zoo import HASH_REGEX +except: + from torch.hub import _download_url_to_file + from torch.hub import urlparse + from torch.hub import HASH_REGEX from maskrcnn_benchmark.utils.comm import is_main_process from maskrcnn_benchmark.utils.comm import synchronize diff --git a/PyTorch/Segmentation/MaskRCNN/pytorch/tools/train_net.py b/PyTorch/Segmentation/MaskRCNN/pytorch/tools/train_net.py index fef069ff..6faf4675 100755 --- a/PyTorch/Segmentation/MaskRCNN/pytorch/tools/train_net.py +++ b/PyTorch/Segmentation/MaskRCNN/pytorch/tools/train_net.py @@ -97,14 +97,9 @@ def train(cfg, local_rank, distributed): if use_amp: # Initialize mixed-precision training use_mixed_precision = cfg.DTYPE == "float16" - amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE) - # wrap the optimizer for mixed precision - if cfg.SOLVER.ACCUMULATE_GRAD: - # also specify number of steps to accumulate over - optimizer = amp_handle.wrap_optimizer(optimizer, num_loss=cfg.SOLVER.ACCUMULATE_STEPS) - else: - optimizer = amp_handle.wrap_optimizer(optimizer) + amp_opt_level = 'O1' if use_mixed_precision else 'O0' + model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level) if distributed: if 
use_apex_ddp: diff --git a/PyTorch/SpeechSynthesis/Tacotron2/Dockerfile b/PyTorch/SpeechSynthesis/Tacotron2/Dockerfile index c0972a3d..176fa906 100644 --- a/PyTorch/SpeechSynthesis/Tacotron2/Dockerfile +++ b/PyTorch/SpeechSynthesis/Tacotron2/Dockerfile @@ -1,5 +1,10 @@ -FROM nvcr.io/nvidia/pytorch:18.12.1-py3 +FROM nvcr.io/nvidia/pytorch:19.03-py3 ADD . /workspace/tacotron2 WORKDIR /workspace/tacotron2 RUN pip install -r requirements.txt +RUN cd /workspace; \ + git clone https://github.com/NVIDIA/apex.git; \ + cd /workspace/apex; \ + pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . +WORKDIR /workspace/tacotron2 diff --git a/PyTorch/SpeechSynthesis/Tacotron2/README.md b/PyTorch/SpeechSynthesis/Tacotron2/README.md index f0802bfa..f5606df6 100644 --- a/PyTorch/SpeechSynthesis/Tacotron2/README.md +++ b/PyTorch/SpeechSynthesis/Tacotron2/README.md @@ -1,79 +1,157 @@ -# Tacotron 2 And WaveGlow v1.0 For PyTorch +# Tacotron 2 And WaveGlow v1.5 For PyTorch -This repository provides a script and recipe to train Tacotron 2 and WaveGlow v1.0 to achieve state of the art accuracy, and is tested and maintained by NVIDIA. +This repository provides a script and recipe to train Tacotron 2 and WaveGlow +v1.5 models to achieve state of the art accuracy, and is tested and maintained by +NVIDIA. -## Table Of Contents +Table of Contents +================= * [The model](#the-model) - * [Default configuration](#default-configuration) + * [Model architecture](#model-architecture) + * [Default configuration](#default-configuration) + * [Feature support matrix](#feature-support-matrix) + * [Features](#features) * [Setup](#setup) - * [Requirements](#requirements) + * [Requirements](#requirements) * [Quick Start Guide](#quick-start-guide) * [Details](#details) - * [Training process](#training-process) - * [Hyperparameters and command line arguments](#hyperparameters-and-command-line-arguments) - * [Shared parameters](#shared-parameters) - * [Shared audio/STFT parameters](#shared-audiostft-parameters) - * [Tacotron 2 parameters](#tacotron-2-parameters) - * [WaveGlow parameters](#waveglow-parameters) - * [Enabling mixed precision](#enabling-mixed-precision) - * [Inference process](#inference-process) + * [Scripts and sample code](#scripts-and-sample-code) + * [Parameters](#parameters) + * [Shared parameters](#shared-parameters) + * [Shared audio/STFT parameters](#shared-audiostft-parameters) + * [Tacotron 2 parameters](#tacotron-2-parameters) + * [WaveGlow parameters](#waveglow-parameters) + * [Command-line options](#command-line-options) + * [Getting the data](#getting-the-data) + * [Dataset guidelines](#dataset-guidelines) + * [Multi-dataset](#multi-dataset) + * [Training process](#training-process) + * [Inference process](#inference-process) +* [Mixed precision training](#mixed-precision-training) + * [Enabling mixed precision](#enabling-mixed-precision) * [Benchmarking](#benchmarking) - * [Inference performance benchmark](#inference-performance-benchmark) - * [Training performance benchmark](#training-performance-benchmark) + * [Training performance benchmark](#training-performance-benchmark) + * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) - * [Training accuracy results](#training-accuracy-results) - * [Training performance results](#training-performance-results) - * [Expected training time](#expected-training-time) - * [Inference performance results](#inference-performance-results) + * [Training accuracy 
results](#training-accuracy-results) + * [NVIDIA DGX-1 (8x V100 16G)](#nvidia-dgx-1-8x-v100-16g) + * [Training performance results](#training-performance-results) + * [NVIDIA DGX-1 (8x V100 16G)](#nvidia-dgx-1-8x-v100-16g) + * [Expected training time](#expected-training-time) + * [Inference performance results](#inference-performance-results) + * [NVIDIA DGX-1 (8x V100 16G)](#nvidia-dgx-1-8x-v100-16g) * [Changelog](#changelog) * [Known issues](#known-issues) +## The model +This text-to-speech (TTS) system is a combination of two neural network +models: -# The model -This text-to-speech (TTS) system is a combination of two neural network models: -* a modified Tacotron 2 model from the [Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions](https://arxiv.org/abs/1712.05884) paper and +* a modified Tacotron 2 model from the [Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions](https://arxiv.org/abs/1712.05884) +paper and * a flow-based neural network model from the [WaveGlow: A Flow-based Generative Network for Speech Synthesis](https://arxiv.org/abs/1811.00002) paper. -The Tacotron 2 and WaveGlow model form a text-to-speech system that enables -user to synthesise a natural sounding speech from raw transcripts without -any additional prosody information. +The Tacotron 2 and WaveGlow models form a text-to-speech system that enables +users to synthesize natural sounding speech from raw transcripts without +any additional information such as patterns and/or rhythms of speech. -Our implementation of Tacotron 2 model differs from the model described in the -paper. Our implementation uses Dropout instead of Zoneout to regularize the LSTM layers. -Also, the original text-to-speech system proposed in the paper used the [WaveNet](https://arxiv.org/abs/1609.03499) -model to synthesize waveforms. -In our implementation, we use the WaveGlow model for this purpose. +Our implementation of Tacotron 2 models differs from the model described in the +paper. Our implementation uses Dropout instead of Zoneout to regularize the +LSTM layers. Also, the original text-to-speech system proposed in the paper +uses the [WaveNet](https://arxiv.org/abs/1609.03499) model to synthesize +waveforms. In our implementation, we use the WaveGlow model for this purpose. Both models are based on implementations of NVIDIA GitHub repositories [Tacotron 2](https://github.com/NVIDIA/tacotron2) and [WaveGlow](https://github.com/NVIDIA/waveglow), and are trained on a publicly available [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/). -This model trains with mixed precision tensor cores on Volta, therefore researchers -can get results much faster than training without tensor cores. This model is -tested against each NGC monthly container release to ensure consistent accuracy -and performance over time. +The Tacotron 2 and WaveGlow model enables you to efficiently synthesize high +quality speech from text. -## Default configuration -The Tacotron 2 model produces mel spectrograms from input text using -encoder-decoder architecture. WaveGlow is a flow-based model that consumes the -mel spectrograms to generate speech. Both models support multi-gpu and mixed -precision training with dynamic loss scaling (see Apex code [here](https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py)), -as well as mixed precision inference. +Both models are trained with mixed precision using Tensor Cores on NVIDIA +Volta and Turing GPUs. 
Therefore, researchers can get results 1.5x faster for Tacotron 2 +and 2.2x faster for WaveGlow than training without Tensor Cores, while +experiencing the benefits of mixed precision training. The models are tested +against each NGC monthly container release to ensure consistent accuracy and +performance over time. -# Setup -The following sections list the requirements in order to -start training the Tacotron 2 and WaveGlow models. +### Model architecture -## Requirements -This repository contains `Dockerfile` which extends the PyTorch NGC container +The Tacotron 2 model is a recurrent sequence-to-sequence model with attention that +predicts mel-spectrograms from text. The encoder (blue blocks in the figure +below) transforms the whole text into a fixed-size hidden feature +representation. This feature representation is then consumed by the +autoregressive decoder (orange blocks) that produces one spectrogram frame at +a time. In our implementation, the autoregressive WaveNet (green block) is +replaced by the flow-based generative WaveGlow. + +![](./img/tacotron2_arch.png "Tacotron 2 architecture") + +Figure 1. Architecture of the Tacotron 2 model. Taken from the +[Tacotron 2](https://arxiv.org/abs/1712.05884) paper. + +The WaveGlow model is a flow-based generative model that generates audio +samples from Gaussian distribution using mel-spectrogram conditioning (Figure +2). During training, the model learns to transform the dataset distribution +into spherical Gaussian distribution through a series of flows. One step of a +flow consists of an invertible convolution, followed by a modified WaveNet +architecture that serves as an affine coupling layer. During inference, the +network is inverted and audio samples are generated from the Gaussian +distribution. + +![](./img/waveglow_arch.png "WaveGlow architecture") + +Figure 2. Architecture of the WaveGlow model. Taken from the +[WaveGlow](https://arxiv.org/abs/1811.00002) paper. + + +### Default configuration + +Both models support multi-GPU and mixed precision training with dynamic loss +scaling (see Apex code +[here](https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py)), +as well as mixed precision inference. To speed up Tacotron 2 training, +reference mel-spectrograms are generated during a preprocessing step and read +directly from disk during training, instead of being generated during training. + +The following features were implemented in this model: + +* data-parallel multi-GPU training +* dynamic loss scaling with backoff for Tensor Cores (mixed precision) +training. + +### Feature support matrix + +The following features are supported by this model. + +| Feature | Tacotron 2 | and WaveGlow | +|:-------|---------:|-----------:| +|[AMP](https://nvidia.github.io/apex/amp.html) | Yes | Yes | +|[Apex DistributedDataParallel](https://nvidia.github.io/apex/parallel.html) | Yes | Yes | + +#### Features + +AMP - a tool that enables Tensor Core-accelerated training. Please refer to section [Enabling mixed precision](#enabling-mixed-precision) for more details. + +Apex DistributedDataParallel - a module wrapper that enables easy multiprocess distributed data parallel training, similar to `torch.nn.parallel.DistributedDataParallel`. `DistributedDataParallel` is optimized for use with NCCL. It achieves high performance by overlapping communication with computation during backward() and bucketing smaller gradient transfers to reduce the total number of transfers required. 
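As a hedged illustration of how these two features fit together (assuming Apex is installed and, for the distributed case, that the default process group has already been initialized), the sketch below wires a model through `amp.initialize` and Apex's `DistributedDataParallel`. It is a minimal standalone example rather than the repository's training script, and the optimizer choice here is arbitrary.

```python
import torch
from apex import amp
from apex.parallel import DistributedDataParallel as DDP

def build_amp_ddp_model(model, learning_rate=1e-3, distributed=False, opt_level="O1"):
    """Wrap a model for mixed-precision (AMP) and optional multi-GPU (Apex DDP) training."""
    model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # opt_level "O1"/"O2" selects the mixed-precision recipe; dynamic loss scaling
    # handles the backoff behaviour described above
    model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level,
                                      loss_scale="dynamic")
    if distributed:
        model = DDP(model)  # overlaps gradient all-reduce with backward()
    return model, optimizer

def training_step(model, optimizer, criterion, inputs, targets):
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    # scale the loss so FP16 gradients do not underflow; unscaled in optimizer.step()
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
```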
+ +## Setup + +The following section lists the requirements in order to start training the +Tacotron 2 and WaveGlow models. + +### Requirements + +This repository contains Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) -* [PyTorch 19.05-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) or newer -* [NVIDIA Volta based GPU](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) +* [PyTorch 19.04-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) +or newer +* [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU For more information about how to get started with NGC containers, see the @@ -84,35 +162,49 @@ Documentation: * [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) * [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running) -# Quick Start Guide -To train your model using mixed precision with tensor cores or using FP32, -perform the following steps using the default parameters of the Tacrotron 2 -and WaveGlow model on the [LJ Speech](https://keithito.com/LJ-Speech-Dataset/) dataset. +For those unable to use the PyTorch NGC container, to set up the required +environment or create your own container, see the versioned +[NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/dgx/support-matrix/index.html). -## 1. Clone the repository. +## Quick Start Guide + +To train your model using mixed precision with Tensor Cores or using FP32, +perform the following steps using the default parameters of the Tacrotron 2 +and WaveGlow model on the [LJ Speech](https://keithito.com/LJ-Speech-Dataset/) +dataset. + +1. Clone the repository. ```bash git clone https://github.com/NVIDIA/DeepLearningExamples.git cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2 ``` -## 2. Download and preprocess the dataset. +2. Download and preprocess the dataset. Use the `./scripts/prepare-dataset.sh` download script to automatically -download and preprocess the training, validation and test datasets. To run this script, issue: +download and preprocess the training, validation and test datasets. To run +this script, issue: ```bash bash scripts/prepare-dataset.sh ``` + +To preprocess the datasets for Tacotron 2 training, use the +`./scripts/prepare-mels.sh` script: +```bash +bash scripts/prepare_mels.sh +``` + Data is downloaded to the `./LJSpeech-1.1` directory (on the host). The `./LJSpeech-1.1` directory is mounted to the `/workspace/tacotron2/LJSpeech-1.1` -location in the NGC container. The script will also generate the necessary -filelists for training and validation in `./filelists` if they are not already present. +location in the NGC container. The preprocessed mel-spectrograms are stored in the +`./LJSpeech-1.1/mels` directory. -## 3. Build the Tacotron 2 and WaveGlow PyTorch NGC container. +3. Build the Tacotron 2 and WaveGlow PyTorch NGC container. ```bash bash scripts/docker/build.sh ``` -## 4. Start an interactive session in the NGC container to run training/inference. -After you build the container image, you can start an interactive CLI session with +4. Start an interactive session in the NGC container to run training/inference. 
+After you build the container image, you can start an interactive CLI session with: ```bash bash scripts/docker/interactive.sh @@ -121,210 +213,260 @@ bash scripts/docker/interactive.sh The `interactive.sh` script requires that the location on the dataset is specified. For example, `LJSpeech-1.1`. -## 5. Start training. -To run Tacotron 2 training, run: +5. Start training. +To start Tacotron 2 training, run: ```bash bash scripts/train_tacotron2.sh ``` -To run WaveGlow training, run: +To start WaveGlow training, run: ```bash bash scripts/train_waveglow.sh ``` -## 6. Start validation/evaluation. -Ensure your loss values are comparable to those listed in the table in the -Results section. For both models, the loss values are stored in the -`./output/nvlog.json` log file. +6. Start validation/evaluation. +Ensure your loss values are comparable to those listed in the table in the +[Results][#results] section. For both models, the loss values are stored in the +`./output/nvlog.json` log file. -After you have trained the Tacotron 2 and WaveGlow models, you should get audio results similar to the -samples in the `./audio` folder. For details about generating audio, see the +After you have trained the Tacotron 2 model for 1500 epochs and the +WaveGlow model for 800 epochs, you should get audio results similar to the +samples in the `./audio` folder. For details about generating audio, see the [Inference process](#inference-process) section below. -The training scripts automatically run the validation after each training -epoch. The results from the validation are printed to the standard output +The training scripts automatically run the validation after each training +epoch. The results from the validation are printed to the standard output (`stdout`) and saved to the log files. -## 7. Start inference. -After you have trained the Tacotron 2 and WaveGlow models, you can perform -inference using the respective checkpoints that are passed as `--tacotron2` -and `--waveglow` arguments. +7. Start inference. +After you have trained the Tacotron 2 and WaveGlow models, you can perform +inference using the respective checkpoints that are passed as `--tacotron2` +and `--waveglow` arguments. To run inference issue: ```bash -python inference.py --tacotron2 --waveglow -o output/ -i phrase.txt --fp16-run +python inference.py --tacotron2 --waveglow -o output/ -i text.txt --fp16-run ``` -The speech is generated from text file passed with `-i` argument. -If no file is provided or if the provided file cannot be opened, speech will be -generated from a default text located in the `inference.py` file. To run -inference in mixed precision, use `--fp16-run` flag. The output audio will -be stored in the path specified by `-o` argument. +The speech is generated from a text file that is passed with `-i` argument. To run +inference in mixed precision, use the `--amp-run` flag. The output audio will +be stored in the path specified by the `-o` argument. -# Details -The following sections provide greater details of the dataset, running training -and inference, and the training results. +## Details -## Training process -The Tacotron2 and WaveGlow models are trained separately and independently. -Both models obtain mel spectrograms from short time Fourier transform (STFT) -during training. These mel spectrograms are used for loss computation in case -of Tacotron 2 and as conditioning input to the network in case of WaveGlow. 
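Both models consume mel spectrograms obtained from a short-time Fourier transform of the training audio. As a hedged illustration (the repository ships its own STFT and mel-filterbank code under `common/`), the sketch below computes a log-mel spectrogram with librosa using the shared audio/STFT defaults listed in the Parameters section; the choice of 80 mel channels is an assumption, not a value quoted in this document.

```python
# Minimal sketch, not the repository's STFT module: log-mel spectrogram with librosa.
import numpy as np
import librosa

def wav_to_logmel(path, sampling_rate=22050, filter_length=1024,
                  hop_length=256, win_length=1024,
                  mel_fmin=0.0, mel_fmax=8000.0, n_mels=80):
    audio, _ = librosa.load(path, sr=sampling_rate)
    mel = librosa.feature.melspectrogram(
        y=audio, sr=sampling_rate, n_fft=filter_length,
        hop_length=hop_length, win_length=win_length,
        n_mels=n_mels, fmin=mel_fmin, fmax=mel_fmax)
    # log compression with a small floor, similar in spirit to the dynamic range
    # compression applied to Tacotron 2 training targets
    return np.log(np.clip(mel, a_min=1e-5, a_max=None))
```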
+The following sections provide greater details of the dataset, running +training and inference, and the training results. -The training loss is averaged over an entire training epoch, whereas the -validation loss is averaged over the validation dataset. Performance is -reported in total input tokens per second for the Tacotron 2 model, and -in total output samples per second for the WaveGlow model. Both measures are -recorded as `train_iter_items/sec` (after each iteration) and `train_epoch_items/sec` -(averaged over epoch) in the output log. The result is averaged over an -entire training epoch and summed over all GPUs that were included in the training. +### Scripts and sample code -Even though the training script uses all available GPUs, you can change -this behavior by setting the `CUDA_VISIBLE_DEVICES` variable in your -environment or by setting the `NV_GPU` variable at the Docker container launch -([see section "GPU isolation"](https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation)). +The sample code for Tacotron 2 and WaveGlow has scripts specific to a +particular model, located in directories `./tacotron2` and `./waveglow`, as well as scripts common to both +models, located in the `./common` directory. The model-specific scripts are as follows: -### Hyperparameters and command line arguments -Here, we list the most important hyperparameters and command line arguments, -together with their default values that are used to train Tacotron 2 and +* `/model.py` - the model architecture, definition of forward and +inference functions +* `/arg_parser.py` - argument parser for parameters specific to a +given model +* `/data_function.py` - data loading functions +* `/loss_function.py` - loss function for the model + +The common scripts contain layer definitions common to both models +(`common/layers.py`), some utility scripts (`common/utils.py`) and scripts +for audio processing (`common/audio_processing.py` and `common/stft.py`). In +the root directory `./` of this repository, the `./run.py` script is used for +training while inference can be executed with the `./inference.py` script. The +scripts `./models.py`, `./data_functions.py` and `./loss_functions.py` call +the respective scripts in the `` directory, depending on what +model is trained using the `run.py` script. + +### Parameters + +In this section, we list the most important hyperparameters and command-line arguments, +together with their default values that are used to train Tacotron 2 and WaveGlow models. 
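The subsections below list the shared, audio/STFT, Tacotron 2, and WaveGlow parameters. As a hedged sketch of how such options are typically exposed on the command line (this is not the repository's per-model `arg_parser.py`), a few of the shared parameters could be registered with `argparse` as follows, using the Tacotron 2 defaults quoted below:

```python
# Illustrative only: the defaults shown are the Tacotron 2 values from the
# parameter lists below; the real parsers live in the per-model arg_parser.py.
import argparse

def parse_shared_args():
    parser = argparse.ArgumentParser(description='Tacotron 2 / WaveGlow training (sketch)')
    parser.add_argument('--epochs', type=int, default=1500,
                        help='number of training epochs')
    parser.add_argument('--learning-rate', type=float, default=1e-3,
                        help='base learning rate')
    parser.add_argument('--batch-size', type=int, default=80,
                        help='per-GPU batch size')
    parser.add_argument('--amp-run', action='store_true',
                        help='enable mixed precision training')
    parser.add_argument('--sampling-rate', type=int, default=22050,
                        help='sampling rate in Hz of input and output audio')
    return parser.parse_args()
```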
#### Shared parameters -`--epochs` - number of epochs (Tacotron 2: 1500, WaveGlow: 1000) -`--learning-rate` - learning rate (Tacotron 2: 1e-3, WaveGlow: 1e-4) - -`--batch-size` - batch size (Tacotron 2 FP16/FP32: 80/48, WaveGlow FP16/FP32: 8/4) - -`--fp16-run` - use mixed precision training +* `--epochs` - number of epochs (Tacotron 2: 1500, WaveGlow: 1000) +* `--learning-rate` - learning rate (Tacotron 2: 1e-3, WaveGlow: 1e-4) +* `--batch-size` - batch size (Tacotron 2 FP16/FP32: 80/48, WaveGlow FP16/FP32: 8/4) +* `--amp-run` - use mixed precision training #### Shared audio/STFT parameters -`--sampling-rate` - Sampling rate in Hz of input and output audio (22050) -`--filter-length` - (1024) - -`--hop-length` - Hop length for FFT, i.e., sample stride between consecutive FFTs (256) - -`--win-length` - Window size for FFT (1024) - -`--mel-fmin` - Lowest frequency in Hz (0.0) - -`--mel-fmax` - Highest frequency in Hz (8.000) +* `--sampling-rate` - sampling rate in Hz of input and output audio (22050) +* `--filter-length` - (1024) +* `--hop-length` - hop length for FFT, i.e., sample stride between consecutive FFTs (256) +* `--win-length` - window size for FFT (1024) +* `--mel-fmin` - lowest frequency in Hz (0.0) +* `--mel-fmax` - highest frequency in Hz (8.000) #### Tacotron 2 parameters -`--anneal-steps` - epochs at which to anneal the learning rate (500 1000 1500) -`--anneal-factor` - factor by which to anneal the learning rate (FP16/FP32: 0.3/0.1) +* `--anneal-steps` - epochs at which to anneal the learning rate (500 1000 1500) +* `--anneal-factor` - factor by which to anneal the learning rate (FP16/FP32: 0.3/0.1) #### WaveGlow parameters -`--segment-length` - segment length of input audio processed by the neural network (8000) + +* `--segment-length` - segment length of input audio processed by the neural network (8000) -## Enabling mixed precision -[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant -computational speedup by performing operations in half-precision format, while -storing minimal information in single-precision to retain as much information as -possible in critical parts of the network. Since the introduction of -[tensor cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing -architectures, significant training speedups are experienced by switching to -mixed precision -- up to 3x overall speedup on the most arithmetically intense -model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) -previously required two steps: +### Command-line options + +To see the full list of available options and their descriptions, use the `-h` or `--help` command line option, for example: +```bash +python train.py --help +``` + + +### Getting the data + +The Tacotron 2 and WaveGlow models were trained on the LJSpeech-1.1 dataset. +This repository contains the `./scripts/prepare_dataset.sh` script which will automatically download and extract the whole dataset. By default, data will be extracted to the `./LJSpeech-1.1` directory. The dataset directory contains a `README` file, a `wavs` directory with all audio samples, and a file `metadata.csv` that contains audio file names and the corresponding transcripts. + +#### Dataset guidelines + +The LJSpeech dataset has 13,100 clips that amount to about 24 hours of speech. 
Since the original dataset has all transcripts in the `metadata.csv` file, in this repository we provide file lists in the `./filelists` directory that determine the training and validation subsets; `ljs_audio_text_train_filelist.txt` is the file list used for training and `ljs_audio_text_val_filelist.txt` is the file list used for validation.
+
+#### Multi-dataset
+
+To use datasets other than the default LJSpeech dataset:
+
+1. Prepare a directory with all audio files and pass it to the `--dataset-path` command-line option.
+
+2. Add two text files containing file lists: one for the training subset (`--training-files`) and one for the validation subset (`--validation-files`).
+The structure of the filelists should be as follows:
+```bash
+`<audio file path>|<transcript>`
+```

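A minimal reader for filelists in this `path|transcript` layout might look like the sketch below. The two-column format is an assumption based on the LJSpeech-style filelists shipped in `./filelists`; adjust the split if your filelists carry additional fields.

```python
# Minimal sketch: read a "path|transcript" filelist such as
# filelists/ljs_audio_text_val_filelist.txt (two-column layout assumed).
def read_filelist(filelist_path):
    entries = []
    with open(filelist_path, encoding='utf-8') as f:
        for line in f:
            line = line.rstrip('\n')
            if not line:
                continue
            audio_path, transcript = line.split('|', 1)
            entries.append((audio_path, transcript))
    return entries

if __name__ == '__main__':
    pairs = read_filelist('filelists/ljs_audio_text_val_filelist.txt')
    print(len(pairs), 'entries; first:', pairs[0])
```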
zuFq7SDy|Zpr;NAApVB}*HVo2E5fxK^F_R~E4N%n`JE`6FIF6NP(KQpO(+HrW0@ZlH z!*zUvN+i|vnnLe~C2_0en*3eaG#WN!5Wc7Mqm6VZ_f^;i@pzVgyP<{Cnosg~dTO-c z%mw<|ARhhB8gRR`iFEvu1Zwtb7Rq@~=WpZv>1R>aXvI`z{{Gw@bedO2_k8V#oDqBZ z)2>r+#IleEJyn8T-{*4dZzHPYJ;=>pFXrEZ?tx`-JEX@p;1E%6(>&c6=zc?}pL`(S^vRBU z98jfyvX8<3W;s6b67cGrpD-Z(4=x5A;uo$=$CY)PsgdOg%yzzym$mPJmJ@;9`5b0? zRWp0R81Cy*2g%cHX}QfA)X05^ADfPXcepk6`D}nAe0}-h{2o{m>`o^qYVa*P1pG|3 zIo(yDNI^Y-%Lp8K+`BZIS>i|c_vP|+Qx9?d$j-PJNGp=qD95D1DQ&!F7y{iLk z6Zbu3_DS}6mMca{QklNRP5Y+ zu$~yggLF!9`t>99Q>Gj3J(j|^ADYIS#l-CX1P9Y+ufOuR^c~on(?sRBCBx1?C0MrE z7Y!9n==*zVblv!VrlhLEqdX00gIFLPQ27UCfjW3PdpB?Hccaob-_r?CYlZnskcWRV z!Et|O?5q|W(%g8d%95#FO#8-X+CTmoDnJx(wCDkec}mc6pb_<(r-H8jSgco>!p&O~ zSw_%L=xFH1IT!D6gN>oKmo8+|FN-?yp-e1Sm~;VsbX(v{&`O%yuER`ojRs;hE)MAf8m5ne@5|nrSeqnL>-NnFS6~r z5zehWykOLW!!$=V!PeYv9j?xg2Kk>FFiv4FUfj$x<#oX-wEp=GDz12h+5j;oY#>Z3eXoy+j&iWb?Z2m=vzZ?#7mHRk;?n}#_;s$!;~35Li?05zNWZ|jo27!jrsdxhL4Y+m4s7Ho%u-hf)VkE7qko5oOfY^6xt;v92hC zu3H(6fAkyq6T=T+ot;G;x3*yVJAHn5l_Hc6j{&VP6Cg@8M024AEUS$omPH2CJGBu8 zYChN+*Sb+F%VG4Oa{%6-JOFRxYas1i6Vv~xLjw-Zgy!r5^jhx6rh1Kqou;CUR^v@r zG|YgHizt92E)^i5+KA7dX<=WZKYm*siRtYMwD7_S;t}G$hRZ6e9tj7Gamb~GTDm=MAl9rY(po?m1@mKIBZgEwY zIync@Yd8Y?9sgqT(=uj#I~c~Te?dm|sDRz}Ma*lL4?nZ>g3xj5OuE2aOVl@-jLTjm z!ZFzg=&+=TMcng&2>MTOI!6&>7LVYsla(;6IfAZi&W0Ji1>9X+jvrU5qm{9?)Mb}M zWkGl;=10qmL}vaZesKdoc0CdEMg2t?0Yx+^Hit{w6f$-Nwm!{#TSv*2cA{y27QNXXYVp^I`pZb- zyPf`!@x=hL(-h#HsRrwBeTH_zUMSo94C+e{@zqUJc*p%jy6<8>*!lhDQBtjV@#|SS zY2`RkO6ViL%~1@86xNac_m+J0*!P&N`xLi*9KqK|--M}7%TfM92JdZ-!2X3l$ojN! zY+AM%7Z%BJ9rb8@wBjBd={<^T&xGT%EBoO1`5p}Uqku1l>QEgCPqroLBL>CXXFH!r z)A5y3^!2=tD6vk7eKYr@U!}^yS?mZIm?Z~Cjs-x;CvmFPc9wj2?hUQEHK4V{mGR#) z)Kg&)3YETx{KyiyBeYZIU6KCh(x5bm`%GM}0b!Z6okJv&dF0h997hmw!z4Nf$pq-5TZ3M>W zM^okH|Cnm4*boy7BkoSb!c7zSC6P~db44v1SGF7n78&ElDX!%B zu3RC`RN?APrD)xyOZ`{fVP?v}lQwAz4CLm(>G;E>#3U27bLMb6mQuc8l`cJb^derm z>dHTCGa}FCO{Q0;{$~7 zxWe(p^va}Lw8#1xZmD0yqh*jS3ya=54U>w4-bl0!N|wDO*I!~z zSR%3mgYPjy&l@VGYS@}~D{g-{gD71PqYox?mu}{rF5Xkp3bj3#8-s&i&WGX=(k@c^pc`0@A@$XyR@g$ zFKwzce~~xNt!>7IyUu~x9$V44WvATRaw%gc)s@_KY zy;ziE)-R4yRmj=J;`|#jqBz2zHk6*k?qd#IO)r#wyBSY!zEt6#($%rgp@MoWJVH}@ zn)O~eNPDA>(85sBLelabX6qcLF1iQlmvydKC^Z~kezc^c^-4i&$V7DR7lZ4mD)jf~ z&%~_60YCHMaOvP|xVW+eu&YyOvi~P~r7QvI&I|BRc?2)E68)!F3&F*55sd42gTJo1 z@CSMW@XRrpRz3FROJa`l{W4pqmD(&iY5gL;CS?TgZ1tuEhF&yL)sjDye1$_LQfZ8F z8az2_%v&RWz~LLlbfKg?|FPf|`*(d1K7KwYSdspUU>9!W+1 zYe$;?p%d0^bKxf)<-w}11`I^=#DUfnVUMmXSC6y7zf~fPac~IlH_qpG-F1n|*HJW~ zX*kF5aQ<-nTHH3I5{loR$L|q)_{4pO(fnfzX#cv%Li4TpxRt70QEe}5fC8XPM{(6# zF1+`H8Emv@gYq?BF!x?FhP~PZin)E{Wk?RB&y^F7yjP0D4*vnkbq2I!`#a3+yMXtd zGUr9Y(SDy{netf-&ye853O1sNLn%ZPUx;q9-_1NR`7(YH4MjzQK(jOvDSkkCfFr-z3 z#`L?QrkgwlClrI%^7w2VryWgdSRwrPEsi~&5&}Phx?tAtcldc+1IBa}!de+EL9pv7jJYS2S{GVAN{=tEYT+^-C%|h-B>h|I!PgDu zh&q|CgIJd{ts5rBLs%}COdUnughkL}v6Yy!IGh);a5y`OQuI;+{oRO4nkSKpJ3`e* zLwYq*2@Kl0@Sogxc$H4+v>-dCa{3R;T>!EQG{mkdx9x>B@^{26KU*UW$H~W@on5eIBx4h!z9~bcbEk>AKD8k+FNPT6CYS; z*@T;H#p$zcHngCs01YQU#r!gNSoZoI@!zM$M^+f~|2{@S=VU{8HqC)c76oyw8|(4O zrbtNrd>;+gUf>R2U*pL!8))U^3Z~X|o{v&*!j|I;DBm-j*ajr?_V_$>2wgyv-x-3Z ztvP=(Mi*bta}eR)t5KdUo`&AcVOq~dejSs`wb7LSu{tn1(Dr2Wx9^%gV zIRvdOsN|hA(CQZB;Rg?+W$8@%WUB+!%iE6;CSlmK&7Rh7F{gt*RTbV_CqkD_J5-ea zA`R9Hf!qBe8&tJ1tkj9*8P5bYqYJ`Dp$dL@a7AFD)IxsiOlGw9LxsV@!)%6oC)?J$ zo|t_tVXcp!;wa}l!Pd`fz~hxHy8YcszPP8u%|UMnx3Gh`zl%wt#eRtKjfJ6cR&Yo* zid?vL8dAnjqk*U9!tC-oyl~_JoG*5xlRp%Z&O@`%#`Hg2X&OSj2WnvAuwyv7Uzz`n zc*>@)YZFXKNyj_)`|!f!eB8N63g^A4Wwl>R@J#q9{%PuW+|-zin=VbmuD^BY-gg^^ zbms_vr)-4KaR<@x&KC4|Kb)RDY6)jeYEVNg zuzbRGEV+N*7QM{qAd7)g(Qn3yj{q};_mFj^A6`qfF=1*B*0^TConI>SvtPc@ujD>P 
z4d_Ah=Qki(qQ&mD-^O>{uUYcLSI}1WMcA=)5JeOc9%uz*zd)Oxb)*m!QN0Y zdMY*!e!L$Psy{jkZPPx1itjM0B4e=G8%uhjVBc5nRhp`B&5igEn#1oscB0)6q2t*pC? zAHo#y*7p&ZcV2<(JJg_5MLN{irh-%EX|(Fvg3}+xgY_RB2unPMG2@akpt6C5O_xQl zdjxG0ro-Ghs_^NmEoSs2k*19x2 z?IxQk=Yg(z;`G3wVf47{Oc}2WZT{ytIa!tkFzsf+O%U<4>@|c5B=I( zaH;uC+g+NIVB4Z;7<+vT-(q2hWvdrKN|rxPIM;#(rw)-u*N+0zU7$7Hz_PqQyvt&StP*_o(fwHd9J zNP@dgRfT!72|xUJEZ?{Fu+8=sO?Vl54$VGTa4!jMbT7_?DG-jI_l)KXdu&i)`fc{| z<|~mtK%9Fh`QVS;fPxicDZ@0zQ?@5#~jJvYc&i{~uYzl^+a zsR6l-PT-ny3STZ90^gV3hHq7#cs#oe|0olZ;j9lvmudxGp3Y1xUXPj2S|J#HZ=pca z?sdictLI6i>p0N$iG#OokI4zYVMNj6IBP!RDJ*+a4^rLHP^cb)(NqClm#>1q1s0(9 zc^*qPvB%*lCSaIyA8w?K#$cmUxG38g|4p99F6C%oX4*FP=3XAm{!s!;?edtK{S{_f zIv;wQjRBPR3YICjVBGFx46F|%AAipVt9MJ7|qDTg3X`Wc;n=f*TVw@k`8Af#6i2;4-no;RQ3Wc$Xq9ENo$7+G4zQ_aC;~xgPQ| zWl-R!$d_5A3%$KoKydLO%YHH*)#`moNae?hhj*5s(t%v|XTt?XFdipgI)!HLC9HdV zEVCBQ#h1QA@zQfGHsxLhI?YeV=pd{0glNODv0n`Dn< zB(<0}oX&rH8hw{NAb$I%Qis-lXjMuVW=`KguHBkU4?HlYt2)OCqtu_m#^uZDxIqL+ z9)m}sB4K~$E8s3Kg-b)lh^kUH^!@c@pB>I%&li0m```}gW+QP*iv?Fr`HX|AhhYBq z4Aj?J$^W%pL$yU_qPvD+)TO()VNMgywy$T)dL3}Id_PkAYP@@FkO_{LuxE;v*fwVa z<}P|h&c3|_3NOXr%ZRaTb8HCF^PB~IOFVq&*Mx1A^=!^38SHbu3}5nVz&TqRf9#gV z>?QAEj_xM-7m|u-VZeq?&4XKI4aEK0cq|ZHA#QIGbVQ4QOHshiUv~wYp9w(fuClbY zG}5UnMVFuHBf)17S<)MF_e3KoKDA*6|JC5WSL)Yr7pTP4tGdM=kZ5TN_0hVr*#3#LOI3rpE_6C)bn({4dU7;*$h+kng z)4Eu<^?Y3Hy8y@Zz7&W@WwFPGmN>4hla*NiWBUVkGoRN#nDVqv)>hF}0e1*49R;{4 z`2;DPF&EYpjuJ&6n&9Kc(=1R!0R}bVnM_g=+ALblCecHnGgKBf?~%Y(*?hMBc_AFv zDS*oo3z^x2m8cQkOm?sLgcphB>?Rg6Of8#s(y6?vXtP#^mdt zDeV5dMnT2aZ9>>QnZ%9Q%Gk4N)*W-1X|g_cur`G$^DGi+u#wgM`^An0cQD64#;CDP z6{F{m!P{-dxL3v#wNIzvqFr*hXr>Odu9!un@7QCL&SvKMZzg$pA(0HJ{m*vF)m`xD z&QtLGP!6TPw?X?m26H`LLDyd)B=n^~CNels>jFXX!-e_DGvMysQ;-ta#pGYl$2)JQ z!=tu`@I!YPj;b8N12KY~pD-NEPYz;hj4AK8SdG~`%2>dL5!^X(GXE2sj@LezVZnI~ zt`Iej`<+NYEyew)w^NEctRBT_RWlyipoFEh5*RV>F=pKQfaV9x*rW@sWXS6nwr<>K zk`>hi?~F@e*@jTK(NaiCT=L-RW+nPL^aJEAdjhjg>Ck5f6zH3C6|lcQ4R#cdpfb6- zG$5}SPJhdWH4jJ78E>`d0uf(4Gi4RzRPTT{&O>3RV~g#PXlHgeqMq5@-_Lf-m6P@T z7UXTXD|0f)XVn@TNPl`GaoCl?q9&K%W7#kqS>S{XFO2c!!zlbPum*d=j}oieB(kw) zEM7h2j>?yZqJ%~mi&v;-HzsN$mv>{PThjzLn*OqClR{e)YZXXaWD2f%iQ}Cd_V#N9Nw$^)Rab3NUiF?V)=hwk)55{a{RZ*v4u(Z)&*10UKV;N>1*m=4 z50ZaX>HD!ZqC`$bTr1H}HaIsxUG5@*@9bW-+Aj`@^xnX)ad!pX%X4u9_UTMO)QH8U=Mr$f6kyqr-I%6x1NWAtE{Li)=$RP?*r_fVKs@()Pn|RU83EcO#;4H!crp# z*j}b2s7nkK&9_(B!S9Dj$c0tJxH6BW?sH&LuOE_6qIY&yrwpFEqmAo)D44F70P{gj z3@O`29&eip8PYZ6=BhB8Bh^QV)(tUOoKzvG8z^GYbqVBVqX$eqYDk>TH?vvVx5@Ri zFnH;^opcTp$MfVmdF(C;9ihv~Rf&@~$SSse$1Pp4=)Y8jbp9~Hv9HDWlLte&?)c3Zv~w2g9o>X829$Ud zACKkL_lTRb3Hm(@!s5AR5H1r7OH5zeibbZ8mJ>x43Ch_OQ6YNp!&S6{=xzYP%y`IO z{hsI!WW$Ad58=>z5ti~*Vo^q2q+~@ooXZP?=v&j^&ZZV{yFLIx*AZUyCBW->2-gHZ zAvmHKZqA*{#9XDx7@Ko2{11nym*Md7OfsyQ(gX)`DjM}T)f5Rqr;`S|zw0ddvsp}_{NOjkR971p|B=F=@h7mf;|$im zQ$=a_cIM<`}68q;m|*n2^T?zncmJUIa3KJ9#YsvIx5h z;?TNS4rguH&)U|HV~G!c2=ix;B@P;)g8x3Iv8I!kSrWUC`mgr?a=&j4XFt z?`}-2t=(brl4#aE#t)?jqOqq$91keI7PRGVXO501ac9{@l-L-QG|A@wct4L9)uwsuy;-rfd@xPni+%9hkruq`)Xog8q1o~=fHzHcd(ds zlWp~rWpStek&Zk`xPC;U;{J|yVY=;g^54=0!WZ=?@#&7OnEJ89*5koTfwbscYF5p{ zO%dHp&*&!mtG^TD^!H)Ler=SwFo*4N-o?iLmPE_e3TEA`Ce%z6vTbtHn7-0i!PtZw zWa9)W{1mQ+g4AxIr*0nGU;ZC^6X_-_*L1;#^eUE}-9QApACaGJn~B-DcxLz~6!&j; z#Kr^Xg->(m2sN)=W^E?*ch-u#Soi%7K-LO zfQfqtG2Lh9BVMJm_XuA8L)weYi!l0O0(?CGgwa0TH^KE z4g7D7hoK^CL8+s*uuwIfblj(c2ZhfCn*N&bD`6&FFRUketDM->Juzg<*cxcKT?je( zzXiV^if&a2br23ofZs|ZVS39jfpvQ##Oyr@hLW1EXHxVXNg?sfB5*#XLO$Tj`i%20l%me z<})DTlx>|@*prD+)wvX=l^!QcK23$=n%7~2S_I@8{t*XkS*dw!=?`V>H6KR`gN)&w*)e9^w-9%~mBLvE4zlw`N%&g10`D1&L&K&3 zHo7?$gT|EN9|ubu<^m||U5Ep$9No&2Fst|pi?Yzd_8C*q$*@{5zxV)2)-GlH@)YsF 
zEhiTDVgwX8`H-2H^vUN7$}nbqyiH(e0f~`}f#_Y%5cr=z?4RrjAD+j;Z=F9N(JxJ> znLL6^hg)I6#9r{;C`ALM2jO?UEdAM{Mq?Z0XrR_BaD4Cp?D#EMdaf1L{)&V5?~lXN z_-qImmO$9O>&(_P)mBefWnFZ94Jy4V!O`Z*sFi!0o%FB3kn~zK>afDbgn8)d5si~_ zTXFr3n`r4dj2r$i<^`KabF!}-M-}Rr3ogW@~7W`L&iXEXD;U9yEc+Nf9J{%Qz@{5C#n|_d+;~ zfj>Io;B%?~o=r)E(Q_Ar^ohCPl@$$6A`RQp`IHJd2>yai&j9zvH)A&GHRgpbjU zpsbq%w}woD!;@B!_V_-V$v#(vU*Aq4`%ad?w~{7sn79aX+s47?TN!Y3)g;&uzeW(W zGofPH@lIkm!k&0&dN4Xpj5Qy+C>RU^vd_kgJ&^T4&(mU9e7%J{sxc~fbx$Q24XTk&Ys_^DoeX?$e4ZCV7 ziG$9QaAm$3w)>djn+T6ZhRL!tn1&xbU<$ zDj1)~m!{pwb)(R&MqQvZR2{k=YQyZV`EVumJls{11S6?pL1hhvT|;W1?aCEU>?nf; z`M03*TsP!-SA%3wHGDU%f%sL;uyxsexEogtD^Dnb!5bwQgz-?3WDW@~Yv9z9u^?tq zOD6n0K~|m$6VBIPPue@Dux39K9Jx3Sm)XbSg5Wc3UD0s-1#Z`(-T`zIK?-dgCgZ4?|>X9wloE=1*XBI(s! zN!GVZLg5`N82x-LnDBcLGP?t&EtY`Woezk@vvkndnhT#qnCa!2NAORo50V03!{(p+ zAmVx)bZv=-M`ss<*w7!OM*bW5GfoTSP8q`I>m341w|JJI< zm4v@!`q|G}+IAc4S~dy2+xMaBT9FQ~ekd$nGat79SVNwSy&$}*!ib&tN8zBS12&GR zVnIX0SeYwfCH9fF+iu(?f;SY5uX@4Ez4c^{lL<3VSb(cDZ!?Kqx5k(Bc_LMnfSp4=fWlE36?Kw%3!n7aWLFsVjV5sRJvf zUST>1Z?nmH5rT7I&8ml3p^Zf#Zh2Y7Y;>#GzcZ&%&7>GRJ@nE4n;R-Aufo+!mf<9w z?aaPGhu9?;pwZKKG?yBIiIpa7qwclK{~p$p+>9qA$TN#pGr1P%{sC~Ij6hp7t-uTob zT&gl0E39&`Pr4k_T@&!tQbXKrqa@-X^)aWoj%hki5xhw0Wo`|Ja6xARK8~7)%d&ED z_vrWNzabq<*3__=x29N8zXHRLxv`g0k;JA(L&&aYun#}tNten&sPIsQJN1pkY>PCB z+%g0XUY-vJr2dgbv93@wrU;ULr$UVCN+?P;h1cg#!a3C%_%%BpPHs*E+1^CheJKs% zbI!qxNGBL(zZ?$U-UzqatBL)aYQY{}N&ar`CA}B*AxK>2pTD(75A zQB^kn5pl1Tdn8%P>?2IwzDSfYme zTz?AtnoY?kwcG6JBQK22yedo!3=zyuoW*RfSg_dGK5~1>CYZZy4D1;?3f}$>gQ;Oj0mtbg$=R~o=MzG_@eu3h^v5Ge>0m9zT zHSCMtLiYSdzs-$iM z3Cm+)%ZgL5v>^pTRJOvaAD)me+zs|67s3C=4M=@`6S598!QaRlm^LO7T6Zmn(vv0- zIUog}-j9X-eNqBe&>{@b)gmrh2~0&o4|}hl#+xCr*fDJZmWqos493a0wLc3tnkA#Y zokz>S0+N|ZlW?W1n%5e4iD!>6Ndr?h)H*arPj6(p<_Xe19p+L#ik^$ zcqq~eAFSRXi?_;#u};lQ7B5v^lwQIR7xGfwdQ*PkTOBopYXap*FIH6Wb zJPUG`h3>0I$@%vW*sLNe^cu5aoh_F?q$Qd0E8(3x z$c!+YbRf z%|o{Q#zefb#RV_S>0}%4dZMJ&MvU#z!Kh8%81OY2=NF&A=rN)_qC*~w4~b*XV-neV zS#6f*TgE18g2#cILD{ez%#uF9pz2}R8zK(@cU}{jUQWuAmNTh{V9c{jKu^h&xJ=~v zb&Su#pVJQE*n9_^bhM2*ocqZt79Phs5%%*Lb{1onoW+G%HK-LAiKF{^nQ^@V%65&% zRH$6co3}d z2Px6>KY5=C9}A+Oe@hmWB`$+e*M~se=*3WZVj7q!O@y=Fv!H9Y4QLOJ0r9L&K<<-qeBw$Q6=?fYVc5abD%ab$kPkA8PMipTDoJ1Vj>V`q{%Gt`bx0rPNQFiZe zLB+7FPNC-DPPX?|mOwf%kxbeo31i75cu_SGie8$CcJ5Zt-{b}-4-`ZCWdFE8*Q(0V+=!i48^yXn5ZdUx{zUWYZmJ-Y^fvW@WIe2XfeJTUq>N^Md7FKE`zV zZZMtNk1S_&i(sCS5=6*l*cOE}3GBbhLgfru__E^xarDuE!P#~&@M8{mFa1RpDvB_} zM^`AVenQR+k%y6|*TJUg6X3Aq8ZZyh2AO6Tu=tV;x7Ck{_M^V=u0|6=F1#UYKBx*F zy9ElRN2n5;4Rq6vTXID3rncE8Zpdiz!fX`bZ^|oN>*o{c zeFurS=M6UDw;?%ov4!aB-zU{+-T-Mk-~gEd0Vl^mjRk@C&J$sn{|-_x<+@<&vv%86 z6%0q=b@QsjSOi4TDw0P$kL)wPhYMAFp<{ zX=n`7_Bw3)AmWywH7SmDXy0ICa>uiq{&lQ(M*#ksHx`Ym&a;SN6LG2No3y%w;K8r} ze7SEPwk(-~cWupZiLoAD+WeT6{?^BR>Sidb7K!yj5r0{(iSS?pTd?_)tswcf;PbM_ zWMY)6X!b567OTIJvl3&$`@0jYh;@V&4ohJ0_ytHAdk+?legpIN)PmR_UvT|+5Huc4 zho8GPfqilUj1@+}{h`qiv%&$^`ahj)9aJC`uWu$v_2OhZqqBu>FHmmOH*y$g@lxT5}#1^7186HV7EVMmfZ zQ_;J|{LkxQ>5L#Yzk3yHYw2WTDHKXmhHShgo#@}7b?FQWgGN5 z-q!R}9Z{R}hD;9aAw$2uC7L&fg6{{Bj{c7rD4gmcl}qacms`n zTgX`JM?}XUj!3&I+PKqB;oke%!XNeO!f+c`f|eJE_S9+2uyz*yegtSWZz67_wz%zy zJTB@MGH+GUo@L!w9CmIbE_@NrUJr9+dW(&Tcyyv*!B%xL;`yYCsmCM9$A&MYb>JDf zdMTJ35_usFn|xsUv<)D4VHfoL$%ko8?eN^F9(G+i53OF&Fely}k}Z7T)OJlU=oB!j8`Hm3vL}h1OwO^7$(O%m#X~AsOvg>8nCXk!-zVXOg<%zi!)_5089+`yKP}ky zN8~Mk_mS*fr*`@B)E?XCM?=Zgz%raYl}cj6x4vSHP5QW}+#bv0 zTUkzCm#|-58Hb0B$63=}^acuUh!Ae5k%A`J-S4otF=Rc8oJ5D=&kA z3z5*cNgqlEM?r&)I_SUYCX)O<=^h(Qp6z%ja6arSs9q=_e$^S|nTiUD>yw8SwOZi6 z<0er`x4aL;t@>D7)hY%}cjH zpUq{kJS~KZ)q5a#w<)L{+Xwe=pN0j2hXL|cp-S+Pq?dINf2$^PaY+cfZlcbb<}D|G 
z4rEj`o&3q1)y)KZwEybp>zGC}@SEHoX+hmc3@(DphJF4)Dx=if)+S@0(4eMG@qFdF`LTSI=> zWbpSoL`I4BE}lvDBxfK*5VN+JWW91HzK@3q$JGOy=U)QVZO; z;Wm3P_9^?7XoG*YiS|XmWU+9)3r=+yiUFA(_`Tv3-f`*0%oZ^|(H?PDl?NuTu|Z*V z2=4g22Q`%UV?nJD@1<4Y%NZTGOmzOmU4Ixqahv_SriRNROwjF}297wXfJ6UXVcVPU z3-{?SA*K(1l93{g*S&QHOw7JYta>(+HKB5FXlDWR8)U$V`Z!p%EeFmm=P-4AImj<7 zhpeS(aHfI7yq%Xpr+N|eg?oVG%>-!q6A5N@2VtMbOn7v8JkX*4fko(l;PlT1+G|(B zXhU0wtTuwS_7W0TKa5WzOy?>$a6#7oj4__gg2(uO#^=ygmKxVH+AZ*{`pVrNtvJq@YSOuVJ3 zf)}NS;qjK|EY-JyMNB!(ww-im&Xt6DEuVxZ;%~Dv=JN#xr6WjynKIdAn@nin88SC` zGUz`&4%2=yoJr0TBi?4q z$ZHx#46docyvLFdmzGZ4+DF5_6Ix(sJQ_aE)qui}%i-jLJa`&g3nSmwL)6#T@N(W0 z*d5pmZy%L{%7ZMZ@LULjb1v{kQvx=~-;nd!1_-t!FR%xeT+ucmeQ@@P)W@)JZvIJL3pTTvF|5J3{@l^eP9GAT+D=X2k z3Mt~A&*z-ekeQJtk&@9Qnlv=*k&&HbB_WE8dp@6YP6KHmp^|7yX~?&Q>-9Q;i#>d>$_y8rSvi94dQZmV&MPpbl!LebNyA5eoxo=uJ@87cKWL!L z5}(*rfR@Ji5IS9t(IY>3Ea54Ki|=NjrScl!uwyS^gmd9~D}N}qFcK~`j)qy!M6<-h zGvVfxX;5>j3dFKYVbr!rnDVh4dLABt@(CsB%RpvpJQ0+)ndpg;K#}il(8EX1L9(B*&yw2upgma9blrSKlG4mJdtGO57%-y7n9-2q1Zi4`+bpu$wj zV`jG0X{5VR30LpxV8m`GgFPeGprkp4*?CBS-qox@eFu)AWs|1kPv#WfTHBAl&9*=` zKf{nz{T9J+&^aP~%WJ`gFQU-jDf=0pJU0Y2T2XD$Yve9o#oV1RozYH?0nd)sf${vQ z@PT0nTu`tG7EDuwqf<$!`N0UD5Y@P=$4-L2(F-7P_yK?>iC}iDIdskOgDp$jz}$C> z!DPw<6q^?i<j2p*EZ$;w37bH^7QL2GFBrIo$YkF`Qf! z3SAzkKr&=H5Qu6mHrE%%HZFk!X5Ya*>DeGaEF1_ce84Kpa_gnOKZWt5$B8zreZ;%( zrv;{e+lh^Sxxn|rKLUS=Lc!yoQQY`N1Z$0P?_7C2JhT8uCr-rQPyRqJMl5hauQxWD z9fCtAdf|M%1Ne(>95z{Z1YZe0ho5Xck57ap;4OnO*mN)&7caBMLrSu^QBDGF5Ect( zJ1L@p`C(}CygTS$krJ-#mO!&-YFMwDQx4WFQHLJSX2G5s6wK#At% zKz~*Pk$S&GD8e)ab7blniF^GhlA42cBp2W*&7zsWlqk`DiNd?}tZ~vMV?05z0)>e* z^lI-M(Ykrh(3rFa-o0`N4F?=Ulk8aFzupp-BNCnqi-pK&7ktyc4lcOx{~2#2RMS5Q zU#!~!^*zHN^KplWak~o&kH|r1Ukmu;&@FK3s5zW7wh7*dGlWN1D!`CS=fF#wQSkKG zCZb&h zJ3NHSFN@*0HcdQ!#2?{mdtft-1g_fy!0YzUz*DOgT$+6k{Fl)L6dstucSk5F|MV}= zU#tQD9EVWvq#`_j6T+~K{!sY57H)NV2Ce)bK(kl3;F$^z8rsFf8;fG#rgbh*X5|XV zxI{p^7CTtpwH4lLj)wuAn_PCFDPQe%G)TrMFVe+wZfV~BBm4UM=et=z`dh8K>79>U^>qhUJ?16 zEdF&A7HKYq`QHp7KPv>2-L4 z=LYPPaT>2$z8%x&3~}gxTQTQSjhEib#meu}u=c(fd@+6x)-yK3HR@CF&>SaxIQ0OY z;2D6$R!QP5&eNFJ`IAAnx)ijpngborKLoiR`@qCc3qW9R6;aj7f(KFUVER@mco;PS z6_c%uGE>7$UcFDKrlyD1OR6#d#!HZixM(-LtZpF)A+*qIy`YF|1X+0P)3gs4o`@7aH$`MlE6R zQpiH6>|q46^#q{DFA@kxYr%NdXHdIBwEsA<6mH#S40|PO2+OC780B1P6rMC2Ki<`j zlJ*rN@zxh;?(~~z)zu|votzZDYI_5XJd8xQ`+|^b#}(wBTaC7Ulo#ze7lRvdzUUCY z4DBk(L6KuK&}#XMgr2?_({eCLNT9VKP~sY}^;3dVR4m}=k1cTjLtA)s8xO>mwE@aZ z2*|7jp!~8tbnRA#xn348!?)ALH?L?hq!xCNHbrv}og=Q1ZJ=IT0^V#>gV)}#goM&Eh>QJUSbiA%t00;yA4-5* zFXq6A;#2T1R|ijPWW$Hz$Kc(EQSj^YIOr_!fQNoa!V9Xh@cyI+puceqAvWn0ku{>k zL@Sk}=?B%Z@s1&MF(4GBJscHiY3rb;SEZSQ>)MEyN>ze+kB5nk@!13&-~gsl$H3pk z3{%&pE)+kX3(govfG;91sns)P!k@m8KvU-?NKVNH#Hy#npQCZ$d-qQ;>J$vF&Bn9<@%t}z;C<9-oo4|Q;e*jl>ksx0#GTq=;&xks&l@bOC)=@IqZqgXnVkX6*G}1P)1Bj5RhW;OEn;P*Ybs5KROzH@^*7FlqiixA#ZZHB_p^YC_k zBJ4PjBhr|a!3SIdOc(7G^j2(v!RmoftWGo|-KPm(_$a_nO@UDIp$XiOGYx+BUJ9=t z)_|++qCi`30$Ouf3RftIVY!8~@eC0&uzune+LvnT4{$?>QEG)p=r5bVY z!FKH8h4C8KOPFs?!h0)%aiyCJo*O*_H;Qk-6DU2ra7QT$rZ*v-Uq`H6ubUHnf44Kl zvj*^IpE-CHlt(-{E(431IdF$co5;7|l8Cpj1jqIJ!B8y+mKer?7GkY+XX$K|%w-|{ zmR7Vz!~nP45ywBY=HkYX`B-v99j68D#1dU6u%LA-E?E$SLj!)HSNa;bw)idbwb+65 zZ)BjN?jVNm0fdq3Nn%-m3OErp83(k-vkHd5qUsqiC^iNRw`+iB3ojB@ zr3Zl0dIUN@ofXYN#lg+A37|}Q7vuiBMbNIAz-+BwPE^)@AdFh;g>75<1vV{B;J)!J z7;ioawys|b{EI|w(d`sMNuUdg?w$aB>W@HCtt8xkasr&W>K!(#xcmbx6=@D`CJ2c$A5VZvjMiZFYx+jDCl~h0s=oO!q;!FgC%zTAT7}pQsNuntt)Xbx@a3@Q+;7%^Kp2x z(H{QSvlnX1B*8b|PC&nTNziX|G#mjw@VeV>2q&7u*S}PuTkt1f6XgmLoev?jm~YaL{CKQ+m^5Ak==`O)S_30 zr0YjTJxMCs7Gx#RRg7acHZMROp&L-etR<+brGd#Xlta(=O+XqE-y4U&m4LVj%|x$u 
zEHIWi44(O&0N*}wpl{_%AQn0s{y%N{s)$!_6&i{>D`erX3JaL5vlMQ*a0J?44}y2k z%!fB?Cc@1pI52PgyD-k}Jjx!wZu z8w}xu4{EUVgDlLf=>bO02f@CVm%&)iOAwGqf@wQ~fKHGpcw@a9^jzSGXDiYO;zN$h0HOf#tG6GD@=>xNTz5#pBDbUSw z9<&u{8rqg7LEWTuC~lGiGs2VMESL_1rZzyUj~Kq*dkbdXt%Yq*F2RT2a-sUHWY`t% zE85+-Kr=&Y=;k~FZs@gwH>VoH1feCI>OKneSt*d}C_yZ}CWiCIEOB@9Qk-lXiu-TH z;`8iz>|EB0ucn{I7Qzu6wM>G&Ki-S`_I<+Mk`M6f=Ph{Eue=h%M?TSD`Gl)qEZen?KX$|e>+3}&`vNs+0RG7+Lv;&7&@3zIFrprb)s*TRq^aon~-avKY+FH-+8-E>KW=3(!0Sn6v(TuSRVeeZ6fUk_g{Mj!#t-@u zaJzm2esg9ej-PQ*G^1LM59u$%_p=PJW`oEZ<$yU3%XGo6C&lpGAM)5?_!Rnb?+(%S zzEh~AU<54mx`FtGT+u9?2W^qBfcb@4P$Ty#m|jr>D&ELIaB324kGcw8rOpH9Z_a|I zP6c=GzA^Czd^t(X{4PEVL@a-I8qh>vrvX=pCW~w(D>lmUu%S}kP zZ4FWuTt|Jh$jr_$)qIz6=N@D|iPQKS(F@d)nfP7=pL8-Pz+9!6Egg{UWTE?UapW@O|yf@A;k!C=O3aND*Tyis}# za&Mgj+x~3_9(_e%WymK$ep(502m!1RS%$XYEpQCSLw@dI7%09G`mYUvQ;tT#BfpPB z+?fj3?23mMrmTjy1O4HlJS#Y58-(*GWdT$9^S~}S3s`luDh(C&%o;A2!#PR6YOv5k#zC;cES5WD|cjRdB z9Mz6Dp&&XRSsBSNvKy*F5_=Bl7^o6YU%eEr`Cdk(=co}SwX%Z1IZ;eW2?m=adO_;E zB_L#MD`5Y{fZ&+bAZwT>3L};aGN0xFNt0K=u;LOJe4Gmwq;4AegS05w`CBOBc`L}|A$VVf5qUtu|#s@{N% z@|AEz_y&Ap-gfMyZi6$5R^X~lJF)AL04$%o0XIzx#Krf+Fg3CT-~O76kJ>N7>MDyw zoN^$RE_cJrQ(f_GD=!>vdJ+3H0lIh0gS(P7 z;Kp?e;kPw&;Dj=3XkkFY9U6+zAYBLkIWz}K+02B#w;bRCw?%N^gEfr5PC-|B12{fK z5~k$MhC5?~;BQJI_^$Yq*se5*`CBv}nVvBg?Q35OH$BO-e)Q?6K<9)Gan`j>aAle~ zsNAbAn&aCD&x+3M+<6L6Bcl@h`>X+VNGrH<#2o$=YKqKVl;8_9J=h&C;&jJ@AmQKu z6`W(C|ARONaG2I}r&M zsUoGf@0d@>5H(Py!n;o@2hp{?!@!YZr5fzKC!LaGV~94H`sj7}4q9gYB{B`d)6 z`#iAsdk_BPeFNGJGH`gI9=tbS9WFYi3a5Xb1N|4+z?I=i5bcYAp_isZC&B@ST8Z}Y z4bpJ?F$%0r2lc=(ce{Al4v{F76sZ^?#~R{h?dPW2YE4TW5{4 zZ>+$AXjLo~I9!WWF#zb87h zq8PoO1EObMOT=B9gc9EApeIO|Y5jPENq(G#ItoQB)x~#Ky_a{R%iiA^LsxUOMotzD zJGr6i%fl$Q{+eC0yW;4?yd?c9v@o}S}uYlQ?wGmtts{>(k`#_0X zJ?OXf1D{GGfr5h*=&e}|R(0$Kt%k)QAlm|l_=*@eeF>QVQyY%fN!*6R5EL z1L`Nz@X{v(X#9_c1_rLMGsGXJ6!^jCm8v36ZW_E0)(bv(^Pom~5^!ys36h?R6Sm*4 zGg3RZF@+lxthz5}08bYK-)w#o-p z?$tx`o#)WtSR|3NJeQ$YVnL{cCQ>z6gMQyK2P%(JK&)pgINGQJ7m}hrr*0;+(%Aq< z*G0mwYl~o9#u-uFSOIUh)xqYk*|0G%8+z7d!v2%>P+hGOdaX`_E;F~n%kQ^B^~hz= zV!$3g`1KRqUhV=ETjzpMBc2F%UV;)Wicr?W6KJW~J7joO0`HWZiyi9Pk=pJ^=IO%s z==MW3?6X7}_ZLfI#ddRCDX|ovQD2T%UJ_+2xGu#ta%%Wpj}GRyH6wPQ5y?&!qE~Yc zqZW-qw8kj}oer3cVi_y+*rO6rNDJqeHljvj0% zC>1_s-iDZfLmvI$!=NW{u1^7vJ14?`v-f~wxf3uNZUrVk^kH0#8=R#*209MRhpW`~ zLjBg|FoX1lXNq^hOV50v-5*QnC2k0ttY*LleGwD#^c4s?yA~u3d4S3N0Exij+;vzSchfgtiTWrun7b7H75<%SOh&Wj1pzy>Wqq zv;yjyZ$TLBUQ1-CbrZ@ZuZfJiJz&4|PoN)^1vcG7#E*_95NR*c0?VsIht^4OP}C!> zE|!F0i^bt5$yFfFMGNHnZU?#BMXp@w_;=NC`J+{i?V2k$YaW|L56xemH%^+{k^?T$A2#36x_ z9U9hOj#i78FxwQSpsg?(xD8c+=2LD!^2l*u##V!LK(8rQ4U7(0@(j-GW@ZV z0agkhLB}Oe;PxlS`og<;AkEVgz?cML@*`s~uD3`qe~CM@$@?breBw8x5`O_LS)hzB z<;}#Q=nK-?J0E8qHO7U9LA^4ZLhj7T8qstt}_eFeS-_k$P9@<9FL zYhV|ZA?g|1h>V+?nWN2OLM%yvO&S(pqOCpfIUE9%7uFEQe`vINQo)ag!wyFE`pN016{{}g%buKx@Q#=d|x!gAo79S!76k1^M_^46X2 zM0#F-Lu@9R{a7ZMSlcETi#LSgF_Ved`%E3Gypo9Yo-RYC7tf>T+nYrR3yR1h z*G1U)ZxXl~9#0I2c3Qp6RWR>HBUrxp7BG8q5!7t_MNAkz0XF%ofN!Gnj!alW>{>SR zf1HHo|4l+GqEgYdb@@!!z-FPr;n`@1!enG9D_l_0ca<>*Cz;Dy{fueWDZ*sybab}0 zLnw74k7)Cng+zux=%e5jb7Z9np;A=@uH&QNnn=GgXSXbD-75|=AN7Nk)I|`bDGldc z2@z>(dd1A9cbQ=D5dG}&AQYlCZHLvRVS&nkz_2lL?CMKfE8t9koA{sO#DejVy90Za~kV_&i(0v zq&pO`Rl|_!o9u}acdMgk*{#UFwF));4n*cHo=AFoEpiuLM~-*uQQDv1$S7JGTYkKW z{!YD(&V7-?4^~&BO$7nyib4RII{N_nEc6txGZKl^k1oKv(;l3v`U_?j$wK9S_rYm7 zF{mPE0&m>e4{ezZFw7?%UR+cO!G`~$YY z9gl6`jik3A+@b@Fzs6usYa*CAZz33cltE~v84$Nrp9qJwj-vIqzo6=?Q*q3BX}r9& z9UYoF3E#Lfj*h3y$4k7|W6iigyy~eNR&9BV^xL$s&v%h-tzUE&&FryD&lHTenPb!6 zhWJ+_fzz-e)|!5rQP#%7fBHs@iO)`Es#gJ%o}NG$cmt5V%oBuKE&#u+PlFk6TS4YS z43f8*LZdGtZBtkq08tWPxPfM}M|}nEa!P2|KnaqcorCVo8bZrObG=Yw8=MfPikTUb 
zINXZD+KXNBN>42;F!cOwISZ?un4zacgI)G3h=pZBODcAkJgE& zG9PTe5~E+*z#jJmFdm%*?iXJMXS5nYDToFwzLPqWnlO32feHL(=n*pL;&4V)OjxcZA zOt{*>5l*!=foDrRAc*jSrHxMTy{!RU7B~}niT3EpuSI){bwcoAU?s5ie+RUsY2lzpeIsPWChSO8NqTs*3 z(W1HoXq8p~$Z!|kIXz+Et5rTAWy`>u+90r?)*m!`GvLN(09Zd-3_L~Oe}1wD6kRL; zd3Gt_=jeV=@+K1m`iB9Z3vS?cs1cFc)Q-kk=HwO7F* zJ%8}P$q~rk@B*2ZHQ-t0ToBS*M7YRkqa4Z4%-g?~$gW))U5M&oVi(Aw?OUFp4>syp z>4YNAW-YMJd4SI)i2S3Z|DsS`6|A&L1N(~qLAKxQarEvLSZ`oAR_-{8OFnPHGBZPINctDZ1v@Zxu9W9%10FPyAlcMI3Cl z1c7Qwu+V=Zd|V+8@3?BgDc>#N%f}i|hi)XBNUEBf2oGO$Bar zehoH@_Mz=wJ`mlR0sr%4K!nahu%O$U*D=y$1!f(qvRNA)b-> zxCp4N%4Ea}zM;ORcvS0BkG6edP~H*-Iqoh(J$0Db+nsG~60g9tof~Sj8xIAoDM!Hc zPzC5>5(stD4nU*K-SE&-L-_Bu4b0Cqf|(t&M4ak4I3>FKBJak3F2I*2oJJ}u!a=K-EYQ4LOwhzSu;^2caD7*cK<4m9@OObI z|E6#x6p=bTfT|{F{OTg2* zJ%G=M6l@fP0`p>5IOWp}sE}U@Y~3z``J*zxm9zoJ|7n30A)AX0w;+(V4=xRP_@Gfjw@Bc!Y_xRU|=KkKS&8h^Vp9v?rt7sxcWpRM`EU~H?as$N z)1=rBeh8mmc^%yx=|%}sDX1n`lfiSh;x+rPW9xx?Sjy};w!;TNu7)Y>xuOY&XVLIZ zoF}LUHIpI5kHe!0VSOup{AZ}=4*!pDL(ub%D(IXNA~8S7VBPgz$_46SIiO3 zmu5gk?hQP=@C_8&>Z9L%iJ&*^CA?Iq3nj|-0;4zM@WPr>DD`3%{upWvzyC{u`$e49 z&X{aL$euzNAxspWx8R8V1`~i~o+Mg6@ecIYO@t0+@tD)Pf}?#>@cYjQE07f45|9tC zJ#-cM>glm@hHKCk&*h}fs*k84{ydv7t%W62`q0vImr2{Rb4XeF7%`{|~&hO{3KoAWS6aHY-R1Xho50d7m zBG6Wki|o=y18n5<3Q7sSLwnsxFltZ*KQ>5(Hd`qi{z01kZR<&%O2{RHzVLXNdKUZa z(HOp+aUUMbSV?}0-z&QE`e5C4XEs^>D7(qy0)7!=NM7A}f()FJ3Q5;kY#1_~HPoHJ z9-Om*q^ltLc;aDJ>-u)~i`G?G(Xn2X12dgm;T?t@?i^t=24q+Te;0Dw!6dTz-4&64 z;|rYsvlxag8bUKBieb~rS@`>DSJo3xWUpnrla>>Hkd;mWWLCjrcAmi;F1(m!qomJ~ zGQBgX+&$9d%fy@Pmd73JQ;~1;w(c}C_f|ML%WzCOF0()Zd()^CY{ z_puA452Uecu8Fa7ry5ActAqUd>?0hhJB^pRSA(B^kle4ko9xtyheoRm*wOi>Y%8!;7i7qtaxSCBebV!JKeRi_MUuDfe$4l*sUnJwG*#)91dxjqRH$ zuct5JnRlhEsaX_Gxt72-Px;K+kMAUnY&85!>)hn>&6_o-&RQ zXRUTwat*z{RN-13%5Bt<`*~+OXlu8JdlJuTzxU-(y*C59V zE+$j|Z5H_jdC%omn>LZtcLl(Tj~bludrR`w97pO~1W)QM(c;c%W^?j!8`$N^iPYP- z>6G8=uk4bWET?k!4EyuyTXLUd1^Herjg0xLPTqPri3?2&=Z@5!fJYS&wLPPdY!;s6 zw&Tleds8u+<`T|^%STch?s`yF`pI42 z^{Jsr?cCq0JT7<|rvB5&qfRYn*s9$EQI@3*R+;6?nd@q@NA!v)Z?zlbeMMDnaoqr6 zCsxAdD6(YZ)NIP};TEnk`4rbaYcciF%9qkoOCgQJg}^JQiFIGPfc^C<1wRUXhINAm zNKBhk?+-?i@r%!}7E>ZQT&TjyCs|Va>=sepduS?E?FTuQn8gYAO7c=lP3((%{giL` z18TSCMDoj(QqG@Bl)R1D2W=|rOPc`|BbsH`-pv2<46gg)sut8s?-gqX3F)E7#BKU zn=c5jW!c_f3a<#E!ag0QLWA1aLY=#u-w`t|=g?lt`(y%@))dY<*~hZ{R(a~6l>v9r zBaTbhT0^#c@}X{?xkoy!J;PpPG`T#V5capv4>J3XKJ~_90{)nr&6-}zW4Glxb6(pw zkcpkK)T83LWVc5#sCt~vYL>rZdra!_n{El99HBrCKU1P)hlWV3*1%qGI>G%8J0?y9rb{AqN)s!5$cBe)WVOw#4#IBE~QPLOZk3*EcrSa zkW3QykEx_0jzU_--;a|tSjFp|S5r+a%%gP>74y)#6Gt0SI2te6(6Tj=HeS!@8J?oq4yD) z7@|#CwgR|!%nvJ!l(XA;Kq)N=q}+=`xDhc|ZqIXHYJ%l|a9f`i+x4fMoO|*AQZ8jyHphASQ|0PMsRtdZoTYv=XQ$W&MV{?cGtMAQ zN*}YsMn}1yBmSHPS4MrEIGO&vTa%mmWdh&0-I9BCM2R(-CQY5vlBZwwlu`A8@3|9$ zCVWKuGqOpjLmQ79Qg4D|IG1IHT;sk;><@!ha^ki^YR#NCEScxXS=jHRDh^e%{dN=h zUwgACeT@b56x|Z;!K6kG{P&Z5<-39m@tsd?MeS^}@I3b-*oXS@GlO!h$z_+i-z4Sy zY{|%i5^j-P9Cy>tpE4f`rwq@8lk~Cql&Ld(Bb1p&l4X32FiSm~{Mn&2NbILjr+}HLj>h!)O zN~J5En^SMhWjgy)+J)KN(hmvThc;7sp6n16^7tON?2iJ!YvegQXZ8p+s{v72$qc6? 
z$^|vJE=6C-_2K$Dm+}D=Pc`o{q{}Zn;>1pz;y#sHQ_)x3$)o8$RIs}uTXy3iR|H;i zY1>azrUlKE1@)Xg**=wucrcw>IqMzgV}FgC^+)u64SM6Q>&rNclx|A*Tpp#G7sK6g z8NgSD{;@BruX1N{)i}kCEHydAfL?ZJfV@zq$U9L5oVCkyGV+@tX=t*8ntXXbD&rFQTZ9w)VqLQmHLi5b@DcsIPii>Uz|pDTRvkqDL-VtEpgx^+I6@H zoJeVfgpqGU^0>!O%c=E?J?PETZ*d1UZQ=Q8{p94m+tC_5JMLoWFTEl$X80c>17qpw8nyk+=^BMJ`&#IJO_O^XH^6C^LAbOXZTktCqD#=Z%E?$ zCQarS{H*1ssPCi-lhkRul~VM)`cy8Ev*J_NYLeOQ?d+Rzl2bjdO-J>r(I;Sa*Dcjl-XlFI(B>q#~qd7-p?|in>IXT!&-y+ho_xjxC~7n z?6IIibN+FR|4OoQr!~DndWhYVuE;Ab-hw~x4IumMSgaXxoBSSFOF1XV(4D%A_<+gB z_``#%|F3Nj0GRPHiTylUXy=I@@VBx>XfS2Z2sdXmOP7jD!S2$Q#mEYd+gET z_piA~z5LrwCridr-KkZ4Wx69@;QW$Wm34v2`ea5GXmwMH4pH31l!@@^vIo=$trh&5 zC4PK{uOIDR2Jh{7jhx(BOqDnr z)0zbXP(3fX?7-``rsx&$#Ky%Z=2M?R$ghUAHCa z&dC%vPweRlaa#OmvW6_xEc-Bvax7Tr@%ePbdBX`(GYovLVhbA;S zI+eaN#gbR6QRLfhc~ed!lC($iEjIF}G{3p@0cH3+4Z3=L7wLtz(X%fnQHDm(_{o_m zd`iqCT8zC)+gYyWd&uK_uhmoPmiuQ>TT;PIwVOe`AH7DYtoRF;^q%A9Jn1A$8-7vc zHa*;xOS-(+<3=h`LWNqAn?WUQ4xz5Z=X2(6JQu5dimUX@q1>+Rp<~F&oLcc-eg^Y^ ztyTO)HD+~_rIm)a9jN6_N#El>G?~!`jT}2(#yc^|o=(Z{@{Bk~DRa}K{ zoZLY>FV?1?O?t%*bUo!VtlLRDXGK1*@C~Q1S%!ApUPeDqdPlu}8^Pya+Ry)Y$c#!^ z@PcykDCg$RU~%JyF{*CWeQJ+R0k$`~$nokgxGGyIst4fjgYvq22WPS(th( z&hsi4*KnnOYiaF3DK2YcJfD&Oh1y}OP50dB<5K%ZKu>uGmA*}a@9eGN@V-Y>;Y1;o zTla(%9?&C)P#cSfckoa4erK&p(&)kNEUGhGfiq`= zdL;c6r6JqE<<&)UuMAI9Wlv4Hyhqdc5PLNWzR;kXR=mYas_%0Rn^M-e5bDhfiGv!9Fqqs0|j4U>;M9q1aepIR~_op-*d$Ln=Xr{B-wD3kT?DM=Yc z?#P?XyzBYfWLLvc`dw@px8?RCzJXGv*L)tKv`)O_K8R#`17a4W>x4FL_sw8x+K4=T zaSP-UIyLx3p-v?Fszx{L-9Xvr$nmZdIWB2hEA{+P3#B@!!r8^IU`-STNz>B;e)iK) zZvLiC^p)>Fs9;Sg_JV2;ms3IWtM^=CH(e6FxAhA2^B^I6#d9V9;FT{o*=`O!cCnWH zrBTSHKc6ImB!%4TfP;MUS5c|6B#54IG=@9S6VH1`I@4$GWz&vRRr$gjLA-;MC*?g> zMmcn@81rbz;l|E;7UyuL5MBpzO zomBPY>(sJAU+&-Y&s_HPZ=6>Ef#+`j6NgGUdptZf5xC76(@_)Bz zaYN`db$7Q4mGq*E(pu6-@yM3d*Eqwst~?GKPVT0!#eC!ZTAldF>ESeUw~l_9zk@HT z?BKsuKIh_p-lWT%oanlSFs>pyi`_~d;&m2gain({)uywM>|g9cPy7Cfy7sw>S4)-R zzrM+$*-A5dRq0}$t60ldj8<}I{##Ffv(KfU4j<=6?x*up?N^Wyu>th*A|KwR{~Ooh zy^Zb}OQO5*5V_>*RbJ*&Bu}qBL4WPMO0UVC%11~f@Kg4bQxmMeQ_G@XVAJWFPnP3_4EFj2Iy`CT--y!xE`^U-wXZFDUY5uXmCtZyL=foAJU&YP>*M zKrbpxrk7f;p=`De@n?Ro<4wkRx;9gT)@-%oi@oG%|HA#0Dy7KhT#}^DWGx~W7EI>L zf*QGuOeK2#qfpweb_Z49HjQr<+HgC6*0HM`M>xGO8T+p;SzImj8Yuc1SOC}Z4vsRzt18)uU+ooKgTiI8%ZjylCbL18uqael^ z?z~CYzhfHPSNuA46vz4rLpKaa*>8BqWMR5uqgJJ<3?EPqF)(WuRooRI_gF(ztiXHluTbp6P^T)eT>GWRM zd#jdfNj}WKy^+RkTQwP~mCRVl$XE!8-zf>|a->p|0W7W3^L&LS_5Wolm{`0N|? zr0O=ER4Wac!+b`=7C2e63mPF9y$@eH$WiB+nU(ZI%M{(L!0o)4JCvbM-N4)jt zGwPO_a{c3;;}>ads1Y1yEm>>e&BZdBcV{3ST;RZllzoSmSQSpm(2{bBjp$^`EH>TB zhP{4ZPv<8X(tn$*nXThJQSO`Lc(1L3!DcPkvhyQF`n6+JcqY?qlf$@!ztQXD9#$Qf z!;Woz$qyb-K(8G#DdndIb=D*^nGsS#Qc=Nc(=Ex<%oDe6J5Kjv9H1ztnVAHgfY)cD z=(#u^6E``st5sEe<;%S^AtRn^uuNg6tKH#2Mh{}X5q@|V4FhlJ5-lSAm*(E*G~di`TJX?97~ezTjjWtzFw>UoIWNqKAIPIWzNV<;Ozy z^o^8I{cQo^s6k@;m0?h)+s3Y*nS>`2d};YaTc+Nx3R>L@Vf&V1)&T{~)9Nd%{3$E8 z%+{naO)^5mgYwM(L?roE6megD7l|UBk6=-PG3G8fW$)l%%R=N*;FHd7qJqWn!*B~r z@rmZUqyJ)Ll_rgz)re(3&DeZjj@fM(2xH|^c}}>8_F0!>NbN+{8Zl1rWXK(Eg7Xk| z&1*H5oYBKdMJucf`-nqlrGcZ^iS>=l#x(bpH0F;Q{M>z%r3ULTh3abRdlXN{wiUD4 zuiaQmqZyf5%Hlp5BP3Rha<@0b#rcO|TCfFrHVU!LDZ^!ZeYl?T z%Us~pKk!>Yz-B%?ifTu)Xv@1b@GeD~t<<)my``t{a%3WFxV4N;{fMZvLO_m-Gf8F6 z5&XlSXYcFQc!F<6-{WR3{>oA^#M`#Z$s)v!No>=y=FV`x4pN`f!SPpG%99 z_i&2d&$w@4dAxYLAxn8Wk1>U8?5a6LdMAGZ+c1qi-WN~yqgyb3+5t8qDUFXy9{^|9 z&SjA+ms6X}ak6OI59KvJZ26pem^xXRSv^vto@Hl9SxJXGtdh@~^Eeo)dJ4z+rNiI2 zt9WF8IPD#`g5Niq*ep9Q_CYp-2FE+mt@cug)PK%t2E@br7B^5~88EI_8=}LUXjRB! 
z$~V3WgG!FGXMeNcPNNTPP+f!rR>ld+)2GuU>0vC^{x(Z@(&h`K)9AW&CjH!`!`xrR zvxeG_XtL=R#=Ad+Z8p33yL-AYXLBEPCtI+8{TdXWCs_+Ws!R^qOJHNz zEtLKJ4GXfp;N_DX7=2NL^&U)x<1uz*(e)L3<(=SuW;3`AJ`KTs+OT+zBYm4;OxhJ? zEVE@J`+o5?MULA^Js%U9iH;MiuOG?MV#s&9lb{r zlrLS!hZddy&Fnv*ye0)>K8~QDHOFwI;5Mx4HD$8JI(Wfb#4qoAi}3~iwD+G9iyP6* zRt>QeoWGSs0ahG-e;tSWM=rp5PxRif~x9Njut1-jP7Y*OPJey+nl>bR2*Hme82nB_xA!+bUDHF^r4 zhx@|2!}|Q^ecSMIO&*u6SA|24x#7{|Xx8g-nOlA*fiJ@z+P;?icQYCDeuQl0E~raryap8LOPr2lFXdfjKc2Q=P5_24%_WynErwUj4~*Pe{)h;#(+}PRZFL{(fhF~ zA`N_^l{uw;V}A48hww$O88R!ELH2zOyqdg}23ARtzReE^`BDVakC}tdvWXBXyHxb| z@L=w$N*li2Z$|;22eO;rSFkdh647gRB(ka^4{`0Qs3W!}N~p~e*TYdgKk0Y3eg7}kBBE;!Jy#a`wsvHGK1 zut0K;akm$WjqVc{zC znN*BE=6+jBRrl;DRDUj4den>c$;z?EPZVjW@d?a*(*uPI#tQBk>>$7V3^?|yh^0J? zMr}=Jx}p^XCVHx%<~V_hMo;8AioKXkyc(o?^r7I(NB+_Er?8}Nfxz9$nL7V0hQEPP zZ2rp)IQf?c)vu_4OW&lxVuS+eOZh>Ek~~wZcwSTCqRL&)sDN5WA2Nz|F*GQq z;(~`u$!6_tT)%!T%eyn2dHp+t1qa^?_Ke;N=&4Q5zt-a2H|p$P$5eQEsR#e-pTdC$)`-9RWR@Ztz*|kvJawV$3O~JDKV`^q6~6K2w^# zgnxZliI$zaDr$M4i;sJcVzFg4xXgP6b7oiK(nrI|ZhAbNG-GV{%^Knk$EQvPS_N4_AKIoHHmM>>n zHIA+Nqsg+qY2j0xOGo+&QO~XwP8nCg)7mD%yrZpnLCzCx6Qo#6(0G{pZZiJyn8oSc zxP(PVa&U*!X*je#05n%sinc!8$K`3P#s{y4kZnvGAF=i`6z;eI^COe7d2b2EZd=B! z9pb=EDGUPcgf`4~YDB-hTiEn@9T#!dAAbGlgu^Z8pnk!2oM6*}BRk&lG`k2EOSJ9@ z!)M}U?ISqhvk7No);-z#o*!o;l?cOic0y%>p{RbRFaB^-L)Wj$u=z_N_bX!s#(Z{0 z+XI=<;w{bo)65ji*|r!51xiK?{kB4HH^&+?n;vNE5pmvwJaDwjT)uCp6SvLNPb8bK zhg?82cRP1DtGcBKt6lZz$ERw%rK<@Y@ABX;+rw-20g9IS!l3HC+YquSI;G=0r5KyTEPNt>P;F z^hpFgiOw=sh{EC*T>j}~xZ^Sz4E4Wo-z%n~>(*~r7j&EF>o>pX_YakE9CvsY@ z?Huy7_{P@nd{(eRflBE`Zs>4P6uD*n2YJB6=;dNBCQHkpo6QQVUkh?N^x~4 zPiCatJb=oEeZco`yy?Q5)s&y~5U+?&U{ zZ^0XO{#bJQ3MTGqN0DMO4!G)yF0Dh*dD21Amz9bzaeQq}4f^05sWaU2Y;PE6@|4@K zXA;~}35Gw{tYCg{5*)N1#6sqIGJnwuHZs?km1J&XGuR0hb^Z`Lb~J{KIh(rAYxTLhnZ^WOCPqtga_h*Y@Ld#T}``pp9fdwv(;JHL|OHL0e{i zr^Bo7lfU#cQlBg%-k8-#ryM@gnYQ1wXw739kSZecYnSNtBQcdQfb)5BpQA+ zpAH?~Pcwd*(yFD>^mxQ=%=IhAN!CSpPqP~<>mQ+NnHLs@oD!T*xyuC?9pHuT^8BPP ze|ftRX4E;+8eMB{uvgb@h5W1O!UG?ag;x^O;A2B0F7yaxts))aopMLv?JifL+7>4K zZfz)Rf1xJ4Fk4z!_exIKv3I2KT$Gh?>eDg8rKbl9J=k+L@92MQ)AtjMo<*>MwhLLT zQZaKL*~b1UdNMQ3ILx)Pq1E|wsrr){#dyh(yZj7NHjJlb*`+l9!DI5iBqfgd+E0z` zpQ$DE3q^1EMk;-Z;-$el;=B{u;zgQ+#VKM{ae{b|*mi@0xYBH(cu26kIIX9P&gDF# zeIq{6$OpC5o^*sN6VmCfODs9xi=u_yi>cs?HfgtB!5C(ds7vJUy6USvUyTMi+Cj=$P5+w zmzW9%?-(Z>7BNtGf9(T_g>aksI9y}vYC2ndXG9rHk{EXyEAs zhBks*|!Kr=xpYq zZXe=LXlGzV?|l4~5y+2f(!-m#Uf}K)Ckm)6#L^RM+1Yh+!YECBVgJ2K*3namb`zG9 z$%{Cc+4qYbu(TB({AnRCjDU3PP}G!n;)^Z2UpmOixb#Ww+8NrWgqt@a1b=FKgg}^ zcSfa)<7sqJ3{_~Jp^pVTMb|Y@+v|t)jb2KQl>f+mjJh~nVWc=XKu`R)Pfh$dM^PNw zFD)MXNKU+EmzMbZa6R$3f+q42cEyib?PJ%SQNbW6uMX$t7+oMI2RS4Vqus#R3y8M_(;qkar&tt;;l}D#e3hWii;bx#F8A9c!oe-?6gr^ zJSB9v7&ohk+f8M}ceuY4ozX?#pVZLHn`h`~Y6W@jI!%GfM`?b0GC7S}N0#+kbT%Ls zd$(@Fukm-$dCdSip)W%cU>obV_43PK4uVYsesF%0vN*BG6qW27@U^onb&Y(4Cr_$U zc18qMjLf6bI#)V3x`>xoG-oc={;cP)9t)|M2I`s$U@-a^^eEQA?kXXxa5~QptWXjf zuC)}Z)|d*PgeeJUf0h!4s1FyW=T8>4R5M|tz)@J)vrstd$vmM^rHOFG_@P3xdwRk} zZexYDji$oZDk{E3 zYd(LV{MZiq>HL6BS9X$1*h>o2ml0po*An~s8i)hm>xnlTtBLP(isE_G)WyYZI^ysz z!^Qs=X^0I^%ZRIgf1t8OcW7q;(M)kYeUdMyOs6QC=CqTZ@7+Nb(;R8Q79;x5C{5F6 z9LBB-@mT(NI)=91;-SkJ4{VI*=YB~P+&g=RS3Y%&zp>AZH+Z6rvwkI`x&0Z8%PmDQ zlc$9xb0}1O0~ru>e0fE zY;&QhgMqN4Lqj-trJV3gNHa56Ph)FVPGH`KAD~k79Bdsn!uqTbzT3tUN45{8Z)*zZ z#i0MF{b>U&o^_kHy!b$UM}AY1jjZ^gm#UbbKUiE}q9*lQdQisa+rAU zGj(xGu#C7M_%qddy`tltO*Ag$1Pz{koO+B!bY4(ImG;}H=Cdtb=@>y3ouetWP(pHz z97<_6M=-EN6+bJ?N7Y3c82-HsQw`HlV{JDdWGlsM*ZZN#l0;lpy9Ryr4xz`ZqnJ7F zFm6@1c(>YL?*Us;Mtbqp;@}avaojv*Z zjOERe7mi#ZFU-08i>0@JWtR)%Dct;P 
zl<>cLb79tf3*nSpeWBR_MIol$WZ9W%?Ck69tnVksG8R}e)ox>^rYM4aP)Q>i_xjGipy>F7MWS+Q1+s9O8FE_P0Fq`yV!=(!c^(d z*%}<^vu2dT&D+J<(%PWqsf({|4Z2D143` z#P?)-;<}3|=sCX!U-Yh~sHP)iYj=Pi&DcXnG7?E#c#IZHS5RHrb@~w4McUf!)Ean& zVs>AqFE?+}wCr0nDWHaydz8_?LY_3mNatHlP~EgN3JD0NJ_ldwy}5&KYzwBK>04-x zix>ICSksyRbf~}U3#MMafZsM>!V#iNSU=?s{-ZzG7cNGN`|Hr_a4I%N9Y?zxp_uby zH6ENCg>U16@D1;e3bP_Ganw=toRWbD$sD(D8je~UeDH}=4d1eEAox|s!OppD@NUpE zSZR3@=D(~4!#O{|=8`&_eR4K4?~P&#ZMn>%y@m~vt7hXRrSM?z?gJHU)7f06@*t7r{@Km$@7TnSopNC^ z;YMtTfed?BT@5n23*n-yAs6x?lOOn_m9O99h1E;*@!+g-9QoxZMh06^aP=Zel+-p+ zhn>mO(wYX{pGwYk)9FCFH+k$xqYttrv~Jn~l2zPKQpTy&J@)_&P>P{AE{SSRW>OHQ z(KG)9ax+MxEi02LEH{RFr^QhAoN%)Jw}=9lGFsy3NYA^cQq7@pq!6J_J-?pd1jQEI z_vR(e72HRg^@W&nKN(k4Bw|&gCQgxC$>&cUh2vH&!qo71e$^vGuIsT67i*iuT|0bJ zFfeuoCp$_N-pqD@Q<>Y~>f)O)W~?&ny{W>kE&mJp(pSM-a1t)+eSpD-Ca}>Jv)DD0 zRgC(g*}j}GcIf_E7ND_*O>d23>b3`%s5XlUJ{)BR6OXVW(nDZY1cOH8lmwXYh_rv!4&>*e4`qXO)*xWIK7{}v1$cS`iwDn``kQOqr! zV*%stFM-^=gW#%E2hSz;LQZB6{F`479wk@7WScx&Kgg1ueY=c(H4kT#v%{GrCc>Vr zNoBK34zkJxiEL4QIy3gmVx|kzn6zm++Y+0{bhj0;+LP(by)TMAJGGP5?Q&;N_As{M zxfA>Er9>Ca(PG=i|AENUZNSva;6c_pxML&cc5Znnx?cZ;kM}Uc)cq#7s!bU$2TZ`; z#R+KqrVRJxHeq3#JXxgZlBeEq(y=zBqVDnJ$js^PCXUorFna4SktR4f()o*Psb%|q zN~nyZ_nwh7VNp1RObVkBvAf7?O9Xw352xGSn@R5YI*QsFNDsF~Qp2kCB!AwPMremTu)$E)?~8F-;S`9?*(k6eG!vu%3|MQvsw156Kqg+F&p|e zo7I*Su)*P_EZw4%Ila$jO7r)zmxf!}-@D$-D{>NZiyFt$u1;kNbDY?&(sAruvJ$J= z)eT8qC!r}a47SMF!)nzOPAZ)jsGT`oRQVm|Z_!BR; zOdy4uKy0ohS?=dZ^kpHbJ9$#q*VT0HWh4zA6-^Ty_tBG6hbTd{>$1+zBfVnBr_dfNWwr*4@j>aEV=zUCEjF7r6f%r}8sInV(@ zE|o)RN>k+nFt%!a7aF&_OKFO91$YfeXDg3-KTG)JPD{h+Uhe^|CqUx^$xOinXW`C>1`8xOTN}Lp}m6IpCHN$BBa*oFD zTSRjeSJK&QYv}2>&2%nl7a4c$rSEU|ky&{PJ)d=y*q40T;Z;CIOG_v;u!IgPW>HY` z0s8O$A$qw)avyGHQq~_y{lle@<(Oy+)(fEdu?uK#k1grTnGp33qbf65(k`w?FOzI+ z)r>(WO+7Tb`a^WTRfP-mc+Pz;9}gp6I)Ki@4WN^K2A0M@gD~Ab2{Hc?sQRc#+| zw?{MaBWpH$oF&t}?7%!F_{R^AiLAEWj`^p|X8xgGEa%*6=A!1$Je~(LjmB`M>A8dT zw+FNB%@M5ib2u~A2w_r-HnYqZer$yOdbaxD9yY-viglda!7g6)VdnL7naAGAEJo9i znU<)toeCQ4d65R&bnzEFSX~FcN$D_aQZN)|GpNft%GKES3X+02u4#PVMS6aj&%I-T z_qTJXs5uA!Mu*|krHSaHlZvhKl{k8C3wrx>VBzhr_$#L$SKoh!Ci}nR!$pHhrPqWE zUrZq>&Wj$)ETPS|Gw9CDnN&N-jU0z8rcV9kbZhNeV%kw)YgaxPb*MK9DTWZ@vvZ}^9N{a36QatF_(@56J)e(w5P&a-z)E8w*5)24qp5B1g&_=MXY=sq3uOa{K zV~`g1K)u&n_*B~pyBqI7-@iV1;53qbzHZLaDs|Y=Fj-cAMuOsY%fCCZc)}}u z(a?|1{u;!GSyFK2T=G5cM!O?sljCF;a;aZI>$H)N~gI(6^S&D7uYR#<+aCDUh zPQ9gy?hY?SBj*l=X}R;@uku1TKO+#rwkC zuzu!Sxc%`zxH|j>w6M1j`1YB^LbwOV54?aY&hMdk?NjhGdjlnB==hOK8yDmSz7OBb_MOQy2Q=QG*I_jB3S zj45o%&}q#7i5D};b72|pb=lke*U)d#2-AKZf@--?I6E)~j^Eq_Rh2I}F|Wuk+SuV<#*)tx4QG%lNv&5Wfs_$>FPTb; ze}y!8;W%n>98LSz+SB!!3#fkGLNb^;pG2EHNqVRsnfd$E{_qv_pv9Z6P4T7^2R$k9 z+iHsZxs`-@zO-zh1kZ4^r(1~To{=<6pI^3|k04FuL z;P=y?rAn&&z_pQP;sF%oX8+EA1-Dym(0%_be&cZ%SvC=7kFVfT&nj@|@-K-_XKWE2k@_jv z-aNkM8C3Hh8)Z<-#R8v;S7G_`eHeTr5iNE`qoy?D%%j_X7nA;K zFS`3|0r{&>Bje$A^yThk+MPIs0*Ip)KQo%R!hp;qR^Ej^MJldq!NZM}sOrdLSWFGR z_>+eT^0BBaXNS`a&G^4z@|@P&L4rEn!FHEe8aK*U8K(P7!=k5J@Uv_e3@prrg^Gpn zZQdC$lHknI`L*^i#k&_IZB)PkJDpit0YK< zw<&Ws)ni^yG}x+>@~p+=33PW}0>|ssp!@JF4BvVjJ}Q(zZt3b1^ z4j?mAMVfL$i{6egr7_`lG`icKt_(Ay#dk*0eibWPHi1!F2BU$0>`1tE5={&?r>r?6 zC@N7)lHJ##fqzx$K>l}B^X0p_@cqS(88J% zG--1;-~1A+xv2vczcZotW-k01ngAU|IZ(XxJlOv(17o!^$lY=mrfz)?2X%hH#%2|k zD$r+LNrp_O*@(%lH(~*cM=&c@E%r5TFk9I)gx#&xV57^2vg@1yd(~pXb|x7xA0rj^ z%K9~gS>FPu=~eI`@gxjATmy}1)llu125a7|gRHyua8sj?)9+8>jz(xWjeMhm}A;YQ+(1j3m+#g z!PcNTNMBs>dQ=c9O0JhnOcM6q*^i?Pci^=f>o8atg*MwuaNU;*EVy?9XD1#*m5M@S zYtEtQog|qkMR-S|zmBOTEE*-oC959d`j0R0)6~0o&_aZ{x5`mLj_`_J11h{LMOJ(W zvqtA&>g{8wrxAmxJ)3a2!+Jb(aT11=wDaGKZu9c1e)Bb}yZPV09`pTGZG3`)17F{) 
zD^Oo75Pct*z?WG>@C{l?BL6?eoKaK(XSphsQ+yZ2<+zn{(YLQ~XQMm0D-sLZ(a#uE zR+_>fnQ6czNco}2T39^l6dcLj4O?@AMQ<^l>rw(m z<~aA$XCHS+w^5Mkk!aT$XeF}9kQIGYw-?;?LHp)i$N6V>&2W|SSp0P18{aE>#;+Nw ziA8x9c>CEbT)k~2O8ITVC8Oi9ePb>LuRM&)vUcK*Z9#Z^SvHC{oy12qC-8pt8B9_= zht0Fkq5I3@=gm(iD8V|LyHhdt-(h@feF%-$rsJZES(s9ujm}HsaOFWi zJo|hWj@fR3^>QjGm~x+2oLkQ8sn_t+)ob~8;(5Gnx~s@S%h~=%i8}W^D4r9m#c-nY z;hg*ASDg1{NqSY}05e)0!7y(&_^%Se@HHHW_bvb{1y8WP?FA4Q1Pvq7A@+ST0Otc& z)Lr1STnIS0#e$J|ADHV0!lZHAq4iY+9KRb6Ul*l=TX70p_!|r>6Z~OZWe8j}i2!Wb z20G#Z2oglVd5b8BtPO(7+%R~(a5tQTwNUV z!yE|!q;JcOyFSa_SMQEU)?koGr9vq9v%>SjfR1v})X6%0Q|=l5zx&GgBH=qf{PIP9 zqInZP!Lg6eny-m^Ifi)e{wU1QGr%>vR)WbpN`W<6{3fI6iUAd#*57v7}}VHK9SM*zHkrvnTH`IF2KVJ)bZsi zO)RsRj;ps%#ku`jsBu#r?Ryn*xnC)NWOa+Ed&6i^Fl!N=j+f)(Gy6r6e|eFYPni7_ z{rTL`gK1o2qX&2R_XO_Hll@#|bQ)(7>%oPT?BN>Rvp9{004}$DG?%<-8s`__#EokD zDp=t3L2xZ_88>M{2p4;T<91ud2()(|5{=oe#9N#j&nsQq%};i5;@AKAU?0-+MPSmi zKx9!E%`X(h@P|&Uu~Ll?ei`fAKKfH4fsog+b^3-C*e(2U7};!d7h&2s5riW7aw7Iu{RHS8jqH`vhq9 zDTD`Oi{Yp2Vc0DBo^>l2=CwM5Fv17~mvrIS^O5jecQmN4wSf~eCcv3wDbQMSoZEA* zj2mv*!Z{b*<)lWu;0$lP=B~~<%=LUnf$=geqm^hl?q6QZgX{|a`WpqlC_+{6wDqgtYo{r0d+mvaEqCA2^v4 z8h6?~*45^f_NeiW>)(rVg~xc_XfS#R>`?5y8pn>>ho3Je;6dRb+zF-lVeJ*nczGX3 zz3)M7MTs|+*M^ZsH?d(yJAMg#i_aysLE`uCIQ8luTz3C2T26Y0dz0>?b=NhVvOt81 zwPm>H6`{Y*4HWrZ!l&NF=y^N?M|_IMD`D$#nlqs9WNVZuABSlb!?0$DGMZc*fTsq2 z=4U^<$h-F(nQ)gZ7}JR)DLPwAHkzJFF|(sQ;6Qt2(2GPP@sGXe3yzKXH_LkXsiN@yQje;Jq=dHNdChj z06O~}!KBz2?w(KpxPFaGD8I@D#I$e;^J2Jdo3Gm+)|kSN%jEgB5BmA1qZM&(r#gRe*U}@dL)G4x;#9*L^LpsHFt>{Wu>=f+F!MKrG3v=bjKcfh*0rdU;Nja{;a_|Z}cBj-tB z;nDm28ShX&an*Ve_ksz!Zfy}{E{qdwDA3}3= zJmXf{6mU^NCEOIvTbxtYIqp9Tf6nOlJWl>d6qgvC!wq;5&23z#$Bq2wBMA4bxzKed zLXc{Px&pZjmEC0FV)M-XAvXg{txNHFC01$*L6Fj=W@Nvnz#`87S4UnQO^8EJ|_|!=d%1=xmU^|obSPQ zF7Cr4&djlai>WJhzMj>%l7XDV0nHkiAona*_$ z|H^3`7zT+hgP~nj7B=pbfnvQ6+|`LUxs+$fxdck0x8vG4E1PESaZe3*>hEDL#3Yt8 znwi8&#@}()D*Bv@%yq#r%Q=ES6O{$eKS>FS`;|p<&8GZzi%8zaJCk<{KEQuo;Kg@$ zyYggxhQC<%p4Zg4&aX+!;>kIlACz~PmwR)OA0t)GJG!3XKknmsI(Ub#o%V|VyYC4< zxuBL;8!hI$_^W)j%747&xm)~7(-yvUq!iLQU3~g}EV|E|f`@abVbxGuRQha(3$6>X zH+CjQ{B)Mow~O$DX8xJa9r^u^f0%^v(+Oof4CybTHNQ43<}^2 z;P*o>^Sg==*|I=_5I8NZ{(8WpcakUgWw~%fP5PTF?=x4$byP z;9oNl{>~T=eZ_`gzFr^1_SzDpR~No#3E@P`CeV%93&yio!V#~vaC1gH6uW1DVZb3c zp0*G4x9o-j)nj1!y%N?fFNO_iS#Y=h2n5|a21>F;pki1JTFwnHqp2ADrbmF2L58H> zO#^{pjHGKy9;mb&k_=GxhSwKOA@9d0PCToTj_lrr-6853?u~J<1RcB-v2{2 zLYK!U;98_ugS9gIy0Q;eUb+W{kIzBo;0m}K zD#HeMaLil5gH4K<&u-3`%=$k`&TMBj*`VhdOkP@x)jri_w_c56rqfMX$u)D<XB zEiq%Bz7tsdTw6AJv?S*qKZN~R{u{1p)kEF>BoJTm1aE(5(4VmnUfQ05`hZg~sNxVj znKlZ>m8FVgCsgsCSZ<4<2Vgvzl~h))a~N zk~569-_|GBJJzIQv6#9x@1ScsA(Y2$C25a!v{%oK+KnWdQ@4<2Z+9ZonNBn|*pA}& zn9}CaBWcuDJu;hVK#!6}(TAM|)Ol5pe5MSbobC!-EuM&%9@O)D0uS)ltQ|yU+jRsL zAu0R=?|~TMyO;N|Pvy1>N5B(5E7){31d_fLgUZ7c&@S=u=;+AWD8f9r;O zx9`EebFW~As|s89+nCKwGi5g(=&{?=n+;CTK?`} z3OeiAzI$ugo}oT$yl4qiDfVOcHtc2gr}ndEw|I6$JB-QbE?|Wm zU`s96vUx@m*|&mgkUL`-7>Av%QIb~|UD9l-ky|5$zh*|G!|Q5X*{w>qM2pFOVi;*} zilYpxRBC#gMa#T1sjEMSTo0b3iz?S?=je8c_IaDgs*y&Ro}zuCY)Ta8&`#HET4$F< zPyeKo!^|`~4$;(Uvx(G(uAxlr02-dFLA$0u<*HqKV8Z$(5O(eiwzbGpTW28dl#d1F zRxNh2VHgW39m&G0P1*9#qu59ZpHzFpo>`_ku%lmH*wjmF+2P4s*}IJ0tlefa>zOs4 zr8#wiaQ;O&S*psqTj#UGKC75ow}6d*G=SZf?|>Y8F1i&evr4dki4V_yec@d< znc~buA)YIkj2=(jFxqkv;*Uv~cYG{PnK%!>OG%iI?0>k^*MQXb=n-v`rUxqjVR8uK zPq$t)nLUJdOY|aVIa@llbTY+Xmwf&mO7UyHp`b;I)>x>~&Q(Jx)m)$U8Vn`Xeah6k zMV0P2_TnL%^B8A&41brpV)98vY<6eZyUh*1YmddVrILMi-%-)UPqR7eAqCu^t8cj3 zn?pG>l~``tn%Pi4GzQ!ib3t1DKd_tE4HcGeVdqjFT(3pI#@q0P5Y5lgr<;v0Vc5{tubB=Y&T?%eo7 zZJ6r66ZX0kKxmQ(c74o;q`X4-(A5KzgLIkvt4Yjry`@A`){rpQ55UE<31VJqu*oZ? 
zG36W&ChV|hH~NM$^<#sXXSWSIb4%hG$1Y+uK`u;w`Fv(IcP;B0x`UN|_hqr)+*nA5 zKXbOuVZKX5%+L5T)3dn19)@JFe}R7N>Zf_^*qwFEM=OU_2a4H(kUEy1RLWW9Im8@*Zh zn%*AopueLZQf9*gGS+IO%k4+#O>8)A4)-M`6?ZD0<4lh>jiC$iKX9hpRlIR40$tvl z^HxJu;g5L?XlUF6`BCk#!@C%~c3gtO?=Ruw870;}dIZ~UK9LQ9MQqZq)htcMm5msp z&3451{f*<8+&xWZUKq=MUaMoWUac(e&~?@Y*V+0b zE$sY?JFI-;C1x}HB%6FHll@m!#*Y1a#)ixNU|O5InP0{U<|OUQrXP|-_ZMU`uPMiv z@9AXrbMZXp8vh=~)YgG#OdVL9p9u43=}TtSN>fzWXu3N|i|Recl2!Xg%G*;wTQ=0v z62UF{=zoWFs_xU2luvYNprTl(ZJ^lPPEmZ~pOpCh^xw4gfV8+LTtU2Uqk=dfOIlo1 z-Al8oyXf`#yL7p`fvR_&r4aWFT0S?FYzM3&(Zi7qaE27&Z z4gO7lG)xQH2lHaR$6rFcG)!!e-k?e*vt&FUaqQ&Q&w~+c4 zDGj5vR4VNdO2bwmN?9SY36XivIkyrmQiP;|6p98?Y3TR){ma9BJluOfpL5>l^L#yD z@Zg~$w4RZKs|)M!-e^A@8#^1HOBRFtflw$U6h02-fta5j{Ie6M0pFtN=>x6!F~oxO zw`vG*CGhr66fFD2t1XK6!#US&u5IP-J7mj$36+RC8!Sv2_vZSsTOz}buyL7FHncq!d zgP~FE+r$fO#?v5H{M&+wuBfN&N1DhFyG&I0S)AvWM&l*ZY%w{09~%UI!JBLU;eT7S z;n0m$(3HIkyvKP!ZPOuW*|Z+&tG7c}WC*01hQP=Y57525lV9i8fVZSGbc{dG>o6|E z3Kk0gtYToHb0}n=afVlfSAezq!m6fFfMX2mQktP^;4W;fErn*?ICwet2xx|%hYe{( zuxnN=RHU|n#YiJuex45|sh6Q|_Zi60-2msm0ep40gV!e8Ay3l^+cdXu%d_trW5m#{r7H&>DTu&k<@M`hd!}Y z!{1nnx2RD6y_9fcnY1wCqonZE-GA(j?`JmT^@G@qpt*G&$b~w?Z27&=a>pB9{jh~Fe*Jkj?-Q2zFat`qm!#}TmZX@>W+BH{DR^`JYj1ojx{!&1G)5H4>7TaNs}v#)-`UtiYa-%inJ z%gzM4Z}c2izvv88dlklf-F;dA-XJzh)}O^b<8v?DE0`GRWIk_332!}}C_FAQMR=!Q zQb^XfG1qp&+WIP)(U@QC(ZNZ=l+vlflP_e1`xV>SW4APxUFpYeC7)&J5-%I0WW@qz}^QpVfN{d@JCtFbkh|{)A;TtkhydTELQkJa$ge6`P>5+ zDy2=^zW)S;9giS%ei(#KHHDLt>>x;WJETr_0kZ4}tXNIWuw*?vGmd!_D10`A69H&?#|Ur=5radtgd0N9(A#K9U?;Y zrIJFk3xC;(iZ14JrkQO{d%`-k>zLcsC(I|jk^TAmg#BGy!E){;u_Z=ftfKcM8*1Od z4h<}0Pdbd)&pmqV$uTK*RjZipf3u8^sp8Mahklz(-jIP#KRt?qy8}_Vy)>TFyB(iU z7zGwJJK=e{BZQ9p#4oST#GYmwQL>K&H13avC%QRMz^@a}`aNJMR~=$<)Ih1%0i>mm z!W{EBSn5*)t0> zq~|0<=A|r{zOWSZU2j6zqnnV}kO8ye1L1+rY*-Y06-OVG!EyS2*l^ih923vswrNvo zZp?eS*43B|)Nf@@`R?r7s0+-@C7o60moUo?#@_neV#__Avn4%$na3u8 zFOY@n^JT!~nkV#UUWb+Io`NF12mdKXga5r#aN$@qtWC}Vr+;Oz@p~(bd(sIRO1*GG z<2$U5uZ6FXiJ-(%A^A`-Jk!4eE{j?~{!TH}ecuc9zcz!V?J+p|X%k%7Fdv?Zx8up{ z@1i4u2rfS7GFKDnN_U8jP%q3e|8#G*@4i3Fo#D@#ToT!7RKeWOK4x0_UCdVJ8{3!m zjrINf#ctdGWe?U4v)rMd%s^X2X#9uIL1c*uhqwJ>eF|Tg%B?mwd-yRkIa|bF<5l*+ zD~+*b8Em`5W%jJpi~a1H!(uBsshh}6dc*1x&9qoZ2hMAgvV=8)wx?diZAo|eE+0?a zqoD}1tIXj1C|$5P+lRALPUCfxjBs011Abks1vA~3fkxJ0=r{8MHfK4g@A!(dmpGzb zf&%Vx;siMPFdg2^F9&sxOYp+Wlz;AblOKU6 z&ysJ-%!BL7agc0y3G9AfhdoQ$!TeG`Y?ONePaR4ir6UWHCR~T*+6l0$#1C@Aw?l2L z2ef(xfZC$HaOT+n#!@N**W@&Me6f&b{@qBK`dX^{@hO$rw4Qln#xwJXVkUp`5!)Wp zzyuwSnCqtpOjZ0bE9-4zt8a`F7Mzq9{x_%~{FfmolxY6L?0i15^RK@$tC4?9F?*EI z)$BL3v2SN?@9wb*Zvk`YDrDMiacr{7S@u#dm`SF4u-lhx*+MZ{wxV_i9p&XpQpFz$ zE?JKkge-rGcAh2pY|bQzFPjciw6x)A;6fN1!a;+p2{>*CFuS=N)`|vzcu5`zH&wuR zn=4@2sRwPMS@`VYF1(fBC+Rs6NZtAX+IW`MOlBd>kRQa>I{i3>SVLk4iI*IrwXv(2wpsv- z>xySupEB9#=3JKETF5kimar=R>_GokH z7CZej+kLo~ttjtiN;h7zWB;m{yILU|Sa6Mv>bbj1l_lECI%&t+{~F1uNm&w?E9GUdi5b}6lmUH|VZlRq=abi+S0 z{OTLS=0DiGJwwd8cYv8hzhOJ?K4*08JNB!liLFtgOroQZjg(~a=Q@e3Ju{N!PTs>d z?3l@HGbNd6gg8sclVw^JvaH=Soi6&k8o5ZH#m6q*!@uW=z+Im5=xiX~_yVD@h3(%SDAn$Hj!=G7>^ZtH11SST{o{J?x!RAHN>HWQ*E`?Ci-z)}R&4 z7EbkLB?Ey>W%n+2xKf=}?h?{_au)PdhcZ38^#*xnLe+gsNSO2&YX@3@o_rcSc-{spvW6hE;|2T) z%>h|of6z_Z4JGcO@J^=~zIisouyPl~={|)@>oh*=a}e~zbim}lgig1X;K=I)C{KAo()pd(KDH3*CzJV)t%LS4Pu*~rLzphTWnA16E^U* zgRLIyXR;0=LM}~8sBAVy`0$3T@JOVjaOv1l!gYG1g&_fQLU9dQ;n?S5!cVWivx$ly z+4GRs?Eb16W;pvMQ!PzmF&44xec5HUbI&=RvE9PDMo(nB?(jU$fk4{cZ$?YZ9+Hd8 z=94d7szgp)6OAg3#QsT&pz_cTy6TpJyv{^O_}+@gLlxGy9S8b{wt>NkSeR9q51B^A zP*!#Y&IRrVX;TiS6=_4Zo(t$ETnDG1I;g%=2WU_Tg;rPK>SZ@DkvasG6E4E(-vY2Y z{2JOuI^dU}9tJv#q2c0X&>9GaUG0~_CAb1yel~-x-y7KG@fdve<-+Lo=U~6xE|BBa 
zg2mW1Q2cHuSZqEDr^j!BjLk~0>=m!GYZ)X_D=O)s(wB7nf6r*@TSoiNchaRZm$8y# z*Vv?-7ABiA%mjVESgzk&h8$YhhKw$zEiEdXrawja$68Mqq&Z8NNwtMC0%hTcS7U_X zOC^NI&PxgRe3lkUe3KPkrBXuA%0YHW_>7(0Sk7i%yvjC=zrczopJV%9>}B-A95&5Q zg7qB9qq4j2kngomP`zak{!o7pZ*}Xys@yMp>4yk}%J$(M2h?G&Zy*edWy0ph4E~-Q z43cCK6yKZ)ZQJ-+RxFFpir$B{o9}|%(s;Q0aUHa|>+$o}2AI1s26TQ@K`wg*<-_$b zwJ{$~M)|`%(^Y`q?}6nDav@RYIlQm_0RP;d!;6MoxUx9{WRC@d>4~$TrjY<|Jj&sO z(>)MtsRFm+YoO1oe4a>XfY{Qh@VaFY9NFy*`!;L=lWa{$3%ZU)rVS!*aV;`Vu$unV zc|gBujb)~D)R@BBIjmV}2^$@EkQG+Pu+3k}*b&=iR(_(J1+D01^>zdNdp*oTc710e zJAbj$A)|$jLvq5PrQ?Nf!)1l)9ixQSntzy<)ps^?^#EHrtB*Zh-pRJVsAuo5++qtS z7O;7mmzfGV%v?TOvHb;zU5qtlk&ovvm$zT2#JPF&(P4(x>V)D|#)UX1;5HtE`f#Sr z7|2Rifm8c*;M#99sGfEJzAZfiReb*D89527A8iNqxw|3ub12xIO@mE*Ucp)-5T>kM z0;`rH*tq5ZlzmErmKBv?;YZ-=)+7kf_5q0t8{tr!D|CB>!q%)DxOMR^NX>f4s})P3 zU3d{9jP^ie(iX@Y9Rx@3=fJl3D)noA~db^dHn&t3!yrHg@=vBQR~ zdeC`Y5@vr%!}moFqI-9AXkd$u(#EYtZlyP^AzX*hpi#v4B}`8A(dr|>h25&6KZ-?g#37Okw} zPXp5!c))f?Qx+GI&3dC^nEaSfHf`x?Hg$^|yC}bhJXXbDy-w{&VkAU0hKsaY|2IM^a;q)&zm~OWn z>c2Ze^YV>6BY6NKc)hBLa}a#c2?S@2BajoZ8%Eu51KnH4;aI#MM1Kv0R&oNobzGqf z+e70K6L@fo&js>oR*|K;utfz!z8VJq)Jb5`Rg6uGMqwYBdck*#4C3gMNiMQx(s*h< z4dEi`_JJ;1;jhfNF_``JuxBN=cC+&H2iZ|GPu9`r%Rc9yWnX+Qv*H`cY|uB2C9Y3n z=Qi`_xKZ)!rAG`~Y>~hkg40>k>}*zeJc|iJuCg(O7n$GTQ0CAV!W<%kS^m}#CjBgc zefe^T9f)^evYwcQm8!DpCGyN>qb%F`c$m)q-bnX^WYD1Wo%CtRFzMd-w_=||D{@Sk zi8~wC4I;V*AS|?iZ`F>V#pgy? z{2tIBbcRD0?V-2L4v04Y{u^$A$8-0?n#so?YVs+dcaK5C{k?GAaRX$pTL&JioFLo! zAk0613_2n`z+mDQSVb(sEQ;sr7tR3YstR7Y8Zc{|7JrSYLf|JkFvoB3wgZWH*6mgJ zPoNCm+%be|rAAQFk*Da(a5{IHm6H3MDNU~ppbE;@>7wWL^x*t%`tsHXnzyN)I?MOc zmH`_PVi&Bd2Vd1 z&sMhoDW45Qn_107Hx@1J%kMdkF@Lm;fp8fM4>V>AzAj+;H|Dd&bSC>bZyGBy6lHzu znrVYe2`!oxMI$zDqP_Q&sMLh##3R(1jLC4~dd|H;{wp1@>Do}-=@N~n9*M!*d0q3Q zZI!qw>?bbzJqD5sC&Hs0Q$S?@Bv^8L3bdH2fRmXfH1z7g>I7Z5a#R~`IB3DLt9r2U zv;q8CXaEN{&4fHF9gx1N3fn6AHWu9pkTX>t0zOQD^vdzzTq_Rp#|E*QXgB_x{06)9 zyuzhYZ8&e$TddjLieKj6!+L9Ym89W0tgg#9qeN}Qlb)Z(cziUDdUqLrm+-@h{01nz*a8Mo^Q=Mta>;SVr;ftA^g(b_~zl1G6Wz2MKX0Ybv6WQZeW0^;q6su1iq88l` zXpXXAEaXgQ_g(@aeyaySX4zmvoUAMJ6HUJxERbp{J| z1>nb#r?BdzD7@f!8O}~_!rouov2O1xJo@?@T-V%<1C9Iey0#8nVcLjUQ42ob@g3tE zGN9og2k%|QVUK1v9#*Nr?*5Okny>>e3>?PA3LdlP>&$;8iJ#^BUr zfjDjF7OdQ8g1=d*;e|iO;sWj?IzD4B3K(h@%&n~>=1hsoR+!MOZaZl~eH11;mF#%qm{W{cQQDhTYMJ}ez0C;LPqNXr;kqO-TAVt%w2 zQolBWWL9h8mN-3pKTH)rvsK0K2N&Vzg8les{w1t9hv%w|?%H$asabxmH}zU621Ruf)R-@^S0cD>$L=0=}pnj@9eWVl_U{ z=u>nAe@fkhvz6B2{vC6$MxZFxG|fh$f4$L;BL*n^S|nF~Y=xkC=NfWQ>k(OaSAtGH zI)irawdXn9AR2ong_i#+rB$}K=(7oYZHf2`x*_2^{l(|eWLGIL*R_*an8rl*{p=)G zXQRjhj;pan+q9XD^(?04Jck_#Gh}4WT&CBh$B?21)1pdDSzCz(R;sdv-YP6^yaGG9 zRg7I+{*Jy;Z=@4Vo>1$b4`_)`8U3#%fx6|Mq&2+mUe4nid3`&T7}qW*qaDfx@=rxM zV~<0qXF@O1S+9zhj4Z-y4=lz1-Lc0lsUEn9*FU%YiNp(E#9=3yTpZ5|$|KgJqMnlUv zX*k|AieE1V@aTt~xVg0r$EfvUyQtq-QEoIOCXa?U`~Tp{8$aPsDlPcO+`IVQwG!O= zvWMjSa-WS)8!6y3Q=TKSS#IdL#AB{B@tn!M zIr@Sp?$d~)tAMEP(csVO4$uwSG1O#VE=^oT`262}+FJO6w#p4s!+(;jC~qOyv5m}B2FOj+AZbLQV*!O|C4u&~pZZT+^8m1P^T%&gfgzgdUbH>xr> zdwEt?D#lXP`{;q-7gTy`E%h7AXy9NPJyI1ygHNoctE8t=@6q>(Lz*M`Q7J<3>>f9{=t_lTC@=Hs`q z(cDU0ew^Y$K@qk$D#CencX4z~8+M%h2RnF5z<5mw&^j*$roDgg`0c;(@MsapJ0%9& zzm5WzDdLb+Gz#z$5omGyiS6!x!dfeN9#s1|F22aKH@EBXP+2veZCi@Z=cM7Yzaz2Y z_@h{F>l*ww!W0i`%*XGS&c(`>v+;_(F>P0lqUVonrbegLDJmW&iDJ*l$`wNL zP9>Rq+oVjk#a`jqrR&IJ{b;QHat7YNN*kMaPsFqOWbmE)^0+c%9u6?uj`jNk@JG>5 zJYFpb`+nwC|21oIhl4e?vvS3r`$KVEbsSFmnS$*Tlkg;?XgneliM6H9<2&v#*z9L6 z&PcAnSEX*_3WrFLJ8UAA}z!#qt zVU@pW*gHHPKi+>4W81S>ZdL$(f6NVs+N|cEH_XD-p_2HgQ9aUQXVA58ZQP&ATNU;@ zL`dsX9TG8tBf_pz#KfSUERRy9AzrI#WV;KEIO{}P4y>Z%f32cMe!J-JSWo(Q$uZjA ze4Ku53!*wES7=l6b?Q`ioi3ggPq(D_QMI|=w0ZapHB7%k$NJ{djb$bDSAQA(akrSh 
zNX??l@}np@>PzEaxzmU#!PKvlpNd_IY2vFJbiPL{T^GnV36`wqn+8|Vus z3N}7}6PrxGiB)4#@#|}sanPxFyi()_HV=P*)vh+-)7njVmG%?dSY3t1LIn7*dM-}i zmWoy6vT?_%B3vF)!7J(W@nO5mSo_35oapc$J~Pb_zj{9nPdwU+Zg(6;y9R!9QC&UU zNrwqYJS&--%4a@?@4X|zCG+WMIY(NbzmhJTGKVUPOrzr7Q>b&bBE2%KNzu5)^wo^@ zG+uKJ)xT&!XV{FQFIGPxEvk>n5~l{@_N0MGW|WfkO2MRV{weY-ERFn=Y9^Lq9mLLy z5o?_UB9@m!+J;-nJ56cobzO;GYcr&A_I5PtpgUd0eh9LspAVuwOQoIT47@37r~ zlU$GB9+g0>)_D$d6T)%W!7yC>=mM5Bi^bo*CSd=Rt9XjyRjl2cg3;DgtgLetA2g1} zrlG;OVzoEkAH+Xr`*j|N{FjK&wO+woRwVwM<%c(PI^%_C4ZfAT4hQ_)jjKC-ans6h z9F))3ei(Y;#p)}t!u%O{-&jRFbE`I<@X-u6E?R|ODJ{jLgBRlTVnv+ymv1EOy@KS5 z{&8l1h{?;sR+FY1*SR(Rsa)QXse-$&ZAq|U4XJ)Mp8m5oq@n*-(EO?Pv>;svu8m%Fl7BRv%x5{8Z9OV1PBDiXyIi6Ao-uU$a2#D~8BZk!qUpH$P-@!ZPxmYN(5R`~=muuWH|FWm z3-Zd;bBr3b3e=#NW=^7`X7!Q2=xlPS(~~@0z(49euTMsNI|TKsqRay9y>KZW6TKW)rvHaGD%jx7frl{_ zM&iu4bo{&bI)1z<9dA}i#H;jUaqN*R*mYkzKATsB2YHq-MEM@}ad?PxrryHao%68E z_;{S|6@gn0MBr zo=P?3oMa5yXMT#PkE`E*PbW2K{z6Y4drya4YUuZQg|u2Rg<5V*p(`8G z>DAyYTI8BS&DLF@Kc;)oo!880#<59s9~GhPZ-0`uby75G=@|M=pJcmWA6T|tYK(~(p@PsV?YQ}NW5$v8wN2_KKTiZh0< z@iR^yexzN7qu>r!u&Bn*;wo{*vT}U2Fdr|_%)|Y8d01aIAFr64iSHG~0U3audo&GK0BAMn3BPMmcP(?r$Hp~_cwa|x*e5BMx(b51Dt377QtDGg~ajG zQDWr~M9Sv)6O;0A;&bT+xp?{mxo}OLKKNoo7j$f=Zv6aQ$S8q^$;y94Woyw zlIgm+Ih2gerQOO|G*Tv$9&YA!uUa{D!bmQ)^~|UFn@i}jlnUBoSxSeBbLpigadge8 zFgjBrfX2V^rLzb9XvAtiI!e@sS{&I$x6NC@GvG6*tokrPd&)@Dy;L$Xx0ob;y+aaZ zb4c2gb>#f#lY-BMPq?I$2vvHlMxDjWQ1qvn=#;r0e~qk19Yt{{`^+cw?ZY%YbY2hN z;F(mz(_^qz#4mK?{cohdP987xnujB#9q{JM2XU*28*ZMs1$*u_$C9^n@WWy~ypx}~ zHN?*0@!kpeB*f!gV!rsK6UMu16>!)uRjgxUhhK>X;f-PEar66Ac-%2_yuD`x)qX8T zy6yzI3=N`b|0d#1Ulnl4oIbSRCC|c+F61PuhAK9=WD1;qs|lo&noa(GDKRq-lp3y;q<>*F|a4S0P5U=e;>y_-8fsnzEjn^slFz zFRY|@jm@a~PoQz*Ea>7kGdd^7och_A(S9(cPD?quaKSu!BV3c}KN(NsHcL>2ucGwN zy5Gd-%vZ8F?$GJNhe$h;Me&w7h=2C9&?^flzH z9E4ProYDN86DVS8Jo>jK554hCL<76GqifPy2+BWjvYX}6=3j@9_HO<)6pK+6d4PW2 zsY0tBWTV2<7g4NR1Zo_*j=I7-k!Hgftl*%D@2BZvu?G{d;-EZb}B$(O7D>W z(P=mt^zpOplkl;cIwXD91_|mDx%nawxEULb(V&VC%E$^w-FdrF(BzxkYqKoDm*wun zeswINVNOJId70qjKl6%I{StvjX&`Z%(nO5s{~@Qi&*X#o1ELt3Pxf!iCHcP!$eF}S za)tGhy!BGF{N^~iwpx}p=6oey?)4;M+#Axfdo(S4qeRmcHK}llA`O`)L9O{dxzBgR z=%&A;=$0iSbeYB=(OCDD$fbQC?qx0H*!*JhH7A_t%sNgM9yv&yUmqm?A{L~#;+0@Y zqJ^Mh@3@LnIp0mKZ3kGno;ns4AfK;fvTcXk^03ZG~G@D zH+M|I=^;{B%$XsraTaKMN)oqb_9O22^JQp4!xhw@P>D_|l%NN9-O=ESIIh3YPe23f z1<4OX1jD0bxnVC3Ev)oMMe|J2onJS~Z&%MHF3KU~@v;4c#(WmEX`ipC`7(ne#!(e> ztyGA#ZYnWMeN8%@hKb|8FJyw;6LLXNN9;V_lQ$qs)eL7*7cEQL^TCQH$XZa;VoYV` zETX1=7Sa4qOQ@Ho75!IkO;s~(=*wBw^evzN=~=5szt?C`i92f4EK!vjKbNNmwn|W= zb3MdFkCF!USVG+PlFAjf*^Xv49z#Em=AqZ~M$nE8YWTVPEIe(h8vYSF1}B~q!5))D@x$Ya_?`C> zJUwhX)^^;B&9t0xa*rj}NYlr?AN8^S6JtC}Z3Uj9XM;}^EW?W@EWi)tRq*Z>39M!P z9wmQmM$a-|pw;9t`ZBp3RSld%NmE48Oe;-J@{mHs{$Fx}n8MwHy>j;il0CD@hpl`d z%dm{B`NZE#+P;wnIWcOi^NW}}4Uz4p5_Ep(1bV4hiw?YBKo9cVzen2wx_{V+x)v>@ zZ>5YW-?~hr?ikja}x>9`I3WLIJ} zk-nQkc5k{&<{rI3t~7@cvrYa)#cds*6H+0DnFE4Pc3Fa@IbH&vd9wwr+QEX7O{Id5 zb!CF1qqGH8)$6$YV-kpbG(kC6r=j`nkGM@SxtyNkE$;n1F;x9QA2t5kfwsz?LHo<& zP?Ta4n&uOO4!u8*cAH&B2|mea%ELnRgWN~+gKE(9S2aiqHz70cw`khr9u$hZk>kYY zC~ICNN*l>Vw)6^Ga3=@F?XE<(lkcMSJ^5%#c{EavwnHa=cX4aO1Gon(e7VKf9&s8; zGH9aLKd!3bD%Z2`P5H%SDu`PwN8%Ob$h&1dfj3=Gt+$60z9Z7kU>bDmb)knLSn{#C6f5Y-Kd^Q3|nw8AvAD5w-sEL_61pAk`}&$g(#Ko#k_x z(;s-CJKuJqioql3^r}dtdhZ$Xv}QpWBj4boGH$|NPh&cD03!@#)UEpxdri?M>L z1C@f)bTmo3og-L&v&7`s&oA8IQ3GVquZ~{6F5wy!msY$mEfY-hDHT}Bt*DR=8sYvX z9zm0;kD{~9x+q1U%qhLtE703JMKCzmhWl`TIuhS@5PjD@guFW~&;l%nR_VELhm&=U zMVH5z>?}>;MD&%>{!`o0`0nkf+}#BAei@6jaz;3!Ab}iZ$Dja}$>^DpJW}Z!i+-%1 zg!<-5qs3aYkX1fHKkgxPN!|#Zux#UYrzmq9TkDONzmecZ?0g8MT`ed3>1*e-_Ivugc`Yo)&OT 
zg^gU5g)Ca}A3_IS9zd@|kE2)D4kMeA{pcCb&3ry$kMd*BAda0S{}mXB7nM57~P1JUB@lW2M1akN1392)Vvh`Ov|&;s{( zR9Wne?5ow$wt7kQ-9`hM8aSc47ry9?y(bzUV~rw?XrhcC6A>;|K@&yhqV8cGbmsN8C7M3BK~p)XP=S!rV0|2N(2$H zU4l*D7n0;CPx5<4G{N45{Q1L8;umy-yj?>`x$%AS;dV0_7uP{nOnXa~7Pk>;_K}Pe z`$O7~iqm;DV${c9gq}+IM&|Ip_x{^R7Hw)K()ZqyfLWi&Jg;wrE^jBlKMTm<_asuH zcbP;Vj3vRhlSrRl5oyaVBkdj8=F4D=pEz+ym&a?+J8&u?MQD^+rd1BG92L>FD39+em5kLsWCN4oTQI zqnfoZP~zrJKj)$ATb1?ByyF?0}#_kKpFU-zMqU*FJ$(PFr$N(#67Na6asqIiSW zXQb)bfg0B~p^X1*mZia$xAqa=+QQJ@J9lj)K%3RFIFB0acv0xdF~OxtHoqpjOjX~hC< zYVbsjI*y)7`_!dr!p(kCl+sS@?sO6?_l=+*L&R9*GdZaEn(RMaO%|CH5I7b?{yhjM zojvEt^Uart$G9l6#(oD;kJThWbppY4Nh}aI5*IuTSXL44Vo;uN-OCnLMjzPyh} z=4TJ;H}60)75%6)s~>$5{e(uZ>_UBhuaT4u?^7x1M3YGm3QO%mE+M_hv-TwliLOPy zn;5bhDn$#v6(ad*>1f*T1SIJdf_{J7fL29dr0{hPI=aCSJs+!vHZ|Snw$briB1*5= zAaPKzhu#x}iHi`sWs}IV)w2mcgvnVEE3#(Q1|n!bNaSe%d9n5qxxFuztm{c8S084Q z*!*NNe>jO)I!2M4RY7FwcRymd;Vj8ocb&Dm|_sKi0C&F~n>^a&zwhffgTb`up}One8l z$%dH*e8I+ZUlY^%-c>5fx;kuY~Z_`Do2|4wWbypc`+MP(s~5u4VHd&S~`+v|461N?tM( z%~g;^6W%`O>LmO*=MUjr$mKvz&SeQ#t~H+9r5Kc7b)FR&v5ZBJ4wa-O89B#=e=ndE49F8Tg5pKKd0Cc8i1BN|QBWKKv0nck5_ zt{3Ex;Te>i`uvCx-Fl*t(L{U}G!e70cgf4ro5Wiun`{8wcczl-23dj)vQC1fswx7_H|BzUQHKRuH~K09Bet4EdtB$-cgUbd=ZUDWKod1e z&Os`+vr&D723j&v8_hd93rTo5AP?1J=+=lgn&A_GL`3|M(9a)zJ>rWTegq4(&o?^7o@#kN2ZTN;W7Nbdaa!05{50z{##`;aUnlam%XY(9+dYkZj2~ zeh%p7G}oqanQj+3^A8(17pt_g=U$ry|DSHQj>;5V8weL%ToWj`GZH6QU*jQouroyP zV_BDAjhZo0l{`nX+B3);w`*k0XHPP1`d*UqE{rU0N+Kc_#pHBrB`J!g@_*^859D+%vFA%fs8L43>+=beMPxl-#9_ zC%PWwn%(2MUuws>DN`&t-%HWl3Y`VWxy}cjD+@ghw2Hn3AfHc28McW65(X9!Ok*;G2YIVp!Re_=Csaqa8DP4oi;)~Fg$9Ir=brW)R z=6R)aHQaX_2ec@s6^VuD<8K^?U!CeejtftqsJs41d6OXuoEXQgPDn-tq8^7t*({qLTIY=E{Oo@M(T~}JX?wp` z81Izg{#rjJRoUC9w!STW-)Byb6`0Yb5gdK7v7D^*&nKDze9d?6HEQsnoO)Xo(rwcm zXlJW0*(dRk^V{N$Hgt_8E9*AXFEW?u38^^xa6%{zkDf*)@5>76POU@p++C2#(r?_~ zJXP|lKa||mydwCeZG?uNRUp5XIasiG1Kyw5kLDU}GKsvdgU;$%;-ZWU{AwM=%_`Zr zIF_$sT#||nx1GSQ;TqWelL=1Vy#u?OUBS8oO}OxOIqng0$HDIMIHGkPmgDwgp}PSt zO&ms1=O3cTk$z+c0kcrKxQ)qpeELn4J6+Lv{ zhc2p@qA5)og4f}nP~LA{oH0R#%$RwARyR9S{jsXlt$UD^yR9U4hw{mbTwU7e*-EBH zpCVx!-ja5w95P*^fXHo?qI)hjkOj6PR8yDlt3{rJ@PNaa?w?JhxZW5k1?n z9^JP@NQMt-TiNgx<-e^_+zETZJdHLJk;oS)DpVbxSe<~Hi$GvM*ueFv??S47wNSD4 zKji(!7K{68ViEgS=2TIU@ETtA&TXiCgP{c z3-QM12k~gW!tH354-QG*fe(Z%#^{?N4qw%ZVlY3K`9yO6`c@;ONe%oYc?Z=^kwP8I zb;+k+>7;Su5OKYvL)UEEL;cNysbcnFdV0xbYAd##jtM-rg;_`%onS-9hCECnl?#pB3i6})Ok5DH#LkxpeAnEZi zfBg&mU1~Kx)z*o6_0M4=V>zfA1W@uc1u@wfV6s*MCU261fN4r_Vn7ehEwq9;{XBcO z)CcxY4}^X@S7_=ng;T*AAo*Jr^zAG`MPdhh9bN;MF6u(%l%Lops}x%a&f{eV^|4xd zFq*$*L3yx5CV48RLQj3aM3+wxWowqtX8(<`Wbehcvajh!nN^z)+q!{wif5c>fgaK9 zrb`MFk<4LFbaI%LXddHk6|lO}O!mzB8awkog$bpSSvPlyDQWmK$9Pw^{)Qv(T()Dg z)NEMl%$cmENsg(T_V7H`1*&+&j<)~9v{K^(d6(u#g2dxd{hpIJG%*-=FOI}lJRV|0 z-FMi~a|j>O5rYjx3JQ4sy~l8v_da~YuM5Wk6wL+)c@8`bbm3{qVhB3v3sVDpLFtDh ze5jblGYu)YLGCr~)LI8({gKdpBo9>C6^M#;gvv#;;gX9oWF{_wV?$@4Q7IliTf{@Q z_yxH5_$1g%*uv?B>aZza3SO<410Bw%LC!iFf^WIOKKl{;WSl+THie-eqpA4u`wO_f z!wVaT>_`5W)#%LH$Ml$o7*q6-;X88XGMaLjSH^I6o&Hj2DwjSn8M(UwI_u(FsDAgmn@0>Sf8c;Qxc@@bwPcTKI;o+4F_Z@O)-}dpg+M%;#+D(kDz=ql&K^FJl^yGT9cVtE_?d zu~d~_VUBv2nIL>GyM0%M{hZxK#b;cjURk-cU+NZRSA6KG8;eNC;>GxI=xv+?MR?|y zJbX}O5YN~$8p=*9Lz#^h9A6;@Ha^d>aGe5-ki($5FdZ&7Wxz}0EqtF*D;7Ihh7H2C zVQ<%Qz6&Z3!gK1Np|2REi=yCm&{0^w!w|%Gu7s9DF`)6~7AP2hhRMtRK#u$^7z+1= ztv|NI!uihd-`0z;x9XUj*r z*;dNm>>#pFi`OYXg5%75}$`<6s@>}n3<=2x&N zOTrpd^Vo{*7nz~13%ewv$?gQcp_*ckslwS>T2g+VZeO8GQ(x{QcTz{;vqw^KfbR<& z?bV9!KPBUm484>4VQPL(sHd1;+~x!-VFo&}HZh2YZY_XKE3~^JYN5 z6Q9pXO@QLpIq)Sh4(PK*aBeYQalFqKzG>}(LNmVJee6q!xo{nVTXR54IR&zoc|!1^ zV6Yi`73|&fA)9*!Uw$>h(nA?w?HUe875$;VXALN8|Bs{dj_2xq|G1gG_m+@Rlo8(N 
zzK*swiYS$aiqanH)1u5sLJ`SG8A(ON``p)2A}yt%9ZiMOQYy;#{Cv|m#8rI?cullIfAe9C*G?#l zi-FNE62QGY5-#C<*sHn|qMqeKdU_!|ds6})T_<5u-hSBa8V9CFvqApSA^5pD85+F~ z!eHQP*x?%r4t){u_t^y4#>>Ia7cFFW_g>3O_2@Jk& zfv--Bum(0@i%bg6Yfi=8E@=WkCJ!CbDsix~1yk%_;SQ~KJePGJ-+XMq^LHt_iV#IM zjd(2J7WN4avt{k|c=AUv`q#%{e#jF1STAIHj<{j?ze%`Ja2oY!E8vEbQhLlVmOH9Y z%PieIoUF5)Mci||h<(svBKPb9*&(>9bb07qfgUh`rMtm#dJCP?13WM4B6eG~h;RYQ4fLO+A+{&zX{~hn`9&iw82m~D_8H==0dI5~vI(=_ zCE^_+XV3WOp_ahZ@TxtAO+hEIcx){ubY8*}v=XJC=A(@92{e6-s5j&?j{K01QqN{! zKd*<5+Vk-5igjrADqQejxZ!bOR%QRDDsI_FefL#`*K0$6m2qpY-4WUCuOQhw^kK3x zf!8aoK)qcNLXQoHAb~}-CQ1aSi#=i2-npRmGzG2>6P_W(eK5<@5?p@{lKOipkRjs( zHP3c|s9_B}7`_A&h7c$$kc0(hUF27I5BaY5loS?xC39sh!AD^8oM{o};X^yfos3#$ zLwY*1pgoNAweuv?V+b^~4iX91a&oa6$P{mNl6Pby<6yp=d*$#-?0V_|S8$`k<3Xo8 zsSj>rBF5@dtM5;!=_f6Gk!>!_-hQM{{GQO0adOysSPkuF+vC#Pi!ih^5r6#6K=bHK ztd)$#Jo6=(c5wlY?_GyM8R=M2UxC*|7x6HkiE*L9cqL~H?kXONZ|z6naAUx6@+0wf ziVFJc2z%UrrF7@pDWX+N9m$j@8Dytn4v9SMNY*aMCY#R7Amcy7aQLrNX0Dwjs?2tJM?SG@8K{*JRn;YQ!ln8kE zVJ@g%8VgsxszEW^O`31$fZY~Dn7&*AtZe0>V2mG;wQ{9jn#OQDS}!ox9nYEI>u;E{ z#%tWI)G0L1W&@3_tfeOp$fC(A75tR-gU)|-pT14HMC0X#VDXJn7-TR9Z){zHbBcX& zN`;V>h#!MGQweslK6tf!A#$(6@L#|Vfh!b;9qya)c83ps+To6m{xSHbYBhcoJWkqc z6H(>LO59}NiFPvD_~wly>P!5kCsx@AyrQ`%{_Bd7T9TNkw2iY?Z)VCyCNcje-xM#~ zDx&JE-00dfbxit3A@}fmF3gnK2~+?1!2GAWkTc5!_Kfm?p(pmjBGqb0cCP^1T?+lo zIe2ef3Ikjz+__N(Tc)0Y>FO0AmtPB8%TK|doIGf#%mc;usgOVKkZ`NA4GP6m;dA;# z_{%v!=w_jwx~B`rCbtux@A<^#@OCmKdkGVAeHvZ1`3Suy-b&l=ALO2U=5vWfm*^`K zJ6z?t4F7~I#p%sHXsPXq3Nt65(v}eX=pKoyMjpTs3y)#wkkk11N*?}8IE?Qv$K#Qs zyRbC=0IH2C#Px5_;`V_`>_`ygo&~FMaJxVLsh^Kgu_1W$=XzAIh(P6M8!<|AF$&8p z&ibT?atl)Fly@rBa_j->5TSs7@;z{#@JvQH&*x4Evy3SwCZMZk1fS~0kb%deh^o6X zxo$?uUGGV7dSDkU`kMod-wWWRP0Ke5n9~3gYVJekuh($!)O(PO;NfRPE-Z=?_&a$sz;fqQ;Qa$&2hM^` zA_lrQ$-=&yu7r81Kp&i4NJ~DK3ZBCobW6o6ns!nPoj2OzjL&m0=U@Wb#TBE^{BnG< zt5~R~b8uvH4o)aW{BZRJCP%+Q_QPW=wz`2`$2r_H{063p9-!B&>nN{(8O^vF3>H^m z#m5R9@u&a;W*)&^4e6+r6N^Lti@->QP`una2@RDP^lLE0DIcZKM5~Whp8P`#zZ|Eh zot|@E{$IJHfel>8$hmaZMO|*2Q9N1x?iVqXP=kqKY7irD0#c5xq}`)~JlAoCBcdco z(W-{Y#J;lJ5c;6JtlYF7+l1MOAWtVStTd)-@@J*Ek?zh8vZ zxp!gb!5^S%ugEUEuE0*NxDOsO+hM1iHB7$e5BL5=Lg@D-Sd+XB9z`p{kPWBExL2dO zGWI9U->QTP-MzH2c7QILERAPEM`Fc<1U!;diFQBkmZ&!+lykYCs?l6jE&jXaqsf0h|B8ms$c#8Gch*&zKv3@HK@4$2%b*biI+dF z#J(Rl)cr5u_FYF5xdxzb_45uF4ezF_m^1q_nxNkTn{R^(j`GtNHf8nUCc5E2) z2pfai(N6m@CbzfYTcdl(2IQgL{#cB=u?On|giQOwBy`H%hkg_0;l&zr+)=88pZQLD z(W!*aGnqo`=19@rNuE@6rif09*TBs18}zw*FbTWh3ZqtpLgEuA$V=EyoEMxGZ!*2i zOfS)f$jUvy$Wj=7s11glxdmF597G){1%uN45a%YG-xR(G`9=*^W6+)*{5z7Z%93VR zzPk?3?ZvRy?iytNmS!Wv&DmF7hHOo%3j6(i2mJ2Jgy^JsV6b^2Y%AUZD$R%BW=}ed z30VoPcgsn_o`p1RtdPAsAB+yZ8d%|VovPUxVeLJ^5x24)r=ICRyX&3kyP+37SA9mm zu#adZb_9 zHXg!`h6C7SeFR$+gv@%=M(o_oU`DkX=FIs@LnpqW?SCH9g5ekF#iH%h-u58vOv|Nh ztK(_x25S=e+Z>)hn+jXj>Oy~kA~{VjieGO#$4J?l0N-=~0uEh)m|L$w?Q$iU_!h%W z?Rc=>8wvZJYaxjL4Rob0JN(K}R^MtEdz<|X<1gKSD*^XFanT!?D6hr-(llldFI8pN z?oed=_A0PDH15Dxm0UQ#D;mNpW`N1tM99(10ejIA;ADazDxiQl-S~$pTqPK0uZ>fr z2k5Y?Rv2Qt7QdSxM*iJJbeQlIoA>sjqMsUX+or&m@B4+L|9-+#v%2wvuMEFrlNO(! zVa&U5`uut!gI@UN8^-d_(DK|HTx0kao8R3)YrO{SbP>+ZFRSr(OBS{Vg$XsH2lfhe z@kCD-EE%xE{Z&S&ceIJ7=?2s0jmhE~Juz2Ox`8Sl%;W}+u45#%I!N~{f0!QZ3LjS1 zlT^)*Bu7#i2DkkOCUrNU+o%rebt@t5UL9PTUJZ(YS786ckFe@xKP({f?6;F9?1D*p zEVE0MjdWFDdo91fB)fOOKayjk8}(VLJ|zGzL$~{f9DYx@iC34LW_}QF`B1nL6&@N?STZ>G5A? zG&Zx2?n#QFqn(8MenK`3jVOV$7KcG6UXfHS{Ku{P%#pf-p>SQl9b`VMv5$L|*s?|6 zKxyG^p!<(Q;F&zg`+E-z%q7?*Ha097F@t^chGorTm010~^|0~M3D`6HHnh)BW9wI& zu&TXA>8Q-a69|MgJdrwi|Ea#249{MUurG8;ezC8cRSA#DP z)aQR%oATd(81bLa>+wru4ETj775U9Wq#P)XIFN`6E=g$R9F6He=HXLI5zbnshPSLbsrq(poRMdXL%hFI=KWXEz~y<+Jt7_! 
zSKGk-89k&Yz?2;K4JHzSGvQHS3iu1W&B&8-?BP+paBt&#D7*X>Rz1H1>n^{6uY(%w z`OOCG$7>9G#(V9~ zH*6nx7;J)B=fWZF&Qf>|>TvtkM5aFC7L6s_@LkzKTzzFPF3*TW`6CB0&hsL^8}#SQ+?asH2csJ2TitYat8Mnwz0m}Us;-eSUlqVob{w43Oo!3Gn!st)KhWGY zg!ReSV-q4Z*jNu4_DO&eyXcw8Ty+KhlB^p4IM;w@t*!a}Z$|K8ldO3QRTKW( zC_P@C)#U3_Rrt8mGQ7`!-|@xjKJ32q8x2=?qSvB3cz9MJo-&O=zm@|SXmtSlzHGvD z^<@~b(*R2zT%fa;=F!FniZuI-4(*+DgDU$J(pTPtoSWGrrc`4Vj4I-3^Hp#>t_o5N-}`)W8z3htTRz? zv87?NbLhmDqqNLvGIkrLHRov4#H zfIhnv_%EyUdFkUe{A~3ReDGHr{+^*V9}%w4U!JGLZ_JeA?c-&5&*8$lq^7{v1^mN< zLhVqvs}Ap$<={HY3{1M3iq4HY(ctks#I$ipSLtEbpO^GcLneLwu!K`y_sS#KH;Z$9 zJddvU+{VRL2a!!)TVd29F-V=_pdf{ZsN+#!*XIxQ>rX&I?L&wbo&mK}2CSux9{YTv z70ZouWF00CV{0}Vv$=+5tift)mJT?x!e# z(jOsU{TUEj2f&)IFG+e}KKal#i^MzCa$cmAM(QeH{uFPNsXc(5AH}%z%m)k*zM~5t zrTEX4%KVW7%6#}vb-v+-9>1!|fS*2S!WW-5=UqyT`9+GB{F-iieqzB;zS>KJkDM;W z|JmAwqRb9lG3o`Tjk<+%gPH_3bsdUN=A)I-4uQEc0~ehhj#iDj=vE+y_H%{I{D(3+ z!{aI^l<}gBE@je?BO;n1al}S(D9q413`5qOf{iuhFk2Y8vb>oC#byY9{NI9;T_;Tb zC(WKs)n@~YEZ76Wu==>cG3*Z=dv;HsJ!`ID%MvLYb_Fw>-MZG2y?aQ9ohiJBTTKU` zq*8)aO8*WUC0>KpyBe^)zeYG?4TaAQ$;7q%EEndxj!IQGQPUOL_;&MHlrm00ll}(W zPQGK+Ng4ixy#lYQsmPoBr^m}lxq7TrcRy_1eH}6zj)SSu5%{cl99$H)LuvJJ=<&-YGq-N0?t?lw?7t8!ZBD`A zix1!*heP0=>M}DA127~j-$1Cop=krRc$!GM#_Y9RQ#SCD6U(|zU_JULvhmye*$LsZS>wrL z*#f80?5Kr0Y{JAL?AuOxc8#wp+vcFcUKla}lXu<*lW)giGP43+TTTIUc`cZ5cqSP$ zdz=R*U!fjlfa)|&tF z)R-R{qRX2aoA6xNaNg;yJwHKUjD2}z&AX==@g!Y|_uBmr9VOr6CY6WiGqn{D-e^G~ zqJYcJ7ve3cz4!uru@b%t z@Sn$84xP!C%$mSHbsodg2s2i4iz>TutTby{(F5+j9WYtzA)IPC18=6pL;V6bSR7hR zWNr>27I(9`i2^J2SKUzjd_Ec%NS5PurCYdYeJ7q;_XR(m>BH^s<@jj|%6z?!4lnz_ zjIU|2uQ^Q^l64uoY|i6mi+nsYW;dqZ6$zehYn*Z49&b5M!40ticw*C6+I3(K;pVtN zZ`OX8bvG8Czg`CU|Lun(!DnFH*}L%H+~3f(MuCN625j6NJ62)68#~XHuv5>CXTv5< zV}BXVX6MbG$9_@qXJ5*CvDtdA?31-tY^k;d`*5fw+r8M39lA)7HT=>8>Hn@kTv-Ve zF4+y`UVll-Tra}r>2NRCZ=szArBtDGBo4}^qK$bO{_PQTAp)PTck*34@~sGBt!YVipXdc2&CA%FCn3V-|5 zApU*$4O14r!87NdqiFSgG?f-(jOPiIu1v>M`y+AvgCOj8@IkM2%P{rYTFlAv$7?!Q z>9X}DfkUY%w(1UWM2{Z@~D56gw(Vi}hP>#m=2Gnl)VJ z&OU$Z$sP}%DI{}z*nu6>*`UFRY?q%S>sN2Wwsq;V?<#cJpmiqfoi-CTs8pU^VcZER zRo5XuHV=Ny-U#lWbb*OnNp7r_6JL>ZqPxcz(q2%6%u$~iK*}49c+0gG(Smv)6dw1b@RyJ)o+t{GN+SDkrbw%oIv8OTXJkFSXJ5-r% z=Sb8Ar3kiHgh7*VUv@Zr5h;39$Q>jR^hU{JYS?X#dhOo0*l;^`ho#`p zU8itvL?dq1`G|^_CHRAfWcic;WnQ{QjdwNF=kK{&@NV{|{LVye{`y5#{)meTAKIqL zM+&v_Zfga;;k+b&w){6v4*i6GhQ7h1UH9<$8R5?GXgSv6kP9!zQyB5*1}I-`fy(Sh zkbd?H%$%jbp5A24Zi%#KlVe7+!5Z%DOGm<<;zzL?P0ZO5!8+{m-RkT%b#=CiQD0U87?My4;a`g2`{QJ|^bFwgoC;F7I+dxeoQvMxCebC54Op;G9_!+{{gack!nn=I}YVv;86jJrFzt zZEc{ptP58ElxKwr5jJh85gS=x!_G0WXJ0nhu|J=1_*P^(JMiMzK@2RE;qBez_y{}Md(o>8qc!u2@ui?I9MX1+z2v@FHjr^J^ zxJqXVHv5Xu?e%cXQq;qeDrNK<{fsvBDbk6fVo7F+2%@Cd!iBOJ7`{FW*tP;;28M^U z*mi+m{R)1QxA0p26S$oH3CUUVtmQ5}_WXQfc4GE0_RowZGuO&B7_ zQro}KYT5%nidulok0DcQyVv zqs>b*y8O?04Zc)Hp3iyLkJ{_s9=xE#*00rL$Ez8$sReqh-APRr74+C=(+$|@_Xey= zh7MaSBgI;qc7SVb3uL^lf`cg+;CtzLV1#@6y2vzWD-DFknR1|KkxBlGh!)+v6Gamz zJ*J=K^|8%XV00f(!mBCecw=rIo~ouO;aZC;x7VYO!$TZd+>J|7f;ax4z>k@r!8_Y& z^4~-%{JnU2{tK1lRr-IT`kW8=aZC$Zl{BKytXllgbQ0Gd%)O>*vAH zoQu$Jcmq;`p9<{9H}EXt8@#NMW=mcuvTK{v+4gVh?9ymuw)cSoTb(b<<_wW!dr*Q6 zkN5>opc_2ay#Q`}16&?a4yJyY;CE&x=wF-M z`jNhT>wy!Jcj8gi4E*}wJdPITDn>{&quZuuIC|L&{HFB=54C;8#1DV)Q=25ebgV32 zQ=-62^{erlRttT^3MHOPljfI<{DQj|w4$?|7%vI;M$5LP2qC{D?AW*$>pjCU^+y