diff --git a/.env b/.env
new file mode 100644
index 0000000000000000000000000000000000000000..14b588ce8527c58550dcd0eb677d5ff8abd52708
--- /dev/null
+++ b/.env
@@ -0,0 +1,8 @@
+OPENBLAS_NUM_THREADS = 1
+no_proxy = localhost, 127.0.0.1, ::1
+
+# You can change the locations of the models and other assets by editing the paths below
+weight_root = logs/weights
+weight_uvr5_root = assets/uvr5_weights
+index_root = logs
+rmvpe_root = assets/rmvpe
diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..713e8166f0a69596b47f0cf72a6318c8a221ab83 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+lib/infer/infer_libs/stftpitchshift filter=lfs diff=lfs merge=lfs -text
+stftpitchshift filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..4a6ceba4ccd7ed6440983b46692b1f465374252e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+ffmpeg.exe
+ffprobe.exe
+
+runtime
+torchcrepe
+datasets/*
+logs/*
+
+assets/rmvpe/rmvpe.pt
+assets/rmvpe/rmvpe.onnx
+assets/hubert/hubert_base.pt
+
+*.pyc
+*.pyd
+*.swp
+__pycache__
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2fc2437794fbc0f60327c928e8c36fb1a18eebc4
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,29 @@
+# syntax=docker/dockerfile:1
+
+FROM python:3.10-bullseye
+
+EXPOSE 7865
+
+WORKDIR /app
+
+COPY . .
+
+RUN apt update && apt install -y -qq ffmpeg aria2 && apt clean
+
+RUN pip3 install --no-cache-dir -r assets/requirements/requirements.txt
+
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d assets/pretrained_v2/ -o D40k.pth
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d assets/pretrained_v2/ -o G40k.pth
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d assets/pretrained_v2/ -o f0D40k.pth
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d assets/pretrained_v2/ -o f0G40k.pth
+
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d assets/uvr5_weights/ -o HP2-人声vocals+非人声instrumentals.pth
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d assets/uvr5_weights/ -o HP5-主旋律人声vocals+其他instrumentals.pth
+
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d assets/hubert -o hubert_base.pt
+
+RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d assets/rmvpe -o rmvpe.pt
+
+VOLUME [ "/app/logs/weights", "/app/opt" ]
+
+CMD ["python3", "infer-web.py"]
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index
0000000000000000000000000000000000000000..c1aa58d41b9b642c8bbcd5c5f2b26d0792914295 --- /dev/null +++ b/LICENSE @@ -0,0 +1,65 @@ +MIT License (Non-Commercial) + +Copyright (c) 2023 liujing04 +Copyright (c) 2023 源文雨 +Copyright (c) 2023 IA Hispano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to use, +copy, modify, merge, publish and/or distribute Applio-RVC-Fork, subject to the following conditions: + +1. The software and its derivatives may only be used for non-commercial + purposes. + +2. Any commercial use, sale, or distribution of the software or its derivatives + is strictly prohibited. + +3. The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +The licenses for related libraries are as follows: + +ContentVec +https://github.com/auspicious3000/contentvec/blob/main/LICENSE +MIT License + +VITS +https://github.com/jaywalnut310/vits/blob/main/LICENSE +MIT License + +HIFIGAN +https://github.com/jik876/hifi-gan/blob/master/LICENSE +MIT License + +gradio +https://github.com/gradio-app/gradio/blob/main/LICENSE +Apache License 2.0 + +ffmpeg +https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3 +https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip +LGPLv3 License +MIT License + +UVR5 +https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE +https://github.com/yang123qwe/vocal_separation_by_uvr5 +MIT License + +audio-slicer +https://github.com/openvpi/audio-slicer/blob/main/LICENSE +MIT License + +PySimpleGUI +https://github.com/PySimpleGUI/PySimpleGUI/blob/master/license.txt +LGPLv3 License + +Please note that under this license, the software and its derivatives can only be used for non-commercial purposes, and any commercial use, sale, or distribution is prohibited. 
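A note on the `.env` file added at the top of this diff: it holds plain `key = value` pairs that the application is expected to read into its environment at startup. The sketch below shows one minimal way such entries are typically consumed, assuming the `python-dotenv` package; the actual loading code is not part of this diff.

```python
import os

from dotenv import load_dotenv  # assumption: python-dotenv is installed

# Copy key=value pairs from .env into os.environ (existing variables win).
load_dotenv()

# Fall back to the same defaults the .env hunk declares.
weight_root = os.getenv("weight_root", "logs/weights")
weight_uvr5_root = os.getenv("weight_uvr5_root", "assets/uvr5_weights")
index_root = os.getenv("index_root", "logs")
rmvpe_root = os.getenv("rmvpe_root", "assets/rmvpe")

print(f"Model weights are expected under: {weight_root}")
```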
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e1ce27677fe21c85ac4f81799a739a19050e47af
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,63 @@
+.PHONY: help install basev1 basev2 run-ui run-cli tensorboard
+.ONESHELL:
+
+help: ## Show this help and exit
+	@grep -hE '^[A-Za-z0-9_ \-]*?:.*##.*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+install: ## Install dependencies (Do this every time you start up a Paperspace machine)
+	apt-get -y install build-essential python3-dev ffmpeg
+	pip install --upgrade setuptools wheel
+	pip install --upgrade pip
+	pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1
+	pip install -r assets/requirements/requirements.txt
+	pip install --upgrade lxml
+	apt-get update
+	apt -y install -qq aria2
+
+basev1: ## Download version 1 pre-trained models (Do this only once after cloning the fork)
+	mkdir -p pretrained uvr5_weights
+	git pull
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d pretrained -o D32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d pretrained -o D40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d pretrained -o D48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d pretrained -o G32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d pretrained -o G40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d pretrained -o G48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d pretrained -o f0D32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d pretrained -o f0D40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d pretrained -o f0D48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d pretrained -o f0G32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d pretrained -o f0G40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d pretrained -o f0G48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt
+
+basev2: ## Download version 2 pre-trained models (Do this only once after cloning the fork)
+	mkdir -p pretrained_v2 uvr5_weights
+	git pull
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d pretrained_v2 -o D32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d pretrained_v2 -o D40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d pretrained_v2 -o D48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d pretrained_v2 -o G32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d pretrained_v2 -o G40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d pretrained_v2 -o G48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d pretrained_v2 -o f0D32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d pretrained_v2 -o f0D40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d pretrained_v2 -o f0D48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d pretrained_v2 -o f0G32k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d pretrained_v2 -o f0G40k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d pretrained_v2 -o f0G48k.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth
+	aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt
+
+run-ui: ## Run the Gradio web UI
+	python infer-web.py --paperspace --pycmd python
+
+run-cli: ## Run the CLI
+	python infer-web.py --pycmd python --is_cli
+
+tensorboard: ## Start TensorBoard (run in a separate terminal)
+	echo https://tensorboard-$$(hostname).clg07azjl.paperspacegradient.com
+	tensorboard --logdir logs --bind_all
\ No newline at end of file
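The `basev1`/`basev2` targets above assume `aria2c` is available. For environments without it, the same checkpoints could be fetched with the standard library alone; the `fetch` helper below is a hypothetical sketch, not part of this diff, and only approximates aria2c's `-c` resume behavior with a skip-if-present check.

```python
import os
import urllib.request

BASE = "https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main"


def fetch(rel_path: str, dest_dir: str) -> str:
    """Hypothetical single-connection stand-in for the aria2c calls above."""
    os.makedirs(dest_dir, exist_ok=True)
    dest = os.path.join(dest_dir, os.path.basename(rel_path))
    if not os.path.exists(dest):  # crude substitute for aria2c's -c flag
        urllib.request.urlretrieve(f"{BASE}/{rel_path}", dest)
    return dest


# The same v2 checkpoints the Dockerfile downloads.
for name in ("D40k.pth", "G40k.pth", "f0D40k.pth", "f0G40k.pth"):
    fetch(f"pretrained_v2/{name}", "assets/pretrained_v2")
fetch("hubert_base.pt", "assets/hubert")
fetch("rmvpe.pt", "assets/rmvpe")
```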
diff --git a/README.md b/README.md
index 9e2f4d1c84fe0516d7b52b36bb72eb820bc285c4..949ea4eea9cd6a7f52f2c79a8fb33efe9d45c415 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,10 @@
 ---
 title: Aesthetic RVC Inference HF
-emoji: 🦀
-colorFrom: pink
-colorTo: yellow
+emoji: 🍏😺
+colorFrom: green
+colorTo: green
 sdk: gradio
-sdk_version: 3.47.1
+sdk_version: 3.43.2
 app_file: app.py
 pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co./docs/hub/spaces-config-reference
+---
\ No newline at end of file
diff --git a/app.py b/app.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..01272ae550d224f6a20cca180af1044489d5bf68 100644
--- a/app.py
+++ b/app.py
@@ -0,0 +1,16 @@
+import os
+os.system("python -m pip install pedalboard")
+shell_script = './install_Applio.sh'
+os.system(f'chmod +x {shell_script}')
+try:
+    return_code = os.system(shell_script)
+    if return_code == 0:
+        print("Shell script executed successfully.")
+    else:
+        print(f"Shell script failed with return code {return_code}")
+except Exception as e:
+    print(f"An error occurred: {e}")
+
+
+
+os.system("python -m sklearnex infer-web.py --pycmd python --port 7897 --theme dark")
\ No newline at end of file
diff --git a/assets/audios/.gitignore b/assets/audios/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/assets/audios/audio-others/.gitignore b/assets/audios/audio-others/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/assets/audios/audio-others/.gitignore
@@ -0,0 +1 @@
+
diff --git a/assets/audios/audio-outputs/.gitignore b/assets/audios/audio-outputs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/assets/audios/audio-outputs/.gitignore
@@ -0,0 +1 @@
+
diff --git a/assets/audios/separated/.gitkeep b/assets/audios/separated/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/assets/audios/separated/.gitkeep
@@ -0,0 +1 @@
+
diff --git a/assets/audios/tracks/.gitkeep b/assets/audios/tracks/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/assets/audios/tracks/.gitkeep
@@ -0,0 +1 @@
+
diff --git a/assets/configs/32k.json b/assets/configs/32k.json
new file mode 100644
index 0000000000000000000000000000000000000000..bcae72223ec09dc199009d7cb5ed405a0c0981cf
--- /dev/null
+++ b/assets/configs/32k.json
@@ -0,0 +1,50 @@
+{
+  "train": {
+    "log_interval": 200,
+    "seed": 1234,
+    "epochs": 20000,
+    "learning_rate": 1e-4,
+    "betas": [0.8, 0.99],
+    "eps": 1e-9,
+    "batch_size": 4,
+    "fp16_run": false,
+    "lr_decay": 0.999875,
+    "segment_size": 12800,
+    "init_lr_ratio": 1,
+    "warmup_epochs": 0,
+    "c_mel": 45,
+    "c_kl": 1.0
+  },
+  "data": {
+    "max_wav_value": 32768.0,
+    "sampling_rate": 32000,
+    "filter_length": 1024,
+    "hop_length": 320,
+    "win_length": 1024,
+    "n_mel_channels": 80,
+    "mel_fmin": 0.0,
+    "mel_fmax": null
+  },
+  "model": {
+    "inter_channels": 192,
+    "hidden_channels": 192,
+    "filter_channels": 768,
+    "n_heads": 2,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0,
+    "resblock": "1",
+    "resblock_kernel_sizes": [3, 7, 11],
+    "resblock_dilation_sizes": [
+      [1, 3, 5],
+      [1, 3, 5],
+      [1, 3, 5]
+    ],
+    "upsample_rates": [10, 4, 2, 2, 2],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [16, 16, 4, 4, 4],
+    "use_spectral_norm": false,
+    "gin_channels": 256,
+    "spk_embed_dim": 109
+  }
+}
diff --git a/assets/configs/32k_v2.json b/assets/configs/32k_v2.json
new file mode 100644
index
0000000000000000000000000000000000000000..ad42f87b15e1ea68eff0a90db50fbc08d56c7aa9 --- /dev/null +++ b/assets/configs/32k_v2.json @@ -0,0 +1,50 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 32000, + "filter_length": 1024, + "hop_length": 320, + "win_length": 1024, + "n_mel_channels": 80, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3, 7, 11], + "resblock_dilation_sizes": [ + [1, 3, 5], + [1, 3, 5], + [1, 3, 5] + ], + "upsample_rates": [10, 8, 2, 2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [20, 16, 4, 4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/40k.json b/assets/configs/40k.json new file mode 100644 index 0000000000000000000000000000000000000000..28ff4d91f2618497fb39ad27872151bcb0d51761 --- /dev/null +++ b/assets/configs/40k.json @@ -0,0 +1,50 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": false, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 40000, + "filter_length": 2048, + "hop_length": 400, + "win_length": 2048, + "n_mel_channels": 125, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3, 7, 11], + "resblock_dilation_sizes": [ + [1, 3, 5], + [1, 3, 5], + [1, 3, 5] + ], + "upsample_rates": [10, 10, 2, 2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16, 16, 4, 4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/48k.json b/assets/configs/48k.json new file mode 100644 index 0000000000000000000000000000000000000000..4d01946ed50ade92c1f85b548ab008a4cd617eb8 --- /dev/null +++ b/assets/configs/48k.json @@ -0,0 +1,50 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": false, + "lr_decay": 0.999875, + "segment_size": 11520, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 48000, + "filter_length": 2048, + "hop_length": 480, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3, 7, 11], + "resblock_dilation_sizes": [ + [1, 3, 5], + [1, 3, 5], + [1, 3, 5] + ], + "upsample_rates": [10, 6, 2, 2, 2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16, 16, 4, 4, 4], + "use_spectral_norm": false, 
+ "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/48k_v2.json b/assets/configs/48k_v2.json new file mode 100644 index 0000000000000000000000000000000000000000..50f06421912e2cd1f69768c35272981d75d86983 --- /dev/null +++ b/assets/configs/48k_v2.json @@ -0,0 +1,50 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 17280, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 48000, + "filter_length": 2048, + "hop_length": 480, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3, 7, 11], + "resblock_dilation_sizes": [ + [1, 3, 5], + [1, 3, 5], + [1, 3, 5] + ], + "upsample_rates": [12, 10, 2, 2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [24, 20, 4, 4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/__pycache__/config.cpython-39.pyc b/assets/configs/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cfc0ca676932ba365e5fcf2c749c0e9c338e1f1 Binary files /dev/null and b/assets/configs/__pycache__/config.cpython-39.pyc differ diff --git a/assets/configs/config.json b/assets/configs/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8e9c17669bf8028653fbfa5c9eeac23ec76c1cc9 --- /dev/null +++ b/assets/configs/config.json @@ -0,0 +1,15 @@ +{ + "pth_path": "assets/weights/kikiV1.pth", + "index_path": "logs/kikiV1.index", + "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", + "sg_output_device": "VoiceMeeter Aux Input (VB-Audio (MME)", + "threhold": -45.0, + "pitch": 12.0, + "index_rate": 0.0, + "rms_mix_rate": 0.0, + "block_time": 0.25, + "crossfade_length": 0.04, + "extra_time": 2.0, + "n_cpu": 6.0, + "f0method": "rmvpe" +} diff --git a/assets/configs/config.py b/assets/configs/config.py new file mode 100644 index 0000000000000000000000000000000000000000..46d947ab1be9f7f43766d9ca1c1cc0445e69e071 --- /dev/null +++ b/assets/configs/config.py @@ -0,0 +1,304 @@ +import argparse +import getpass +import sys +sys.path.append('..') +import json +from multiprocessing import cpu_count + +import torch + +try: + import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import + if torch.xpu.is_available(): + from lib.infer.modules.ipex import ipex_init + ipex_init() +except Exception: + pass + +import logging + +logger = logging.getLogger(__name__) + +import os +import sys +import subprocess +import platform + +syspf = platform.system() +python_version = "39" + +def find_python_executable(): + runtime_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'runtime')) + if os.path.exists(runtime_path): + logger.info("Current user: Runtime") + return runtime_path + elif syspf == "Linux": + try: + result = subprocess.run(["which", "python"], capture_output=True, text=True, check=True) + python_path = result.stdout.strip() + logger.info("Current user: Linux") + return python_path + except subprocess.CalledProcessError: + raise Exception("Could not find the Python path on Linux.") 
+ elif syspf == "Windows": + try: + result = subprocess.run(["where", "python"], capture_output=True, text=True, check=True) + output_lines = result.stdout.strip().split('\n') + if output_lines: + python_path = output_lines[0] + python_path = os.path.dirname(python_path) + current_user = os.getlogin() or getpass.getuser() + logger.info("Current user: %s" % current_user) + return python_path + raise Exception("Python executable not found in the PATH.") + except subprocess.CalledProcessError: + raise Exception("Could not find the Python path on Windows.") + elif syspf == "Darwin": + try: + result = subprocess.run(["which", "python"], capture_output=True, text=True, check=True) + python_path = result.stdout.strip() + logger.info("Current user: Darwin") + return python_path + except subprocess.CalledProcessError: + raise Exception("Could not find the Python path on macOS.") + else: + raise Exception("Operating system not compatible: {syspf}".format(syspf=syspf)) + +python_path = find_python_executable() + + +version_config_list = [ + "v1/32k.json", + "v1/40k.json", + "v1/48k.json", + "v2/48k.json", + "v2/32k.json", +] + + +def singleton_variable(func): + def wrapper(*args, **kwargs): + if not wrapper.instance: + wrapper.instance = func(*args, **kwargs) + return wrapper.instance + + wrapper.instance = None + return wrapper + + +@singleton_variable +class Config: + def __init__(self): + self.device = "cuda:0" + self.is_half = True + self.n_cpu = 0 + self.gpu_name = None + self.json_config = self.load_config_json() + self.gpu_mem = None + ( + self.python_cmd, + self.listen_port, + self.iscolab, + self.noparallel, + self.noautoopen, + self.paperspace, + self.is_cli, + self.grtheme, + self.dml, + ) = self.arg_parse() + self.instead = "" + self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() + + @staticmethod + def load_config_json() -> dict: + d = {} + for config_file in version_config_list: + with open(f"./assets/configs/{config_file}", "r") as f: + d[config_file] = json.load(f) + return d + + @staticmethod + def arg_parse() -> tuple: + exe = sys.executable or "python" + parser = argparse.ArgumentParser() + parser.add_argument("--port", type=int, default=7865, help="Listen port") + parser.add_argument("--pycmd", type=str, default=exe, help="Python command") + parser.add_argument("--colab", action="store_true", help="Launch in colab") + parser.add_argument( + "--noparallel", action="store_true", help="Disable parallel processing" + ) + parser.add_argument( + "--noautoopen", + action="store_true", + help="Do not open in browser automatically", + ) + parser.add_argument( + "--paperspace", + action="store_true", + help="Note that this argument just shares a gradio link for the web UI. Thus can be used on other non-local CLI systems.", + ) + parser.add_argument( + "--is_cli", + action="store_true", + help="Use the CLI instead of setting up a gradio UI. This flag will launch an RVC text interface where you can execute functions from infer-web.py!", + ) + + parser.add_argument( + "-t", + "--theme", + help = "Theme for Gradio. Format - `JohnSmith9982/small_and_pretty` (no backticks)", + default = "JohnSmith9982/small_and_pretty", + type = str + ) + + parser.add_argument( + "--dml", + action="store_true", + help="Use DirectML backend instead of CUDA." 
+        )
+
+        cmd_opts = parser.parse_args()
+
+        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
+
+        return (
+            cmd_opts.pycmd,
+            cmd_opts.port,
+            cmd_opts.colab,
+            cmd_opts.noparallel,
+            cmd_opts.noautoopen,
+            cmd_opts.paperspace,
+            cmd_opts.is_cli,
+            cmd_opts.theme,
+            cmd_opts.dml,
+        )
+
+    # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
+    # check `getattr` and try it for compatibility
+    @staticmethod
+    def has_mps() -> bool:
+        if not torch.backends.mps.is_available():
+            return False
+        try:
+            torch.zeros(1).to(torch.device("mps"))
+            return True
+        except Exception:
+            return False
+
+    @staticmethod
+    def has_xpu() -> bool:
+        if hasattr(torch, "xpu") and torch.xpu.is_available():
+            return True
+        else:
+            return False
+
+    def use_fp32_config(self):
+        for config_file in version_config_list:
+            self.json_config[config_file]["train"]["fp16_run"] = False
+
+    def device_config(self) -> tuple:
+        if torch.cuda.is_available():
+            current_device = torch.cuda.current_device()
+            cuda_version = '.'.join(str(x) for x in torch.cuda.get_device_capability(torch.cuda.current_device()))
+            actual_vram = torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory / (1024 ** 3)
+            if self.has_xpu():
+                self.device = self.instead = "xpu:0"
+                self.is_half = True
+            i_device = int(self.device.split(":")[-1])
+            self.gpu_name = torch.cuda.get_device_name(i_device)
+            if (actual_vram is not None and actual_vram <= 1) or (1 < float(cuda_version) < 3.7):
+                logger.info("Using CPU due to unsupported CUDA version or low VRAM...")
+                os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+                self.device = self.instead = "cpu"
+                self.is_half = False
+                self.use_fp32_config()
+            if (
+                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+                or "P40" in self.gpu_name.upper()
+                or "P10" in self.gpu_name.upper()
+                or "1060" in self.gpu_name
+                or "1070" in self.gpu_name
+                or "1080" in self.gpu_name
+            ):
+                logger.info("Found GPU %s, force to fp32", self.gpu_name)
+                self.is_half = False
+                self.use_fp32_config()
+            else:
+                logger.info("Found GPU %s", self.gpu_name)
+            self.gpu_mem = int(
+                torch.cuda.get_device_properties(i_device).total_memory
+                / 1024
+                / 1024
+                / 1024
+                + 0.4
+            )
+            if self.gpu_mem <= 4:
+                with open("lib/infer/modules/train/preprocess.py", "r") as f:
+                    strr = f.read().replace("3.7", "3.0")
+                with open("lib/infer/modules/train/preprocess.py", "w") as f:
+                    f.write(strr)
+        elif self.has_mps():
+            logger.info("No supported Nvidia GPU found")
+            self.device = self.instead = "mps"
+            self.is_half = False
+            self.use_fp32_config()
+        else:
+            logger.info("No supported Nvidia GPU found")
+            self.device = self.instead = "cpu"
+            self.is_half = False
+            self.use_fp32_config()
+        if self.n_cpu == 0:
+            self.n_cpu = cpu_count()
+
+        if self.is_half:
+            # Configuration for ~6 GB of VRAM
+            x_pad = 3
+            x_query = 10
+            x_center = 60
+            x_max = 65
+        else:
+            # Configuration for ~5 GB of VRAM
+            x_pad = 1
+            x_query = 6
+            x_center = 38
+            x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+            if self.gpu_mem == 4:
+                x_pad = 1
+                x_query = 5
+                x_center = 30
+                x_max = 32
+            elif self.gpu_mem <= 3:
+                x_pad = 1
+                x_query = 2
+                x_center = 16
+                x_max = 18
+
+        if self.dml:
+            logger.info("Use DirectML instead")
+            directml_dll_path = os.path.join(python_path, "Lib", "site-packages", "onnxruntime", "capi", "DirectML.dll")
+            if (
+                os.path.exists(
+                    directml_dll_path
+                )
+                == False
+            ):
+                pass
+            # if self.device != "cpu":
+            import torch_directml
+
+            self.device = torch_directml.device(torch_directml.default_device())
+            self.is_half = False
+        else:
+            if
self.instead: + logger.info(f"Use {self.instead} instead") + providers_cuda_dll_path = os.path.join(python_path, "Lib", "site-packages", "onnxruntime", "capi", "onnxruntime_providers_cuda.dll") + if ( + os.path.exists( + providers_cuda_dll_path + ) + == False + ): + pass + return x_pad, x_query, x_center, x_max diff --git a/assets/configs/v1/32k.json b/assets/configs/v1/32k.json new file mode 100644 index 0000000000000000000000000000000000000000..d5f16d691ed798f4c974b431167c36269b2ce7d2 --- /dev/null +++ b/assets/configs/v1/32k.json @@ -0,0 +1,46 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 32000, + "filter_length": 1024, + "hop_length": 320, + "win_length": 1024, + "n_mel_channels": 80, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,4,2,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/v1/40k.json b/assets/configs/v1/40k.json new file mode 100644 index 0000000000000000000000000000000000000000..4ffc87b9e9725fcd59d81a68d41a61962213b777 --- /dev/null +++ b/assets/configs/v1/40k.json @@ -0,0 +1,46 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 40000, + "filter_length": 2048, + "hop_length": 400, + "win_length": 2048, + "n_mel_channels": 125, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,10,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/v1/48k.json b/assets/configs/v1/48k.json new file mode 100644 index 0000000000000000000000000000000000000000..2d0e05beb794f6f61b769b48c7ae728bf59e6335 --- /dev/null +++ b/assets/configs/v1/48k.json @@ -0,0 +1,46 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 11520, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 48000, + "filter_length": 2048, + "hop_length": 480, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 
192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,6,2,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/v2/32k.json b/assets/configs/v2/32k.json new file mode 100644 index 0000000000000000000000000000000000000000..70e534f4c641a5a2c8e5c1e172f61398ee97e6e0 --- /dev/null +++ b/assets/configs/v2/32k.json @@ -0,0 +1,46 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 32000, + "filter_length": 1024, + "hop_length": 320, + "win_length": 1024, + "n_mel_channels": 80, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,8,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [20,16,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/v2/48k.json b/assets/configs/v2/48k.json new file mode 100644 index 0000000000000000000000000000000000000000..75f770cdacff3467e9e925ed2393b480881d0303 --- /dev/null +++ b/assets/configs/v2/48k.json @@ -0,0 +1,46 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 17280, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 48000, + "filter_length": 2048, + "hop_length": 480, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [12,10,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [24,20,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/assets/configs/version.txt b/assets/configs/version.txt new file mode 100644 index 0000000000000000000000000000000000000000..3e3c2f1e5edb083aab93646ac7b076daa38516dd --- /dev/null +++ b/assets/configs/version.txt @@ -0,0 +1 @@ +2.1.1 diff --git a/assets/hubert/.gitignore b/assets/hubert/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/assets/i18n/__pycache__/i18n.cpython-39.pyc b/assets/i18n/__pycache__/i18n.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fe1a23ccd1b1e7bccf1825d8d8130b14d45db6d Binary files /dev/null and b/assets/i18n/__pycache__/i18n.cpython-39.pyc differ 
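One detail of `assets/configs/config.py` (added above) worth calling out: the `Config` class is wrapped in the `singleton_variable` decorator, so every `Config()` call returns the same shared object, and the five version JSONs listed in `version_config_list` are parsed only once. Below is a self-contained sketch of that pattern; the decorator body is copied from the diff, while `Example` is a hypothetical stand-in for `Config`.

```python
def singleton_variable(func):
    # Cache the first call's result on the wrapper and return it thereafter.
    def wrapper(*args, **kwargs):
        if not wrapper.instance:
            wrapper.instance = func(*args, **kwargs)
        return wrapper.instance

    wrapper.instance = None
    return wrapper


@singleton_variable
class Example:  # hypothetical stand-in for Config
    def __init__(self):
        self.loaded = True


a = Example()
b = Example()
assert a is b  # both names refer to the single cached instance
```

Because the guard is `if not wrapper.instance`, a falsy result would be rebuilt on the next call; that is harmless here, since a `Config` instance is always truthy.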
diff --git a/assets/i18n/extract_locale.py b/assets/i18n/extract_locale.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ff5ea3ddd7c612c640544099ab98a861b8fe35 --- /dev/null +++ b/assets/i18n/extract_locale.py @@ -0,0 +1,34 @@ +import json +import re + +# Define regular expression patterns +pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)""" + +# Initialize the dictionary to store key-value pairs +data = {} + + +def process(fn: str): + global data + with open(fn, "r", encoding="utf-8") as f: + contents = f.read() + matches = re.findall(pattern, contents) + for key in matches: + key = eval(key) + print("extract:", key) + data[key] = key + + +print("processing infer-web.py") +process("infer-web.py") + +print("processing gui_v0.py") +process("gui_v0.py") + +print("processing gui_v1.py") +process("gui_v1.py") + +# Save as a JSON file +with open("./i18n/en_US.json", "w", encoding="utf-8") as f: + json.dump(data, f, ensure_ascii=False, indent=4) + f.write("\n") diff --git a/assets/i18n/i18n.py b/assets/i18n/i18n.py new file mode 100644 index 0000000000000000000000000000000000000000..6e6ea46996a22fa11d65f9158274616b3a033bae --- /dev/null +++ b/assets/i18n/i18n.py @@ -0,0 +1,67 @@ +import json +import sys +sys.path.append('..') +import logging + +logger = logging.getLogger(__name__) +def load_language_list(language): + try: + with open(f"./assets/i18n/langs/{language}.json", "r", encoding="utf-8") as f: + return json.load(f) + except FileNotFoundError: + raise FileNotFoundError( + f"Failed to load language file for {language}. Check if the correct .json file exists." + ) + + +class I18nAuto: + """ + A class used for internationalization using JSON language files. + + Examples + -------- + >>> i18n = I18nAuto() + >>> i18n.print() + Using Language: en_US + """ + def __init__(self, language=None): + from locale import getdefaultlocale + language = language or getdefaultlocale()[0] + + # Check if a specific language variant exists, e.g., 'es_ES' + if self._language_exists(language): + self.language = language + else: + # If not, check if there is a language with the first two characters + # matching, e.g., 'es_' for 'es_ES'. + lang_prefix = language[:2] + for available_language in self._get_available_languages(): + if available_language.startswith(lang_prefix): + self.language = available_language + break + else: + # If no match found, default to 'en_US'. 
+ self.language = 'en_US' + + self.language_map = load_language_list(self.language) + + @staticmethod + def _get_available_languages(): + from os import listdir + from os.path import isfile, join + + language_files = [f for f in listdir("./assets/i18n/langs/") if isfile(join("./assets/i18n/langs/", f))] + return [lang.replace(".json", "") for lang in language_files] + + @staticmethod + def _language_exists(language): + from os.path import exists + return exists(f"./assets/i18n/langs/{language}.json") + + def __call__(self, key): + """Returns the translation of the given key if it exists, else returns the key itself.""" + return self.language_map.get(key, key) + + def print(self): + """Prints the language currently in use.""" + logger.info(f"Using Language: {self.language}") \ No newline at end of file diff --git a/assets/i18n/langs/ar_AR.json b/assets/i18n/langs/ar_AR.json new file mode 100644 index 0000000000000000000000000000000000000000..c29d349e34793789e338edaa8406b0b9b3fb3c41 --- /dev/null +++ b/assets/i18n/langs/ar_AR.json @@ -0,0 +1,248 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "لسوء الحظ، لا تتوفر وحدة معالجة رسومات متوافقة لدعم تدريبك.", + "Yes": "نعم", + "Select your dataset:": "حدد مجموعة البيانات الخاصة بك.", + "Update list": "قائمة التحديث.", + "Download Model": "تحميل الموديل", + "Download Backup": "تحميل النسخ الاحتياطي", + "Download Dataset": "تحميل مجموعة البيانات", + "Download": "تحميل", + "Url:": "عنوان URL:", + "Build the index before saving.": "قم ببناء الفهرس قبل الحفظ.", + "Save your model once the training ends.": "احفظ النموذج الخاص بك بمجرد انتهاء التدريب.", + "Save type": "حفظ النوع", + "Save model": "حفظ النموذج", + "Choose the method": "اختر الطريقة", + "Save all": "احفظ الكل", + "Save D and G": "احفظ D وG", + "Save voice": "حفظ الصوت", + "Downloading the file: ": "تنزيل الملف:", + "Stop training": "توقف عن التدريب", + "Too many users have recently viewed or downloaded this file": "لقد قام عدد كبير جدًا من المستخدمين مؤخرًا بعرض هذا الملف أو تنزيله", + "Cannot get file from this private link": "لا يمكن الحصول على الملف من هذا الرابط الخاص", + "Full download": "تحميل كامل", + "An error occurred downloading": "حدث خطأ أثناء التنزيل", + "Model saved successfully": "تم حفظ النموذج بنجاح", + "Saving the model...": "جارٍ حفظ النموذج...", + "Saved without index...": "تم الحفظ بدون فهرس...", + "model_name": "اسم النموذج", + "Saved without inference model...": "تم الحفظ بدون نموذج الاستدلال...", + "An error occurred saving the model": "حدث خطأ أثناء حفظ النموذج", + "The model you want to save does not exist, be sure to enter the correct name.": "النموذج الذي تريد حفظه غير موجود، تأكد من إدخال الاسم الصحيح.", + "The file could not be downloaded.": "لا يمكن تحميل الملف.", + "Unzip error.": "خطأ في فك الضغط.", + "Path to your added.index file (if it didn't automatically find it)": "المسار إلى ملف add.index (إذا لم يتم العثور عليه تلقائيًا)", + "It has been downloaded successfully.": "لقد تم تحميله بنجاح.", + "Proceeding with the extraction...": "المضي قدما في عملية الاستخراج...", + "The Backup has been uploaded successfully.": "تم تحميل النسخة الاحتياطية بنجاح.", + "The Dataset has been loaded successfully.": "تم تحميل مجموعة البيانات بنجاح.", + "The Model has been loaded successfully.": "تم تحميل النموذج بنجاح.", + "It is used to download your inference models.": "يتم استخدامه لتنزيل نماذج الاستدلال الخاصة بك.", + "It is used to download your training backups.": "يتم استخدامه لتنزيل النسخ الاحتياطية للتدريب.", + "Download the 
dataset with the audios in a compatible format (.wav/.flac) to train your model.": "قم بتنزيل مجموعة البيانات مع التسجيلات الصوتية بتنسيق متوافق (.wav/.flac) لتدريب النموذج الخاص بك.", + "No relevant file was found to upload.": "لم يتم العثور على ملف ذي صلة للتحميل.", + "The model works for inference, and has the .index file.": "يعمل النموذج من أجل الاستدلال، ويحتوي على ملف .index.", + "The model works for inference, but it doesn't have the .index file.": "يعمل النموذج من أجل الاستدلال، لكنه لا يحتوي على ملف .index.", + "This may take a few minutes, please wait...": "قد يستغرق ذلك بضع دقائق، يرجى الانتظار...", + "Resources": "موارد", + "Step 1: Processing data": "الخطوة 1: معالجة البيانات", + "Step 2: Extracting features": "الخطوة 2 ب: استخراج الميزات", + "Step 3: Model training started": "الخطوة 3 أ: بدأ التدريب النموذجي", + "Training is done, check train.log": "تم الانتهاء من التدريب، قم بزيارة Train.log", + "All processes have been completed!": "تم الانتهاء من جميع العمليات!", + "Model Inference": "الاستدلال النموذجي", + "Inferencing voice:": "الاستدلال الصوتي:", + "Model_Name": "اسم النموذج", + "Dataset_Name": "اسم مجموعة البيانات", + "Or add your dataset path:": "أو أدخل مسار مجموعة البيانات الخاصة بك:", + "Whether the model has pitch guidance.": "ما إذا كان النموذج يحتوي على توجيهات في الملعب.", + "Whether to save only the latest .ckpt file to save hard drive space": "ما إذا كان سيتم حفظ أحدث ملف .ckpt فقط لتوفير مساحة على القرص الصلب", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "قم بتخزين جميع مجموعات التدريب مؤقتًا في ذاكرة GPU. يمكن أن يؤدي تخزين مجموعات البيانات الصغيرة مؤقتًا (أقل من 10 دقائق) إلى تسريع عملية التدريب", + "Save a small final model to the 'weights' folder at each save point": "احفظ نموذجًا نهائيًا صغيرًا في مجلد \"الأوزان\" عند كل نقطة حفظ", + "Refresh": "تحديث قائمة الصوت ومسار الفهرس والملفات الصوتية", + "Unload voice to save GPU memory": "قم بإلغاء تحميل الصوت لحفظ ذاكرة GPU:", + "Select Speaker/Singer ID:": "حدد معرف المتحدث/المغني:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "يوصى باستخدام مفتاح +12 للتحويل من ذكر إلى أنثى، ومفتاح -12 للتحويل من أنثى إلى ذكر. إذا تجاوز نطاق الصوت كثيرًا وكان الصوت مشوهًا، فيمكنك أيضًا ضبطه على النطاق المناسب بنفسك.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "تبديل الموضع (عدد صحيح، عدد نصف النغمات، رفع بمقدار أوكتاف: 12، خفض بمقدار أوكتاف: -12):", + "Enter the path of the audio file to be processed (default is the correct format example):": "أدخل مسار الملف الصوتي المراد معالجته (الافتراضي هو مثال التنسيق الصحيح):", + "Select the pitch extraction algorithm:": "حدد خوارزمية استخراج الملعب:", + "Feature search dataset file path": "مسار ملف مجموعة بيانات البحث عن المعالم", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "إذا كان > = 3: قم بتطبيق التصفية المتوسطة على نتائج العرض التقديمي المحصودة. تمثل القيمة نصف قطر المرشح ويمكن أن تقلل من التنفس.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "المسار إلى ملف فهرس الميزات. 
اتركه فارغًا لاستخدام النتيجة المحددة من القائمة المنسدلة:", + "Auto-detect index path and select from the dropdown:": "الكشف التلقائي عن مسار الفهرس والاختيار من القائمة المنسدلة", + "Path to feature file:": "المسار إلى ملف الميزة:", + "Search feature ratio:": "نسبة ميزة البحث:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "قم بإعادة تشكيل الصوت الناتج في مرحلة ما بعد المعالجة إلى معدل العينة النهائي. اضبط على 0 لعدم إعادة التشكيل:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "استخدم مظروف حجم الإدخال لاستبدال أو مزج مظروف حجم الإخراج. كلما اقتربت النسبة من 1، زاد استخدام مظروف الإخراج:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "قم بحماية الحروف الساكنة وأصوات التنفس التي لا صوت لها لمنع المؤثرات مثل تمزيق الموسيقى الإلكترونية. اضبط على 0.5 للتعطيل. قم بتقليل القيمة لزيادة الحماية، ولكنه قد يقلل من دقة الفهرسة:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "ملف منحنى F0 (اختياري). خطوة واحدة لكل سطر. يستبدل الإعداد الافتراضي F0 وتعديل درجة الصوت:", + "Convert": "يتحول", + "Output information:": "معلومات الإخراج", + "Export audio (click on the three dots in the lower right corner to download)": "تصدير الصوت (انقر على النقاط الثلاث في الزاوية اليمنى السفلية للتنزيل)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "تحويل دفعة. أدخل المجلد الذي يحتوي على الملفات الصوتية المراد تحويلها أو قم بتحميل ملفات صوتية متعددة. سيتم إخراج الصوت المحول في المجلد المحدد (الافتراضي: \"اختياري\").", + "Specify output folder:": "تحديد مجلد الإخراج:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "أدخل مسار مجلد الصوت المراد معالجته (انسخه من شريط العناوين الخاص بمدير الملفات):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "يمكنك أيضًا إدخال الملفات الصوتية على دفعات. اختر أحد الخيارين. 
تعطى الأولوية للقراءة من المجلد.", + "Export file format": "تصدير تنسيق الملف", + "UVR5": "الأشعة فوق البنفسجية5", + "Enter the path of the audio folder to be processed:": "أدخل مسار مجلد الصوت المراد معالجته:", + "Model": "نموذج", + "Vocal Extraction Aggressive": "استخراج الصوتية العدوانية", + "Specify the output folder for vocals:": "حدد مجلد الإخراج للغناء:", + "Specify the output folder for accompaniment:": "حدد مجلد الإخراج للمرافقة:", + "Train": "يدرب", + "Enter the model name:": "أدخل اسم النموذج:", + "Target sample rate:": "معدل العينة المستهدف:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "ما إذا كان النموذج يحتوي على إرشادات لطبقة الصوت (مطلوبة للغناء، واختيارية للكلام):", + "Version": "إصدار", + "Number of CPU processes:": "عدد عمليات وحدة المعالجة المركزية المستخدمة لاستخراج الملعب ومعالجة البيانات:", + "Enter the path of the training folder:": "أدخل مسار مجلد التدريب:", + "Specify the model ID:": "يرجى تحديد معرف النموذج:", + "Auto detect audio path and select from the dropdown:": "الكشف التلقائي عن مسار الصوت والاختيار من القائمة المنسدلة:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "أضف اسم الصوت إلى المسار إلى الملف الصوتي المراد معالجته (الافتراضي هو مثال التنسيق الصحيح) قم بإزالة المسار لاستخدام الصوت من القائمة المنسدلة:", + "Advanced Settings": "إعدادات متقدمة", + "Settings": "إعدادات", + "Status:": "حالة", + "Process data": "معالجة البيانات", + "Drag your audio here:": "اسحب الصوت الخاص بك هنا واضغط على زر التحديث", + "Or record an audio:": "أو تسجيل الصوت.", + "Formant shift inference audio": "تحويل صيغة الاستدلال الصوتي", + "Used for male to female and vice-versa conversions": "يستخدم للتحويل من ذكر إلى أنثى والعكس", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "يرجى تقديم فهرس (فهارس) وحدة معالجة الرسومات مفصولة بـ \"-\"، مثل 0-1-2 لاستخدام وحدات معالجة الرسومات 0 و1 و2:", + "GPU Information:": "معلومات وحدة معالجة الرسومات", + "Feature extraction": "ميزة استخراج", + "Save frequency:": "حفظ التردد:", + "Training epochs:": "فترات التدريب:", + "Batch size per GPU:": "حجم الدفعة لكل GPU:", + "Save only the latest '.ckpt' file to save disk space:": "احفظ فقط أحدث ملف '.ckpt' لتوفير مساحة على القرص:", + "No": "لا", + "Save a small final model to the 'weights' folder at each save point:": "احفظ نموذجًا نهائيًا صغيرًا في مجلد \"الأوزان\" عند كل نقطة حفظ:", + "Load pre-trained base model G path:": "تحميل مسار G للنموذج الأساسي المُدرب مسبقًا:", + "Load pre-trained base model D path:": "تحميل المسار D للنموذج الأساسي المُدرب مسبقًا:", + "Train model": "نموذج القطار", + "Train feature index": "مؤشر ميزة القطار", + "One-click training": "التدريب بنقرة واحدة", + "Processing": "يعالج", + "Model fusion, can be used to test timbre fusion": "يمكن استخدام نموذج الاندماج لاختبار دمج الجرس", + "Path to Model A:": "المسار إلى النموذج أ:", + "Path to Model B:": "المسار إلى النموذج ب:", + "Weight for Model A:": "الوزن للنموذج أ:", + "Whether the model has pitch guidance:": "ما إذا كان النموذج يحتوي على توجيه الملعب:", + "Model information to be placed:": "معلومات النموذج المراد وضعها:", + "Model architecture version:": "نسخة البنية النموذجية:", + "Fusion": "انصهار", + "Modify model information": "تعديل معلومات النموذج", + "Path to Model:": "المسار إلى النموذج:", + "Model information to be modified:": "معلومات النموذج المراد تعديلها:", + "Save file name:": "حفظ اسم الملف:", + 
"Modify": "يُعدِّل", + "View model information": "عرض معلومات النموذج", + "View": "منظر", + "Model extraction": "استخراج النموذج (أدخل مسار نموذج الملف الكبير ضمن مجلد \"السجلات\"). يعد هذا مفيدًا إذا كنت تريد إيقاف التدريب في منتصف الطريق واستخراج ملف نموذج صغير وحفظه يدويًا، أو إذا كنت تريد اختبار نموذج متوسط:", + "Name:": "حفظ الاسم:", + "Whether the model has pitch guidance (1: yes, 0: no):": "ما إذا كان النموذج يحتوي على توجيه درجة الصوت (1: نعم، 0: لا):", + "Extract": "يستخرج", + "Export Onnx": "تصدير اونكس", + "RVC Model Path:": "مسار نموذج RVC:", + "Onnx Export Path:": "مسار تصدير Onnx:", + "MoeVS Model": "نموذج MoVS", + "Export Onnx Model": "تصدير نموذج Onnx", + "Load model": "نموذج التحميل", + "Hubert Model": "نموذج هيوبرت", + "Select the .pth file": "حدد ملف .pth", + "Select the .index file": "حدد ملف الفهرس", + "Select the .npy file": "حدد ملف .npy", + "Input device": "جهاز الإدخال", + "Output device": "جهاز إخراج", + "Audio device (please use the same type of driver)": "جهاز الصوت (يرجى استخدام نفس نوع برنامج التشغيل)", + "Response threshold": "عتبة الاستجابة", + "Pitch settings": "إعدادات الملعب", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "ما إذا كان سيتم استخدام أسماء الملاحظات بدلاً من قيمتها بالهيرتز. على سبيل المثال. [C5، D6] بدلاً من [523.25، 1174.66] هرتز", + "Index Rate": "معدل المؤشر", + "General settings": "الاعدادات العامة", + "Sample length": "طول العينة", + "Fade length": "طول التلاشي", + "Extra inference time": "وقت الاستدلال الإضافي", + "Input noise reduction": "تقليل ضوضاء الإدخال", + "Output noise reduction": "الحد من الضوضاء الناتج", + "Performance settings": "إعدادات الأداء", + "Start audio conversion": "ابدأ تحويل الصوت", + "Stop audio conversion": "إيقاف تحويل الصوت", + "Inference time (ms):": "وقت الاستدلال (مللي ثانية):", + "Select the pth file": "حدد ملف pth", + "Select the .index file:": "حدد ملف الفهرس", + "The hubert model path must not contain Chinese characters": "يجب ألا يحتوي مسار نموذج Hubert على أحرف صينية", + "The pth file path must not contain Chinese characters.": "يجب ألا يحتوي مسار الملف pth على أحرف صينية.", + "The index file path must not contain Chinese characters.": "يجب ألا يحتوي مسار ملف الفهرس على أحرف صينية.", + "Step algorithm": "خوارزمية الخطوة", + "Number of epoch processes": "عدد عمليات العصر", + "Lowest points export": "أدنى نقاط التصدير", + "How many lowest points to save:": "كم عدد أدنى النقاط للحفظ", + "Export lowest points of a model": "تصدير أدنى نقاط النموذج", + "Output models:": "نماذج الإخراج", + "Stats of selected models:": "إحصائيات النماذج المختارة", + "Custom f0 [Root pitch] File": "ملف f0 مخصص [درجة الجذر]", + "Min pitch:": "الملعب دقيقة", + "Specify minimal pitch for inference [HZ]": "تحديد الحد الأدنى من درجة الاستدلال [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "تحديد الحد الأدنى من درجة الصوت للاستدلال [ملاحظة] [أوكتاف]", + "Max pitch:": "ماكس الملعب", + "Specify max pitch for inference [HZ]": "تحديد أقصى درجة للاستدلال [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "تحديد أقصى درجة للاستدلال [ملاحظة] [أوكتاف]", + "Browse presets for formanting": "تصفح الإعدادات المسبقة للتشكيل", + "Presets are located in formantshiftcfg/ folder": "توجد الإعدادات المسبقة في المجلدformantshiftcfg/", + "Default value is 1.0": "القيمة الافتراضية هي 1.0", + "Quefrency for formant shifting": "التردد لتحويل الصياغة", + "Timbre for formant shifting": "Timbre لتحويل الصياغة", + "Apply": "يتقدم", + "Single": "أعزب", + "Batch": "حزمة", + 
"Separate YouTube tracks": "مسارات يوتيوب منفصلة", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "قم بتنزيل الصوت من مقطع فيديو على YouTube وفصل المسارات الصوتية والمسارات الآلية تلقائيًا", + "Extra": "إضافي", + "Merge": "دمج", + "Merge your generated audios with the instrumental": "دمج الصوتيات التي تم إنشاؤها مع الآلات الموسيقية", + "Choose your instrumental:": "اختر آلتك الموسيقية", + "Choose the generated audio:": "اختر الصوت الذي تم إنشاؤه", + "Combine": "يجمع", + "Download and Separate": "تحميل وفصل", + "Enter the YouTube link:": "أدخل رابط اليوتيوب", + "This section contains some extra utilities that often may be in experimental phases": "يحتوي هذا القسم على بعض الأدوات المساعدة الإضافية التي غالبًا ما تكون في مراحل تجريبية", + "Merge Audios": "دمج صوتيات", + "Audio files have been moved to the 'audios' folder.": "تم نقل الملفات الصوتية إلى مجلد \"التسجيلات الصوتية\".", + "Downloading audio from the video...": "تحميل الصوت من الفيديو...", + "Audio downloaded!": "تحميل الصوت!", + "An error occurred:": "حدث خطأ:", + "Separating audio...": "فصل الصوت...", + "File moved successfully.": "تم نقل الملف بنجاح.", + "Finished!": "انتهى!", + "The source file does not exist.": "الملف المصدر غير موجود.", + "Error moving the file:": "خطأ في نقل الملف:", + "Downloading {name} from drive": "تنزيل {name} من محرك الأقراص", + "The attempt to download using Drive didn't work": "لم تنجح محاولة التنزيل باستخدام Drive", + "Error downloading the file: {str(e)}": "حدث خطأ أثناء تنزيل الملف: {str(e)}", + "Downloading {name} from mega": "تنزيل {name} من ميجا", + "Downloading {name} from basic url": "تنزيل {name} من عنوان url الأساسي", + "Download Audio": "تحميل الصوت", + "Download audios of any format for use in inference (recommended for mobile users).": "تنزيل صوتيات بأي تنسيق لاستخدامها في الاستدلال (موصى به لمستخدمي الأجهزة المحمولة)", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "تعتبر أي أخطاء في ConnectionResetErrors بعد التحويل غير ذات صلة ومرئية بحتة؛ يمكن تجاهلها.", + "Processed audio saved at: ": "تم حفظ الصوت المعالج في:", + "Conversion complete!": "اكتمل التحويل!", + "Reverb": "تردد", + "Compressor": "ضاغط", + "Noise Gate": "بوابة الضجيج", + "Volume": "مقدار", + "Drag the audio here and click the Refresh button": "اسحب الصوت هنا وانقر على زر التحديث", + "Select the generated audio": "حدد الصوت الذي تم إنشاؤه", + "Volume of the instrumental audio:": "حجم الصوت الآلي", + "Volume of the generated audio:": "حجم الصوت الذي تم إنشاؤه", + "### Add the effects": "### أضف التأثيرات", + "Starting audio conversion... (This might take a moment)": "بدء تحويل الصوت... 
(قد يستغرق ذلك بعض الوقت)", + "TTS Model:": "نموذج TTS:", + "TTS": "TTS", + "TTS Method:": "طريقة TTS:", + "Audio TTS:": "صوت TTS:", + "Audio RVC:": "صوت RVC:", + "You can also drop your files to load your model.": "يمكنك أيضًا إسقاط ملفاتك لتحميل نموذجك.", + "Drag your .pth file here:": "اسحب ملف .pth الخاص بك هنا:", + "Drag your .index file here:": "اسحب ملف .index الخاص بك هنا:" +} diff --git a/assets/i18n/langs/de_DE.json b/assets/i18n/langs/de_DE.json new file mode 100644 index 0000000000000000000000000000000000000000..1cbcb140c58a51276a7252051ecc57ff8da874f9 --- /dev/null +++ b/assets/i18n/langs/de_DE.json @@ -0,0 +1,253 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Leider steht keine kompatible GPU zur Unterstützung Ihres Trainings zur Verfügung.", + "Yes": "Ja", + "Select your dataset:": "Wählen Sie Ihren Datensatz:", + "Update list": "Liste aktualisieren", + "Download Model": "Modell herunterladen", + "Download Backup": "Backup herunterladen", + "Download Dataset": "Datensatz herunterladen", + "Download": "Download", + "Url:": "Url:", + "Build the index before saving.": "Erstellen Sie den Index vor dem Speichern.", + "Save your model once the training ends.": "Speichern Sie Ihr Modell, sobald das Training beendet ist.", + "Save type": "Speicherart", + "Save model": "Modell speichern", + "Choose the method": "Wählen Sie die Methode", + "Save all": "Alles speichern", + "Save D and G": "D und G speichern", + "Save voice": "Stimme speichern", + "Downloading the file: ": "Datei wird heruntergeladen: ", + "Stop training": "Training beenden", + "Too many users have recently viewed or downloaded this file": "Zu viele Benutzer haben diese Datei kürzlich angesehen oder heruntergeladen", + "Cannot get file from this private link": "Datei kann nicht von diesem privaten Link abgerufen werden", + "Full download": "Vollständiger Download", + "An error occurred downloading": "Beim Herunterladen der Datei ist ein Fehler aufgetreten.
", + "Model saved successfully": "Modell erfolgreich gespeichert", + "Saving the model...": "Modell wird gespeichert...", + "Saved without index...": "Ohne Index gespeichert...", + "Saved without inference model...": "Ohne Inferenzmodell gespeichert...", + "An error occurred saving the model": "Beim Speichern des Modells ist ein Fehler aufgetreten", + "The model you want to save does not exist, be sure to enter the correct name.": "Das Modell, das Sie speichern möchten, existiert nicht, geben Sie den richtigen Namen ein.", + "The file could not be downloaded.": "Die Datei konnte nicht heruntergeladen werden", + "Unzip error.": "Unzip-Fehler.", + "Path to your added.index file (if it didn't automatically find it)": "Pfad zu Ihrer Datei added.index (falls diese nicht automatisch gefunden wurde)", + "It has been downloaded successfully.": "wurde erfolgreich heruntergeladen.", + "Proceeding with the extraction...": "Fahren Sie mit der Extraktion fort...", + "The Backup has been uploaded successfully.": "Das Backup wurde erfolgreich hochgeladen.", + "The Dataset has been loaded successfully.": "Der Datensatz wurde erfolgreich geladen.", + "The Model has been loaded successfully.": "Das Modell wurde erfolgreich geladen.", + "It is used to download your inference models.": "Es wird verwendet, um Ihre Inferenzmodelle herunterzuladen.", + "It is used to download your training backups.": "Es wird verwendet, um Ihre Trainings-Backups herunterzuladen.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Laden Sie den Datensatz mit den Audios in einem kompatiblen Format (.wav/.flac) herunter, um Ihr Modell zu trainieren.", + "No relevant file was found to upload.": "Keine relevante Datei zum Hochladen gefunden.", + "The model works for inference, and has the .index file.": "Das Modell funktioniert für Inferenz und hat eine .index Datei.", + "The model works for inference, but it doesn't have the .index file.": "Das Modell funktioniert für Inferenz, hat jedoch keine .index Datei.", + "This may take a few minutes, please wait...": "Es kann einige Minuten dauern, bitte warten.", + "Resources": "Ressourcen", + "Step 1: Processing data": "Schritt 1: Datenverarbeitung", + "Step 2: Extracting features": "Schritt 2: Merkmale extrahieren", + "Step 3: Model training started": "Schritt 3: Modelltraining gestartet", + "Training is done, check train.log": "Training ist abgeschlossen, überprüfen Sie train.log", + "All processes have been completed!": "Alle Prozesse sind abgeschlossen!", + "Model Inference": "Modell Inferenz", + "Inferencing voice:": "Inferenz Stimme:", + "Model_Name": "Model Name", + "Dataset_Name": "Name des Datensatzes", + "Or add your dataset path:": "Oder geben Sie den Pfad zu Ihrem Dataset ein:", + "Whether the model has pitch guidance.": "Ob das Modell eine Tonhöhenführung hat.", + "Whether to save only the latest .ckpt file to save hard drive space": "Gibt an, ob nur die neueste .ckpt-Datei gespeichert werden soll, um Speicherplatz auf der Festplatte zu sparen", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Alle Trainings-Sets im GPU-Speicher zwischenspeichern. 
Das Zwischenspeichern kleiner Datensätze (weniger als 10 Minuten) kann das Training beschleunigen", + "Save a small final model to the 'weights' folder at each save point": "Speichern Sie an jedem Speicherpunkt ein kleines endgültiges Modell im Ordner \"weights\"", + "Refresh": "Stimmenliste, Indexpfad und Audiodateien aktualisieren", + "Unload voice to save GPU memory": "Stimme entladen, um GPU-Speicher zu sparen", + "Select Speaker/Singer ID:": "Sprecher-/Sänger-ID auswählen:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Empfohlen werden +12 Halbtöne für die Umwandlung von Mann zu Frau und -12 Halbtöne für die Umwandlung von Frau zu Mann. Wenn der Klangbereich zu weit geht und die Stimme verzerrt ist, können Sie ihn auch selbst auf den entsprechenden Bereich einstellen.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transponieren (Ganzzahl, Anzahl der Halbtöne, um eine Oktave erhöhen: 12, um eine Oktave senken: -12):", + "Feature search database file path:": "Dateipfad der Funktionssuchdatenbank:", + "Enter the path of the audio file to be processed (default is the correct format example):": "Geben Sie den Pfad der zu verarbeitenden Audiodatei ein (Standard ist das richtige Formatbeispiel):", + "Select the pitch extraction algorithm:": "Wählen Sie den Pitch-Extraktionsalgorithmus:", + "Feature search dataset file path": "Dateipfad für Feature-Suche-Datensatz", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Wenn >=3: wird eine Medianfilterung auf die ermittelten Pitch-Ergebnisse angewendet. Der Wert stellt den Filterradius dar und kann die Behauchtheit reduzieren.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Pfad zur Feature-Indexdatei. Leer lassen, um das ausgewählte Ergebnis aus der Dropdown-Liste zu verwenden:", + "Auto-detect index path and select from the dropdown:": "Indexpfad automatisch erkennen und aus dem Dropdown-Menü auswählen:", + "Path to feature file:": "Pfad zur Feature-Datei:", + "Search feature ratio:": "Verhältnis der Suchfunktionen:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Sample das ausgegebene Audio in der Nachbearbeitung auf die endgültige Samplerate zurück. Für kein Resampling auf 0 setzen:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Verwende die Volumenhüllkurve der Eingabe, um die Volumenhüllkurve der Ausgabe zu ersetzen oder mit ihr zu mischen. Je näher das Verhältnis bei 1 liegt, desto mehr wird die Ausgabehüllkurve verwendet:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Schütze stimmlose Konsonanten und Atemgeräusche, um Artefakte wie das Reißen in elektronischer Musik zu verhindern. Zum Deaktivieren auf 0,5 setzen. Verringern Sie den Wert, um den Schutz zu erhöhen, aber es kann die Indexierungsgenauigkeit verringern:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0-Kurvendatei (optional).
Eine Tonhöhe pro Zeile. Ersetzt die voreingestellte F0- und Tonhöhenmodulation:", + "Convert": "Konvertieren", + "Output information:": "Ausgabeinformationen:", + "Export audio (click on the three dots in the lower right corner to download)": "Audio exportieren (klicken Sie auf die drei Punkte in der unteren rechten Ecke, um sie herunterzuladen)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Stapelkonvertierung. Geben Sie den Ordner mit den zu konvertierenden Audiodateien ein oder laden Sie mehrere Audiodateien hoch. Das konvertierte Audio wird im angegebenen Ordner ausgegeben (Standard: 'opt').", + "Specify output folder:": "Ausgabeordner festlegen:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Geben Sie den Pfad des zu verarbeitenden Audioordners ein (kopieren Sie ihn aus der Adressleiste des Dateimanagers):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Sie können Audiodateien auch stapelweise eingeben. Wählen Sie eine der beiden Optionen. Das Lesen aus dem Ordner hat Vorrang.", + "Export file format:": "Export-Dateiformat:", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Geben Sie den Pfad des zu verarbeitenden Audioordners ein:", + "Model:": "Modell:", + "Vocal Extraction Aggressive": "Vokale Extraktion aggressiv", + "Specify the output folder for vocals:": "Geben Sie den Ausgabeordner für Vocals an:", + "Specify the output folder for accompaniment:": "Geben Sie den Ausgabeordner für die Begleitung an:", + "Train": "Trainieren", + "Enter the model name:": "Modellname eingeben:", + "Target sample rate:": "Ziel-Samplerate:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Ob das Modell eine Tonhöhenführung hat (erforderlich für Gesang, optional für Sprache):", + "Version:": "Version:", + "Number of CPU processes:": "Anzahl der CPU-Prozesse:", + "Enter the path of the training folder:": "Geben Sie den Pfad des Trainingsordners ein:", + "Specify the model ID:": "Geben Sie die Modell-ID an:", + "Auto detect audio path and select from the dropdown:": "Audiopfad automatisch erkennen und aus der Dropdown-Liste auswählen:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Fügen Sie dem Pfad zur zu verarbeitenden Audiodatei den Namen des Audios hinzu (Standard ist das richtige Formatbeispiel). Entfernen Sie den Pfad zur Verwendung eines Audios aus der Dropdown-Liste:", + "Advanced Settings": "Erweiterte Einstellungen", + "Settings": "Einstellungen", + "Status:": "Status:", + "Process data": "Daten verarbeiten", + "Drag your audio here:": "Ziehen Sie Ihr Audio hierher:", + "Or record an audio:": "Oder nehmen Sie ein Audio auf:", + "Formant shift inference audio": "Formant-Shift-Inferenz-Audio", + "Used for male to female and vice-versa conversions": "Wird für Konvertierungen von Männern zu Frauen und umgekehrt verwendet", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Geben Sie den/die GPU-Index(e) getrennt durch '-' an, wie 0-1-2 für die Verwendung von GPUs 0, 1 und 2:", + "GPU Information:": "GPU-Informationen:", + "Feature extraction": "Merkmalsextraktion", + "Save frequency:":
"Speicher-Häufigkeit", + "Training epochs:": "Trainingsepochen:", + "Batch size per GPU:": "Batch-Size pro GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Speicher nur die neueste '.ckpt' -Datei, um Speicherplatz zu sparen:", + "No": "Nein", + "Save a small final model to the 'weights' folder at each save point:": "Speicher an jedem Speicherpunkt ein kleines endgültiges Modell im Ordner \"Gewichte\":", + "Load pre-trained base model G path:": "Vortrainiertes Basismodell G Pfad:", + "Load pre-trained base model D path:": "Vorgeschultes Basismodell D Pfad:", + "Train model": "Modell trainieren", + "Train feature index": "Trainiere-Feature-Index", + "One-click training": "Ein-Klick-Training", + "Processing": "Es wird bearbeitet", + "Model fusion, can be used to test timbre fusion": "Modellfusion, kann zum Testen der Timbrefusion verwendet werden", + "Path to Model A:": "Pfad zu Modell A:", + "Path to Model B:": "Pfad zu Modell B:", + "Weight for Model A:": "Gewicht für Modell A:", + "Whether the model has pitch guidance:": "Ob das Modell eine Tonhöhenführung hat:", + "Model information to be placed:": "Zu platzierende Modellinformationen:", + "Model architecture version:": "Modellarchitekturversion:", + "Fusion": "Fusion", + "Modify model information": "Modellinformationen ändern", + "Path to Model:": "Pfad zum Modell:", + "Model information to be modified:": "Zu ändernde Modellinformationen:", + "Save file name:": "Dateiname zu speichern:", + "Modify": "Modifizieren", + "View model information": "Modellinformationen", + "View": "Ansehen", + "Model extraction": "Modellextraktion", + "Name:": "Name:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Ob das Modell eine Pitchführung hat (1: ja, 0: nein):", + "Extract": "Extrahieren", + "Export Onnx": "Onnx exportieren", + "RVC Model Path:": "RVC-Modellpfad:", + "Onnx Export Path:": "ONNX-Exportpfad:", + "MoeVS Model": "MoeVS-Modell", + "Export Onnx Model": "Onnx-Modell exportieren", + "Load model": "Lastmodell", + "Hubert Model": "Hubert-Modell", + "Select the .pth file": "Wählen Sie die Datei aus. ", + "Select the .index file": "Wählen Sie die .index-Datei", + "Select the .npy file": "Wählen Sie die Datei aus. ", + "Input device": "Eingabegerät", + "Output device": "Ausgabegerät", + "Audio device (please use the same type of driver)": "Audiogerät (bitte verwenden Sie den gleichen Treibertyp)", + "Response threshold": "Ansprechschwelle", + "Pitch settings": "Tonhöheneinstellungen", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Gibt an, ob Notiznamen anstelle ihres Hertz-Wertes verwendet werden sollen. Z.B. 
[C5, D6] statt [523.25, 1174.66]Hz", + "Index Rate": "Indexsatz", + "General settings": "Allgemeine Einstellungen", + "Sample length": "Probenlänge", + "Fade length": "Ausblendlänge", + "Extra inference time": "Zusätzliche Inferenzzeit", + "Input noise reduction": "Eingangsgeräuschreduzierung", + "Output noise reduction": "Ausgangsgeräuschreduzierung", + "Performance settings": "Leistungseinstellungen", + "Start audio conversion": "Audiokonvertierung starten", + "Stop audio conversion": "Audiokonvertierung stoppen", + "Inference time (ms):": "Inferenzzeit (ms):", + "Select the pth file": "Wählen Sie die pth-Datei aus", + "Select the .index file:": "Wählen Sie die .index-Datei aus:", + "The hubert model path must not contain Chinese characters": "Der Hubert-Modellpfad darf keine chinesischen Schriftzeichen enthalten", + "The pth file path must not contain Chinese characters.": "Der pth-Dateipfad darf keine chinesischen Zeichen enthalten.", + "The index file path must not contain Chinese characters.": "Der Indexdateipfad darf keine chinesischen Zeichen enthalten.", + "Step algorithm": "Schritt-Algorithmus", + "Number of epoch processes": "Anzahl der Epochenprozesse", + "Lowest points export": "Export der niedrigsten Punkte", + "How many lowest points to save:": "Wie viele niedrigste Punkte gespeichert werden sollen:", + "Export lowest points of a model": "Niedrigste Punkte eines Modells exportieren", + "Output models:": "Ausgabemodelle:", + "Stats of selected models:": "Statistiken ausgewählter Modelle:", + "Custom f0 [Root pitch] File": "Benutzerdefinierte f0-Datei [Root Pitch]", + "Min pitch:": "Min. Tonhöhe:", + "Specify minimal pitch for inference [HZ]": "Minimale Tonhöhe für Inferenz angeben [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Geben Sie die minimale Tonhöhe für die Inferenz an [NOTE][OKTAVE]", + "Max pitch:": "Max. Tonhöhe:", + "Specify max pitch for inference [HZ]": "Max.
Tonhöhe für Inferenz angeben [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Maximale Tonhöhe für Inferenz angeben [NOTE][OKTAVE]", + "Browse presets for formanting": "Voreinstellungen für Formanting durchsuchen", + "Presets are located in formantshiftcfg/ folder": "Voreinstellungen befinden sich im Ordner formantshiftcfg/", + "Default value is 1.0": "Standardwert ist 1.0", + "Quefrency for formant shifting": "Quefrency für Formant-Verschiebung", + "Timbre for formant shifting": "Timbre für Formantverschiebung", + "Apply": "Übernehmen", + "Single": "Einzel", + "Batch": "Batch", + "Separate YouTube tracks": "YouTube-Tracks trennen", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Laden Sie Audio von einem YouTube-Video herunter und trennen Sie automatisch die Gesangs- und Instrumentalspuren", + "Extra": "Extra", + "Merge": "Zusammenführen", + "Merge your generated audios with the instrumental": "Fügen Sie Ihre generierten Audios mit dem Instrumental zusammen", + "Choose your instrumental:": "Wählen Sie Ihr Instrumental:", + "Choose the generated audio:": "Wählen Sie das erzeugte Audio:", + "Combine": "Kombinieren", + "Download and Separate": "Herunterladen und trennen", + "Enter the YouTube link:": "YouTube-Link eingeben:", + "This section contains some extra utilities that often may be in experimental phases": "Dieser Abschnitt enthält einige zusätzliche Dienstprogramme, die sich oft in experimentellen Phasen befinden", + "Merge Audios": "Audios zusammenführen", + "Audio files have been moved to the 'audios' folder.": "Audiodateien wurden in den Ordner 'audios' verschoben.", + "Downloading audio from the video...": "Audio aus dem Video wird heruntergeladen...", + "Audio downloaded!": "Audio heruntergeladen!", + "An error occurred:": "Ein Fehler ist aufgetreten:", + "Separating audio...": "Audio wird getrennt...", + "File moved successfully.": "Datei erfolgreich verschoben.", + "Finished!": "Abgeschlossen!", + "The source file does not exist.": "Die Quelldatei ist nicht vorhanden.", + "Error moving the file:": "Fehler beim Verschieben der Datei:", + "Downloading {name} from drive": "{name} wird von Drive heruntergeladen", + "The attempt to download using Drive didn't work": "Der Versuch, mit Drive herunterzuladen, hat nicht funktioniert", + "Error downloading the file: {str(e)}": "Fehler beim Herunterladen der Datei: {str(e)}", + "Downloading {name} from mega": "{name} wird von Mega heruntergeladen", + "Downloading {name} from basic url": "{name} wird von der Basis-URL heruntergeladen", + "Download Audio": "Audio herunterladen", + "Download audios of any format for use in inference (recommended for mobile users).": "Laden Sie Audios in jedem Format zur Verwendung in Inferenz herunter (empfohlen für mobile Benutzer).", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Alle ConnectionResetErrors nach der Konvertierung sind irrelevant und rein visuell; sie können ignoriert werden.\n", + "Processed audio saved at: ": "Verarbeitete Audiodaten gespeichert unter: ", + "Conversion complete!": "Konvertierung abgeschlossen!", + "Reverb": "Nachhall", + "Compressor": "Kompressor", + "Noise Gate": "Noise-Gate", + "Volume": "Lautstärke", + "Drag the audio here and click the Refresh button": "Ziehen Sie das Audio hierher und klicken Sie auf die Schaltfläche 'Aktualisieren'", + "Select the generated audio": "Wählen Sie das erzeugte Audio", + "Volume of the instrumental audio:": "Lautstärke des
Instrumental-Audios:", + "Volume of the generated audio:": "Lautstärke des erzeugten Audios:", + "### Audio settings:": "### Audio-Einstellungen", + "### Instrumental settings:": "### Instrumentale Einstellungen:", + "### Add the effects:": "### Fügen Sie die Effekte hinzu:", + "Name for saving": "Name zum Speichern", + "Path to model": "Pfad zum Modell", + "Model information to be placed": "Zu platzierende Modellinformationen", + "Starting audio conversion... (This might take a moment)": "Starte Audio-Konvertierung... (Das kann einen Moment dauern)", + "TTS Model:": "TTS-Stimmen", + "TTS": "TTS", + "TTS Method:": "TTS-Methode", + "Audio TTS:": "Audio-TTS", + "Audio RVC:": "Audio-Modell", + "You can also drop your files to load your model.": "Sie können auch Ihre Dateien ablegen, um Ihr Modell zu laden.", + "Drag your .pth file here:": "Ziehen Sie Ihre .pth-Datei hierher:", + "Drag your .index file here:": "Ziehen Sie Ihre .index-Datei hierher:" +} diff --git a/assets/i18n/langs/en_US.json b/assets/i18n/langs/en_US.json new file mode 100644 index 0000000000000000000000000000000000000000..ba6406315eed1682e63e2323225ca3d52a18ae7a --- /dev/null +++ b/assets/i18n/langs/en_US.json @@ -0,0 +1,262 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Unfortunately, there is no compatible GPU available to support your training.", + "Yes": "Yes", + "Select your dataset:": "Select your dataset:", + "Update list": "Update list", + "Download Model": "Download Model", + "Download Backup": "Download Backup", + "Download Dataset": "Download Dataset", + "Download": "Download", + "Url:": "Url:", + "Build the index before saving.": "Build the index before saving.", + "Save your model once the training ends.": "Save your model once the training ends.", + "Save type": "Save type:", + "Save model": "Save model", + "Choose the method": "Choose the method", + "Save all": "Save all", + "Save D and G": "Save D and G", + "Save voice": "Save voice", + "Downloading the file: ": "Downloading the file: ", + "Stop training": "Stop training", + "Too many users have recently viewed or downloaded this file": "Too many users have recently viewed or downloaded this file", + "Cannot get file from this private link": "Cannot get file from this private link", + "Full download": "Full download", + "An error occurred downloading": "An error occurred downloading", + "Model saved successfully": "Model saved successfully", + "Saving the model...": "Saving the model...", + "Saved without index...": "Saved without index...", + "Saved without inference model...": "Saved without inference model...", + "An error occurred saving the model": "An error occurred saving the model", + "The model you want to save does not exist, be sure to enter the correct name.": "The model you want to save does not exist, be sure to enter the correct name.", + "The file could not be downloaded.": "The file could not be downloaded.", + "Unzip error.": "Unzip error.", + "Path to your added.index file (if it didn't automatically find it)": "Path to your added.index file (if it didn't automatically find it)", + "It has been downloaded successfully.": "It has been downloaded successfully.", + "Proceeding with the extraction...": "Proceeding with the extraction...", + "The Backup has been uploaded successfully.": "The Backup has been uploaded successfully.", + "The Dataset has been loaded successfully.": "The Dataset has been loaded successfully.", + "The Model has been loaded successfully.": "The Model has been loaded 
successfully.", + "It is used to download your inference models.": "It is used to download your inference models.", + "It is used to download your training backups.": "It is used to download your training backups.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.", + "No relevant file was found to upload.": "No relevant file was found to upload.", + "The model works for inference, and has the .index file.": "The model works for inference, and has the .index file.", + "The model works for inference, but it doesn't have the .index file.": "The model works for inference, but it doesn't have the .index file.", + "This may take a few minutes, please wait...": "This may take a few minutes, please wait...", + "Resources": "Resources", + "Step 1: Processing data": "Step 1: Processing data", + "Step 2: Extracting features": "Step 2: Extracting features", + "Step 3: Model training started": "Step 3: Model training started", + "Training is done, check train.log": "Training is done, check train.log", + "All processes have been completed!": "All processes have been completed!", + "Model Inference": "Model Inference", + "Inferencing voice:": "Inferencing voice:", + "Model_Name": "Model_Name", + "Dataset_Name": "Dataset_Name", + "Or add your dataset path:": "Or add your dataset path:", + "Whether the model has pitch guidance.": "Whether the model has pitch guidance.", + "Whether to save only the latest .ckpt file to save hard drive space": "Whether to save only the latest .ckpt file to save hard drive space", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training", + "Save a small final model to the 'weights' folder at each save point": "Save a small final model to the 'weights' folder at each save point", + "Refresh": "Refresh", + "Unload voice to save GPU memory": "Unload voice to save GPU memory", + "Select Speaker/Singer ID:": "Select Speaker/Singer ID:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):", + "Feature search database file path:": "Feature search database file path:", + "Enter the path of the audio file to be processed (default is the correct format example):": "Enter the path of the audio file to be processed (default is the correct format example):", + "Select the pitch extraction algorithm:": "Select the pitch extraction algorithm:", + "Hop Length (lower hop lengths take more time to infer but are more pitch accurate):": "Hop Length (lower hop lengths take more time to infer but are more pitch accurate):", + "Feature search dataset file path": "Feature search dataset file path", + "If >=3: apply median filtering to the harvested pitch results. 
The value represents the filter radius and can reduce breathiness.": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", + "Auto-detect index path and select from the dropdown:": "Auto-detect index path and select from the dropdown:", + "Path to feature file:": "Path to feature file:", + "Search feature ratio:": "Search feature ratio:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:", + "Convert": "Convert", + "Output information:": "Output information:", + "Export audio (click on the three dots in the lower right corner to download)": "Export audio (click on the three dots in the lower right corner to download)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').", + "Specify output folder:": "Specify output folder:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "You can also input audio files in batches. Choose one of the two options. 
Priority is given to reading from the folder.", + "Export file format:": "Export file format:", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Enter the path of the audio folder to be processed:", + "Model:": "Model:", + "Vocal Extraction Aggressive": "Vocal Extraction Aggressive", + "Specify the output folder for vocals:": "Specify the output folder for vocals:", + "Specify the output folder for accompaniment:": "Specify the output folder for accompaniment:", + "Train": "Train", + "Enter the model name:": "Enter the model name:", + "Target sample rate:": "Target sample rate:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Whether the model has pitch guidance (required for singing, optional for speech):", + "Version:": "Version:", + "Number of CPU processes:": "Number of CPU processes:", + "Enter the path of the training folder:": "Enter the path of the training folder:", + "Specify the model ID:": "Specify the model ID:", + "Auto detect audio path and select from the dropdown:": "Auto detect audio path and select from the dropdown:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:", + "Advanced Settings": "Advanced Settings", + "Settings": "Settings", + "Status:": "Status:", + "Process data": "Process data", + "Drag your audio here:": "Drag your audio here:", + "Or record an audio:": "Or record an audio:", + "Formant shift inference audio": "Formant shift inference audio", + "Used for male to female and vice-versa conversions": "Used for male to female and vice-versa conversions", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:", + "GPU Information:": "GPU Information:", + "Feature extraction": "Feature extraction", + "Save frequency:": "Save frequency:", + "Training epochs:": "Training epochs:", + "Batch size per GPU:": "Batch size per GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Save only the latest '.ckpt' file to save disk space:", + "No": "No", + "Save a small final model to the 'weights' folder at each save point:": "Save a small final model to the 'weights' folder at each save point:", + "Load pre-trained base model G path:": "Load pre-trained base model G path:", + "Load pre-trained base model D path:": "Load pre-trained base model D path:", + "Train model": "Train model", + "Train feature index": "Train feature index", + "One-click training": "One-click training", + "Processing": "Processing", + "Model fusion, can be used to test timbre fusion": "Model fusion, can be used to test timbre fusion", + "Path to Model A:": "Path to Model A:", + "Path to Model B:": "Path to Model B:", + "Weight for Model A:": "Weight for Model A:", + "Whether the model has pitch guidance:": "Whether the model has pitch guidance:", + "Model information to be placed:": "Model information to be placed:", + "Model architecture version:": "Model architecture version:", + "Fusion": "Fusion", + "Modify model information": "Modify model information", + "Path to Model:": "Path to Model:", + "Model information to be modified:": "Model information to be modified:", + "Save file name:": "Save file name:", + "Modify": "Modify", + 
"View model information": "View model information", + "View": "View", + "Model extraction": "Model extraction", + "Name:": "Name:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Whether the model has pitch guidance (1: yes, 0: no):", + "Extract": "Extract", + "Export Onnx": "Export Onnx", + "RVC Model Path:": "RVC Model Path:", + "Onnx Export Path:": "Onnx Export Path:", + "MoeVS Model": "MoeVS Model", + "Export Onnx Model": "Export Onnx Model", + "Load model": "Load model", + "Hubert Model": "Hubert Model", + "Select the .pth file": "Select the .pth file", + "Select the .index file": "Select the .index file", + "Select the .npy file": "Select the .npy file", + "Input device": "Input device", + "Output device": "Output device", + "Audio device (please use the same type of driver)": "Audio device (please use the same type of driver)", + "Response threshold": "Response threshold", + "Pitch settings": "Pitch settings", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz", + "Index Rate": "Index Rate", + "General settings": "General settings", + "Sample length": "Sample length", + "Fade length": "Fade length", + "Extra inference time": "Extra inference time", + "Input noise reduction": "Input noise reduction", + "Output noise reduction": "Output noise reduction", + "Performance settings": "Performance settings", + "Start audio conversion": "Start audio conversion", + "Stop audio conversion": "Stop audio conversion", + "Inference time (ms):": "Inference time (ms):", + "Select the pth file": "Select the pth file", + "Select the .index file:": "Select the .index file:", + "The hubert model path must not contain Chinese characters": "The hubert model path must not contain Chinese characters", + "The pth file path must not contain Chinese characters.": "The pth file path must not contain Chinese characters.", + "The index file path must not contain Chinese characters.": "The index file path must not contain Chinese characters.", + "Step algorithm": "Step algorithm", + "Number of epoch processes": "Number of epoch processes", + "Lowest points export": "Lowest points export", + "How many lowest points to save:": "How many lowest points to save:", + "Export lowest points of a model": "Export lowest points of a model", + "Output models:": "Output models:", + "Stats of selected models:": "Stats of selected models:", + "Custom f0 [Root pitch] File": "Custom f0 [Root pitch] File", + "Min pitch:": "Min pitch:", + "Specify minimal pitch for inference [HZ]": "Specify minimal pitch for inference [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Specify minimal pitch for inference [NOTE][OCTAVE]", + "Max pitch:": "Max pitch:", + "Specify max pitch for inference [HZ]": "Specify max pitch for inference [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Specify max pitch for inference [NOTE][OCTAVE]", + "Browse presets for formanting": "Browse presets for formanting", + "Presets are located in formantshiftcfg/ folder": "Presets are located in formantshiftcfg/ folder", + "Default value is 1.0": "Default value is 1.0", + "Quefrency for formant shifting": "Quefrency for formant shifting", + "Timbre for formant shifting": "Timbre for formant shifting", + "Apply": "Apply", + "Single": "Single", + "Batch": "Batch", + "Separate YouTube tracks": "Separate YouTube tracks", + "Download audio from a YouTube video and automatically 
separate the vocal and instrumental tracks": "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks", + "Extra": "Extra", + "Merge": "Merge", + "Merge your generated audios with the instrumental": "Merge your generated audios with the instrumental", + "Choose your instrumental:": "Choose your instrumental:", + "Choose the generated audio:": "Choose the generated audio:", + "Combine": "Combine", + "Download and Separate": "Download and Separate", + "Enter the YouTube link:": "Enter the YouTube link:", + "This section contains some extra utilities that often may be in experimental phases": "This section contains some extra utilities that often may be in experimental phases", + "Merge Audios": "Merge Audios", + "Audio files have been moved to the 'audios' folder.": "Audio files have been moved to the 'audios' folder.", + "Downloading audio from the video...": "Downloading audio from the video...", + "Audio downloaded!": "Audio downloaded!", + "An error occurred:": "An error occurred:", + "Separating audio...": "Separating audio...", + "File moved successfully.": "File moved successfully.", + "Finished!": "Finished!", + "The source file does not exist.": "The source file does not exist.", + "Error moving the file:": "Error moving the file:", + "Downloading {name} from drive": "Downloading {name} from drive", + "The attempt to download using Drive didn't work": "The attempt to download using Drive didn't work", + "Error downloading the file: {str(e)}": "Error downloading the file: {str(e)}", + "Downloading {name} from mega": "Downloading {name} from mega", + "Downloading {name} from basic url": "Downloading {name} from basic url", + "Download Audio": "Download Audio", + "Download audios of any format for use in inference (recommended for mobile users).": "Download audios of any format for use in inference (recommended for mobile users).", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n", + "Processed audio saved at: ": "Processed audio saved at: ", + "Conversion complete!": "Conversion complete!", + "Reverb": "Reverb", + "Compressor": "Compressor", + "Noise Gate": "Noise Gate", + "Volume": "Volume", + "Drag the audio here and click the Refresh button": "Drag the audio here and click the Refresh button", + "Select the generated audio": "Select the generated audio", + "Volume of the instrumental audio:": "Volume of the instrumental audio:", + "Volume of the generated audio:": "Volume of the generated audio:", + "### Audio settings:": "### Audio settings:", + "### Instrumental settings:": "### Instrumental settings:", + "### Add the effects:": "### Add the effects:", + "Name for saving": "Name for saving", + "Path to model": "Path to model", + "Model information to be placed": "Model information to be placed", + "Starting audio conversion... (This might take a moment)": "Starting audio conversion... 
(This might take a moment)", + "Error no reformatted.wav found:": "Error no reformatted.wav found:", + "Error at separating audio:": "Error at separating audio:", + "Vocal": "Vocal", + "Instrumental": "Instrumental", + "Finished": "Finished", + "TTS Model:": "TTS Model:", + "TTS": "TTS", + "RVC Model:": "RVC Model:", + "TTS Method:": "TTS Method:", + "Audio TTS:": "Audio TTS:", + "Audio RVC:": "Audio RVC:", + "Enter the text you want to convert to voice...": "Enter the text you want to convert to voice...", + "Text:": "Text:", + "You can also drop your files to load your model.": "You can also drop your files to load your model.", + "Drag your .pth file here:": "Drag your .pth file here:", + "Drag your .index file here:": "Drag your .index file here:" +} diff --git a/assets/i18n/langs/es_ES.json b/assets/i18n/langs/es_ES.json new file mode 100644 index 0000000000000000000000000000000000000000..d25c357fa2ca2f8e8e479fc4d368e773f32a0fd4 --- /dev/null +++ b/assets/i18n/langs/es_ES.json @@ -0,0 +1,262 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Lamentablemente, no hay una GPU compatible disponible para respaldar tu formación.", + "Yes": "Sí", + "Select your dataset:": "Seleccione su dataset:", + "Update list": "Actualizar lista", + "Download Model": "Descargar modelo", + "Download Backup": "Descargar copias de seguridad", + "Download Dataset": "Descargar dataset", + "Download": "Descargar", + "Url:": "Enlace:", + "Build the index before saving.": "Genere el índice antes de guardarlo.", + "Save your model once the training ends.": "Guarde su modelo una vez finalice el entrenamiento.", + "Save type": "Método de guardado:", + "Save model": "Guardar modelo", + "Choose the method": "Elige el método", + "Save all": "Guardar todos los archivos", + "Save D and G": "Guardar archivos G y D", + "Save voice": "Guadar modelo para inferencia", + "Downloading the file: ": "Descargando el archivo:", + "Stop training": "Detener entrenamiento", + "Too many users have recently viewed or downloaded this file": "Demasiados usuarios han visto o descargado recientemente este archivo", + "Cannot get file from this private link": "No se puede obtener el archivo de este enlace privado", + "Full download": "Descarga completa", + "An error occurred downloading": "Se ha producido un error al descargar", + "Model saved successfully": "Modelo guardado correctamente", + "Saving the model...": "Guardando el modelo...", + "Saved without index...": "Guardado sin archivo .index...", + "Saved without inference model...": "Guardado sin modelo de inferencia...", + "An error occurred saving the model": "Se ha producido un error al guardar el modelo", + "The model you want to save does not exist, be sure to enter the correct name.": "El modelo que desea guardar no existe, asegúrese de introducir el nombre correcto.", + "The file could not be downloaded.": "No se pudo bajar el archivo", + "Unzip error.": "Error al descomprimir.", + "Path to your added.index file (if it didn't automatically find it)": "Ruta a su archivo added.index (si no lo encontró automáticamente)", + "It has been downloaded successfully.": "Se ha descargado con éxito.", + "Proceeding with the extraction...": "Continuando con la extracción...", + "The Backup has been uploaded successfully.": "La copia de seguridad se ha cargado correctamente.", + "The Dataset has been loaded successfully.": "El dataset se ha cargado correctamente.", + "The Model has been loaded successfully.": "El modelo se ha cargado correctamente.", + 
"It is used to download your inference models.": "Se utiliza para descargar sus modelos de inferencia.", + "It is used to download your training backups.": "Se utiliza para descargar las copias de seguridad de entrenamientos.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Descargue el dataset con los audios en un formato compatible (.wav/.flac) para entrenar su modelo.", + "No relevant file was found to upload.": "No se ha encontrado ningún archivo relevante para cargar.", + "The model works for inference, and has the .index file.": "El modelo funciona para inferencia y tiene el archivo .index.", + "The model works for inference, but it doesn't have the .index file.": "El modelo funciona para inferencia, pero no tiene el archivo .index.", + "This may take a few minutes, please wait...": "Esto puede tardar unos minutos, espere por favor.", + "Resources": "Recursos", + "Step 1: Processing data": "Paso 1: Procesamiento de datos", + "Step 2: Extracting features": "Paso 2: Extracción de funciones", + "Step 3: Model training started": "Paso 3: Entrenamiento del modelo", + "Training is done, check train.log": "El entrenamiento ha finalizado, revisa train.log.", + "All processes have been completed!": "¡Todos los procesos se han completado!", + "Model Inference": "Inferencia", + "Inferencing voice:": "Modelo de voz:", + "Model_Name": "Nombre_Modelo", + "Dataset_Name": "Nombre_Dataset", + "Or add your dataset path:": "O introduce la ruta de tu dataset:", + "Whether the model has pitch guidance.": "Si el modelo tiene guía de tono.", + "Whether to save only the latest .ckpt file to save hard drive space": "Si guardar solo el último archivo .ckpt para ahorrar espacio en el disco duro", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "El almacenamiento en caché de pequeños conjuntos de datos (menos de 10 minutos) puede acelerar el entrenamiento", + "Save a small final model to the 'weights' folder at each save point": "Guarde un pequeño modelo final en la carpeta 'weights' en cada punto de guardado", + "Refresh": "Actualizar", + "Unload voice to save GPU memory": "Eliminar voz para ahorrar memoria de GPU", + "Select Speaker/Singer ID:": "Seleccionar ID de orador/cantante:", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transponer (entero, número de semitonos, subir una octava: 12, bajar una octava -12):", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Se recomienda la tecla +12 para la conversión de hombre a mujer y la tecla -12 para la conversión de mujer a hombre. 
Si el rango de sonido va demasiado lejos y la voz está distorsionada, también puede ajustarlo al rango apropiado usted mismo.", + "Enter the path of the audio file to be processed (default is the correct format example):": "Introduzca la ruta del archivo de audio a procesar (el ejemplo de formato correcto es el predeterminado):", + "Select the pitch extraction algorithm:": "Seleccione el algoritmo de extracción de tono:", + "Feature search dataset file path": "Ruta del archivo del dataset de búsqueda de características", + "Hop Length (lower hop lengths take more time to infer but are more pitch accurate):": "Longitud de salto (las longitudes de salto más bajas tardan más en inferirse, pero son más precisas en cuanto al tono):", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Si >=3: aplicar filtrado de mediana a los resultados de tono extraídos. El valor representa el radio del filtro y puede reducir la respiración audible.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Ruta al archivo .index de características. Déjelo en blanco para usar el resultado seleccionado del menú desplegable:", + "Auto-detect index path and select from the dropdown:": "Detectar automáticamente la ruta del archivo .index y seleccionar en el menú desplegable:", + "Path to feature file:": "Ruta del archivo de características:", + "Search feature ratio:": "Proporción de función de búsqueda:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Vuelva a muestrear el audio de salida en el posprocesamiento a la frecuencia de muestreo final. Establecer en 0 para no volver a muestrear:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Utilice la envolvente de volumen de la entrada para reemplazar o mezclar con la envolvente de volumen de la salida. Cuanto más cerca esté la relación a 1, más se utilizará la envolvente de salida:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Proteja las consonantes sordas y los sonidos de la respiración para evitar artefactos como el desgarro en la música electrónica. Establecer en 0.5 para desactivar. Disminuya el valor para aumentar la protección, pero puede reducir la precisión de la indexación:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Archivo de curva F0 (opcional). Un tono por línea. Sustituye la modulación de tono y F0 predeterminada:", + "Convert": "Convertir", + "Output information:": "Información de salida:", + "Export audio (click on the three dots in the lower right corner to download)": "Exportar audio (haga clic en los tres puntos en la esquina inferior derecha para descargar)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Conversión por lotes. Introduzca la carpeta que contiene los archivos de audio a convertir o cargue varios archivos de audio.
El audio convertido se emitirá en la carpeta especificada (predeterminado: 'opt').", + "Specify output folder:": "Especificar carpeta de salida:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Introduzca la ruta de la carpeta de audio a procesar (cópiela desde la barra de direcciones del gestor de archivos):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "También puede introducir archivos de audio en lotes. Elija una de las dos opciones. Se da prioridad a la lectura de la carpeta.", + "Export file format:": "Formato de archivo de exportación:", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Introduzca la ruta de la carpeta de audio a procesar:", + "Model:": "Modelo:", + "Vocal Extraction Aggressive": "Extracción vocal agresiva", + "Specify the output folder for vocals:": "Especifique la carpeta de salida para las voces:", + "Specify the output folder for accompaniment:": "Especifique la carpeta de salida para el acompañamiento:", + "Train": "Entrenar", + "Enter the model name:": "Introduzca el nombre del modelo:", + "Target sample rate:": "Tasa de muestreo objetivo:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Si el modelo tiene guía de tono (requerido para cantar, opcional para el habla):", + "Version:": "Versión:", + "Number of CPU processes:": "Número de procesos de CPU:", + "Enter the path of the training folder:": "Introduzca la ruta de la carpeta de entrenamiento:", + "Specify the model ID:": "Especifique el ID del modelo:", + "Auto detect audio path and select from the dropdown:": "Detectar automáticamente la ruta de audio y seleccionar en el menú desplegable:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Agregue el nombre del audio a la ruta del archivo de audio que se procesará (el ejemplo de formato correcto es el predeterminado). Elimine la ruta para usar un audio de la lista desplegable:", + "Advanced Settings": "Ajustes avanzados", + "Settings": "Configuración", + "Status:": "Estado:", + "Process data": "Procesar datos", + "Drag your audio here:": "Arrastra tu audio aquí:", + "Or record an audio:": "O graba un audio:", + "Formant shift inference audio": "Audio de inferencia con desplazamiento de formantes", + "Used for male to female and vice-versa conversions": "Se utiliza para conversiones de hombre a mujer y viceversa", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Proporcione los índices de la GPU separados por '-', como 0-1-2 para usar las GPU 0, 1 y 2:", + "GPU Information:": "Información de la GPU:", + "Feature extraction": "Extracción de características", + "Save frequency:": "Frecuencia de guardado:", + "Training epochs:": "Epochs de entrenamiento:", + "Batch size per GPU:": "Tamaño de lote por GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Guarde solo el último archivo '.ckpt' para ahorrar espacio en disco:", + "No": "No", + "Save a small final model to the 'weights' folder at each save point:": "Guarde un pequeño modelo final en la carpeta 'weights' en cada punto de guardado:", + "Load pre-trained base model G path:": "Ruta del modelo base G preentrenado:", + "Load pre-trained base model D path:": "Ruta del modelo base D preentrenado:", + "Train model": "Entrenar modelo", + "Train
feature index": "Índice de características", + "One-click training": "Entrenamiento con un solo clic", + "Processing": "Procesamiento", + "Model fusion, can be used to test timbre fusion": "Modelo de fusión, se puede utilizar para probar la fusión de timbre", + "Path to Model A:": "Ruta al Modelo A:", + "Path to Model B:": "Ruta al Modelo B:", + "Weight for Model A:": "Peso para el modelo A:", + "Whether the model has pitch guidance:": "Si el modelo tiene guía de tono:", + "Model information to be placed:": "Información del modelo a colocar:", + "Name:": "Nombre:", + "Model architecture version:": "Versión de la arquitectura del modelo:", + "Fusion": "Fusión", + "Modify model information": "Modificar información del modelo", + "Path to Model:": "Ruta al modelo:", + "Model information to be modified:": "Información del modelo a modificar:", + "Save file name:": "Guardar nombre de archivo:", + "Modify": "Modificar", + "View model information": "Información del modelo", + "View": "Ver", + "Model extraction": "Extracción del modelo", + "Whether the model has pitch guidance (1: yes, 0: no):": "Si el modelo tiene guía de tono (1: sí, 0: no):", + "Extract": "Extraer", + "Export Onnx": "Exportar Onnx", + "RVC Model Path:": "Ruta del modelo RVC:", + "Onnx Export Path:": "Ruta de exportación Onnx:", + "MoeVS Model": "Modelo MoeVS", + "Export Onnx Model": "Exportar modelo Onnx", + "Load model": "Cargar modelo", + "Hubert Model": "Modelo Hubert", + "Select the .pth file": "Seleccione un archivo.", + "Select the .index file": "Seleccione el archivo .index", + "Select the .npy file": "Seleccione un archivo.", + "Input device": "Dispositivo de entrada", + "Output device": "Dispositivo de salida", + "Audio device (please use the same type of driver)": "Dispositivo de audio (utilice el mismo tipo de controlador)", + "Response threshold": "Umbral de respuesta", + "Pitch settings": "Ajustes de tono", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Si se deben usar nombres de notas en lugar de su valor de hercios. 
POR EJEMPLO, [C5, D6] en lugar de [523,25, 1174,66]Hz", + "Index Rate": "Tasa de Índice", + "General settings": "Configuración general", + "Sample length": "Longitud de la muestra", + "Fade length": "Longitud del desvanecimiento", + "Extra inference time": "Tiempo de inferencia adicional", + "Input noise reduction": "Reducción de ruido de entrada", + "Output noise reduction": "Reducción de ruido de salida", + "Performance settings": "Ajustes de rendimiento", + "Start audio conversion": "Iniciar conversión de audio", + "Stop audio conversion": "Detener conversión de audio", + "Inference time (ms):": "Tiempo de inferencia (ms):", + "Select the pth file": "Seleccione el archivo pth", + "Select the .index file:": "Seleccione el archivo .index:", + "The hubert model path must not contain Chinese characters": "La ruta del modelo hubert no debe contener caracteres chinos", + "The pth file path must not contain Chinese characters.": "La ruta del archivo pth no debe contener caracteres chinos.", + "The index file path must not contain Chinese characters.": "La ruta del archivo .index no debe contener caracteres chinos.", + "Step algorithm": "Algoritmo de pasos", + "Number of epoch processes": "Número de procesos de Epoch", + "Lowest points export": "Exportación de puntos más bajos", + "How many lowest points to save:": "Cuántos puntos bajos quieres guardar:", + "Export lowest points of a model": "Exportar los puntos más bajos de un modelo", + "Output models:": "Modelos de salida:", + "Stats of selected models:": "Estadísticas de los modelos seleccionados:", + "Custom f0 [Root pitch] File": "Archivo personalizado f0 [Root pitch]", + "Min pitch:": "Tono mínimo:", + "Feature search database file path:": "Ruta del archivo de la base de datos de búsqueda de características:", + "Specify minimal pitch for inference [HZ]": "Especificar tono mínimo para inferencia [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Especifique el tono mínimo para la inferencia [NOTA][OCTAVA]", + "Max pitch:": "Tono máximo:", + "Specify max pitch for inference [HZ]": "Especifique el tono máximo para la inferencia [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Especifique el tono máximo para la inferencia [NOTA][OCTAVA]", + "Browse presets for formanting": "Examinar ajustes preestablecidos para formantes", + "Presets are located in formantshiftcfg/ folder": "Los ajustes preestablecidos se encuentran en la carpeta formantshiftcfg/", + "Default value is 1.0": "El valor por defecto es 1.0", + "Quefrency for formant shifting": "Quefrencia para desplazamiento de formantes", + "Timbre for formant shifting": "Timbre para el cambio de formantes", + "Apply": "Aplicar", + "Single": "Individual", + "Batch": "Lote", + "Separate YouTube tracks": "Separar pistas de YouTube", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Descarga el audio de un vídeo de YouTube y separa automáticamente las pistas vocales e instrumentales", + "Extra": "Extra", + "Merge": "Combinar", + "Merge your generated audios with the instrumental": "Combina tus audios generados con el instrumental", + "Choose your instrumental:": "Elige tu instrumental:", + "Choose the generated audio:": "Elige el audio generado:", + "Combine": "Combinar", + "Download and Separate": "Descargar y separar", + "Enter the YouTube link:": "Introduce el enlace de YouTube:", + "This section contains some extra utilities that often may be in experimental phases": "Esta sección contiene algunas utilidades adicionales
que a menudo pueden estar en fases experimentales", + "Merge Audios": "Combinar audios", + "Audio files have been moved to the 'audios' folder.": "Los archivos de audio se han movido a la carpeta 'audios'.", + "Downloading audio from the video...": "Descargando audio del vídeo...", + "Audio downloaded!": "¡Audio descargado!", + "An error occurred:": "Se ha producido un error:", + "Separating audio...": "Separando audio...", + "File moved successfully.": "El archivo se ha movido correctamente.", + "Finished!": "¡Listo!", + "The source file does not exist.": "El archivo de origen no existe.", + "Error moving the file:": "Error al mover el archivo:", + "Downloading {name} from drive": "Descargando {name} de Drive", + "The attempt to download using Drive didn't work": "El intento de descarga con Drive no ha funcionado", + "Error downloading the file: {str(e)}": "Error al descargar el archivo: {str(e)}", + "Downloading {name} from mega": "Descargando {name} de mega", + "Downloading {name} from basic url": "Descargando {name} desde la URL básica", + "Download Audio": "Descargar audio", + "Download audios of any format for use in inference (recommended for mobile users).": "Descargar audios de cualquier formato para su uso en inferencia (recomendado para usuarios móviles).", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Cualquier ConnectionResetError tras la conversión es irrelevante y puramente visual; se puede ignorar.\n", + "Processed audio saved at: ": "Audio procesado guardado en: ", + "Conversion complete!": "¡Conversión completada!", + "Reverb": "Reverberación", + "Compressor": "Compresor", + "Noise Gate": "Puerta de ruido", + "Volume": "Volumen", + "Drag the audio here and click the Refresh button": "Arrastre el audio aquí y haga clic en el botón Actualizar", + "Select the generated audio": "Selecciona el audio generado", + "Volume of the instrumental audio:": "Volumen del audio instrumental:", + "Volume of the generated audio:": "Volumen del audio generado:", + "### Audio settings:": "### Ajustes de audio:", + "### Instrumental settings:": "### Ajustes del instrumental:", + "### Add the effects:": "### Añade los efectos:", + "Name for saving": "Nombre de guardado", + "Path to model": "Ruta al modelo", + "Model information to be placed": "Información del modelo a colocar", + "Starting audio conversion... (This might take a moment)": "Iniciando la conversión del audio... 
(Esto podría llevar un tiempo)", + "Error no reformatted.wav found:": "Error no se encontró el archivo reformatted.wav:", + "Error at separating audio:": "Error al separar el audio:", + "Vocal": "Vocal", + "Instrumental": "Instrumental", + "Finished": "Terminado", + "TTS Model:": "Modelo TTS:", + "TTS": "TTS", + "RVC Model:": "Modelo RVC:", + "TTS Method:": "Método TTS:", + "Audio TTS:": "Audio TTS:", + "Audio RVC:": "Audio RVC:", + "Enter the text you want to convert to voice...": "Introduce el texto que deseas convertir en voz...", + "Text:": "Texto:", + "You can also drop your files to load your model.": "También puedes soltar tus archivos para cargar tu modelo.", + "Drag your .pth file here:": "Arrastra tu archivo .pth aquí:", + "Drag your .index file here:": "Arrastra tu archivo .index aquí:" +} diff --git a/assets/i18n/langs/id_ID.json b/assets/i18n/langs/id_ID.json new file mode 100644 index 0000000000000000000000000000000000000000..e5c513a94c3a3d83ca72d669cb8ad115d222c2bc --- /dev/null +++ b/assets/i18n/langs/id_ID.json @@ -0,0 +1,248 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Sayangnya, tidak ada GPU kompatibel yang tersedia untuk mendukung pelatihan Anda.", + "Yes": "Ya", + "Select your dataset:": "Pilih kumpulan data Anda:", + "Update list": "Perbarui daftar", + "Download Model": "Unduh Model", + "Download Backup": "Unduh Cadangan", + "Download Dataset": "Unduh Kumpulan Data", + "Download": "Unduh", + "Url:": "URL:", + "Build the index before saving.": "Bangun indeks sebelum menyimpan.", + "Save your model once the training ends.": "Simpan model Anda setelah pelatihan berakhir.", + "Save type": "Jenis penyimpanan", + "Save model": "Simpan modelnya", + "Choose the method": "Pilih metodenya", + "Save all": "Simpan semua", + "Save D and G": "Simpan D dan G", + "Save voice": "Simpan suara", + "Downloading the file: ": "Mengunduh file: ", + "Stop training": "Hentikan pelatihan", + "Too many users have recently viewed or downloaded this file": "Terlalu banyak pengguna yang baru-baru ini melihat atau mengunduh file ini", + "Cannot get file from this private link": "Tidak dapat memperoleh file dari tautan pribadi ini", + "Full download": "Unduhan penuh", + "An error occurred downloading": "Terjadi kesalahan saat mengunduh", + "Model saved successfully": "Model berhasil disimpan", + "Saving the model...": "Menyimpan model...", + "Saved without index...": "Disimpan tanpa indeks...", + "model_name": "nama_model", + "Saved without inference model...": "Disimpan tanpa model inferensi...", + "An error occurred saving the model": "Terjadi kesalahan saat menyimpan model", + "The model you want to save does not exist, be sure to enter the correct name.": "Model yang ingin disimpan tidak ada, pastikan memasukkan nama yang benar.", + "The file could not be downloaded.": "File tidak dapat diunduh.", + "Unzip error.": "Kesalahan buka zip.", + "Path to your added.index file (if it didn't automatically find it)": "Jalur ke file added.index Anda (jika tidak menemukannya secara otomatis)", + "It has been downloaded successfully.": "Itu telah berhasil diunduh.", + "Proceeding with the extraction...": "Melanjutkan ekstraksi...", + "The Backup has been uploaded successfully.": "Cadangan telah berhasil diunggah.", + "The Dataset has been loaded successfully.": "Dataset telah berhasil dimuat.", + "The Model has been loaded successfully.": "Model telah berhasil dimuat.", + "It is used to download your inference models.": "Ini digunakan untuk mengunduh model inferensi 
Anda.", + "It is used to download your training backups.": "Ini digunakan untuk mengunduh cadangan pelatihan Anda.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Unduh kumpulan data dengan audio dalam format yang kompatibel (.wav/.flac) untuk melatih model Anda.", + "No relevant file was found to upload.": "Tidak ada file relevan yang ditemukan untuk diunggah.", + "The model works for inference, and has the .index file.": "Model ini berfungsi untuk inferensi, dan memiliki file .index.", + "The model works for inference, but it doesn't have the .index file.": "Model ini berfungsi untuk inferensi, tetapi tidak memiliki file .index.", + "This may take a few minutes, please wait...": "Ini mungkin memakan waktu beberapa menit, harap tunggu...", + "Resources": "Sumber daya", + "Step 1: Processing data": "Langkah 1: Memproses data", + "Step 2: Extracting features": "Langkah 2: Mengekstraksi fitur", + "Step 3: Model training started": "Langkah 3: Pelatihan model dimulai", + "Training is done, check train.log": "Pelatihan selesai, periksa train.log", + "All processes have been completed!": "Semua proses telah selesai!", + "Model Inference": "Inferensi Model", + "Inferencing voice:": "Menyimpulkan suara:", + "Model_Name": "Nama model", + "Dataset_Name": "Kumpulan Data_Nama", + "Or add your dataset path:": "Atau masukkan jalur ke kumpulan data Anda:", + "Whether the model has pitch guidance.": "Apakah model memiliki panduan nada.", + "Whether to save only the latest .ckpt file to save hard drive space": "Apakah hanya menyimpan file .ckpt terbaru untuk menghemat ruang hard drive", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Simpan semua set pelatihan ke memori GPU. Menyimpan kumpulan data kecil (kurang dari 10 menit) dapat mempercepat pelatihan", + "Save a small final model to the 'weights' folder at each save point": "Simpan model akhir kecil ke folder 'bobot' di setiap titik penyimpanan", + "Refresh": "Segarkan daftar suara, jalur indeks, dan file audio", + "Unload voice to save GPU memory": "Bongkar suara untuk menghemat memori GPU:", + "Select Speaker/Singer ID:": "Pilih ID Pembicara/Penyanyi:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Direkomendasikan kunci +12 untuk konversi pria ke wanita, dan -12 kunci untuk konversi wanita ke pria. Jika rentang suara terlalu jauh dan suaranya terdistorsi, Anda juga dapat menyesuaikannya sendiri ke rentang yang sesuai.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transpos (bilangan bulat, jumlah seminada, dinaikkan satu oktaf: 12, diturunkan satu oktaf: -12):", + "Enter the path of the audio file to be processed (default is the correct format example):": "Masukkan jalur file audio yang akan diproses (defaultnya adalah contoh format yang benar):", + "Select the pitch extraction algorithm:": "Pilih algoritma ekstraksi nada:", + "Feature search dataset file path": "Jalur file kumpulan data pencarian fitur", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Jika >=3: terapkan pemfilteran median pada hasil pitch yang dipanen. 
Nilai tersebut mewakili radius filter dan dapat mengurangi desah napas.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Jalur ke file indeks fitur. Biarkan kosong untuk menggunakan hasil yang dipilih dari dropdown:", + "Auto-detect index path and select from the dropdown:": "Deteksi jalur indeks secara otomatis dan pilih dari dropdown:", + "Path to feature file:": "Jalur ke file fitur:", + "Search feature ratio:": "Rasio fitur pencarian:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Sampel ulang audio keluaran dalam pasca-pemrosesan ke laju sampel akhir. Setel ke 0 tanpa pengambilan sampel ulang:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Gunakan amplop volume masukan untuk menggantikan atau mencampur dengan amplop volume keluaran. Semakin dekat rasionya ke 1, semakin banyak amplop keluaran yang digunakan:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Lindungi konsonan tak bersuara dan bunyi napas untuk mencegah artefak seperti robek pada musik elektronik. Setel ke 0,5 untuk menonaktifkan. Turunkan nilainya untuk meningkatkan perlindungan, namun hal ini dapat mengurangi akurasi pengindeksan:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "File kurva F0 (opsional). Satu nada per baris. Menggantikan F0 dan modulasi nada default:", + "Convert": "Mengubah", + "Output information:": "Informasi keluaran:", + "Export audio (click on the three dots in the lower right corner to download)": "Ekspor audio (klik tiga titik di pojok kanan bawah untuk mengunduh)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Konversi batch. Masukkan folder yang berisi file audio yang akan dikonversi atau unggah beberapa file audio. Audio yang dikonversi akan dikeluarkan di folder yang ditentukan (default: 'opt').", + "Specify output folder:": "Tentukan folder keluaran:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Masukkan jalur folder audio yang akan diproses (salin dari bilah alamat pengelola file):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Anda juga dapat memasukkan file audio secara berkelompok. Pilih salah satu dari dua opsi. 
Prioritas diberikan untuk membaca dari folder.", + "Export file format": "Format file ekspor", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Masukkan jalur folder audio yang akan diproses:", + "Model": "Model", + "Vocal Extraction Aggressive": "Ekstraksi Vokal Agresif", + "Specify the output folder for vocals:": "Tentukan folder keluaran untuk vokal:", + "Specify the output folder for accompaniment:": "Tentukan folder keluaran untuk pengiring:", + "Train": "Latih", + "Enter the model name:": "Masukkan nama model:", + "Target sample rate:": "Tingkat sampel target:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Apakah model memiliki panduan nada (wajib untuk bernyanyi, opsional untuk berbicara):", + "Version": "Versi", + "Number of CPU processes:": "Jumlah proses CPU yang digunakan untuk ekstraksi nada dan pemrosesan data:", + "Enter the path of the training folder:": "Masukkan jalur folder pelatihan:", + "Specify the model ID:": "Tentukan ID model:", + "Auto detect audio path and select from the dropdown:": "Deteksi otomatis jalur audio dan pilih dari dropdown:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Tambahkan nama audio ke jalur ke file audio yang akan diproses (standarnya adalah contoh format yang benar) Hapus jalur untuk menggunakan audio dari daftar dropdown:", + "Advanced Settings": "Pengaturan lanjutan", + "Settings": "Pengaturan", + "Status:": "Status:", + "Process data": "Proses data", + "Drag your audio here:": "Seret audio Anda ke sini:", + "Or record an audio:": "Atau rekam audio:", + "Formant shift inference audio": "Audio inferensi pergeseran formant", + "Used for male to female and vice-versa conversions": "Digunakan untuk konversi pria ke wanita dan sebaliknya", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Harap berikan indeks GPU yang dipisahkan dengan '-', seperti 0-1-2 untuk menggunakan GPU 0, 1, dan 2:", + "GPU Information:": "Informasi GPU:", + "Feature extraction": "Ekstraksi fitur", + "Save frequency:": "Frekuensi penyimpanan:", + "Training epochs:": "Epoch pelatihan:", + "Batch size per GPU:": "Ukuran batch per GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Simpan hanya file '.ckpt' terbaru untuk menghemat ruang disk:", + "No": "Tidak", + "Save a small final model to the 'weights' folder at each save point:": "Simpan model akhir kecil ke folder 'weights' di setiap titik penyimpanan:", + "Load pre-trained base model G path:": "Memuat jalur G model dasar terlatih:", + "Load pre-trained base model D path:": "Memuat jalur model D dasar yang telah dilatih sebelumnya:", + "Train model": "Latih model", + "Train feature index": "Latih indeks fitur", + "One-click training": "Pelatihan sekali klik", + "Processing": "Pengolahan", + "Model fusion, can be used to test timbre fusion": "Fusi model, dapat digunakan untuk menguji fusi timbre", + "Path to Model A:": "Jalur ke Model A:", + "Path to Model B:": "Jalur ke Model B:", + "Weight for Model A:": "Berat untuk Model A:", + "Whether the model has pitch guidance:": "Apakah model memiliki panduan nada:", + "Model information to be placed:": "Informasi model yang akan ditempatkan:", + "Model architecture version:": "Versi arsitektur model:", + "Fusion": "Fusi", + "Modify model information": "Ubah informasi model", + "Path to 
Model:": "Jalur Menuju Model:", + "Model information to be modified:": "Informasi model yang akan dimodifikasi:", + "Save file name:": "Simpan nama file:", + "Modify": "Memodifikasi", + "View model information": "Lihat informasi model", + "View": "Melihat", + "Model extraction": "Ekstraksi model (masukkan jalur model file besar di bawah folder 'logs'). Ini berguna jika Anda ingin menghentikan pelatihan di tengah jalan dan mengekstrak serta menyimpan file model kecil secara manual, atau jika Anda ingin menguji model perantara:", + "Name:": "Simpan nama:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Apakah model memiliki panduan nada (1: ya, 0: tidak):", + "Extract": "Ekstrak", + "Export Onnx": "Ekspor Onnx", + "RVC Model Path:": "Jalur Model RVC:", + "Onnx Export Path:": "Jalur Ekspor Onnx:", + "MoeVS Model": "Model MoeVS", + "Export Onnx Model": "Ekspor Model Onnx", + "Load model": "Model beban", + "Hubert Model": "Model Hubert", + "Select the .pth file": "Pilih file .pth", + "Select the .index file": "Pilih file .index", + "Select the .npy file": "Pilih file .npy", + "Input device": "Alat input", + "Output device": "Perangkat keluaran", + "Audio device (please use the same type of driver)": "Perangkat audio (harap gunakan jenis driver yang sama)", + "Response threshold": "Ambang batas respons", + "Pitch settings": "Pengaturan nada", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Apakah akan menggunakan nama nada dan bukan nilai hertznya. MISALNYA. [C5, D6] bukannya [523.25, 1174.66]Hz", + "Index Rate": "Tingkat Indeks", + "General settings": "Pengaturan Umum", + "Sample length": "Panjang sampel", + "Fade length": "Panjang pudar", + "Extra inference time": "Waktu inferensi ekstra", + "Input noise reduction": "Pengurangan kebisingan masukan", + "Output noise reduction": "Pengurangan kebisingan keluaran", + "Performance settings": "Pengaturan kinerja", + "Start audio conversion": "Mulai konversi audio", + "Stop audio conversion": "Hentikan konversi audio", + "Inference time (ms):": "Waktu inferensi (ms):", + "Select the pth file": "Pilih file pth", + "Select the .index file:": "Pilih file indeks", + "The hubert model path must not contain Chinese characters": "Jalur model hubert tidak boleh berisi karakter China", + "The pth file path must not contain Chinese characters.": "Jalur file pth tidak boleh berisi karakter Cina.", + "The index file path must not contain Chinese characters.": "Jalur file indeks tidak boleh berisi karakter Cina.", + "Step algorithm": "Algoritma langkah", + "Number of epoch processes": "Jumlah proses zaman", + "Lowest points export": "Ekspor poin terendah", + "How many lowest points to save:": "Berapa banyak poin terendah yang harus disimpan", + "Export lowest points of a model": "Ekspor titik terendah suatu model", + "Output models:": "Model keluaran", + "Stats of selected models:": "Statistik model yang dipilih", + "Custom f0 [Root pitch] File": "File f0 [Root pitch] khusus", + "Min pitch:": "nada minimal", + "Specify minimal pitch for inference [HZ]": "Tentukan nada minimal untuk inferensi [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Tentukan nada minimal untuk inferensi [CATATAN][OCTAVE]", + "Max pitch:": "Nada maksimal", + "Specify max pitch for inference [HZ]": "Tentukan nada maksimal untuk inferensi [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Tentukan nada maksimal untuk inferensi [CATATAN][OCTAVE]", + "Browse presets for formanting": "Telusuri preset 
untuk pembentukan", + "Presets are located in formantshiftcfg/ folder": "Preset terletak di folder formantshiftcfg/", + "Default value is 1.0": "Nilai defaultnya adalah 1,0", + "Quefrency for formant shifting": "Quefrency untuk pergeseran formant", + "Timbre for formant shifting": "Timbre untuk pergeseran formant", + "Apply": "Menerapkan", + "Single": "Lajang", + "Batch": "Kelompok", + "Separate YouTube tracks": "Pisahkan trek YouTube", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Unduh audio dari video YouTube dan pisahkan trek vokal dan instrumental secara otomatis", + "Extra": "Tambahan", + "Merge": "Menggabungkan", + "Merge your generated audios with the instrumental": "Gabungkan audio yang Anda hasilkan dengan instrumental", + "Choose your instrumental:": "Pilih instrumen Anda", + "Choose the generated audio:": "Pilih audio yang dihasilkan", + "Combine": "Menggabungkan", + "Download and Separate": "Unduh dan Pisahkan", + "Enter the YouTube link:": "Masukkan tautan youtube", + "This section contains some extra utilities that often may be in experimental phases": "Bagian ini berisi beberapa utilitas tambahan yang mungkin sering berada dalam tahap percobaan", + "Merge Audios": "Gabungkan Audio", + "Audio files have been moved to the 'audios' folder.": "File audio telah dipindahkan ke folder 'audios'.", + "Downloading audio from the video...": "Mengunduh audio dari video...", + "Audio downloaded!": "Unduhan audio!", + "An error occurred:": "Terjadi kesalahan:", + "Separating audio...": "Memisahkan audio...", + "File moved successfully.": "File berhasil dipindahkan.", + "Finished!": "Selesai!", + "The source file does not exist.": "File sumber tidak ada.", + "Error moving the file:": "Kesalahan saat memindahkan file:", + "Downloading {name} from drive": "Mengunduh {name} dari drive", + "The attempt to download using Drive didn't work": "Upaya mengunduh menggunakan Drive tidak berhasil", + "Error downloading the file: {str(e)}": "Kesalahan saat mengunduh berkas: {str(e)}", + "Downloading {name} from mega": "Mengunduh {name} dari mega", + "Downloading {name} from basic url": "Mengunduh {name} dari url dasar", + "Download Audio": "Unduh Audio", + "Download audios of any format for use in inference (recommended for mobile users).": "Mengunduh audio dalam format apa pun untuk digunakan dalam inferensi (disarankan untuk pengguna seluler)", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Setiap ConnectionResetErrors pasca-konversi tidak relevan dan murni visual; mereka dapat diabaikan.", + "Processed audio saved at: ": "Audio yang diproses disimpan di:", + "Conversion complete!": "Konversi selesai!", + "Reverb": "Berkumandang", + "Compressor": "Kompresor", + "Noise Gate": "Gerbang Kebisingan", + "Volume": "Volume", + "Drag the audio here and click the Refresh button": "Seret audio ke sini dan klik tombol Refresh", + "Select the generated audio": "Pilih audio yang dihasilkan", + "Volume of the instrumental audio:": "Volume audio instrumental", + "Volume of the generated audio:": "Volume audio yang dihasilkan", + "### Add the effects": "### Tambahkan efeknya", + "Starting audio conversion... (This might take a moment)": "Memulai konversi audio... 
(Ini mungkin memerlukan waktu sebentar)", + "TTS Model:": "Model TTS:", + "TTS": "TTS", + "TTS Method:": "Metode TTS:", + "Audio TTS:": "Audio TTS:", + "Audio RVC:": "Audio RVC:", + "You can also drop your files to load your model.": "Anda juga dapat menjatuhkan berkas Anda untuk memuat model Anda.", + "Drag your .pth file here:": "Seret file .pth Anda ke sini:", + "Drag your .index file here:": "Seret file .index Anda ke sini:" +} diff --git a/assets/i18n/langs/it_IT.json b/assets/i18n/langs/it_IT.json new file mode 100644 index 0000000000000000000000000000000000000000..1dd8c2aad1130f84400ca91b192a27c16de6ce6d --- /dev/null +++ b/assets/i18n/langs/it_IT.json @@ -0,0 +1,253 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Purtroppo non è disponibile una GPU compatibile per supportare l'addestramento.", + "Yes": "Sì", + "Select your dataset:": "Seleziona il tuo dataset:", + "Update list": "Aggiorna la lista", + "Download Model": "Scarica il modello", + "Download Backup": "Scarica il backup", + "Download Dataset": "Scarica il dataset", + "Download": "Scarica", + "Url:": "Link:", + "Build the index before saving.": "Genera l'indice prima di salvarlo.", + "Save your model once the training ends.": "Salva il modello una volta terminato il training.", + "Save type": "Metodo di salvataggio", + "Save model": "Salva modello", + "Choose the method": "Scegli il metodo", + "Save all": "Salva tutto", + "Save D and G": "Salva i file G e D", + "Save voice": "Salva la voce", + "Downloading the file: ": "Scaricamento del file: ", + "Stop training": "Interrompi training", + "Too many users have recently viewed or downloaded this file": "Troppi utenti hanno visto o scaricato di recente questo file", + "Cannot get file from this private link": "Impossibile ottenere il file, il link è privato", + "Full download": "Download completo", + "An error occurred downloading": "Si è verificato un errore durante il download", + "Model saved successfully": "Modello salvato con successo", + "Saving the model...": "Salvando il modello...", + "Saved without index...": "Salvato senza file .index...", + "Saved without inference model...": "Salvato senza modello di inferenza...", + "An error occurred saving the model": "Si è verificato un errore durante il salvataggio del modello", + "The model you want to save does not exist, be sure to enter the correct name.": "Il modello che vuoi salvare non esiste, assicurati di inserire il nome corretto.", + "The file could not be downloaded.": "Impossibile scaricare il file.", + "Unzip error.": "Estrazione non riuscita.", + "Path to your added.index file (if it didn't automatically find it)": "Percorso del tuo file added.index (se non l'ha trovato automaticamente)", + "It has been downloaded successfully.": "Scaricato con successo.", + "Proceeding with the extraction...": "Proseguo con l'estrazione...", + "The Backup has been uploaded successfully.": "Il backup è stato caricato correttamente.", + "The Dataset has been loaded successfully.": "Il dataset è stato caricato correttamente.", + "The Model has been loaded successfully.": "Il modello è stato caricato correttamente.", + "It is used to download your inference models.": "Serve per scaricare i tuoi modelli di inferenza.", + "It is used to download your training backups.": "Serve per scaricare i backup dei training.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Scarica il dataset con gli audio in un formato compatibile 
(.wav/.flac) per addestrare il tuo modello.", + "No relevant file was found to upload.": "Non è stato trovato alcun file rilevante da caricare.", + "The model works for inference, and has the .index file.": "Il modello funziona per l'inferenza ed ha il file .index.", + "The model works for inference, but it doesn't have the .index file.": "Il modello funziona per l'inferenza, ma non ha il file .index.", + "This may take a few minutes, please wait...": "Potrebbe richiedere alcuni minuti, attendere per favore...", + "Resources": "Risorse", + "Step 1: Processing data": "Fase 1: Elaborazione dei dati", + "Step 2: Extracting features": "Fase 2: Estrazione delle caratteristiche", + "Step 3: Model training started": "Fase 3: Training del modello", + "Training is done, check train.log": "Il training è terminato, controlla train.log.", + "All processes have been completed!": "Tutti i processi sono stati completati!", + "Model Inference": "Inferenza del modello", + "Inferencing voice:": "Modello vocale:", + "Model_Name": "Nome_Modello", + "Dataset_Name": "Nome_Dataset", + "Or add your dataset path:": "Oppure inserire il percorso del set di dati:", + "Whether the model has pitch guidance.": "Indica se il modello ha una guida tonale.", + "Whether to save only the latest .ckpt file to save hard drive space": "Indica se salvare solo l'ultimo file .ckpt per risparmiare spazio sul disco rigido", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Memorizza nella cache tutti i set di addestramento nella memoria GPU. La memorizzazione nella cache di piccoli dataset (meno di 10 minuti) può accelerare l'allenamento", + "Save a small final model to the 'weights' folder at each save point": "Salva un piccolo modello finale nella cartella 'weights' in ogni punto di salvataggio", + "Refresh": "Aggiorna la lista dei modelli, il percorso del file .index e gli audio", + "Unload voice to save GPU memory": "Rimuovi modelli per risparmiare memoria GPU", + "Select Speaker/Singer ID:": "Seleziona ID della persona che parla o del cantante:", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Trasporre (numero intero, numero di semitoni, per alzare di un'ottava: 12, per scendere di un'ottava: -12):", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Si consiglia +12 per la conversione da uomo a donna e -12 per la conversione da donna a uomo. Se la gamma sonora va troppo oltre e la voce è distorta, puoi anche regolarla tu stesso nella gamma appropriata.", + "Enter the path of the audio file to be processed (default is the correct format example):": "Inserisci il percorso del file audio da elaborare (l'esempio di formato corretto è quello predefinito):", + "Select the pitch extraction algorithm:": "Selezionare l'algoritmo di estrazione del pitch:", + "Feature search dataset file path": "Percorso del file del dataset di ricerca delle caratteristiche", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Se >=3: applica un filtraggio mediano ai risultati di pitch estratti con harvest. Il valore rappresenta il raggio del filtro e può ridurre il soffio nella voce.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Percorso al file .index. 
Lasciarlo vuoto per utilizzare il risultato selezionato dal menu a tendina:", + "Auto-detect index path and select from the dropdown:": "Rilevare automaticamente il percorso del file .index e selezionare dal menu a tendina:", + "Path to feature file:": "Percorso al file delle caratteristiche:", + "Search feature ratio:": "Proporzione delle caratteristiche di ricerca:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Ricampionare l'audio in uscita in post-elaborazione alla frequenza di campionamento finale. Impostare a 0 per non campionare di nuovo:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Utilizzare l'inviluppo del volume dell'ingresso per sostituire o mescolare con l'inviluppo del volume dell'uscita. Più il rapporto è vicino a 1, più verrà utilizzato l'inviluppo di uscita:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Proteggi le consonanti sorde e i suoni del respiro per evitare artefatti come la lacerazione nella musica elettronica. Impostare su 0.5 per disattivare. Diminuire il valore per aumentare la protezione, ma può ridurre la precisione dell'indicizzazione:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "File curva F0 (opzionale). Un pitch per riga. Sostituisce l'F0 e la modulazione del pitch di default:", + "Convert": "Converti", + "Output information:": "Informazioni in uscita:", + "Export audio (click on the three dots in the lower right corner to download)": "Esporta audio (clicca sui tre puntini nell'angolo in basso a destra per scaricare)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Conversione batch. Inserire la cartella contenente i file audio da convertire o caricare più file audio. L'audio convertito verrà emesso nella cartella specificata (default: 'opt').", + "Specify output folder:": "Specifica cartella di output:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Inserire il percorso della cartella audio da elaborare (copiarla dalla barra degli indirizzi del file manager):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Puoi anche inserire file audio in batch. Scegli una delle due opzioni. 
Viene data priorità alla lettura della cartella.", + "Export file format:": "Formato file di esportazione:", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Inserire il percorso della cartella audio da elaborare:", + "Model:": "Modello:", + "Vocal Extraction Aggressive": "Estrazione vocale aggressiva", + "Specify the output folder for vocals:": "Specificare la cartella di output per le voci:", + "Specify the output folder for accompaniment:": "Specificare la cartella di uscita per l'accompagnamento:", + "Train": "Addestramento", + "Enter the model name:": "Inserisci il nome del modello:", + "Target sample rate:": "Tasso di campionamento target:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Se il modello ha una guida di pitch (richiesto per cantare, opzionale per parlare):", + "Version:": "Versione:", + "Number of CPU processes:": "Numero di processi della CPU:", + "Enter the path of the training folder:": "Inserire il percorso della cartella del training:", + "Specify the model ID:": "Specificare l'ID del modello:", + "Auto detect audio path and select from the dropdown:": "Rileva automaticamente il percorso audio e seleziona dal menu a tendina:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Aggiungere il nome dell'audio al percorso del file audio da elaborare (l'esempio di formato corretto è quello predefinito) Eliminare il percorso per utilizzare un audio dall'elenco a discesa:", + "Advanced Settings": "Impostazioni avanzate", + "Settings": "Impostazioni", + "Status:": "Stato:", + "Process data": "Elabora dati", + "Drag your audio here:": "Trascina il tuo audio qui:", + "Or record an audio:": "Oppure registra un audio:", + "Formant shift inference audio": "Audio di inferenza di spostamento formante", + "Used for male to female and vice-versa conversions": "Viene utilizzato per le conversioni da uomo a donna e viceversa", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Fornire gli indici GPU separati da '-', come 0-1-2 per utilizzare le GPU 0, 1 e 2:", + "GPU Information:": "Informazioni GPU:", + "Feature extraction": "Estrazione delle caratteristiche", + "Save frequency:": "Frequenza di salvataggio:", + "Training epochs:": "Epoch di addestramento:", + "Batch size per GPU:": "Dimensione del lotto per GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Salvare solo l'ultimo file '.ckpt' per risparmiare spazio su disco:", + "No": "No", + "Save a small final model to the 'weights' folder at each save point:": "Salvare un piccolo modello finale nella cartella 'weights' in ogni punto di salvataggio:", + "Load pre-trained base model G path:": "Carica percorso base modello G pre-allenato:", + "Load pre-trained base model D path:": "Carica percorso base modello D pre-allenato:", + "Train model": "Allena modello", + "Train feature index": "Allena l'indice delle caratteristiche", + "One-click training": "Allenamento con un clic", + "Processing": "Elaborazione", + "Model fusion, can be used to test timbre fusion": "Fusione di modelli, può essere utilizzata per testare la fusione del timbro", + "Path to Model A:": "Percorso al Modello A:", + "Path to Model B:": "Percorso al Modello B:", + "Weight for Model A:": "Peso per il modello A:", + "Whether the model has pitch guidance:": "Indica se il modello ha una guida di tono:", + "Model information to 
be placed:": "Informazioni sul modello da mettere:", + "Name:": "Nome:", + "Model architecture version:": "Versione dell'architettura del modello:", + "Fusion": "Fusione", + "Modify model information": "Modifica informazioni modello", + "Path to Model:": "Percorso al modello:", + "Model information to be modified:": "Informazioni sul modello da modificare:", + "Save file name:": "Salva nome file:", + "Modify": "Modifica", + "View model information": "Visualizza informazioni sul modello", + "View": "Vedi", + "Model extraction": "Estrazione del modello", + "Whether the model has pitch guidance (1: yes, 0: no):": "Indica se il modello ha una guida di tono (1: sì, 0: no):", + "Extract": "Estrai", + "Export Onnx": "Esporta Onnx", + "RVC Model Path:": "Percorso del modello RVC:", + "Onnx Export Path:": "Percorso di esportazione:", + "MoeVS Model": "Modello MoeVS", + "Export Onnx Model": "Esporta modello", + "Load model": "Carica modello", + "Hubert Model": "Modello Hubert", + "Select the .pth file": "Seleziona il file .pth", + "Select the .index file": "Seleziona il file .index", + "Select the .npy file": "Seleziona il file .npy", + "Input device": "Dispositivo di input", + "Output device": "Dispositivo di output", + "Audio device (please use the same type of driver)": "Dispositivo audio (utilizzare lo stesso tipo di dispositivo)", + "Response threshold": "Soglia di risposta", + "Pitch settings": "Impostazioni di tonalità (pitch)", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Indica se i nomi delle note devono essere utilizzati al posto del loro valore di hertz. AD ESEMPIO, [C5, D6] anziché [523,25, 1174,66]Hz", + "Index Rate": "Tasso dell'indice", + "General settings": "Impostazioni generali", + "Sample length": "Lunghezza campione", + "Fade length": "Lunghezza sbiadimento/fade", + "Extra inference time": "Tempo di inferenza aggiuntivo", + "Input noise reduction": "Riduzione del rumore in ingresso", + "Output noise reduction": "Riduzione del rumore in uscita", + "Performance settings": "Impostazioni delle prestazioni", + "Start audio conversion": "Avvia conversione audio", + "Stop audio conversion": "Interrompi la conversione audio", + "Inference time (ms):": "Tempo di inferenza (ms):", + "Select the pth file": "Seleziona il file .pth", + "Select the .index file:": "Seleziona il file .index", + "The hubert model path must not contain Chinese characters": "Il percorso del modello hubert non deve contenere caratteri cinesi", + "The pth file path must not contain Chinese characters.": "Il percorso del file pth non deve contenere caratteri cinesi.", + "The index file path must not contain Chinese characters.": "Il percorso del file index non deve contenere caratteri cinesi.", + "Step algorithm": "Algoritmo dei passi", + "Number of epoch processes": "Numero di processi dell'epoch", + "Lowest points export": "Esportazione dei punti più bassi", + "How many lowest points to save:": "Quanti punti bassi vuoi salvare:", + "Export lowest points of a model": "Esporta i punti più bassi di un modello", + "Output models:": "Modelli di uscita:", + "Stats of selected models:": "Statistiche dei modelli selezionati:", + "Custom f0 [Root pitch] File": "File personalizzato f0 [Root pitch]", + "Min pitch:": "Tonalità minima:", + "Feature search database file path:": "Percorso del file del database di ricerca delle caratteristiche:", + "Specify minimal pitch for inference [HZ]": "Specificare tonalità minima per l'inferenza [HZ]", + "Specify minimal pitch 
for inference [NOTE][OCTAVE]": "Specificare tonalità minima per l'inferenza [NOTA][OTTAVA]", + "Max pitch:": "Tonalità massima:", + "Specify max pitch for inference [HZ]": "Specificare tonalità massima per l'inferenza [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Specificare tonalità massima per l'inferenza [NOTA][OTTAVA]", + "Browse presets for formanting": "Sfoglia i preset per il formante", + "Presets are located in formantshiftcfg/ folder": "I preset si trovano nella cartella formantshiftcfg/", + "Default value is 1.0": "Il valore di default è 1.0", + "Quefrency for formant shifting": "Quefrency per lo spostamento dei formanti", + "Timbre for formant shifting": "Timbro per lo spostamento dei formanti", + "Apply": "Applica", + "Single": "Singola", + "Batch": "Batch", + "Separate YouTube tracks": "Separa tracce di YouTube", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Scarica l'audio di un video da YouTube e separa automaticamente le tracce vocali e strumentali.", + "Extra": "Altro", + "Merge": "Unisci", + "Merge your generated audios with the instrumental": "Combina i tuoi audio generati con la base strumentale", + "Choose your instrumental:": "Scegli la base strumentale:", + "Choose the generated audio:": "Scegli l'audio generato:", + "Combine": "Combina", + "Download and Separate": "Scaricare e separare", + "Enter the YouTube link:": "Inserisci il link di YouTube:", + "This section contains some extra utilities that often may be in experimental phases": "Questa sezione contiene alcune utility aggiuntive che spesso possono essere in fase sperimentale", + "Merge Audios": "Unisci audio", + "Audio files have been moved to the 'audios' folder.": "I file audio sono stati spostati nella cartella 'audios'.", + "Downloading audio from the video...": "Scaricamento audio del video in corso...", + "Audio downloaded!": "Audio scaricato!", + "An error occurred:": "Si è verificato un errore:", + "Separating audio...": "Separazione audio in corso...", + "File moved successfully.": "Il file è stato spostato correttamente.", + "Finished!": "Finito!", + "The source file does not exist.": "Il file sorgente non esiste.", + "Error moving the file:": "Errore durante lo spostamento del file:", + "Downloading {name} from drive": "Download di {name} da Google Drive", + "The attempt to download using Drive didn't work": "Il tentativo di download con Drive non ha funzionato", + "Error downloading the file: {str(e)}": "Errore durante il download del file: {str(e)}", + "Downloading {name} from mega": "Download di {name} da MEGA", + "Downloading {name} from basic url": "Download di {name} dall'URL di base", + "Download Audio": "Scarica Audio", + "Download audios of any format for use in inference (recommended for mobile users).": "Scarica audio di qualsiasi formato per l'uso in inferenza (consigliato per utenti mobili).", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Qualsiasi ConnectionResetError dopo la conversione è irrilevante e puramente visivo; può essere ignorato.\n", + "Processed audio saved at: ": "Audio elaborato salvato in: ", + "Conversion complete!": "Conversione completata!", + "Reverb": "Riverbero", + "Compressor": "Compressore", + "Noise Gate": "Riduzione del Rumore", + "Volume": "Volume", + "Drag the audio here and click the Refresh button": "Trascina l'audio qui e fai clic sul pulsante Aggiorna", + "Select the generated audio": "Seleziona l'audio generato", + "Volume of the instrumental 
audio:": "Volume della base strumentale:", + "Volume of the generated audio:": "Volume dell'audio generato:", + "### Audio settings:": "### Impostazioni audio:", + "### Instrumental settings:": "### Impostazioni base strumentale:", + "### Add the effects:": "### Aggiungi gli effetti:", + "Name for saving": "Nome salvataggio", + "Path to model": "Percorso al modello", + "Model information to be placed": "Informazioni sul modello da modificare", + "Starting audio conversion... (This might take a moment)": "Avvio conversione audio... (Questo potrebbe richiedere un po' di tempo)", + "TTS Model:": "Voci TTS", + "TTS": "TTS", + "TTS Method:": "Metodo TTS", + "Audio TTS:": "Audio TTS:", + "Audio RVC:": "Modello Audio", + "You can also drop your files to load your model.": "Puoi anche trascinare i tuoi file per caricare il tuo modello.", + "Drag your .pth file here:": "Trascina il tuo file .pth qui:", + "Drag your .index file here:": "Trascina il tuo file .index qui:" +} diff --git a/assets/i18n/langs/pl_PL.json b/assets/i18n/langs/pl_PL.json new file mode 100644 index 0000000000000000000000000000000000000000..9b424a29cbf731a0ed8ab1d660dd569fbfbbd8c4 --- /dev/null +++ b/assets/i18n/langs/pl_PL.json @@ -0,0 +1,261 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Niestety, ale nie masz kompatybilnego GPU, który wspierałby trenowanie.", + "Yes": "Tak", + "Select your dataset:": "Wybierz swój dataset:", + "Update list": "Uaktualnij listę", + "Download Model": "Pobierz model", + "Download Backup": "Pobierz kopię zapasową", + "Download Dataset": "Pobierz dataset", + "Download": "Pobierz", + "Url:": "Url:", + "Build the index before saving.": "Utwórz index przed zapisem.", + "Save your model once the training ends.": "Zapisz model po ukończonym treningu.", + "Save type": "Typ zapisu:", + "Save model": "Zapisz model", + "Choose the method": "Wybierz metodę", + "Save all": "Zapisz wszystko", + "Save D and G": "Zapisz D i G", + "Save voice": "Zapisz głos", + "Downloading the file: ": "Pobieranie pliku: ", + "Stop training": "Zatrzymaj trenowanie", + "Too many users have recently viewed or downloaded this file": "Zbyt wielu użytkowników ostatnio oglądało lub pobrało ten plik", + "Cannot get file from this private link": "Nie można pobrać pliku z prywatnego łącza", + "Full download": "Pełne pobieranie", + "An error occurred downloading": "Wystąpił błąd podczas pobierania", + "Model saved successfully": "Model został pomyślnie zapisany", + "Saving the model...": "Zapisywanie modelu...", + "Saved without index...": "Zapisano bez inedxu...", + "Saved without inference model...": "Zapisano bez inferencji modelu...", + "An error occurred saving the model": "Wystąpił błąd podczas zapisywania modelu", + "The model you want to save does not exist, be sure to enter the correct name.": "Model, który ma zostać zapisany, nie istnieje, należy wprowadzić poprawną nazwę.", + "The file could not be downloaded.": "Nie można pobrać pliku.", + "Unzip error.": "Błąd podczas wypakowywania.", + "Path to your added.index file (if it didn't automatically find it)": "Ścieżka do pliku added.index (jeśli nie została załadowana automatycznie)", + "It has been downloaded successfully.": "Pomyślnie pobrano.", + "Proceeding with the extraction...": "Przystępowanie do ekstrakcji...", + "The Backup has been uploaded successfully.": "Kopia zapasowa została pomyślnie przesłana", + "The Dataset has been loaded successfully.": "Dataset został pomyślnie załadowany", + "The Model has been loaded 
successfully.": "Model został pomyślnie załadowany", + "It is used to download your inference models.": "To służy do pobierania modeli.", + "It is used to download your training backups.": "To służy do pobierania kopii zapasowych treningu.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Pobierz dataset z plikami autio w kompatybilnym formacie (.wav/.flac), aby wytrenować swój model.", + "No relevant file was found to upload.": "Nie znaleziono odpowiedniego pliku do przesłania.", + "The model works for inference, and has the .index file.": "Model działa poprawnie i ma plik .index.", + "The model works for inference, but it doesn't have the .index file.": "Model działa poprawnie, ale nie ma pliku .index.", + "This may take a few minutes, please wait...": "Może to potrwać kilka minut, proszę czekać...", + "Resources": "Zasoby", + "Step 1: Processing data": "Krok 1: Przetwarzanie danych", + "Step 2: Extracting features": "Krok 2: Ekstrakcja cech", + "Step 3: Model training started": "Krok 3: Trenowanie modelu", + "Training is done, check train.log": "Trening skończony, sprawdź train.log", + "All processes have been completed!": "Wszystkie procesy zostały zakończone!", + "Model Inference": "Inferencja modelu", + "Inferencing voice:": "Wybierz model głosu:", + "Model_Name": "Nazwa_Modelu", + "Dataset_Name": "Nazwa_Datasetu", + "Or add your dataset path:": "Lub dodaj ścieżkę datasetu:", + "Whether the model has pitch guidance.": "Czy model ma wskazówki dot. ekstrakcji pitchu.", + "Whether to save only the latest .ckpt file to save hard drive space": "Czy zapisywać tylko najnowszy plik .ckpt w celu zaoszczędzenia miejsca na dysku?", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Buforowanie całego datasetu w pamięciu GPU. Przy krótkim datasecie (poniżej 10 minut) może przyspieszyć szkolenie", + "Save a small final model to the 'weights' folder at each save point": "Zapisywanie małego modelu końcowego w folderze 'weights' co każdy punkt zapisu", + "Refresh": "Odśwież", + "Unload voice to save GPU memory": "Odłączenie głosu, aby zaoszczędzić pamięć GPU", + "Select Speaker/Singer ID:": "Wybierz ID Głośnika/Piosenkarza:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Zalecane +12 przy konwersji z mężczyzny na kobietę i -12 przy konwersji z kobiety na mężczyznę. Jeśli zakres dźwięku jest zbyt szeroki i głos jest zniekształcony, można samodzielnie dostosować go do odpowiedniego zakresu.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transpozycja (liczba całkowita, liczba półtonów, podniesienie o oktawę: 12, obniżenie o oktawę: -12):", + "Feature search database file path:": "Ścieżka do pliku bazy wyszukiwania funkcji:", + "Enter the path of the audio file to be processed (default is the correct format example):": "Wprowadź ścieżkę do pliku audio, który ma zostać poddany konwersji (domyślnie jest to przykład poprawnego formatu):", + "Select the pitch extraction algorithm:": "Wybierz algorytm ekstrakcji pitchu:", + "Feature search dataset file path": "Ścieżka do datasetu wyszukiwania funkcji", + "If >=3: apply median filtering to the harvested pitch results. 
The value represents the filter radius and can reduce breathiness.": "Jeśli >=3: zastosuj filtrowanie medianowe do zebranych wyników pitchu. Wartość ta reprezentuje promień filtra i może zmniejszyć 'duszność'.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Ścieżka do pliku indexu funkcji. Zostaw puste, aby użyć wybranego wyniku z listy rozwijanej:", + "Auto-detect index path and select from the dropdown:": "Automatycznie wykryta ścieżka indexu, wybierz z rozwijanej listy:", + "Path to feature file:": "Ścieżka do pliku funkcji:", + "Search feature ratio:": "Wskaźnik funkcji wyszukiwania:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Ponowne próbkowanie wyjściowego sygnału audio w postprocessingu do końcowej częstotliwości próbkowania. Ustawienie 0 oznacza brak ponownego próbkowania:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Użyj obwiedni głośności wejścia, aby zastąpić lub zmiksować z obwiednią głośności wyjścia. Im bliższy 1, tym bardziej wykorzystywana jest obwiednia wyjściowa:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Chroni bezdźwięczne spółgłoski i dźwięki oddechu, aby zapobiec artefaktom, takim jak rozdarcie w muzyce elektronicznej. Ustaw na 0,5, aby wyłączyć. Zmniejszenie wartości zwiększa ochronę, ale może zmniejszyć dokładność indeksowania:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Plik krzywej F0 (opcjonalnie). Jedna wysokość dźwięku na linię. Zastępuje domyślną modulację F0 i wysokość dźwięku:", + "Convert": "Konwertuj", + "Output information:": "Informacje wyjściowe:", + "Export audio (click on the three dots in the lower right corner to download)": "Wyeksportowany dźwięk (kliknij trzy kropki w prawym dolnym rogu, aby pobrać)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Konwersja wsadowa. Wprowadź folder zawierający pliki audio do konwersji, lub prześlij wiele plików audio. Przekonwertowany dźwięk zostanie zapisany w określonym folderze (domyślnie: 'opt').", + "Specify output folder:": "Określ folder wyjściowy:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Wprowadź ścieżkę folderu audio do przetworzenia (skopiuj ją z paska adresu menadżera plików):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Pliki audio można również wprowadzać partiami. Wybierz jedną z dwóch opcji. 
Pierwszeństwo ma odczyt z folderu.", + "Export file format:": "Format eksportowanego pliku:", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Wprowadź ścieżkę folderu audio, który ma zostać przetworzony:", + "Model:": "Model:", + "Vocal Extraction Aggressive": "Agresja ekstrakcji głosu", + "Specify the output folder for vocals:": "Określ folder wyjściowy dla wokali:", + "Specify the output folder for accompaniment:": "Określ folder wyjściowy dla instrumentali:", + "Train": "Trenowanie", + "Enter the model name:": "Wprowadź nazwę modelu:", + "Target sample rate:": "Docelowa częstotliwość próbkowania:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Czy model ma wskazówki dot. ekstrakcji pitchu (wymagane w przypadku śpiewu, opcjonalne w przypadku mowy):", + "Version:": "Wersja:", + "Number of CPU processes:": "Liczba procesów CPU:", + "Enter the path of the training folder:": "Wprowadź ścieżkę folderu szkoleniowego:", + "Specify the model ID:": "Podaj ID modelu:", + "Auto detect audio path and select from the dropdown:": "Automatycznie wykryta ścieżka audio, wybierz z rozwijanej listy:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Dodaj nazwę audio do ścieżki do pliku audio, który ma zostać przetworzony (domyślnie jest przykład poprawnego formatu) Usuń ścieżkę, aby użyć audio z rozwijanej listy:", + "Advanced Settings": "Ustawienia zaawansowane", + "Settings": "Ustawienia", + "Status:": "Status:", + "Process data": "Przetwórz dane", + "Drag your audio here:": "Przeciągnij tu swój plik audio:", + "Or record an audio:": "Albo nagraj:", + "Formant shift inference audio": "Inferencja o zmianie formantu audio", + "Used for male to female and vice-versa conversions": "Używane do konwersji z mężczyzny na kobietę i odwrotnie", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Podaj indeks(y) GPU oddzielone znakiem '-', np. 0-1-2, aby użyć GPU 0, 1 i 2:", + "GPU Information:": "Informacje o GPU:", + "Feature extraction": "Ekstrakcja cech", + "Save frequency:": "Częstotliwość zapisu:", + "Training epochs:": "Epoch do wytrenowania:", + "Batch size per GPU:": "Batch size na GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Zapisz tylko najnowszy plik '.ckpt', aby zaoszczędzić miejsce na dysku:", + "No": "Nie", + "Save a small final model to the 'weights' folder at each save point:": "Zapisywanie małego modelu końcowego w folderze 'weights' co każdy punkt zapisu:", + "Load pre-trained base model G path:": "Ścieżka wstępnie wytrenowanego modelu bazowego G:", + "Load pre-trained base model D path:": "Ścieżka wstępnie wytrenowanego modelu bazowego D:", + "Train model": "Trenuj model", + "Train feature index": "Trenuj index", + "One-click training": "Trenowanie jednym kliknięciem", + "Processing": "Przetwarzanie", + "Model fusion, can be used to test timbre fusion": "Fuzja modeli, może być używana do testowania fuzji barw", + "Path to Model A:": "Ścieżka do modelu A:", + "Path to Model B:": "Ścieżka do modelu B:", + "Weight for Model A:": "Waga modelu A:", + "Whether the model has pitch guidance:": "Czy model ma wskazówki dot. 
ekstrakcji pitchu:", + "Model information to be placed:": "Informacje o modelu do umieszczenia:", + "Model architecture version:": "Wersja architektury modelu:", + "Fusion": "Fuzja", + "Modify model information": "Modyfikowanie informacji o modelu", + "Path to Model:": "Ścieżka do modelu:", + "Model information to be modified:": "Informacje o modelu do modyfikacji:", + "Save file name:": "Nazwa zapisanego pliku:", + "Modify": "Modyfikuj", + "View model information": "Sprawdzanie informacji o modelu", + "View": "Sprawdź", + "Model extraction": "Ekstrakcja modelu", + "Name:": "Nazwa:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Czy model ma wskazówki dot. ekstrakcji pitchu (1: tak, 0: nie)", + "Extract": "Ekstraktuj", + "Export Onnx": "Eksportuj Onnx", + "RVC Model Path:": "Ścieżka modelu RVC:", + "Onnx Export Path:": "Ścieżka eksportu Onnx:", + "MoeVS Model": "MoeVS Model", + "Export Onnx Model": "Eksportuj model Onnx", + "Load model": "Załaduj model", + "Hubert Model": "Hubert Model", + "Select the .pth file": "Wybierz plik .pth", + "Select the .index file": "Wybierz plik .index", + "Select the .npy file": "Wybierz plik .npy", + "Input device": "Urządzenie wejściowe", + "Output device": "Urządzenie wyjściowe", + "Audio device (please use the same type of driver)": "Urządzenie audio (należy użyć tego samego typu sterownika)", + "Response threshold": "Próg odpowiedzi", + "Pitch settings": "Ustawienia pitchu", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Czy używać nazw nut zamiast ich wartości w hercach. Np. [C5, D6] zamiast [523.25, 1174.66]Hz", + "Index Rate": "Wskaźnik indexu", + "General settings": "Główne ustawienia", + "Sample length": "Długość próbki", + "Fade length": "Długość zaniku", + "Extra inference time": "Dodatkowy czas inferencji", + "Input noise reduction": "Redukcja szumów wejściowych", + "Output noise reduction": "Redukcja szumów wyjściowych", + "Performance settings": "Ustawienia wydajności", + "Start audio conversion": "Rozpocznij konwersję audio", + "Stop audio conversion": "Zatrzymaj konwersję audio", + "Inference time (ms):": "Czas inferencji (ms):", + "Select the pth file": "Wybierz plik pth", + "Select the .index file:": "Wybierz plik .index:", + "The hubert model path must not contain Chinese characters": "Ścieżka modelu hubert nie może zawierać chińskich znaków", + "The pth file path must not contain Chinese characters.": "Ścieżka do pliku pth nie może zawierać chińskich znaków.", + "The index file path must not contain Chinese characters.": "Ścieżka do pliku index nie może zawierać chińskich znaków.", + "Step algorithm": "Algorytm kroków", + "Number of epoch processes": "Liczba procesów epokowych", + "Lowest points export": "Eksport najniższych punktów", + "How many lowest points to save:": "Ile najniższych punktów zapisać:", + "Export lowest points of a model": "Eksporttuj najniższe punkty modelu", + "Output models:": "Modele wyjściowe:", + "Stats of selected models:": "Statystyki wybranych modeli:", + "Custom f0 [Root pitch] File": "Niestandardowy plik f0 [Root pitch]", + "Min pitch:": "Minimalny pitch:", + "Specify minimal pitch for inference [HZ]": "Określ minimaly pitch dla inferencji [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Określ minimaly pitch dla inferencji [NOTE][OCTAVE]", + "Max pitch:": "Maksymalny pitch:", + "Specify max pitch for inference [HZ]": "Określ maksymalny pitch dla inferencji [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": 
"Określ maksymalny pitch dla inferencji [NOTE][OCTAVE]", + "Browse presets for formanting": "Przeglądaj presety dla formantowania", + "Presets are located in formantshiftcfg/ folder": "Presety znajdują się w folderze formantshiftcfg/", + "Default value is 1.0": "Wartość domyślna to 1.0", + "Quefrency for formant shifting": "Quefrency dla przesunięcia formantu", + "Timbre for formant shifting": "Barwa dźwięku dla przesunięcia formantu", + "Apply": "Zastosuj", + "Single": "Pojedyncze", + "Batch": "Seria", + "Separate YouTube tracks": "Oddziel utwory z YouTube", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Pobieranie dźwięku z wideo YouTube i automatyczne oddzielanie ścieżek wokalnych i instrumentalnych", + "Extra": "Dodatki", + "Merge": "Scal", + "Merge your generated audios with the instrumental": "Scal wygenerowane dźwięki z instrumentalem", + "Choose your instrumental:": "Wybierz instrumental:", + "Choose the generated audio:": "Wybierz wygenerowany dźwięk:", + "Combine": "Łączenie", + "Download and Separate": "Pobieranie i rozdzielanie", + "Enter the YouTube link:": "Wprowadź link do YouTube", + "This section contains some extra utilities that often may be in experimental phases": "Ta sekcja zawiera kilka dodatkowych narzędzi, które często mogą być w fazie eksperymentalnej", + "Merge Audios": "Scal pliki audio", + "Audio files have been moved to the 'audios' folder.": "Pliki audio zostały przeniesione do folderu 'audio'.", + "Downloading audio from the video...": "Pobieranie pliku audio...", + "Audio downloaded!": "Audio pobrane!", + "An error occurred:": "Wystąpił błąd:", + "Separating audio...": "Rozdzielanie dźwięku...", + "File moved successfully.": "Plik został pomyślnie przeniesiony.", + "Finished!": "Ukończono!", + "The source file does not exist.": "Plik źródłowy nie istnieje.", + "Error moving the file:": "Błąd podczas przenoszenia pliku:", + "Downloading {name} from drive": "Pobieranie {name} z dysku", + "The attempt to download using Drive didn't work": "Próba pobrania za pomocą Drive nie powiodła się", + "Error downloading the file: {str(e)}": "Błąd pobierania pliku: {str(e)}", + "Downloading {name} from mega": "Pobieranie {name} z mega", + "Downloading {name} from basic url": "Pobieranie {name} z adresu url", + "Download Audio": "Pobierz audio", + "Download audios of any format for use in inference (recommended for mobile users).": "Pobierz pliki audio w dowolnym formacie do wykorzystania w inferencji (zalecane dla użytkowników urządzeń mobilnych).", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Wszelkie błędy ConnectionResetErrors po konwersji są nieistotne i czysto wizualne; można je zignorować.\n", + "Processed audio saved at: ": "Przetworzony dźwięk zapisany w: ", + "Conversion complete!": "Konwersja zakończona!", + "Reverb": "Pogłos", + "Compressor": "Kompresor", + "Noise Gate": "Bramka szumów", + "Volume": "Głośność", + "Drag the audio here and click the Refresh button": "Przeciągnij tutaj dźwięk i kliknij przycisk Odśwież", + "Select the generated audio": "Wybierz wygenerowany dźwięk", + "Volume of the instrumental audio:": "Głośność instrumentalu:", + "Volume of the generated audio:": "Głośność wygenerowanego dźwięku:", + "### Audio settings:": "### Ustawienia audio:", + "### Instrumental settings:": "### Ustawienia instrumentalu:", + "### Add the effects:": "### Dodaj efekty:", + "Name for saving": "Nazwa do zapisu", + "Path to model": "Ścieżka do 
modelu", + "Model information to be placed": "Informacje o modelu do umieszczenia", + "Starting audio conversion... (This might take a moment)": "Rozpoczęcie konwersji audio... (może to chwilę potrwać)", + "Error no reformatted.wav found:": "Błąd, nie znaleziono reformatted.wav:", + "Error at separating audio:": "Błąd podczas oddzielania dźwięku:", + "Vocal": "Wokal", + "Instrumental": "Instrumental", + "Finished": "Zakończono", + "TTS Model:": "Model TTS:", + "TTS": "TTS", + "RVC Model:": "Model RVC:", + "TTS Method:": "Metoda TTS:", + "Audio TTS:": "Audio TTS:", + "Audio RVC:": "Audio RVC:", + "Enter the text you want to convert to voice...": "Wprowadź tekst do konwersji TTS...", + "Text:": "Tekst:", + "You can also drop your files to load your model.": "Możesz również przeciągnąć swoje pliki, aby załadować swój model.", + "Drag your .pth file here:": "Przeciągnij swój plik .pth tutaj:", + "Drag your .index file here:": "Przeciągnij swój plik .index tutaj:" +} diff --git a/assets/i18n/langs/pt_PT.json b/assets/i18n/langs/pt_PT.json new file mode 100644 index 0000000000000000000000000000000000000000..badd36a9b143d15b40ad22f2ed0b598966a09174 --- /dev/null +++ b/assets/i18n/langs/pt_PT.json @@ -0,0 +1,248 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Infelizmente, não há GPU compatível disponível para apoiar o seu treinamento.", + "Yes": "Sim", + "Select your dataset:": "Selecione seu conjunto de dados.", + "Update list": "Lista de atualização.", + "Download Model": "Baixar modelo", + "Download Backup": "Baixar cópia de segurança", + "Download Dataset": "Baixar conjunto de dados", + "Download": "Download", + "Url:": "URL:", + "Build the index before saving.": "Crie o índice antes de salvar.", + "Save your model once the training ends.": "Salve seu modelo quando o treinamento terminar.", + "Save type": "Salvar tipo", + "Save model": "Salvar modelo", + "Choose the method": "Escolha o método", + "Save all": "Salvar tudo", + "Save D and G": "Salve D e G", + "Save voice": "Salvar voz", + "Downloading the file: ": "Baixando o arquivo:", + "Stop training": "Pare de treinar", + "Too many users have recently viewed or downloaded this file": "Muitos usuários visualizaram ou baixaram este arquivo recentemente", + "Cannot get file from this private link": "Não é possível obter o arquivo deste link privado", + "Full download": "Download completo", + "An error occurred downloading": "Ocorreu um erro ao baixar", + "Model saved successfully": "Modelo salvo com sucesso", + "Saving the model...": "Salvando o modelo...", + "Saved without index...": "Salvo sem índice...", + "model_name": "nome_modelo", + "Saved without inference model...": "Salvo sem modelo de inferência...", + "An error occurred saving the model": "Ocorreu um erro ao salvar o modelo", + "The model you want to save does not exist, be sure to enter the correct name.": "O modelo que você deseja salvar não existe, certifique-se de inserir o nome correto.", + "The file could not be downloaded.": "O arquivo não pôde ser baixado.", + "Unzip error.": "Erro ao descompactar.", + "Path to your added.index file (if it didn't automatically find it)": "Caminho para o seu arquivo add.index (se não o encontrou automaticamente)", + "It has been downloaded successfully.": "Ele foi baixado com sucesso.", + "Proceeding with the extraction...": "Prosseguindo com a extração...", + "The Backup has been uploaded successfully.": "O backup foi carregado com sucesso.", + "The Dataset has been loaded successfully.": "O 
conjunto de dados foi carregado com sucesso.", + "The Model has been loaded successfully.": "O modelo foi carregado com sucesso.", + "It is used to download your inference models.": "Ele é usado para baixar seus modelos de inferência.", + "It is used to download your training backups.": "Ele é usado para baixar seus backups de treinamento.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Baixe o conjunto de dados com os áudios em formato compatível (.wav/.flac) para treinar seu modelo.", + "No relevant file was found to upload.": "Nenhum arquivo relevante foi encontrado para upload.", + "The model works for inference, and has the .index file.": "O modelo funciona para inferência e possui o arquivo .index.", + "The model works for inference, but it doesn't have the .index file.": "O modelo funciona para inferência, mas não possui o arquivo .index.", + "This may take a few minutes, please wait...": "Isso pode levar alguns minutos, aguarde...", + "Resources": "Recursos", + "Step 1: Processing data": "Etapa 1: Processamento de dados", + "Step 2: Extracting features": "Etapa 2: Extraindo recursos", + "Step 3: Model training started": "Etapa 3: Treinamento do modelo iniciado", + "Training is done, check train.log": "O treinamento foi concluído, verifique train.log", + "All processes have been completed!": "Todos os processos foram concluídos!", + "Model Inference": "Inferência de modelo", + "Inferencing voice:": "Inferência de voz:", + "Model_Name": "Nome_modelo", + "Dataset_Name": "Nome_do_Conjunto_de_Dados", + "Or add your dataset path:": "Ou introduza o caminho para o seu conjunto de dados:", + "Whether the model has pitch guidance.": "Se o modelo tem orientação de pitch.", + "Whether to save only the latest .ckpt file to save hard drive space": "Se deseja salvar apenas o arquivo .ckpt mais recente para economizar espaço no disco rígido", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Armazene em cache todos os conjuntos de treinamento na memória da GPU. Armazenar pequenos conjuntos de dados em cache (menos de 10 minutos) pode acelerar o treinamento", + "Save a small final model to the 'weights' folder at each save point": "Salve um pequeno modelo final na pasta 'weights' em cada ponto de salvamento", + "Refresh": "Atualizar", + "Unload voice to save GPU memory": "Descarregue a voz para economizar memória da GPU", + "Select Speaker/Singer ID:": "Selecione o ID do locutor/cantor:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Chave recomendada +12 para conversão de homem para mulher e chave -12 para conversão de mulher para homem. 
Se o alcance do som for muito longe e a voz estiver distorcida, você também poderá ajustá-lo para o alcance apropriado.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transpor (inteiro, número de semitons, aumentar uma oitava: 12, diminuir uma oitava: -12):", + "Enter the path of the audio file to be processed (default is the correct format example):": "Digite o caminho do arquivo de áudio a ser processado (o padrão é o exemplo de formato correto):", + "Select the pitch extraction algorithm:": "Selecione o algoritmo de extração de pitch:", + "Feature search dataset file path": "Caminho do arquivo do conjunto de dados de pesquisa de recursos", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Se >=3: aplique filtragem mediana aos resultados de pitch colhidos. O valor representa o raio do filtro e pode reduzir a soprosidade.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Caminho para o arquivo de índice de recursos. Deixe em branco para usar o resultado selecionado no menu suspenso:", + "Auto-detect index path and select from the dropdown:": "Detecte automaticamente o caminho do índice e selecione no menu suspenso:", + "Path to feature file:": "Caminho para o arquivo de recurso:", + "Search feature ratio:": "Proporção de recursos de pesquisa:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Faça uma nova amostragem do áudio de saída no pós-processamento para a taxa de amostragem final. Defina como 0 para nenhuma reamostragem:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Use o envelope de volume da entrada para substituir ou mixar com o envelope de volume da saída. Quanto mais próxima a proporção estiver de 1, mais o envelope de saída será usado:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Proteja consoantes surdas e sons respiratórios para evitar artefatos como rasgos na música eletrônica. Defina como 0,5 para desativar. Diminua o valor para aumentar a proteção, mas poderá reduzir a precisão da indexação:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Arquivo de curva F0 (opcional). Um tom por linha. Substitui o F0 padrão e a modulação de pitch:", + "Convert": "Converter", + "Output information:": "Informações de saída:", + "Export audio (click on the three dots in the lower right corner to download)": "Exportar áudio (clique nos três pontos no canto inferior direito para fazer o download)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Conversão em lote. Entre na pasta que contém os arquivos de áudio a serem convertidos ou carregue vários arquivos de áudio. 
O áudio convertido será enviado para a pasta especificada (padrão: 'opt').", + "Specify output folder:": "Especifique a pasta de saída:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Digite o caminho da pasta de áudio a ser processada (copie-o da barra de endereço do gerenciador de arquivos):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Você também pode inserir arquivos de áudio em lotes. Escolha uma das duas opções. É dada prioridade à leitura da pasta.", + "Export file format": "Formato do arquivo de exportação", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Digite o caminho da pasta de áudio a ser processada:", + "Model": "Modelo", + "Vocal Extraction Aggressive": "Extração Vocal Agressiva", + "Specify the output folder for vocals:": "Especifique a pasta de saída para vocais:", + "Specify the output folder for accompaniment:": "Especifique a pasta de saída para acompanhamento:", + "Train": "Treinar", + "Enter the model name:": "Digite o nome do modelo:", + "Target sample rate:": "Taxa de amostragem desejada:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Se o modelo possui orientação de tom (obrigatório para canto, opcional para fala):", + "Version": "Versão", + "Number of CPU processes:": "Número de processos de CPU:", + "Enter the path of the training folder:": "Digite o caminho da pasta de treinamento:", + "Specify the model ID:": "Especifique o ID do modelo:", + "Auto detect audio path and select from the dropdown:": "Detecte automaticamente o caminho de áudio e selecione no menu suspenso:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Adicione o nome do áudio ao caminho do arquivo de áudio a ser processado (o padrão é o exemplo de formato correto) Remova o caminho para usar um áudio da lista suspensa:", + "Advanced Settings": "Configurações avançadas", + "Settings": "Configurações", + "Status:": "Status:", + "Process data": "Processar dados", + "Drag your audio here:": "Arraste seu áudio aqui:", + "Or record an audio:": "Ou grave um áudio:", + "Formant shift inference audio": "Áudio de inferência de mudança de formante", + "Used for male to female and vice-versa conversions": "Usado para conversões de homem para mulher e vice-versa", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Forneça os índices de GPU separados por '-', como 0-1-2 para usar GPUs 0, 1 e 2:", + "GPU Information:": "Informações da GPU:", + "Feature extraction": "Extração de recursos", + "Save frequency:": "Frequência de salvamento:", + "Training epochs:": "Épocas de treinamento:", + "Batch size per GPU:": "Tamanho do lote por GPU:", + "Save only the latest '.ckpt' file to save disk space:": "Salve apenas o arquivo '.ckpt' mais recente para economizar espaço em disco:", + "No": "Não", + "Save a small final model to the 'weights' folder at each save point:": "Salve um pequeno modelo final na pasta 'weights' em cada ponto de salvamento:", + "Load pre-trained base model G path:": "Carregar caminho do modelo base G pré-treinado:", + "Load pre-trained base model D path:": "Carregar caminho do modelo base D pré-treinado:", + "Train model": "Treinar modelo", + "Train feature index": "Treinar índice de recursos", + "One-click training": "Treinamento com um clique", + "Processing": "Em processamento", + "Model fusion, can be used to test timbre fusion": "Fusão de modelos, pode ser usada para testar a fusão de timbres", + "Path to Model A:": "Caminho para o modelo A:", + "Path to Model B:": "Caminho para o modelo B:", + "Weight for Model A:": "Peso para o Modelo A:", + "Whether the model has pitch guidance:": "Se o modelo tem orientação de pitch:", + "Model information to be placed:": "Informações do modelo a serem colocadas:", + "Model architecture version:": "Versão da arquitetura do modelo:", + "Fusion": "Fusão", + "Modify model information": "Modificar informações do modelo", + "Path to Model:": "Caminho para o modelo:", + "Model information to be modified:": "Informações do modelo a serem modificadas:", + "Save file name:": "Salvar nome do arquivo:", + "Modify": "Modificar", + "View model information": "Ver informações do modelo", + "View": "Visualizar", + "Model extraction": "Extração de modelo (insira o caminho do modelo de arquivo grande na pasta 'logs'). Isso é útil se você quiser interromper o treinamento no meio e extrair e salvar manualmente um arquivo de modelo pequeno, ou se quiser testar um modelo intermediário:", + "Name:": "Nome:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Se o modelo possui orientação de pitch (1: sim, 0: não):", + "Extract": "Extrair", + "Export Onnx": "Exportar Onnx", + "RVC Model Path:": "Caminho do modelo RVC:", + "Onnx Export Path:": "Caminho de exportação Onnx:", + "MoeVS Model": "Modelo MoeVS", + "Export Onnx Model": "Exportar modelo Onnx", + "Load model": "Carregar modelo", + "Hubert Model": "Modelo Hubert", + "Select the .pth file": "Selecione o arquivo .pth", + "Select the .index file": "Selecione o arquivo .index", + "Select the .npy file": "Selecione o arquivo .npy", + "Input device": "Dispositivo de entrada", + "Output device": "Dispositivo de saída", + "Audio device (please use the same type of driver)": "Dispositivo de áudio (use o mesmo tipo de driver)", + "Response threshold": "Limite de resposta", + "Pitch settings": "Configurações de tom", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Se deve usar nomes de notas em vez de seu valor em hertz. POR EXEMPLO. 
[C5, D6] em vez de [523,25, 1174,66] Hz", + "Index Rate": "Taxa de índice", + "General settings": "Configurações Gerais", + "Sample length": "Comprimento da amostra", + "Fade length": "Comprimento do esmaecimento", + "Extra inference time": "Tempo extra de inferência", + "Input noise reduction": "Redução de ruído de entrada", + "Output noise reduction": "Redução de ruído de saída", + "Performance settings": "Configurações de desempenho", + "Start audio conversion": "Iniciar conversão de áudio", + "Stop audio conversion": "Pare a conversão de áudio", + "Inference time (ms):": "Tempo de inferência (ms):", + "Select the pth file": "Selecione o arquivo pth", + "Select the .index file:": "Selecione o arquivo de índice", + "The hubert model path must not contain Chinese characters": "O caminho do modelo Hubert não deve conter caracteres chineses", + "The pth file path must not contain Chinese characters.": "O caminho do arquivo pth não deve conter caracteres chineses.", + "The index file path must not contain Chinese characters.": "O caminho do arquivo de índice não deve conter caracteres chineses.", + "Step algorithm": "Algoritmo de etapas", + "Number of epoch processes": "Número de processos de época", + "Lowest points export": "Exportação de pontos mais baixos", + "How many lowest points to save:": "Quantos pontos mais baixos salvar", + "Export lowest points of a model": "Exportar os pontos mais baixos de um modelo", + "Output models:": "Modelos de saída", + "Stats of selected models:": "Estatísticas dos modelos selecionados", + "Custom f0 [Root pitch] File": "Arquivo f0 [inclinação da raiz] personalizado", + "Min pitch:": "Passo mínimo", + "Specify minimal pitch for inference [HZ]": "Especifique o tom mínimo para inferência [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Especifique o tom mínimo para inferência [NOTA][OCTAVE]", + "Max pitch:": "Tom máximo", + "Specify max pitch for inference [HZ]": "Especifique o tom máximo para inferência [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Especifique o tom máximo para inferência [NOTE][OCTAVE]", + "Browse presets for formanting": "Procure predefinições para formatação", + "Presets are located in formantshiftcfg/ folder": "As predefinições estão localizadas na pasta formantshiftcfg/", + "Default value is 1.0": "O valor padrão é 1,0", + "Quefrency for formant shifting": "Quefrency para mudança de formantes", + "Timbre for formant shifting": "Timbre para mudança de formantes", + "Apply": "Aplicar", + "Single": "Solteiro", + "Batch": "Lote", + "Separate YouTube tracks": "Faixas separadas do YouTube", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Baixe o áudio de um vídeo do YouTube e separe automaticamente as faixas vocais e instrumentais", + "Extra": "Extra", + "Merge": "Mesclar", + "Merge your generated audios with the instrumental": "Mescle seus áudios gerados com o instrumental", + "Choose your instrumental:": "Escolha seu instrumental", + "Choose the generated audio:": "Escolha o áudio gerado", + "Combine": "Combinar", + "Download and Separate": "Baixe e separe", + "Enter the YouTube link:": "Digite o link do youtube", + "This section contains some extra utilities that often may be in experimental phases": "Esta seção contém alguns utilitários extras que muitas vezes podem estar em fases experimentais", + "Merge Audios": "Mesclar áudios", + "Audio files have been moved to the 'audios' folder.": "Os arquivos de áudio foram movidos para a pasta ‘audios’.", + 
"Downloading audio from the video...": "Baixando o áudio do vídeo...", + "Audio downloaded!": "Baixar áudio!", + "An error occurred:": "Um erro ocorreu:", + "Separating audio...": "Separando áudio...", + "File moved successfully.": "Arquivo movido com sucesso.", + "Finished!": "Finalizado!", + "The source file does not exist.": "O arquivo de origem não existe.", + "Error moving the file:": "Erro ao mover o arquivo:", + "Downloading {name} from drive": "Baixando {name} da unidade", + "The attempt to download using Drive didn't work": "A tentativa de download usando o Drive não funcionou", + "Error downloading the file: {str(e)}": "Erro ao baixar o arquivo: {str(e)}", + "Downloading {name} from mega": "Baixando {nome} do mega", + "Downloading {name} from basic url": "Baixando {nome} do URL básico", + "Download Audio": "Baixar áudio", + "Download audios of any format for use in inference (recommended for mobile users).": "Baixe áudios de qualquer formato para uso em inferência (recomendado para usuários móveis)", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Qualquer ConnectionResetErrors pós-conversão é irrelevante e puramente visual; eles podem ser ignorados.", + "Processed audio saved at: ": "Áudio processado salvo em:", + "Conversion complete!": "Conversão concluída!", + "Reverb": "Ressonância", + "Compressor": "Compressor", + "Noise Gate": "Portão de Ruído", + "Volume": "Volume", + "Drag the audio here and click the Refresh button": "Arraste o áudio aqui e clique no botão Atualizar", + "Select the generated audio": "Selecione o áudio gerado", + "Volume of the instrumental audio:": "Volume do áudio instrumental", + "Volume of the generated audio:": "Volume do áudio gerado", + "### Add the effects": "### Adicione os efeitos", + "Starting audio conversion... (This might take a moment)": "Iniciando a conversão de áudio... 
(Isso pode levar um tempo)", + "TTS Model:": "Modelo TTS:", + "TTS": "TTS", + "TTS Method:": "Método TTS:", + "Audio TTS:": "Áudio TTS:", + "Audio RVC:": "Áudio RVC:", + "You can also drop your files to load your model.": "Você também pode soltar seus arquivos para carregar seu modelo.", + "Drag your .pth file here:": "Arraste seu arquivo .pth aqui:", + "Drag your .index file here:": "Arraste seu arquivo .index aqui:" +} diff --git a/assets/i18n/langs/ru_RU.json b/assets/i18n/langs/ru_RU.json new file mode 100644 index 0000000000000000000000000000000000000000..f0b14b04a7c97de2f50790de40966151e490ba35 --- /dev/null +++ b/assets/i18n/langs/ru_RU.json @@ -0,0 +1,248 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "К сожалению, для вашего обучения не существует совместимого графического процессора.", + "Yes": "Да", + "Select your dataset:": "Выберите свой набор данных:", + "Update list": "Обновить список", + "Download Model": "Скачать модель", + "Download Backup": "Скачать резервную копию", + "Download Dataset": "Скачать набор данных", + "Download": "Скачать", + "Url:": "URL:", + "Build the index before saving.": "Создайте индекс перед сохранением.", + "Save your model once the training ends.": "Сохраните свою модель после окончания обучения.", + "Save type": "Тип сохранения", + "Save model": "Сохранить модель", + "Choose the method": "Выберите метод", + "Save all": "Сохранить все", + "Save D and G": "Сохранить D и G", + "Save voice": "Сохранить голос", + "Downloading the file: ": "Скачиваем файл: ", + "Stop training": "Прекратить тренировку", + "Too many users have recently viewed or downloaded this file": "Слишком много пользователей недавно просмотрели или скачали этот файл.", + "Cannot get file from this private link": "Невозможно получить файл по этой частной ссылке", + "Full download": "Полная загрузка", + "An error occurred downloading": "Произошла ошибка при загрузке", + "Model saved successfully": "Модель успешно сохранена", + "Saving the model...": "Сохраняем модель...", + "Saved without index...": "Сохранено без индекса...", + "model_name": "название модели", + "Saved without inference model...": "Сохранено без модели вывода...", + "An error occurred saving the model": "Произошла ошибка при сохранении модели.", + "The model you want to save does not exist, be sure to enter the correct name.": "Модель, которую вы хотите сохранить, не существует. 
Обязательно введите правильное имя.", + "The file could not be downloaded.": "Не удалось загрузить файл.", + "Unzip error.": "Ошибка разархивирования.", + "Path to your added.index file (if it didn't automatically find it)": "Путь к файлу added.index (если он не был найден автоматически)", + "It has been downloaded successfully.": "Он был успешно загружен.", + "Proceeding with the extraction...": "Приступаем к извлечению...", + "The Backup has been uploaded successfully.": "Резервная копия успешно загружена.", + "The Dataset has been loaded successfully.": "Набор данных успешно загружен.", + "The Model has been loaded successfully.": "Модель успешно загружена.", + "It is used to download your inference models.": "Он используется для загрузки ваших моделей вывода.", + "It is used to download your training backups.": "Он используется для загрузки резервных копий тренировок.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Загрузите набор данных со звуками в совместимом формате (.wav/.flac), чтобы обучить свою модель.", + "No relevant file was found to upload.": "Не найден соответствующий файл для загрузки.", + "The model works for inference, and has the .index file.": "Модель работает для вывода и имеет файл .index.", + "The model works for inference, but it doesn't have the .index file.": "Модель работает для вывода, но у нее нет файла .index.", + "This may take a few minutes, please wait...": "Это может занять несколько минут, пожалуйста, подождите...", + "Resources": "Ресурсы", + "Step 1: Processing data": "Шаг 1: Обработка данных", + "Step 2: Extracting features": "Шаг 2: Извлечение объектов", + "Step 3: Model training started": "Шаг 3: Начало обучения модели", + "Training is done, check train.log": "Обучение завершено, проверьте train.log", + "All processes have been completed!": "Все процессы завершены!", + "Model Inference": "Вывод модели", + "Inferencing voice:": "Голос для вывода:", + "Model_Name": "Название_модели", + "Dataset_Name": "Имя_набора_данных", + "Or add your dataset path:": "Или введите путь к набору данных:", + "Whether the model has pitch guidance.": "Имеет ли модель управление высотой тона.", + "Whether to save only the latest .ckpt file to save hard drive space": "Сохранять ли только последний файл .ckpt для экономии места на жестком диске", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Кэшируйте все обучающие наборы в память графического процессора. Кэширование небольших наборов данных (менее 10 минут) может ускорить обучение.", + "Save a small final model to the 'weights' folder at each save point": "Сохраняйте небольшую окончательную модель в папке 'weights' в каждой точке сохранения.", + "Refresh": "Обновить", + "Unload voice to save GPU memory": "Выгрузите голос, чтобы сэкономить память графического процессора", + "Select Speaker/Singer ID:": "Выберите идентификатор докладчика/певца:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Рекомендуемый ключ +12 для преобразования мужчины в женщину и ключ -12 для преобразования женщины в мужчину. 
Если звуковой диапазон заходит слишком далеко и голос искажается, вы также можете самостоятельно настроить его на соответствующий диапазон.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Транспонирование (целое число, количество полутонов, повышение на октаву: 12, понижение на октаву: -12):", + "Enter the path of the audio file to be processed (default is the correct format example):": "Введите путь к аудиофайлу, который необходимо обработать (по умолчанию — правильный пример формата):", + "Select the pitch extraction algorithm:": "Выберите алгоритм извлечения высоты звука:", + "Feature search dataset file path": "Путь к файлу набора данных поиска объектов", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Если >=3: применить медианную фильтрацию к собранным результатам высоты тона. Значение представляет собой радиус фильтра и может уменьшить придыхание.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Путь к индексному файлу объекта. Оставьте поле пустым, чтобы использовать выбранный результат из раскрывающегося списка:", + "Auto-detect index path and select from the dropdown:": "Автоматическое определение пути к индексу и выбор из раскрывающегося списка:", + "Path to feature file:": "Путь к файлу объекта:", + "Search feature ratio:": "Соотношение функций поиска:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Повторно дискретизируйте выходной звук при постобработке до окончательной частоты дискретизации. Установите значение 0, чтобы не выполнять повторную выборку:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Используйте огибающую громкости входа для замены или смешивания с огибающей громкости выхода. Чем ближе соотношение к 1, тем больше используется выходная огибающая:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Защищайте глухие согласные и звуки дыхания, чтобы предотвратить появление таких артефактов, как разрывы в электронной музыке. Установите значение 0,5, чтобы отключить. Уменьшите значение, чтобы повысить защиту, но это может снизить точность индексации:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Файл кривой F0 (опционально). Один шаг на строку. Заменяет стандартную F0 и модуляцию высоты тона:", + "Convert": "Конвертировать", + "Output information:": "Выходная информация:", + "Export audio (click on the three dots in the lower right corner to download)": "Экспортируйте аудио (нажмите на три точки в правом нижнем углу, чтобы загрузить)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Пакетное преобразование. Введите папку, содержащую аудиофайлы, которые нужно преобразовать, или загрузите несколько аудиофайлов. 
Конвертированный звук будет выводиться в указанную папку (по умолчанию: «opt»).", + "Specify output folder:": "Укажите выходную папку:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Введите путь к папке аудио, подлежащей обработке (скопируйте его из адресной строки файлового менеджера):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Вы также можете вводить аудиофайлы в пакетном режиме. Выберите один из двух вариантов. Приоритет отдается чтению из папки.", + "Export file format": "Формат файла экспорта", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "Введите путь к аудиопапке, которую необходимо обработать:", + "Model": "Модель", + "Vocal Extraction Aggressive": "Извлечение вокала агрессивное", + "Specify the output folder for vocals:": "Укажите выходную папку для вокала:", + "Specify the output folder for accompaniment:": "Укажите выходную папку для аккомпанемента:", + "Train": "Обучение", + "Enter the model name:": "Введите название модели:", + "Target sample rate:": "Целевая частота дискретизации:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Имеет ли модель управление высотой тона (обязательно для пения, необязательно для речи):", + "Version": "Версия", + "Number of CPU processes:": "Количество процессов ЦП:", + "Enter the path of the training folder:": "Введите путь к папке обучения:", + "Specify the model ID:": "Укажите идентификатор модели:", + "Auto detect audio path and select from the dropdown:": "Автоматическое определение пути аудио и выбор из раскрывающегося списка:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Добавьте имя аудио к пути к обрабатываемому аудиофайлу (по умолчанию используется правильный пример формата). 
Удалите путь для использования аудио из раскрывающегося списка:", + "Advanced Settings": "Расширенные настройки", + "Settings": "Настройки", + "Status:": "Статус:", + "Process data": "Обработать данные", + "Drag your audio here:": "Перетащите сюда свой аудиофайл:", + "Or record an audio:": "Или запишите звук:", + "Formant shift inference audio": "Звук вывода формантного сдвига", + "Used for male to female and vice-versa conversions": "Используется для преобразования мужского и женского пола и наоборот.", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Укажите индексы графического процессора, разделенные знаком «-», например 0-1-2, для использования графических процессоров 0, 1 и 2:", + "GPU Information:": "Информация о графическом процессоре:", + "Feature extraction": "Извлечение признаков", + "Save frequency:": "Частота сохранения:", + "Training epochs:": "Эпохи обучения:", + "Batch size per GPU:": "Размер пакета на графический процессор:", + "Save only the latest '.ckpt' file to save disk space:": "Сохраните только последний файл «.ckpt», чтобы сэкономить место на диске:", + "No": "Нет", + "Save a small final model to the 'weights' folder at each save point:": "Сохраните небольшую окончательную модель в папке 'weights' в каждой точке сохранения:", + "Load pre-trained base model G path:": "Путь к предварительно обученной базовой модели G:", + "Load pre-trained base model D path:": "Путь к предварительно обученной базовой модели D:", + "Train model": "Обучить модель", + "Train feature index": "Обучить индекс признаков", + "One-click training": "Обучение в один клик", + "Processing": "Обработка", + "Model fusion, can be used to test timbre fusion": "Слияние моделей, можно использовать для проверки слияния тембров.", + "Path to Model A:": "Путь к модели А:", + "Path to Model B:": "Путь к модели Б:", + "Weight for Model A:": "Вес модели А:", + "Whether the model has pitch guidance:": "Имеет ли модель управление высотой тона:", + "Model information to be placed:": "Информация о модели, которую необходимо разместить:", + "Model architecture version:": "Версия архитектуры модели:", + "Fusion": "Слияние", + "Modify model information": "Изменить информацию о модели", + "Path to Model:": "Путь к модели:", + "Model information to be modified:": "Информация о модели, которую необходимо изменить:", + "Save file name:": "Имя файла сохранения:", + "Modify": "Изменить", + "View model information": "Просмотр информации о модели", + "View": "Вид", + "Model extraction": "Извлечение модели (введите путь к модели большого файла в папке «logs»). 
Это полезно, если вы хотите остановить обучение на полпути и вручную извлечь и сохранить небольшой файл модели или если вы хотите протестировать промежуточную модель:", + "Name:": "Имя:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Имеет ли модель управление высотой тона (1: да, 0: нет):", + "Extract": "Извлечь", + "Export Onnx": "Экспортировать Onnx", + "RVC Model Path:": "Путь модели RVC:", + "Onnx Export Path:": "Путь экспорта Onnx:", + "MoeVS Model": "Модель MoeVS", + "Export Onnx Model": "Экспорт модели Onnx", + "Load model": "Загрузить модель", + "Hubert Model": "Модель Hubert", + "Select the .pth file": "Выберите файл .pth", + "Select the .index file": "Выберите файл .index", + "Select the .npy file": "Выберите файл .npy", + "Input device": "Устройство ввода", + "Output device": "Устройство вывода", + "Audio device (please use the same type of driver)": "Аудиоустройство (пожалуйста, используйте драйвер того же типа)", + "Response threshold": "Порог ответа", + "Pitch settings": "Настройки высоты тона", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Использовать ли названия нот вместо их значения в герцах. НАПРИМЕР. [C5, D6] вместо [523,25, 1174,66] Гц", + "Index Rate": "Коэффициент индекса", + "General settings": "Общие настройки", + "Sample length": "Длина образца", + "Fade length": "Длина затухания", + "Extra inference time": "Дополнительное время вывода", + "Input noise reduction": "Снижение входного шума", + "Output noise reduction": "Снижение выходного шума", + "Performance settings": "Настройки производительности", + "Start audio conversion": "Начать преобразование аудио", + "Stop audio conversion": "Остановить преобразование аудио", + "Inference time (ms):": "Время вывода (мс):", + "Select the pth file": "Выберите pth-файл", + "Select the .index file:": "Выберите файл .index:", + "The hubert model path must not contain Chinese characters": "Путь модели hubert не должен содержать китайские символы", + "The pth file path must not contain Chinese characters.": "Путь к файлу pth не должен содержать китайских символов.", + "The index file path must not contain Chinese characters.": "Путь к индексному файлу не должен содержать китайских символов.", + "Step algorithm": "Пошаговый алгоритм", + "Number of epoch processes": "Количество эпохальных процессов", + "Lowest points export": "Экспорт наименьших точек", + "How many lowest points to save:": "Сколько самых низких точек сохранить:", + "Export lowest points of a model": "Экспортировать самые низкие точки модели", + "Output models:": "Выходные модели:", + "Stats of selected models:": "Статистика выбранных моделей:", + "Custom f0 [Root pitch] File": "Пользовательский файл f0 [Шаг основного тона]", + "Min pitch:": "Минимальная высота тона:", + "Specify minimal pitch for inference [HZ]": "Укажите минимальную высоту тона для вывода [Гц]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Укажите минимальную высоту тона для вывода [NOTE][OCTAVE]", + "Max pitch:": "Максимальная высота тона:", + "Specify max pitch for inference [HZ]": "Укажите максимальную высоту тона для вывода [Гц]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Укажите максимальную высоту тона для вывода [NOTE][OCTAVE]", + "Browse presets for formanting": "Просмотр пресетов для формант", + "Presets are located in formantshiftcfg/ folder": "Пресеты находятся в папке formantshiftcfg/.", + "Default value is 1.0": "Значение по умолчанию — 1,0.", + "Quefrency for formant shifting": "Quefrency 
для сдвига форманты", + "Timbre for formant shifting": "Тембр для смещения форманты", + "Apply": "Применять", + "Single": "Одинокий", + "Batch": "Партия", + "Separate YouTube tracks": "Отдельные треки YouTube", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Загрузите аудио из видео YouTube и автоматически разделите вокальные и инструментальные дорожки.", + "Extra": "Дополнительный", + "Merge": "Объединить", + "Merge your generated audios with the instrumental": "Объедините сгенерированные аудио с инструментальной композицией.", + "Choose your instrumental:": "Выберите свой инструментал", + "Choose the generated audio:": "Выберите сгенерированный звук", + "Combine": "Объединить", + "Download and Separate": "Скачать и отделить", + "Enter the YouTube link:": "Введите ссылку на ютуб", + "This section contains some extra utilities that often may be in experimental phases": "Этот раздел содержит некоторые дополнительные утилиты, которые часто могут находиться на экспериментальной стадии.", + "Merge Audios": "Объединить аудио", + "Audio files have been moved to the 'audios' folder.": "Аудиофайлы перемещены в папку «audios».", + "Downloading audio from the video...": "Загрузка звука из видео...", + "Audio downloaded!": "Аудио скачать!", + "An error occurred:": "Произошла ошибка:", + "Separating audio...": "Разделение звука...", + "File moved successfully.": "Файл успешно перемещен.", + "Finished!": "Законченный!", + "The source file does not exist.": "Исходный файл не существует.", + "Error moving the file:": "Ошибка перемещения файла:", + "Downloading {name} from drive": "Загрузка {name} с диска", + "The attempt to download using Drive didn't work": "Попытка скачать с Диска не удалась.", + "Error downloading the file: {str(e)}": "Ошибка загрузки файла: {str(e)}", + "Downloading {name} from mega": "Скачиваю {name} из мега", + "Downloading {name} from basic url": "Загрузка {name} с основного URL", + "Download Audio": "Скачать аудио", + "Download audios of any format for use in inference (recommended for mobile users).": "Загрузите аудио любого формата для использования в умозаключениях (рекомендуется для мобильных пользователей)", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Любые пост-преобразования ConnectionResetErrors не имеют значения и являются чисто визуальными; их можно игнорировать.", + "Processed audio saved at: ": "Обработанный звук сохранен по адресу:", + "Conversion complete!": "Преобразование завершено!", + "Reverb": "Реверберация", + "Compressor": "Компрессор", + "Noise Gate": "Шумовые ворота", + "Volume": "Объем", + "Drag the audio here and click the Refresh button": "Перетащите аудио сюда и нажмите кнопку «Обновить».", + "Select the generated audio": "Выберите сгенерированный звук", + "Volume of the instrumental audio:": "Громкость инструментального звука", + "Volume of the generated audio:": "Громкость сгенерированного звука", + "### Add the effects": "### Добавьте эффекты", + "Starting audio conversion... (This might take a moment)": "Начинается конвертация аудио... 
(Это может занять некоторое время)", + "TTS Model:": "Модель TTS:", + "TTS": "TTS", + "TTS Method:": "Метод TTS:", + "Audio TTS:": "Аудио TTS:", + "Audio RVC:": "Аудио RVC:", + "You can also drop your files to load your model.": "Вы также можете перетащить свои файлы, чтобы загрузить свою модель.", + "Drag your .pth file here:": "Перетащите ваш файл .pth сюда:", + "Drag your .index file here:": "Перетащите ваш файл .index сюда:" +} diff --git a/assets/i18n/langs/tr_TR.json b/assets/i18n/langs/tr_TR.json new file mode 100644 index 0000000000000000000000000000000000000000..a68b9881d701174c9aaaf8154b32c40d7b91caf5 --- /dev/null +++ b/assets/i18n/langs/tr_TR.json @@ -0,0 +1,250 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "Üzgünüz, eğitiminizi desteklemek için uyumlu bir GPU bulunmuyor.", + "Yes": "Evet", + "Select your dataset:": "Veri setinizi seçin:", + "Update list": "Listeyi güncelle", + "Download Model": "Modeli İndir", + "Download Backup": "Yedeklemeyi İndir", + "Download Dataset": "Veri Setini İndir", + "Download": "İndir", + "Url:": "URL:", + "Build the index before saving.": "Kaydetmeden önce dizini oluşturun.", + "Save your model once the training ends.": "Eğitim sona erdiğinde modelinizi kaydedin.", + "Save type": "Kaydetme türü", + "Save model": "Modeli Kaydet", + "Choose the method": "Yöntemi seçin", + "Save all": "Hepsini kaydet", + "Save D and G": "D ve G'yi kaydet", + "Save voice": "Sesi kaydet", + "Downloading the file: ": "Dosya indiriliyor: ", + "Stop training": "Eğitimi durdur", + "Too many users have recently viewed or downloaded this file": "Çok sayıda kullanıcı bu dosyayı yakın zamanda görüntüledi veya indirdi", + "Cannot get file from this private link": "Bu özel bağlantıdan dosya alınamıyor", + "Full download": "Tam indirme", + "An error occurred downloading": "İndirme sırasında bir hata oluştu", + "Model saved successfully": "Model başarıyla kaydedildi", + "Saving the model...": "Model kaydediliyor...", + "Saved without index...": "Dizin oluşturulmadan kaydedildi...", + "Saved without inference model...": "Çıkarsama modeli oluşturulmadan kaydedildi...", + "An error occurred saving the model": "Model kaydedilirken bir hata oluştu", + "The model you want to save does not exist, be sure to enter the correct name.": "Kaydetmek istediğiniz model mevcut değil, doğru adı girdiğinizden emin olun.", + "The file could not be downloaded.": "Dosya indirilemedi.", + "Unzip error.": "Sıkıştırılmış dosya açma hatası.", + "Path to your added.index file (if it didn't automatically find it)": "added.index dosyanızın yolu (eğer otomatik olarak bulunmadıysa)", + "It has been downloaded successfully.": "Başarıyla indirildi.", + "Proceeding with the extraction...": "Çıkarma işlemine devam ediliyor...", + "The Backup has been uploaded successfully.": "Yedekleme başarıyla yüklendi.", + "The Dataset has been loaded successfully.": "Veri seti başarıyla yüklendi.", + "The Model has been loaded successfully.": "Model başarıyla yüklendi.", + "It is used to download your inference models.": "Çıkarsama modellerinizi indirmek için kullanılır.", + "It is used to download your training backups.": "Eğitim yedeklemelerinizi indirmek için kullanılır.", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Modelinizi eğitmek için ses içeren uyumlu bir format (.wav/.flac) ile veri setini indirin.", + "No relevant file was found to upload.": "Yüklemek için ilgili dosya bulunamadı.", + "The model works for inference, 
and has the .index file.": "Model çıkarsama için çalışır ve .index dosyasına sahiptir.", + "The model works for inference, but it doesn't have the .index file.": "Model çıkarsama için çalışır, ancak .index dosyasına sahip değildir.", + "This may take a few minutes, please wait...": "Bu birkaç dakika sürebilir, lütfen bekleyin...", + "Resources": "Kaynaklar", + "Step 1: Processing data": "Adım 1: Verileri işleme", + "Step 2: Extracting features": "Adım 2: Özellik çıkarma", + "Step 3: Model training started": "Adım 3: Model eğitimi başladı", + "Training is done, check train.log": "Eğitim tamamlandı, train.log dosyasını kontrol edin", + "All processes have been completed!": "Tüm işlemler tamamlandı!", + "Model Inference": "Model Çıkarsama", + "Inferencing voice:": "Çıkarsama sesi:", + "Model_Name": "Model_Adı", + "Dataset_Name": "Veri_Seti_Adı", + "Or add your dataset path:": "Veya veri kümenizin yolunu girin:", + "Whether the model has pitch guidance.": "Modelin pitch rehberi olup olmadığı.", + "Whether to save only the latest .ckpt file to save hard drive space": "Sabit disk alanından tasarruf etmek için yalnızca en son .ckpt dosyasının kaydedilip kaydedilmeyeceği", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Tüm eğitim setlerini GPU belleğine önbelleğe alın. Küçük veri setlerini önbelleğe almak (10 dakikadan az) eğitimi hızlandırabilir.", + "Save a small final model to the 'weights' folder at each save point": "Her kaydetme noktasında 'weights' klasörüne küçük bir nihai modeli kaydedin", + "Refresh": "Yenile", + "Unload voice to save GPU memory": "GPU belleğinden tasarruf etmek için sesi boşalt", + "Select Speaker/Singer ID:": "Konuşmacı/Şarkıcı Kimliği Seç:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Erkekten kadına dönüşüm için önerilen +12 ton, kadından erkeğe dönüşüm için -12 ton. Ses aralığı fazla uzaklaşırsa ve ses bozulursa, uygun aralığı kendiniz ayarlayabilirsiniz.", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "Transpoze et (tamsayı, yarım ton sayısı, bir oktav yukarı: 12, bir oktav aşağı: -12):", + "Feature search database file path:": "Özellik arama veritabanı dosya yolu:", + "Enter the path of the audio file to be processed (default is the correct format example):": "İşlenecek ses dosyasının yolunu girin (varsayılan olarak doğru format örneğidir):", + "Select the pitch extraction algorithm:": "Pitch çıkarma algoritmasını seçin:", + "Feature search dataset file path": "Özellik arama veri seti dosya yolu", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Eğer >=3 ise: elde edilen pitch sonuçlarına medyan filtreleme uygulayın. Değer, filtre yarıçapını temsil eder ve nefes sesini azaltabilir.", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Özellik dizin dosyasının yolu. Seçilen sonucu kullanmak için boş bırakın:", + "Auto-detect index path and select from the dropdown:": "Dizin yolunu otomatik algılayın ve açılır menüden seçin:", + "Path to feature file:": "Özellik dosyasının yolu:", + "Search feature ratio:": "Özellik arama oranı:", + "Resample the output audio in post-processing to the final sample rate. 
Set to 0 for no resampling:": "Çıkış sesini son işlemde nihai örnekleme hızına göre yeniden örnekleme yapın. Örnekleme yapmamak için 0 olarak ayarlayın:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Girişin ses zarfını çıkışın ses zarfıyla değiştirin veya karıştırın. Oran 1'e ne kadar yakınsa, çıkış zarfı o kadar çok kullanılır:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Sessiz ünsüzleri ve nefes seslerini koruyarak elektronik müzikte yırtılma gibi sanat efektlerini önleyin. Devre dışı bırakmak için 0.5 olarak ayarlayın. Korumayı artırmak için değeri azaltın, ancak dizinleme doğruluğunu azaltabilir:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 eğrisi dosyası (isteğe bağlı). Her satırda bir pitch bulunur. Varsayılan F0 ve pitch modülasyonunu değiştirir:", + "Convert": "Dönüştür", + "Output information:": "Çıkış bilgisi:", + "Export audio (click on the three dots in the lower right corner to download)": "Sesi dışa aktar (indirmek için sağ alt köşedeki üç noktaya tıklayın)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Toplu dönüşüm. Dönüştürülecek ses dosyalarını içeren klasörü girin veya birden fazla ses dosyası yükleyin. Dönüştürülen ses, belirtilen klasöre (varsayılan: 'opt') çıktı olarak verilir.", + "Specify output folder:": "Çıkış klasörünü belirtin:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "İşlenecek ses klasörünün yolunu girin (dosya yöneticisinin adres çubuğundan kopyalayın):", + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Ses dosyalarını toplu olarak da girebilirsiniz. İki seçenekten birini seçin. 
Öncelik klasörden okuma yapmaya verilir.", + "Export file format:": "Çıkış dosya formatı:", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "İşlenecek ses klasörünün yolunu girin:", + "Model:": "Model:", + "Vocal Extraction Aggressive": "Vokal Çıkarma Agresif", + "Specify the output folder for vocals:": "Vokaller için çıkış klasörünü belirtin:", + "Specify the output folder for accompaniment:": "Eşlik için çıkış klasörünü belirtin:", + "Train": "Eğit", + "Enter the model name:": "Model adını girin:", + "Target sample rate:": "Hedef örnek hızı:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "Modelin ton yönü rehberliği olup olmadığı (şarkı için gereklidir, konuşma için isteğe bağlıdır):", + "Version:": "Sürüm:", + "Number of CPU processes:": "CPU işlem sayısı:", + "Enter the path of the training folder:": "Eğitim klasörünün yolunu girin:", + "Specify the model ID:": "Model kimliğini belirtin:", + "Auto detect audio path and select from the dropdown:": "Otomatik olarak ses yolunu algıla ve açılır menüden seç:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "İşlenecek ses dosyasının yoluna ses dosyasının adını ekleyin (varsayılan olarak doğru format örneği) Yolu kaldırarak açılır menüden bir ses kullanın:", + "Advanced Settings": "Gelişmiş Ayarlar", + "Settings": "Ayarlar", + "Status:": "Durum:", + "Process data": "Veriyi işle", + "Drag your audio here:": "Sesinizi buraya sürükleyin:", + "Or record an audio:": "Veya bir ses kaydedin:", + "Formant shift inference audio": "Formant kaydırma çıkarsama sesi", + "Used for male to female and vice-versa conversions": "Erkekten kadına ve tam tersine dönüşümler için kullanılır", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "GPU dizinini '-' ile ayırarak belirtin, örneğin 0-1-2; GPU'ları 0, 1 ve 2 kullanmak için:", + "GPU Information:": "GPU Bilgileri:", + "Feature extraction": "Özellik çıkarma", + "Save frequency:": "Kaydetme frekansı:", + "Training epochs:": "Eğitim dönemleri:", + "Batch size per GPU:": "Her GPU için toplu iş boyutu:", + "Save only the latest '.ckpt' file to save disk space:": "Sadece en son '.ckpt' dosyasını kaydederek disk alanı tasarrufu yapın:", + "No": "Hayır", + "Save a small final model to the 'weights' folder at each save point:": "Her kaydetme noktasında 'weights' klasörüne küçük bir son model kaydedin:", + "Load pre-trained base model G path:": "Önceden eğitilmiş temel G model yolu yükle:", + "Load pre-trained base model D path:": "Önceden eğitilmiş temel D model yolu yükle:", + "Train model": "Modeli eğit", + "Train feature index": "Özellik dizinini eğit", + "One-click training": "Bir tıklamayla eğitim", + "Processing": "İşleniyor", + "Model fusion, can be used to test timbre fusion": "Model birleştirme, timbre birleştirmeyi test etmek için kullanılabilir", + "Path to Model A:": "Model A'nın yolu:", + "Path to Model B:": "Model B'nin yolu:", + "Weight for Model A:": "Model A için ağırlık:", + "Whether the model has pitch guidance:": "Modelin ton yönü rehberliği olup olmadığı:", + "Model information to be placed:": "Yerleştirilecek model bilgisi:", + "Model architecture version:": "Model mimari sürümü:", + "Fusion": "Birleştirme", + "Modify model information": "Model bilgisini değiştir", + "Path to Model:": "Model yolu:", + "Model information to be modified:": "Değiştirilecek model bilgisi:", +
"Save file name:": "Dosya adını kaydet:", + "Modify": "Değiştir", + "View model information": "Model bilgisini görüntüle", + "View": "Görüntüle", + "Model extraction": "Model çıkarımı (büyük dosya modelinin 'logs' klasörünün altına yolunu girin). Eğitimi yarıda kesmek ve manuel olarak küçük bir model dosyası çıkarmak ve kaydetmek istiyorsanız veya ara bir modeli test etmek isterseniz bu yararlı olabilir:", + "Name:": "Adı kaydet:", + "Whether the model has pitch guidance (1: yes, 0: no):": "Modelin ton yönü rehberliği olup olmadığı (1: evet, 0: hayır):", + "Extract": "Çıkar", + "Export Onnx": "Onnx'i dışa aktar", + "RVC Model Path:": "RVC Model Yolu:", + "Onnx Export Path:": "Onnx Dışa Aktarma Yolu:", + "MoeVS Model": "MoeVS Modeli", + "Export Onnx Model": "Onnx Modelini Dışa Aktar", + "Load model": "Modeli yükle", + "Hubert Model": "Hubert Modeli", + "Select the .pth file": ".pth dosyasını seçin", + "Select the .index file": ".index dosyasını seçin", + "Select the .npy file": ".npy dosyasını seçin", + "Input device": "Giriş cihazı", + "Output device": "Çıkış cihazı", + "Audio device (please use the same type of driver)": "Ses cihazı (lütfen aynı sürücü türünü kullanın)", + "Response threshold": "Yanıt eşiği", + "Pitch settings": "Ton ayarları", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Hertz değeri yerine nota isimlerinin kullanılıp kullanılmayacağı. Örn. [C5, D6] yerine [523.25, 1174.66]Hz", + "Index Rate": "Dizin Oranı", + "General settings": "Genel ayarlar", + "Sample length": "Örnek uzunluğu", + "Fade length": "Solma uzunluğu", + "Extra inference time": "Ek çıkarsama süresi", + "Input noise reduction": "Giriş gürültü azaltma", + "Output noise reduction": "Çıkış gürültü azaltma", + "Performance settings": "Performans ayarları", + "Start audio conversion": "Ses dönüşümünü başlat", + "Stop audio conversion": "Ses dönüşümünü durdur", + "Inference time (ms):": "Çıkarsama süresi (ms):", + "Select the pth file": ".pth dosyasını seçin", + "Select the .index file:": ".index dosyasını seçin", + "The hubert model path must not contain Chinese characters": "Hubert model yolu Çince karakter içermemelidir", + "The pth file path must not contain Chinese characters.": ".pth dosya yolu Çince karakter içermemelidir.", + "The index file path must not contain Chinese characters.": ".index dosya yolu Çince karakter içermemelidir.", + "Step algorithm": "Adım algoritması", + "Number of epoch processes": "Dönem işlem sayısı", + "Lowest points export": "En düşük noktaları dışa aktar", + "How many lowest points to save:": "Kaç en düşük noktanın kaydedileceği", + "Export lowest points of a model": "Bir modelin en düşük noktalarını dışa aktar", + "Output models:": "Modelleri dışa aktar", + "Stats of selected models:": "Seçilen modellerin istatistikleri", + "Custom f0 [Root pitch] File": "Özel f0 [Kök ton] Dosyası", + "Min pitch:": "Minimum ton yüksekliği:", + "Specify minimal pitch for inference [HZ]": "Çıkarsama için minimum ton yüksekliğini belirt [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "Çıkarsama için minimum ton yüksekliğini belirt [NOTA][OKTAV]", + "Max pitch:": "Maksimum ton yüksekliği:", + "Specify max pitch for inference [HZ]": "Çıkarsama için maksimum ton yüksekliğini belirt [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "Çıkarsama için maksimum ton yüksekliğini belirt [NOTA][OKTAV]", + "Browse presets for formanting": "Formant ayarları için ön ayarları göz at", + "Presets are located in formantshiftcfg/ 
folder": "Ön ayarlar formantshiftcfg/ klasöründe bulunur", + "Default value is 1.0": "Varsayılan değer 1.0'dır", + "Quefrency for formant shifting": "Formant kaydırma için kvarakfrekans", + "Timbre for formant shifting": "Formant kaydırma için timbre", + "Apply": "Uygula", + "Single": "Tek", + "Batch": "Toplu", + "Separate YouTube tracks": "YouTube parçalarını ayır", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "YouTube videosundan ses indirin ve otomatik olarak vokal ve enstrümantal parçaları ayırın", + "Extra": "Ekstra", + "Merge": "Birleştir", + "Merge your generated audios with the instrumental": "Üretilen seslerinizi enstrümantal ile birleştirin", + "Choose your instrumental:": "Enstrümantal seçin:", + "Choose the generated audio:": "Üretilen sesi seçin:", + "Combine": "Birleştir", + "Download and Separate": "İndir ve Ayır", + "Enter the YouTube link:": "YouTube bağlantısını girin:", + "This section contains some extra utilities that often may be in experimental phases": "Bu bölüm genellikle deneysel aşamalarda olabilecek bazı ek hizmet programlarını içerir", + "Merge Audios": "Sesleri Birleştir", + "Audio files have been moved to the 'audios' folder.": "Ses dosyaları 'audios' klasörüne taşındı.", + "Downloading audio from the video...": "Videodan ses indiriliyor...", + "Audio downloaded!": "Ses indirildi!", + "An error occurred:": "Bir hata oluştu:", + "Separating audio...": "Ses ayrıştırılıyor...", + "File moved successfully.": "Dosya başarıyla taşındı.", + "Finished!": "Tamamlandı!", + "The source file does not exist.": "Kaynak dosya mevcut değil.", + "Error moving the file:": "Dosya taşınırken hata oluştu:", + "Downloading {name} from drive": "{name} Google Drive'dan indiriliyor", + "The attempt to download using Drive didn't work": "Drive kullanılarak indirme denemesi başarısız oldu", + "Error downloading the file: {str(e)}": "Dosya indirilirken hata oluştu: {str(e)}", + "Downloading {name} from mega": "{name} Mega'dan indiriliyor", + "Downloading {name} from basic url": "{name} temel URL'den indiriliyor", + "Download Audio": "Ses İndir", + "Download audios of any format for use in inference (recommended for mobile users).": "Çıkarsama için herhangi bir formatta ses indirin (mobil kullanıcılar için önerilir).", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Dönüşümden sonra herhangi bir ConnectionResetErrors önemsizdir ve sadece görseldir; ihmal edilebilirler.\n", + "Processed audio saved at: ": "İşlenmiş ses kaydedildi: ", + "Conversion complete!": "Dönüşüm tamamlandı!", + "Reverb": "Yankı", + "Compressor": "Sıkıştırıcı", + "Noise Gate": "Gürültü Kapısı", + "Volume": "Ses Düzeyi", + "Drag the audio here and click the Refresh button": "Sesi buraya sürükleyin ve Yenile düğmesine tıklayın", + "Select the generated audio": "Üretilen sesi seçin", + "Volume of the instrumental audio:": "Enstrümantal sesin ses düzeyi:", + "Volume of the generated audio:": "Üretilen sesin ses düzeyi:", + "### Audio settings:": "### Ses ayarları:", + "### Instrumental settings:": "### Enstrümantal ayarları:", + "### Add the effects:": "### Efektleri ekle:", + "Starting audio conversion... (This might take a moment)": "Ses dönüşümü başlatılıyor... 
(Bu biraz zaman alabilir)", + "TTS Model:": "TTS Sesleri", + "TTS": "TTS", + "TTS Method:": "TTS Yöntemi", + "Audio TTS:": "Sesli TTS", + "Audio RVC:": "Sesli Model", + "You can also drop your files to load your model.": "Modelinizi yüklemek için dosyalarınızı da sürükleyebilirsiniz.", + "Drag your .pth file here:": ".pth dosyanızı buraya sürükleyin:", + "Drag your .index file here:": ".index dosyanızı buraya sürükleyin:" +} diff --git a/assets/i18n/langs/ur_UR.json b/assets/i18n/langs/ur_UR.json new file mode 100644 index 0000000000000000000000000000000000000000..c3205a524eb68616a1d9ac1461b5c1c2aae0837e --- /dev/null +++ b/assets/i18n/langs/ur_UR.json @@ -0,0 +1,248 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "بدقسمتی سے، آپ کی تربیت کو سپورٹ کرنے کے لیے کوئی ہم آہنگ GPU دستیاب نہیں ہے۔", + "Yes": "جی ہاں", + "Select your dataset:": "اپنا ڈیٹا سیٹ منتخب کریں۔", + "Update list": "فہرست کو اپ ڈیٹ کریں۔", + "Download Model": "ماڈل ڈاؤن لوڈ کریں۔", + "Download Backup": "بیک اپ ڈاؤن لوڈ کریں۔", + "Download Dataset": "ڈیٹا سیٹ ڈاؤن لوڈ کریں۔", + "Download": "ڈاؤن لوڈ کریں", + "Url:": "یو آر ایل:", + "Build the index before saving.": "محفوظ کرنے سے پہلے انڈیکس بنائیں۔", + "Save your model once the training ends.": "ٹریننگ ختم ہونے کے بعد اپنے ماڈل کو محفوظ کریں۔", + "Save type": "قسم محفوظ کریں۔", + "Save model": "ماڈل کو محفوظ کریں۔", + "Choose the method": "طریقہ منتخب کریں۔", + "Save all": "محفوظ کریں", + "Save D and G": "ڈی اور جی کو محفوظ کریں۔", + "Save voice": "آواز محفوظ کریں۔", + "Downloading the file: ": "فائل ڈاؤن لوڈ کرنا:", + "Stop training": "تربیت بند کرو", + "Too many users have recently viewed or downloaded this file": "بہت سارے صارفین نے حال ہی میں اس فائل کو دیکھا یا ڈاؤن لوڈ کیا ہے۔", + "Cannot get file from this private link": "اس نجی لنک سے فائل حاصل نہیں کی جا سکتی", + "Full download": "مکمل ڈاؤن لوڈ", + "An error occurred downloading": "ڈاؤن لوڈ کرنے میں ایک خرابی پیش آگئی", + "Model saved successfully": "ماڈل کامیابی سے محفوظ ہو گیا۔", + "Saving the model...": "ماڈل محفوظ ہو رہا ہے...", + "Saved without index...": "انڈیکس کے بغیر محفوظ کیا گیا...", + "model_name": "ماڈل_نام", + "Saved without inference model...": "بغیر کسی اندازہ کے ماڈل کے محفوظ کیا گیا...", + "An error occurred saving the model": "ماڈل کو محفوظ کرنے میں ایک خرابی پیش آگئی", + "The model you want to save does not exist, be sure to enter the correct name.": "آپ جس ماڈل کو محفوظ کرنا چاہتے ہیں وہ موجود نہیں ہے، درست نام ضرور درج کریں۔", + "The file could not be downloaded.": "فائل ڈاؤن لوڈ نہیں ہو سکی۔", + "Unzip error.": "ان زپ کی خرابی۔", + "Path to your added.index file (if it didn't automatically find it)": "آپ کی add.index فائل کا راستہ (اگر یہ خود بخود اسے نہیں مل پاتی ہے)", + "It has been downloaded successfully.": "اسے کامیابی کے ساتھ ڈاؤن لوڈ کر لیا گیا ہے۔", + "Proceeding with the extraction...": "نکالنے کے ساتھ آگے بڑھ رہا ہے...", + "The Backup has been uploaded successfully.": "بیک اپ کامیابی کے ساتھ اپ لوڈ ہو گیا ہے۔", + "The Dataset has been loaded successfully.": "ڈیٹا سیٹ کامیابی کے ساتھ لوڈ ہو گیا ہے۔", + "The Model has been loaded successfully.": "ماڈل کامیابی کے ساتھ لوڈ ہو گیا ہے۔", + "It is used to download your inference models.": "یہ آپ کے انفرنس ماڈلز کو ڈاؤن لوڈ کرنے کے لیے استعمال ہوتا ہے۔", + "It is used to download your training backups.": "یہ آپ کے تربیتی بیک اپ کو ڈاؤن لوڈ کرنے کے لیے استعمال ہوتا ہے۔", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "اپنے ماڈل کو تربیت دینے کے لیے 
ڈیٹاسیٹ کو آڈیوز کے ساتھ مطابقت پذیر فارمیٹ (.wav/.flac) میں ڈاؤن لوڈ کریں۔", + "No relevant file was found to upload.": "اپ لوڈ کرنے کے لیے کوئی متعلقہ فائل نہیں ملی۔", + "The model works for inference, and has the .index file.": "ماڈل تخمینہ کے لیے کام کرتا ہے، اور اس میں .index فائل ہے۔", + "The model works for inference, but it doesn't have the .index file.": "ماڈل تخمینہ کے لیے کام کرتا ہے، لیکن اس میں .index فائل نہیں ہے۔", + "This may take a few minutes, please wait...": "اس میں کچھ منٹ لگ سکتے ہیں، براہ کرم انتظار کریں...", + "Resources": "حوالہ جات", + "Step 1: Processing data": "مرحلہ 1: ڈیٹا پر کارروائی کرنا", + "Step 2: Extracting features": "مرحلہ 2b: خصوصیات کو نکالنا", + "Step 3: Model training started": "مرحلہ 3a: ماڈل ٹریننگ شروع ہوئی۔", + "Training is done, check train.log": "ٹریننگ ہو چکی ہے، ٹرین ڈاٹ لاگ چیک کریں۔", + "All processes have been completed!": "تمام عمل مکمل ہو چکے ہیں!", + "Model Inference": "ماڈل کا اندازہ", + "Inferencing voice:": "اندازہ لگانے والی آواز:", + "Model_Name": "ماڈل_نام", + "Dataset_Name": "ڈیٹا سیٹ_نام", + "Or add your dataset path:": "یا اپنے ڈیٹاسیٹ کا راستہ درج کریں:", + "Whether the model has pitch guidance.": "آیا ماڈل میں پچ گائیڈنس ہے۔", + "Whether to save only the latest .ckpt file to save hard drive space": "آیا ہارڈ ڈرائیو کی جگہ بچانے کے لیے صرف تازہ ترین .ckpt فائل کو محفوظ کرنا ہے۔", + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "تمام تربیتی سیٹوں کو GPU میموری میں کیش کریں۔ چھوٹے ڈیٹا سیٹس (10 منٹ سے کم) کیشنگ ٹریننگ کو تیز کر سکتی ہے۔", + "Save a small final model to the 'weights' folder at each save point": "ہر سیو پوائنٹ پر ایک چھوٹا فائنل ماڈل 'وزن' فولڈر میں محفوظ کریں۔", + "Refresh": "آواز کی فہرست، انڈیکس پاتھ اور آڈیو فائلوں کو ریفریش کریں۔", + "Unload voice to save GPU memory": "GPU میموری کو بچانے کے لیے آواز اتاریں:", + "Select Speaker/Singer ID:": "اسپیکر/گلوکار کی شناخت منتخب کریں:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "مرد سے خاتون کی تبدیلی کے لیے تجویز کردہ +12 کلید، اور عورت سے مرد کی تبدیلی کے لیے -12 کلید۔ اگر آواز کی حد بہت دور جاتی ہے اور آواز بگڑ جاتی ہے، تو آپ اسے خود بھی مناسب رینج میں ایڈجسٹ کر سکتے ہیں۔", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "ٹرانسپوز (انٹیجر، سیمیٹونز کی تعداد، ایک آکٹیو سے بڑھائیں: 12، ایک آکٹیو سے کم: -12):", + "Enter the path of the audio file to be processed (default is the correct format example):": "کارروائی کی جانے والی آڈیو فائل کا راستہ درج کریں (پہلے سے طے شدہ فارمیٹ کی صحیح مثال ہے):", + "Select the pitch extraction algorithm:": "پچ نکالنے کا الگورتھم منتخب کریں:", + "Feature search dataset file path": "فیچر سرچ ڈیٹاسیٹ فائل پاتھ", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "اگر >=3: کٹائی ہوئی پچ کے نتائج پر میڈین فلٹرنگ لگائیں۔ قدر فلٹر کے رداس کی نمائندگی کرتی ہے اور سانس لینے میں کمی کر سکتی ہے۔", + "Path to the feature index file. 
Leave blank to use the selected result from the dropdown:": "فیچر انڈیکس فائل کا راستہ۔ ڈراپ ڈاؤن سے منتخب کردہ نتیجہ کو استعمال کرنے کے لیے خالی چھوڑ دیں:", + "Auto-detect index path and select from the dropdown:": "انڈیکس پاتھ کا خود بخود پتہ لگائیں اور ڈراپ ڈاؤن سے منتخب کریں۔", + "Path to feature file:": "فیچر فائل کا راستہ:", + "Search feature ratio:": "تلاش کی خصوصیت کا تناسب:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "پوسٹ پروسیسنگ میں آؤٹ پٹ آڈیو کو حتمی نمونے کی شرح پر دوبارہ نمونہ دیں۔ دوبارہ نمونے لینے کے لیے 0 پر سیٹ کریں:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "آؤٹ پٹ کے والیوم لفافے کو تبدیل کرنے یا ملانے کے لیے ان پٹ کے والیوم لفافے کا استعمال کریں۔ تناسب 1 کے جتنا قریب ہوگا، اتنا ہی زیادہ آؤٹ پٹ لفافہ استعمال ہوتا ہے:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "الیکٹرونک میوزک میں پھاڑنے جیسے فن پاروں کو روکنے کے لیے بے آواز تلفظ اور سانس کی آوازوں کی حفاظت کریں۔ غیر فعال کرنے کے لیے 0.5 پر سیٹ کریں۔ تحفظ کو بڑھانے کے لیے قدر کو کم کریں، لیکن یہ اشاریہ سازی کی درستگی کو کم کر سکتا ہے:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 وکر فائل (اختیاری)۔ فی لائن ایک پچ۔ پہلے سے طے شدہ F0 اور پچ ماڈیولیشن کو بدل دیتا ہے:", + "Convert": "تبدیل کریں", + "Output information:": "آؤٹ پٹ کی معلومات", + "Export audio (click on the three dots in the lower right corner to download)": "آڈیو برآمد کریں (ڈاؤن لوڈ کرنے کے لیے نیچے دائیں کونے میں تین نقطوں پر کلک کریں)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "بیچ کی تبدیلی۔ وہ فولڈر درج کریں جس میں آڈیو فائلیں تبدیل کی جائیں یا متعدد آڈیو فائلیں اپ لوڈ کریں۔ تبدیل شدہ آڈیو مخصوص فولڈر میں آؤٹ پٹ ہو گا (پہلے سے طے شدہ: 'opt')۔", + "Specify output folder:": "آؤٹ پٹ فولڈر کی وضاحت کریں:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "آڈیو فولڈر کا راستہ درج کریں جس پر کارروائی کی جائے (اسے فائل مینیجر کے ایڈریس بار سے کاپی کریں):", + "You can also input audio files in batches. Choose one of the two options. 
Priority is given to reading from the folder.": "آپ آڈیو فائلوں کو بیچوں میں بھی ڈال سکتے ہیں۔ دو آپشنز میں سے ایک کا انتخاب کریں۔ فولڈر سے پڑھنے کو ترجیح دی جاتی ہے۔", + "Export file format": "فائل کی شکل برآمد کریں۔", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "جس آڈیو فولڈر پر کارروائی کی جائے گی اس کا راستہ درج کریں:", + "Model": "ماڈل", + "Vocal Extraction Aggressive": "آواز نکالنا جارحانہ", + "Specify the output folder for vocals:": "آواز کے لیے آؤٹ پٹ فولڈر کی وضاحت کریں:", + "Specify the output folder for accompaniment:": "ساتھ کے لیے آؤٹ پٹ فولڈر کی وضاحت کریں:", + "Train": "ٹرین", + "Enter the model name:": "ماڈل کا نام درج کریں:", + "Target sample rate:": "ہدف نمونہ کی شرح:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "آیا ماڈل میں پچ گائیڈنس ہے (گانے کے لیے ضروری، تقریر کے لیے اختیاری):", + "Version": "ورژن", + "Number of CPU processes:": "پچ نکالنے اور ڈیٹا پروسیسنگ کے لیے استعمال ہونے والے CPU عملوں کی تعداد:", + "Enter the path of the training folder:": "ٹریننگ فولڈر کا راستہ درج کریں:", + "Specify the model ID:": "براہ کرم ماڈل ID کی وضاحت کریں:", + "Auto detect audio path and select from the dropdown:": "آڈیو پاتھ کا خود بخود پتہ لگائیں اور ڈراپ ڈاؤن سے منتخب کریں:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "آڈیو فائل کے راستے میں آڈیو کا نام شامل کریں جس پر کارروائی کی جائے (پہلے سے طے شدہ فارمیٹ کی صحیح مثال ہے) ڈراپ ڈاؤن فہرست سے آڈیو استعمال کرنے کے لیے راستے کو ہٹا دیں:", + "Advanced Settings": "اعلی درجے کی ترتیبات", + "Settings": "ترتیبات", + "Status:": "حالت", + "Process data": "ڈیٹا پر کارروائی کریں۔", + "Drag your audio here:": "اپنے آڈیو کو یہاں گھسیٹیں اور ریفریش بٹن کو دبائیں۔", + "Or record an audio:": "یا آڈیو ریکارڈ کریں۔", + "Formant shift inference audio": "فارمینٹ شفٹ انفرنس آڈیو", + "Used for male to female and vice-versa conversions": "مرد سے عورت اور اس کے برعکس تبادلوں کے لیے استعمال کیا جاتا ہے۔", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "براہ کرم '-' سے الگ کردہ GPU انڈیکس فراہم کریں، جیسے GPUs 0، 1، اور 2 استعمال کرنے کے لیے 0-1-2:", + "GPU Information:": "GPU کی معلومات", + "Feature extraction": "خصوصیت کا اخراج", + "Save frequency:": "تعدد کو محفوظ کریں:", + "Training epochs:": "تربیتی دور:", + "Batch size per GPU:": "بیچ سائز فی GPU:", + "Save only the latest '.ckpt' file to save disk space:": "ڈسک کی جگہ بچانے کے لیے صرف تازہ ترین '.ckpt' فائل کو محفوظ کریں:", + "No": "نہیں", + "Save a small final model to the 'weights' folder at each save point:": "ہر سیو پوائنٹ پر ایک چھوٹا فائنل ماڈل 'وزن' فولڈر میں محفوظ کریں:", + "Load pre-trained base model G path:": "پہلے سے تربیت یافتہ بیس ماڈل جی پاتھ لوڈ کریں:", + "Load pre-trained base model D path:": "پہلے سے تربیت یافتہ بیس ماڈل ڈی پاتھ لوڈ کریں:", + "Train model": "ٹرین ماڈل", + "Train feature index": "ٹرین فیچر انڈیکس", + "One-click training": "ایک کلک کی تربیت", + "Processing": "پروسیسنگ", + "Model fusion, can be used to test timbre fusion": "ماڈل فیوژن، ٹمبر فیوژن کو جانچنے کے لیے استعمال کیا جا سکتا ہے۔", + "Path to Model A:": "ماڈل A کا راستہ:", + "Path to Model B:": "ماڈل B کا راستہ:", + "Weight for Model A:": "ماڈل A کے لیے وزن:", + "Whether the model has pitch guidance:": "آیا ماڈل میں پچ گائیڈنس ہے:", + "Model information to be placed:": "ماڈل کی معلومات رکھی جائے گی:", + "Model architecture version:": "ماڈل آرکیٹیکچر ورژن:", + "Fusion": 
"امتزاج", + "Modify model information": "ماڈل کی معلومات میں ترمیم کریں۔", + "Path to Model:": "ماڈل کا راستہ:", + "Model information to be modified:": "ماڈل کی معلومات میں ترمیم کی جائے گی:", + "Save file name:": "فائل کا نام محفوظ کریں:", + "Modify": "ترمیم کریں۔", + "View model information": "ماڈل کی معلومات دیکھیں", + "View": "دیکھیں", + "Model extraction": "ماڈل نکالنا ('لاگز' فولڈر کے نیچے بڑی فائل ماڈل کا راستہ داخل کریں)۔ یہ مفید ہے اگر آپ تربیت کو آدھے راستے سے روکنا چاہتے ہیں اور دستی طور پر ایک چھوٹی ماڈل فائل کو نکالنا اور محفوظ کرنا چاہتے ہیں، یا اگر آپ انٹرمیڈیٹ ماڈل کی جانچ کرنا چاہتے ہیں:", + "Name:": "نام محفوظ کریں:", + "Whether the model has pitch guidance (1: yes, 0: no):": "آیا ماڈل میں پچ گائیڈنس ہے (1: ہاں، 0: نہیں):", + "Extract": "نکالنا", + "Export Onnx": "Onnx برآمد کریں۔", + "RVC Model Path:": "RVC ماڈل کا راستہ:", + "Onnx Export Path:": "Onnx برآمد کا راستہ:", + "MoeVS Model": "MoeVS ماڈل", + "Export Onnx Model": "Onnx ماڈل برآمد کریں۔", + "Load model": "لوڈ ماڈل", + "Hubert Model": "ہیوبرٹ ماڈل", + "Select the .pth file": ".pth فائل کو منتخب کریں۔", + "Select the .index file": ".index فائل کو منتخب کریں۔", + "Select the .npy file": ".npy فائل کو منتخب کریں۔", + "Input device": "ان پٹ ڈیوائس", + "Output device": "آؤٹ پٹ ڈیوائس", + "Audio device (please use the same type of driver)": "آڈیو ڈیوائس (براہ کرم ایک ہی قسم کا ڈرائیور استعمال کریں)", + "Response threshold": "جوابی حد", + "Pitch settings": "پچ کی ترتیبات", + "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "آیا نوٹ کے نام ان کی ہرٹز قدر کے بجائے استعمال کیے جائیں۔ ای جی [C5, D6] بجائے [523.25, 1174.66]Hz", + "Index Rate": "انڈیکس ریٹ", + "General settings": "عام ترتیبات", + "Sample length": "نمونہ کی لمبائی", + "Fade length": "دھندلا لمبائی", + "Extra inference time": "اضافی تخمینہ کا وقت", + "Input noise reduction": "ان پٹ شور کی کمی", + "Output noise reduction": "آؤٹ پٹ شور کی کمی", + "Performance settings": "کارکردگی کی ترتیبات", + "Start audio conversion": "آڈیو کی تبدیلی شروع کریں۔", + "Stop audio conversion": "آڈیو تبادلوں کو روکیں۔", + "Inference time (ms):": "انفرنس ٹائم (ms):", + "Select the pth file": "pth فائل کو منتخب کریں۔", + "Select the .index file:": "انڈیکس فائل کو منتخب کریں۔", + "The hubert model path must not contain Chinese characters": "ہیوبرٹ ماڈل پاتھ میں چینی حروف نہیں ہونے چاہئیں", + "The pth file path must not contain Chinese characters.": "pth فائل کا راستہ چینی حروف پر مشتمل نہیں ہونا چاہیے۔", + "The index file path must not contain Chinese characters.": "انڈیکس فائل کا راستہ چینی حروف پر مشتمل نہیں ہونا چاہیے۔", + "Step algorithm": "مرحلہ الگورتھم", + "Number of epoch processes": "عہد کے عمل کی تعداد", + "Lowest points export": "کم ترین پوائنٹس کی برآمد", + "How many lowest points to save:": "کتنے کم پوائنٹس کو بچانا ہے۔", + "Export lowest points of a model": "ماڈل کے سب سے کم پوائنٹس برآمد کریں۔", + "Output models:": "آؤٹ پٹ ماڈلز", + "Stats of selected models:": "منتخب ماڈلز کے اعدادوشمار", + "Custom f0 [Root pitch] File": "اپنی مرضی کے مطابق f0 [روٹ پچ] فائل", + "Min pitch:": "منٹ پچ", + "Specify minimal pitch for inference [HZ]": "تخمینہ کے لیے کم سے کم پچ کی وضاحت کریں [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "قیاس کے لیے کم سے کم پچ کی وضاحت کریں [NOTE][OCTAVE]", + "Max pitch:": "زیادہ سے زیادہ پچ", + "Specify max pitch for inference [HZ]": "تخمینہ کے لیے زیادہ سے زیادہ پچ کی وضاحت کریں [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "تخمینہ کے لیے زیادہ سے زیادہ پچ کی وضاحت کریں 
[NOTE][OCTAVE]", + "Browse presets for formanting": "فارمیٹنگ کے لیے پیش سیٹوں کو براؤز کریں۔", + "Presets are located in formantshiftcfg/ folder": "presets formantshiftcfg/ فولڈر میں واقع ہیں۔", + "Default value is 1.0": "پہلے سے طے شدہ قدر 1.0 ہے۔", + "Quefrency for formant shifting": "فارمینٹ شفٹنگ کے لیے Quefrency", + "Timbre for formant shifting": "فارمینٹ شفٹنگ کے لیے ٹمبر", + "Apply": "درخواست دیں", + "Single": "سنگل", + "Batch": "بیچ", + "Separate YouTube tracks": "یوٹیوب ٹریکس کو الگ کریں۔", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "یوٹیوب ویڈیو سے آڈیو ڈاؤن لوڈ کریں اور خودکار طور پر آواز اور ساز کے ٹریک کو الگ کریں۔", + "Extra": "اضافی", + "Merge": "ضم", + "Merge your generated audios with the instrumental": "اپنے تیار کردہ آڈیوز کو انسٹرومینٹل کے ساتھ ضم کریں۔", + "Choose your instrumental:": "اپنے آلے کا انتخاب کریں۔", + "Choose the generated audio:": "تیار کردہ آڈیو کا انتخاب کریں۔", + "Combine": "یکجا", + "Download and Separate": "ڈاؤن لوڈ کریں اور الگ کریں۔", + "Enter the YouTube link:": "یوٹیوب کا لنک درج کریں۔", + "This section contains some extra utilities that often may be in experimental phases": "اس حصے میں کچھ اضافی افادیتیں ہیں جو اکثر تجرباتی مراحل میں ہو سکتی ہیں۔", + "Merge Audios": "آڈیوز کو ضم کریں۔", + "Audio files have been moved to the 'audios' folder.": "آڈیو فائلوں کو 'آڈیوز' فولڈر میں منتقل کر دیا گیا ہے۔", + "Downloading audio from the video...": "ویڈیو سے آڈیو ڈاؤن لوڈ ہو رہا ہے...", + "Audio downloaded!": "آڈیو ڈاؤن لوڈ!", + "An error occurred:": "ایک خرابی آگئی:", + "Separating audio...": "آڈیو کو الگ کیا جا رہا ہے...", + "File moved successfully.": "فائل کامیابی سے منتقل ہو گئی۔", + "Finished!": "ختم!", + "The source file does not exist.": "سورس فائل موجود نہیں ہے۔", + "Error moving the file:": "فائل کو منتقل کرنے میں خرابی:", + "Downloading {name} from drive": "ڈرائیو سے {name} ڈاؤن لوڈ ہو رہا ہے۔", + "The attempt to download using Drive didn't work": "Drive کا استعمال کرتے ہوئے ڈاؤن لوڈ کرنے کی کوشش نے کام نہیں کیا۔", + "Error downloading the file: {str(e)}": "فائل ڈاؤن لوڈ کرنے میں خرابی: {str(e)}", + "Downloading {name} from mega": "میگا سے {name} ڈاؤن لوڈ ہو رہا ہے۔", + "Downloading {name} from basic url": "بنیادی url سے {name} ڈاؤن لوڈ ہو رہا ہے۔", + "Download Audio": "آڈیو ڈاؤن لوڈ کریں۔", + "Download audios of any format for use in inference (recommended for mobile users).": "کسی بھی فارمیٹ کے آڈیوز کو قیاس میں استعمال کرنے کے لیے ڈاؤن لوڈ کریں (موبائل صارفین کے لیے تجویز کردہ)", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "تبدیلی کے بعد کی کوئی بھی ConnectionResetErrors غیر متعلقہ اور خالصتاً بصری ہیں۔ انہیں نظر انداز کیا جا سکتا ہے.", + "Processed audio saved at: ": "پروسیس شدہ آڈیو کو محفوظ کیا گیا:", + "Conversion complete!": "تبدیلی مکمل!", + "Reverb": "Reverb", + "Compressor": "کمپریسر", + "Noise Gate": "شور گیٹ", + "Volume": "حجم", + "Drag the audio here and click the Refresh button": "آڈیو کو یہاں گھسیٹیں اور ریفریش بٹن پر کلک کریں۔", + "Select the generated audio": "تیار کردہ آڈیو کو منتخب کریں۔", + "Volume of the instrumental audio:": "آلہ ساز آڈیو کا حجم", + "Volume of the generated audio:": "تیار کردہ آڈیو کا حجم", + "### Add the effects": "### اثرات شامل کریں۔", + "Starting audio conversion... (This might take a moment)": "آڈیو کنورشن شروع ہورہی ہے... 
(یہ تھوڑی دیر لگ سکتی ہے)", + "TTS Model:": "TTS آوازیں", + "TTS": "TTS", + "TTS Method:": "TTS میثاق", + "Audio TTS:": "آڈیو TTS", + "Audio RVC:": "آڈیو ماڈل", + "You can also drop your files to load your model.": "آپ اپنے ماڈل کو لوڈ کرنے کے لئے اپنے فائلوں کو بھی ڈراپ کرسکتے ہیں.", + "Drag your .pth file here:": "اپنے .pth فائل کو یہاں کھینچیں:", + "Drag your .index file here:": "اپنے .index فائل کو یہاں کھینچیں:" +} diff --git a/assets/i18n/langs/zh_CN.json b/assets/i18n/langs/zh_CN.json new file mode 100644 index 0000000000000000000000000000000000000000..201f546f14d9df0ce93ea4230b3ab47f6117eae6 --- /dev/null +++ b/assets/i18n/langs/zh_CN.json @@ -0,0 +1,248 @@ +{ + "Unfortunately, there is no compatible GPU available to support your training.": "不幸的是,没有可用的兼容 GPU 来支持您的训练。", + "Yes": "是的", + "Select your dataset:": "选择您的数据集。", + "Update list": "更新列表。", + "Download Model": "下载模型", + "Download Backup": "下载备份", + "Download Dataset": "下载数据集", + "Download": "下载", + "Url:": "网址:", + "Build the index before saving.": "保存前构建索引。", + "Save your model once the training ends.": "训练结束后保存您的模型。", + "Save type": "保存类型", + "Save model": "保存模型", + "Choose the method": "选择方法", + "Save all": "保存全部", + "Save D and G": "保存D和G", + "Save voice": "保存语音", + "Downloading the file: ": "下载文件:", + "Stop training": "停止训练", + "Too many users have recently viewed or downloaded this file": "最近有太多用户查看或下载了此文件", + "Cannot get file from this private link": "无法从此私人链接获取文件", + "Full download": "完整下载", + "An error occurred downloading": "下载时发生错误", + "Model saved successfully": "模型保存成功", + "Saving the model...": "保存模型...", + "Saved without index...": "保存时没有索引...", + "model_name": "型号名称", + "Saved without inference model...": "保存时没有推理模型...", + "An error occurred saving the model": "保存模型时出错", + "The model you want to save does not exist, be sure to enter the correct name.": "您要保存的模型不存在,请务必输入正确的名称。", + "The file could not be downloaded.": "无法下载该文件。", + "Unzip error.": "解压错误。", + "Path to your added.index file (if it didn't automatically find it)": "添加的.index 文件的路径(如果没有自动找到它)", + "It has been downloaded successfully.": "已经下载成功了。", + "Proceeding with the extraction...": "继续提取...", + "The Backup has been uploaded successfully.": "备份已成功上传。", + "The Dataset has been loaded successfully.": "数据集已成功加载。", + "The Model has been loaded successfully.": "模型已成功加载。", + "It is used to download your inference models.": "它用于下载您的推理模型。", + "It is used to download your training backups.": "它用于下载您的训练备份。", + "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "下载包含兼容格式 (.wav/.flac) 音频的数据集来训练您的模型。", + "No relevant file was found to upload.": "没有找到相关文件可以上传。", + "The model works for inference, and has the .index file.": "该模型用于推理,并具有 .index 文件。", + "The model works for inference, but it doesn't have the .index file.": "该模型适用于推理,但没有 .index 文件。", + "This may take a few minutes, please wait...": "这可能需要几分钟,请稍候...", + "Resources": "资源", + "Step 1: Processing data": "步骤一:处理数据", + "Step 2: Extracting features": "步骤2b:提取特征", + "Step 3: Model training started": "步骤3a:模型训练开始", + "Training is done, check train.log": "训练完成,查看train.log", + "All processes have been completed!": "所有流程已完成!", + "Model Inference": "模型推理", + "Inferencing voice:": "推理语音:", + "Model_Name": "型号名称", + "Dataset_Name": "数据集_名称", + "Or add your dataset path:": "或输入数据集的路径:", + "Whether the model has pitch guidance.": "模型是否有俯仰引导。", + "Whether to save only the latest .ckpt file to save hard drive space": "是否仅保存最新的.ckpt文件以节省硬盘空间", + "Cache all training sets to 
GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "将所有训练集缓存到 GPU 内存。缓存小数据集(少于 10 分钟)可以加快训练速度", + "Save a small final model to the 'weights' folder at each save point": "在每个保存点将一个小的最终模型保存到"权重"文件夹中", + "Refresh": "刷新语音列表、索引路径和音频文件", + "Unload voice to save GPU memory": "卸载语音以节省 GPU 内存:", + "Select Speaker/Singer ID:": "选择演讲者/歌手 ID:", + "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "建议+12键用于男性到女性的转换,-12键用于女性到男性的转换。如果音域走得太远,声音失真,也可以自行调整到合适的音域。", + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):": "移调(整数,半音数,升高八度:12,降低八度:-12):", + "Enter the path of the audio file to be processed (default is the correct format example):": "输入要处理的音频文件的路径(默认为正确格式示例):", + "Select the pitch extraction algorithm:": "选择音高提取算法:", + "Feature search dataset file path": "特征搜索数据集文件路径", + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "如果 >=3:对提取的音高结果应用中值滤波。该值代表滤波半径,可以减少气声。", + "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "特征索引文件的路径。留空以使用下拉列表中选定的结果:", + "Auto-detect index path and select from the dropdown:": "自动检测索引路径并从下拉列表中选择:", + "Path to feature file:": "特征文件的路径:", + "Search feature ratio:": "搜索特征比例:", + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "在后处理中将输出音频重新采样到最终采样率。设置为 0 表示不重采样:", + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "使用输入的音量包络来替换或与输出的音量包络混合。该比率越接近 1,使用的输出包络就越多:", + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "保护清辅音和呼吸音,以防止电子音乐中出现撕裂等伪影。设置为 0.5 以禁用。减小该值可增强保护,但可能会降低索引精度:", + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 曲线文件(可选)。每行一个音高。替换默认的 F0 和音调调制:", + "Convert": "转换", + "Output information:": "输出信息", + "Export audio (click on the three dots in the lower right corner to download)": "导出音频(点击右下角三点即可下载)", + "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "批量转换。输入包含要转换的音频文件的文件夹或上传多个音频文件。转换后的音频将输出到指定文件夹(默认:"opt")。", + "Specify output folder:": "指定输出文件夹:", + "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "输入要处理的音频文件夹路径(从文件管理器地址栏复制):", + "You can also input audio files in batches. Choose one of the two options. 
Priority is given to reading from the folder.": "您还可以批量输入音频文件。选择两个选项之一。优先从文件夹中读取。", + "Export file format": "导出文件格式", + "UVR5": "UVR5", + "Enter the path of the audio folder to be processed:": "输入要处理的音频文件夹路径:", + "Model": "模型", + "Vocal Extraction Aggressive": "人声提取激进程度", + "Specify the output folder for vocals:": "指定人声的输出文件夹:", + "Specify the output folder for accompaniment:": "指定伴奏的输出文件夹:", + "Train": "训练", + "Enter the model name:": "输入模型名称:", + "Target sample rate:": "目标采样率:", + "Whether the model has pitch guidance (required for singing, optional for speech):": "模型是否有音调引导(唱歌时需要,语音时可选):", + "Version": "版本", + "Number of CPU processes:": "用于音高提取和数据处理的CPU进程数:", + "Enter the path of the training folder:": "输入训练文件夹的路径:", + "Specify the model ID:": "请指定模型 ID:", + "Auto detect audio path and select from the dropdown:": "自动检测音频路径并从下拉列表中选择:", + "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "将音频的名称添加到要处理的音频文件的路径中(默认是正确的格式示例);删除该路径即可使用下拉列表中的音频:", + "Advanced Settings": "高级设置", + "Settings": "设置", + "Status:": "状态:", + "Process data": "处理数据", + "Drag your audio here:": "将音频拖到此处并点击刷新按钮", + "Or record an audio:": "或者录制音频:", + "Formant shift inference audio": "共振峰移位推断音频", + "Used for male to female and vice-versa conversions": "用于男性到女性的转换,反之亦然", + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "请提供以"-"分隔的 GPU 索引,例如使用 GPU 0、1 和 2 时为 0-1-2:", + "GPU Information:": "GPU信息:", + "Feature extraction": "特征提取", + "Save frequency:": "保存频率:", + "Training epochs:": "训练轮数:", + "Batch size per GPU:": "每个 GPU 的批量大小:", + "Save only the latest '.ckpt' file to save disk space:": "仅保存最新的".ckpt"文件以节省磁盘空间:", + "No": "不", + "Save a small final model to the 'weights' folder at each save point:": "在每个保存点将一个小的最终模型保存到"权重"文件夹中:", + "Load pre-trained base model G path:": "加载预训练的基础模型G路径:", + "Load pre-trained base model D path:": "加载预训练的基础模型D路径:", + "Train model": "训练模型", + "Train feature index": "训练特征索引", + "One-click training": "一键训练", + "Processing": "处理", + "Model fusion, can be used to test timbre fusion": "模型融合,可用于测试音色融合", + "Path to Model A:": "模型 A 的路径:", + "Path to Model B:": "模型 B 的路径:", + "Weight for Model A:": "模型 A 的权重:", + "Whether the model has pitch guidance:": "模型是否有音调引导:", + "Model information to be placed:": "要置入的模型信息:", + "Model architecture version:": "模型架构版本:", + "Fusion": "融合", + "Modify model information": "修改模型信息", + "Path to Model:": "模型路径:", + "Model information to be modified:": "待修改的模型信息:", + "Save file name:": "保存文件名:", + "Modify": "修改", + "View model information": "查看模型信息", + "View": "查看", + "Model extraction": "模型提取(输入"logs"文件夹下大文件模型的路径)。如果您想中途停止训练并手动提取并保存一个小模型文件,或者如果您想测试中间模型,这非常有用:", + "Name:": "保存名称:", + "Whether the model has pitch guidance (1: yes, 0: no):": "模型是否有音调引导(1:有,0:无):", + "Extract": "提取", + "Export Onnx": "导出Onnx", + "RVC Model Path:": "RVC模型路径:", + "Onnx Export Path:": "Onnx 导出路径:", + "MoeVS Model": "MoeVS模型", + "Export Onnx Model": "导出 Onnx 模型", + "Load model": "加载模型", + "Hubert Model": "Hubert 模型", + "Select the .pth file": "选择 .pth 文件", + "Select the .index file": "选择.index文件", + "Select the .npy file": "选择.npy 文件", + "Input device": "输入设备", + "Output device": "输出设备", + "Audio device (please use the same type of driver)": "音频设备(请使用同类型驱动程序)", + "Response threshold": "响应阈值", + "Pitch settings": "音调设置", + "Whether to use note names instead of their hertz value. E.G. 
[C5, D6] instead of [523.25, 1174.66]Hz": "是否使用音符名称而不是赫兹值。例如 [C5, D6] 而不是 [523.25, 1174.66]Hz", + "Index Rate": "索引率", + "General settings": "常规设置", + "Sample length": "采样长度", + "Fade length": "淡入淡出长度", + "Extra inference time": "额外的推理时间", + "Input noise reduction": "输入噪声降低", + "Output noise reduction": "输出噪声降低", + "Performance settings": "性能设置", + "Start audio conversion": "开始音频转换", + "Stop audio conversion": "停止音频转换", + "Inference time (ms):": "推理时间(毫秒):", + "Select the pth file": "选择.pth文件", + "Select the .index file:": "选择 .index 文件:", + "The hubert model path must not contain Chinese characters": "hubert模型路径不能包含中文字符", + "The pth file path must not contain Chinese characters.": "pth文件路径不能包含中文字符。", + "The index file path must not contain Chinese characters.": "索引文件路径不能包含中文字符。", + "Step algorithm": "步进算法", + "Number of epoch processes": "纪元进程数", + "Lowest points export": "最低点导出", + "How many lowest points to save:": "保存多少个最低点", + "Export lowest points of a model": "导出模型的最低点", + "Output models:": "输出模型:", + "Stats of selected models:": "所选模型的统计数据", + "Custom f0 [Root pitch] File": "自定义 f0 [根音] 文件", + "Min pitch:": "最低音高:", + "Specify minimal pitch for inference [HZ]": "指定推理的最低音高 [HZ]", + "Specify minimal pitch for inference [NOTE][OCTAVE]": "指定推理的最低音高 [NOTE][OCTAVE]", + "Max pitch:": "最高音高:", + "Specify max pitch for inference [HZ]": "指定推理的最高音高 [HZ]", + "Specify max pitch for inference [NOTE][OCTAVE]": "指定推理的最高音高 [NOTE][OCTAVE]", + "Browse presets for formanting": "浏览共振峰预设", + "Presets are located in formantshiftcfg/ folder": "预设位于formantshiftcfg/文件夹中", + "Default value is 1.0": "默认值为 1.0", + "Quefrency for formant shifting": "共振峰移位的倒频率(quefrency)", + "Timbre for formant shifting": "共振峰转换的音色", + "Apply": "应用", + "Single": "单个", + "Batch": "批量", + "Separate YouTube tracks": "分离 YouTube 音轨", + "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "从 YouTube 视频下载音频并自动分离人声和伴奏音轨", + "Extra": "额外的", + "Merge": "合并", + "Merge your generated audios with the instrumental": "将生成的音频与伴奏合并", + "Choose your instrumental:": "选择您的伴奏:", + "Choose the generated audio:": "选择生成的音频", + "Combine": "结合", + "Download and Separate": "下载并分离", + "Enter the YouTube link:": "输入 YouTube 链接", + "This section contains some extra utilities that often may be in experimental phases": "本节包含一些通常可能处于实验阶段的额外实用程序", + "Merge Audios": "合并音频", + "Audio files have been moved to the 'audios' folder.": "音频文件已移至"audios"文件夹。", + "Downloading audio from the video...": "正在从视频下载音频...", + "Audio downloaded!": "音频已下载!", + "An error occurred:": "发生错误:", + "Separating audio...": "正在分离音频...", + "File moved successfully.": "文件移动成功。", + "Finished!": "完成!", + "The source file does not exist.": "源文件不存在。", + "Error moving the file:": "移动文件时出错:", + "Downloading {name} from drive": "正在从云端硬盘下载 {name}", + "The attempt to download using Drive didn't work": "尝试使用云端硬盘下载失败", + "Error downloading the file: {str(e)}": "下载文件时出错:{str(e)}", + "Downloading {name} from mega": "正在从 mega 下载 {name}", + "Downloading {name} from basic url": "正在从基本网址下载 {name}", + "Download Audio": "下载音频", + "Download audios of any format for use in inference (recommended for mobile users).": "下载任何格式的音频用于推理(推荐移动用户)", + "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "转换后的任何 ConnectionResetErrors 都是无关紧要的并且纯粹是视觉上的;它们可以被忽略。", + "Processed audio saved at: ": "处理后的音频保存在:", + "Conversion complete!": "转换完成!", + "Reverb": "混响", + "Compressor": "压缩器", + "Noise Gate": "噪声门", + "Volume": "音量", + "Drag the audio here and click the 
Refresh button": "将音频拖至此处并单击刷新按钮", + "Select the generated audio": "选择生成的音频", + "Volume of the instrumental audio:": "乐器音频的音量", + "Volume of the generated audio:": "生成音频的音量", + "### Add the effects": "### 添加效果", + "Starting audio conversion... (This might take a moment)": "开始音频转换...(这可能需要一点时间)", + "TTS Model:": "TTS 语音", + "TTS": "TTS", + "TTS Method:": "TTS 方法", + "Audio TTS:": "音频 TTS", + "Audio RVC:": "音频模型", + "You can also drop your files to load your model.": "您还可以拖放文件以加载模型。", + "Drag your .pth file here:": "将您的 .pth 文件拖到这里:", + "Drag your .index file here:": "将您的 .index 文件拖到这里:" +} diff --git a/assets/i18n/locale_diff.py b/assets/i18n/locale_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..387ddfe1b16c2f9f32b6b9682b61353837b06bd8 --- /dev/null +++ b/assets/i18n/locale_diff.py @@ -0,0 +1,45 @@ +import json +import os +from collections import OrderedDict + +# Define the standard file name +standard_file = "en_US.json" + +# Find all JSON files in the directory +dir_path = "./" +languages = [ + f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file +] + +# Load the standard file +with open(standard_file, "r", encoding="utf-8") as f: + standard_data = json.load(f, object_pairs_hook=OrderedDict) + +# Loop through each language file +for lang_file in languages: + # Load the language file + with open(lang_file, "r", encoding="utf-8") as f: + lang_data = json.load(f, object_pairs_hook=OrderedDict) + + # Find the difference between the language file and the standard file + diff = set(standard_data.keys()) - set(lang_data.keys()) + + miss = set(lang_data.keys()) - set(standard_data.keys()) + + # Add any missing keys to the language file + for key in diff: + lang_data[key] = key + + # Del any extra keys to the language file + for key in miss: + del lang_data[key] + + # Sort the keys of the language file to match the order of the standard file + lang_data = OrderedDict( + sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) + ) + + # Save the updated language file + with open(lang_file, "w", encoding="utf-8") as f: + json.dump(lang_data, f, ensure_ascii=False, indent=4) + f.write("\n") diff --git a/assets/i18n/scan_i18n.py b/assets/i18n/scan_i18n.py new file mode 100644 index 0000000000000000000000000000000000000000..1b27f5b195111aebf8811e24def98641f46e3db4 --- /dev/null +++ b/assets/i18n/scan_i18n.py @@ -0,0 +1,75 @@ +import ast +import glob +import json +from collections import OrderedDict + + +def extract_i18n_strings(node): + i18n_strings = [] + + if ( + isinstance(node, ast.Call) + and isinstance(node.func, ast.Name) + and node.func.id == "i18n" + ): + for arg in node.args: + if isinstance(arg, ast.Str): + i18n_strings.append(arg.s) + + for child_node in ast.iter_child_nodes(node): + i18n_strings.extend(extract_i18n_strings(child_node)) + + return i18n_strings + + +# scan the directory for all .py files (recursively) +# for each file, parse the code into an AST +# for each AST, extract the i18n strings + +strings = [] +for filename in glob.iglob("**/*.py", recursive=True): + with open(filename, "r") as f: + code = f.read() + if "I18nAuto" in code: + tree = ast.parse(code) + i18n_strings = extract_i18n_strings(tree) + print(filename, len(i18n_strings)) + strings.extend(i18n_strings) +code_keys = set(strings) +""" +n_i18n.py +gui_v1.py 26 +app.py 16 +infer-web.py 147 +scan_i18n.py 0 +i18n.py 0 +lib/train/process_ckpt.py 1 +""" +print() +print("Total unique:", len(code_keys)) + + +standard_file = 
"i18n/langs/en_US.json" +with open(standard_file, "r", encoding="utf-8") as f: + standard_data = json.load(f, object_pairs_hook=OrderedDict) +standard_keys = set(standard_data.keys()) + +# Define the standard file name +unused_keys = standard_keys - code_keys +print("Unused keys:", len(unused_keys)) +for unused_key in unused_keys: + print("\t", unused_key) + +missing_keys = code_keys - standard_keys +print("Missing keys:", len(missing_keys)) +for missing_key in missing_keys: + print("\t", missing_key) + +code_keys_dict = OrderedDict() +for s in strings: + code_keys_dict[s] = s + +# write back +with open(standard_file, "w", encoding="utf-8") as f: + json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True) + f.write("\n") diff --git a/assets/images/icon.png b/assets/images/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..f9aae65bbbb712383531d26e6a956362576634fb Binary files /dev/null and b/assets/images/icon.png differ diff --git a/assets/pretrained/.gitignore b/assets/pretrained/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d6b7ef32c8478a48c3994dcadc86837f4371184d --- /dev/null +++ b/assets/pretrained/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/assets/pretrained_v2/.gitignore b/assets/pretrained_v2/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d6b7ef32c8478a48c3994dcadc86837f4371184d --- /dev/null +++ b/assets/pretrained_v2/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/assets/requirements/requirements-amd.txt b/assets/requirements/requirements-amd.txt new file mode 100644 index 0000000000000000000000000000000000000000..aa81a88c01938bbd076755fe5bc9ab66bb6be5e8 --- /dev/null +++ b/assets/requirements/requirements-amd.txt @@ -0,0 +1,48 @@ +tensorflow-rocm +joblib>=1.1.0 +numba==0.56.4 +numpy==1.23.5 +scipy +librosa==0.9.1 +llvmlite==0.39.0 +fairseq==0.12.2 +faiss-cpu==1.7.3 +gradio==3.34.0 +Cython +pydub>=0.25.1 +soundfile>=0.12.1 +ffmpeg-python>=0.2.0 +tensorboardX +Jinja2>=3.1.2 +json5 +Markdown +matplotlib>=3.7.0 +matplotlib-inline>=0.1.3 +praat-parselmouth>=0.4.2 +Pillow>=9.1.1 +resampy>=0.4.2 +scikit-learn +tensorboard +tqdm>=4.63.1 +tornado>=6.1 +Werkzeug>=2.2.3 +uc-micro-py>=1.0.1 +sympy>=1.11.1 +tabulate>=0.8.10 +PyYAML>=6.0 +pyasn1>=0.4.8 +pyasn1-modules>=0.2.8 +fsspec>=2022.11.0 +absl-py>=1.2.0 +audioread +uvicorn>=0.21.1 +colorama>=0.4.5 +pyworld==0.3.2 +httpx +onnxruntime +onnxruntime-gpu +torchcrepe==0.0.20 +fastapi==0.88 +ffmpy==0.3.1 +python-dotenv>=1.0.0 +av diff --git a/assets/requirements/requirements-applio.txt b/assets/requirements/requirements-applio.txt new file mode 100644 index 0000000000000000000000000000000000000000..771844871f20c82fc09571f4c7a3449978b40ea7 --- /dev/null +++ b/assets/requirements/requirements-applio.txt @@ -0,0 +1,37 @@ +setuptools +pydantic +wheel +google-auth-oauthlib +pedalboard +websockets>=10.0 +gTTS==2.3.2 +wget +psutil +scikit-learn-intelex +mega.py==1.0.8 +git+https://github.com/wkentaro/gdown.git +edge-tts +git+https://github.com/suno-ai/bark.git +nltk +noisereduce==2.0.1 +unidecode +onnxruntime +onnxruntime_gpu==1.15.1 +opencv_python_headless==4.8.0.74 +pandas==2.0.3 +PySimpleGUI==4.60.5 +requests==2.31.0 +scikit_learn==1.3.0 +yt_dlp==2023.9.24 +sounddevice==0.4.6 +tensorboard==2.13.0 +tb_nightly==2.14.0a20230803 +python-dotenv>=1.0.0 +protobuf==3.20.2 +gin +gin_config +flask_cors +flask +https://github.com/soudabot/fairseq-build-whl/releases/download/3.11/fairseq-0.12.3-cp311-cp311-linux_x86_64.whl; 
sys_platform == 'linux' +https://github.com/soudabot/fairseq-build-whl/releases/download/3.11/fairseq-0.12.3-cp311-cp311-win_amd64.whl; sys_platform == 'win32' +https://github.com/soudabot/fairseq-build-whl/releases/download/3.11/fairseq-0.12.3-cp311-cp311-macosx_10_9_universal2.whl; sys_platform == 'darwin' diff --git a/assets/requirements/requirements-dml.txt b/assets/requirements/requirements-dml.txt new file mode 100644 index 0000000000000000000000000000000000000000..884c6d41661a3eb1d2a5f20f278d15ee1d88eced --- /dev/null +++ b/assets/requirements/requirements-dml.txt @@ -0,0 +1,46 @@ +joblib>=1.1.0 +numba==0.56.4 +numpy==1.23.5 +scipy +librosa==0.9.1 +llvmlite==0.39.0 +fairseq==0.12.2 +faiss-cpu==1.7.3 +gradio==3.34.0 +Cython +pydub>=0.25.1 +soundfile>=0.12.1 +ffmpeg-python>=0.2.0 +tensorboardX +Jinja2>=3.1.2 +json5 +Markdown +matplotlib>=3.7.0 +matplotlib-inline>=0.1.3 +praat-parselmouth>=0.4.2 +Pillow>=9.1.1 +resampy>=0.4.2 +scikit-learn +tensorboard +tqdm>=4.63.1 +tornado>=6.1 +Werkzeug>=2.2.3 +uc-micro-py>=1.0.1 +sympy>=1.11.1 +tabulate>=0.8.10 +PyYAML>=6.0 +pyasn1>=0.4.8 +pyasn1-modules>=0.2.8 +fsspec>=2022.11.0 +absl-py>=1.2.0 +audioread +uvicorn>=0.21.1 +colorama>=0.4.5 +pyworld==0.3.2 +httpx +onnxruntime-directml +torchcrepe==0.0.20 +fastapi==0.88 +ffmpy==0.3.1 +python-dotenv>=1.0.0 +av \ No newline at end of file diff --git a/assets/requirements/requirements-ipex.txt b/assets/requirements/requirements-ipex.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a96cf0001d90e2b0bf7d5369b73663456c3b592 --- /dev/null +++ b/assets/requirements/requirements-ipex.txt @@ -0,0 +1,54 @@ +torch==2.0.1a0 +intel_extension_for_pytorch==2.0.110+xpu +torchvision==0.15.2a0 +https://github.com/Disty0/Retrieval-based-Voice-Conversion-WebUI/releases/download/torchaudio_wheels_for_ipex/torchaudio-2.0.2+31de77d-cp310-cp310-linux_x86_64.whl +-f https://developer.intel.com/ipex-whl-stable-xpu +joblib>=1.1.0 +numba==0.56.4 +numpy==1.23.5 +scipy +librosa==0.9.1 +llvmlite==0.39.0 +fairseq==0.12.2 +faiss-cpu==1.7.3 +gradio==3.34.0 +Cython +pydub>=0.25.1 +soundfile>=0.12.1 +ffmpeg-python>=0.2.0 +tensorboardX +Jinja2>=3.1.2 +json5 +Markdown +matplotlib>=3.7.0 +matplotlib-inline>=0.1.3 +praat-parselmouth>=0.4.2 +Pillow>=9.1.1 +resampy>=0.4.2 +scikit-learn +tensorboard +tqdm>=4.63.1 +tornado>=6.1 +Werkzeug>=2.2.3 +uc-micro-py>=1.0.1 +sympy>=1.11.1 +tabulate>=0.8.10 +PyYAML>=6.0 +pyasn1>=0.4.8 +pyasn1-modules>=0.2.8 +fsspec>=2022.11.0 +absl-py>=1.2.0 +audioread +uvicorn>=0.21.1 +colorama>=0.4.5 +pyworld==0.3.2 +httpx +onnxruntime; sys_platform == 'darwin' +onnxruntime-gpu; sys_platform != 'darwin' +torchcrepe==0.0.20 +fastapi==0.88 +ffmpy==0.3.1 +python-dotenv>=1.0.0 +av +PySimpleGUI +sounddevice \ No newline at end of file diff --git a/assets/requirements/requirements-realtime-vc.txt b/assets/requirements/requirements-realtime-vc.txt new file mode 100644 index 0000000000000000000000000000000000000000..9d6935bfebd2df6533e8fafe2aa116d8ccf92085 --- /dev/null +++ b/assets/requirements/requirements-realtime-vc.txt @@ -0,0 +1,29 @@ +#1.Install torch from pytorch.org: +#torch 2.0 with cuda 11.8 +#pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +#torch 1.11.0 with cuda 11.3 +#pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 +einops +fairseq +flask +flask_cors +gin +gin_config +librosa +local_attention +matplotlib +praat-parselmouth +pyworld +PyYAML +resampy 
+scikit_learn +scipy +SoundFile +tensorboard +tqdm +wave +PySimpleGUI +sounddevice +gradio +noisereduce +torchcrepe==0.0.20 diff --git a/assets/requirements/requirements-win-for-realtime_vc_gui-dml.txt b/assets/requirements/requirements-win-for-realtime_vc_gui-dml.txt new file mode 100644 index 0000000000000000000000000000000000000000..651498960713e42c8a843c2507f0413141794807 --- /dev/null +++ b/assets/requirements/requirements-win-for-realtime_vc_gui-dml.txt @@ -0,0 +1,29 @@ +#1.Install torch from pytorch.org: +#torch 2.0 with cuda 11.8 +#pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +#torch 1.11.0 with cuda 11.3 +#pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 +einops +fairseq +flask +flask_cors +gin +gin_config +librosa +local_attention +matplotlib +praat-parselmouth +pyworld +PyYAML +resampy +scikit_learn +scipy +SoundFile +tensorboard +tqdm +wave +PySimpleGUI +sounddevice +gradio +noisereduce +onnxruntime-directml \ No newline at end of file diff --git a/assets/requirements/requirements-win-for-realtime_vc_gui.txt b/assets/requirements/requirements-win-for-realtime_vc_gui.txt new file mode 100644 index 0000000000000000000000000000000000000000..37ca23823fb800612369078bbb8908204442937a --- /dev/null +++ b/assets/requirements/requirements-win-for-realtime_vc_gui.txt @@ -0,0 +1,28 @@ +#1.Install torch from pytorch.org: +#torch 2.0 with cuda 11.8 +#pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +#torch 1.11.0 with cuda 11.3 +#pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 +einops +fairseq +flask +flask_cors +gin +gin_config +librosa +local_attention +matplotlib +praat-parselmouth +pyworld +PyYAML +resampy +scikit_learn +scipy +SoundFile +tensorboard +tqdm +wave +PySimpleGUI +sounddevice +gradio +noisereduce diff --git a/assets/requirements/requirements.txt b/assets/requirements/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e1540be2c686379bd6686faad45d5fc3b62eec44 --- /dev/null +++ b/assets/requirements/requirements.txt @@ -0,0 +1,62 @@ +tornado>=6.1 +setuptools +pydantic +fairseq==0.12.2 +wheel +google-auth-oauthlib +pedalboard +pydub==0.25.1 +httpx==0.23.0 +faiss_cpu==1.7.3 +ffmpeg_python==0.2.0 +ffmpy==0.3.1 +websockets>=10.0 +gradio==3.34.0 +librosa==0.9.1 +elevenlabs +gTTS==2.3.2 +wget +psutil +matplotlib==3.7.2 +mega.py==1.0.8 +git+https://github.com/wkentaro/gdown.git +edge-tts +git+https://github.com/suno-ai/bark.git +nltk +noisereduce==2.0.1 +unidecode +numba==0.57.1 +numpy==1.23.5 +onnxruntime +onnxruntime_gpu==1.15.1 +opencv_python==4.8.0.74 +opencv_python_headless==4.8.0.74 +pandas==2.0.3 +praat-parselmouth==0.4.2 +PySimpleGUI==4.60.5 +pyworld==0.3.4 +requests==2.31.0 +resampy==0.4.2 +scikit_learn==1.3.0 +scipy==1.11.1 +yt_dlp==2023.9.24 +sounddevice==0.4.6 +soundfile==0.12.1 +tensorboard==2.13.0 +tb_nightly==2.14.0a20230803 +torch==2.0.0 +torchcrepe==0.0.21 +torch_directml==0.2.0.dev230426 +torchaudio==2.0.1 +torchvision==0.15.1 +torchgen>=0.0.1 +tqdm==4.65.0 +python-dotenv>=1.0.0 +av +fastapi==0.95.2 +protobuf==3.20.2 +tensorboardX==2.6.2.1 +gin +gin_config +flask_cors +flask diff --git a/assets/rmvpe/.gitignore b/assets/rmvpe/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/assets/themes/Acat.py b/assets/themes/Acat.py new file mode 100644 index 0000000000000000000000000000000000000000..32c0dbe3bd221eb97ca329a77fe193d124ce1fdd --- /dev/null +++ b/assets/themes/Acat.py @@ -0,0 +1,315 @@ +from __future__ import annotations + +from typing import Iterable +import gradio as gr + +#gr.themes.builder() +from gradio.themes.base import Base +from gradio.themes.utils import colors, fonts, sizes +import time + +class Acat(Base): + def __init__( + self, + *, + primary_hue: colors.Color | str = colors.green, + secondary_hue: colors.Color | str = colors.emerald, + neutral_hue: colors.Color | str = colors.neutral, + spacing_size: sizes.Size | str = sizes.spacing_md, + radius_size: sizes.Size | str = sizes.radius_md, + text_size: sizes.Size | str = sizes.text_lg, + font: fonts.Font + | str + | Iterable[fonts.Font | str] = ( + 'Inter V', + fonts.GoogleFont('Asap'), + 'ui-sans-serif', + 'sans-serif', + ), + font_mono: fonts.Font + | str + | Iterable[fonts.Font | str] = ( + 'ui-monospace', + fonts.GoogleFont("Fira Code"), + 'Consolas', + 'monospace', + ), + ): + super().__init__( + primary_hue=primary_hue, + secondary_hue=secondary_hue, + neutral_hue=neutral_hue, + spacing_size=spacing_size, + radius_size=radius_size, + text_size=text_size, + font=font, + font_mono=font_mono, + ) + # Assign plain strings (no trailing commas, which would turn each value into a 1-tuple and break CSS generation) + self.name = "Acat" + self.secondary_100='#e2effc' + self.secondary_200='#bedff9' + self.secondary_300='#84c5f5' + self.secondary_400='#4eacef' + self.secondary_50='#f1f8fe' + self.secondary_500='#198cde' + self.secondary_600='#0c6ebd' + self.secondary_700='#0b5899' + self.secondary_800='#0e4b7e' + self.secondary_900='#113f69' + self.secondary_950='#0b2846' + self.neutral_100='#e2effc' + self.neutral_200='#bedff9' + self.neutral_300='#84c5f5' + self.neutral_400='#4eacef' + self.neutral_50='#f1f8fe' + self.neutral_500='#198cde' + self.neutral_600='#0c6ebd' + self.neutral_700='#0b5899' + self.neutral_800='#0e4b7e' + self.neutral_900='#113f69' + self.neutral_950='#0b2846' + self.primary_100='#e2effc' + self.primary_200='#bedff9' + self.primary_300='#84c5f5' + self.primary_400='#4eacef' + self.primary_50='#f1f8fe' + self.primary_500='#198cde' + self.primary_600='#0c6ebd' + self.primary_700='#0b5899' + self.primary_800='#0e4b7e' + self.primary_900='#113f69' + self.primary_950='#0b2846' + super().set( + # Blaise + background_fill_primary='#FFFFFF', + background_fill_primary_dark='#000000', + background_fill_secondary='#dce3e8', + background_fill_secondary_dark='#242424', + block_background_fill='#ECF2F7', + block_background_fill_dark='#191919', + block_border_color='#dce3e8', + block_border_color_dark='#242424', + block_border_width='1px', + block_info_text_color='#191919', + block_info_text_color_dark='#ECF2F7', + block_info_text_size='*text_sm', + block_info_text_weight='400', + block_label_background_fill='#ECF2F700', + block_label_background_fill_dark='#19191900', + block_label_border_color='#dce3e8', + block_label_border_color_dark='#242424', + block_label_border_width='1px', + block_label_margin='0', + block_label_padding='*spacing_sm *spacing_lg', + block_label_radius= "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", + block_label_right_radius= "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", + block_label_shadow='*block_shadow', + block_label_text_color='#4EACEF', + block_label_text_color_dark='#4EACEF', + block_label_text_size='*text_sm', + block_label_text_weight='400', + block_padding='*spacing_xl calc(*spacing_xl + 2px)', + block_radius='*radius_lg', + 
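+ # The shadow colors below use 8-digit hex values ending in 00 (fully transparent), so blocks render flat in both light and dark modes.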
block_shadow='#FFFFFF00', + block_shadow_dark='#00000000', + block_title_background_fill='#ECF2F700', + block_title_background_fill_dark='#19191900', + block_title_border_color='#dce3e8', + block_title_border_color_dark='#242424', + block_title_border_width='0px', + block_title_padding='0', + block_title_radius='none', + block_title_text_color='#4EACEF', + block_title_text_color_dark='#4EACEF', + block_title_text_size='*text_md', + block_title_text_weight='bold', + body_background_fill="url('https://cdn.discordapp.com/attachments/1151036558389026838/1158634938128670730/image-removebg-preview_1.png') #FFFFFF no-repeat right bottom/auto 30svh padding-box fixed", + body_background_fill_dark="url('https://cdn.discordapp.com/attachments/1151036558389026838/1158634938128670730/image-removebg-preview_1.png') #000000 no-repeat right bottom/auto 30svh padding-box fixed", + body_text_color='#191919', + body_text_color_dark='#ECF2F7', + body_text_color_subdued='#636668', + body_text_color_subdued_dark='#c4c4c4', + body_text_size='*text_md', + body_text_weight='400', + border_color_accent='#dce3e8', + border_color_accent_dark='#242424', + border_color_primary='#dce3e8', + border_color_primary_dark='#242424', + button_border_width='*input_border_width', + button_border_width_dark='*input_border_width', + button_cancel_background_fill='#dce3e8', + button_cancel_background_fill_dark='#242424', + button_cancel_background_fill_hover='#d0d7db', + button_cancel_background_fill_hover_dark='#202020', + button_cancel_border_color='#191919', + button_cancel_border_color_dark='#ECF2F7', + button_cancel_border_color_hover='#202020', + button_cancel_border_color_hover_dark='#a1c3d8', + button_cancel_text_color='#4EACEF', + button_cancel_text_color_dark='#4EACEF', + button_cancel_text_color_hover='#0c6ebd', + button_cancel_text_color_hover_dark='#0c6ebd', + button_large_padding='*spacing_lg calc(2 * *spacing_lg)', + button_large_radius='*radius_lg', + button_large_text_size='*text_lg', + button_large_text_weight='600', + button_primary_background_fill='#4EACEF', + button_primary_background_fill_dark='#4EACEF', + button_primary_background_fill_hover='#0c6ebd', + button_primary_background_fill_hover_dark='#0c6ebd', + button_primary_border_color='#191919', + button_primary_border_color_dark='#ECF2F7', + button_primary_border_color_hover='#202020', + button_primary_border_color_hover_dark='#a1c3d8', + button_primary_text_color='#ECF2F7', + button_primary_text_color_dark='#191919', + button_primary_text_color_hover='#e1eaf0', + button_primary_text_color_hover_dark='#141414', + button_secondary_background_fill='#dce3e8', + button_secondary_background_fill_dark='#242424', + button_secondary_background_fill_hover='#d0d7db', + button_secondary_background_fill_hover_dark='#202020', + button_secondary_border_color='#dce3e8', + button_secondary_border_color_dark='#242424', + button_secondary_border_color_hover='#d0d7db', + button_secondary_border_color_hover_dark='#202020', + button_secondary_text_color='#4EACEF', + button_secondary_text_color_dark='#4EACEF', + button_secondary_text_color_hover='#0c6ebd', + button_secondary_text_color_hover_dark='#0c6ebd', + button_shadow='none', + button_shadow_active='none', + button_shadow_hover='none', + button_small_padding='*spacing_sm calc(2 * *spacing_sm)', + button_small_radius='*radius_lg', + button_small_text_size='*text_md', + button_small_text_weight='400', + button_transition='background-color 0.2s ease', + chatbot_code_background_color='#FFFFFF', + 
chatbot_code_background_color_dark='#000000', + checkbox_background_color='#dce3e8', + checkbox_background_color_dark='#242424', + checkbox_background_color_focus='#dce3e8', + checkbox_background_color_focus_dark='#242424', + checkbox_background_color_hover='#dce3e8', + checkbox_background_color_hover_dark='#242424', + checkbox_background_color_selected='#4EACEF', + checkbox_background_color_selected_dark='#4EACEF', + checkbox_border_color='#dce3e8', + checkbox_border_color_dark='#242424', + checkbox_border_color_focus='#4EACEF', + checkbox_border_color_focus_dark='#4EACEF', + checkbox_border_color_hover='#4EACEF', + checkbox_border_color_hover_dark='#4EACEF', + checkbox_border_color_selected='#4EACEF', + checkbox_border_color_selected_dark='#4EACEF', + checkbox_border_radius='*radius_sm', + checkbox_border_width='1px', + checkbox_border_width_dark='1px', + checkbox_check= "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")", + checkbox_label_background_fill='#ECF2F7', + checkbox_label_background_fill_dark='#191919', + checkbox_label_background_fill_hover='#dce3e8', + checkbox_label_background_fill_hover_dark='#242424', + checkbox_label_background_fill_selected='#dce3e8', + checkbox_label_background_fill_selected_dark='#242424', + checkbox_label_border_color='#dce3e8', + checkbox_label_border_color_dark='#242424', + checkbox_label_border_color_hover='#4EACEF', + checkbox_label_border_color_hover_dark='#4EACEF', + checkbox_label_border_width='1px', + checkbox_label_border_width_dark='1px', + checkbox_label_gap='*spacing_lg', + checkbox_label_padding='*spacing_md calc(2 * *spacing_md)', + checkbox_label_shadow='none', + checkbox_label_text_color='#191919', + checkbox_label_text_color_dark='#ECF2F7', + checkbox_label_text_color_selected='#4EACEF', + checkbox_label_text_color_selected_dark='#4EACEF', + checkbox_label_text_size='*text_md', + checkbox_label_text_weight='400', + checkbox_shadow='*input_shadow', + color_accent='*primary_500', + color_accent_soft='#dce3e8', + color_accent_soft_dark='#242424', + container_radius='*radius_lg', + embed_radius='*radius_lg', + error_background_fill='#dce3e8', + error_background_fill_dark='#242424', + error_border_color='#191919', + error_border_color_dark='#ECF2F7', + error_border_width='1px', + error_border_width_dark='1px', + error_text_color='#4EACEF', + error_text_color_dark='#4EACEF', + form_gap_width='0px', + input_background_fill='#dce3e8', + input_background_fill_dark='#242424', + input_background_fill_focus='#dce3e8', + input_background_fill_focus_dark='#242424', + input_background_fill_hover='#d0d7db', + input_background_fill_hover_dark='#202020', + input_border_color='#191919', + input_border_color_dark='#ECF2F7', + input_border_color_focus='#191919', + input_border_color_focus_dark='#ECF2F7', + input_border_color_hover='#202020', + input_border_color_hover_dark='#a1c3d8', + input_border_width='0px', + input_padding='*spacing_xl', + input_placeholder_color='#19191930', + input_placeholder_color_dark='#ECF2F730', + input_radius='*radius_lg', + input_shadow='#19191900', + input_shadow_dark='#ECF2F700', + input_shadow_focus='#19191900', + input_shadow_focus_dark='#ECF2F700', + input_text_size='*text_md', + input_text_weight='400', + layout_gap='*spacing_xxl', + link_text_color='#4EACEF', + link_text_color_active='#4EACEF', + 
link_text_color_active_dark='#4EACEF',
+            link_text_color_dark='#4EACEF',
+            link_text_color_hover='#0c6ebd',
+            link_text_color_hover_dark='#0c6ebd',
+            link_text_color_visited='#4EACEF',
+            link_text_color_visited_dark='#4EACEF',
+            loader_color='#4EACEF',
+            loader_color_dark='#4EACEF',
+
+            panel_background_fill='#ECF2F7',
+            panel_background_fill_dark='#191919',
+            panel_border_color='#4EACEF',
+            panel_border_color_dark='#4EACEF',
+            panel_border_width='0',
+
+            prose_header_text_weight='600',
+            prose_text_size='*text_md',
+            prose_text_weight='400',
+            radio_circle= "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")",
+            section_header_text_size='*text_md',
+            section_header_text_weight='400',
+            shadow_drop='rgba(0,0,0,0.05) 0px 1px 2px 0px',
+            shadow_drop_lg='0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)',
+            shadow_inset='rgba(0,0,0,0.05) 0px 2px 4px 0px inset',
+            shadow_spread='#FFFFFF',
+            shadow_spread_dark='#000000',
+            slider_color='#4EACEF',
+            slider_color_dark='#4EACEF',
+            stat_background_fill='#4EACEF',
+            stat_background_fill_dark='#4EACEF',
+            table_border_color='#191919',
+            table_border_color_dark='#ECF2F7',
+            table_even_background_fill='#ECF2F7',
+            table_even_background_fill_dark='#191919',
+            table_odd_background_fill='#dce3e8',
+            table_odd_background_fill_dark='#242424',
+            table_radius='*radius_lg',
+            table_row_focus='#191919',
+            table_row_focus_dark='#ECF2F7',
+
+        )
+
diff --git a/assets/themes/Applio.py b/assets/themes/Applio.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac444eb3195362ca7f28a8cbf9cdf1d71f8e795c
--- /dev/null
+++ b/assets/themes/Applio.py
@@ -0,0 +1,300 @@
+from __future__ import annotations
+
+from typing import Iterable
+import gradio as gr
+
+#gr.themes.builder()
+from gradio.themes.base import Base
+from gradio.themes.utils import colors, fonts, sizes
+import time
+
+class Applio(Base):
+    def __init__(
+        self,
+        *,
+        primary_hue: colors.Color | str = colors.green,
+        secondary_hue: colors.Color | str = colors.emerald,
+        neutral_hue: colors.Color | str = colors.neutral,
+        spacing_size: sizes.Size | str = sizes.spacing_md,
+        radius_size: sizes.Size | str = sizes.radius_md,
+        text_size: sizes.Size | str = sizes.text_lg,
+        font: fonts.Font
+        | str
+        | Iterable[fonts.Font | str] = (
+            'Inter V',
+            fonts.GoogleFont('Inter'),
+            'ui-sans-serif',
+            'system-ui',
+        ),
+        font_mono: fonts.Font
+        | str
+        | Iterable[fonts.Font | str] = (
+            'ui-monospace',
+            fonts.GoogleFont("Roboto Mono"),
+            'Consolas',
+            'monospace',
+        ),
+    ):
+        super().__init__(
+            primary_hue=primary_hue,
+            secondary_hue=secondary_hue,
+            neutral_hue=neutral_hue,
+            spacing_size=spacing_size,
+            radius_size=radius_size,
+            text_size=text_size,
+            font=font,
+            font_mono=font_mono,
+        )
+        self.name = "Applio"
+        self.secondary_100 = "#dbeafe"
+        self.secondary_200 = "#bfdbfe"
+        self.secondary_300 = "#93c5fd"
+        self.secondary_400 = "#60a5fa"
+        self.secondary_50 = "#eff6ff"
+        self.secondary_500 = "#3b82f6"
+        self.secondary_600 = "#2563eb"
+        self.secondary_700 = "#1d4ed8"
+        self.secondary_800 = "#1e40af"
+        self.secondary_900 = "#1e3a8a"
+        self.secondary_950 = "#1d3660"
+
+        super().set(
+            # Blaise
+            background_fill_primary= "black",
+            background_fill_primary_dark="black",
+            background_fill_secondary= "black",
+            background_fill_secondary_dark="black",
+            block_background_fill= "*neutral_800",
+            block_background_fill_dark="*neutral_800",
+            block_border_color= "*border_color_primary",
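+            # Note: the background and most paired "*_dark" values in this theme
+            # are identical, so it renders dark regardless of whether Gradio's
+            # dark mode is active.
+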
block_border_color_dark= "*border_color_primary", + block_border_width= "1px", + block_border_width_dark= "1px", + block_info_text_color= "*body_text_color_subdued", + block_info_text_color_dark= "*body_text_color_subdued", + block_info_text_size= "*text_sm", + block_info_text_weight= "400", + block_label_background_fill= "*background_fill_primary", + block_label_background_fill_dark= "*background_fill_secondary", + block_label_border_color= "*border_color_primary", + block_label_border_color_dark= "*border_color_primary", + block_label_border_width= "1px", + block_label_border_width_dark= "1px", + block_label_margin= "0", + block_label_padding= "*spacing_sm *spacing_lg", + block_label_radius= "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", + block_label_right_radius= "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", + block_label_shadow= "*block_shadow", + block_label_text_color= "*neutral_200", + block_label_text_color_dark= "*neutral_200", + block_label_text_weight= "400", + block_padding= "*spacing_xl", + block_radius= "*radius_md", + block_shadow= "none", + block_shadow_dark= "none", + block_title_background_fill= "rgba(46,85,65,255)", + block_title_background_fill_dark= "rgba(46,85,65,255)", + block_title_border_color= "none", + block_title_border_color_dark= "none", + block_title_border_width= "0px", + block_title_padding= "*block_label_padding", + block_title_radius= "*block_label_radius", + block_title_text_color= "*neutral_200", + block_title_text_color_dark= "*neutral_200", + block_title_text_size= "*text_md", + block_title_text_weight= "600", + body_background_fill= "black", + body_background_fill_dark= "black", + body_text_color= "white", + body_text_color_dark= "white", + body_text_color_subdued= "*neutral_400", + body_text_color_subdued_dark= "*neutral_400", + body_text_size= "*text_md", + body_text_weight= "400", + border_color_accent= "*neutral_600", + border_color_accent_dark= "*neutral_600", + border_color_primary= "*neutral_800", + border_color_primary_dark= "*neutral_800", + button_border_width= "*input_border_width", + button_border_width_dark= "*input_border_width", + button_cancel_background_fill= "*button_secondary_background_fill", + button_cancel_background_fill_dark= "*button_secondary_background_fill", + button_cancel_background_fill_hover= "*button_cancel_background_fill", + button_cancel_background_fill_hover_dark= "*button_cancel_background_fill", + button_cancel_border_color= "*button_secondary_border_color", + button_cancel_border_color_dark= "*button_secondary_border_color", + button_cancel_border_color_hover= "*button_cancel_border_color", + button_cancel_border_color_hover_dark= "*button_cancel_border_color", + button_cancel_text_color= "*button_secondary_text_color", + button_cancel_text_color_dark= "*button_secondary_text_color", + button_cancel_text_color_hover= "*button_cancel_text_color", + button_cancel_text_color_hover_dark= "*button_cancel_text_color", + button_large_padding= "*spacing_lg calc(2 * *spacing_lg)", + button_large_radius= "*radius_lg", + button_large_text_size= "*text_lg", + button_large_text_weight= "600", + button_primary_background_fill= "*primary_600", + button_primary_background_fill_dark= "*primary_600", + button_primary_background_fill_hover= "*primary_500", + button_primary_background_fill_hover_dark= "*primary_500", + button_primary_border_color= "*primary_500", + button_primary_border_color_dark= "*primary_500", + button_primary_border_color_hover= "*primary_400", + button_primary_border_color_hover_dark= 
"*primary_400", + button_primary_text_color= "white", + button_primary_text_color_dark= "white", + button_primary_text_color_hover= "*button_primary_text_color", + button_primary_text_color_hover_dark= "*button_primary_text_color", + button_secondary_background_fill= "transparent", + button_secondary_background_fill_dark= "transparent", + button_secondary_background_fill_hover= "*neutral_800", + button_secondary_background_fill_hover_dark= "*neutral_800", + button_secondary_border_color= "*neutral_700", + button_secondary_border_color_dark= "*neutral_700", + button_secondary_border_color_hover= "*neutral_600", + button_secondary_border_color_hover_dark= "*neutral_600", + button_secondary_text_color= "white", + button_secondary_text_color_dark= "white", + button_secondary_text_color_hover= "*button_secondary_text_color", + button_secondary_text_color_hover_dark= "*button_secondary_text_color", + button_shadow= "none", + button_shadow_active= "*shadow_inset", + button_shadow_hover= "none", + button_small_padding= "*spacing_sm calc(2 * *spacing_sm)", + button_small_radius= "*radius_lg", + button_small_text_size= "*text_md", + button_small_text_weight= "400", + button_transition= "0.3s ease all", + chatbot_code_background_color= "*neutral_800", + chatbot_code_background_color_dark= "*neutral_800", + checkbox_background_color= "*neutral_700", + checkbox_background_color_dark= "*neutral_700", + checkbox_background_color_focus= "*checkbox_background_color", + checkbox_background_color_focus_dark= "*checkbox_background_color", + checkbox_background_color_hover= "*checkbox_background_color", + checkbox_background_color_hover_dark= "*checkbox_background_color", + checkbox_background_color_selected= "*secondary_600", + checkbox_background_color_selected_dark= "*secondary_600", + checkbox_border_color= "*neutral_700", + checkbox_border_color_dark= "*neutral_700", + checkbox_border_color_focus= "*secondary_500", + checkbox_border_color_focus_dark= "*secondary_500", + checkbox_border_color_hover= "*neutral_600", + checkbox_border_color_hover_dark= "*neutral_600", + checkbox_border_color_selected= "*secondary_600", + checkbox_border_color_selected_dark= "*secondary_600", + checkbox_border_radius= "*radius_sm", + checkbox_border_width= "*input_border_width", + checkbox_border_width_dark= "*input_border_width", + checkbox_check= "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")", + checkbox_label_background_fill= "transparent", + checkbox_label_background_fill_dark= "transparent", + checkbox_label_background_fill_hover= "transparent", + checkbox_label_background_fill_hover_dark= "transparent", + checkbox_label_background_fill_selected= "transparent", + checkbox_label_background_fill_selected_dark= "transparent", + checkbox_label_border_color= "transparent", + checkbox_label_border_color_dark= "transparent", + checkbox_label_border_color_hover= "transparent", + checkbox_label_border_color_hover_dark= "transparent", + checkbox_label_border_width= "transparent", + checkbox_label_border_width_dark= "transparent", + checkbox_label_gap= "*spacing_lg", + checkbox_label_padding= "*spacing_md calc(2 * *spacing_md)", + checkbox_label_shadow= "none", + checkbox_label_text_color= "*body_text_color", + checkbox_label_text_color_dark= "*body_text_color", + checkbox_label_text_color_selected= "*checkbox_label_text_color", + 
checkbox_label_text_color_selected_dark= "*checkbox_label_text_color", + checkbox_label_text_size= "*text_md", + checkbox_label_text_weight= "400", + checkbox_shadow= "*input_shadow", + color_accent= "*primary_500", + color_accent_soft= "*primary_50", + color_accent_soft_dark= "*neutral_700", + container_radius= "*radius_xl", + embed_radius= "*radius_lg", + error_background_fill= "*background_fill_primary", + error_background_fill_dark= "*background_fill_primary", + error_border_color= "*border_color_primary", + error_border_color_dark= "*border_color_primary", + error_border_width= "1px", + error_border_width_dark= "1px", + error_text_color= "#ef4444", + error_text_color_dark= "#ef4444", + + form_gap_width= "0px", + input_background_fill= "*neutral_900", + input_background_fill_dark= "*neutral_900", + input_background_fill_focus= "*secondary_600", + input_background_fill_focus_dark= "*secondary_600", + input_background_fill_hover= "*input_background_fill", + input_background_fill_hover_dark= "*input_background_fill", + input_border_color= "*neutral_700", + input_border_color_dark= "*neutral_700", + input_border_color_focus= "*secondary_600", + input_border_color_focus_dark= "*primary_600", + input_border_color_hover= "*input_border_color", + input_border_color_hover_dark= "*input_border_color", + input_border_width= "1px", + input_border_width_dark= "1px", + input_padding= "*spacing_xl", + input_placeholder_color= "*neutral_500", + input_placeholder_color_dark= "*neutral_500", + input_radius= "*radius_lg", + input_shadow= "none", + input_shadow_dark= "none", + input_shadow_focus= "*input_shadow", + input_shadow_focus_dark= "*input_shadow", + input_text_size= "*text_md", + input_text_weight= "400", + layout_gap= "*spacing_xxl", + link_text_color= "*secondary_500", + link_text_color_active= "*secondary_500", + link_text_color_active_dark= "*secondary_500", + link_text_color_dark= "*secondary_500", + link_text_color_hover= "*secondary_400", + link_text_color_hover_dark= "*secondary_400", + link_text_color_visited= "*secondary_600", + link_text_color_visited_dark= "*secondary_600", + loader_color= "*color_accent", + loader_color_dark= "*color_accent", + + panel_background_fill= "*background_fill_secondary", + panel_background_fill_dark= "*background_fill_secondary", + panel_border_color= "*border_color_primary", + panel_border_color_dark= "*border_color_primary", + panel_border_width= "1px", + panel_border_width_dark= "1px", + + + prose_header_text_weight= "600", + prose_text_size= "*text_md", + prose_text_weight= "400", + radio_circle= "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")", + + section_header_text_size= "*text_md", + section_header_text_weight= "400", + shadow_drop= "rgba(0,0,0,0.05) 0px 1px 2px 0px", + shadow_drop_lg= "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", + shadow_inset= "rgba(0,0,0,0.05) 0px 2px 4px 0px inset", + shadow_spread= "3px", + shadow_spread_dark= "1px", + slider_color= "*primary_600", + slider_color_dark= "*primary_600", + + stat_background_fill= "*primary_500", + stat_background_fill_dark= "*primary_500", + table_border_color= "*neutral_700", + table_border_color_dark= "*neutral_700", + table_even_background_fill= "*neutral_950", + table_even_background_fill_dark= "*neutral_950", + table_odd_background_fill= "*neutral_900", + table_odd_background_fill_dark= "*neutral_900", + table_radius= "*radius_lg", + table_row_focus= 
"*color_accent_soft", + table_row_focus_dark= "*color_accent_soft", + + ) + + diff --git a/assets/themes/theme.json b/assets/themes/theme.json new file mode 100644 index 0000000000000000000000000000000000000000..f8c42ff064e327b1364eb90d6f73bc4283a7c1bd --- /dev/null +++ b/assets/themes/theme.json @@ -0,0 +1 @@ +{"file": "Applio.py", "class": "Applio"} \ No newline at end of file diff --git a/assets/unzips/.gitignore b/assets/unzips/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d6b7ef32c8478a48c3994dcadc86837f4371184d --- /dev/null +++ b/assets/unzips/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/assets/uvr5_weights/.gitignore b/assets/uvr5_weights/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d6b7ef32c8478a48c3994dcadc86837f4371184d --- /dev/null +++ b/assets/uvr5_weights/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/assets/zips/.gitignore b/assets/zips/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d6b7ef32c8478a48c3994dcadc86837f4371184d --- /dev/null +++ b/assets/zips/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/datasets/.gitignore b/datasets/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/go-applio-manager.bat b/go-applio-manager.bat new file mode 100644 index 0000000000000000000000000000000000000000..dd0ed3020d690a27a7fc4da3b924c9325850d93b --- /dev/null +++ b/go-applio-manager.bat @@ -0,0 +1,356 @@ +@echo off +title Applio - Manager +cd %~dp0 +::: +::: _ _ +::: /\ | (_) +::: / \ _ __ _ __ | |_ ___ +::: / /\ \ | '_ \| '_ \| | |/ _ \ +::: / ____ \| |_) | |_) | | | (_) | +::: /_/ \_\ .__/| .__/|_|_|\___/ +::: | | | | +::: |_| |_| +::: +::: + +setlocal +set "branch=applio-recode" +set "runtime=runtime-recode" +set "repoUrl=https://github.com/IAHispano/Applio-RVC-Fork/archive/refs/heads/%branch%.zip" +set "fixesFolder=lib/fixes" +set "localFixesPy=local_fixes.py" +set "principal=%cd%" +set "URL_BASE=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main" +set "URL_EXTRA=https://huggingface.co./IAHispano/applio/resolve/main" + +:menu +for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A + +echo [1] Reinstall Applio +echo [2] Update Applio +echo [3] Download NVDIA Runtime +echo [4] Download AMD Runtime +echo [5] Update Applio + Dependencies +echo [6] Fix Tensorboard +echo. + +set /p choice=Select an option: +set choice=%choice: =% + +if "%choice%"=="1" ( + cls + echo. + goto reinstaller + pause + cls + goto menu + +) + +if "%choice%"=="2" ( + cls + echo. + goto updater + pause + cls + goto menu +) + +if "%choice%"=="3" ( + cls + echo. + goto nvdiaRuntime + pause + cls + goto menu + +) + +if "%choice%"=="4" ( + cls + echo. + goto amdRuntime + pause + cls + goto menu + +) + +if "%choice%"=="5" ( + cls + echo. + goto updaterDependencies + pause + cls + goto menu + +) + +if "%choice%"=="6" ( + cls + echo. + pip uninstall tb-nightly tensorboardX tensorboard + pip install tensorboard + cls + echo Tensorboard re-installed correctly! + echo. + pause + cls + goto menu + +) + +cls +echo Invalid option. Please enter a number from 1 to 3. +echo. +echo Press 'Enter' to access the main menu... +pause>nul +cls +goto menu + +:reinstaller + +cls +echo INFO: Please ensure you have installed the required dependencies before continuing. Refer to the installation guide for details. +echo. 
+echo Step-by-step guide: https://rentry.org/appliolocal
+echo Build Tools: https://aka.ms/vs/17/release/vs_BuildTools.exe
+echo Redistributable: https://aka.ms/vs/17/release/vc_redist.x64.exe
+echo Git: https://github.com/git-for-windows/git/releases/download/v2.42.0.windows.2/Git-2.42.0.2-64-bit.exe
+echo Python 3.9.8: https://www.python.org/ftp/python/3.9.8/python-3.9.8-amd64.exe
+echo.
+echo INFO: It is recommended to install Python 3.9.X and to make sure it has been added to the system's PATH.
+echo.
+pause
+cls
+for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A
+echo.
+
+echo Resetting the repository...
+git reset --hard
+git pull
+echo.
+cls
+
+echo Installing dependencies...
+echo.
+echo Recommended for Nvidia GPU users:
+echo [1] Download Runtime (pre-installed dependencies)
+echo.
+echo Recommended for AMD/Intel GPU users (Broken):
+echo [2] Download DML Runtime (pre-installed dependencies)
+echo.
+echo Only recommended for experienced users:
+echo [3] Nvidia graphics cards
+echo [4] AMD / Intel graphics cards
+echo.
+echo [5] I have already installed the dependencies
+echo.
+set /p choice=Select the option according to your GPU:
+set choice=%choice: =%
+
+if "%choice%"=="1" (
+    cls
+    powershell -command "Invoke-WebRequest -Uri https://frippery.org/files/busybox/busybox.exe -OutFile busybox.exe"
+    busybox.exe wget %URL_EXTRA%/runtime.zip
+    echo.
+    echo Extracting the runtime.zip file...
+    powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('runtime.zip', '%principal%') }"
+    echo.
+    del runtime.zip busybox.exe
+    cls
+    echo.
+    goto dependenciesFinished
+)
+
+if "%choice%"=="2" (
+    cls
+    powershell -command "Invoke-WebRequest -Uri https://frippery.org/files/busybox/busybox.exe -OutFile busybox.exe"
+    busybox.exe wget %URL_EXTRA%/runtime_dml.zip
+    echo.
+    echo Extracting the runtime_dml.zip file...
+    powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('runtime_dml.zip', '%principal%') }"
+    echo.
+    del runtime_dml.zip busybox.exe
+    cd runtime
+    python.exe -m pip install onnxruntime
+    cd ..
+    cls
+    echo.
+    goto dependenciesFinished
+)
+
+if "%choice%"=="3" (
+    cls
+    pip install -r assets/requirements/requirements.txt
+    echo.
+    pip uninstall torch torchvision torchaudio -y
+    echo.
+    pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu117
+    echo.
+    echo.
+    cls
+    echo Dependencies successfully installed!
+    echo.
+    goto dependenciesFinished
+)
+
+if "%choice%"=="4" (
+    cls
+    pip uninstall onnxruntime onnxruntime-directml -y
+    echo.
+    pip install -r assets/requirements/requirements.txt
+    echo.
+    pip install -r assets/requirements/requirements-dml.txt
+    echo.
+    echo.
+    cls
+    echo Dependencies successfully installed!
+    echo.
+    goto dependenciesFinished
+)
+
+if "%choice%"=="5" (
+    echo Dependencies successfully installed!
+    echo.
+    goto dependenciesFinished
+)
+
+:dependenciesFinished
+cls
+echo Applio has been successfully downloaded. Run go-applio.bat to start the web interface!
+echo.
+pause
+exit
+
+echo Applio has been reinstalled!
+echo.
+echo Press 'Enter' to access the main menu...
+pause>nul
+cls
+goto menu
+
+
+:updater
+
+echo Updating the repository...
+git pull
+echo Applio has been updated!
+echo.
+echo Press 'Enter' to access the main menu...
+pause>nul
+cls
+goto menu
+
+
+:updaterDependencies
+
+echo Updating the repository...
+git pull
+
+echo Installing dependencies...
+
+echo [1] Nvidia graphics cards
+echo [2] AMD / Intel graphics cards
+echo [3] I have already installed the dependencies
+echo.
+
+set /p choice=Select the option according to your GPU:
+set choice=%choice: =%
+
+if "%choice%"=="1" (
+    cls
+    pip uninstall tb-nightly tensorboardX tensorboard -y
+    echo.
+    pip uninstall onnxruntime onnxruntime-directml -y
+    echo.
+    pip install -r assets/requirements/requirements.txt
+    echo.
+    pip uninstall torch torchvision torchaudio -y
+    echo.
+    pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu117
+    echo.
+    echo.
+    cls
+    echo Dependencies installed!
+    echo.
+    goto updateFinished
+)
+
+if "%choice%"=="2" (
+    cls
+    pip uninstall tb-nightly tensorboardX tensorboard -y
+    echo.
+    pip uninstall onnxruntime onnxruntime-directml -y
+    echo.
+    pip install -r assets/requirements/requirements.txt
+    echo.
+    pip install -r assets/requirements/requirements-dml.txt
+    echo.
+    echo.
+    cls
+    echo Dependencies installed!
+    echo.
+    goto updateFinished
+)
+
+if "%choice%"=="3" (
+    echo Dependencies installed!
+    echo.
+    goto updateFinished
+)
+
+:updateFinished
+echo Verifying that the local_fixes.py file exists in the Fixes folder...
+if exist "%fixesFolder%\%localFixesPy%" (
+    echo Running the file...
+    if exist "%principal%\runtime" (
+        runtime\python.exe "%fixesFolder%\%localFixesPy%"
+    ) else (
+        python.exe "%fixesFolder%\%localFixesPy%"
+    )
+) else (
+    echo The file "%localFixesPy%" was not found in the "Fixes" folder.
+)
+echo.
+echo Applio has been updated!
+echo.
+echo Press 'Enter' to access the main menu...
+pause>nul
+cls
+goto menu
+
+:nvidiaRuntime
+if exist "%principal%\runtime" (
+    rmdir "%principal%\runtime" /s /q
+)
+cls
+curl -LJO "%URL_EXTRA%/runtime.zip"
+echo.
+echo Extracting the runtime.zip file...
+powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('runtime.zip', '%principal%') }"
+echo.
+del runtime.zip
+cls
+echo NVIDIA Runtime downloaded!
+echo.
+goto menu
+
+:amdRuntime
+if exist "%principal%\runtime" (
+    rmdir "%principal%\runtime" /s /q
+)
+
+cls
+curl -LJO "%URL_EXTRA%/runtime_dml.zip"
+echo.
+echo Extracting the runtime_dml.zip file...
+powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('runtime_dml.zip', '%principal%') }"
+echo.
+del runtime_dml.zip
+cls
+echo AMD Runtime downloaded!
+echo.
+goto menu
+
diff --git a/go-applio-manager.sh b/go-applio-manager.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8c887ef87785683f5e981f5ea74497331621aadc
--- /dev/null
+++ b/go-applio-manager.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+echo -e "\033]0;Applio - Installer\007"
+source .venv/bin/activate
+clear
+menu1() {
+  while true; do
+    clear
+    echo " :::"
+    echo " :::                       _ _"
+    echo " :::      /\              | (_)"
+    echo " :::     /  \   _ __ _ __ | |_ ___"
+    echo " :::    / /\ \ | '_ \| '_ \| | |/ _ \ "
+    echo " :::   / ____ \| |_) | |_) | | | (_) |"
+    echo " :::  /_/    \_\ .__/| .__/|_|_|\___/"
+    echo " :::            | |  | |"
+    echo " :::            |_|  |_|"
+    echo " :::"
+    echo " :::"
+    echo
+    echo "[1] Uninstall Applio"
+    echo "[2] Update Applio"
+    echo "[3] Update Applio + Dependencies"
+    echo "[4] Fix Tensorboard"
+    echo
+    read -p "Select an option: " choice1
+
+    case $choice1 in
+        1)
+            pip uninstall -r assets/requirements/requirements-dml* -y
+            pip uninstall -r assets/requirements/requirements-ipex* -y
+            pip uninstall -r https://raw.githubusercontent.com/WorXeN/Retrieval-based-Voice-Conversion-WebUI/main/requirements-amd.txt -y
+            pip uninstall -r assets/requirements/requirements-realtime-vc.txt -y
+            cd .. && rm -rf *Applio*
+            finish1
+            ;;
+        2)
+            git pull
+            finish1
+            ;;
+        3)
+            git pull
+            ./install_Applio.sh
+            finish1
+            ;;
+        4)
+            python3.9 -m pip uninstall -y tb-nightly tensorboardX tensorboard
+            python3.9 -m pip install tensorboard
+            clear
+            echo "Tensorboard re-installed correctly!"
+            read -p "Press Enter to access the main menu..."
+            finish1
+            ;;
+
+        *)
+            echo "Invalid option. Please enter a number from 1 to 4."
+            echo ""
+            read -p "Press Enter to access the main menu..."
+            ;;
+    esac
+  done
+}
+
+# Clean up and exit
+finish1() {
+    clear
+    echo "Goodbye!"
+    exit 0
+}
+# Show the main menu
+menu1
diff --git a/go-applio.bat b/go-applio.bat
new file mode 100644
index 0000000000000000000000000000000000000000..70cc1bea97c811535eb36665c4a57acfe788dde4
--- /dev/null
+++ b/go-applio.bat
@@ -0,0 +1,100 @@
+@echo off
+setlocal
+title Applio - Start
+cd %~dp0
+
+:::
+:::                       _ _
+:::      /\              | (_)
+:::     /  \   _ __ _ __ | |_ ___
+:::    / /\ \ | '_ \| '_ \| | |/ _ \
+:::   / ____ \| |_) | |_) | | | (_) |
+:::  /_/    \_\ .__/| .__/|_|_|\___/
+:::            | |  | |
+:::            |_|  |_|
+:::
+:::
+
+for /f "usebackq delims=" %%i in ("%cd%\assets\configs\version.txt") do (
+    set "localVersion=%%i"
+)
+for /f %%i in ('powershell -command "(Invoke-WebRequest -Uri 'https://raw.githubusercontent.com/IAHispano/Applio-RVC-Fork/main/assets/configs/version.txt').Content"') do set "onlineVersion=%%i"
+
+:menu
+for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A
+powershell -command "if ('%localVersion%' -lt '%onlineVersion%') { exit 1 } else { exit 0 }"
+if %errorlevel% equ 1 (
+    echo You are currently using an outdated version %localVersion%
+    echo.
+    echo We're excited to announce that version %onlineVersion% is now available for download on https://github.com/IAHispano/Applio-RVC-Fork.
+    echo Upgrade now to access the latest features and improvements!
+    echo.
+    goto continue
+) else (
+    goto continue
+)
+
+:continue
+echo Runtime: Recommended for regular users
+echo [1] Start Applio - Runtime ^(Nvidia Support^)
+echo [2] Start Applio - Runtime ^(Intel Support. Requires Nvidia runtime^)
+echo [3] Start Applio - Runtime ^(AMD Support^)
+echo.
+echo Dependencies: Only recommended for experienced users
+echo [4] Start Applio ^(Nvidia Support^)
+echo [5] Start Applio ^(AMD Support^)
+echo.
+echo [6] Exit
+echo.
+
+set /p choice=Select an option:
+set choice=%choice: =%
+
+if "%choice%"=="6" (
+    goto finish
+) else if "%choice%"=="5" (
+    cls
+    echo Starting Applio with AMD support...
+    python infer-web.py --pycmd python --port 7897 --dml --theme dark
+    pause
+    cls
+    goto menu
+) else if "%choice%"=="4" (
+    cls
+    echo Starting Applio with Nvidia support...
+    python infer-web.py --pycmd python --port 7897 --theme dark
+    pause
+    cls
+    goto menu
+) else if "%choice%"=="3" (
+    cls
+    echo Starting Applio with runtime for AMD support ^(you must have it installed^)...
+    runtime\python.exe infer-web.py --pycmd runtime/python.exe --port 7897 --dml --theme dark
+    pause
+    cls
+    goto menu
+) else if "%choice%"=="2" (
+    runtime\python.exe -m pip install scikit-learn-intelex
+    cls
+    echo Starting Applio with runtime for Intel CPU support ^(you must have Nvidia support installed^)...
+    runtime\python.exe -m sklearnex infer-web.py --pycmd runtime/python.exe --port 7897 --theme dark
+    pause
+    cls
+    goto menu
+) else if "%choice%"=="1" (
+    cls
+    echo Starting Applio with runtime for Nvidia support ^(you must have it installed^)...
+    runtime\python.exe infer-web.py --pycmd runtime/python.exe --port 7897 --theme dark
+    pause
+    cls
+    goto menu
+)
+
+cls
+echo Invalid option. Please enter a number from 1 to 6.
+echo.
+echo Press 'Enter' to access the main menu...
+pause>nul
+cls
+goto menu
+:finish
diff --git a/go-applio.sh b/go-applio.sh
new file mode 100644
index 0000000000000000000000000000000000000000..46c9018ad21b5dd8d689ed8ca75d53c318f6ad98
--- /dev/null
+++ b/go-applio.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+echo -e "\033]0;Applio - Start\007"
+source .venv/bin/activate
+menu() {
+  while true; do
+    clear
+    echo " :::"
+    echo " :::                       _ _"
+    echo " :::      /\              | (_)"
+    echo " :::     /  \   _ __ _ __ | |_ ___"
+    echo " :::    / /\ \ | '_ \| '_ \| | |/ _ \ "
+    echo " :::   / ____ \| |_) | |_) | | | (_) |"
+    echo " :::  /_/    \_\ .__/| .__/|_|_|\___/"
+    echo " :::            | |  | |"
+    echo " :::            |_|  |_|"
+    echo " :::"
+    echo " :::"
+    echo ""
+    echo "[1] Start Applio (Nvidia/AMD Support)"
+    echo "[2] Start Applio (Intel GPU/CPU)"
+    echo ""
+    echo "[3] Exit"
+    echo ""
+
+    read -p "Select an option: " choice
+    case $choice in
+        1)
+            clear
+            python infer-web.py --pycmd python --port 7897 --theme dark
+            read -p "Press Enter to continue..."
+            ;;
+        2)
+            clear
+            python -m sklearnex infer-web.py --pycmd python --port 7897 --theme dark
+            read -p "Press Enter to continue..."
+            ;;
+        3)
+            finish
+            ;;
+        *)
+            clear
+            echo "Invalid option. Please enter a number from 1 to 3."
+            echo ""
+            read -n 1 -s -r -p "Press 'Enter' to access the main menu..."
+            ;;
+    esac
+  done
+}
+
+finish() {
+    clear
+    echo "Exiting Applio..."
+    exit 0
+}
+
+menu
diff --git a/go-tensorboard.bat b/go-tensorboard.bat
new file mode 100644
index 0000000000000000000000000000000000000000..631402eac66b7f9c39d803e6a280aa50dd3884b9
--- /dev/null
+++ b/go-tensorboard.bat
@@ -0,0 +1,5 @@
+title Applio - Tensorboard
+cd %~dp0
+cls
+python lib/fixes/tensor-launch.py
+pause
diff --git a/infer-web.py b/infer-web.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3408b73d14d68f7c1e4c19f95e4c452b924cfc2
--- /dev/null
+++ b/infer-web.py
@@ -0,0 +1,2982 @@
+import os, sys
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+os.environ["OPENBLAS_NUM_THREADS"] = "1"
+os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
+import logging
+import shutil
+import threading
+from assets.configs.config import Config
+import lib.globals.globals as rvc_globals
+
+import lib.tools.model_fetcher as model_fetcher
+import math
+import ffmpeg
+import traceback
+import warnings
+from random import shuffle
+from subprocess import Popen
+from time import sleep
+import json
+import pathlib
+import fairseq
+import socket
+import requests
+import subprocess
+
+logging.getLogger("faiss").setLevel(logging.WARNING)
+import faiss
+import gradio as gr
+import numpy as np
+import torch
+import regex as re
+import soundfile as SF
+
+SFWrite = SF.write
+from dotenv import load_dotenv
+from sklearn.cluster import MiniBatchKMeans
+import datetime
+
+
+from glob import glob1
+import signal
+from signal import SIGTERM
+from assets.i18n.i18n import I18nAuto
+from lib.infer.infer_libs.train.process_ckpt import (
+    change_info,
+    extract_small_model,
+    merge,
+    show_info,
+)
+from lib.infer.modules.uvr5.mdxnet import MDXNetDereverb
+from lib.infer.modules.uvr5.preprocess import AudioPre, AudioPreDeEcho
+from lib.infer.modules.vc.modules import VC
+from lib.infer.modules.vc.utils import *
+import nltk
+
+nltk.download("punkt", quiet=True)
+
+import tabs.resources as resources
+import tabs.tts as tts
+import tabs.merge as mergeaudios
+import tabs.processing as processing
+
+from lib.infer.infer_libs.csvutil import CSVutil
+import time
+import csv
+from shlex import quote as SQuote
+
+logger = logging.getLogger(__name__)
+
+RQuote = lambda val: SQuote(str(val))
+
+tmp = os.path.join(now_dir, "temp")
+
+# directories = ["logs", "datasets", "weights", "audio-others", "audio-outputs"]
+
+shutil.rmtree(tmp, ignore_errors=True)
+
+os.makedirs(tmp, exist_ok=True)
+
+# Start the local Flask download server
+host = "localhost"
+port = 8000
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+sock.settimeout(2)  # Timeout in seconds
+
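+# Probe the port first: if the connect() below succeeds, something is already
+# listening on localhost:8000 (most likely a stale download server), so we ask
+# it to shut down via its /shutdown endpoint (served, presumably, by
+# lib/tools/server.py) before relaunching. If nothing is listening, connect()
+# raises and the except branch simply starts the server.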
+ ) + logger.warn("Trying to start it anyway") + sock.close() + requests.post("http://localhost:8000/shutdown") + time.sleep(3) + script_path = os.path.join(now_dir, "lib", "tools", "server.py") + try: + subprocess.Popen(f"python {script_path}", shell=True) + logger.info("Flask server started!") + except Exception as e: + logger.error(f"Failed to start the Flask server") + logger.error(e) + except Exception as e: + logger.info("Starting the Flask server") + sock.close() + script_path = os.path.join(now_dir, "lib", "tools", "server.py") + try: + subprocess.Popen(f"python {script_path}", shell=True) + logger.info("Flask server started!") + except Exception as e: + logger.error("Failed to start the Flask server") + logger.error(e) + + +# for folder in directories: +# os.makedirs(os.path.join(now_dir, folder), exist_ok=True) +def remove_invalid_chars(text): + pattern = re.compile(r"[^\x00-\x7F]+") + return pattern.sub("", text) + + +def remove_text_between_parentheses(lines, start_line, end_line): + pattern = r"\[([^\]]*)\]\([^)]*\)" + processed_lines = [] + for line_number, line in enumerate(lines, start=1): + if start_line <= line_number <= end_line: + modified_line = re.sub(pattern, r"\1", line) + processed_lines.append(modified_line) + else: + processed_lines.append(line) + + return "\n".join(processed_lines) + + +with open("README.md", "r", encoding="utf8") as f: + inforeadme = f.read() + +inforeadme = remove_text_between_parentheses(inforeadme.split("\n"), 6, 17) +inforeadme = remove_invalid_chars(inforeadme) +inforeadme = remove_text_between_parentheses(inforeadme.split("\n"), 191, 207) + +os.makedirs(tmp, exist_ok=True) +os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True) +os.makedirs(os.path.join(now_dir, "logs/weights"), exist_ok=True) +os.environ["temp"] = tmp +warnings.filterwarnings("ignore") +torch.manual_seed(114514) +logging.getLogger("numba").setLevel(logging.WARNING) + +if not os.path.isdir("lib/csvdb/"): + os.makedirs("lib/csvdb") + frmnt, stp = open("lib/csvdb/formanting.csv", "w"), open("lib/csvdb/stop.csv", "w") + frmnt.close() + stp.close() + +global DoFormant, Quefrency, Timbre + +try: + DoFormant, Quefrency, Timbre = CSVutil( + "lib/csvdb/formanting.csv", "r", "formanting" + ) + DoFormant = ( + lambda DoFormant: True + if DoFormant.lower() == "true" + else (False if DoFormant.lower() == "false" else DoFormant) + )(DoFormant) +except (ValueError, TypeError, IndexError): + DoFormant, Quefrency, Timbre = False, 1.0, 1.0 + CSVutil( + "lib/csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre + ) + +load_dotenv() +config = Config() +vc = VC(config) + +if config.dml == True: + + def forward_dml(ctx, x, scale): + ctx.scale = scale + res = x.clone().detach() + return res + + fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml + +i18n = I18nAuto(os.getenv('LANGUAGE')) +i18n.print() +# 判断是否有能用来训练和加速推理的N卡 +ngpu = torch.cuda.device_count() +gpu_infos = [] +mem = [] +if_gpu_ok = False + +isinterrupted = 0 + + +if torch.cuda.is_available() or ngpu != 0: + for i in range(ngpu): + gpu_name = torch.cuda.get_device_name(i) + gpu_infos.append("%s\t%s" % (i, gpu_name)) + mem.append( + int( + torch.cuda.get_device_properties(i).total_memory / 1024 / 1024 / 1024 + + 0.4 + ) + ) +if len(gpu_infos) > 0: + gpu_info = "\n".join(gpu_infos) + default_batch_size = min(mem) // 2 +else: + gpu_info = ( + "Unfortunately, there is no compatible GPU available to support your training." 
+ ) + default_batch_size = 1 +gpus = "-".join([i[0] for i in gpu_infos]) + + +class ToolButton(gr.Button, gr.components.FormComponent): + """Small button with single emoji as text, fits inside gradio forms""" + + def __init__(self, **kwargs): + super().__init__(variant="tool", **kwargs) + + def get_block_name(self): + return "button" + + +import lib.infer.infer_libs.uvr5_pack.mdx as mdx +from lib.infer.modules.uvr5.mdxprocess import ( + get_model_list, + get_demucs_model_list, + id_to_ptm, + prepare_mdx, + run_mdx, +) + +hubert_model = None +weight_root = os.getenv("weight_root") +weight_uvr5_root = os.getenv("weight_uvr5_root") +index_root = os.getenv("index_root") +datasets_root = "datasets" +fshift_root = "lib/infer/infer_libs/formantshiftcfg" +audio_root = "assets/audios" +audio_others_root = "assets/audios/audio-others" +sup_audioext = { + "wav", + "mp3", + "flac", + "ogg", + "opus", + "m4a", + "mp4", + "aac", + "alac", + "wma", + "aiff", + "webm", + "ac3", +} + +names = [ + os.path.join(root, file) + for root, _, files in os.walk(weight_root) + for file in files + if file.endswith((".pth", ".onnx")) +] + +indexes_list = [ + os.path.join(root, name) + for root, _, files in os.walk(index_root, topdown=False) + for name in files + if name.endswith(".index") and "trained" not in name +] + +audio_paths = [ + os.path.join(root, name) + for root, _, files in os.walk(audio_root, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == audio_root +] + +audio_others_paths = [ + os.path.join(root, name) + for root, _, files in os.walk(audio_others_root, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == audio_others_root +] + +uvr5_names = [ + name.replace(".pth", "") + for name in os.listdir(weight_uvr5_root) + if name.endswith(".pth") or "onnx" in name +] + + +check_for_name = lambda: sorted(names)[0] if names else "" + +datasets = [] +for foldername in os.listdir(os.path.join(now_dir, datasets_root)): + if "." not in foldername: + datasets.append(os.path.join(now_dir, "datasets", foldername)) + + +def get_dataset(): + if len(datasets) > 0: + return sorted(datasets)[0] + else: + return "" + + +def update_model_choices(select_value): + model_ids = get_model_list() + model_ids_list = list(model_ids) + demucs_model_ids = get_demucs_model_list() + demucs_model_ids_list = list(demucs_model_ids) + if select_value == "VR": + return {"choices": uvr5_names, "__type__": "update"} + elif select_value == "MDX": + return {"choices": model_ids_list, "__type__": "update"} + elif select_value == "Demucs (Beta)": + return {"choices": demucs_model_ids_list, "__type__": "update"} + + +def update_dataset_list(name): + new_datasets = [] + for foldername in os.listdir(os.path.join(now_dir, datasets_root)): + if "." 
not in foldername: + new_datasets.append( + os.path.join( + now_dir, + "datasets", + foldername, + ) + ) + return gr.Dropdown.update(choices=new_datasets) + + +def get_indexes(): + indexes_list = [ + os.path.join(dirpath, filename) + for dirpath, _, filenames in os.walk(index_root) + for filename in filenames + if filename.endswith(".index") and "trained" not in filename + ] + + return indexes_list if indexes_list else "" + + +def get_fshift_presets(): + fshift_presets_list = [ + os.path.join(dirpath, filename) + for dirpath, _, filenames in os.walk(fshift_root) + for filename in filenames + if filename.endswith(".txt") + ] + + return fshift_presets_list if fshift_presets_list else "" + + +def uvr( + model_name, + inp_root, + save_root_vocal, + paths, + save_root_ins, + agg, + format0, + architecture, +): + infos = [] + if architecture == "VR": + try: + infos.append( + i18n("Starting audio conversion... (This might take a moment)") + ) + inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + save_root_vocal = ( + save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + ) + save_root_ins = ( + save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + ) + + if model_name == "onnx_dereverb_By_FoxJoy": + pre_fun = MDXNetDereverb(15, config.device) + else: + func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho + pre_fun = func( + agg=int(agg), + model_path=os.path.join( + os.getenv("weight_uvr5_root"), model_name + ".pth" + ), + device=config.device, + is_half=config.is_half, + ) + if inp_root != "": + paths = [ + os.path.join(inp_root, name) + for root, _, files in os.walk(inp_root, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == inp_root + ] + else: + paths = [path.name for path in paths] + for path in paths: + inp_path = os.path.join(inp_root, path) + need_reformat = 1 + done = 0 + try: + info = ffmpeg.probe(inp_path, cmd="ffprobe") + if ( + info["streams"][0]["channels"] == 2 + and info["streams"][0]["sample_rate"] == "44100" + ): + need_reformat = 0 + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0 + ) + done = 1 + except: + need_reformat = 1 + traceback.print_exc() + if need_reformat == 1: + tmp_path = "%s/%s.reformatted.wav" % ( + os.path.join(os.environ["tmp"]), + os.path.basename(inp_path), + ) + os.system( + "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" + % (inp_path, tmp_path) + ) + inp_path = tmp_path + try: + if done == 0: + pre_fun.path_audio( + inp_path, save_root_ins, save_root_vocal, format0 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + try: + if done == 0: + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + infos.append( + "%s->%s" + % (os.path.basename(inp_path), traceback.format_exc()) + ) + yield "\n".join(infos) + except: + infos.append(traceback.format_exc()) + yield "\n".join(infos) + finally: + try: + if model_name == "onnx_dereverb_By_FoxJoy": + del pre_fun.pred.model + del pre_fun.pred.model_ + else: + del pre_fun.model + del pre_fun + except: + traceback.print_exc() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + logger.info("Executed torch.cuda.empty_cache()") + yield "\n".join(infos) + elif architecture == "MDX": + try: + infos.append( + i18n("Starting audio conversion... 
(This might take a moment)") + ) + yield "\n".join(infos) + inp_root, save_root_vocal, save_root_ins = [ + x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + for x in [inp_root, save_root_vocal, save_root_ins] + ] + + if inp_root != "": + paths = [ + os.path.join(inp_root, name) + for root, _, files in os.walk(inp_root, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == inp_root + ] + else: + paths = [path.name for path in paths] + print(paths) + invert = True + denoise = True + use_custom_parameter = True + dim_f = 3072 + dim_t = 256 + n_fft = 7680 + use_custom_compensation = True + compensation = 1.025 + suffix = "Vocals_custom" # @param ["Vocals", "Drums", "Bass", "Other"]{allow-input: true} + suffix_invert = "Instrumental_custom" # @param ["Instrumental", "Drumless", "Bassless", "Instruments"]{allow-input: true} + print_settings = True # @param{type:"boolean"} + onnx = id_to_ptm(model_name) + compensation = ( + compensation + if use_custom_compensation or use_custom_parameter + else None + ) + mdx_model = prepare_mdx( + onnx, + use_custom_parameter, + dim_f, + dim_t, + n_fft, + compensation=compensation, + ) + + for path in paths: + # inp_path = os.path.join(inp_root, path) + suffix_naming = suffix if use_custom_parameter else None + diff_suffix_naming = suffix_invert if use_custom_parameter else None + run_mdx( + onnx, + mdx_model, + path, + format0, + diff=invert, + suffix=suffix_naming, + diff_suffix=diff_suffix_naming, + denoise=denoise, + ) + + if print_settings: + print() + print("[MDX-Net_Colab settings used]") + print(f"Model used: {onnx}") + print(f"Model MD5: {mdx.MDX.get_hash(onnx)}") + print(f"Model parameters:") + print(f" -dim_f: {mdx_model.dim_f}") + print(f" -dim_t: {mdx_model.dim_t}") + print(f" -n_fft: {mdx_model.n_fft}") + print(f" -compensation: {mdx_model.compensation}") + print() + print("[Input file]") + print("filename(s): ") + for filename in paths: + print(f" -{filename}") + infos.append(f"{os.path.basename(filename)}->Success") + yield "\n".join(infos) + except: + infos.append(traceback.format_exc()) + yield "\n".join(infos) + finally: + try: + del mdx_model + except: + traceback.print_exc() + + print("clean_empty_cache") + if torch.cuda.is_available(): + torch.cuda.empty_cache() + elif architecture == "Demucs (Beta)": + try: + infos.append( + i18n("Starting audio conversion... 
(This might take a moment)") + ) + yield "\n".join(infos) + inp_root, save_root_vocal, save_root_ins = [ + x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + for x in [inp_root, save_root_vocal, save_root_ins] + ] + + if inp_root != "": + paths = [ + os.path.join(inp_root, name) + for root, _, files in os.walk(inp_root, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == inp_root + ] + else: + paths = [path.name for path in paths] + + # Loop through the audio files and separate sources + for path in paths: + input_audio_path = os.path.join(inp_root, path) + filename_without_extension = os.path.splitext( + os.path.basename(input_audio_path) + )[0] + _output_dir = os.path.join(tmp, model_name, filename_without_extension) + vocals = os.path.join(_output_dir, "vocals.wav") + no_vocals = os.path.join(_output_dir, "no_vocals.wav") + + os.makedirs(tmp, exist_ok=True) + + if torch.cuda.is_available(): + cpu_insted = "" + else: + cpu_insted = "-d cpu" + print(cpu_insted) + + # Use with os.system to separate audio sources becuase at invoking from the command line it is faster than invoking from python + os.system( + f"python -m .separate --two-stems=vocals -n {model_name} {cpu_insted} {input_audio_path} -o {tmp}" + ) + + # Move vocals and no_vocals to the output directory assets/audios for the vocal and assets/audios/audio-others for the instrumental + shutil.move(vocals, save_root_vocal) + shutil.move(no_vocals, save_root_ins) + + # And now rename the vocals and no vocals with the name of the input audio file and the suffix vocals or instrumental + os.rename( + os.path.join(save_root_vocal, "vocals.wav"), + os.path.join( + save_root_vocal, f"{filename_without_extension}_vocals.wav" + ), + ) + os.rename( + os.path.join(save_root_ins, "no_vocals.wav"), + os.path.join( + save_root_ins, f"{filename_without_extension}_instrumental.wav" + ), + ) + + # Remove the temporary directory + os.rmdir(tmp, model_name) + + infos.append(f"{os.path.basename(input_audio_path)}->Success") + yield "\n".join(infos) + + except: + infos.append(traceback.format_exc()) + yield "\n".join(infos) + + +def change_choices(): + names = [ + os.path.join(root, file) + for root, _, files in os.walk(weight_root) + for file in files + if file.endswith((".pth", ".onnx")) + ] + indexes_list = [ + os.path.join(root, name) + for root, _, files in os.walk(index_root, topdown=False) + for name in files + if name.endswith(".index") and "trained" not in name + ] + audio_paths = [ + os.path.join(root, name) + for root, _, files in os.walk(audio_root, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == audio_root + ] + + return ( + {"choices": sorted(names), "__type__": "update"}, + {"choices": sorted(indexes_list), "__type__": "update"}, + {"choices": sorted(audio_paths), "__type__": "update"}, + ) + + +def change_choices2(): + names = [ + os.path.join(root, file) + for root, _, files in os.walk(weight_root) + for file in files + if file.endswith((".pth", ".onnx")) + ] + indexes_list = [ + os.path.join(root, name) + for root, _, files in os.walk(index_root, topdown=False) + for name in files + if name.endswith(".index") and "trained" not in name + ] + + return ( + {"choices": sorted(names), "__type__": "update"}, + {"choices": sorted(indexes_list), "__type__": "update"}, + ) + + +def clean(): + return {"value": "", "__type__": "update"} + + +def export_onnx(): + from lib.infer.modules.onnx.export import export_onnx as eo + + eo() + + +sr_dict = { + "32k": 32000, + 
"40k": 40000, + "48k": 48000, +} + + +def if_done(done, p): + while 1: + if p.poll() is None: + sleep(0.5) + else: + break + done[0] = True + + +def if_done_multi(done, ps): + while 1: + # poll==None代表进程未结束 + # 只要有一个进程未结束都不停 + flag = 1 + for p in ps: + if p.poll() is None: + flag = 0 + sleep(0.5) + break + if flag == 1: + break + done[0] = True + + +def formant_enabled( + cbox, qfrency, tmbre, frmntapply, formantpreset, formant_refresh_button +): + if cbox: + DoFormant = True + CSVutil( + "lib/csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre + ) + + # print(f"is checked? - {cbox}\ngot {DoFormant}") + + return ( + {"value": True, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + ) + + else: + DoFormant = False + CSVutil( + "lib/csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre + ) + + # print(f"is checked? - {cbox}\ngot {DoFormant}") + return ( + {"value": False, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + ) + + +def formant_apply(qfrency, tmbre): + Quefrency = qfrency + Timbre = tmbre + DoFormant = True + CSVutil("lib/csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) + + return ( + {"value": Quefrency, "__type__": "update"}, + {"value": Timbre, "__type__": "update"}, + ) + + +def update_fshift_presets(preset, qfrency, tmbre): + if preset: + with open(preset, "r") as p: + content = p.readlines() + qfrency, tmbre = content[0].strip(), content[1] + + formant_apply(qfrency, tmbre) + else: + qfrency, tmbre = preset_apply(preset, qfrency, tmbre) + + return ( + {"choices": get_fshift_presets(), "__type__": "update"}, + {"value": qfrency, "__type__": "update"}, + {"value": tmbre, "__type__": "update"}, + ) + + +def preprocess_dataset(trainset_dir, exp_dir, sr, n_p, dataset_path): + if not dataset_path.strip() == "": + trainset_dir = dataset_path + sr = sr_dict[sr] + os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) + f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w") + f.close() + per = 3.0 if config.is_half else 3.7 + cmd = ( + '"%s" lib/infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' + % ( + config.python_cmd, + trainset_dir, + sr, + n_p, + now_dir, + exp_dir, + config.noparallel, + per, + ) + ) + logger.info(cmd) + p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir + ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + done = [False] + threading.Thread( + target=if_done, + args=( + done, + p, + ), + ).start() + while 1: + with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: + yield (f.read()) + sleep(1) + if done[0]: + break + with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: + log = f.read() + logger.info(log) + yield log + + +def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl): + gpus_rmvpe = gpus + gpus = gpus.split("-") + os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) + f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w") + f.close() + if if_f0: + if f0method != "rmvpe_gpu": + cmd = ( + '"%s" 
+def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
+    gpus_rmvpe = gpus
+    gpus = gpus.split("-")
+    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
+    f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
+    f.close()
+    if if_f0:
+        if f0method != "rmvpe_gpu":
+            cmd = (
+                '"%s" lib/infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s %s'
+                % (config.python_cmd, now_dir, exp_dir, n_p, f0method, RQuote(echl))
+            )
+            logger.info(cmd)
+            p = Popen(
+                cmd, shell=True, cwd=now_dir
+            )  # , stdin=PIPE, stdout=PIPE,stderr=PIPE
+            # Gradio only returns a popen read after the process finishes, so
+            # progress is streamed by polling the log file on a timer instead
+            done = [False]
+            threading.Thread(
+                target=if_done,
+                args=(
+                    done,
+                    p,
+                ),
+            ).start()
+        else:
+            if gpus_rmvpe != "-":
+                gpus_rmvpe = gpus_rmvpe.split("-")
+                leng = len(gpus_rmvpe)
+                ps = []
+                for idx, n_g in enumerate(gpus_rmvpe):
+                    cmd = (
+                        '"%s" lib/infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s '
+                        % (
+                            config.python_cmd,
+                            leng,
+                            idx,
+                            n_g,
+                            now_dir,
+                            exp_dir,
+                            config.is_half,
+                        )
+                    )
+                    logger.info(cmd)
+                    p = Popen(
+                        cmd, shell=True, cwd=now_dir
+                    )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
+                    ps.append(p)
+                # Same log-polling workaround as above, waiting on all workers
+                done = [False]
+                threading.Thread(
+                    target=if_done_multi,  #
+                    args=(
+                        done,
+                        ps,
+                    ),
+                ).start()
+            else:
+                cmd = (
+                    config.python_cmd
+                    + ' lib/infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" '
+                    % (
+                        now_dir,
+                        exp_dir,
+                    )
+                )
+                logger.info(cmd)
+                p = Popen(
+                    cmd, shell=True, cwd=now_dir
+                )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
+                p.wait()
+                done = [True]
+        while 1:
+            with open(
+                "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
+            ) as f:
+                yield (f.read())
+            sleep(1)
+            if done[0]:
+                break
+        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
+            log = f.read()
+        logger.info(log)
+        yield log
+    #### spawn a separate process for each part
+    """
+    n_part=int(sys.argv[1])
+    i_part=int(sys.argv[2])
+    i_gpu=sys.argv[3]
+    exp_dir=sys.argv[4]
+    os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
+    """
+    leng = len(gpus)
+    ps = []
+    for idx, n_g in enumerate(gpus):
+        cmd = (
+            '"%s" lib/infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s %s'
+            % (
+                config.python_cmd,
+                config.device,
+                leng,
+                idx,
+                n_g,
+                now_dir,
+                exp_dir,
+                version19,
+                config.is_half,
+            )
+        )
+        logger.info(cmd)
+        p = Popen(
+            cmd, shell=True, cwd=now_dir
+        )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
+        ps.append(p)
+    # Same gradio workaround: stream progress by polling the shared log file
+    done = [False]
+    threading.Thread(
+        target=if_done_multi,
+        args=(
+            done,
+            ps,
+        ),
+    ).start()
+    while 1:
+        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
+            yield (f.read())
+        sleep(1)
+        if done[0]:
+            break
+    with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
+        log = f.read()
+    logger.info(log)
+    yield log
+
+
+def get_pretrained_models(path_str, f0_str, sr2):
+    if_pretrained_generator_exist = os.access(
+        "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
+    )
+    if_pretrained_discriminator_exist = os.access(
+        "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
+    )
+    if not if_pretrained_generator_exist:
+        logger.warning(
+            "assets/pretrained%s/%sG%s.pth does not exist, the pretrained model will not be used",
+            path_str,
+            f0_str,
+            sr2,
+        )
+    if not if_pretrained_discriminator_exist:
+        logger.warning(
+            "assets/pretrained%s/%sD%s.pth does not exist, the pretrained model will not be used",
+            path_str,
+            f0_str,
+            sr2,
+        )
+    return (
+        "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
+        if if_pretrained_generator_exist
+        else "",
+        "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
+        if if_pretrained_discriminator_exist
+        else "",
+    )
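+
+
+# For illustration: how the pretrained-model paths above resolve for a few
+# (version, f0, sample-rate) combinations. Purely documentary.
+#   get_pretrained_models("_v2", "f0", "40k") -> ("assets/pretrained_v2/f0G40k.pth",
+#                                                 "assets/pretrained_v2/f0D40k.pth")
+#   get_pretrained_models("", "", "48k")      -> ("assets/pretrained/G48k.pth",
+#                                                 "assets/pretrained/D48k.pth")
+# A missing file resolves to "", so training simply starts without that model.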
+def change_sr2(sr2, if_f0_3, version19):
+    path_str = "" if version19 == "v1" else "_v2"
+    f0_str = "f0" if if_f0_3 else ""
+    return get_pretrained_models(path_str, f0_str, sr2)
+
+
+def change_version19(sr2, if_f0_3, version19):
+    path_str = "" if version19 == "v1" else "_v2"
+    if sr2 == "32k" and version19 == "v1":
+        sr2 = "40k"
+    to_return_sr2 = (
+        {"choices": ["40k", "48k"], "__type__": "update", "value": sr2}
+        if version19 == "v1"
+        else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2}
+    )
+    f0_str = "f0" if if_f0_3 else ""
+    return (
+        *get_pretrained_models(path_str, f0_str, sr2),
+        to_return_sr2,
+    )
+
+
+def change_f0(if_f0_3, sr2, version19):  # f0method8,pretrained_G14,pretrained_D15
+    path_str = "" if version19 == "v1" else "_v2"
+    return (
+        {"visible": if_f0_3, "__type__": "update"},
+        *get_pretrained_models(path_str, "f0", sr2),
+    )
+
+
+global log_interval
+
+
+def set_log_interval(exp_dir, batch_size12):
+    log_interval = 1
+    folder_path = os.path.join(exp_dir, "1_16k_wavs")
+
+    if os.path.isdir(folder_path):
+        wav_files_num = len(glob1(folder_path, "*.wav"))
+
+        if wav_files_num > 0:
+            log_interval = math.ceil(wav_files_num / batch_size12)
+            if log_interval > 1:
+                log_interval += 1
+
+    return log_interval
+
+
+global PID, PROCESS
+
+
+def click_train(
+    exp_dir1,
+    sr2,
+    if_f0_3,
+    spk_id5,
+    save_epoch10,
+    total_epoch11,
+    batch_size12,
+    if_save_latest13,
+    pretrained_G14,
+    pretrained_D15,
+    gpus16,
+    if_cache_gpu17,
+    if_save_every_weights18,
+    version19,
+):
+    CSVutil("lib/csvdb/stop.csv", "w+", "formanting", False)
+    # generate the training filelist
+    exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
+    os.makedirs(exp_dir, exist_ok=True)
+    gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
+    feature_dir = (
+        "%s/3_feature256" % (exp_dir)
+        if version19 == "v1"
+        else "%s/3_feature768" % (exp_dir)
+    )
+    log_interval = set_log_interval(exp_dir, batch_size12)
+
+    if if_f0_3:
+        f0_dir = "%s/2a_f0" % (exp_dir)
+        f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
+        names = (
+            set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
+            & set([name.split(".")[0] for name in os.listdir(feature_dir)])
+            & set([name.split(".")[0] for name in os.listdir(f0_dir)])
+            & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
+        )
+    else:
+        names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
+            [name.split(".")[0] for name in os.listdir(feature_dir)]
+        )
+    opt = []
+    for name in names:
+        if if_f0_3:
+            opt.append(
+                "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
+                % (
+                    gt_wavs_dir.replace("\\", "\\\\"),
+                    name,
+                    feature_dir.replace("\\", "\\\\"),
+                    name,
+                    f0_dir.replace("\\", "\\\\"),
+                    name,
+                    f0nsf_dir.replace("\\", "\\\\"),
+                    name,
+                    spk_id5,
+                )
+            )
+        else:
+            opt.append(
+                "%s/%s.wav|%s/%s.npy|%s"
+                % (
+                    gt_wavs_dir.replace("\\", "\\\\"),
+                    name,
+                    feature_dir.replace("\\", "\\\\"),
+                    name,
+                    spk_id5,
+                )
+            )
+    fea_dim = 256 if version19 == "v1" else 768
+    if if_f0_3:
+        for _ in range(2):
+            opt.append(
+                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
+                % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
+            )
+    else:
+        for _ in range(2):
+            opt.append(
+                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
+                % (now_dir, sr2, now_dir, fea_dim, spk_id5)
+            )
+    shuffle(opt)
+    with open("%s/filelist.txt" % exp_dir, "w") as f:
+        f.write("\n".join(opt))
+    logger.debug("Write filelist done")
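+    # Each filelist line is "gt_wav|feature_npy|f0_npy|f0nsf_npy|speaker_id"
+    # (or "gt_wav|feature_npy|speaker_id" without pitch guidance), matching the
+    # opt entries assembled above; two mute entries are appended as padding.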
logger.info("Use gpus: %s", str(gpus16)) + if pretrained_G14 == "": + logger.info("No pretrained Generator") + if pretrained_D15 == "": + logger.info("No pretrained Discriminator") + if version19 == "v1" or sr2 == "40k": + config_path = "v1/%s.json" % sr2 + else: + config_path = "v2/%s.json" % sr2 + config_save_path = os.path.join(exp_dir, "config.json") + if not pathlib.Path(config_save_path).exists(): + with open(config_save_path, "w", encoding="utf-8") as f: + json.dump( + config.json_config[config_path], + f, + ensure_ascii=False, + indent=4, + sort_keys=True, + ) + f.write("\n") + if gpus16: + cmd = ( + '"%s" lib/infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s' + % ( + config.python_cmd, + exp_dir1, + sr2, + 1 if if_f0_3 else 0, + batch_size12, + gpus16, + total_epoch11, + save_epoch10, + "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "", + "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "", + 1 if if_save_latest13 == True else 0, + 1 if if_cache_gpu17 == True else 0, + 1 if if_save_every_weights18 == True else 0, + version19, + ) + ) + else: + cmd = ( + '"%s" lib/infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s' + % ( + config.python_cmd, + exp_dir1, + sr2, + 1 if if_f0_3 else 0, + batch_size12, + total_epoch11, + save_epoch10, + "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "", + "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "", + 1 if if_save_latest13 == True else 0, + 1 if if_cache_gpu17 == True else 0, + 1 if if_save_every_weights18 == True else 0, + version19, + ) + ) + logger.info(cmd) + global p + p = Popen(cmd, shell=True, cwd=now_dir) + global PID + PID = p.pid + + p.wait() + + return ( + i18n("Training is done, check train.log"), + {"visible": False, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + ) + + +def train_index(exp_dir1, version19): + exp_dir = os.path.join(now_dir, "logs", exp_dir1) + # exp_dir = "logs/%s" % (exp_dir1) + os.makedirs(exp_dir, exist_ok=True) + feature_dir = ( + "%s/3_feature256" % (exp_dir) + if version19 == "v1" + else "%s/3_feature768" % (exp_dir) + ) + if not os.path.exists(feature_dir): + return "Please do the feature extraction first" + listdir_res = list(os.listdir(feature_dir)) + if len(listdir_res) == 0: + return "Please perform the feature extraction first" + infos = [] + npys = [] + for name in sorted(listdir_res): + phone = np.load("%s/%s" % (feature_dir, name)) + npys.append(phone) + big_npy = np.concatenate(npys, 0) + big_npy_idx = np.arange(big_npy.shape[0]) + np.random.shuffle(big_npy_idx) + big_npy = big_npy[big_npy_idx] + if big_npy.shape[0] > 2e5: + infos.append("Trying doing kmeans %s shape to 10k centers." 
% big_npy.shape[0]) + yield "\n".join(infos) + try: + big_npy = ( + MiniBatchKMeans( + n_clusters=10000, + verbose=True, + batch_size=256 * config.n_cpu, + compute_labels=False, + init="random", + ) + .fit(big_npy) + .cluster_centers_ + ) + except: + info = traceback.format_exc() + logger.info(info) + infos.append(info) + yield "\n".join(infos) + + np.save("%s/total_fea.npy" % exp_dir, big_npy) + n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) + # infos.append("%s,%s" % (big_npy.shape, n_ivf)) + yield "\n".join(infos) + index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) + # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf) + infos.append("Generating training file...") + print("Generating training file...") + yield "\n".join(infos) + index_ivf = faiss.extract_index_ivf(index) # + index_ivf.nprobe = 1 + index.train(big_npy) + faiss.write_index( + index, + "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index" + % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), + ) + + infos.append("Generating adding file...") + print("Generating adding file...") + yield "\n".join(infos) + batch_size_add = 8192 + for i in range(0, big_npy.shape[0], batch_size_add): + index.add(big_npy[i : i + batch_size_add]) + faiss.write_index( + index, + "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" + % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), + ) + infos.append("Files generated successfully!") + print("Files generated successfully!") + + +def change_info_(ckpt_path): + if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")): + return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} + try: + with open( + ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r" + ) as f: + info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1]) + sr, f0 = info["sample_rate"], info["if_f0"] + version = "v2" if ("version" in info and info["version"] == "v2") else "v1" + return sr, str(f0), version + except: + traceback.print_exc() + return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} + + +F0GPUVisible = config.dml == False + + +import re as regex +import scipy.io.wavfile as wavfile + +cli_current_page = "HOME" + + +def cli_split_command(com): + exp = r'(?:(?<=\s)|^)"(.*?)"(?=\s|$)|(\S+)' + split_array = regex.findall(exp, com) + split_array = [group[0] if group[0] else group[1] for group in split_array] + return split_array + + +def execute_generator_function(genObject): + for _ in genObject: + pass + + +def cli_infer(com): + # get VC first + com = cli_split_command(com) + model_name = com[0] + source_audio_path = com[1] + output_file_name = com[2] + feature_index_path = com[3] + f0_file = None # Not Implemented Yet + + # Get parameters for inference + speaker_id = int(com[4]) + transposition = float(com[5]) + f0_method = com[6] + crepe_hop_length = int(com[7]) + harvest_median_filter = int(com[8]) + resample = int(com[9]) + mix = float(com[10]) + feature_ratio = float(com[11]) + protection_amnt = float(com[12]) + protect1 = 0.5 + + if com[14] == "False" or com[14] == "false": + DoFormant = False + Quefrency = 0.0 + Timbre = 0.0 + CSVutil( + "lib/csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre + ) + + else: + DoFormant = True + Quefrency = float(com[15]) + Timbre = float(com[16]) + CSVutil( + "lib/csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre + ) + + print("Applio-RVC-Fork Infer-CLI: Starting 
the inference...") + vc_data = vc.get_vc(model_name, protection_amnt, protect1) + print(vc_data) + print("Applio-RVC-Fork Infer-CLI: Performing inference...") + conversion_data = vc.vc_single( + speaker_id, + source_audio_path, + transposition, + f0_file, + f0_method, + feature_index_path, + feature_index_path, + feature_ratio, + harvest_median_filter, + resample, + mix, + protection_amnt, + crepe_hop_length, + ) + if "Success." in conversion_data[0]: + print( + "Applio-RVC-Fork Infer-CLI: Inference succeeded. Writing to %s/%s..." + % ("assets", "audios", "audio-outputs", output_file_name) + ) + wavfile.write( + "%s/%s" % ("assets", "audios", "audio-outputs", output_file_name), + conversion_data[1][0], + conversion_data[1][1], + ) + print( + "Applio-RVC-Fork Infer-CLI: Finished! Saved output to %s/%s" + % ("assets", "audios", "audio-outputs", output_file_name) + ) + else: + print("Applio-RVC-Fork Infer-CLI: Inference failed. Here's the traceback: ") + print(conversion_data[0]) + + +def cli_pre_process(com): + com = cli_split_command(com) + model_name = com[0] + trainset_directory = com[1] + sample_rate = com[2] + num_processes = int(com[3]) + + print("Applio-RVC-Fork Pre-process: Starting...") + generator = preprocess_dataset( + trainset_directory, model_name, sample_rate, num_processes + ) + execute_generator_function(generator) + print("Applio-RVC-Fork Pre-process: Finished") + + +def cli_extract_feature(com): + com = cli_split_command(com) + model_name = com[0] + gpus = com[1] + num_processes = int(com[2]) + has_pitch_guidance = True if (int(com[3]) == 1) else False + f0_method = com[4] + crepe_hop_length = int(com[5]) + version = com[6] # v1 or v2 + + print("Applio-RVC-CLI: Extract Feature Has Pitch: " + str(has_pitch_guidance)) + print("Applio-RVC-CLI: Extract Feature Version: " + str(version)) + print("Applio-RVC-Fork Feature Extraction: Starting...") + generator = extract_f0_feature( + gpus, + num_processes, + f0_method, + has_pitch_guidance, + model_name, + version, + crepe_hop_length, + ) + execute_generator_function(generator) + print("Applio-RVC-Fork Feature Extraction: Finished") + + +def cli_train(com): + com = cli_split_command(com) + model_name = com[0] + sample_rate = com[1] + has_pitch_guidance = True if (int(com[2]) == 1) else False + speaker_id = int(com[3]) + save_epoch_iteration = int(com[4]) + total_epoch = int(com[5]) # 10000 + batch_size = int(com[6]) + gpu_card_slot_numbers = com[7] + if_save_latest = True if (int(com[8]) == 1) else False + if_cache_gpu = True if (int(com[9]) == 1) else False + if_save_every_weight = True if (int(com[10]) == 1) else False + version = com[11] + + pretrained_base = "pretrained/" if version == "v1" else "pretrained_v2/" + + g_pretrained_path = "%sf0G%s.pth" % (pretrained_base, sample_rate) + d_pretrained_path = "%sf0D%s.pth" % (pretrained_base, sample_rate) + + print("Applio-RVC-Fork Train-CLI: Training...") + click_train( + model_name, + sample_rate, + has_pitch_guidance, + speaker_id, + save_epoch_iteration, + total_epoch, + batch_size, + if_save_latest, + g_pretrained_path, + d_pretrained_path, + gpu_card_slot_numbers, + if_cache_gpu, + if_save_every_weight, + version, + ) + + +def cli_train_feature(com): + com = cli_split_command(com) + model_name = com[0] + version = com[1] + print("Applio-RVC-Fork Train Feature Index-CLI: Training... 
Please wait") + generator = train_index(model_name, version) + execute_generator_function(generator) + print("Applio-RVC-Fork Train Feature Index-CLI: Done!") + + +def cli_extract_model(com): + com = cli_split_command(com) + model_path = com[0] + save_name = com[1] + sample_rate = com[2] + has_pitch_guidance = com[3] + info = com[4] + version = com[5] + extract_small_model_process = extract_small_model( + model_path, save_name, sample_rate, has_pitch_guidance, info, version + ) + if extract_small_model_process == "Success.": + print("Applio-RVC-Fork Extract Small Model: Success!") + else: + print(str(extract_small_model_process)) + print("Applio-RVC-Fork Extract Small Model: Failed!") + + +def preset_apply(preset, qfer, tmbr): + if str(preset) != "": + with open(str(preset), "r") as p: + content = p.readlines() + qfer, tmbr = content[0].split("\n")[0], content[1] + formant_apply(qfer, tmbr) + else: + pass + return ( + {"value": qfer, "__type__": "update"}, + {"value": tmbr, "__type__": "update"}, + ) + + +def print_page_details(): + if cli_current_page == "HOME": + print( + "\n go home : Takes you back to home with a navigation list." + "\n go infer : Takes you to inference command execution." + "\n go pre-process : Takes you to training step.1) pre-process command execution." + "\n go extract-feature : Takes you to training step.2) extract-feature command execution." + "\n go train : Takes you to training step.3) being or continue training command execution." + "\n go train-feature : Takes you to the train feature index command execution." + "\n go extract-model : Takes you to the extract small model command execution." + ) + elif cli_current_page == "INFER": + print( + "\n arg 1) model name with .pth in ./weights: mi-test.pth" + "\n arg 2) source audio path: myFolder\\MySource.wav" + "\n arg 3) output file name to be placed in './audio-outputs': MyTest.wav" + "\n arg 4) feature index file path: logs/mi-test/added_IVF3042_Flat_nprobe_1.index" + "\n arg 5) speaker id: 0" + "\n arg 6) transposition: 0" + "\n arg 7) f0 method: harvest (pm, harvest, crepe, crepe-tiny, hybrid[x,x,x,x], mangio-crepe, mangio-crepe-tiny, rmvpe)" + "\n arg 8) crepe hop length: 160" + "\n arg 9) harvest median filter radius: 3 (0-7)" + "\n arg 10) post resample rate: 0" + "\n arg 11) mix volume envelope: 1" + "\n arg 12) feature index ratio: 0.78 (0-1)" + "\n arg 13) Voiceless Consonant Protection (Less Artifact): 0.33 (Smaller number = more protection. 
0.50 means Dont Use.)" + "\n arg 14) Whether to formant shift the inference audio before conversion: False (if set to false, you can ignore setting the quefrency and timbre values for formanting)" + "\n arg 15)* Quefrency for formanting: 8.0 (no need to set if arg14 is False/false)" + "\n arg 16)* Timbre for formanting: 1.2 (no need to set if arg14 is False/false) \n" + "\nExample: mi-test.pth saudio/Sidney.wav myTest.wav logs/mi-test/added_index.index 0 -2 harvest 160 3 0 1 0.95 0.33 0.45 True 8.0 1.2" + ) + elif cli_current_page == "PRE-PROCESS": + print( + "\n arg 1) Model folder name in ./logs: mi-test" + "\n arg 2) Trainset directory: mydataset (or) E:\\my-data-set" + "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" + "\n arg 4) Number of CPU threads to use: 8 \n" + "\nExample: mi-test mydataset 40k 24" + ) + elif cli_current_page == "EXTRACT-FEATURE": + print( + "\n arg 1) Model folder name in ./logs: mi-test" + "\n arg 2) Gpu card slot: 0 (0-1-2 if using 3 GPUs)" + "\n arg 3) Number of CPU threads to use: 8" + "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" + "\n arg 5) f0 Method: harvest (pm, harvest, dio, crepe)" + "\n arg 6) Crepe hop length: 128" + "\n arg 7) Version for pre-trained models: v2 (use either v1 or v2)\n" + "\nExample: mi-test 0 24 1 harvest 128 v2" + ) + elif cli_current_page == "TRAIN": + print( + "\n arg 1) Model folder name in ./logs: mi-test" + "\n arg 2) Sample rate: 40k (32k, 40k, 48k)" + "\n arg 3) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" + "\n arg 4) speaker id: 0" + "\n arg 5) Save epoch iteration: 50" + "\n arg 6) Total epochs: 10000" + "\n arg 7) Batch size: 8" + "\n arg 8) Gpu card slot: 0 (0-1-2 if using 3 GPUs)" + "\n arg 9) Save only the latest checkpoint: 0 (0 for no, 1 for yes)" + "\n arg 10) Whether to cache training set to vram: 0 (0 for no, 1 for yes)" + "\n arg 11) Save extracted small model every generation?: 0 (0 for no, 1 for yes)" + "\n arg 12) Model architecture version: v2 (use either v1 or v2)\n" + "\nExample: mi-test 40k 1 0 50 10000 8 0 0 0 0 v2" + ) + elif cli_current_page == "TRAIN-FEATURE": + print( + "\n arg 1) Model folder name in ./logs: mi-test" + "\n arg 2) Model architecture version: v2 (use either v1 or v2)\n" + "\nExample: mi-test v2" + ) + elif cli_current_page == "EXTRACT-MODEL": + print( + "\n arg 1) Model Path: logs/mi-test/G_168000.pth" + "\n arg 2) Model save name: MyModel" + "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" + "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" + '\n arg 5) Model information: "My Model"' + "\n arg 6) Model architecture version: v2 (use either v1 or v2)\n" + '\nExample: logs/mi-test/G_168000.pth MyModel 40k 1 "Created by Cole Mangio" v2' + ) + + +def change_page(page): + global cli_current_page + cli_current_page = page + return 0 + + +def execute_command(com): + if com == "go home": + return change_page("HOME") + elif com == "go infer": + return change_page("INFER") + elif com == "go pre-process": + return change_page("PRE-PROCESS") + elif com == "go extract-feature": + return change_page("EXTRACT-FEATURE") + elif com == "go train": + return change_page("TRAIN") + elif com == "go train-feature": + return change_page("TRAIN-FEATURE") + elif com == "go extract-model": + return change_page("EXTRACT-MODEL") + else: + if com[:3] == "go ": + print("page '%s' does not exist!" 
% com[3:]) + return 0 + + if cli_current_page == "INFER": + cli_infer(com) + elif cli_current_page == "PRE-PROCESS": + cli_pre_process(com) + elif cli_current_page == "EXTRACT-FEATURE": + cli_extract_feature(com) + elif cli_current_page == "TRAIN": + cli_train(com) + elif cli_current_page == "TRAIN-FEATURE": + cli_train_feature(com) + elif cli_current_page == "EXTRACT-MODEL": + cli_extract_model(com) + + +def cli_navigation_loop(): + while True: + print("\nYou are currently in '%s':" % cli_current_page) + print_page_details() + command = input("%s: " % cli_current_page) + try: + execute_command(command) + except: + print(traceback.format_exc()) + + +if config.is_cli: + print("\n\nApplio-RVC-Fork CLI\n") + print( + "Welcome to the CLI version of RVC. Please read the documentation on README.MD to understand how to use this app.\n" + ) + cli_navigation_loop() + + +def switch_pitch_controls(f0method0): + is_visible = f0method0 != "rmvpe" + + if rvc_globals.NotesOrHertz: + return ( + {"visible": False, "__type__": "update"}, + {"visible": is_visible, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": is_visible, "__type__": "update"}, + ) + else: + return ( + {"visible": is_visible, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + {"visible": is_visible, "__type__": "update"}, + {"visible": False, "__type__": "update"}, + ) + + +def match_index(sid0: str) -> tuple: + sid0strip = re.sub(r"\.pth|\.onnx$", "", sid0) + sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory + + # Check if the sid0strip has the specific ending format _eXXX_sXXX + if re.match(r".+_e\d+_s\d+$", sid0name): + base_model_name = sid0name.rsplit("_", 2)[0] + else: + base_model_name = sid0name + + sid_directory = os.path.join(index_root, base_model_name) + directories_to_search = [sid_directory] if os.path.exists(sid_directory) else [] + directories_to_search.append(index_root) + + matching_index_files = [] + + for directory in directories_to_search: + for filename in os.listdir(directory): + if filename.endswith(".index") and "trained" not in filename: + # Condition to match the name + name_match = any( + name.lower() in filename.lower() + for name in [sid0name, base_model_name] + ) + + # If in the specific directory, it's automatically a match + folder_match = directory == sid_directory + + if name_match or folder_match: + index_path = os.path.join(directory, filename) + if index_path in indexes_list: + matching_index_files.append( + ( + index_path, + os.path.getsize(index_path), + " " not in filename, + ) + ) + + if matching_index_files: + # Sort by favoring files without spaces and by size (largest size first) + matching_index_files.sort(key=lambda x: (-x[2], -x[1])) + best_match_index_path = matching_index_files[0][0] + return best_match_index_path, best_match_index_path + + return "", "" + + +def stoptraining(mim): + if int(mim) == 1: + CSVutil("lib/csvdb/stop.csv", "w+", "stop", "True") + # p.terminate() + # p.kill() + try: + os.kill(PID, signal.SIGTERM) + except Exception as e: + print(f"Couldn't click due to {e}") + pass + else: + pass + + return ( + {"visible": False, "__type__": "update"}, + {"visible": True, "__type__": "update"}, + ) + + +weights_dir = "weights/" + + +def note_to_hz(note_name): + SEMITONES = { + "C": -9, + "C#": -8, + "D": -7, + "D#": -6, + "E": -5, + "F": -4, + "F#": -3, + "G": -2, + "G#": -1, + "A": 0, + "A#": 1, + "B": 2, + } + pitch_class, octave = note_name[:-1], int(note_name[-1]) + semitone = 
SEMITONES[pitch_class]
+    note_number = 12 * (octave - 4) + semitone
+    frequency = 440.0 * (2.0 ** (1.0 / 12)) ** note_number
+    return frequency
+
+
+def save_to_wav(record_button):
+    if record_button is None:
+        pass
+    else:
+        path_to_file = record_button
+        new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".wav"
+        target_path = os.path.join("assets", "audios", os.path.basename(new_name))
+
+        shutil.move(path_to_file, target_path)
+        return target_path
+
+
+def save_to_wav2_edited(dropbox):
+    if dropbox is None:
+        pass
+    else:
+        file_path = dropbox.name
+        target_path = os.path.join("assets", "audios", os.path.basename(file_path))
+
+        if os.path.exists(target_path):
+            os.remove(target_path)
+            print("Replacing old dropdown file...")
+
+        shutil.move(file_path, target_path)
+    return
+
+
+def save_to_wav2(dropbox):
+    file_path = dropbox.name
+    target_path = os.path.join("assets", "audios", os.path.basename(file_path))
+
+    if os.path.exists(target_path):
+        os.remove(target_path)
+        print("Replacing old dropdown file...")
+
+    shutil.move(file_path, target_path)
+    return target_path
+
+
+import lib.tools.loader_themes as loader_themes
+
+# Resolve the UI theme: select_theme() is expected to persist any SELECT_THEME
+# environment choice that load_json() then reads back; fall back to a default
+# theme if nothing is stored (the fallback previously got overwritten)
+loader_themes.select_theme(os.getenv("SELECT_THEME"))
+my_applio = loader_themes.load_json()
+if not my_applio:
+    my_applio = "JohnSmith9982/small_and_pretty"
+
+
+def GradioSetup():
+    default_weight = names[0] if names else ""
+
+    with gr.Blocks(theme=my_applio, title="Applio-RVC-Fork") as app:
+        gr.HTML("

<h1> 🍏 Applio-RVC-Fork </h1>

") + with gr.Tabs(): + with gr.TabItem(i18n("Model Inference")): + with gr.Row(): + sid0 = gr.Dropdown( + label=i18n("Inferencing voice:"), + choices=sorted(names), + value=default_weight, + ) + best_match_index_path1, _ = match_index(sid0.value) + file_index2 = gr.Dropdown( + label=i18n( + "Auto-detect index path and select from the dropdown:" + ), + choices=get_indexes(), + value=best_match_index_path1, + interactive=True, + allow_custom_value=True, + ) + with gr.Column(): + refresh_button = gr.Button(i18n("Refresh"), variant="primary") + clean_button = gr.Button( + i18n("Unload voice to save GPU memory"), variant="primary" + ) + clean_button.click( + fn=lambda: ({"value": "", "__type__": "update"}), + inputs=[], + outputs=[sid0], + api_name="infer_clean", + ) + + with gr.TabItem(i18n("Single")): + with gr.Row(): + spk_item = gr.Slider( + minimum=0, + maximum=2333, + step=1, + label=i18n("Select Speaker/Singer ID:"), + value=0, + visible=False, + interactive=True, + ) + with gr.Row(): + with gr.Column(): # First column for audio-related inputs + dropbox = gr.File(label=i18n("Drag your audio here:")) + record_button = gr.Audio( + source="microphone", + label=i18n("Or record an audio:"), + type="filepath", + ) + + with gr.Column(): # Second column for pitch shift and other options + with gr.Column(): + input_audio1 = gr.Dropdown( + label=i18n( + "Auto detect audio path and select from the dropdown:" + ), + choices=sorted(audio_paths), + value="", + interactive=True, + ) + vc_transform0 = gr.Number( + label=i18n( + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):" + ), + value=0, + ) + + dropbox.upload( + fn=save_to_wav2, + inputs=[dropbox], + outputs=[input_audio1], + ) + record_button.change( + fn=save_to_wav, + inputs=[record_button], + outputs=[input_audio1], + ) + refresh_button.click( + fn=change_choices, + inputs=[], + outputs=[sid0, file_index2, input_audio1], + api_name="infer_refresh", + ) + # Create a checkbox for advanced settings + advanced_settings_checkbox = gr.Checkbox( + value=False, + label=i18n("Advanced Settings"), + interactive=True, + ) + + # Advanced settings container + with gr.Column( + visible=False + ) as advanced_settings: # Initially hidden + with gr.Row(label=i18n("Advanced Settings"), open=False): + with gr.Column(): + f0method0 = gr.Radio( + label=i18n( + "Select the pitch extraction algorithm:" + ), + choices=[ + "pm", + "harvest", + "dio", + "crepe", + "crepe-tiny", + "mangio-crepe", + "mangio-crepe-tiny", + "rmvpe", + "rmvpe+", + ], + value="rmvpe+", + interactive=True, + ) + format1_ = gr.Radio( + label=i18n("Export file format:"), + choices=["wav", "flac", "mp3", "m4a"], + value="wav", + interactive=True, + ) + + f0_autotune = gr.Checkbox( + label="Enable autotune", interactive=True + ) + split_audio = gr.Checkbox( + label="Split Audio (Better Results)", + interactive=True, + ) + + crepe_hop_length = gr.Slider( + minimum=1, + maximum=512, + step=1, + label=i18n( + "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate." 
+ ), + value=120, + interactive=True, + visible=False, + ) + + + minpitch_slider = gr.Slider( + label=i18n("Min pitch:"), + info=i18n( + "Specify minimal pitch for inference [HZ]" + ), + step=0.1, + minimum=1, + scale=0, + value=50, + maximum=16000, + interactive=True, + visible=(not rvc_globals.NotesOrHertz) + and (f0method0.value != "rmvpe"), + ) + minpitch_txtbox = gr.Textbox( + label=i18n("Min pitch:"), + info=i18n( + "Specify minimal pitch for inference [NOTE][OCTAVE]" + ), + placeholder="C5", + visible=(rvc_globals.NotesOrHertz) + and (f0method0.value != "rmvpe"), + interactive=True, + ) + + maxpitch_slider = gr.Slider( + label=i18n("Max pitch:"), + info=i18n("Specify max pitch for inference [HZ]"), + step=0.1, + minimum=1, + scale=0, + value=1100, + maximum=16000, + interactive=True, + visible=(not rvc_globals.NotesOrHertz) + and (f0method0.value != "rmvpe"), + ) + maxpitch_txtbox = gr.Textbox( + label=i18n("Max pitch:"), + info=i18n( + "Specify max pitch for inference [NOTE][OCTAVE]" + ), + placeholder="C6", + visible=(rvc_globals.NotesOrHertz) + and (f0method0.value != "rmvpe"), + interactive=True, + ) + + file_index1 = gr.Textbox( + label=i18n("Feature search database file path:"), + value="", + interactive=True, + ) + + f0_file = gr.File( + label=i18n( + "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:" + ) + ) + + f0method0.change( + fn=lambda radio: ( + { + "visible": radio + in ["mangio-crepe", "mangio-crepe-tiny"], + "__type__": "update", + } + ), + inputs=[f0method0], + outputs=[crepe_hop_length], + ) + + f0method0.change( + fn=switch_pitch_controls, + inputs=[f0method0], + outputs=[ + minpitch_slider, + minpitch_txtbox, + maxpitch_slider, + maxpitch_txtbox, + ], + ) + + with gr.Column(): + resample_sr0 = gr.Slider( + minimum=0, + maximum=48000, + label=i18n( + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:" + ), + value=0, + step=1, + interactive=True, + ) + rms_mix_rate0 = gr.Slider( + minimum=0, + maximum=1, + label=i18n( + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:" + ), + value=0.25, + interactive=True, + ) + protect0 = gr.Slider( + minimum=0, + maximum=0.5, + label=i18n( + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:" + ), + value=0.33, + step=0.01, + interactive=True, + ) + filter_radius0 = gr.Slider( + minimum=0, + maximum=7, + label=i18n( + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness." 
+ ), + value=3, + step=1, + interactive=True, + ) + index_rate1 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("Search feature ratio:"), + value=0.75, + interactive=True, + ) + formanting = gr.Checkbox( + value=bool(DoFormant), + label=i18n("Formant shift inference audio"), + info=i18n( + "Used for male to female and vice-versa conversions" + ), + interactive=True, + visible=True, + ) + + formant_preset = gr.Dropdown( + value="", + choices=get_fshift_presets(), + label=i18n("Browse presets for formanting"), + info=i18n( + "Presets are located in formantshiftcfg/ folder" + ), + visible=bool(DoFormant), + ) + + formant_refresh_button = gr.Button( + value="\U0001f504", + visible=bool(DoFormant), + variant="primary", + ) + + qfrency = gr.Slider( + value=Quefrency, + info=i18n("Default value is 1.0"), + label=i18n("Quefrency for formant shifting"), + minimum=0.0, + maximum=16.0, + step=0.1, + visible=bool(DoFormant), + interactive=True, + ) + + tmbre = gr.Slider( + value=Timbre, + info=i18n("Default value is 1.0"), + label=i18n("Timbre for formant shifting"), + minimum=0.0, + maximum=16.0, + step=0.1, + visible=bool(DoFormant), + interactive=True, + ) + frmntbut = gr.Button( + "Apply", variant="primary", visible=bool(DoFormant) + ) + + formant_preset.change( + fn=preset_apply, + inputs=[formant_preset, qfrency, tmbre], + outputs=[qfrency, tmbre], + ) + formanting.change( + fn=formant_enabled, + inputs=[ + formanting, + qfrency, + tmbre, + frmntbut, + formant_preset, + formant_refresh_button, + ], + outputs=[ + formanting, + qfrency, + tmbre, + frmntbut, + formant_preset, + formant_refresh_button, + ], + ) + frmntbut.click( + fn=formant_apply, + inputs=[qfrency, tmbre], + outputs=[qfrency, tmbre], + ) + formant_refresh_button.click( + fn=update_fshift_presets, + inputs=[formant_preset, qfrency, tmbre], + outputs=[formant_preset, qfrency, tmbre], + ) + + # Function to toggle advanced settings + def toggle_advanced_settings(checkbox): + return {"visible": checkbox, "__type__": "update"} + + # Attach the change event + advanced_settings_checkbox.change( + fn=toggle_advanced_settings, + inputs=[advanced_settings_checkbox], + outputs=[advanced_settings], + ) + + but0 = gr.Button(i18n("Convert"), variant="primary").style( + full_width=True + ) + + with gr.Row(): # Defines output info + output audio download after conversion + vc_output1 = gr.Textbox(label=i18n("Output information:")) + vc_output2 = gr.Audio( + label=i18n( + "Export audio (click on the three dots in the lower right corner to download)" + ) + ) + + with gr.Group(): # I think this defines the big convert button + with gr.Row(): + but0.click( + vc.vc_single, + [ + spk_item, + input_audio1, + vc_transform0, + f0_file, + f0method0, + file_index1, + file_index2, + index_rate1, + filter_radius0, + resample_sr0, + rms_mix_rate0, + protect0, + format1_, + split_audio, + crepe_hop_length, + minpitch_slider, + minpitch_txtbox, + maxpitch_slider, + maxpitch_txtbox, + f0_autotune, + ], + [vc_output1, vc_output2], + api_name="infer_convert", + ) + + with gr.TabItem(i18n("Batch")): # Dont Change + with gr.Row(): + with gr.Column(): + vc_transform1 = gr.Number( + label=i18n( + "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):" + ), + value=0, + ) + opt_input = gr.Textbox( + label=i18n("Specify output folder:"), value="assets/audios/audio-outputs" + ) + with gr.Column(): + dir_input = gr.Textbox( + label=i18n( + "Enter the path of the audio folder to be processed (copy it from the address bar of the file 
manager):" + ), + value=os.path.join(now_dir, "assets", "audios"), + ) + sid0.select( + fn=match_index, + inputs=[sid0], + outputs=[file_index2], + ) + + with gr.Column(): + inputs = gr.File( + file_count="multiple", + label=i18n( + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder." + ), + ) + with gr.Row(): + with gr.Column(): + # Create a checkbox for advanced batch settings + advanced_settings_batch_checkbox = gr.Checkbox( + value=False, + label=i18n("Advanced Settings"), + interactive=True, + ) + + # Advanced batch settings container + with gr.Row( + visible=False + ) as advanced_settings_batch: # Initially hidden + with gr.Row( + label=i18n("Advanced Settings"), open=False + ): + with gr.Column(): + file_index3 = gr.Textbox( + label=i18n( + "Feature search database file path:" + ), + value="", + interactive=True, + ) + f0method1 = gr.Radio( + label=i18n( + "Select the pitch extraction algorithm:" + ), + choices=[ + "pm", + "harvest", + "dio", + "crepe", + "crepe-tiny", + "mangio-crepe", + "mangio-crepe-tiny", + "rmvpe", + ], + value="rmvpe", + interactive=True, + ) + + format1 = gr.Radio( + label=i18n("Export file format:"), + choices=["wav", "flac", "mp3", "m4a"], + value="wav", + interactive=True, + ) + + with gr.Column(): + resample_sr1 = gr.Slider( + minimum=0, + maximum=48000, + label=i18n( + "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:" + ), + value=0, + step=1, + interactive=True, + ) + rms_mix_rate1 = gr.Slider( + minimum=0, + maximum=1, + label=i18n( + "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:" + ), + value=1, + interactive=True, + ) + protect1 = gr.Slider( + minimum=0, + maximum=0.5, + label=i18n( + "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:" + ), + value=0.33, + step=0.01, + interactive=True, + ) + filter_radius1 = gr.Slider( + minimum=0, + maximum=7, + label=i18n( + "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness." 
+ ), + value=3, + step=1, + interactive=True, + ) + + index_rate2 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("Search feature ratio:"), + value=0.75, + interactive=True, + ) + f0_autotune = gr.Checkbox( + label="Enable autotune", interactive=True + ) + hop_length = gr.Slider( + minimum=1, + maximum=512, + step=1, + label=i18n( + "Hop Length (lower hop lengths take more time to infer but are more pitch accurate):" + ), + value=120, + interactive=True, + visible=False, + ) + + but1 = gr.Button(i18n("Convert"), variant="primary") + vc_output3 = gr.Textbox(label=i18n("Output information:")) + but1.click( + vc.vc_multi, + [ + spk_item, + dir_input, + opt_input, + inputs, + vc_transform1, + f0method1, + file_index3, + file_index2, + index_rate2, + filter_radius1, + resample_sr1, + rms_mix_rate1, + protect1, + format1, + hop_length, + minpitch_slider, + minpitch_txtbox, + maxpitch_slider, + maxpitch_txtbox, + f0_autotune, + ], + [vc_output3], + api_name="infer_convert_batch", + ) + + sid0.change( + fn=vc.get_vc, + inputs=[sid0, protect0, protect1], + outputs=[spk_item, protect0, protect1], + api_name="infer_change_voice", + ) + if not sid0.value == "": + spk_item, protect0, protect1 = vc.get_vc( + sid0.value, protect0, protect1 + ) + + # spk_item, protect0, protect1 = vc.get_vc(sid0.value, protect0, protect1) + + # Function to toggle advanced settings + def toggle_advanced_settings_batch(checkbox): + return {"visible": checkbox, "__type__": "update"} + + # Attach the change event + advanced_settings_batch_checkbox.change( + fn=toggle_advanced_settings_batch, + inputs=[advanced_settings_batch_checkbox], + outputs=[advanced_settings_batch], + ) + + with gr.TabItem(i18n("Train")): + with gr.Accordion(label=i18n("Step 1: Processing data")): + with gr.Row(): + with gr.Column(): + exp_dir1 = gr.Textbox( + label=i18n("Enter the model name:"), + value=i18n("Model_Name"), + ) + if_f0_3 = gr.Checkbox( + label=i18n("Whether the model has pitch guidance."), + value=True, + interactive=True, + ) + sr2 = gr.Radio( + label=i18n("Target sample rate:"), + choices=["40k", "48k", "32k"], + value="40k", + interactive=True, + ) + version19 = gr.Radio( + label=i18n("Version:"), + choices=["v1", "v2"], + value="v2", + interactive=True, + visible=True, + ) + + with gr.Column(): + np7 = gr.Slider( + minimum=1, + maximum=config.n_cpu, + step=1, + label=i18n("Number of CPU processes:"), + value=config.n_cpu, + interactive=True, + ) + spk_id5 = gr.Slider( + minimum=0, + maximum=4, + step=1, + label=i18n("Specify the model ID:"), + value=0, + interactive=True, + ) + + with gr.Row(): + with gr.Column(): + trainset_dir4 = gr.Dropdown( + choices=sorted(datasets), + label=i18n("Select your dataset:"), + value=get_dataset(), + ) + + dataset_path = gr.Textbox( + label=i18n("Or add your dataset path:"), + interactive=True, + ) + btn_update_dataset_list = gr.Button( + i18n("Update list"), variant="primary" + ) + + btn_update_dataset_list.click( + resources.update_dataset_list, [spk_id5], trainset_dir4 + ) + but1 = gr.Button(i18n("Process data"), variant="primary") + info1 = gr.Textbox(label=i18n("Output information:"), value="") + but1.click( + preprocess_dataset, + [trainset_dir4, exp_dir1, sr2, np7, dataset_path], + [info1], + api_name="train_preprocess", + ) + + with gr.Accordion(label=i18n("Step 2: Extracting features")): + with gr.Row(): + with gr.Column(): + gpus6 = gr.Textbox( + label=i18n( + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:" + ), + value=gpus, + interactive=True, + ) 
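+                            # The value is a "-"-separated GPU index string (e.g. "0-1-2");
+                            # extract_f0_feature() splits it with gpus.split("-") and
+                            # starts one extraction worker per listed index.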
+ gpu_info9 = gr.Textbox( + label=i18n("GPU Information:"), + value=gpu_info, + visible=F0GPUVisible, + ) + with gr.Column(): + f0method8 = gr.Radio( + label=i18n("Select the pitch extraction algorithm:"), + choices=[ + "pm", + "harvest", + "dio", + "crepe", + "mangio-crepe", + "rmvpe", + "rmvpe_gpu", + ], + value="rmvpe", + interactive=True, + ) + hop_length = gr.Slider( + minimum=1, + maximum=512, + step=1, + label=i18n( + "Hop Length (lower hop lengths take more time to infer but are more pitch accurate):" + ), + value=64, + interactive=True, + ) + + with gr.Row(): + but2 = gr.Button(i18n("Feature extraction"), variant="primary") + info2 = gr.Textbox( + label=i18n("Output information:"), + value="", + max_lines=8, + interactive=False, + ) + + but2.click( + extract_f0_feature, + [ + gpus6, + np7, + f0method8, + if_f0_3, + exp_dir1, + version19, + hop_length, + ], + [info2], + api_name="train_extract_f0_feature", + ) + + with gr.Row(): + with gr.Accordion(label=i18n("Step 3: Model training started")): + with gr.Row(): + save_epoch10 = gr.Slider( + minimum=1, + maximum=100, + step=1, + label=i18n("Save frequency:"), + value=10, + interactive=True, + visible=True, + ) + total_epoch11 = gr.Slider( + minimum=1, + maximum=10000, + step=2, + label=i18n("Training epochs:"), + value=750, + interactive=True, + ) + batch_size12 = gr.Slider( + minimum=1, + maximum=50, + step=1, + label=i18n("Batch size per GPU:"), + value=default_batch_size, + # value=20, + interactive=True, + ) + + with gr.Row(): + if_save_latest13 = gr.Checkbox( + label=i18n( + "Whether to save only the latest .ckpt file to save hard drive space" + ), + value=True, + interactive=True, + ) + if_cache_gpu17 = gr.Checkbox( + label=i18n( + "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training" + ), + value=False, + interactive=True, + ) + if_save_every_weights18 = gr.Checkbox( + label=i18n( + "Save a small final model to the 'weights' folder at each save point" + ), + value=True, + interactive=True, + ) + with gr.Column(): + with gr.Row(): + pretrained_G14 = gr.Textbox( + label=i18n("Load pre-trained base model G path:"), + value="assets/pretrained_v2/f0G40k.pth", + interactive=True, + ) + pretrained_D15 = gr.Textbox( + label=i18n("Load pre-trained base model D path:"), + value="assets/pretrained_v2/f0D40k.pth", + interactive=True, + ) + with gr.Row(): + gpus16 = gr.Textbox( + label=i18n( + "Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:" + ), + value=gpus, + interactive=True, + ) + sr2.change( + change_sr2, + [sr2, if_f0_3, version19], + [pretrained_G14, pretrained_D15], + ) + version19.change( + change_version19, + [sr2, if_f0_3, version19], + [pretrained_G14, pretrained_D15, sr2], + ) + if_f0_3.change( + fn=change_f0, + inputs=[if_f0_3, sr2, version19], + outputs=[f0method8, pretrained_G14, pretrained_D15], + ) + with gr.Row(): + butstop = gr.Button( + i18n("Stop training"), + variant="primary", + visible=False, + ) + but3 = gr.Button( + i18n("Train model"), variant="primary", visible=True + ) + but3.click( + fn=stoptraining, + inputs=[gr.Number(value=0, visible=False)], + outputs=[but3, butstop], + api_name="train_stop", + ) + butstop.click( + fn=stoptraining, + inputs=[gr.Number(value=1, visible=False)], + outputs=[but3, butstop], + ) + info3 = gr.Textbox( + label=i18n("Output information:"), + value="", + lines=4, + max_lines=4, + ) + + with gr.Column(): + save_action = gr.Dropdown( + label=i18n("Save type"), + choices=[ + i18n("Save all"), 
+ i18n("Save D and G"), + i18n("Save voice"), + ], + value=i18n("Choose the method"), + interactive=True, + ) + but4 = gr.Button( + i18n("Train feature index"), variant="primary" + ) + + but7 = gr.Button(i18n("Save model"), variant="primary") + + if_save_every_weights18.change( + fn=lambda if_save_every_weights: ( + { + "visible": if_save_every_weights, + "__type__": "update", + } + ), + inputs=[if_save_every_weights18], + outputs=[save_epoch10], + ) + + but3.click( + click_train, + [ + exp_dir1, + sr2, + if_f0_3, + spk_id5, + save_epoch10, + total_epoch11, + batch_size12, + if_save_latest13, + pretrained_G14, + pretrained_D15, + gpus16, + if_cache_gpu17, + if_save_every_weights18, + version19, + ], + [info3, butstop, but3], + api_name="train_start", + ) + + but4.click(train_index, [exp_dir1, version19], info3) + but7.click(resources.save_model, [exp_dir1, save_action], info3) + + with gr.TabItem(i18n("UVR5")): # UVR section + with gr.Row(): + with gr.Column(): + model_select = gr.Radio( + label=i18n("Model Architecture:"), + choices=["VR", "MDX", "Demucs (Beta)"], + value="VR", + interactive=True, + ) + dir_wav_input = gr.Textbox( + label=i18n( + "Enter the path of the audio folder to be processed:" + ), + value=os.path.join(now_dir, "assets", "audios"), + ) + wav_inputs = gr.File( + file_count="multiple", + label=i18n( + "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder." + ), + ) + + with gr.Column(): + model_choose = gr.Dropdown( + label=i18n("Model:"), choices=uvr5_names + ) + agg = gr.Slider( + minimum=0, + maximum=20, + step=1, + label="Vocal Extraction Aggressive", + value=10, + interactive=True, + visible=False, + ) + opt_vocal_root = gr.Textbox( + label=i18n("Specify the output folder for vocals:"), + value="assets/audios", + ) + opt_ins_root = gr.Textbox( + label=i18n("Specify the output folder for accompaniment:"), + value="assets/audios/audio-others", + ) + format0 = gr.Radio( + label=i18n("Export file format:"), + choices=["wav", "flac", "mp3", "m4a"], + value="flac", + interactive=True, + ) + model_select.change( + fn=update_model_choices, + inputs=model_select, + outputs=model_choose, + ) + but2 = gr.Button(i18n("Convert"), variant="primary") + vc_output4 = gr.Textbox(label=i18n("Output information:")) + # wav_inputs.upload(fn=save_to_wav2_edited, inputs=[wav_inputs], outputs=[]) + but2.click( + uvr, + [ + model_choose, + dir_wav_input, + opt_vocal_root, + wav_inputs, + opt_ins_root, + agg, + format0, + model_select, + ], + [vc_output4], + api_name="uvr_convert", + ) + with gr.TabItem(i18n("TTS")): + with gr.Column(): + text_test = gr.Textbox( + label=i18n("Text:"), + placeholder=i18n( + "Enter the text you want to convert to voice..." 
+ ), + lines=6, + ) + + with gr.Row(): + with gr.Column(): + tts_methods_voice = ["Edge-tts", "Bark-tts"] + ttsmethod_test = gr.Dropdown( + tts_methods_voice, + value="Edge-tts", + label=i18n("TTS Method:"), + visible=True, + ) + tts_test = gr.Dropdown( + tts.set_edge_voice, + label=i18n("TTS Model:"), + visible=True, + ) + ttsmethod_test.change( + fn=tts.update_tts_methods_voice, + inputs=ttsmethod_test, + outputs=tts_test, + ) + + with gr.Column(): + model_voice_path07 = gr.Dropdown( + label=i18n("RVC Model:"), + choices=sorted(names), + value=default_weight, + ) + best_match_index_path1, _ = match_index( + model_voice_path07.value + ) + + file_index2_07 = gr.Dropdown( + label=i18n("Select the .index file:"), + choices=get_indexes(), + value=best_match_index_path1, + interactive=True, + allow_custom_value=True, + ) + with gr.Row(): + refresh_button_ = gr.Button(i18n("Refresh"), variant="primary") + refresh_button_.click( + fn=change_choices2, + inputs=[], + outputs=[model_voice_path07, file_index2_07], + ) + with gr.Row(): + original_ttsvoice = gr.Audio(label=i18n("Audio TTS:")) + ttsvoice = gr.Audio(label=i18n("Audio RVC:")) + + with gr.Row(): + button_test = gr.Button(i18n("Convert"), variant="primary") + + button_test.click( + tts.use_tts, + inputs=[ + text_test, + tts_test, + model_voice_path07, + file_index2_07, + # transpose_test, + vc_transform0, + f0method8, + index_rate1, + crepe_hop_length, + f0_autotune, + ttsmethod_test, + ], + outputs=[ttsvoice, original_ttsvoice], + ) + + with gr.TabItem(i18n("Resources")): + resources.download_model() + resources.download_backup() + resources.download_dataset(trainset_dir4) + resources.download_audio() + resources.youtube_separator() + with gr.TabItem(i18n("Extra")): + gr.Markdown( + value=i18n( + "This section contains some extra utilities that often may be in experimental phases" + ) + ) + with gr.TabItem(i18n("Merge Audios")): + mergeaudios.merge_audios() + + with gr.TabItem(i18n("Processing")): + processing.processing_() + + with gr.TabItem(i18n("Settings")): + with gr.Row(): + with gr.Column(): + gr.Markdown(value=i18n("Pitch settings")) + noteshertz = gr.Checkbox( + label=i18n( + "Whether to use note names instead of their hertz value. E.G. 
[C5, D6] instead of [523.25, 1174.66]Hz" + ), + value=rvc_globals.NotesOrHertz, + interactive=True, + ) + themes_select = gr.Dropdown( + loader_themes.get_list(), + value=loader_themes.read_json(), + label=i18n("Select Theme:"), + visible=True, + ) + themes_select.change( + fn=loader_themes.select_theme, + inputs=themes_select, + outputs=[], + ) + + noteshertz.change( + fn=lambda nhertz: rvc_globals.__setattr__("NotesOrHertz", nhertz), + inputs=[noteshertz], + outputs=[], + ) + + noteshertz.change( + fn=switch_pitch_controls, + inputs=[f0method0], + outputs=[ + minpitch_slider, + minpitch_txtbox, + maxpitch_slider, + maxpitch_txtbox, + ], + ) + + with gr.TabItem(i18n("Readme")): + gr.Markdown(value=inforeadme) + return app + + +def GradioRun(app): + share_gradio_link = config.iscolab or config.paperspace + concurrency_count = 511 + max_size = 1022 + + if config.iscolab or config.paperspace: + app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( + favicon_path="./assets/images/icon.png", + ) + else: + app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( + favicon_path="./assets/images/icon.png", + ) + + +if __name__ == "__main__": + app = GradioSetup() + GradioRun(app) diff --git a/install_Applio.bat b/install_Applio.bat new file mode 100644 index 0000000000000000000000000000000000000000..966c10158941990028fd16b9186410cac88b5af9 --- /dev/null +++ b/install_Applio.bat @@ -0,0 +1,145 @@ +@echo off +Title Applio - Installer +setlocal +cd %~dp0 + +::: +::: _ _ +::: /\ | (_) +::: / \ _ __ _ __ | |_ ___ +::: / /\ \ | '_ \| '_ \| | |/ _ \ +::: / ____ \| |_) | |_) | | | (_) | +::: /_/ \_\ .__/| .__/|_|_|\___/ +::: | | | | +::: |_| |_| +::: +::: + +set "repoUrl=https://github.com/IAHispano/Applio-RVC-Fork.git" +set "repoFolder=Applio-RVC-Fork" +set "principal=%cd%\%repoFolder%" +set "runtime_scripts=%cd%\%repoFolder%\runtime\Scripts" +set "URL_BASE=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main" +set "URL_EXTRA=https://huggingface.co./IAHispano/applio/resolve/main" + +echo. +cls +echo INFO: It's important not to run this installer as an administrator as it might cause issues, and it's recommended to disable antivirus or firewall, as errors might occur when downloading pretrained models. +echo. +pause + +cls +echo INFO: Please ensure you have installed the required dependencies before continuing. Refer to the installation guide for details. +echo. +echo Step-by-step guide: https://rentry.org/appliolocal +echo Build Tools: https://aka.ms/vs/17/release/vs_BuildTools.exe +echo Redistributable: https://aka.ms/vs/17/release/vc_redist.x64.exe +echo Git: https://github.com/git-for-windows/git/releases/download/v2.42.0.windows.2/Git-2.42.0.2-64-bit.exe +echo Python 3.9.8: https://www.python.org/ftp/python/3.9.8/python-3.9.8-amd64.exe +echo. +echo INFO: Its recommend installing Python 3.9.X and ensuring that it has been added to the system's path. +echo. +pause +cls +for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A +echo. + +echo Cloning the repository... +git clone %repoUrl% %repoFolder% +cd %repoFolder% +del install_Applio.bat +del /q *.sh +echo. +cls + +echo Installing dependencies... +echo. +echo Recommended for Nvidia GPU users: +echo [1] Download Runtime (pre-installed dependencies) +echo. +echo Recommended for AMD/Intel GPU users (Broken): +echo [2] Download DML Runtime (pre-installed dependencies) +echo. 
+echo Only recommended for experienced users: +echo [3] Nvidia graphics cards +echo [4] AMD / Intel graphics cards +echo. +echo [5] I have already installed the dependencies +echo. +set /p choice=Select the option according to your GPU: +set choice=%choice: =% + +if "%choice%"=="1" ( +cls +powershell -command "Invoke-WebRequest -Uri https://frippery.org/files/busybox/busybox.exe -OutFile busybox.exe" +busybox.exe wget %URL_EXTRA%/runtime.zip +echo. +echo Extracting the runtime.zip file... +powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('runtime.zip', '%principal%') }" +echo. +del runtime.zip busybox.exe +cls +echo. +goto dependenciesFinished +) + +if "%choice%"=="2" ( +cls +powershell -command "Invoke-WebRequest -Uri https://frippery.org/files/busybox/busybox.exe -OutFile busybox.exe" +busybox.exe wget %URL_EXTRA%/runtime_dml.zip +echo. +echo Extracting the runtime_dml.zip file... +powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('runtime_dml.zip', '%principal%') }" +echo. +del runtime_dml.zip busybox.exe +cd runtime +python.exe -m pip install onnxruntime +cd .. +cls +echo. +goto dependenciesFinished +) + +if "%choice%"=="3" ( +cls +pip install -r assets/requirements/requirements.txt +echo. +pip uninstall torch torchvision torchaudio -y +echo. +pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu117 +echo. +echo. +cls +echo Dependencies successfully installed! +echo. +goto dependenciesFinished +) + +if "%choice%"=="4" ( +cls +pip uninstall onnxruntime onnxruntime-directml +echo. +pip install -r assets/requirements/requirements.txt +echo. +pip install -r assets/requirements/requirements-dml.txt +echo. +echo. +cls +echo Dependencies successfully installed! +echo. +goto dependenciesFinished +) + +if "%choice%"=="5" ( +echo Dependencies successfully installed! +echo. +goto dependenciesFinished +) + +:dependenciesFinished +cls +echo Applio has been successfully downloaded, run the file go-applio.bat to run the web interface! +echo. +pause +exit + diff --git a/install_Applio.sh b/install_Applio.sh new file mode 100644 index 0000000000000000000000000000000000000000..a181ab462ad4133deb36c778593a7ba1f339b7a0 --- /dev/null +++ b/install_Applio.sh @@ -0,0 +1,132 @@ +#!/bin/bash +echo -e "\033]0;Applio - Installer\007" +clear +echo " :::" +echo " ::: _ _ " +echo " ::: /\ | (_) " +echo " ::: / \ _ __ _ __ | |_ ___ " +echo " ::: / /\ \ | '_ \| '_ \| | |/ _ \ " +echo " ::: / ____ \| |_) | |_) | | | (_) | " +echo " ::: /_/ \_\ .__/| .__/|_|_|\___/ " +echo " ::: | | | | " +echo " ::: |_| |_| " +echo " ::: " +echo " ::: " + +# if [[ "$(uname)" == "Darwin" ]]; then +# # macOS specific env: +# export PYTORCH_ENABLE_MPS_FALLBACK=1 +# export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 +# elif [[ "$(uname)" != "Linux" ]]; then +# echo "Unsupported operating system. Are you using Windows...?" +# echo "If yes use the batch (.bat) file insted this one!" +# exit 1 +# fi + +# if [ -d ".venv" ]; then +# echo "Activate venv..." +# source .venv/bin/activate +# else +# echo "Creating venv..." +requirements_file="assets/requirements/requirements-applio.txt" +# # Check if Python is installed +# if ! command -v python3 &> /dev/null; then +# echo "Python 3 not found. Attempting to install..." 
+# if [[ "$(uname)" == "Darwin" ]] && command -v brew &> /dev/null; then +# brew install python +# elif [[ "$(uname)" == "Linux" ]] && command -v apt-get &> /dev/null; then +# sudo apt-get update +# sudo apt-get install python +# else +# echo "Please install Python manually." +# exit 1 +# fi +# fi + + +# Clone the repo for make this script usable with echo 1 | curl blabla https://script.sh +# git clone https://github.com/IAHispano/Applio-RVC-Fork +# cd Applio-RVC-Fork +# python -m venv .venv +# source .venv/bin/activate +chmod +x stftpitchshift +chmod +x *.sh +# maybe is needed idk +chmod +x ./lib/infer/infer_libs/stftpitchshift +python -m ensurepip + + + +# fi + +# clear +# menu() { +# while true; do +# clear +# echo +# echo "Only recommended for experienced users:" +# echo "[1] Nvidia graphics cards" +# echo "[2] AMD graphics cards" +# echo "[3] Intel ARC graphics cards" +# echo +# read -p "Select the option according to your GPU: " choice + +# case $choice in +# 1) +# echo +# python -m pip install -r assets/requirements/requirements.txt +# python -m pip uninstall torch torchvision torchaudio -y +# python -m pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu117 +# echo +# finish +# ;; +# 2) +# echo +# echo "Before install this check https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/en/README.en.md#rocm-support-for-amd-graphic-cards-linux-only" +# read -p "Press enter to continue" +# python -m pip install -r assets/requirements/requirements-amd.txt +# python -m pip uninstall torch torchvision torchaudio -y +# pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2 +# echo +# finish +# ;; +# 3) +# echo +#python -m pip install -r assets/requirements/requirements-ipex.txt +python -m pip install scikit-learn-intelex + # finish + # ;; + # *) + # echo "Invalid option. Please enter a number from 1 to 3." + # echo "" + # read -p "Press Enter to access the main menu..." + # ;; +# esac +# done +# } + +# Finish installation +# finish() { +# # Check if required packages are installed and install them if notia +# # I will change this to make a requirements with the applio changes +# # And add a custom one for nvidia, ipx, amd support on linux and directml for the batch script +# if [ -f "${requirements_file}" ]; then +# installed_packages=$(python -m pip freeze) +# while IFS= read -r package; do +# [[ "${package}" =~ ^#.* ]] && continue +# package_name=$(echo "${package}" | sed 's/[<>=!].*//') +# if ! echo "${installed_packages}" | grep -q "${package_name}"; then +# echo "${package_name} not found. Attempting to install..." +# python -m pip install --upgrade "${package}" +# fi +# done < "${requirements_file}" +# else +# echo "${requirements_file} not found. Please ensure the requirements file with required packages exists." +# exit 1 +# fi +# #clear +# echo "Applio has been successfully downloaded, run the file go-applio.sh to run the web interface!" 
+# #exit 0 +# } +# Loop to the main menu +# menu \ No newline at end of file diff --git a/lib/csvdb/formanting.csv b/lib/csvdb/formanting.csv new file mode 100644 index 0000000000000000000000000000000000000000..bd72a81f629170d3ecc02aaba5ae6551bdcf73cd --- /dev/null +++ b/lib/csvdb/formanting.csv @@ -0,0 +1 @@ +False,1.0,1.0 diff --git a/lib/csvdb/stop.csv b/lib/csvdb/stop.csv new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/fixes/local_fixes.py b/lib/fixes/local_fixes.py new file mode 100644 index 0000000000000000000000000000000000000000..a7abad699332af42bdcb29f31eb3370423421cb4 --- /dev/null +++ b/lib/fixes/local_fixes.py @@ -0,0 +1,109 @@ +import os +import time +import shutil +import requests +import zipfile + +def insert_new_line(file_name, line_to_find, text_to_insert): + lines = [] + with open(file_name, 'r', encoding='utf-8') as read_obj: + lines = read_obj.readlines() + already_exists = False + with open(file_name + '.tmp', 'w', encoding='utf-8') as write_obj: + for i in range(len(lines)): + write_obj.write(lines[i]) + if lines[i].strip() == line_to_find: + # If next line exists and starts with sys.path.append, skip + if i+1 < len(lines) and lines[i+1].strip().startswith("sys.path.append"): + print('It was already fixed! Skip adding a line...') + already_exists = True + break + else: + write_obj.write(text_to_insert + '\n') + # If no existing sys.path.append line was found, replace the original file + if not already_exists: + os.replace(file_name + '.tmp', file_name) + return True + else: + # If existing line was found, delete temporary file + os.remove(file_name + '.tmp') + return False + +def replace_in_file(file_name, old_text, new_text): + with open(file_name, 'r', encoding='utf-8') as file: + file_contents = file.read() + + if old_text in file_contents: + file_contents = file_contents.replace(old_text, new_text) + with open(file_name, 'w', encoding='utf-8') as file: + file.write(file_contents) + return True + + return False + + +def find_torchcrepe_directory(directory): + """ + Recursively searches for the topmost folder named 'torchcrepe' within a directory. + Returns the path of the directory found or None if none is found. + """ + for root, dirs, files in os.walk(directory): + if 'torchcrepe' in dirs: + return os.path.join(root, 'torchcrepe') + return None + +def download_and_extract_torchcrepe(): + url = 'https://github.com/maxrmorrison/torchcrepe/archive/refs/heads/master.zip' + temp_dir = 'temp_torchcrepe' + destination_dir = os.getcwd() + + try: + torchcrepe_dir_path = os.path.join(destination_dir, 'torchcrepe') + + if os.path.exists(torchcrepe_dir_path): + print("Skipping the torchcrepe download. 
The folder already exists.") + return + + # Download the file + print("Starting torchcrepe download...") + response = requests.get(url) + + # Raise an error if the GET request was unsuccessful + response.raise_for_status() + print("Download completed.") + + # Save the downloaded file + zip_file_path = os.path.join(temp_dir, 'master.zip') + os.makedirs(temp_dir, exist_ok=True) + with open(zip_file_path, 'wb') as file: + file.write(response.content) + print(f"Zip file saved to {zip_file_path}") + + # Extract the zip file + print("Extracting content...") + with zipfile.ZipFile(zip_file_path, 'r') as zip_file: + zip_file.extractall(temp_dir) + print("Extraction completed.") + + # Locate the torchcrepe folder and move it to the destination directory + torchcrepe_dir = find_torchcrepe_directory(temp_dir) + if torchcrepe_dir: + shutil.move(torchcrepe_dir, destination_dir) + print(f"Moved the torchcrepe directory to {destination_dir}!") + else: + print("The torchcrepe directory could not be located.") + + except Exception as e: + print("Torchcrepe not successfully downloaded", e) + + # Clean up temporary directory + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir) + +# Run the function +download_and_extract_torchcrepe() + +temp_dir = 'temp_torchcrepe' + +if os.path.exists(temp_dir): + shutil.rmtree(temp_dir) diff --git a/lib/fixes/tensor-launch.py b/lib/fixes/tensor-launch.py new file mode 100644 index 0000000000000000000000000000000000000000..8fbbba032a46363a69a26a9bf3aa78962bbf64b8 --- /dev/null +++ b/lib/fixes/tensor-launch.py @@ -0,0 +1,13 @@ +import time +from tensorboard import program + +log_path = "logs" + +if __name__ == "__main__": + tb = program.TensorBoard() + tb.configure(argv=[None, '--logdir', log_path]) + url = tb.launch() + print(f'Tensorboard can be accessed at: {url}') + + while True: + time.sleep(600) # Keep the main thread running \ No newline at end of file diff --git a/lib/globals/__pycache__/globals.cpython-39.pyc b/lib/globals/__pycache__/globals.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd3726f4c00b097139bbcbcf15cca2227f3417f8 Binary files /dev/null and b/lib/globals/__pycache__/globals.cpython-39.pyc differ diff --git a/lib/globals/globals.py b/lib/globals/globals.py new file mode 100644 index 0000000000000000000000000000000000000000..d0da59d56e8c2e482bcda5eeae7cf797b830560e --- /dev/null +++ b/lib/globals/globals.py @@ -0,0 +1,5 @@ +DoFormant: bool = False +Quefrency: float = 8.0 +Timbre: float = 1.2 + +NotesOrHertz: bool = False \ No newline at end of file diff --git a/lib/infer/infer_libs/__pycache__/audio.cpython-39.pyc b/lib/infer/infer_libs/__pycache__/audio.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c32402c7299bf57dd91be158d3da4145ebd11e5a Binary files /dev/null and b/lib/infer/infer_libs/__pycache__/audio.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/__pycache__/csvutil.cpython-39.pyc b/lib/infer/infer_libs/__pycache__/csvutil.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06ddaeab56e323f9e7b1167f734ce81a4903a06a Binary files /dev/null and b/lib/infer/infer_libs/__pycache__/csvutil.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/__pycache__/rmvpe.cpython-39.pyc b/lib/infer/infer_libs/__pycache__/rmvpe.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcf4424b8a4e829453256a4fce5bc9a357255fff Binary files /dev/null and b/lib/infer/infer_libs/__pycache__/rmvpe.cpython-39.pyc differ diff 
diff --git a/lib/infer/infer_libs/audio.py b/lib/infer/infer_libs/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4a1c18b2888947ece8b15594ead0c4c5166cb57
--- /dev/null
+++ b/lib/infer/infer_libs/audio.py
@@ -0,0 +1,140 @@
+import librosa
+import numpy as np
+import av
+from io import BytesIO
+import ffmpeg
+import os
+import sys
+
+import random
+from lib.infer.infer_libs.csvutil import CSVutil
+#import csv
+
+platform_stft_mapping = {
+    'linux': 'stftpitchshift',
+    'darwin': 'stftpitchshift',
+    'win32': 'stftpitchshift.exe',
+}
+
+stft = platform_stft_mapping.get(sys.platform)
+
+def wav2(i, o, format):
+    inp = av.open(i, 'rb')
+    if format == "m4a": format = "mp4"
+    out = av.open(o, 'wb', format=format)
+    if format == "ogg": format = "libvorbis"
+    if format == "mp4": format = "aac"
+
+    ostream = out.add_stream(format)
+
+    for frame in inp.decode(audio=0):
+        for p in ostream.encode(frame): out.mux(p)
+
+    for p in ostream.encode(None): out.mux(p)
+
+    out.close()
+    inp.close()
+
+def audio2(i, o, format, sr):
+    inp = av.open(i, 'rb')
+    out = av.open(o, 'wb', format=format)
+    if format == "ogg": format = "libvorbis"
+    if format == "f32le": format = "pcm_f32le"
+
+    ostream = out.add_stream(format, channels=1)
+    ostream.sample_rate = sr
+
+    for frame in inp.decode(audio=0):
+        for p in ostream.encode(frame): out.mux(p)
+
+    out.close()
+    inp.close()
+
+def load_audion(file, sr):
+    try:
+        file = (
+            file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+        )  # guard against users copying the path with stray spaces, quotes, or newlines
+        with open(file, "rb") as f:
+            with BytesIO() as out:
+                audio2(f, out, "f32le", sr)
+                return np.frombuffer(out.getvalue(), np.float32).flatten()
+
+    except AttributeError:
+        audio = file[1] / 32768.0
+        if len(audio.shape) == 2:
+            audio = np.mean(audio, -1)
+        return librosa.resample(audio, orig_sr=file[0], target_sr=16000)
+
+    except Exception as e:
+        raise RuntimeError(f"Failed to load audio: {e}")
+
+
+
+
+def load_audio(file, sr, DoFormant=False, Quefrency=1.0, Timbre=1.0):
+    converted = False
+    # The keyword defaults above are immediately overridden by the values
+    # persisted in lib/csvdb/formanting.csv; note the CSV stores strings, so
+    # DoFormant must be compared against "true" rather than passed to bool().
+    DoFormant, Quefrency, Timbre = CSVutil("lib/csvdb/formanting.csv", "r", "formanting")
+    DoFormant, Quefrency, Timbre = (
+        str(DoFormant).lower() == "true",
+        float(Quefrency),
+        float(Timbre),
+    )
+
+    try:
+        file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+
+        if not file.endswith(".wav"):
+            converted = True
+            # Format conversion using ffmpeg
+            converting = (
+                ffmpeg.input(file, threads=0)
+                .output(f"{file}.wav")
+                .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
+            )
+            file = f"{file}.wav"
+            print(f" · File converted to Wav format: {file}\n")
+
+        if DoFormant:
+            # Formant processing using stftpitchshift (only when formanting is enabled)
+            command = (
+                f'{stft} -i "{file}" -q "{Quefrency}" '
+                f'-t "{Timbre}" -o "{file}FORMANTED.wav"'
+            )
+            os.system(command)
+            file = f"{file}FORMANTED.wav"
+            print(f" · Formanted {file}!\n")
+
+        with open(file, "rb") as f:
+            with BytesIO() as out:
+                audio2(f, out, "f32le", sr)
+                audio_data = np.frombuffer(out.getvalue(), np.float32).flatten()
+
+        if converted:
+            try:
+                os.remove(file)
+            except Exception as e:
+                print(f"Couldn't remove the converted file due to {e}")
+            converted = False
+
+        return audio_data
+    except AttributeError:
+        audio = file[1] / 32768.0
+        if len(audio.shape) == 2:
+            audio = np.mean(audio, -1)
+        return librosa.resample(audio, orig_sr=file[0], target_sr=16000)
+    except Exception as e:
+        raise RuntimeError(f"Failed to load audio: {e}")
+
+
+def check_audio_duration(file):
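+    # Probe the file with ffmpeg and warn when the clip is shorter than ~0.76 s,
+    # which is usually too short to convert well.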
").strip('"').strip("\n").strip('"').strip(" ") + + probe = ffmpeg.probe(file) + + duration = float(probe['streams'][0]['duration']) + + if duration < 0.76: + print( + f"Audio file, {file.split('/')[-1]}, under ~0.76s detected - file is too short. Target at least 1-2s for best results." + ) + return False + + return True + except Exception as e: + raise RuntimeError(f"Failed to check audio duration: {e}") \ No newline at end of file diff --git a/lib/infer/infer_libs/csvutil.py b/lib/infer/infer_libs/csvutil.py new file mode 100644 index 0000000000000000000000000000000000000000..8992d13ffc7497bf441232552fbe9cfb776e4919 --- /dev/null +++ b/lib/infer/infer_libs/csvutil.py @@ -0,0 +1,33 @@ + + +import csv + +# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe") + + +def CSVutil(file, rw, type, *args): + if type == "formanting": + if rw == "r": + with open(file) as fileCSVread: + csv_reader = list(csv.reader(fileCSVread)) + return ( + (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2]) + if csv_reader is not None + else (lambda: exec('raise ValueError("No data")'))() + ) + else: + if args: + doformnt = args[0] + else: + doformnt = False + qfr = args[1] if len(args) > 1 else 1.0 + tmb = args[2] if len(args) > 2 else 1.0 + with open(file, rw, newline="") as fileCSVwrite: + csv_writer = csv.writer(fileCSVwrite, delimiter=",") + csv_writer.writerow([doformnt, qfr, tmb]) + elif type == "stop": + stop = args[0] if args else False + with open(file, rw, newline="") as fileCSVwrite: + csv_writer = csv.writer(fileCSVwrite, delimiter=",") + csv_writer.writerow([stop]) + diff --git a/lib/infer/infer_libs/formantshiftcfg/f2m.txt b/lib/infer/infer_libs/formantshiftcfg/f2m.txt new file mode 100644 index 0000000000000000000000000000000000000000..40356a80ce7dd7a893bca233a41306525193f2f0 --- /dev/null +++ b/lib/infer/infer_libs/formantshiftcfg/f2m.txt @@ -0,0 +1,2 @@ +1.0 +0.8 \ No newline at end of file diff --git a/lib/infer/infer_libs/formantshiftcfg/m2f.txt b/lib/infer/infer_libs/formantshiftcfg/m2f.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa69b52dc8e29db4697401ef2766dd0b6c6f4b47 --- /dev/null +++ b/lib/infer/infer_libs/formantshiftcfg/m2f.txt @@ -0,0 +1,2 @@ +1.0 +1.2 \ No newline at end of file diff --git a/lib/infer/infer_libs/formantshiftcfg/random.txt b/lib/infer/infer_libs/formantshiftcfg/random.txt new file mode 100644 index 0000000000000000000000000000000000000000..427be5c80412098ecec082b3a06e867ddc9a7ba2 --- /dev/null +++ b/lib/infer/infer_libs/formantshiftcfg/random.txt @@ -0,0 +1,2 @@ +32.0 +9.8 \ No newline at end of file diff --git a/lib/infer/infer_libs/infer_batch_rvc.py b/lib/infer/infer_libs/infer_batch_rvc.py new file mode 100644 index 0000000000000000000000000000000000000000..7c5a1b55042abad9934067fe3a28befd9b8c2a2c --- /dev/null +++ b/lib/infer/infer_libs/infer_batch_rvc.py @@ -0,0 +1,215 @@ +""" +v1 +runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33 +v2 +runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33 +""" +import os, sys + +now_dir = os.getcwd() +sys.path.append(now_dir) +import sys +import torch 
+import tqdm as tq
+from multiprocessing import cpu_count
+
+
+class Config:
+    def __init__(self, device, is_half):
+        self.device = device
+        self.is_half = is_half
+        self.n_cpu = 0
+        self.gpu_name = None
+        self.gpu_mem = None
+        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+    def device_config(self) -> tuple:
+        if torch.cuda.is_available():
+            i_device = int(self.device.split(":")[-1])
+            self.gpu_name = torch.cuda.get_device_name(i_device)
+            if (
+                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+                or "P40" in self.gpu_name.upper()
+                or "1060" in self.gpu_name
+                or "1070" in self.gpu_name
+                or "1080" in self.gpu_name
+            ):
+                print("16-series/10-series GPUs and the P40 are forced to single precision")
+                self.is_half = False
+                for config_file in ["32k.json", "40k.json", "48k.json"]:
+                    with open(f"assets/configs/{config_file}", "r") as f:
+                        strr = f.read().replace("true", "false")
+                    with open(f"assets/configs/{config_file}", "w") as f:
+                        f.write(strr)
+                with open("infer/modules/train/preprocess.py", "r") as f:
+                    strr = f.read().replace("3.7", "3.0")
+                with open("infer/modules/train/preprocess.py", "w") as f:
+                    f.write(strr)
+            else:
+                self.gpu_name = None
+            self.gpu_mem = int(
+                torch.cuda.get_device_properties(i_device).total_memory
+                / 1024
+                / 1024
+                / 1024
+                + 0.4
+            )
+            if self.gpu_mem <= 4:
+                with open("infer/modules/train/preprocess.py", "r") as f:
+                    strr = f.read().replace("3.7", "3.0")
+                with open("infer/modules/train/preprocess.py", "w") as f:
+                    f.write(strr)
+        elif torch.backends.mps.is_available():
+            print("No supported NVIDIA GPU found; using MPS for inference")
+            self.device = "mps"
+        else:
+            print("No supported NVIDIA GPU found; using CPU for inference")
+            self.device = "cpu"
+            self.is_half = True
+
+        if self.n_cpu == 0:
+            self.n_cpu = cpu_count()
+
+        if self.is_half:
+            # Settings for 6 GB of VRAM
+            x_pad = 3
+            x_query = 10
+            x_center = 60
+            x_max = 65
+        else:
+            # Settings for 5 GB of VRAM
+            x_pad = 1
+            x_query = 6
+            x_center = 38
+            x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+            x_pad = 1
+            x_query = 5
+            x_center = 30
+            x_max = 32
+
+        return x_pad, x_query, x_center, x_max
+
+
+f0up_key = sys.argv[1]
+input_path = sys.argv[2]
+index_path = sys.argv[3]
+f0method = sys.argv[4]  # harvest or pm
+opt_path = sys.argv[5]
+model_path = sys.argv[6]
+index_rate = float(sys.argv[7])
+device = sys.argv[8]
+is_half = sys.argv[9].lower() != "false"
+filter_radius = int(sys.argv[10])
+resample_sr = int(sys.argv[11])
+rms_mix_rate = float(sys.argv[12])
+protect = float(sys.argv[13])
+print(sys.argv)
+config = Config(device, is_half)
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from lib.infer.modules.vc.modules import VC
+from lib.infer.infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
+from lib.infer.infer_libs.audio import load_audio
+from fairseq import checkpoint_utils
+from scipy.io import wavfile
+
+hubert_model = None
+
+
+def load_hubert():
+    global hubert_model
+    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+        ["hubert_base.pt"],
+        suffix="",
+    )
+    hubert_model = models[0]
+    hubert_model = hubert_model.to(device)
+    if is_half:
+        hubert_model = hubert_model.half()
+    else:
+        hubert_model = hubert_model.float()
+    hubert_model.eval()
+
+
+def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
+    global tgt_sr, net_g, vc, hubert_model, version
+    if input_audio is None:
+        return "You need to upload an audio", None
+    f0_up_key = int(f0_up_key)
+    audio = load_audio(input_audio, 16000)
+    times = [0, 0, 0]
+    if hubert_model is None:
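+        # Lazily load the HuBERT feature extractor on the first conversion call.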
+        load_hubert()
+    if_f0 = cpt.get("f0", 1)
+    # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
+    audio_opt = vc.pipeline(
+        hubert_model,
+        net_g,
+        sid,
+        audio,
+        input_audio,
+        times,
+        f0_up_key,
+        f0_method,
+        file_index,
+        index_rate,
+        if_f0,
+        filter_radius,
+        tgt_sr,
+        resample_sr,
+        rms_mix_rate,
+        version,
+        protect,
+        f0_file=f0_file,
+    )
+    print(times)
+    return audio_opt
+
+
+def get_vc(model_path):
+    global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
+    print("loading pth %s" % model_path)
+    cpt = torch.load(model_path, map_location="cpu")
+    tgt_sr = cpt["config"][-1]
+    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+    if_f0 = cpt.get("f0", 1)
+    version = cpt.get("version", "v1")
+    if version == "v1":
+        if if_f0 == 1:
+            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
+        else:
+            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+    elif version == "v2":
+        if if_f0 == 1:
+            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
+        else:
+            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+    del net_g.enc_q
+    print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the old weights are not cleaned up properly, oddly enough
+    net_g.eval().to(device)
+    if is_half:
+        net_g = net_g.half()
+    else:
+        net_g = net_g.float()
+    vc = VC(tgt_sr, config)
+    n_spk = cpt["config"][-3]
+    # return {"visible": True,"maximum": n_spk, "__type__": "update"}
+
+
+get_vc(model_path)
+audios = os.listdir(input_path)
+for file in tq.tqdm(audios):
+    if file.endswith(".wav"):
+        file_path = input_path + "/" + file
+        wav_opt = vc_single(
+            0, file_path, f0up_key, None, f0method, index_path, index_rate
+        )
+        out_path = opt_path + "/" + file
+        wavfile.write(out_path, tgt_sr, wav_opt)
diff --git a/lib/infer/infer_libs/infer_pack/__pycache__/attentions.cpython-39.pyc b/lib/infer/infer_libs/infer_pack/__pycache__/attentions.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa9c245bba2d610b8fc477ebc6869af7d1cd115a
Binary files /dev/null and b/lib/infer/infer_libs/infer_pack/__pycache__/attentions.cpython-39.pyc differ
diff --git a/lib/infer/infer_libs/infer_pack/__pycache__/commons.cpython-39.pyc b/lib/infer/infer_libs/infer_pack/__pycache__/commons.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..529ff3bd71efe6941678729ed16f5fc269781cca
Binary files /dev/null and b/lib/infer/infer_libs/infer_pack/__pycache__/commons.cpython-39.pyc differ
diff --git a/lib/infer/infer_libs/infer_pack/__pycache__/models.cpython-39.pyc b/lib/infer/infer_libs/infer_pack/__pycache__/models.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..809f087440431d28b786ce962c57c52953e68985
Binary files /dev/null and b/lib/infer/infer_libs/infer_pack/__pycache__/models.cpython-39.pyc differ
diff --git a/lib/infer/infer_libs/infer_pack/__pycache__/modules.cpython-39.pyc b/lib/infer/infer_libs/infer_pack/__pycache__/modules.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..276c5711e26ea15e6415f01ebdfbf7b1b728da46
Binary files /dev/null and b/lib/infer/infer_libs/infer_pack/__pycache__/modules.cpython-39.pyc differ
diff --git a/lib/infer/infer_libs/infer_pack/__pycache__/transforms.cpython-39.pyc b/lib/infer/infer_libs/infer_pack/__pycache__/transforms.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ed5ea2966c61fba869d067d615b3ae1f1ca8810
Binary files /dev/null
and b/lib/infer/infer_libs/infer_pack/__pycache__/transforms.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/infer_pack/attentions.py b/lib/infer/infer_libs/infer_pack/attentions.py new file mode 100644 index 0000000000000000000000000000000000000000..679d8511efc2afd7352670ed48f86072809520be --- /dev/null +++ b/lib/infer/infer_libs/infer_pack/attentions.py @@ -0,0 +1,414 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F + +from lib.infer.infer_libs.infer_pack import commons +from lib.infer.infer_libs.infer_pack.modules import LayerNorm + + +class Encoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=10, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + window_size=window_size, + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + proximal_bias=False, + proximal_init=True, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + proximal_bias=proximal_bias, + proximal_init=proximal_init, + ) + ) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append( + MultiHeadAttention( + hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + causal=True, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + 
""" + self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( + device=x.device, dtype=x.dtype + ) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + self.emb_rel_v = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert ( + t_s == t_t + ), "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys( + query / math.sqrt(self.k_channels), key_relative_embeddings + ) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." + scores = scores + self._attention_bias_proximal(t_s).to( + device=scores.device, dtype=scores.dtype + ) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert ( + t_s == t_t + ), "Local attention is only available for self-attention." 
+            block_mask = (
+                torch.ones_like(scores)
+                .triu(-self.block_length)
+                .tril(self.block_length)
+            )
+            scores = scores.masked_fill(block_mask == 0, -1e4)
+        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+        p_attn = self.drop(p_attn)
+        output = torch.matmul(p_attn, value)
+        if self.window_size is not None:
+            relative_weights = self._absolute_position_to_relative_position(p_attn)
+            value_relative_embeddings = self._get_relative_embeddings(
+                self.emb_rel_v, t_s
+            )
+            output = output + self._matmul_with_relative_values(
+                relative_weights, value_relative_embeddings
+            )
+        output = (
+            output.transpose(2, 3).contiguous().view(b, d, t_t)
+        )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+        return output, p_attn
+
+    def _matmul_with_relative_values(self, x, y):
+        """
+        x: [b, h, l, m]
+        y: [h or 1, m, d]
+        ret: [b, h, l, d]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0))
+        return ret
+
+    def _matmul_with_relative_keys(self, x, y):
+        """
+        x: [b, h, l, d]
+        y: [h or 1, m, d]
+        ret: [b, h, l, m]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+        return ret
+
+    def _get_relative_embeddings(self, relative_embeddings, length):
+        max_relative_position = 2 * self.window_size + 1
+        # Pad first before slice to avoid using cond ops.
+        pad_length = max(length - (self.window_size + 1), 0)
+        slice_start_position = max((self.window_size + 1) - length, 0)
+        slice_end_position = slice_start_position + 2 * length - 1
+        if pad_length > 0:
+            padded_relative_embeddings = F.pad(
+                relative_embeddings,
+                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+            )
+        else:
+            padded_relative_embeddings = relative_embeddings
+        used_relative_embeddings = padded_relative_embeddings[
+            :, slice_start_position:slice_end_position
+        ]
+        return used_relative_embeddings
+
+    def _relative_position_to_absolute_position(self, x):
+        """
+        x: [b, h, l, 2*l-1]
+        ret: [b, h, l, l]
+        """
+        batch, heads, length, _ = x.size()
+        # Concat columns of pad to shift from relative to absolute indexing.
+        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+        # Concat extra elements so to add up to shape (len+1, 2*len-1).
+        x_flat = x.view([batch, heads, length * 2 * length])
+        x_flat = F.pad(
+            x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+        )
+
+        # Reshape and slice out the padded elements.
+        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+            :, :, :length, length - 1 :
+        ]
+        return x_final
+
+    def _absolute_position_to_relative_position(self, x):
+        """
+        x: [b, h, l, l]
+        ret: [b, h, l, 2*l-1]
+        """
+        batch, heads, length, _ = x.size()
+        # pad along the column dimension
+        x = F.pad(
+            x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+        )
+        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+        # add 0's in the beginning that will skew the elements after reshape
+        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+        return x_final
+
+    def _attention_bias_proximal(self, length):
+        """Bias for self-attention to encourage attention to close positions.
+        Args:
+            length: an integer scalar.
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__( + self, + in_channels, + out_channels, + filter_channels, + kernel_size, + p_dropout=0.0, + activation=None, + causal=False, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x diff --git a/lib/infer/infer_libs/infer_pack/commons.py b/lib/infer/infer_libs/infer_pack/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..2618e3ad501d1d4745a34024c2bf1676546fae80 --- /dev/null +++ b/lib/infer/infer_libs/infer_pack/commons.py @@ -0,0 +1,164 @@ +import math +import torch +from torch.nn import functional as F + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """KL(P||Q)""" + kl = (logs_q - logs_p) - 0.5 + kl += ( + 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) + ) + return kl + + +def rand_gumbel(shape): + """Sample from the Gumbel distribution, protect from overflows.""" + uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 + return -torch.log(-torch.log(uniform_samples)) + + +def rand_gumbel_like(x): + g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) + return g + + +def slice_segments(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, :, idx_str:idx_end] + return ret + + +def slice_segments2(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, idx_str:idx_end] + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - 
segment_size + 1 + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size) + return ret, ids_str + + +def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( + num_timescales - 1 + ) + inv_timescales = min_timescale * torch.exp( + torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment + ) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = F.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + + +def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return x + signal.to(dtype=x.dtype, device=x.device) + + +def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) + + +def subsequent_mask(length): + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def generate_path(duration, mask): + """ + duration: [b, 1, t_x] + mask: [b, 1, t_y, t_x] + """ + device = duration.device + + b, _, t_y, t_x = mask.shape + cum_duration = torch.cumsum(duration, -1) + + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2, 3) * mask + return path + + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1.0 / norm_type) + return total_norm diff --git a/lib/infer/infer_libs/infer_pack/models.py b/lib/infer/infer_libs/infer_pack/models.py new file mode 100644 index 0000000000000000000000000000000000000000..2fa6b6b74c75a0abd478eceaea319d4f12bab97a --- /dev/null +++ 
b/lib/infer/infer_libs/infer_pack/models.py @@ -0,0 +1,1174 @@ +import math +import logging + +logger = logging.getLogger(__name__) + +import numpy as np +import torch +from torch import nn +from torch.nn import Conv1d, Conv2d, ConvTranspose1d +from torch.nn import functional as F +from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm + +from lib.infer.infer_libs.infer_pack import attentions, commons, modules +from lib.infer.infer_libs.infer_pack.commons import get_padding, init_weights +has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available()) + +class TextEncoder256(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(256, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class TextEncoder768(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + 
self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) 
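+        # tanh bounds the generated waveform to [-1, 1]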
+        x = torch.tanh(x)
+
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+    """Definition of sine generator
+    SineGen(samp_rate, harmonic_num = 0,
+            sine_amp = 0.1, noise_std = 0.003,
+            voiced_threshold = 0,
+            flag_for_pulse=False)
+    samp_rate: sampling rate in Hz
+    harmonic_num: number of harmonic overtones (default 0)
+    sine_amp: amplitude of sine waveform (default 0.1)
+    noise_std: std of Gaussian noise (default 0.003)
+    voiced_threshold: F0 threshold for U/V classification (default 0)
+    flag_for_pulse: this SineGen is used inside PulseGen (default False)
+    Note: when flag_for_pulse is True, the first time step of a voiced
+    segment is always sin(np.pi) or cos(0)
+    """
+
+    def __init__(
+        self,
+        samp_rate,
+        harmonic_num=0,
+        sine_amp=0.1,
+        noise_std=0.003,
+        voiced_threshold=0,
+        flag_for_pulse=False,
+    ):
+        super(SineGen, self).__init__()
+        self.sine_amp = sine_amp
+        self.noise_std = noise_std
+        self.harmonic_num = harmonic_num
+        self.dim = self.harmonic_num + 1
+        self.sampling_rate = samp_rate
+        self.voiced_threshold = voiced_threshold
+
+    def _f02uv(self, f0):
+        # generate uv signal
+        uv = torch.ones_like(f0)
+        uv = uv * (f0 > self.voiced_threshold)
+        if uv.device.type == "privateuseone":  # for DirectML
+            uv = uv.float()
+        return uv
+
+    def forward(self, f0, upp):
+        """sine_tensor, uv = forward(f0)
+        input F0: tensor(batchsize=1, length, dim=1)
+        f0 for unvoiced steps should be 0
+        output sine_tensor: tensor(batchsize=1, length, dim)
+        output uv: tensor(batchsize=1, length, 1)
+        """
+        with torch.no_grad():
+            f0 = f0[:, None].transpose(1, 2)
+            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+            # fundamental component
+            f0_buf[:, :, 0] = f0[:, :, 0]
+            for idx in np.arange(self.harmonic_num):
+                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+                    idx + 2
+                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+            # the % 1 here means the harmonic products cannot be optimized away later
+            rad_values = (f0_buf / self.sampling_rate) % 1
+            rand_ini = torch.rand(
+                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+            )
+            rand_ini[:, 0] = 0
+            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+            # a % 1 here would prevent optimizing the cumsum below
+            tmp_over_one = torch.cumsum(rad_values, 1)
+            tmp_over_one *= upp
+            tmp_over_one = F.interpolate(
+                tmp_over_one.transpose(2, 1),
+                scale_factor=upp,
+                mode="linear",
+                align_corners=True,
+            ).transpose(2, 1)
+            rad_values = F.interpolate(
+                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+            ).transpose(
+                2, 1
+            )
+            tmp_over_one %= 1
+            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+            cumsum_shift = torch.zeros_like(rad_values)
+            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+            sine_waves = torch.sin(
+                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+            )
+            sine_waves = sine_waves * self.sine_amp
+            uv = self._f02uv(f0)
+            uv = F.interpolate(
+                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+            ).transpose(2, 1)
+            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+            noise = noise_amp * torch.randn_like(sine_waves)
+            sine_waves = sine_waves * uv + noise
+            return sine_waves, uv, noise
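+
+# Shape note (inferred from the code above): given an F0 tensor of shape [b, T],
+# SineGen.forward returns sine_waves of shape [b, T * upp, harmonic_num + 1],
+# where upp is the total upsampling factor passed in by the caller.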
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+    """SourceModule for hn-nsf
+    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+                 add_noise_std=0.003, voiced_threshod=0)
+    sampling_rate: sampling_rate in Hz
+    harmonic_num: number of harmonics above F0 (default: 0)
+    sine_amp: amplitude of sine source signal (default: 0.1)
+    add_noise_std: std of additive Gaussian noise (default: 0.003)
+        note that amplitude of noise in unvoiced is decided
+        by sine_amp
+    voiced_threshold: threshold to set U/V given F0 (default: 0)
+    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+    F0_sampled (batchsize, length, 1)
+    Sine_source (batchsize, length, 1)
+    noise_source (batchsize, length 1)
+    uv (batchsize, length, 1)
+    """
+
+    def __init__(
+        self,
+        sampling_rate,
+        harmonic_num=0,
+        sine_amp=0.1,
+        add_noise_std=0.003,
+        voiced_threshod=0,
+        is_half=True,
+    ):
+        super(SourceModuleHnNSF, self).__init__()
+
+        self.sine_amp = sine_amp
+        self.noise_std = add_noise_std
+        self.is_half = is_half
+        # to produce sine waveforms
+        self.l_sin_gen = SineGen(
+            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+        )
+
+        # to merge source harmonics into a single excitation
+        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+        self.l_tanh = torch.nn.Tanh()
+
+    def forward(self, x, upp=None):
+        if not hasattr(self, "ddtype"):
+            self.ddtype = self.l_linear.weight.dtype
+        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+        # print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype)
+        # if self.is_half:
+        #     sine_wavs = sine_wavs.half()
+        #     sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x)))
+        # print(sine_wavs.dtype,self.ddtype)
+        if sine_wavs.dtype != self.ddtype:
+            sine_wavs = sine_wavs.to(self.ddtype)
+        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+        return sine_merge, None, None  # noise, uv
+
+
+class GeneratorNSF(torch.nn.Module):
+    def __init__(
+        self,
+        initial_channel,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        gin_channels,
+        sr,
+        is_half=False,
+    ):
+        super(GeneratorNSF, self).__init__()
+        self.num_kernels = len(resblock_kernel_sizes)
+        self.num_upsamples = len(upsample_rates)
+
+        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+        self.m_source = SourceModuleHnNSF(
+            sampling_rate=sr, harmonic_num=0, is_half=is_half
+        )
+        self.noise_convs = nn.ModuleList()
+        self.conv_pre = Conv1d(
+            initial_channel, upsample_initial_channel, 7, 1, padding=3
+        )
+        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+        self.ups = nn.ModuleList()
+        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+            c_cur = upsample_initial_channel // (2 ** (i + 1))
+            self.ups.append(
+                weight_norm(
+                    ConvTranspose1d(
+                        upsample_initial_channel // (2**i),
+                        upsample_initial_channel // (2 ** (i + 1)),
+                        k,
+                        u,
+                        padding=(k - u) // 2,
+                    )
+                )
+            )
+            if i + 1 < len(upsample_rates):
+                stride_f0 = np.prod(upsample_rates[i + 1 :])
+                self.noise_convs.append(
+                    Conv1d(
+                        1,
+                        c_cur,
+                        kernel_size=stride_f0 * 2,
+                        stride=stride_f0,
+                        padding=stride_f0 // 2,
+                    )
+                )
+            else:
+                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+        self.resblocks = nn.ModuleList()
+        for i in range(len(self.ups)):
+            ch = upsample_initial_channel // (2 ** (i + 1))
+            for j, (k, d) in enumerate(
+                zip(resblock_kernel_sizes, resblock_dilation_sizes)
+            ):
+                self.resblocks.append(resblock(ch, k, d))
+
+        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+        self.ups.apply(init_weights)
+
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+        self.upp = np.prod(upsample_rates)
+
+    def forward(self, x, f0, g=None):
+        har_source, noi_source, uv = self.m_source(f0, self.upp)
+        har_source = har_source.transpose(1, 2)
+        x = self.conv_pre(x)
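+        # Add the speaker-conditioning bias when a global embedding g is provided.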
+        if g is not None:
+            x = x + self.cond(g)
+
+        for i in range(self.num_upsamples):
+            x = F.leaky_relu(x, modules.LRELU_SLOPE)
+            x = self.ups[i](x)
+            x_source = self.noise_convs[i](har_source)
+            x = x + x_source
+            xs = None
+            for j in range(self.num_kernels):
+                if xs is None:
+                    xs = self.resblocks[i * self.num_kernels + j](x)
+                else:
+                    xs += self.resblocks[i * self.num_kernels + j](x)
+            x = xs / self.num_kernels
+        x = F.leaky_relu(x)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+sr2sr = {
+    "32k": 32000,
+    "40k": 40000,
+    "48k": 48000,
+}
+
+
+class SynthesizerTrnMs256NSFsid(nn.Module):
+    def __init__(
+        self,
+        spec_channels,
+        segment_size,
+        inter_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        spk_embed_dim,
+        gin_channels,
+        sr,
+        **kwargs
+    ):
+        super().__init__()
+        if isinstance(sr, str):
+            sr = sr2sr[sr]
+        self.spec_channels = spec_channels
+        self.inter_channels = inter_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.resblock = resblock
+        self.resblock_kernel_sizes = resblock_kernel_sizes
+        self.resblock_dilation_sizes = resblock_dilation_sizes
+        self.upsample_rates = upsample_rates
+        self.upsample_initial_channel = upsample_initial_channel
+        self.upsample_kernel_sizes = upsample_kernel_sizes
+        self.segment_size = segment_size
+        self.gin_channels = gin_channels
+        # self.hop_length = hop_length#
+        self.spk_embed_dim = spk_embed_dim
+        self.enc_p = TextEncoder256(
+            inter_channels,
+            hidden_channels,
+            filter_channels,
+            n_heads,
+            n_layers,
+            kernel_size,
+            p_dropout,
+        )
+        self.dec = GeneratorNSF(
+            inter_channels,
+            resblock,
+            resblock_kernel_sizes,
+            resblock_dilation_sizes,
+            upsample_rates,
+            upsample_initial_channel,
+            upsample_kernel_sizes,
+            gin_channels=gin_channels,
+            sr=sr,
+            is_half=kwargs["is_half"],
+        )
+        self.enc_q = PosteriorEncoder(
+            spec_channels,
+            inter_channels,
+            hidden_channels,
+            5,
+            1,
+            16,
+            gin_channels=gin_channels,
+        )
+        self.flow = ResidualCouplingBlock(
+            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+        )
+        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+        logger.debug(
+            "gin_channels: "
+            + str(gin_channels)
+            + ", self.spk_embed_dim: "
+            + str(self.spk_embed_dim)
+        )
+
+    def remove_weight_norm(self):
+        self.dec.remove_weight_norm()
+        self.flow.remove_weight_norm()
+        self.enc_q.remove_weight_norm()
+
+    def forward(
+        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+    ):  # ds is the speaker id, shape [bs, 1]
+        # print(1,pitch.shape)#[bs,t]
+        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+        z_p = self.flow(z, y_mask, g=g)
+        z_slice, ids_slice = commons.rand_slice_segments(
+            z, y_lengths, self.segment_size
+        )
+        # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
+        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+        # print(-2,pitchf.shape,z_slice.shape)
+        o = self.dec(z_slice, pitchf, g=g)
+        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
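+
+    # In infer() below, `rate` in (0, 1] keeps only the trailing fraction of the
+    # latent frames before decoding, presumably to cheapen partial inference.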
logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + nsff0 = nsff0[:, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): + sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + logger.debug( + "gin_channels: " + + str(gin_channels) + + ", self.spk_embed_dim: " + + str(self.spk_embed_dim) + ) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward( + self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds + ): # 这里ds是id,[bs,1] + # print(1,pitch.shape)#[bs,t] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) + pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) + # print(-2,pitchf.shape,z_slice.shape) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask 
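# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of the `rate` shortcut used by infer() above: when only the tail of
# the output is needed (e.g. crossfaded streaming chunks), the prior z_p, the
# mask and the F0 track are all cut to the trailing fraction before the flow
# and decoder run, so compute scales with `rate` instead of the full length.
import torch

def truncate_tail(z_p, x_mask, nsff0, rate):
    head = int(z_p.shape[2] * rate)   # number of trailing frames to keep
    return z_p[:, :, -head:], x_mask[:, :, -head:], nsff0[:, -head:]

z_p = torch.randn(1, 192, 100)
x_mask = torch.ones(1, 1, 100)
nsff0 = torch.rand(1, 100) * 200
z_p, x_mask, nsff0 = truncate_tail(z_p, x_mask, nsff0, rate=0.5)
assert z_p.shape[2] == 50
# -----------------------------------------------------------------------------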
= self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + nsff0 = nsff0[:, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs256NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=False, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + logger.debug( + "gin_channels: " + + str(gin_channels) + + ", self.spk_embed_dim: " + + str(self.spk_embed_dim) + ) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + 
n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=False, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + logger.debug( + "gin_channels: " + + str(gin_channels) + + ", self.spk_embed_dim: " + + str(self.spk_embed_dim) + ) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + # periods = [3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # 
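# --- editor's sketch (not part of the diff) ---------------------------------
# The file now defines four synthesizer variants: 256- vs 768-dim phone
# features (HuBERT v1 vs v2) crossed with NSF (pitched) vs `_nono`
# (pitch-free). A hedged sketch of how a checkpoint loader might dispatch
# among them; the `version` and `if_f0` flags are assumed config fields taken
# from a checkpoint, not code shown above.
SYNTH_CLASSES = {
    ("v1", True): SynthesizerTrnMs256NSFsid,
    ("v1", False): SynthesizerTrnMs256NSFsid_nono,
    ("v2", True): SynthesizerTrnMs768NSFsid,
    ("v2", False): SynthesizerTrnMs768NSFsid_nono,
}

def build_synth(version, if_f0, *cfg, **kwargs):
    return SYNTH_CLASSES[(version, bool(if_f0))](*cfg, **kwargs)
# -----------------------------------------------------------------------------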
print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + # periods = [2, 3, 5, 7, 11, 17] + periods = [2, 3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 1024, + 1024, + (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + if has_xpu and x.dtype == torch.bfloat16: + x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(dtype=torch.bfloat16) + else: + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap diff --git 
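# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of DiscriminatorP's "1d to 2d" trick above: pad the waveform to a
# multiple of the period with reflect padding, then view it as a 2-D grid so
# the (kernel_size, 1) convolutions compare samples exactly one period apart.
import torch
import torch.nn.functional as F

def to_period_grid(x, period):
    b, c, t = x.shape
    if t % period != 0:                    # pad first
        n_pad = period - (t % period)
        x = F.pad(x, (0, n_pad), "reflect")
        t = t + n_pad
    return x.view(b, c, t // period, period)

x = torch.randn(1, 1, 16000)
assert to_period_grid(x, 7).shape == (1, 1, 2286, 7)   # 16002 samples / 7
# -----------------------------------------------------------------------------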
a/lib/infer/infer_libs/infer_pack/models_onnx.py b/lib/infer/infer_libs/infer_pack/models_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..2b06555fafd8e64826844ecf4ee9e15b94fcec6a --- /dev/null +++ b/lib/infer/infer_libs/infer_pack/models_onnx.py @@ -0,0 +1,824 @@ +import math +import logging + +logger = logging.getLogger(__name__) + +import numpy as np +import torch +from torch import nn +from torch.nn import Conv1d, Conv2d, ConvTranspose1d +from torch.nn import functional as F +from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm + +from lib.infer.infer_libs.infer_pack import attentions, commons, modules +from lib.infer.infer_libs.infer_pack.commons import get_padding, init_weights + + +class TextEncoder256(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(256, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class TextEncoder768(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + 
super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * 
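# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of the Generator's channel/size schedule above: each ConvTranspose1d
# halves the channel count and multiplies the time axis by its rate, so the
# product of upsample_rates is the hop from latent frames to audio samples.
# The [10, 10, 2, 2] / 512 values are a typical 40k config, assumed here.
import numpy as np

upsample_rates = [10, 10, 2, 2]
upsample_initial_channel = 512
chans = [upsample_initial_channel // (2 ** (i + 1)) for i in range(len(upsample_rates))]
print(chans)                     # [256, 128, 64, 32]
print(np.prod(upsample_rates))   # 400 samples generated per latent frame
# -----------------------------------------------------------------------------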
self.num_kernels + j](x)
+                else:
+                    xs += self.resblocks[i * self.num_kernels + j](x)
+            x = xs / self.num_kernels
+        x = F.leaky_relu(x)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+    """Definition of sine generator
+    SineGen(samp_rate, harmonic_num = 0,
+            sine_amp = 0.1, noise_std = 0.003,
+            voiced_threshold = 0,
+            flag_for_pulse=False)
+    samp_rate: sampling rate in Hz
+    harmonic_num: number of harmonic overtones (default 0)
+    sine_amp: amplitude of sine waveform (default 0.1)
+    noise_std: std of Gaussian noise (default 0.003)
+    voiced_threshold: F0 threshold for U/V classification (default 0)
+    flag_for_pulse: this SineGen is used inside PulseGen (default False)
+    Note: when flag_for_pulse is True, the first time step of a voiced
+    segment is always sin(np.pi) or cos(0)
+    """
+
+    def __init__(
+        self,
+        samp_rate,
+        harmonic_num=0,
+        sine_amp=0.1,
+        noise_std=0.003,
+        voiced_threshold=0,
+        flag_for_pulse=False,
+    ):
+        super(SineGen, self).__init__()
+        self.sine_amp = sine_amp
+        self.noise_std = noise_std
+        self.harmonic_num = harmonic_num
+        self.dim = self.harmonic_num + 1
+        self.sampling_rate = samp_rate
+        self.voiced_threshold = voiced_threshold
+
+    def _f02uv(self, f0):
+        # generate uv signal
+        uv = torch.ones_like(f0)
+        uv = uv * (f0 > self.voiced_threshold)
+        return uv
+
+    def forward(self, f0, upp):
+        """sine_tensor, uv = forward(f0)
+        input F0: tensor(batchsize=1, length, dim=1)
+        f0 for unvoiced steps should be 0
+        output sine_tensor: tensor(batchsize=1, length, dim)
+        output uv: tensor(batchsize=1, length, 1)
+        """
+        with torch.no_grad():
+            f0 = f0[:, None].transpose(1, 2)
+            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+            # fundamental component
+            f0_buf[:, :, 0] = f0[:, :, 0]
+            for idx in np.arange(self.harmonic_num):
+                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+                    idx + 2
+                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+            rad_values = (f0_buf / self.sampling_rate) % 1  # the %1 means the harmonic products cannot be optimized away afterwards
+            rand_ini = torch.rand(
+                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+            )
+            rand_ini[:, 0] = 0
+            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # a %1 here would keep the following cumsum from being optimized
+            tmp_over_one *= upp
+            tmp_over_one = F.interpolate(
+                tmp_over_one.transpose(2, 1),
+                scale_factor=upp,
+                mode="linear",
+                align_corners=True,
+            ).transpose(2, 1)
+            rad_values = F.interpolate(
+                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+            ).transpose(
+                2, 1
+            )
+            tmp_over_one %= 1
+            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+            cumsum_shift = torch.zeros_like(rad_values)
+            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+            sine_waves = torch.sin(
+                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+            )
+            sine_waves = sine_waves * self.sine_amp
+            uv = self._f02uv(f0)
+            uv = F.interpolate(
+                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+            ).transpose(2, 1)
+            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+            noise = noise_amp * torch.randn_like(sine_waves)
+            sine_waves = sine_waves * uv + noise
+        return sine_waves, uv, noise
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+    """SourceModule for hn-nsf
+    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+                 add_noise_std=0.003, voiced_threshod=0)
+    sampling_rate: sampling_rate in Hz
+    harmonic_num:
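# --- editor's sketch (not part of the diff) ---------------------------------
# A compact sketch of what SineGen computes above: integrate per-sample phase
# increments (f0 / sample_rate) with cumsum, add harmonics as integer
# multiples of the F0, and zero the excitation where the frame is unvoiced.
import torch

def sine_excitation(f0, sr, harmonic_num=0, amp=0.1):
    # f0: [B, T] per-sample F0 in Hz (already upsampled), 0 where unvoiced
    harm = torch.arange(1, harmonic_num + 2, device=f0.device)  # 1x, 2x, ...
    phase = torch.cumsum(f0[..., None] * harm / sr, dim=1)      # [B, T, H+1]
    uv = (f0 > 0).float()[..., None]
    return amp * torch.sin(2 * torch.pi * phase) * uv

wave = sine_excitation(torch.full((1, 16000), 220.0), sr=16000, harmonic_num=2)
# -----------------------------------------------------------------------------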
number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__( + self, + sampling_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + # to produce sine waveforms + self.l_sin_gen = SineGen( + sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod + ) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x, upp=None): + sine_wavs, uv, _ = self.l_sin_gen(x, upp) + if self.is_half: + sine_wavs = sine_wavs.half() + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None # noise, uv + + +class GeneratorNSF(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF( + sampling_rate=sr, harmonic_num=0, is_half=is_half + ) + self.noise_convs = nn.ModuleList() + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1 :]) + self.noise_convs.append( + Conv1d( + 1, + c_cur, + kernel_size=stride_f0 * 2, + stride=stride_f0, + padding=stride_f0 // 2, + ) + ) + else: + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = np.prod(upsample_rates) + + def forward(self, x, f0, g=None): + har_source, noi_source, uv = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in 
range(self.num_kernels):
+                if xs is None:
+                    xs = self.resblocks[i * self.num_kernels + j](x)
+                else:
+                    xs += self.resblocks[i * self.num_kernels + j](x)
+            x = xs / self.num_kernels
+        x = F.leaky_relu(x)
+        x = self.conv_post(x)
+        x = torch.tanh(x)
+        return x
+
+    def remove_weight_norm(self):
+        for l in self.ups:
+            remove_weight_norm(l)
+        for l in self.resblocks:
+            l.remove_weight_norm()
+
+
+sr2sr = {
+    "32k": 32000,
+    "40k": 40000,
+    "48k": 48000,
+}
+
+
+class SynthesizerTrnMsNSFsidM(nn.Module):
+    def __init__(
+        self,
+        spec_channels,
+        segment_size,
+        inter_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        resblock,
+        resblock_kernel_sizes,
+        resblock_dilation_sizes,
+        upsample_rates,
+        upsample_initial_channel,
+        upsample_kernel_sizes,
+        spk_embed_dim,
+        gin_channels,
+        sr,
+        version,
+        **kwargs
+    ):
+        super().__init__()
+        if isinstance(sr, str):
+            sr = sr2sr[sr]
+        self.spec_channels = spec_channels
+        self.inter_channels = inter_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.resblock = resblock
+        self.resblock_kernel_sizes = resblock_kernel_sizes
+        self.resblock_dilation_sizes = resblock_dilation_sizes
+        self.upsample_rates = upsample_rates
+        self.upsample_initial_channel = upsample_initial_channel
+        self.upsample_kernel_sizes = upsample_kernel_sizes
+        self.segment_size = segment_size
+        self.gin_channels = gin_channels
+        # self.hop_length = hop_length#
+        self.spk_embed_dim = spk_embed_dim
+        if version == "v1":
+            self.enc_p = TextEncoder256(
+                inter_channels,
+                hidden_channels,
+                filter_channels,
+                n_heads,
+                n_layers,
+                kernel_size,
+                p_dropout,
+            )
+        else:
+            self.enc_p = TextEncoder768(
+                inter_channels,
+                hidden_channels,
+                filter_channels,
+                n_heads,
+                n_layers,
+                kernel_size,
+                p_dropout,
+            )
+        self.dec = GeneratorNSF(
+            inter_channels,
+            resblock,
+            resblock_kernel_sizes,
+            resblock_dilation_sizes,
+            upsample_rates,
+            upsample_initial_channel,
+            upsample_kernel_sizes,
+            gin_channels=gin_channels,
+            sr=sr,
+            is_half=kwargs["is_half"],
+        )
+        self.enc_q = PosteriorEncoder(
+            spec_channels,
+            inter_channels,
+            hidden_channels,
+            5,
+            1,
+            16,
+            gin_channels=gin_channels,
+        )
+        self.flow = ResidualCouplingBlock(
+            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+        )
+        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+        self.speaker_map = None
+        logger.debug(
+            "gin_channels: "
+            + str(gin_channels)
+            + ", self.spk_embed_dim: "
+            + str(self.spk_embed_dim)
+        )
+
+    def remove_weight_norm(self):
+        self.dec.remove_weight_norm()
+        self.flow.remove_weight_norm()
+        self.enc_q.remove_weight_norm()
+
+    def construct_spkmixmap(self, n_speaker):
+        self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
+        for i in range(n_speaker):
+            self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
+        self.speaker_map = self.speaker_map.unsqueeze(0)
+
+    def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
+        if self.speaker_map is not None:  # [N, S] * [S, B, 1, H]
+            g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
+            g = g * self.speaker_map  # [N, S, B, 1, H]
+            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
+            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
+        else:
+            g = g.unsqueeze(0)
+            g = self.emb_g(g).transpose(1, 2)
+
+        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+        z_p = (m_p +
torch.exp(logs_p) * rnd) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + return o + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + # periods = [3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + # periods = [2, 3, 5, 7, 11, 17] + periods = [2, 3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) 
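# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of the speaker-mix path above: construct_spkmixmap caches every
# speaker embedding, and forward() can then take mixing weights instead of a
# single id, blending voices as a convex combination of embeddings.
import torch

n_speakers, gin = 4, 256
emb = torch.randn(n_speakers, gin)            # stand-in for emb_g.weight
weights = torch.tensor([0.7, 0.3, 0.0, 0.0])  # 70% speaker 0, 30% speaker 1
g = (weights[:, None] * emb).sum(dim=0)       # [gin] blended conditioning
assert torch.isclose(weights.sum(), torch.tensor(1.0))
# -----------------------------------------------------------------------------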
+                ),
+                norm_f(
+                    Conv2d(
+                        1024,
+                        1024,
+                        (kernel_size, 1),
+                        1,
+                        padding=(get_padding(kernel_size, 1), 0),
+                    )
+                ),
+            ]
+        )
+        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+    def forward(self, x):
+        fmap = []
+
+        # 1d to 2d
+        b, c, t = x.shape
+        if t % self.period != 0:  # pad first
+            n_pad = self.period - (t % self.period)
+            x = F.pad(x, (0, n_pad), "reflect")
+            t = t + n_pad
+        x = x.view(b, c, t // self.period, self.period)
+
+        for l in self.convs:
+            x = l(x)
+            x = F.leaky_relu(x, modules.LRELU_SLOPE)
+            fmap.append(x)
+        x = self.conv_post(x)
+        fmap.append(x)
+        x = torch.flatten(x, 1, -1)
+
+        return x, fmap
diff --git a/lib/infer/infer_libs/infer_pack/modules.py b/lib/infer/infer_libs/infer_pack/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3aa1c45f24f1301ef329353a81e47f48655195e
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/modules.py
@@ -0,0 +1,517 @@
+import math
+import torch
+from torch import nn
+from torch.nn import Conv1d
+from torch.nn import functional as F
+from torch.nn.utils import remove_weight_norm, weight_norm
+
+from lib.infer.infer_libs.infer_pack import commons
+from lib.infer.infer_libs.infer_pack.commons import get_padding, init_weights
+from lib.infer.infer_libs.infer_pack.transforms import piecewise_rational_quadratic_transform
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        hidden_channels,
+        out_channels,
+        kernel_size,
+        n_layers,
+        p_dropout,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
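# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch checking the LayerNorm above: it transposes [B, C, T] to channel-last,
# applies F.layer_norm over the channel dimension, and transposes back, i.e.
# it normalizes each time step's channel vector.
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 50)                    # [B, C, T]
gamma, beta = torch.ones(8), torch.zeros(8)
y = F.layer_norm(x.transpose(1, -1), (8,), gamma, beta, 1e-5).transpose(1, -1)
assert y.shape == x.shape
# -----------------------------------------------------------------------------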
+ + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append( + nn.Conv1d( + in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 + ) + ) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append( + nn.Conv1d( + hidden_channels, + hidden_channels, + kernel_size, + padding=kernel_size // 2, + ) + ) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class DDSConv(nn.Module): + """ + Dialted and Depth-Separable Convolution + """ + + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): + super().__init__() + self.channels = channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + + self.drop = nn.Dropout(p_dropout) + self.convs_sep = nn.ModuleList() + self.convs_1x1 = nn.ModuleList() + self.norms_1 = nn.ModuleList() + self.norms_2 = nn.ModuleList() + for i in range(n_layers): + dilation = kernel_size**i + padding = (kernel_size * dilation - dilation) // 2 + self.convs_sep.append( + nn.Conv1d( + channels, + channels, + kernel_size, + groups=channels, + dilation=dilation, + padding=padding, + ) + ) + self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) + self.norms_1.append(LayerNorm(channels)) + self.norms_2.append(LayerNorm(channels)) + + def forward(self, x, x_mask, g=None): + if g is not None: + x = x + g + for i in range(self.n_layers): + y = self.convs_sep[i](x * x_mask) + y = self.norms_1[i](y) + y = F.gelu(y) + y = self.convs_1x1[i](y) + y = self.norms_2[i](y) + y = F.gelu(y) + y = self.drop(y) + x = x + y + return x * x_mask + + +class WN(torch.nn.Module): + def __init__( + self, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + p_dropout=0, + ): + super(WN, self).__init__() + assert kernel_size % 2 == 1 + self.hidden_channels = hidden_channels + self.kernel_size = (kernel_size,) + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if gin_channels != 0: + cond_layer = torch.nn.Conv1d( + gin_channels, 2 * hidden_channels * n_layers, 1 + ) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") + + for i in range(n_layers): + dilation = dilation_rate**i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d( + hidden_channels, + 2 * hidden_channels, + kernel_size, + dilation=dilation, + padding=padding, + ) + in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_channels + else: + res_skip_channels = hidden_channels + + res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, x_mask, g=None, **kwargs): + output = torch.zeros_like(x) + 
n_channels_tensor = torch.IntTensor([self.hidden_channels]) + + if g is not None: + g = self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + if g is not None: + cond_offset = i * 2 * self.hidden_channels + g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] + else: + g_l = torch.zeros_like(x_in) + + acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) + acts = self.drop(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, : self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels :, :] + else: + output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + if self.gin_channels != 0: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(l) + + +class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ) + ), + ] + ) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + ] + ) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + ] + ) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = 
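# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of the WaveNet-style gate WN uses above, assuming
# commons.fused_add_tanh_sigmoid_multiply splits the 2*C channels into a tanh
# half and a sigmoid half and multiplies them after adding the conditioning.
import torch

def gated_unit(x_in, g_l, n_channels):
    acts = x_in + g_l                           # add conditioning
    t = torch.tanh(acts[:, :n_channels, :])     # content half
    s = torch.sigmoid(acts[:, n_channels:, :])  # gate half
    return t * s

out = gated_unit(torch.randn(1, 64, 10), torch.randn(1, 64, 10), 32)
assert out.shape == (1, 32, 10)
# -----------------------------------------------------------------------------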
torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False, + ): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=p_dropout, + gin_channels=gin_channels, + ) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class ConvFlow(nn.Module): + def __init__( + self, + in_channels, + filter_channels, + kernel_size, + n_layers, + num_bins=10, + tail_bound=5.0, + ): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) + self.proj = nn.Conv1d( + filter_channels, self.half_channels * (num_bins * 3 - 1), 1 + ) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
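# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of why ResidualCouplingLayer above is invertible: the first half x0
# passes through untouched and parameterizes an affine map of x1, so the
# reverse pass re-derives (m, logs) from the same x0 and undoes the affine step.
import torch

def couple(x0, x1, m, logs, reverse=False):
    if not reverse:
        return x0, m + x1 * torch.exp(logs)
    return x0, (x1 - m) * torch.exp(-logs)

x0, x1 = torch.randn(1, 96, 20), torch.randn(1, 96, 20)
m, logs = torch.randn_like(x1), torch.randn_like(x1) * 0.1
_, y1 = couple(x0, x1, m, logs)
_, x1_back = couple(x0, y1, m, logs, reverse=True)
assert torch.allclose(x1, x1_back, atol=1e-5)
# -----------------------------------------------------------------------------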
+
+        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
+            self.filter_channels
+        )
+        unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+        x1, logabsdet = piecewise_rational_quadratic_transform(
+            x1,
+            unnormalized_widths,
+            unnormalized_heights,
+            unnormalized_derivatives,
+            inverse=reverse,
+            tails="linear",
+            tail_bound=self.tail_bound,
+        )
+
+        x = torch.cat([x0, x1], 1) * x_mask
+        logdet = torch.sum(logabsdet * x_mask, [1, 2])
+        if not reverse:
+            return x, logdet
+        else:
+            return x
diff --git a/lib/infer/infer_libs/infer_pack/modules/F0Predictor/DioF0Predictor.py b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/DioF0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..54c2fd2484c3d52c3dc9bb4c88e5c102fa686fdc
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/DioF0Predictor.py
@@ -0,0 +1,91 @@
+import numpy as np
+import pyworld
+
+from lib.infer.infer_libs.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+
+
+class DioF0Predictor(F0Predictor):
+    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        self.sampling_rate = sampling_rate
+
+    def interpolate_f0(self, f0):
+        """
+        Interpolate F0 across unvoiced frames.
+        """
+
+        data = np.reshape(f0, (f0.size, 1))
+
+        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+        vuv_vector[data > 0.0] = 1.0
+        vuv_vector[data <= 0.0] = 0.0
+
+        ip_data = data
+
+        frame_number = data.size
+        last_value = 0.0
+        for i in range(frame_number):
+            if data[i] <= 0.0:
+                j = i + 1
+                for j in range(i + 1, frame_number):
+                    if data[j] > 0.0:
+                        break
+                if j < frame_number - 1:
+                    if last_value > 0.0:
+                        step = (data[j] - data[i - 1]) / float(j - i)
+                        for k in range(i, j):
+                            ip_data[k] = data[i - 1] + step * (k - i + 1)
+                    else:
+                        for k in range(i, j):
+                            ip_data[k] = data[j]
+                else:
+                    for k in range(i, frame_number):
+                        ip_data[k] = last_value
+            else:
+                ip_data[i] = data[i]  # this copy may be unnecessary
+                last_value = data[i]
+
+        return ip_data[:, 0], vuv_vector[:, 0]
+
+    def resize_f0(self, x, target_len):
+        source = np.array(x)
+        source[source < 0.001] = np.nan
+        target = np.interp(
+            np.arange(0, len(source) * target_len, len(source)) / target_len,
+            np.arange(0, len(source)),
+            source,
+        )
+        res = np.nan_to_num(target)
+        return res
+
+    def compute_f0(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.dio(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_floor=self.f0_min,
+            f0_ceil=self.f0_max,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        for index, pitch in enumerate(f0):
+            f0[index] = round(pitch, 1)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+    def compute_f0_uv(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.dio(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_floor=self.f0_min,
+            f0_ceil=self.f0_max,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        for index, pitch in enumerate(f0):
+            f0[index] = round(pitch, 1)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))
diff --git a/lib/infer/infer_libs/infer_pack/modules/F0Predictor/F0Predictor.py b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/F0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..f56e49e7f0e6eab3babf0711cae2933371b9f9cc
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/F0Predictor.py
@@ -0,0 +1,16 @@
+class F0Predictor(object):
+    def compute_f0(self, wav, p_len):
+        """
+        input: wav:[signal_length]
+               p_len:int
+        output: f0:[signal_length//hop_length]
+        """
+        pass
+
+    def compute_f0_uv(self, wav, p_len):
+        """
+        input: wav:[signal_length]
+               p_len:int
+        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
+        """
+        pass
diff --git a/lib/infer/infer_libs/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d415a5bb4b86cb03e3daf6fa50c770e8bef7f27
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
@@ -0,0 +1,87 @@
+import numpy as np
+import pyworld
+
+from lib.infer.infer_libs.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+
+
+class HarvestF0Predictor(F0Predictor):
+    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        self.sampling_rate = sampling_rate
+
+    def interpolate_f0(self, f0):
+        """
+        Interpolate F0 across unvoiced frames.
+        """
+
+        data = np.reshape(f0, (f0.size, 1))
+
+        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+        vuv_vector[data > 0.0] = 1.0
+        vuv_vector[data <= 0.0] = 0.0
+
+        ip_data = data
+
+        frame_number = data.size
+        last_value = 0.0
+        for i in range(frame_number):
+            if data[i] <= 0.0:
+                j = i + 1
+                for j in range(i + 1, frame_number):
+                    if data[j] > 0.0:
+                        break
+                if j < frame_number - 1:
+                    if last_value > 0.0:
+                        step = (data[j] - data[i - 1]) / float(j - i)
+                        for k in range(i, j):
+                            ip_data[k] = data[i - 1] + step * (k - i + 1)
+                    else:
+                        for k in range(i, j):
+                            ip_data[k] = data[j]
+                else:
+                    for k in range(i, frame_number):
+                        ip_data[k] = last_value
+            else:
+                ip_data[i] = data[i]  # this copy may be unnecessary
+                last_value = data[i]
+
+        return ip_data[:, 0], vuv_vector[:, 0]
+
+    def resize_f0(self, x, target_len):
+        source = np.array(x)
+        source[source < 0.001] = np.nan
+        target = np.interp(
+            np.arange(0, len(source) * target_len, len(source)) / target_len,
+            np.arange(0, len(source)),
+            source,
+        )
+        res = np.nan_to_num(target)
+        return res
+
+    def compute_f0(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.harvest(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_ceil=self.f0_max,
+            f0_floor=self.f0_min,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+    def compute_f0_uv(self, wav, p_len=None):
+        if p_len is None:
+            p_len = wav.shape[0] // self.hop_length
+        f0, t = pyworld.harvest(
+            wav.astype(np.double),
+            fs=self.sampling_rate,
+            f0_floor=self.f0_min,
+            f0_ceil=self.f0_max,
+            frame_period=1000 * self.hop_length / self.sampling_rate,
+        )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+        return self.interpolate_f0(self.resize_f0(f0, p_len))
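# --- editor's sketch (not part of the diff) ---------------------------------
# Sketch of the two F0 post-processing helpers shared by the predictors above:
# resize_f0 stretches the track to the target frame count, treating near-zero
# (unvoiced) frames as NaN so they don't bleed into voiced neighbours, then
# maps the NaNs back to 0; the interpolate step fills the remaining gaps so
# downstream code sees a continuous contour.
import numpy as np

def resize_f0(x, target_len):
    source = np.array(x, dtype=np.float64)
    source[source < 0.001] = np.nan
    target = np.interp(
        np.arange(0, len(source) * target_len, len(source)) / target_len,
        np.arange(0, len(source)),
        source,
    )
    return np.nan_to_num(target)

f0 = np.array([0.0, 110.0, 0.0, 220.0])
print(resize_f0(f0, 8))   # stretched to 8 frames; NaN (unvoiced) spans become 0
# -----------------------------------------------------------------------------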
diff --git a/lib/infer/infer_libs/infer_pack/modules/F0Predictor/PMF0Predictor.py b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/PMF0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f4b3cd0fbf7fb5ecd19f6bd095b00cc7109c0b4
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/PMF0Predictor.py
@@ -0,0 +1,98 @@
+import numpy as np
+import parselmouth
+
+from lib.infer.infer_libs.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+
+
+class PMF0Predictor(F0Predictor):
+    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        self.sampling_rate = sampling_rate
+
+    def interpolate_f0(self, f0):
+        """
+        Interpolate F0 across unvoiced frames.
+        """
+
+        data = np.reshape(f0, (f0.size, 1))
+
+        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+        vuv_vector[data > 0.0] = 1.0
+        vuv_vector[data <= 0.0] = 0.0
+
+        ip_data = data
+
+        frame_number = data.size
+        last_value = 0.0
+        for i in range(frame_number):
+            if data[i] <= 0.0:
+                j = i + 1
+                for j in range(i + 1, frame_number):
+                    if data[j] > 0.0:
+                        break
+                if j < frame_number - 1:
+                    if last_value > 0.0:
+                        step = (data[j] - data[i - 1]) / float(j - i)
+                        for k in range(i, j):
+                            ip_data[k] = data[i - 1] + step * (k - i + 1)
+                    else:
+                        for k in range(i, j):
+                            ip_data[k] = data[j]
+                else:
+                    for k in range(i, frame_number):
+                        ip_data[k] = last_value
+            else:
+                ip_data[i] = data[i]  # this copy may be unnecessary
+                last_value = data[i]
+
+        return ip_data[:, 0], vuv_vector[:, 0]
+
+    def compute_f0(self, wav, p_len=None):
+        x = wav
+        if p_len is None:
+            p_len = x.shape[0] // self.hop_length
+        else:
+            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+        time_step = self.hop_length / self.sampling_rate * 1000
+        f0 = (
+            parselmouth.Sound(x, self.sampling_rate)
+            .to_pitch_ac(
+                time_step=time_step / 1000,
+                voicing_threshold=0.6,
+                pitch_floor=self.f0_min,
+                pitch_ceiling=self.f0_max,
+            )
+            .selected_array["frequency"]
+        )
+
+        pad_size = (p_len - len(f0) + 1) // 2
+        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+        f0, uv = self.interpolate_f0(f0)
+        return f0
+
+    def compute_f0_uv(self, wav, p_len=None):
+        x = wav
+        if p_len is None:
+            p_len = x.shape[0] // self.hop_length
+        else:
+            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+        time_step = self.hop_length / self.sampling_rate * 1000
+        f0 = (
+            parselmouth.Sound(x, self.sampling_rate)
+            .to_pitch_ac(
+                time_step=time_step / 1000,
+                voicing_threshold=0.6,
+                pitch_floor=self.f0_min,
+                pitch_ceiling=self.f0_max,
+            )
+            .selected_array["frequency"]
+        )
+
+        pad_size = (p_len - len(f0) + 1) // 2
+        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+        f0, uv = self.interpolate_f0(f0)
+        return f0, uv
diff --git a/lib/infer/infer_libs/infer_pack/modules/F0Predictor/__init__.py b/lib/infer/infer_libs/infer_pack/modules/F0Predictor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/infer/infer_libs/infer_pack/onnx_inference.py b/lib/infer/infer_libs/infer_pack/onnx_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..2726cdb9c8fb19414bd901aa4eb87fc1e4ab807a
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/onnx_inference.py
@@ -0,0 +1,148 @@
+import librosa
+import numpy as np
+import onnxruntime
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ContentVec:
+    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
diff --git a/lib/infer/infer_libs/infer_pack/onnx_inference.py b/lib/infer/infer_libs/infer_pack/onnx_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..2726cdb9c8fb19414bd901aa4eb87fc1e4ab807a
--- /dev/null
+++ b/lib/infer/infer_libs/infer_pack/onnx_inference.py
@@ -0,0 +1,148 @@
+import librosa
+import numpy as np
+import onnxruntime
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ContentVec:
+    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
+        logger.info("Load model(s) from {}".format(vec_path))
+        if device == "cpu" or device is None:
+            providers = ["CPUExecutionProvider"]
+        elif device == "cuda":
+            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
+        else:
+            raise RuntimeError("Unsupported device")
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def __call__(self, wav):
+        return self.forward(wav)
+
+    def forward(self, wav):
+        feats = wav
+        if feats.ndim == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.ndim == 1, feats.ndim
+        feats = np.expand_dims(np.expand_dims(feats, 0), 0)
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)[0]
+        return logits.transpose(0, 2, 1)
+
+
+def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kwargs):
+    if f0_predictor == "pm":
+        from lib.infer.infer_libs.infer_pack.modules.F0Predictor.PMF0Predictor import (
+            PMF0Predictor,
+        )
+
+        f0_predictor_object = PMF0Predictor(
+            hop_length=hop_length, sampling_rate=sampling_rate
+        )
+    elif f0_predictor == "harvest":
+        from lib.infer.infer_libs.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
+            HarvestF0Predictor,
+        )
+
+        f0_predictor_object = HarvestF0Predictor(
+            hop_length=hop_length, sampling_rate=sampling_rate
+        )
+    elif f0_predictor == "dio":
+        from lib.infer.infer_libs.infer_pack.modules.F0Predictor.DioF0Predictor import (
+            DioF0Predictor,
+        )
+
+        f0_predictor_object = DioF0Predictor(
+            hop_length=hop_length, sampling_rate=sampling_rate
+        )
+    else:
+        raise Exception("Unknown f0 predictor")
+    return f0_predictor_object
+
+
+class OnnxRVC:
+    def __init__(
+        self,
+        model_path,
+        sr=40000,
+        hop_size=512,
+        vec_path="vec-768-layer-12",
+        device="cpu",
+    ):
+        vec_path = f"pretrained/{vec_path}.onnx"
+        self.vec_model = ContentVec(vec_path, device)
+        if device == "cpu" or device is None:
+            providers = ["CPUExecutionProvider"]
+        elif device == "cuda":
+            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
+        else:
+            raise RuntimeError("Unsupported device")
+        self.model = onnxruntime.InferenceSession(model_path, providers=providers)
+        self.sampling_rate = sr
+        self.hop_size = hop_size
+
+    def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
+        onnx_input = {
+            self.model.get_inputs()[0].name: hubert,
+            self.model.get_inputs()[1].name: hubert_length,
+            self.model.get_inputs()[2].name: pitch,
+            self.model.get_inputs()[3].name: pitchf,
+            self.model.get_inputs()[4].name: ds,
+            self.model.get_inputs()[5].name: rnd,
+        }
+        return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
+
+    def inference(
+        self,
+        raw_path,
+        sid,
+        f0_method="dio",
+        f0_up_key=0,
+        pad_time=0.5,
+        cr_threshold=0.02,
+    ):
+        f0_min = 50
+        f0_max = 1100
+        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+        f0_predictor = get_f0_predictor(
+            f0_method,
+            hop_length=self.hop_size,
+            sampling_rate=self.sampling_rate,
+            threshold=cr_threshold,
+        )
+        wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
+        org_length = len(wav)
+        if org_length / sr > 50.0:
+            raise RuntimeError("Reached Max Length")
+
+        wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
+
+        hubert = self.vec_model(wav16k)
+        hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
+        hubert_length = hubert.shape[1]
+
+        pitchf = f0_predictor.compute_f0(wav, hubert_length)
+        pitchf = 
pitchf * 2 ** (f0_up_key / 12) + pitch = pitchf.copy() + f0_mel = 1127 * np.log(1 + pitch / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( + f0_mel_max - f0_mel_min + ) + 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > 255] = 255 + pitch = np.rint(f0_mel).astype(np.int64) + + pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) + pitch = pitch.reshape(1, len(pitch)) + ds = np.array([sid]).astype(np.int64) + + rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) + hubert_length = np.array([hubert_length]).astype(np.int64) + + out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() + out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") + return out_wav[0:org_length] diff --git a/lib/infer/infer_libs/infer_pack/transforms.py b/lib/infer/infer_libs/infer_pack/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6f30b7177d17fc61a4173c21b4233172a890be58 --- /dev/null +++ b/lib/infer/infer_libs/infer_pack/transforms.py @@ -0,0 +1,207 @@ +import numpy as np +import torch +from torch.nn import functional as F + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = {"tails": tails, "tail_bound": tail_bound} + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + + +def unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails="linear", + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == "linear": + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError("{} tails are not implemented.".format(tails)) + + ( + outputs[inside_interval_mask], + logabsdet[inside_interval_mask], + ) = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + 
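# Illustrative check, not part of the patch: the coarse-pitch mapping used in
# OnnxRVC.inference above converts f0 in Hz to mel via 1127 * ln(1 + f0 / 700)
# and rescales it linearly onto the 1..255 bins the model consumes.
import numpy as np

f0_min, f0_max = 50.0, 1100.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)   # ~77.8
f0_mel_max = 1127 * np.log(1 + f0_max / 700)   # ~1064.4
f0 = 220.0                                     # A3 in Hz
mel = 1127 * np.log(1 + f0 / 700)
coarse = np.rint((mel - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1)
assert 1 <= coarse <= 255                      # 220 Hz lands near bin 60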
inverse=inverse, + left=-tail_bound, + right=tail_bound, + bottom=-tail_bound, + top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + ) + + return outputs, logabsdet + + +def rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0.0, + right=1.0, + bottom=0.0, + top=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError("Input to a transform is not within its domain") + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError("Minimal bin width too large for the number of bins") + if min_bin_height * num_bins > 1.0: + raise ValueError("Minimal bin height too large for the number of bins") + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + input_heights * (input_delta - input_derivatives) + b = input_heights * input_derivatives - (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + c = -input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta + ) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2) + ) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights 
* ( + input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta + ) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta + ) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2) + ) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet diff --git a/lib/infer/infer_libs/inference-presets.json b/lib/infer/infer_libs/inference-presets.json new file mode 100644 index 0000000000000000000000000000000000000000..7f68a27d96b01d89265d0507aa6f7af5a67065c1 --- /dev/null +++ b/lib/infer/infer_libs/inference-presets.json @@ -0,0 +1,20 @@ +{ + "presets": [ + { + "name": "Default Preset", + "model": "", + "transpose": 0, + "audio_file": "", + "f0_method": "pm", + "crepe_hop_length": 160, + "median_filtering": 3, + "feature_path": "", + "auto_feature_path": "", + "search_feature_ratio": 0.88, + "resample": 0, + "volume_envelope": 1, + "protect_voiceless": 0.33, + "f0_file_path": "" + } + ] +} diff --git a/lib/infer/infer_libs/rmvpe.py b/lib/infer/infer_libs/rmvpe.py new file mode 100644 index 0000000000000000000000000000000000000000..08dd79b76e505a9c8bee554da32c75772f560006 --- /dev/null +++ b/lib/infer/infer_libs/rmvpe.py @@ -0,0 +1,724 @@ +import os + +import numpy as np +import torch +try: + #Fix "Torch not compiled with CUDA enabled" + import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import + if torch.xpu.is_available(): + from lib.infer.modules.ipex import ipex_init + ipex_init() +except Exception: + pass +import torch.nn as nn +import torch.nn.functional as F +from librosa.util import normalize, pad_center, tiny +from scipy.signal import get_window + +import logging + +logger = logging.getLogger(__name__) + + +###stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py +def window_sumsquare( + window, + n_frames, + hop_length=200, + win_length=800, + n_fft=800, + dtype=np.float32, + norm=None, +): + """ + # from librosa 0.6 + Compute the sum-square envelope of a window function at a given hop length. + This is used to estimate modulation effects induced by windowing + observations in short-time fourier transforms. + Parameters + ---------- + window : string, tuple, number, callable, or list-like + Window specification, as in `get_window` + n_frames : int > 0 + The number of analysis frames + hop_length : int > 0 + The number of samples to advance between frames + win_length : [optional] + The length of the window function. By default, this matches `n_fft`. + n_fft : int > 0 + The length of each analysis frame. 
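# A sketch, not part of the patch, of the invertibility that transforms.py above
# provides: applying the flow forward and then with inverse=True should recover
# the input up to float32 tolerance, with the log-determinants cancelling.
import torch
from lib.infer.infer_libs.infer_pack.transforms import (
    piecewise_rational_quadratic_transform,
)

x = torch.rand(4, 10) * 2 - 1   # inputs inside the [-1, 1] tail bound
w = torch.randn(4, 10, 8)       # unnormalized widths (8 bins)
h = torch.randn(4, 10, 8)       # unnormalized heights
d = torch.randn(4, 10, 7)       # unnormalized derivatives (bins - 1)
y, logdet = piecewise_rational_quadratic_transform(x, w, h, d, tails="linear")
x2, inv_logdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear"
)
assert torch.allclose(x, x2, atol=1e-4)
assert torch.allclose(logdet + inv_logdet, torch.zeros_like(logdet), atol=1e-4)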
+    dtype : np.dtype
+        The data type of the output
+    Returns
+    -------
+    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
+        The sum-squared envelope of the window function
+    """
+    if win_length is None:
+        win_length = n_fft
+
+    n = n_fft + hop_length * (n_frames - 1)
+    x = np.zeros(n, dtype=dtype)
+
+    # Compute the squared window at the desired length
+    win_sq = get_window(window, win_length, fftbins=True)
+    win_sq = normalize(win_sq, norm=norm) ** 2
+    win_sq = pad_center(win_sq, size=n_fft)
+
+    # Fill the envelope
+    for i in range(n_frames):
+        sample = i * hop_length
+        x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
+    return x
+
+
+class STFT(torch.nn.Module):
+    def __init__(
+        self, filter_length=1024, hop_length=512, win_length=None, window="hann"
+    ):
+        """
+        This module implements an STFT using 1D convolution and 1D transpose convolutions.
+        This is a bit tricky so there are some cases that probably won't work as working
+        out the same sizes before and after in all overlap add setups is tough. Right now,
+        this code should work with hop lengths that are half the filter length (50% overlap
+        between frames).
+
+        Keyword Arguments:
+            filter_length {int} -- Length of filters used (default: {1024})
+            hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512})
+            win_length {[type]} -- Length of the window function applied to each frame (if not specified, it
+                equals the filter length). (default: {None})
+            window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris)
+                (default: {'hann'})
+        """
+        super(STFT, self).__init__()
+        self.filter_length = filter_length
+        self.hop_length = hop_length
+        self.win_length = win_length if win_length else filter_length
+        self.window = window
+        self.forward_transform = None
+        self.pad_amount = int(self.filter_length / 2)
+        scale = self.filter_length / self.hop_length
+        fourier_basis = np.fft.fft(np.eye(self.filter_length))
+
+        cutoff = int((self.filter_length / 2 + 1))
+        fourier_basis = np.vstack(
+            [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
+        )
+        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
+        inverse_basis = torch.FloatTensor(
+            np.linalg.pinv(scale * fourier_basis).T[:, None, :]
+        )
+
+        assert filter_length >= self.win_length
+        # get window and zero center pad it to filter_length
+        fft_window = get_window(window, self.win_length, fftbins=True)
+        fft_window = pad_center(fft_window, size=filter_length)
+        fft_window = torch.from_numpy(fft_window).float()
+
+        # window the bases
+        forward_basis *= fft_window
+        inverse_basis *= fft_window
+
+        self.register_buffer("forward_basis", forward_basis.float())
+        self.register_buffer("inverse_basis", inverse_basis.float())
+    def transform(self, input_data, return_phase=False):
+        """Take input data (audio) to STFT domain.
+
+        Arguments:
+            input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
+
+        Returns:
+            magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
+                num_frequencies, num_frames)
+            phase {tensor} -- Phase of STFT with shape (num_batch,
+                num_frequencies, num_frames), only when return_phase is True
+        """
+        num_batches = input_data.shape[0]
+        num_samples = input_data.shape[-1]
+
+        self.num_samples = num_samples
+
+        # similar to librosa, reflect-pad the input
+        input_data = input_data.view(num_batches, 1, num_samples)
+        input_data = F.pad(
+            input_data.unsqueeze(1),
+            (self.pad_amount, self.pad_amount, 0, 0, 0, 0),
+            mode="reflect",
+        ).squeeze(1)
+        forward_transform = F.conv1d(
+            input_data, self.forward_basis, stride=self.hop_length, padding=0
+        )
+
+        cutoff = int((self.filter_length / 2) + 1)
+        real_part = forward_transform[:, :cutoff, :]
+        imag_part = forward_transform[:, cutoff:, :]
+
+        magnitude = torch.sqrt(real_part**2 + imag_part**2)
+        if return_phase:
+            phase = torch.atan2(imag_part.data, real_part.data)
+            return magnitude, phase
+        return magnitude
+
+    def inverse(self, magnitude, phase):
+        """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced
+        by the ```transform``` function.
+
+        Arguments:
+            magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
+                num_frequencies, num_frames)
+            phase {tensor} -- Phase of STFT with shape (num_batch,
+                num_frequencies, num_frames)
+
+        Returns:
+            inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
+                shape (num_batch, num_samples)
+        """
+        recombine_magnitude_phase = torch.cat(
+            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
+        )
+
+        inverse_transform = F.conv_transpose1d(
+            recombine_magnitude_phase,
+            self.inverse_basis,
+            stride=self.hop_length,
+            padding=0,
+        )
+
+        if self.window is not None:
+            window_sum = window_sumsquare(
+                self.window,
+                magnitude.size(-1),
+                hop_length=self.hop_length,
+                win_length=self.win_length,
+                n_fft=self.filter_length,
+                dtype=np.float32,
+            )
+            # remove modulation effects
+            approx_nonzero_indices = torch.from_numpy(
+                np.where(window_sum > tiny(window_sum))[0]
+            )
+            window_sum = torch.from_numpy(window_sum).to(inverse_transform.device)
+            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
+                approx_nonzero_indices
+            ]
+
+            # scale by hop ratio
+            inverse_transform *= float(self.filter_length) / self.hop_length
+
+        inverse_transform = inverse_transform[..., self.pad_amount :]
+        inverse_transform = inverse_transform[..., : self.num_samples]
+        inverse_transform = inverse_transform.squeeze(1)
+
+        return inverse_transform
+    def forward(self, input_data):
+        """Take input data (audio) to STFT domain and then back to audio.
+
+        Arguments:
+            input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
+
+        Returns:
+            reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of
+                shape (num_batch, num_samples)
+        """
+        self.magnitude, self.phase = self.transform(input_data, return_phase=True)
+        reconstruction = self.inverse(self.magnitude, self.phase)
+        return reconstruction
+
+
+from time import time as ttime
+
+
+class BiGRU(nn.Module):
+    def __init__(self, input_features, hidden_features, num_layers):
+        super(BiGRU, self).__init__()
+        self.gru = nn.GRU(
+            input_features,
+            hidden_features,
+            num_layers=num_layers,
+            batch_first=True,
+            bidirectional=True,
+        )
+
+    def forward(self, x):
+        return self.gru(x)[0]
+
+
+class ConvBlockRes(nn.Module):
+    def __init__(self, in_channels, out_channels, momentum=0.01):
+        super(ConvBlockRes, self).__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=(3, 3),
+                stride=(1, 1),
+                padding=(1, 1),
+                bias=False,
+            ),
+            nn.BatchNorm2d(out_channels, momentum=momentum),
+            nn.ReLU(),
+            nn.Conv2d(
+                in_channels=out_channels,
+                out_channels=out_channels,
+                kernel_size=(3, 3),
+                stride=(1, 1),
+                padding=(1, 1),
+                bias=False,
+            ),
+            nn.BatchNorm2d(out_channels, momentum=momentum),
+            nn.ReLU(),
+        )
+        if in_channels != out_channels:
+            self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
+            self.is_shortcut = True
+        else:
+            self.is_shortcut = False
+
+    def forward(self, x):
+        if self.is_shortcut:
+            return self.conv(x) + self.shortcut(x)
+        else:
+            return self.conv(x) + x
+
+
+class Encoder(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        in_size,
+        n_encoders,
+        kernel_size,
+        n_blocks,
+        out_channels=16,
+        momentum=0.01,
+    ):
+        super(Encoder, self).__init__()
+        self.n_encoders = n_encoders
+        self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
+        self.layers = nn.ModuleList()
+        self.latent_channels = []
+        for i in range(self.n_encoders):
+            self.layers.append(
+                ResEncoderBlock(
+                    in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
+                )
+            )
+            self.latent_channels.append([out_channels, in_size])
+            in_channels = out_channels
+            out_channels *= 2
+            in_size //= 2
+        self.out_size = in_size
+        self.out_channel = out_channels
+
+    def forward(self, x):
+        concat_tensors = []
+        x = self.bn(x)
+        for i in range(self.n_encoders):
+            _, x = self.layers[i](x)
+            concat_tensors.append(_)
+        return x, concat_tensors
+
+
+class ResEncoderBlock(nn.Module):
+    def __init__(
+        self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
+    ):
+        super(ResEncoderBlock, self).__init__()
+        self.n_blocks = n_blocks
+        self.conv = nn.ModuleList()
+        self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
+        for i in range(n_blocks - 1):
+            self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
+        self.kernel_size = kernel_size
+        if self.kernel_size is not None:
+            self.pool = nn.AvgPool2d(kernel_size=kernel_size)
+
+    def forward(self, x):
+        for i in range(self.n_blocks):
+            x = self.conv[i](x)
+        if self.kernel_size is not None:
+            return x, self.pool(x)
+        else:
+            return x
+
+
+class Intermediate(nn.Module):
+    def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
+        super(Intermediate, self).__init__()
+        self.n_inters = n_inters
+        self.layers = nn.ModuleList()
+        self.layers.append(
+            ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
+        )
+        for i in range(self.n_inters - 1):
+            self.layers.append(
+                ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
+            )
+
+    def forward(self, x):
+        for i in range(self.n_inters):
+            x = self.layers[i](x)
+        return x
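# A small exercise of the STFT module above (illustrative, not part of the
# patch): transform() returns the magnitude spectrogram by default, while
# forward() round-trips the audio through analysis and synthesis.
import torch
from lib.infer.infer_libs.rmvpe import STFT

stft = STFT(filter_length=1024, hop_length=512, win_length=1024, window="hann")
audio = torch.randn(1, 16000)   # (num_batch, num_samples)
mag = stft.transform(audio)     # (1, 513, num_frames) magnitudes
recon = stft.forward(audio)     # reconstructed audio, same shape as the input
assert recon.shape == audio.shape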
+class ResDecoderBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
+        super(ResDecoderBlock, self).__init__()
+        out_padding = (0, 1) if stride == (1, 2) else (1, 1)
+        self.n_blocks = n_blocks
+        self.conv1 = nn.Sequential(
+            nn.ConvTranspose2d(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=(3, 3),
+                stride=stride,
+                padding=(1, 1),
+                output_padding=out_padding,
+                bias=False,
+            ),
+            nn.BatchNorm2d(out_channels, momentum=momentum),
+            nn.ReLU(),
+        )
+        self.conv2 = nn.ModuleList()
+        self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
+        for i in range(n_blocks - 1):
+            self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
+
+    def forward(self, x, concat_tensor):
+        x = self.conv1(x)
+        x = torch.cat((x, concat_tensor), dim=1)
+        for i in range(self.n_blocks):
+            x = self.conv2[i](x)
+        return x
+
+
+class Decoder(nn.Module):
+    def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
+        super(Decoder, self).__init__()
+        self.layers = nn.ModuleList()
+        self.n_decoders = n_decoders
+        for i in range(self.n_decoders):
+            out_channels = in_channels // 2
+            self.layers.append(
+                ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
+            )
+            in_channels = out_channels
+
+    def forward(self, x, concat_tensors):
+        for i in range(self.n_decoders):
+            x = self.layers[i](x, concat_tensors[-1 - i])
+        return x
+
+
+class DeepUnet(nn.Module):
+    def __init__(
+        self,
+        kernel_size,
+        n_blocks,
+        en_de_layers=5,
+        inter_layers=4,
+        in_channels=1,
+        en_out_channels=16,
+    ):
+        super(DeepUnet, self).__init__()
+        self.encoder = Encoder(
+            in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
+        )
+        self.intermediate = Intermediate(
+            self.encoder.out_channel // 2,
+            self.encoder.out_channel,
+            inter_layers,
+            n_blocks,
+        )
+        self.decoder = Decoder(
+            self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
+        )
+
+    def forward(self, x):
+        x, concat_tensors = self.encoder(x)
+        x = self.intermediate(x)
+        x = self.decoder(x, concat_tensors)
+        return x
+
+
+N_MELS = 128  # mel bins produced by MelSpectrogram below
+N_CLASS = 360  # pitch bins, matching cents_mapping in RMVPE below
+
+
+class E2E(nn.Module):
+    def __init__(
+        self,
+        n_blocks,
+        n_gru,
+        kernel_size,
+        en_de_layers=5,
+        inter_layers=4,
+        in_channels=1,
+        en_out_channels=16,
+    ):
+        super(E2E, self).__init__()
+        self.unet = DeepUnet(
+            kernel_size,
+            n_blocks,
+            en_de_layers,
+            inter_layers,
+            in_channels,
+            en_out_channels,
+        )
+        self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
+        if n_gru:
+            self.fc = nn.Sequential(
+                BiGRU(3 * 128, 256, n_gru),
+                nn.Linear(512, 360),
+                nn.Dropout(0.25),
+                nn.Sigmoid(),
+            )
+        else:
+            self.fc = nn.Sequential(
+                nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
+            )
+
+    def forward(self, mel):
+        mel = mel.transpose(-1, -2).unsqueeze(1)
+        x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
+        x = self.fc(x)
+        return x
+
+
+from librosa.filters import mel
+
+
+class MelSpectrogram(torch.nn.Module):
+    def __init__(
+        self,
+        is_half,
+        n_mel_channels,
+        sampling_rate,
+        win_length,
+        hop_length,
+        n_fft=None,
+        mel_fmin=0,
+        mel_fmax=None,
+        clamp=1e-5,
+    ):
+        super().__init__()
+        n_fft = win_length if n_fft is None else n_fft
+        self.hann_window = {}
+        mel_basis = mel(
+            sr=sampling_rate,
+            n_fft=n_fft,
+            n_mels=n_mel_channels,
+            fmin=mel_fmin,
+            fmax=mel_fmax,
+            htk=True,
+        )
+        mel_basis = torch.from_numpy(mel_basis).float()
+        self.register_buffer("mel_basis", mel_basis)
+        self.n_fft = win_length if n_fft is None else n_fft
+        self.hop_length = hop_length
+        self.win_length = 
win_length + self.sampling_rate = sampling_rate + self.n_mel_channels = n_mel_channels + self.clamp = clamp + self.is_half = is_half + + def forward(self, audio, keyshift=0, speed=1, center=True): + factor = 2 ** (keyshift / 12) + n_fft_new = int(np.round(self.n_fft * factor)) + win_length_new = int(np.round(self.win_length * factor)) + hop_length_new = int(np.round(self.hop_length * speed)) + keyshift_key = str(keyshift) + "_" + str(audio.device) + if keyshift_key not in self.hann_window: + self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( + # "cpu"if(audio.device.type=="privateuseone") else audio.device + audio.device + ) + # fft = torch.stft(#doesn't support pytorch_dml + # # audio.cpu() if(audio.device.type=="privateuseone")else audio, + # audio, + # n_fft=n_fft_new, + # hop_length=hop_length_new, + # win_length=win_length_new, + # window=self.hann_window[keyshift_key], + # center=center, + # return_complex=True, + # ) + # magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) + # print(1111111111) + # print(222222222222222,audio.device,self.is_half) + if hasattr(self, "stft") == False: + # print(n_fft_new,hop_length_new,win_length_new,audio.shape) + self.stft = STFT( + filter_length=n_fft_new, + hop_length=hop_length_new, + win_length=win_length_new, + window="hann", + ).to(audio.device) + magnitude = self.stft.transform(audio) # phase + # if (audio.device.type == "privateuseone"): + # magnitude=magnitude.to(audio.device) + if keyshift != 0: + size = self.n_fft // 2 + 1 + resize = magnitude.size(1) + if resize < size: + magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) + magnitude = magnitude[:, :size, :] * self.win_length / win_length_new + mel_output = torch.matmul(self.mel_basis, magnitude) + if self.is_half == True: + mel_output = mel_output.half() + log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) + # print(log_mel_spec.device.type) + return log_mel_spec + + +class RMVPE: + def __init__(self, model_path, is_half, device=None): + self.resample_kernel = {} + self.resample_kernel = {} + self.is_half = is_half + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + self.device = device + self.mel_extractor = MelSpectrogram( + is_half, 128, 16000, 1024, 160, None, 30, 8000 + ).to(device) + if "privateuseone" in str(device): + import onnxruntime as ort + + ort_session = ort.InferenceSession( + "%s/rmvpe.onnx" % os.environ["rmvpe_root"], + providers=["DmlExecutionProvider"], + ) + self.model = ort_session + else: + model = E2E(4, 1, (2, 2)) + ckpt = torch.load(model_path, map_location="cpu") + model.load_state_dict(ckpt) + model.eval() + if is_half == True: + model = model.half() + self.model = model + self.model = self.model.to(device) + cents_mapping = 20 * np.arange(360) + 1997.3794084376191 + self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 + + def mel2hidden(self, mel): + with torch.no_grad(): + n_frames = mel.shape[-1] + mel = F.pad( + mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="constant" + ) + if "privateuseone" in str(self.device): + onnx_input_name = self.model.get_inputs()[0].name + onnx_outputs_names = self.model.get_outputs()[0].name + hidden = self.model.run( + [onnx_outputs_names], + input_feed={onnx_input_name: mel.cpu().numpy()}, + )[0] + else: + hidden = self.model(mel) + return hidden[:, :n_frames] + + def decode(self, hidden, thred=0.03): + cents_pred = self.to_local_average_cents(hidden, thred=thred) + f0 = 10 * (2 ** (cents_pred / 1200)) + f0[f0 == 10] = 0 + # f0 = 
np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
+        return f0
+
+    def infer_from_audio(self, audio, thred=0.03):
+        # torch.cuda.synchronize()
+        t0 = ttime()
+        mel = self.mel_extractor(
+            torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True
+        )
+        # torch.cuda.synchronize()
+        t1 = ttime()
+        hidden = self.mel2hidden(mel)
+        # torch.cuda.synchronize()
+        t2 = ttime()
+        if "privateuseone" not in str(self.device):
+            hidden = hidden.squeeze(0).cpu().numpy()
+        else:
+            hidden = hidden[0]
+        if self.is_half:
+            hidden = hidden.astype("float32")
+
+        f0 = self.decode(hidden, thred=thred)
+        # torch.cuda.synchronize()
+        t3 = ttime()
+        return f0
+
+    def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100):
+        t0 = ttime()
+        audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
+        mel = self.mel_extractor(audio, center=True)
+        t1 = ttime()
+        hidden = self.mel2hidden(mel)
+        t2 = ttime()
+        if "privateuseone" not in str(self.device):
+            hidden = hidden.squeeze(0).cpu().numpy()
+        else:
+            hidden = hidden[0]
+        if self.is_half:
+            hidden = hidden.astype("float32")
+        f0 = self.decode(hidden, thred=thred)
+        f0[(f0 < f0_min) | (f0 > f0_max)] = 0
+        t3 = ttime()
+        return f0
+
+    def to_local_average_cents(self, salience, thred=0.05):
+        center = np.argmax(salience, axis=1)  # (n_frames,) index of the peak bin
+        salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
+        center += 4
+        todo_salience = []
+        todo_cents_mapping = []
+        starts = center - 4
+        ends = center + 5
+        for idx in range(salience.shape[0]):
+            todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
+            todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
+        todo_salience = np.array(todo_salience)  # (n_frames, 9)
+        todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
+        product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
+        weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
+        divided = product_sum / weight_sum  # (n_frames,) weighted average in cents
+        maxx = np.max(salience, axis=1)  # (n_frames,)
+        divided[maxx <= thred] = 0
+        return divided
+
+
+if __name__ == "__main__":
+    import librosa
+    import soundfile as sf
+
+    audio, sampling_rate = sf.read(r"C:\Users\liujing04\Desktop\Z\冬之花clip1.wav")
+    if len(audio.shape) > 1:
+        audio = librosa.to_mono(audio.transpose(1, 0))
+    audio_bak = audio.copy()
+    if sampling_rate != 16000:
+        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+    model_path = r"D:\BaiduNetdiskDownload\RVC-beta-v2-0727AMD_realtime\rmvpe.pt"
+    thred = 0.03  # 0.01
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    rmvpe = RMVPE(model_path, is_half=False, device=device)
+    t0 = ttime()
+    f0 = rmvpe.infer_from_audio(audio, thred=thred)
+    t1 = ttime()
+    logger.info("%s %.2f", f0.shape, t1 - t0)
diff --git a/lib/infer/infer_libs/slicer2.py b/lib/infer/infer_libs/slicer2.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b29ee262aa54045e807be2cffeb41687499ba58
--- /dev/null
+++ b/lib/infer/infer_libs/slicer2.py
@@ -0,0 +1,260 @@
+import numpy as np
+
+
+# 
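# An illustrative check, not part of the patch, of the decode() mapping above:
# RMVPE emits salience over 360 bins spaced 20 cents apart, and
# f0 = 10 * 2 ** (cents / 1200) converts the weighted cents back to Hz.
import numpy as np

cents = 1997.3794084376191 + 20 * np.arange(360)  # same grid as cents_mapping
hz = 10 * 2 ** (cents / 1200)
print(round(hz[0], 1), round(hz[-1], 1))          # ~31.7 Hz up to ~1992.6 Hz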
This function is obtained from librosa. +def get_rms( + y, + frame_length=2048, + hop_length=512, + pad_mode="constant", +): + padding = (int(frame_length // 2), int(frame_length // 2)) + y = np.pad(y, padding, mode=pad_mode) + + axis = -1 + # put our new within-frame axis at the end for now + out_strides = y.strides + tuple([y.strides[axis]]) + # Reduce the shape on the framing axis + x_shape_trimmed = list(y.shape) + x_shape_trimmed[axis] -= frame_length - 1 + out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) + xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides) + if axis < 0: + target_axis = axis - 1 + else: + target_axis = axis + 1 + xw = np.moveaxis(xw, -1, target_axis) + # Downsample along the target axis + slices = [slice(None)] * xw.ndim + slices[axis] = slice(0, None, hop_length) + x = xw[tuple(slices)] + + # Calculate power + power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) + + return np.sqrt(power) + + +class Slicer: + def __init__( + self, + sr: int, + threshold: float = -40.0, + min_length: int = 5000, + min_interval: int = 300, + hop_size: int = 20, + max_sil_kept: int = 5000, + ): + if not min_length >= min_interval >= hop_size: + raise ValueError( + "The following condition must be satisfied: min_length >= min_interval >= hop_size" + ) + if not max_sil_kept >= hop_size: + raise ValueError( + "The following condition must be satisfied: max_sil_kept >= hop_size" + ) + min_interval = sr * min_interval / 1000 + self.threshold = 10 ** (threshold / 20.0) + self.hop_size = round(sr * hop_size / 1000) + self.win_size = min(round(min_interval), 4 * self.hop_size) + self.min_length = round(sr * min_length / 1000 / self.hop_size) + self.min_interval = round(min_interval / self.hop_size) + self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) + + def _apply_slice(self, waveform, begin, end): + if len(waveform.shape) > 1: + return waveform[ + :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size) + ] + else: + return waveform[ + begin * self.hop_size : min(waveform.shape[0], end * self.hop_size) + ] + + # @timeit + def slice(self, waveform): + if len(waveform.shape) > 1: + samples = waveform.mean(axis=0) + else: + samples = waveform + if samples.shape[0] <= self.min_length: + return [waveform] + rms_list = get_rms( + y=samples, frame_length=self.win_size, hop_length=self.hop_size + ).squeeze(0) + sil_tags = [] + silence_start = None + clip_start = 0 + for i, rms in enumerate(rms_list): + # Keep looping while frame is silent. + if rms < self.threshold: + # Record start of silent frames. + if silence_start is None: + silence_start = i + continue + # Keep looping while frame is not silent and silence start has not been recorded. + if silence_start is None: + continue + # Clear recorded silence start if interval is not enough or clip is too short + is_leading_silence = silence_start == 0 and i > self.max_sil_kept + need_slice_middle = ( + i - silence_start >= self.min_interval + and i - clip_start >= self.min_length + ) + if not is_leading_silence and not need_slice_middle: + silence_start = None + continue + # Need slicing. Record the range of silent frames to be removed. 
+ if i - silence_start <= self.max_sil_kept: + pos = rms_list[silence_start : i + 1].argmin() + silence_start + if silence_start == 0: + sil_tags.append((0, pos)) + else: + sil_tags.append((pos, pos)) + clip_start = pos + elif i - silence_start <= self.max_sil_kept * 2: + pos = rms_list[ + i - self.max_sil_kept : silence_start + self.max_sil_kept + 1 + ].argmin() + pos += i - self.max_sil_kept + pos_l = ( + rms_list[ + silence_start : silence_start + self.max_sil_kept + 1 + ].argmin() + + silence_start + ) + pos_r = ( + rms_list[i - self.max_sil_kept : i + 1].argmin() + + i + - self.max_sil_kept + ) + if silence_start == 0: + sil_tags.append((0, pos_r)) + clip_start = pos_r + else: + sil_tags.append((min(pos_l, pos), max(pos_r, pos))) + clip_start = max(pos_r, pos) + else: + pos_l = ( + rms_list[ + silence_start : silence_start + self.max_sil_kept + 1 + ].argmin() + + silence_start + ) + pos_r = ( + rms_list[i - self.max_sil_kept : i + 1].argmin() + + i + - self.max_sil_kept + ) + if silence_start == 0: + sil_tags.append((0, pos_r)) + else: + sil_tags.append((pos_l, pos_r)) + clip_start = pos_r + silence_start = None + # Deal with trailing silence. + total_frames = rms_list.shape[0] + if ( + silence_start is not None + and total_frames - silence_start >= self.min_interval + ): + silence_end = min(total_frames, silence_start + self.max_sil_kept) + pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start + sil_tags.append((pos, total_frames + 1)) + # Apply and return slices. + if len(sil_tags) == 0: + return [waveform] + else: + chunks = [] + if sil_tags[0][0] > 0: + chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0])) + for i in range(len(sil_tags) - 1): + chunks.append( + self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]) + ) + if sil_tags[-1][1] < total_frames: + chunks.append( + self._apply_slice(waveform, sil_tags[-1][1], total_frames) + ) + return chunks + + +def main(): + import os.path + from argparse import ArgumentParser + + import librosa + import soundfile + + parser = ArgumentParser() + parser.add_argument("audio", type=str, help="The audio to be sliced") + parser.add_argument( + "--out", type=str, help="Output directory of the sliced audio clips" + ) + parser.add_argument( + "--db_thresh", + type=float, + required=False, + default=-40, + help="The dB threshold for silence detection", + ) + parser.add_argument( + "--min_length", + type=int, + required=False, + default=5000, + help="The minimum milliseconds required for each sliced audio clip", + ) + parser.add_argument( + "--min_interval", + type=int, + required=False, + default=300, + help="The minimum milliseconds for a silence part to be sliced", + ) + parser.add_argument( + "--hop_size", + type=int, + required=False, + default=10, + help="Frame length in milliseconds", + ) + parser.add_argument( + "--max_sil_kept", + type=int, + required=False, + default=500, + help="The maximum silence length kept around the sliced clip, presented in milliseconds", + ) + args = parser.parse_args() + out = args.out + if out is None: + out = os.path.dirname(os.path.abspath(args.audio)) + audio, sr = librosa.load(args.audio, sr=None, mono=False) + slicer = Slicer( + sr=sr, + threshold=args.db_thresh, + min_length=args.min_length, + min_interval=args.min_interval, + hop_size=args.hop_size, + max_sil_kept=args.max_sil_kept, + ) + chunks = slicer.slice(audio) + if not os.path.exists(out): + os.makedirs(out) + for i, chunk in enumerate(chunks): + if len(chunk.shape) > 1: + chunk = chunk.T + soundfile.write( + 
os.path.join(
+                out,
+                "%s_%d.wav"
+                % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
+            ),
+            chunk,
+            sr,
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lib/infer/infer_libs/stftpitchshift b/lib/infer/infer_libs/stftpitchshift
new file mode 100644
index 0000000000000000000000000000000000000000..4f62e31529b9aafede1b7c08de1f41b980fdf132
--- /dev/null
+++ b/lib/infer/infer_libs/stftpitchshift
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb2f50ea8e5ca1a11a587f11f25ba9182f9b24e2367ac480f430b3f04062782e
+size 1822104
diff --git a/lib/infer/infer_libs/stftpitchshift.exe b/lib/infer/infer_libs/stftpitchshift.exe
new file mode 100644
index 0000000000000000000000000000000000000000..39c73ad888644657dc44dd7df62e1a77859355f9
Binary files /dev/null and b/lib/infer/infer_libs/stftpitchshift.exe differ
diff --git a/lib/infer/infer_libs/train/__pycache__/process_ckpt.cpython-39.pyc b/lib/infer/infer_libs/train/__pycache__/process_ckpt.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35e533ad0c5741f06fd2ff63ed9b6818d1c38530
Binary files /dev/null and b/lib/infer/infer_libs/train/__pycache__/process_ckpt.cpython-39.pyc differ
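A sketch of driving the slicer from Python rather than through the CLI in main(); illustrative and not part of the patch, with "input.wav" as a placeholder path and thresholds mirroring the argparse defaults:

import librosa
from lib.infer.infer_libs.slicer2 import Slicer

audio, sr = librosa.load("input.wav", sr=None, mono=False)  # placeholder file
slicer = Slicer(sr=sr, threshold=-40.0, min_length=5000,
                min_interval=300, hop_size=10, max_sil_kept=500)
chunks = slicer.slice(audio)  # list of arrays with long silences removed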
+ """ + + def __init__(self, audiopaths_and_text, hparams): + self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) + self.max_wav_value = hparams.max_wav_value + self.sampling_rate = hparams.sampling_rate + self.filter_length = hparams.filter_length + self.hop_length = hparams.hop_length + self.win_length = hparams.win_length + self.sampling_rate = hparams.sampling_rate + self.min_text_len = getattr(hparams, "min_text_len", 1) + self.max_text_len = getattr(hparams, "max_text_len", 5000) + self._filter() + + def _filter(self): + """ + Filter text & store spec lengths + """ + # Store spectrogram lengths for Bucketing + # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) + # spec_length = wav_length // hop_length + audiopaths_and_text_new = [] + lengths = [] + for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text: + if self.min_text_len <= len(text) and len(text) <= self.max_text_len: + audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv]) + lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length)) + self.audiopaths_and_text = audiopaths_and_text_new + self.lengths = lengths + + def get_sid(self, sid): + sid = torch.LongTensor([int(sid)]) + return sid + + def get_audio_text_pair(self, audiopath_and_text): + # separate filename and text + file = audiopath_and_text[0] + phone = audiopath_and_text[1] + pitch = audiopath_and_text[2] + pitchf = audiopath_and_text[3] + dv = audiopath_and_text[4] + + phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf) + spec, wav = self.get_audio(file) + dv = self.get_sid(dv) + + len_phone = phone.size()[0] + len_spec = spec.size()[-1] + # print(123,phone.shape,pitch.shape,spec.shape) + if len_phone != len_spec: + len_min = min(len_phone, len_spec) + # amor + len_wav = len_min * self.hop_length + + spec = spec[:, :len_min] + wav = wav[:, :len_wav] + + phone = phone[:len_min, :] + pitch = pitch[:len_min] + pitchf = pitchf[:len_min] + + return (spec, wav, phone, pitch, pitchf, dv) + + def get_labels(self, phone, pitch, pitchf): + phone = np.load(phone) + phone = np.repeat(phone, 2, axis=0) + pitch = np.load(pitch) + pitchf = np.load(pitchf) + n_num = min(phone.shape[0], 900) # DistributedBucketSampler + # print(234,phone.shape,pitch.shape) + phone = phone[:n_num, :] + pitch = pitch[:n_num] + pitchf = pitchf[:n_num] + phone = torch.FloatTensor(phone) + pitch = torch.LongTensor(pitch) + pitchf = torch.FloatTensor(pitchf) + return phone, pitch, pitchf + + def get_audio(self, filename): + audio, sampling_rate = load_wav_to_torch(filename) + if sampling_rate != self.sampling_rate: + raise ValueError( + "{} SR doesn't match target {} SR".format( + sampling_rate, self.sampling_rate + ) + ) + audio_norm = audio + # audio_norm = audio / self.max_wav_value + # audio_norm = audio / np.abs(audio).max() + + audio_norm = audio_norm.unsqueeze(0) + spec_filename = filename.replace(".wav", ".spec.pt") + if os.path.exists(spec_filename): + try: + spec = torch.load(spec_filename) + except: + logger.warn("%s %s", spec_filename, traceback.format_exc()) + spec = spectrogram_torch( + audio_norm, + self.filter_length, + self.sampling_rate, + self.hop_length, + self.win_length, + center=False, + ) + spec = torch.squeeze(spec, 0) + torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) + else: + spec = spectrogram_torch( + audio_norm, + self.filter_length, + self.sampling_rate, + self.hop_length, + self.win_length, + center=False, + ) + spec = torch.squeeze(spec, 0) + torch.save(spec, 
+    def __getitem__(self, index):
+        return self.get_audio_text_pair(self.audiopaths_and_text[index])
+
+    def __len__(self):
+        return len(self.audiopaths_and_text)
+
+
+class TextAudioCollateMultiNSFsid:
+    """Zero-pads model inputs and targets"""
+
+    def __init__(self, return_ids=False):
+        self.return_ids = return_ids
+
+    def __call__(self, batch):
+        """Collates a training batch from normalized text and audio
+        PARAMS
+        ------
+        batch: [text_normalized, spec_normalized, wav_normalized]
+        """
+        # Right zero-pad all one-hot text sequences to max input length
+        _, ids_sorted_decreasing = torch.sort(
+            torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
+        )
+
+        max_spec_len = max([x[0].size(1) for x in batch])
+        max_wave_len = max([x[1].size(1) for x in batch])
+        spec_lengths = torch.LongTensor(len(batch))
+        wave_lengths = torch.LongTensor(len(batch))
+        spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
+        wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
+        spec_padded.zero_()
+        wave_padded.zero_()
+
+        max_phone_len = max([x[2].size(0) for x in batch])
+        phone_lengths = torch.LongTensor(len(batch))
+        phone_padded = torch.FloatTensor(
+            len(batch), max_phone_len, batch[0][2].shape[1]
+        )  # (spec, wav, phone, pitch)
+        pitch_padded = torch.LongTensor(len(batch), max_phone_len)
+        pitchf_padded = torch.FloatTensor(len(batch), max_phone_len)
+        phone_padded.zero_()
+        pitch_padded.zero_()
+        pitchf_padded.zero_()
+        # dv = torch.FloatTensor(len(batch), 256)  # gin=256
+        sid = torch.LongTensor(len(batch))
+
+        for i in range(len(ids_sorted_decreasing)):
+            row = batch[ids_sorted_decreasing[i]]
+
+            spec = row[0]
+            spec_padded[i, :, : spec.size(1)] = spec
+            spec_lengths[i] = spec.size(1)
+
+            wave = row[1]
+            wave_padded[i, :, : wave.size(1)] = wave
+            wave_lengths[i] = wave.size(1)
+
+            phone = row[2]
+            phone_padded[i, : phone.size(0), :] = phone
+            phone_lengths[i] = phone.size(0)
+
+            pitch = row[3]
+            pitch_padded[i, : pitch.size(0)] = pitch
+            pitchf = row[4]
+            pitchf_padded[i, : pitchf.size(0)] = pitchf
+
+            # dv[i] = row[5]
+            sid[i] = row[5]
+
+        return (
+            phone_padded,
+            phone_lengths,
+            pitch_padded,
+            pitchf_padded,
+            spec_padded,
+            spec_lengths,
+            wave_padded,
+            wave_lengths,
+            # dv
+            sid,
+        )
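# A usage sketch, not part of the patch: wiring the dataset and collator above
# into a DataLoader. The hparams values below are typical 40k settings and
# "filelist.txt" is a placeholder; each filelist row is assumed to be
# "wav|phone.npy|pitch.npy|pitchf.npy|speaker_id".
from types import SimpleNamespace
from torch.utils.data import DataLoader

hps = SimpleNamespace(max_wav_value=32768.0, sampling_rate=40000,
                      filter_length=2048, hop_length=400, win_length=2048)
dataset = TextAudioLoaderMultiNSFsid("filelist.txt", hps)
loader = DataLoader(dataset, batch_size=4,
                    collate_fn=TextAudioCollateMultiNSFsid(), shuffle=False)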
+ """ + + def __init__(self, audiopaths_and_text, hparams): + self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) + self.max_wav_value = hparams.max_wav_value + self.sampling_rate = hparams.sampling_rate + self.filter_length = hparams.filter_length + self.hop_length = hparams.hop_length + self.win_length = hparams.win_length + self.sampling_rate = hparams.sampling_rate + self.min_text_len = getattr(hparams, "min_text_len", 1) + self.max_text_len = getattr(hparams, "max_text_len", 5000) + self._filter() + + def _filter(self): + """ + Filter text & store spec lengths + """ + # Store spectrogram lengths for Bucketing + # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) + # spec_length = wav_length // hop_length + audiopaths_and_text_new = [] + lengths = [] + for audiopath, text, dv in self.audiopaths_and_text: + if self.min_text_len <= len(text) and len(text) <= self.max_text_len: + audiopaths_and_text_new.append([audiopath, text, dv]) + lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length)) + self.audiopaths_and_text = audiopaths_and_text_new + self.lengths = lengths + + def get_sid(self, sid): + sid = torch.LongTensor([int(sid)]) + return sid + + def get_audio_text_pair(self, audiopath_and_text): + # separate filename and text + file = audiopath_and_text[0] + phone = audiopath_and_text[1] + dv = audiopath_and_text[2] + + phone = self.get_labels(phone) + spec, wav = self.get_audio(file) + dv = self.get_sid(dv) + + len_phone = phone.size()[0] + len_spec = spec.size()[-1] + if len_phone != len_spec: + len_min = min(len_phone, len_spec) + len_wav = len_min * self.hop_length + spec = spec[:, :len_min] + wav = wav[:, :len_wav] + phone = phone[:len_min, :] + return (spec, wav, phone, dv) + + def get_labels(self, phone): + phone = np.load(phone) + phone = np.repeat(phone, 2, axis=0) + n_num = min(phone.shape[0], 900) # DistributedBucketSampler + phone = phone[:n_num, :] + phone = torch.FloatTensor(phone) + return phone + + def get_audio(self, filename): + audio, sampling_rate = load_wav_to_torch(filename) + if sampling_rate != self.sampling_rate: + raise ValueError( + "{} SR doesn't match target {} SR".format( + sampling_rate, self.sampling_rate + ) + ) + audio_norm = audio + # audio_norm = audio / self.max_wav_value + # audio_norm = audio / np.abs(audio).max() + + audio_norm = audio_norm.unsqueeze(0) + spec_filename = filename.replace(".wav", ".spec.pt") + if os.path.exists(spec_filename): + try: + spec = torch.load(spec_filename) + except: + logger.warn("%s %s", spec_filename, traceback.format_exc()) + spec = spectrogram_torch( + audio_norm, + self.filter_length, + self.sampling_rate, + self.hop_length, + self.win_length, + center=False, + ) + spec = torch.squeeze(spec, 0) + torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) + else: + spec = spectrogram_torch( + audio_norm, + self.filter_length, + self.sampling_rate, + self.hop_length, + self.win_length, + center=False, + ) + spec = torch.squeeze(spec, 0) + torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) + return spec, audio_norm + + def __getitem__(self, index): + return self.get_audio_text_pair(self.audiopaths_and_text[index]) + + def __len__(self): + return len(self.audiopaths_and_text) + + +class TextAudioCollate: + """Zero-pads model inputs and targets""" + + def __init__(self, return_ids=False): + self.return_ids = return_ids + + def __call__(self, batch): + """Collate's training batch from normalized text and aduio + PARAMS + ------ + 
batch: [text_normalized, spec_normalized, wav_normalized] + """ + # Right zero-pad all one-hot text sequences to max input length + _, ids_sorted_decreasing = torch.sort( + torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True + ) + + max_spec_len = max([x[0].size(1) for x in batch]) + max_wave_len = max([x[1].size(1) for x in batch]) + spec_lengths = torch.LongTensor(len(batch)) + wave_lengths = torch.LongTensor(len(batch)) + spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len) + wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len) + spec_padded.zero_() + wave_padded.zero_() + + max_phone_len = max([x[2].size(0) for x in batch]) + phone_lengths = torch.LongTensor(len(batch)) + phone_padded = torch.FloatTensor( + len(batch), max_phone_len, batch[0][2].shape[1] + ) + phone_padded.zero_() + sid = torch.LongTensor(len(batch)) + + for i in range(len(ids_sorted_decreasing)): + row = batch[ids_sorted_decreasing[i]] + + spec = row[0] + spec_padded[i, :, : spec.size(1)] = spec + spec_lengths[i] = spec.size(1) + + wave = row[1] + wave_padded[i, :, : wave.size(1)] = wave + wave_lengths[i] = wave.size(1) + + phone = row[2] + phone_padded[i, : phone.size(0), :] = phone + phone_lengths[i] = phone.size(0) + + sid[i] = row[3] + + return ( + phone_padded, + phone_lengths, + spec_padded, + spec_lengths, + wave_padded, + wave_lengths, + sid, + ) + + +class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): + """ + Maintain similar input lengths in a batch. + Length groups are specified by boundaries. + Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. + + It removes samples which are not included in the boundaries. + Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
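# An illustrative single-process configuration, not part of the patch: the
# sampler below groups clips of similar spectrogram length into each batch.
# The boundary values (bucket edges in spectrogram frames) are an assumption
# here; dataset and the collate class are the ones defined earlier in this file.
from torch.utils.data import DataLoader

sampler = DistributedBucketSampler(
    dataset, batch_size=4,
    boundaries=[100, 200, 300, 400, 500, 600, 700, 800, 900],
    num_replicas=1, rank=0, shuffle=True,
)
loader = DataLoader(dataset, batch_sampler=sampler,
                    collate_fn=TextAudioCollateMultiNSFsid())
sampler.set_epoch(0)  # reshuffle deterministically per epoch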
+ """ + + def __init__( + self, + dataset, + batch_size, + boundaries, + num_replicas=None, + rank=None, + shuffle=True, + ): + super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + self.lengths = dataset.lengths + self.batch_size = batch_size + self.boundaries = boundaries + + self.buckets, self.num_samples_per_bucket = self._create_buckets() + self.total_size = sum(self.num_samples_per_bucket) + self.num_samples = self.total_size // self.num_replicas + + def _create_buckets(self): + buckets = [[] for _ in range(len(self.boundaries) - 1)] + for i in range(len(self.lengths)): + length = self.lengths[i] + idx_bucket = self._bisect(length) + if idx_bucket != -1: + buckets[idx_bucket].append(i) + + for i in range(len(buckets) - 1, -1, -1): # + if len(buckets[i]) == 0: + buckets.pop(i) + self.boundaries.pop(i + 1) + + num_samples_per_bucket = [] + for i in range(len(buckets)): + len_bucket = len(buckets[i]) + total_batch_size = self.num_replicas * self.batch_size + rem = ( + total_batch_size - (len_bucket % total_batch_size) + ) % total_batch_size + num_samples_per_bucket.append(len_bucket + rem) + return buckets, num_samples_per_bucket + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + + indices = [] + if self.shuffle: + for bucket in self.buckets: + indices.append(torch.randperm(len(bucket), generator=g).tolist()) + else: + for bucket in self.buckets: + indices.append(list(range(len(bucket)))) + + batches = [] + for i in range(len(self.buckets)): + bucket = self.buckets[i] + len_bucket = len(bucket) + ids_bucket = indices[i] + num_samples_bucket = self.num_samples_per_bucket[i] + + # add extra samples to make it evenly divisible + rem = num_samples_bucket - len_bucket + ids_bucket = ( + ids_bucket + + ids_bucket * (rem // len_bucket) + + ids_bucket[: (rem % len_bucket)] + ) + + # subsample + ids_bucket = ids_bucket[self.rank :: self.num_replicas] + + # batching + for j in range(len(ids_bucket) // self.batch_size): + batch = [ + bucket[idx] + for idx in ids_bucket[ + j * self.batch_size : (j + 1) * self.batch_size + ] + ] + batches.append(batch) + + if self.shuffle: + batch_ids = torch.randperm(len(batches), generator=g).tolist() + batches = [batches[i] for i in batch_ids] + self.batches = batches + + assert len(self.batches) * self.batch_size == self.num_samples + return iter(self.batches) + + def _bisect(self, x, lo=0, hi=None): + if hi is None: + hi = len(self.boundaries) - 1 + + if hi > lo: + mid = (hi + lo) // 2 + if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: + return mid + elif x <= self.boundaries[mid]: + return self._bisect(x, lo, mid) + else: + return self._bisect(x, mid + 1, hi) + else: + return -1 + + def __len__(self): + return self.num_samples // self.batch_size diff --git a/lib/infer/infer_libs/train/losses.py b/lib/infer/infer_libs/train/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..b1b263e4c205e78ffe970f622ab6ff68f36d3b17 --- /dev/null +++ b/lib/infer/infer_libs/train/losses.py @@ -0,0 +1,58 @@ +import torch + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + rl = rl.float().detach() + gl = gl.float() + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + dr = dr.float() + dg = 
dg.float() + r_loss = torch.mean((1 - dr) ** 2) + g_loss = torch.mean(dg**2) + loss += r_loss + g_loss + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +def generator_loss(disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + dg = dg.float() + l = torch.mean((1 - dg) ** 2) + gen_losses.append(l) + loss += l + + return loss, gen_losses + + +def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): + """ + z_p, logs_q: [b, h, t_t] + m_p, logs_p: [b, h, t_t] + """ + z_p = z_p.float() + logs_q = logs_q.float() + m_p = m_p.float() + logs_p = logs_p.float() + z_mask = z_mask.float() + + kl = logs_p - logs_q - 0.5 + kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p) + kl = torch.sum(kl * z_mask) + l = kl / torch.sum(z_mask) + return l diff --git a/lib/infer/infer_libs/train/mel_processing.py b/lib/infer/infer_libs/train/mel_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..03330d247aea554c9e87d497e8e969305772afab --- /dev/null +++ b/lib/infer/infer_libs/train/mel_processing.py @@ -0,0 +1,131 @@ +import torch +from librosa.filters import mel as librosa_mel_fn +import logging + +logger = logging.getLogger(__name__) + +MAX_WAV_VALUE = 32768.0 + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + """ + PARAMS + ------ + C: compression factor + """ + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + """ + PARAMS + ------ + C: compression factor used to compress + """ + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + return dynamic_range_compression_torch(magnitudes) + + +def spectral_de_normalize_torch(magnitudes): + return dynamic_range_decompression_torch(magnitudes) + + +# Reusable banks +mel_basis = {} +hann_window = {} + + +def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): + """Convert waveform into Linear-frequency Linear-amplitude spectrogram. 
+ + Args: + y :: (B, T) - Audio waveforms + n_fft + sampling_rate + hop_size + win_size + center + Returns: + :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram + """ + # Validation + if torch.min(y) < -1.07: + logger.debug("min value is %s", str(torch.min(y))) + if torch.max(y) > 1.07: + logger.debug("max value is %s", str(torch.max(y))) + + # Window - Cache if needed + global hann_window + dtype_device = str(y.dtype) + "_" + str(y.device) + wnsize_dtype_device = str(win_size) + "_" + dtype_device + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( + dtype=y.dtype, device=y.device + ) + + # Padding + y = torch.nn.functional.pad( + y.unsqueeze(1), + (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), + mode="reflect", + ) + y = y.squeeze(1) + + # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2) + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[wnsize_dtype_device], + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=False, + ) + + # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame) + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + return spec + + +def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): + # MelBasis - Cache if needed + global mel_basis + dtype_device = str(spec.dtype) + "_" + str(spec.device) + fmax_dtype_device = str(fmax) + "_" + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn( + sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax + ) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( + dtype=spec.dtype, device=spec.device + ) + + # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame) + melspec = torch.matmul(mel_basis[fmax_dtype_device], spec) + melspec = spectral_normalize_torch(melspec) + return melspec + + +def mel_spectrogram_torch( + y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False +): + """Convert waveform into Mel-frequency Log-amplitude spectrogram. 
+ + Args: + y :: (B, T) - Waveforms + Returns: + melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram + """ + # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame) + spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) + + # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame) + melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) + + return melspec diff --git a/lib/infer/infer_libs/train/process_ckpt.py b/lib/infer/infer_libs/train/process_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..7926bba45a64db895d413138019ede985043b465 --- /dev/null +++ b/lib/infer/infer_libs/train/process_ckpt.py @@ -0,0 +1,260 @@ +import os +import traceback +from collections import OrderedDict + +import torch + +from assets.i18n.i18n import I18nAuto + +i18n = I18nAuto() + + +def savee(ckpt, sr, if_f0, name, epoch, version, hps): + try: + opt = OrderedDict() + opt["weight"] = {} + for key in ckpt.keys(): + if "enc_q" in key: + continue + opt["weight"][key] = ckpt[key].half() + opt["config"] = [ + hps.data.filter_length // 2 + 1, + 32, + hps.model.inter_channels, + hps.model.hidden_channels, + hps.model.filter_channels, + hps.model.n_heads, + hps.model.n_layers, + hps.model.kernel_size, + hps.model.p_dropout, + hps.model.resblock, + hps.model.resblock_kernel_sizes, + hps.model.resblock_dilation_sizes, + hps.model.upsample_rates, + hps.model.upsample_initial_channel, + hps.model.upsample_kernel_sizes, + hps.model.spk_embed_dim, + hps.model.gin_channels, + hps.data.sampling_rate, + ] + opt["info"] = "%sepoch" % epoch + opt["sr"] = sr + opt["f0"] = if_f0 + opt["version"] = version + torch.save(opt, "logs/weights/%s.pth" % name) + return "Success." 
+ except: + return traceback.format_exc() + + +def show_info(path): + try: + a = torch.load(path, map_location="cpu") + return "模型信息:%s\n采样率:%s\n模型是否输入音高引导:%s\n版本:%s" % ( + a.get("info", "None"), + a.get("sr", "None"), + a.get("f0", "None"), + a.get("version", "None"), + ) + except: + return traceback.format_exc() + + +def extract_small_model(path, name, sr, if_f0, info, version): + try: + ckpt = torch.load(path, map_location="cpu") + if "model" in ckpt: + ckpt = ckpt["model"] + opt = OrderedDict() + opt["weight"] = {} + for key in ckpt.keys(): + if "enc_q" in key: + continue + opt["weight"][key] = ckpt[key].half() + if sr == "40k": + opt["config"] = [ + 1025, + 32, + 192, + 192, + 768, + 2, + 6, + 3, + 0, + "1", + [3, 7, 11], + [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + [10, 10, 2, 2], + 512, + [16, 16, 4, 4], + 109, + 256, + 40000, + ] + elif sr == "48k": + if version == "v1": + opt["config"] = [ + 1025, + 32, + 192, + 192, + 768, + 2, + 6, + 3, + 0, + "1", + [3, 7, 11], + [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + [10, 6, 2, 2, 2], + 512, + [16, 16, 4, 4, 4], + 109, + 256, + 48000, + ] + else: + opt["config"] = [ + 1025, + 32, + 192, + 192, + 768, + 2, + 6, + 3, + 0, + "1", + [3, 7, 11], + [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + [12, 10, 2, 2], + 512, + [24, 20, 4, 4], + 109, + 256, + 48000, + ] + elif sr == "32k": + if version == "v1": + opt["config"] = [ + 513, + 32, + 192, + 192, + 768, + 2, + 6, + 3, + 0, + "1", + [3, 7, 11], + [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + [10, 4, 2, 2, 2], + 512, + [16, 16, 4, 4, 4], + 109, + 256, + 32000, + ] + else: + opt["config"] = [ + 513, + 32, + 192, + 192, + 768, + 2, + 6, + 3, + 0, + "1", + [3, 7, 11], + [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + [10, 8, 2, 2], + 512, + [20, 16, 4, 4], + 109, + 256, + 32000, + ] + if info == "": + info = "Extracted model." + opt["info"] = info + opt["version"] = version + opt["sr"] = sr + opt["f0"] = int(if_f0) + torch.save(opt, "logs/weights/%s.pth" % name) + return "Success." + except: + return traceback.format_exc() + + +def change_info(path, info, name): + try: + ckpt = torch.load(path, map_location="cpu") + ckpt["info"] = info + if name == "": + name = os.path.basename(path) + torch.save(ckpt, "logs/weights/%s" % name) + return "Success." + except: + return traceback.format_exc() + + +def merge(path1, path2, alpha1, sr, f0, info, name, version): + try: + + def extract(ckpt): + a = ckpt["model"] + opt = OrderedDict() + opt["weight"] = {} + for key in a.keys(): + if "enc_q" in key: + continue + opt["weight"][key] = a[key] + return opt + + ckpt1 = torch.load(path1, map_location="cpu") + ckpt2 = torch.load(path2, map_location="cpu") + cfg = ckpt1["config"] + if "model" in ckpt1: + ckpt1 = extract(ckpt1) + else: + ckpt1 = ckpt1["weight"] + if "model" in ckpt2: + ckpt2 = extract(ckpt2) + else: + ckpt2 = ckpt2["weight"] + if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())): + return "Fail to merge the models. The model architectures are not the same." 
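+        # Editor's note (illustrative, not part of the original code): the loop
+        # below performs a plain linear interpolation of the two state dicts,
+        #     w_merged = alpha1 * w1 + (1 - alpha1) * w2,
+        # computed in float32 and stored back as float16. The one special case
+        # is the speaker-embedding table "emb_g.weight", which is truncated to
+        # the smaller of the two speaker counts so that models trained with
+        # different numbers of speakers can still be merged.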
+ opt = OrderedDict() + opt["weight"] = {} + for key in ckpt1.keys(): + # try: + if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape: + min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0]) + opt["weight"][key] = ( + alpha1 * (ckpt1[key][:min_shape0].float()) + + (1 - alpha1) * (ckpt2[key][:min_shape0].float()) + ).half() + else: + opt["weight"][key] = ( + alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float()) + ).half() + # except: + # pdb.set_trace() + opt["config"] = cfg + """ + if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000] + elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000] + elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000] + """ + opt["sr"] = sr + opt["f0"] = 1 if f0 == i18n("是") else 0 + opt["version"] = version + opt["info"] = info + torch.save(opt, "logs/weights/%s.pth" % name) + return "Success." + except: + return traceback.format_exc() diff --git a/lib/infer/infer_libs/train/utils.py b/lib/infer/infer_libs/train/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..73bdf4d1f72e32dc0332b6a2284a2697147a379e --- /dev/null +++ b/lib/infer/infer_libs/train/utils.py @@ -0,0 +1,475 @@ +import argparse +import glob +import json +import logging +import os +import subprocess +import sys + +import numpy as np +import torch +from scipy.io.wavfile import read + +MATPLOTLIB_FLAG = False + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) +logger = logging + + +def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1): + assert os.path.isfile(checkpoint_path) + checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") + + ################## + def go(model, bkey): + saved_state_dict = checkpoint_dict[bkey] + if hasattr(model, "module"): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + new_state_dict = {} + for k, v in state_dict.items(): # 模型需要的shape + try: + new_state_dict[k] = saved_state_dict[k] + if saved_state_dict[k].shape != state_dict[k].shape: + logger.warn( + "shape-%s-mismatch. 
need: %s, get: %s", + k, + state_dict[k].shape, + saved_state_dict[k].shape, + ) # + raise KeyError + except: + # logger.info(traceback.format_exc()) + logger.info("%s is not in the checkpoint", k) # pretrain缺失的 + new_state_dict[k] = v # 模型自带的随机值 + if hasattr(model, "module"): + model.module.load_state_dict(new_state_dict, strict=False) + else: + model.load_state_dict(new_state_dict, strict=False) + return model + + go(combd, "combd") + model = go(sbd, "sbd") + ############# + logger.info("Loaded model weights") + + iteration = checkpoint_dict["iteration"] + learning_rate = checkpoint_dict["learning_rate"] + if ( + optimizer is not None and load_opt == 1 + ): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch + # try: + optimizer.load_state_dict(checkpoint_dict["optimizer"]) + # except: + # traceback.print_exc() + logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration)) + return model, optimizer, learning_rate, iteration + + +# def load_checkpoint(checkpoint_path, model, optimizer=None): +# assert os.path.isfile(checkpoint_path) +# checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') +# iteration = checkpoint_dict['iteration'] +# learning_rate = checkpoint_dict['learning_rate'] +# if optimizer is not None: +# optimizer.load_state_dict(checkpoint_dict['optimizer']) +# # print(1111) +# saved_state_dict = checkpoint_dict['model'] +# # print(1111) +# +# if hasattr(model, 'module'): +# state_dict = model.module.state_dict() +# else: +# state_dict = model.state_dict() +# new_state_dict= {} +# for k, v in state_dict.items(): +# try: +# new_state_dict[k] = saved_state_dict[k] +# except: +# logger.info("%s is not in the checkpoint" % k) +# new_state_dict[k] = v +# if hasattr(model, 'module'): +# model.module.load_state_dict(new_state_dict) +# else: +# model.load_state_dict(new_state_dict) +# logger.info("Loaded checkpoint '{}' (epoch {})" .format( +# checkpoint_path, iteration)) +# return model, optimizer, learning_rate, iteration +def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1): + assert os.path.isfile(checkpoint_path) + checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") + + saved_state_dict = checkpoint_dict["model"] + if hasattr(model, "module"): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + new_state_dict = {} + for k, v in state_dict.items(): # 模型需要的shape + try: + new_state_dict[k] = saved_state_dict[k] + if saved_state_dict[k].shape != state_dict[k].shape: + logger.warn( + "shape-%s-mismatch|need-%s|get-%s", + k, + state_dict[k].shape, + saved_state_dict[k].shape, + ) # + raise KeyError + except: + # logger.info(traceback.format_exc()) + logger.info("%s is not in the checkpoint", k) # pretrain缺失的 + new_state_dict[k] = v # 模型自带的随机值 + if hasattr(model, "module"): + model.module.load_state_dict(new_state_dict, strict=False) + else: + model.load_state_dict(new_state_dict, strict=False) + logger.info("Loaded model weights") + + iteration = checkpoint_dict["iteration"] + learning_rate = checkpoint_dict["learning_rate"] + if ( + optimizer is not None and load_opt == 1 + ): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch + # try: + optimizer.load_state_dict(checkpoint_dict["optimizer"]) + # except: + # traceback.print_exc() + logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration)) + return model, optimizer, learning_rate, iteration + + +def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): + logger.info( + "Saving 
model and optimizer state at epoch {} to {}".format( + iteration, checkpoint_path + ) + ) + if hasattr(model, "module"): + state_dict = model.module.state_dict() + else: + state_dict = model.state_dict() + torch.save( + { + "model": state_dict, + "iteration": iteration, + "optimizer": optimizer.state_dict(), + "learning_rate": learning_rate, + }, + checkpoint_path, + ) + + +def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path): + logger.info( + "Saving model and optimizer state at epoch {} to {}".format( + iteration, checkpoint_path + ) + ) + if hasattr(combd, "module"): + state_dict_combd = combd.module.state_dict() + else: + state_dict_combd = combd.state_dict() + if hasattr(sbd, "module"): + state_dict_sbd = sbd.module.state_dict() + else: + state_dict_sbd = sbd.state_dict() + torch.save( + { + "combd": state_dict_combd, + "sbd": state_dict_sbd, + "iteration": iteration, + "optimizer": optimizer.state_dict(), + "learning_rate": learning_rate, + }, + checkpoint_path, + ) + + +def summarize( + writer, + global_step, + scalars={}, + histograms={}, + images={}, + audios={}, + audio_sampling_rate=22050, +): + for k, v in scalars.items(): + writer.add_scalar(k, v, global_step) + for k, v in histograms.items(): + writer.add_histogram(k, v, global_step) + for k, v in images.items(): + writer.add_image(k, v, global_step, dataformats="HWC") + for k, v in audios.items(): + writer.add_audio(k, v, global_step, audio_sampling_rate) + + +def latest_checkpoint_path(dir_path, regex="G_*.pth"): + f_list = glob.glob(os.path.join(dir_path, regex)) + f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) + x = f_list[-1] + logger.debug(x) + return x + + +def plot_spectrogram_to_numpy(spectrogram): + global MATPLOTLIB_FLAG + if not MATPLOTLIB_FLAG: + import matplotlib + + matplotlib.use("Agg") + MATPLOTLIB_FLAG = True + mpl_logger = logging.getLogger("matplotlib") + mpl_logger.setLevel(logging.WARNING) + import matplotlib.pylab as plt + import numpy as np + + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") + plt.colorbar(im, ax=ax) + plt.xlabel("Frames") + plt.ylabel("Channels") + plt.tight_layout() + + fig.canvas.draw() + data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + plt.close() + return data + + +def plot_alignment_to_numpy(alignment, info=None): + global MATPLOTLIB_FLAG + if not MATPLOTLIB_FLAG: + import matplotlib + + matplotlib.use("Agg") + MATPLOTLIB_FLAG = True + mpl_logger = logging.getLogger("matplotlib") + mpl_logger.setLevel(logging.WARNING) + import matplotlib.pylab as plt + import numpy as np + + fig, ax = plt.subplots(figsize=(6, 4)) + im = ax.imshow( + alignment.transpose(), aspect="auto", origin="lower", interpolation="none" + ) + fig.colorbar(im, ax=ax) + xlabel = "Decoder timestep" + if info is not None: + xlabel += "\n\n" + info + plt.xlabel(xlabel) + plt.ylabel("Encoder timestep") + plt.tight_layout() + + fig.canvas.draw() + data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + plt.close() + return data + + +def load_wav_to_torch(full_path): + sampling_rate, data = read(full_path) + return torch.FloatTensor(data.astype(np.float32)), sampling_rate + + +def load_filepaths_and_text(filename, split="|"): + with open(filename, encoding="utf-8") as f: + filepaths_and_text = [line.strip().split(split) 
for line in f] + return filepaths_and_text + + +def get_hparams(init=True): + """ + todo: + 结尾七人组: + 保存频率、总epoch done + bs done + pretrainG、pretrainD done + 卡号:os.en["CUDA_VISIBLE_DEVICES"] done + if_latest done + 模型:if_f0 done + 采样率:自动选择config done + 是否缓存数据集进GPU:if_cache_data_in_gpu done + + -m: + 自动决定training_files路径,改掉train_nsf_load_pretrain.py里的hps.data.training_files done + -c不要了 + """ + parser = argparse.ArgumentParser() + parser.add_argument( + "-se", + "--save_every_epoch", + type=int, + required=True, + help="checkpoint save frequency (epoch)", + ) + parser.add_argument( + "-te", "--total_epoch", type=int, required=True, help="total_epoch" + ) + parser.add_argument( + "-pg", "--pretrainG", type=str, default="", help="Pretrained Discriminator path" + ) + parser.add_argument( + "-pd", "--pretrainD", type=str, default="", help="Pretrained Generator path" + ) + parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -") + parser.add_argument( + "-bs", "--batch_size", type=int, required=True, help="batch size" + ) + parser.add_argument( + "-e", "--experiment_dir", type=str, required=True, help="experiment dir" + ) # -m + parser.add_argument( + "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k" + ) + parser.add_argument( + "-sw", + "--save_every_weights", + type=str, + default="0", + help="save the extracted model in weights directory when saving checkpoints", + ) + parser.add_argument( + "-v", "--version", type=str, required=True, help="model version" + ) + parser.add_argument( + "-f0", + "--if_f0", + type=int, + required=True, + help="use f0 as one of the inputs of the model, 1 or 0", + ) + parser.add_argument( + "-l", + "--if_latest", + type=int, + required=True, + help="if only save the latest G/D pth file, 1 or 0", + ) + parser.add_argument( + "-c", + "--if_cache_data_in_gpu", + type=int, + required=True, + help="if caching the dataset in GPU memory, 1 or 0", + ) + + args = parser.parse_args() + name = args.experiment_dir + experiment_dir = os.path.join("./logs", args.experiment_dir) + config_save_path = os.path.join(experiment_dir, "config.json") + with open(config_save_path, "r") as f: + config = json.load(f) + hparams = HParams(**config) + hparams.model_dir = hparams.experiment_dir = experiment_dir + hparams.save_every_epoch = args.save_every_epoch + hparams.name = name + hparams.total_epoch = args.total_epoch + hparams.pretrainG = args.pretrainG + hparams.pretrainD = args.pretrainD + hparams.version = args.version + hparams.gpus = args.gpus + hparams.train.batch_size = args.batch_size + hparams.sample_rate = args.sample_rate + hparams.if_f0 = args.if_f0 + hparams.if_latest = args.if_latest + hparams.save_every_weights = args.save_every_weights + hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu + hparams.data.training_files = "%s/filelist.txt" % experiment_dir + return hparams + + +def get_hparams_from_dir(model_dir): + config_save_path = os.path.join(model_dir, "config.json") + with open(config_save_path, "r") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + hparams.model_dir = model_dir + return hparams + + +def get_hparams_from_file(config_path): + with open(config_path, "r") as f: + data = f.read() + config = json.loads(data) + + hparams = HParams(**config) + return hparams + + +def check_git_hash(model_dir): + source_dir = os.path.dirname(os.path.realpath(__file__)) + if not os.path.exists(os.path.join(source_dir, ".git")): + logger.warn( + "{} is not a git repository, therefore 
hash value comparison will be ignored.".format( + source_dir + ) + ) + return + + cur_hash = subprocess.getoutput("git rev-parse HEAD") + + path = os.path.join(model_dir, "githash") + if os.path.exists(path): + saved_hash = open(path).read() + if saved_hash != cur_hash: + logger.warn( + "git hash values are different. {}(saved) != {}(current)".format( + saved_hash[:8], cur_hash[:8] + ) + ) + else: + open(path, "w").write(cur_hash) + + +def get_logger(model_dir, filename="train.log"): + global logger + logger = logging.getLogger(os.path.basename(model_dir)) + logger.setLevel(logging.DEBUG) + + formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") + if not os.path.exists(model_dir): + os.makedirs(model_dir) + h = logging.FileHandler(os.path.join(model_dir, filename)) + h.setLevel(logging.DEBUG) + h.setFormatter(formatter) + logger.addHandler(h) + return logger + + +class HParams: + def __init__(self, **kwargs): + for k, v in kwargs.items(): + if type(v) == dict: + v = HParams(**v) + self[k] = v + + def keys(self): + return self.__dict__.keys() + + def items(self): + return self.__dict__.items() + + def values(self): + return self.__dict__.values() + + def __len__(self): + return len(self.__dict__) + + def __getitem__(self, key): + return getattr(self, key) + + def __setitem__(self, key, value): + return setattr(self, key, value) + + def __contains__(self, key): + return key in self.__dict__ + + def __repr__(self): + return self.__dict__.__repr__() diff --git a/lib/infer/infer_libs/uvr5_pack/__pycache__/mdx.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/__pycache__/mdx.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a253fd7435710d2c24a2d4f48be371eba3cb184 Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/__pycache__/mdx.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/__pycache__/utils.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..533bacea3228358d23d4dc1d591ad32c49e33e18 Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/__pycache__/utils.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/__init__.py b/lib/infer/infer_libs/uvr5_pack/demucs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d4182e356427e1b05a79f8da641c70bb732514fa --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +__version__ = "2.0.3" diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/__main__.py b/lib/infer/infer_libs/uvr5_pack/demucs/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5148f20623bdaa827777558844796ded1876d7d0 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/__main__.py @@ -0,0 +1,317 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
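A quick aside before the Demucs training entry point: the HParams wrapper defined in train/utils.py above is a small recursive attribute-dict, so the JSON config loaded by get_hparams_from_file can be read with dotted access. A minimal sketch, assuming this repository's root is on PYTHONPATH; the config values here are invented for illustration:

from lib.infer.infer_libs.train.utils import HParams

# Nested dicts are wrapped into nested HParams instances on construction.
config = {"train": {"batch_size": 8, "learning_rate": 1e-4}, "data": {"sampling_rate": 40000}}
hps = HParams(**config)
assert hps.train.batch_size == 8      # attribute-style access
assert "data" in hps                  # __contains__ checks top-level keys
hps.model_dir = "./logs/my-exp"       # new attributes can be attached freely
print(hps)                            # __repr__ prints the underlying dict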
+ +import json +import math +import os +import sys +import time +from dataclasses import dataclass, field + +import torch as th +from torch import distributed, nn +from torch.nn.parallel.distributed import DistributedDataParallel + +from .augment import FlipChannels, FlipSign, Remix, Scale, Shift +from .compressed import get_compressed_datasets +from .model import Demucs +from .parser import get_name, get_parser +from .raw import Rawset +from .repitch import RepitchedWrapper +from .pretrained import load_pretrained, SOURCES +from .tasnet import ConvTasNet +from .test import evaluate +from .train import train_model, validate_model +from .utils import (human_seconds, load_model, save_model, get_state, + save_state, sizeof_fmt, get_quantizer) +from .wav import get_wav_datasets, get_musdb_wav_datasets + + +@dataclass +class SavedState: + metrics: list = field(default_factory=list) + last_state: dict = None + best_state: dict = None + optimizer: dict = None + + +def main(): + parser = get_parser() + args = parser.parse_args() + name = get_name(parser, args) + print(f"Experiment {name}") + + if args.musdb is None and args.rank == 0: + print( + "You must provide the path to the MusDB dataset with the --musdb flag. " + "To download the MusDB dataset, see https://sigsep.github.io/datasets/musdb.html.", + file=sys.stderr) + sys.exit(1) + + eval_folder = args.evals / name + eval_folder.mkdir(exist_ok=True, parents=True) + args.logs.mkdir(exist_ok=True) + metrics_path = args.logs / f"{name}.json" + eval_folder.mkdir(exist_ok=True, parents=True) + args.checkpoints.mkdir(exist_ok=True, parents=True) + args.models.mkdir(exist_ok=True, parents=True) + + if args.device is None: + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + else: + device = args.device + + th.manual_seed(args.seed) + # Prevents too many threads to be started when running `museval` as it can be quite + # inefficient on NUMA architectures. 
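+    # (Editor's note, added for clarity: capping OMP_NUM_THREADS and
+    # MKL_NUM_THREADS at one thread per process matters in distributed runs,
+    # where each rank would otherwise spawn a thread pool sized to the whole
+    # machine and oversubscribe the cores.)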
+ os.environ["OMP_NUM_THREADS"] = "1" + os.environ["MKL_NUM_THREADS"] = "1" + + if args.world_size > 1: + if device != "cuda" and args.rank == 0: + print("Error: distributed training is only available with cuda device", file=sys.stderr) + sys.exit(1) + th.cuda.set_device(args.rank % th.cuda.device_count()) + distributed.init_process_group(backend="nccl", + init_method="tcp://" + args.master, + rank=args.rank, + world_size=args.world_size) + + checkpoint = args.checkpoints / f"{name}.th" + checkpoint_tmp = args.checkpoints / f"{name}.th.tmp" + if args.restart and checkpoint.exists() and args.rank == 0: + checkpoint.unlink() + + if args.test or args.test_pretrained: + args.epochs = 1 + args.repeat = 0 + if args.test: + model = load_model(args.models / args.test) + else: + model = load_pretrained(args.test_pretrained) + elif args.tasnet: + model = ConvTasNet(audio_channels=args.audio_channels, + samplerate=args.samplerate, X=args.X, + segment_length=4 * args.samples, + sources=SOURCES) + else: + model = Demucs( + audio_channels=args.audio_channels, + channels=args.channels, + context=args.context, + depth=args.depth, + glu=args.glu, + growth=args.growth, + kernel_size=args.kernel_size, + lstm_layers=args.lstm_layers, + rescale=args.rescale, + rewrite=args.rewrite, + stride=args.conv_stride, + resample=args.resample, + normalize=args.normalize, + samplerate=args.samplerate, + segment_length=4 * args.samples, + sources=SOURCES, + ) + model.to(device) + if args.init: + model.load_state_dict(load_pretrained(args.init).state_dict()) + + if args.show: + print(model) + size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters())) + print(f"Model size {size}") + return + + try: + saved = th.load(checkpoint, map_location='cpu') + except IOError: + saved = SavedState() + + optimizer = th.optim.Adam(model.parameters(), lr=args.lr) + + quantizer = None + quantizer = get_quantizer(model, args, optimizer) + + if saved.last_state is not None: + model.load_state_dict(saved.last_state, strict=False) + if saved.optimizer is not None: + optimizer.load_state_dict(saved.optimizer) + + model_name = f"{name}.th" + if args.save_model: + if args.rank == 0: + model.to("cpu") + model.load_state_dict(saved.best_state) + save_model(model, quantizer, args, args.models / model_name) + return + elif args.save_state: + model_name = f"{args.save_state}.th" + if args.rank == 0: + model.to("cpu") + model.load_state_dict(saved.best_state) + state = get_state(model, quantizer) + save_state(state, args.models / model_name) + return + + if args.rank == 0: + done = args.logs / f"{name}.done" + if done.exists(): + done.unlink() + + augment = [Shift(args.data_stride)] + if args.augment: + augment += [FlipSign(), FlipChannels(), Scale(), + Remix(group_size=args.remix_group_size)] + augment = nn.Sequential(*augment).to(device) + print("Agumentation pipeline:", augment) + + if args.mse: + criterion = nn.MSELoss() + else: + criterion = nn.L1Loss() + + # Setting number of samples so that all convolution windows are full. + # Prevents hard to debug mistake with the prediction being shifted compared + # to the input mixture. + samples = model.valid_length(args.samples) + print(f"Number of training samples adjusted to {samples}") + samples = samples + args.data_stride + if args.repitch: + # We need a bit more audio samples, to account for potential + # tempo change. 
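+        # (Editor's note, added for clarity: a tempo increase of up to
+        # max_tempo percent shortens the clip, so the request is inflated to
+        # samples / (1 - 0.01 * max_tempo), a slightly conservative bound.
+        # E.g. max_tempo=12 turns a 441000-sample request into
+        # ceil(441000 / 0.88) = 501137 source samples.)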
+ samples = math.ceil(samples / (1 - 0.01 * args.max_tempo)) + + args.metadata.mkdir(exist_ok=True, parents=True) + if args.raw: + train_set = Rawset(args.raw / "train", + samples=samples, + channels=args.audio_channels, + streams=range(1, len(model.sources) + 1), + stride=args.data_stride) + + valid_set = Rawset(args.raw / "valid", channels=args.audio_channels) + elif args.wav: + train_set, valid_set = get_wav_datasets(args, samples, model.sources) + elif args.is_wav: + train_set, valid_set = get_musdb_wav_datasets(args, samples, model.sources) + else: + train_set, valid_set = get_compressed_datasets(args, samples) + + if args.repitch: + train_set = RepitchedWrapper( + train_set, + proba=args.repitch, + max_tempo=args.max_tempo) + + best_loss = float("inf") + for epoch, metrics in enumerate(saved.metrics): + print(f"Epoch {epoch:03d}: " + f"train={metrics['train']:.8f} " + f"valid={metrics['valid']:.8f} " + f"best={metrics['best']:.4f} " + f"ms={metrics.get('true_model_size', 0):.2f}MB " + f"cms={metrics.get('compressed_model_size', 0):.2f}MB " + f"duration={human_seconds(metrics['duration'])}") + best_loss = metrics['best'] + + if args.world_size > 1: + dmodel = DistributedDataParallel(model, + device_ids=[th.cuda.current_device()], + output_device=th.cuda.current_device()) + else: + dmodel = model + + for epoch in range(len(saved.metrics), args.epochs): + begin = time.time() + model.train() + train_loss, model_size = train_model( + epoch, train_set, dmodel, criterion, optimizer, augment, + quantizer=quantizer, + batch_size=args.batch_size, + device=device, + repeat=args.repeat, + seed=args.seed, + diffq=args.diffq, + workers=args.workers, + world_size=args.world_size) + model.eval() + valid_loss = validate_model( + epoch, valid_set, model, criterion, + device=device, + rank=args.rank, + split=args.split_valid, + overlap=args.overlap, + world_size=args.world_size) + + ms = 0 + cms = 0 + if quantizer and args.rank == 0: + ms = quantizer.true_model_size() + cms = quantizer.compressed_model_size(num_workers=min(40, args.world_size * 10)) + + duration = time.time() - begin + if valid_loss < best_loss and ms <= args.ms_target: + best_loss = valid_loss + saved.best_state = { + key: value.to("cpu").clone() + for key, value in model.state_dict().items() + } + + saved.metrics.append({ + "train": train_loss, + "valid": valid_loss, + "best": best_loss, + "duration": duration, + "model_size": model_size, + "true_model_size": ms, + "compressed_model_size": cms, + }) + if args.rank == 0: + json.dump(saved.metrics, open(metrics_path, "w")) + + saved.last_state = model.state_dict() + saved.optimizer = optimizer.state_dict() + if args.rank == 0 and not args.test: + th.save(saved, checkpoint_tmp) + checkpoint_tmp.rename(checkpoint) + + print(f"Epoch {epoch:03d}: " + f"train={train_loss:.8f} valid={valid_loss:.8f} best={best_loss:.4f} ms={ms:.2f}MB " + f"cms={cms:.2f}MB " + f"duration={human_seconds(duration)}") + + if args.world_size > 1: + distributed.barrier() + + del dmodel + model.load_state_dict(saved.best_state) + if args.eval_cpu: + device = "cpu" + model.to(device) + model.eval() + evaluate(model, args.musdb, eval_folder, + is_wav=args.is_wav, + rank=args.rank, + world_size=args.world_size, + device=device, + save=args.save, + split=args.split_valid, + shifts=args.shifts, + overlap=args.overlap, + workers=args.eval_workers) + model.to("cpu") + if args.rank == 0: + if not (args.test or args.test_pretrained): + save_model(model, quantizer, args, args.models / model_name) + print("done") + 
done.write_text("done") + + +if __name__ == "__main__": + main() diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/audio.py b/lib/infer/infer_libs/uvr5_pack/demucs/audio.py new file mode 100644 index 0000000000000000000000000000000000000000..b29f156e4afb5fbda32c35777022caeadf50d711 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/audio.py @@ -0,0 +1,172 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +import json +import subprocess as sp +from pathlib import Path + +import julius +import numpy as np +import torch + +from .utils import temp_filenames + + +def _read_info(path): + stdout_data = sp.check_output([ + 'ffprobe', "-loglevel", "panic", + str(path), '-print_format', 'json', '-show_format', '-show_streams' + ]) + return json.loads(stdout_data.decode('utf-8')) + + +class AudioFile: + """ + Allows to read audio from any format supported by ffmpeg, as well as resampling or + converting to mono on the fly. See :method:`read` for more details. + """ + def __init__(self, path: Path): + self.path = Path(path) + self._info = None + + def __repr__(self): + features = [("path", self.path)] + features.append(("samplerate", self.samplerate())) + features.append(("channels", self.channels())) + features.append(("streams", len(self))) + features_str = ", ".join(f"{name}={value}" for name, value in features) + return f"AudioFile({features_str})" + + @property + def info(self): + if self._info is None: + self._info = _read_info(self.path) + return self._info + + @property + def duration(self): + return float(self.info['format']['duration']) + + @property + def _audio_streams(self): + return [ + index for index, stream in enumerate(self.info["streams"]) + if stream["codec_type"] == "audio" + ] + + def __len__(self): + return len(self._audio_streams) + + def channels(self, stream=0): + return int(self.info['streams'][self._audio_streams[stream]]['channels']) + + def samplerate(self, stream=0): + return int(self.info['streams'][self._audio_streams[stream]]['sample_rate']) + + def read(self, + seek_time=None, + duration=None, + streams=slice(None), + samplerate=None, + channels=None, + temp_folder=None): + """ + Slightly more efficient implementation than stempeg, + in particular, this will extract all stems at once + rather than having to loop over one file multiple times + for each stream. + + Args: + seek_time (float): seek time in seconds or None if no seeking is needed. + duration (float): duration in seconds to extract or None to extract until the end. + streams (slice, int or list): streams to extract, can be a single int, a list or + a slice. If it is a slice or list, the output will be of size [S, C, T] + with S the number of streams, C the number of channels and T the number of samples. + If it is an int, the output will be [C, T]. + samplerate (int): if provided, will resample on the fly. If None, no resampling will + be done. Original sampling rate can be obtained with :method:`samplerate`. + channels (int): if 1, will convert to mono. We do not rely on ffmpeg for that + as ffmpeg automatically scale by +3dB to conserve volume when playing on speakers. + See https://sound.stackexchange.com/a/42710. + Our definition of mono is simply the average of the two channels. Any other + value will be ignored. + temp_folder (str or Path or None): temporary folder to use for decoding. 
+
+
+        """
+        streams = np.array(range(len(self)))[streams]
+        single = not isinstance(streams, np.ndarray)
+        if single:
+            streams = [streams]
+
+        if duration is None:
+            target_size = None
+            query_duration = None
+        else:
+            target_size = int((samplerate or self.samplerate()) * duration)
+            query_duration = float((target_size + 1) / (samplerate or self.samplerate()))
+
+        with temp_filenames(len(streams)) as filenames:
+            command = ['ffmpeg', '-y']
+            command += ['-loglevel', 'panic']
+            if seek_time:
+                command += ['-ss', str(seek_time)]
+            command += ['-i', str(self.path)]
+            for stream, filename in zip(streams, filenames):
+                command += ['-map', f'0:{self._audio_streams[stream]}']
+                if query_duration is not None:
+                    command += ['-t', str(query_duration)]
+                command += ['-threads', '1']
+                command += ['-f', 'f32le']
+                if samplerate is not None:
+                    command += ['-ar', str(samplerate)]
+                command += [filename]
+
+            sp.run(command, check=True)
+            wavs = []
+            for filename in filenames:
+                wav = np.fromfile(filename, dtype=np.float32)
+                wav = torch.from_numpy(wav)
+                wav = wav.view(-1, self.channels()).t()
+                if channels is not None:
+                    wav = convert_audio_channels(wav, channels)
+                if target_size is not None:
+                    wav = wav[..., :target_size]
+                wavs.append(wav)
+        wav = torch.stack(wavs, dim=0)
+        if single:
+            wav = wav[0]
+        return wav
+
+
+def convert_audio_channels(wav, channels=2):
+    """Convert audio to the given number of channels."""
+    *shape, src_channels, length = wav.shape
+    if src_channels == channels:
+        pass
+    elif channels == 1:
+        # Case 1:
+        # The caller asked for 1-channel audio, but the stream has multiple
+        # channels: downmix all channels.
+        wav = wav.mean(dim=-2, keepdim=True)
+    elif src_channels == 1:
+        # Case 2:
+        # The caller asked for multiple channels, but the input file has a
+        # single channel: replicate the audio over all channels.
+        wav = wav.expand(*shape, channels, length)
+    elif src_channels >= channels:
+        # Case 3:
+        # The caller asked for multiple channels, and the input file has
+        # more channels than requested: return the first channels.
+        wav = wav[..., :channels, :]
+    else:
+        # Case 4: there is no obviously correct way to upmix from several
+        # channels to even more (e.g. 3 to 5), so fail explicitly.
+        raise ValueError('The audio file has fewer channels than requested but is not mono.')
+    return wav
+
+
+def convert_audio(wav, from_samplerate, to_samplerate, channels):
+    wav = convert_audio_channels(wav, channels)
+    return julius.resample_frac(wav, from_samplerate, to_samplerate)
diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/augment.py b/lib/infer/infer_libs/uvr5_pack/demucs/augment.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb36d3298d89470f306316322e7587187819c94b
--- /dev/null
+++ b/lib/infer/infer_libs/uvr5_pack/demucs/augment.py
@@ -0,0 +1,106 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import random
+import torch as th
+from torch import nn
+
+
+class Shift(nn.Module):
+    """
+    Randomly shift audio in time by up to `shift` samples.
+ """ + def __init__(self, shift=8192): + super().__init__() + self.shift = shift + + def forward(self, wav): + batch, sources, channels, time = wav.size() + length = time - self.shift + if self.shift > 0: + if not self.training: + wav = wav[..., :length] + else: + offsets = th.randint(self.shift, [batch, sources, 1, 1], device=wav.device) + offsets = offsets.expand(-1, -1, channels, -1) + indexes = th.arange(length, device=wav.device) + wav = wav.gather(3, indexes + offsets) + return wav + + +class FlipChannels(nn.Module): + """ + Flip left-right channels. + """ + def forward(self, wav): + batch, sources, channels, time = wav.size() + if self.training and wav.size(2) == 2: + left = th.randint(2, (batch, sources, 1, 1), device=wav.device) + left = left.expand(-1, -1, -1, time) + right = 1 - left + wav = th.cat([wav.gather(2, left), wav.gather(2, right)], dim=2) + return wav + + +class FlipSign(nn.Module): + """ + Random sign flip. + """ + def forward(self, wav): + batch, sources, channels, time = wav.size() + if self.training: + signs = th.randint(2, (batch, sources, 1, 1), device=wav.device, dtype=th.float32) + wav = wav * (2 * signs - 1) + return wav + + +class Remix(nn.Module): + """ + Shuffle sources to make new mixes. + """ + def __init__(self, group_size=4): + """ + Shuffle sources within one batch. + Each batch is divided into groups of size `group_size` and shuffling is done within + each group separatly. This allow to keep the same probability distribution no matter + the number of GPUs. Without this grouping, using more GPUs would lead to a higher + probability of keeping two sources from the same track together which can impact + performance. + """ + super().__init__() + self.group_size = group_size + + def forward(self, wav): + batch, streams, channels, time = wav.size() + device = wav.device + + if self.training: + group_size = self.group_size or batch + if batch % group_size != 0: + raise ValueError(f"Batch size {batch} must be divisible by group size {group_size}") + groups = batch // group_size + wav = wav.view(groups, group_size, streams, channels, time) + permutations = th.argsort(th.rand(groups, group_size, streams, 1, 1, device=device), + dim=1) + wav = wav.gather(1, permutations.expand(-1, -1, -1, channels, time)) + wav = wav.view(batch, streams, channels, time) + return wav + + +class Scale(nn.Module): + def __init__(self, proba=1., min=0.25, max=1.25): + super().__init__() + self.proba = proba + self.min = min + self.max = max + + def forward(self, wav): + batch, streams, channels, time = wav.size() + device = wav.device + if self.training and random.random() < self.proba: + scales = th.empty(batch, streams, 1, 1, device=device).uniform_(self.min, self.max) + wav *= scales + return wav diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/compressed.py b/lib/infer/infer_libs/uvr5_pack/demucs/compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..eb8fbb75463ba71ca86729b22baebf24598ade57 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/compressed.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import json +from fractions import Fraction +from concurrent import futures + +import musdb +from torch import distributed + +from .audio import AudioFile + + +def get_musdb_tracks(root, *args, **kwargs): + mus = musdb.DB(root, *args, **kwargs) + return {track.name: track.path for track in mus} + + +class StemsSet: + def __init__(self, tracks, metadata, duration=None, stride=1, + samplerate=44100, channels=2, streams=slice(None)): + + self.metadata = [] + for name, path in tracks.items(): + meta = dict(metadata[name]) + meta["path"] = path + meta["name"] = name + self.metadata.append(meta) + if duration is not None and meta["duration"] < duration: + raise ValueError(f"Track {name} duration is too small {meta['duration']}") + self.metadata.sort(key=lambda x: x["name"]) + self.duration = duration + self.stride = stride + self.channels = channels + self.samplerate = samplerate + self.streams = streams + + def __len__(self): + return sum(self._examples_count(m) for m in self.metadata) + + def _examples_count(self, meta): + if self.duration is None: + return 1 + else: + return int((meta["duration"] - self.duration) // self.stride + 1) + + def track_metadata(self, index): + for meta in self.metadata: + examples = self._examples_count(meta) + if index >= examples: + index -= examples + continue + return meta + + def __getitem__(self, index): + for meta in self.metadata: + examples = self._examples_count(meta) + if index >= examples: + index -= examples + continue + streams = AudioFile(meta["path"]).read(seek_time=index * self.stride, + duration=self.duration, + channels=self.channels, + samplerate=self.samplerate, + streams=self.streams) + return (streams - meta["mean"]) / meta["std"] + + +def _get_track_metadata(path): + # use mono at 44kHz as reference. For any other settings data won't be perfectly + # normalized but it should be good enough. 
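+    # (Editor's note, added for clarity: the mean/std recorded here are later
+    # applied by StemsSet.__getitem__ above, which returns
+    # (streams - meta["mean"]) / meta["std"] to standardize every stream.)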
+ audio = AudioFile(path) + mix = audio.read(streams=0, channels=1, samplerate=44100) + return {"duration": audio.duration, "std": mix.std().item(), "mean": mix.mean().item()} + + +def _build_metadata(tracks, workers=10): + pendings = [] + with futures.ProcessPoolExecutor(workers) as pool: + for name, path in tracks.items(): + pendings.append((name, pool.submit(_get_track_metadata, path))) + return {name: p.result() for name, p in pendings} + + +def _build_musdb_metadata(path, musdb, workers): + tracks = get_musdb_tracks(musdb) + metadata = _build_metadata(tracks, workers) + path.parent.mkdir(exist_ok=True, parents=True) + json.dump(metadata, open(path, "w")) + + +def get_compressed_datasets(args, samples): + metadata_file = args.metadata / "musdb.json" + if not metadata_file.is_file() and args.rank == 0: + _build_musdb_metadata(metadata_file, args.musdb, args.workers) + if args.world_size > 1: + distributed.barrier() + metadata = json.load(open(metadata_file)) + duration = Fraction(samples, args.samplerate) + stride = Fraction(args.data_stride, args.samplerate) + train_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"), + metadata, + duration=duration, + stride=stride, + streams=slice(1, None), + samplerate=args.samplerate, + channels=args.audio_channels) + valid_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="valid"), + metadata, + samplerate=args.samplerate, + channels=args.audio_channels) + return train_set, valid_set diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/model.py b/lib/infer/infer_libs/uvr5_pack/demucs/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e9d932f4d014f7b95b394d2e24ed5edc379ded8d --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/model.py @@ -0,0 +1,202 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import julius +from torch import nn + +from .utils import capture_init, center_trim + + +class BLSTM(nn.Module): + def __init__(self, dim, layers=1): + super().__init__() + self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) + self.linear = nn.Linear(2 * dim, dim) + + def forward(self, x): + x = x.permute(2, 0, 1) + x = self.lstm(x)[0] + x = self.linear(x) + x = x.permute(1, 2, 0) + return x + + +def rescale_conv(conv, reference): + std = conv.weight.std().detach() + scale = (std / reference)**0.5 + conv.weight.data /= scale + if conv.bias is not None: + conv.bias.data /= scale + + +def rescale_module(module, reference): + for sub in module.modules(): + if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): + rescale_conv(sub, reference) + + +class Demucs(nn.Module): + @capture_init + def __init__(self, + sources, + audio_channels=2, + channels=64, + depth=6, + rewrite=True, + glu=True, + rescale=0.1, + resample=True, + kernel_size=8, + stride=4, + growth=2., + lstm_layers=2, + context=3, + normalize=False, + samplerate=44100, + segment_length=4 * 10 * 44100): + """ + Args: + sources (list[str]): list of source names + audio_channels (int): stereo or mono + channels (int): first convolution channels + depth (int): number of encoder/decoder layers + rewrite (bool): add 1x1 convolution to each encoder layer + and a convolution to each decoder layer. + For the decoder layer, `context` gives the kernel size. 
+ glu (bool): use glu instead of ReLU + resample_input (bool): upsample x2 the input and downsample /2 the output. + rescale (int): rescale initial weights of convolutions + to get their standard deviation closer to `rescale` + kernel_size (int): kernel size for convolutions + stride (int): stride for convolutions + growth (float): multiply (resp divide) number of channels by that + for each layer of the encoder (resp decoder) + lstm_layers (int): number of lstm layers, 0 = no lstm + context (int): kernel size of the convolution in the + decoder before the transposed convolution. If > 1, + will provide some context from neighboring time + steps. + samplerate (int): stored as meta information for easing + future evaluations of the model. + segment_length (int): stored as meta information for easing + future evaluations of the model. Length of the segments on which + the model was trained. + """ + + super().__init__() + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.resample = resample + self.channels = channels + self.normalize = normalize + self.samplerate = samplerate + self.segment_length = segment_length + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + if glu: + activation = nn.GLU(dim=1) + ch_scale = 2 + else: + activation = nn.ReLU() + ch_scale = 1 + in_channels = audio_channels + for index in range(depth): + encode = [] + encode += [nn.Conv1d(in_channels, channels, kernel_size, stride), nn.ReLU()] + if rewrite: + encode += [nn.Conv1d(channels, ch_scale * channels, 1), activation] + self.encoder.append(nn.Sequential(*encode)) + + decode = [] + if index > 0: + out_channels = in_channels + else: + out_channels = len(self.sources) * audio_channels + if rewrite: + decode += [nn.Conv1d(channels, ch_scale * channels, context), activation] + decode += [nn.ConvTranspose1d(channels, out_channels, kernel_size, stride)] + if index > 0: + decode.append(nn.ReLU()) + self.decoder.insert(0, nn.Sequential(*decode)) + in_channels = channels + channels = int(growth * channels) + + channels = in_channels + + if lstm_layers: + self.lstm = BLSTM(channels, lstm_layers) + else: + self.lstm = None + + if rescale: + rescale_module(self, reference=rescale) + + def valid_length(self, length): + """ + Return the nearest valid length to use with the model so that + there is no time steps left over in a convolutions, e.g. for all + layers, size of the input - kernel_size % stride = 0. + + If the mixture has a valid length, the estimated sources + will have exactly the same length when context = 1. If context > 1, + the two signals can be center trimmed to match. + + For training, extracts should have a valid length.For evaluation + on full tracks we recommend passing `pad = True` to :method:`forward`. 
+ """ + if self.resample: + length *= 2 + for _ in range(self.depth): + length = math.ceil((length - self.kernel_size) / self.stride) + 1 + length = max(1, length) + length += self.context - 1 + for _ in range(self.depth): + length = (length - 1) * self.stride + self.kernel_size + + if self.resample: + length = math.ceil(length / 2) + return int(length) + + def forward(self, mix): + x = mix + + if self.normalize: + mono = mix.mean(dim=1, keepdim=True) + mean = mono.mean(dim=-1, keepdim=True) + std = mono.std(dim=-1, keepdim=True) + else: + mean = 0 + std = 1 + + x = (x - mean) / (1e-5 + std) + + if self.resample: + x = julius.resample_frac(x, 1, 2) + + saved = [] + for encode in self.encoder: + x = encode(x) + saved.append(x) + if self.lstm: + x = self.lstm(x) + for decode in self.decoder: + skip = center_trim(saved.pop(-1), x) + x = x + skip + x = decode(x) + + if self.resample: + x = julius.resample_frac(x, 2, 1) + x = x * std + mean + x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1)) + return x diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/parser.py b/lib/infer/infer_libs/uvr5_pack/demucs/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8a19cf976e3c6dfe411da64b8dce3e9a4548e0 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/parser.py @@ -0,0 +1,244 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import os +from pathlib import Path + + +def get_parser(): + parser = argparse.ArgumentParser("demucs", description="Train and evaluate Demucs.") + default_raw = None + default_musdb = None + if 'DEMUCS_RAW' in os.environ: + default_raw = Path(os.environ['DEMUCS_RAW']) + if 'DEMUCS_MUSDB' in os.environ: + default_musdb = Path(os.environ['DEMUCS_MUSDB']) + parser.add_argument( + "--raw", + type=Path, + default=default_raw, + help="Path to raw audio, can be faster, see python3 -m demucs.raw to extract.") + parser.add_argument("--no_raw", action="store_const", const=None, dest="raw") + parser.add_argument("-m", + "--musdb", + type=Path, + default=default_musdb, + help="Path to musdb root") + parser.add_argument("--is_wav", action="store_true", + help="Indicate that the MusDB dataset is in wav format (i.e. MusDB-HQ).") + parser.add_argument("--metadata", type=Path, default=Path("metadata/"), + help="Folder where metadata information is stored.") + parser.add_argument("--wav", type=Path, + help="Path to a wav dataset. 
This should contain a 'train' and a 'valid' " + "subfolder.") + parser.add_argument("--samplerate", type=int, default=44100) + parser.add_argument("--audio_channels", type=int, default=2) + parser.add_argument("--samples", + default=44100 * 10, + type=int, + help="number of samples to feed in") + parser.add_argument("--data_stride", + default=44100, + type=int, + help="Stride for chunks, shorter = longer epochs") + parser.add_argument("-w", "--workers", default=10, type=int, help="Loader workers") + parser.add_argument("--eval_workers", default=2, type=int, help="Final evaluation workers") + parser.add_argument("-d", + "--device", + help="Device to train on, default is cuda if available else cpu") + parser.add_argument("--eval_cpu", action="store_true", help="Eval on test will be run on cpu.") + parser.add_argument("--dummy", help="Dummy parameter, useful to create a new checkpoint file") + parser.add_argument("--test", help="Just run the test pipeline + one validation. " + "This should be a filename relative to the models/ folder.") + parser.add_argument("--test_pretrained", help="Just run the test pipeline + one validation, " + "on a pretrained model. ") + + parser.add_argument("--rank", default=0, type=int) + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument("--master") + + parser.add_argument("--checkpoints", + type=Path, + default=Path("checkpoints"), + help="Folder where to store checkpoints etc") + parser.add_argument("--evals", + type=Path, + default=Path("evals"), + help="Folder where to store evals and waveforms") + parser.add_argument("--save", + action="store_true", + help="Save estimated for the test set waveforms") + parser.add_argument("--logs", + type=Path, + default=Path("logs"), + help="Folder where to store logs") + parser.add_argument("--models", + type=Path, + default=Path("models"), + help="Folder where to store trained models") + parser.add_argument("-R", + "--restart", + action='store_true', + help='Restart training, ignoring previous run') + + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("-e", "--epochs", type=int, default=180, help="Number of epochs") + parser.add_argument("-r", + "--repeat", + type=int, + default=2, + help="Repeat the train set, longer epochs") + parser.add_argument("-b", "--batch_size", type=int, default=64) + parser.add_argument("--lr", type=float, default=3e-4) + parser.add_argument("--mse", action="store_true", help="Use MSE instead of L1") + parser.add_argument("--init", help="Initialize from a pre-trained model.") + + # Augmentation options + parser.add_argument("--no_augment", + action="store_false", + dest="augment", + default=True, + help="No basic data augmentation.") + parser.add_argument("--repitch", type=float, default=0.2, + help="Probability to do tempo/pitch change") + parser.add_argument("--max_tempo", type=float, default=12, + help="Maximum relative tempo change in %% when using repitch.") + + parser.add_argument("--remix_group_size", + type=int, + default=4, + help="Shuffle sources using group of this size. 
Useful to somewhat " + "replicate multi-gpu training " + "on less GPUs.") + parser.add_argument("--shifts", + type=int, + default=10, + help="Number of random shifts used for the shift trick.") + parser.add_argument("--overlap", + type=float, + default=0.25, + help="Overlap when --split_valid is passed.") + + # See model.py for doc + parser.add_argument("--growth", + type=float, + default=2., + help="Number of channels between two layers will increase by this factor") + parser.add_argument("--depth", + type=int, + default=6, + help="Number of layers for the encoder and decoder") + parser.add_argument("--lstm_layers", type=int, default=2, help="Number of layers for the LSTM") + parser.add_argument("--channels", + type=int, + default=64, + help="Number of channels for the first encoder layer") + parser.add_argument("--kernel_size", + type=int, + default=8, + help="Kernel size for the (transposed) convolutions") + parser.add_argument("--conv_stride", + type=int, + default=4, + help="Stride for the (transposed) convolutions") + parser.add_argument("--context", + type=int, + default=3, + help="Context size for the decoder convolutions " + "before the transposed convolutions") + parser.add_argument("--rescale", + type=float, + default=0.1, + help="Initial weight rescale reference") + parser.add_argument("--no_resample", action="store_false", + default=True, dest="resample", + help="No Resampling of the input/output x2") + parser.add_argument("--no_glu", + action="store_false", + default=True, + dest="glu", + help="Replace all GLUs by ReLUs") + parser.add_argument("--no_rewrite", + action="store_false", + default=True, + dest="rewrite", + help="No 1x1 rewrite convolutions") + parser.add_argument("--normalize", action="store_true") + parser.add_argument("--no_norm_wav", action="store_false", dest='norm_wav', default=True) + + # Tasnet options + parser.add_argument("--tasnet", action="store_true") + parser.add_argument("--split_valid", + action="store_true", + help="Predict chunks by chunks for valid and test. Required for tasnet") + parser.add_argument("--X", type=int, default=8) + + # Other options + parser.add_argument("--show", + action="store_true", + help="Show model architecture, size and exit") + parser.add_argument("--save_model", action="store_true", + help="Skip traning, just save final model " + "for the current checkpoint value.") + parser.add_argument("--save_state", + help="Skip training, just save state " + "for the current checkpoint value. You should " + "provide a model name as argument.") + + # Quantization options + parser.add_argument("--q-min-size", type=float, default=1, + help="Only quantize layers over this size (in MB)") + parser.add_argument( + "--qat", type=int, help="If provided, use QAT training with that many bits.") + + parser.add_argument("--diffq", type=float, default=0) + parser.add_argument( + "--ms-target", type=float, default=162, + help="Model size target in MB, when using DiffQ. Best model will be kept " + "only if it is smaller than this target.") + + return parser + + +def get_name(parser, args): + """ + Return the name of an experiment given the args. Some parameters are ignored, + for instance --workers, as they do not impact the final result. 
+ """ + ignore_args = set([ + "checkpoints", + "deterministic", + "eval", + "evals", + "eval_cpu", + "eval_workers", + "logs", + "master", + "rank", + "restart", + "save", + "save_model", + "save_state", + "show", + "workers", + "world_size", + ]) + parts = [] + name_args = dict(args.__dict__) + for name, value in name_args.items(): + if name in ignore_args: + continue + if value != parser.get_default(name): + if isinstance(value, Path): + parts.append(f"{name}={value.name}") + else: + parts.append(f"{name}={value}") + if parts: + name = " ".join(parts) + else: + name = "default" + return name diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/pretrained.py b/lib/infer/infer_libs/uvr5_pack/demucs/pretrained.py new file mode 100644 index 0000000000000000000000000000000000000000..6aac5db100cc7a9084af96d2cd083f0c8fac473c --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/pretrained.py @@ -0,0 +1,107 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# author: adefossez + +import logging + +from diffq import DiffQuantizer +import torch.hub + +from .model import Demucs +from .tasnet import ConvTasNet +from .utils import set_state + +logger = logging.getLogger(__name__) +ROOT = "https://dl.fbaipublicfiles.com/demucs/v3.0/" + +PRETRAINED_MODELS = { + 'demucs': 'e07c671f', + 'demucs48_hq': '28a1282c', + 'demucs_extra': '3646af93', + 'demucs_quantized': '07afea75', + 'tasnet': 'beb46fac', + 'tasnet_extra': 'df3777b2', + 'demucs_unittest': '09ebc15f', +} + +SOURCES = ["drums", "bass", "other", "vocals"] + + +def get_url(name): + sig = PRETRAINED_MODELS[name] + return ROOT + name + "-" + sig[:8] + ".th" + + +def is_pretrained(name): + return name in PRETRAINED_MODELS + + +def load_pretrained(name): + if name == "demucs": + return demucs(pretrained=True) + elif name == "demucs48_hq": + return demucs(pretrained=True, hq=True, channels=48) + elif name == "demucs_extra": + return demucs(pretrained=True, extra=True) + elif name == "demucs_quantized": + return demucs(pretrained=True, quantized=True) + elif name == "demucs_unittest": + return demucs_unittest(pretrained=True) + elif name == "tasnet": + return tasnet(pretrained=True) + elif name == "tasnet_extra": + return tasnet(pretrained=True, extra=True) + else: + raise ValueError(f"Invalid pretrained name {name}") + + +def _load_state(name, model, quantizer=None): + url = get_url(name) + state = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True) + set_state(model, quantizer, state) + if quantizer: + quantizer.detach() + + +def demucs_unittest(pretrained=True): + model = Demucs(channels=4, sources=SOURCES) + if pretrained: + _load_state('demucs_unittest', model) + return model + + +def demucs(pretrained=True, extra=False, quantized=False, hq=False, channels=64): + if not pretrained and (extra or quantized or hq): + raise ValueError("if extra or quantized is True, pretrained must be True.") + model = Demucs(sources=SOURCES, channels=channels) + if pretrained: + name = 'demucs' + if channels != 64: + name += str(channels) + quantizer = None + if sum([extra, quantized, hq]) > 1: + raise ValueError("Only one of extra, quantized, hq, can be True.") + if quantized: + quantizer = DiffQuantizer(model, group_size=8, min_size=1) + name += '_quantized' + if extra: + name += '_extra' + if hq: + name += '_hq' + _load_state(name, model, quantizer) + return model + + +def 
tasnet(pretrained=True, extra=False): + if not pretrained and extra: + raise ValueError("if extra is True, pretrained must be True.") + model = ConvTasNet(X=10, sources=SOURCES) + if pretrained: + name = 'tasnet' + if extra: + name = 'tasnet_extra' + _load_state(name, model) + return model diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/raw.py b/lib/infer/infer_libs/uvr5_pack/demucs/raw.py new file mode 100644 index 0000000000000000000000000000000000000000..d4941ad2d7ed858f490db441f5b46b12bd61ad78 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/raw.py @@ -0,0 +1,173 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import os +from collections import defaultdict, namedtuple +from pathlib import Path + +import musdb +import numpy as np +import torch as th +import tqdm +from torch.utils.data import DataLoader + +from .audio import AudioFile + +ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"]) + + +class Rawset: + """ + Dataset of raw, normalized, float32 audio files + """ + def __init__(self, path, samples=None, stride=None, channels=2, streams=None): + self.path = Path(path) + self.channels = channels + self.samples = samples + if stride is None: + stride = samples if samples is not None else 0 + self.stride = stride + entries = defaultdict(list) + for root, folders, files in os.walk(self.path, followlinks=True): + folders.sort() + files.sort() + for file in files: + if file.endswith(".raw"): + path = Path(root) / file + name, stream = path.stem.rsplit('.', 1) + entries[(path.parent.relative_to(self.path), name)].append(int(stream)) + + self._entries = list(entries.keys()) + + sizes = [] + self._lengths = [] + ref_streams = sorted(entries[self._entries[0]]) + assert ref_streams == list(range(len(ref_streams))) + if streams is None: + self.streams = ref_streams + else: + self.streams = streams + for entry in sorted(entries.keys()): + streams = entries[entry] + assert sorted(streams) == ref_streams + file = self._path(*entry) + length = file.stat().st_size // (4 * channels) + if samples is None: + sizes.append(1) + else: + if length < samples: + self._entries.remove(entry) + continue + sizes.append((length - samples) // stride + 1) + self._lengths.append(length) + if not sizes: + raise ValueError(f"Empty dataset {self.path}") + self._cumulative_sizes = np.cumsum(sizes) + self._sizes = sizes + + def __len__(self): + return self._cumulative_sizes[-1] + + @property + def total_length(self): + return sum(self._lengths) + + def chunk_info(self, index): + file_index = np.searchsorted(self._cumulative_sizes, index, side='right') + if file_index == 0: + local_index = index + else: + local_index = index - self._cumulative_sizes[file_index - 1] + return ChunkInfo(offset=local_index * self.stride, + file_index=file_index, + local_index=local_index) + + def _path(self, folder, name, stream=0): + return self.path / folder / (name + f'.{stream}.raw') + + def __getitem__(self, index): + chunk = self.chunk_info(index) + entry = self._entries[chunk.file_index] + + length = self.samples or self._lengths[chunk.file_index] + streams = [] + to_read = length * self.channels * 4 + for stream_index, stream in enumerate(self.streams): + offset = chunk.offset * 4 * self.channels + file = open(self._path(*entry, stream=stream), 'rb') + file.seek(offset) + content = file.read(to_read) + assert len(content) == 
to_read + content = np.frombuffer(content, dtype=np.float32) + content = content.copy() # make writable + streams.append(th.from_numpy(content).view(length, self.channels).t()) + return th.stack(streams, dim=0) + + def name(self, index): + chunk = self.chunk_info(index) + folder, name = self._entries[chunk.file_index] + return folder / name + + +class MusDBSet: + def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2): + self.mus = mus + self.streams = streams + self.samplerate = samplerate + self.channels = channels + + def __len__(self): + return len(self.mus.tracks) + + def __getitem__(self, index): + track = self.mus.tracks[index] + return (track.name, AudioFile(track.path).read(channels=self.channels, + seek_time=0, + streams=self.streams, + samplerate=self.samplerate)) + + +def build_raw(mus, destination, normalize, workers, samplerate, channels): + destination.mkdir(parents=True, exist_ok=True) + loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate), + batch_size=1, + num_workers=workers, + collate_fn=lambda x: x[0]) + for name, streams in tqdm.tqdm(loader): + if normalize: + ref = streams[0].mean(dim=0) # use mono mixture as reference + streams = (streams - ref.mean()) / ref.std() + for index, stream in enumerate(streams): + open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes()) + + +def main(): + parser = argparse.ArgumentParser('rawset') + parser.add_argument('--workers', type=int, default=10) + parser.add_argument('--samplerate', type=int, default=44100) + parser.add_argument('--channels', type=int, default=2) + parser.add_argument('musdb', type=Path) + parser.add_argument('destination', type=Path) + + args = parser.parse_args() + + build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"), + args.destination / "train", + normalize=True, + channels=args.channels, + samplerate=args.samplerate, + workers=args.workers) + build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"), + args.destination / "valid", + normalize=True, + samplerate=args.samplerate, + channels=args.channels, + workers=args.workers) + + +if __name__ == "__main__": + main() diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/repitch.py b/lib/infer/infer_libs/uvr5_pack/demucs/repitch.py new file mode 100644 index 0000000000000000000000000000000000000000..8846ab2d951a024c95067f66a113968500442828 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/repitch.py @@ -0,0 +1,96 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import io +import random +import subprocess as sp +import tempfile + +import numpy as np +import torch +from scipy.io import wavfile + + +def i16_pcm(wav): + if wav.dtype == np.int16: + return wav + return (wav * 2**15).clamp_(-2**15, 2**15 - 1).short() + + +def f32_pcm(wav): + if wav.dtype == np.float: + return wav + return wav.float() / 2**15 + + +class RepitchedWrapper: + """ + Wrap a dataset to apply online change of pitch / tempo. 
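One caveat in `f32_pcm` above: it compares against `np.float`, an alias NumPy deprecated in 1.20 and removed in 1.24, so on current NumPy the check itself raises AttributeError (and `repitch`, which calls `f32_pcm` on soundstretch's output, fails with it). A defensive sketch of the same conversion, not the library's code:

```python
import torch

def f32_pcm_safe(wav: torch.Tensor) -> torch.Tensor:
    # Same intent as f32_pcm above, minus the removed np.float alias:
    # pass float tensors through, scale int16 PCM into [-1, 1].
    if wav.dtype.is_floating_point:
        return wav
    return wav.float() / 2 ** 15

pcm = torch.randint(-2 ** 15, 2 ** 15 - 1, (2, 1000), dtype=torch.int16)
print(f32_pcm_safe(pcm).abs().max() <= 1.0)  # tensor(True)
```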
+ """ + def __init__(self, dataset, proba=0.2, max_pitch=2, max_tempo=12, tempo_std=5, vocals=[3]): + self.dataset = dataset + self.proba = proba + self.max_pitch = max_pitch + self.max_tempo = max_tempo + self.tempo_std = tempo_std + self.vocals = vocals + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, index): + streams = self.dataset[index] + in_length = streams.shape[-1] + out_length = int((1 - 0.01 * self.max_tempo) * in_length) + + if random.random() < self.proba: + delta_pitch = random.randint(-self.max_pitch, self.max_pitch) + delta_tempo = random.gauss(0, self.tempo_std) + delta_tempo = min(max(-self.max_tempo, delta_tempo), self.max_tempo) + outs = [] + for idx, stream in enumerate(streams): + stream = repitch( + stream, + delta_pitch, + delta_tempo, + voice=idx in self.vocals) + outs.append(stream[:, :out_length]) + streams = torch.stack(outs) + else: + streams = streams[..., :out_length] + return streams + + +def repitch(wav, pitch, tempo, voice=False, quick=False, samplerate=44100): + """ + tempo is a relative delta in percentage, so tempo=10 means tempo at 110%! + pitch is in semi tones. + Requires `soundstretch` to be installed, see + https://www.surina.net/soundtouch/soundstretch.html + """ + outfile = tempfile.NamedTemporaryFile(suffix=".wav") + in_ = io.BytesIO() + wavfile.write(in_, samplerate, i16_pcm(wav).t().numpy()) + command = [ + "soundstretch", + "stdin", + outfile.name, + f"-pitch={pitch}", + f"-tempo={tempo:.6f}", + ] + if quick: + command += ["-quick"] + if voice: + command += ["-speech"] + try: + sp.run(command, capture_output=True, input=in_.getvalue(), check=True) + except sp.CalledProcessError as error: + raise RuntimeError(f"Could not change bpm because {error.stderr.decode('utf-8')}") + sr, wav = wavfile.read(outfile.name) + wav = wav.copy() + wav = f32_pcm(torch.from_numpy(wav).t()) + assert sr == samplerate + return wav diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/separate.py b/lib/infer/infer_libs/uvr5_pack/demucs/separate.py new file mode 100644 index 0000000000000000000000000000000000000000..890ef271fe61690106424ea7bf79a1cff3d849d3 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/separate.py @@ -0,0 +1,185 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import sys +from pathlib import Path +import subprocess + +import julius +import torch as th +import torchaudio as ta + +from .audio import AudioFile, convert_audio_channels +from .pretrained import is_pretrained, load_pretrained +from .utils import apply_model, load_model + + +def load_track(track, device, audio_channels, samplerate): + errors = {} + wav = None + + try: + wav = AudioFile(track).read( + streams=0, + samplerate=samplerate, + channels=audio_channels).to(device) + except FileNotFoundError: + errors['ffmpeg'] = 'Ffmpeg is not installed.' + except subprocess.CalledProcessError: + errors['ffmpeg'] = 'FFmpeg could not read the file.' + + if wav is None: + try: + wav, sr = ta.load(str(track)) + except RuntimeError as err: + errors['torchaudio'] = err.args[0] + else: + wav = convert_audio_channels(wav, audio_channels) + wav = wav.to(device) + wav = julius.resample_frac(wav, sr, samplerate) + + if wav is None: + print(f"Could not load file {track}. " + "Maybe it is not a supported file format? 
") + for backend, error in errors.items(): + print(f"When trying to load using {backend}, got the following error: {error}") + sys.exit(1) + return wav + + +def encode_mp3(wav, path, bitrate=320, samplerate=44100, channels=2, verbose=False): + try: + import lameenc + except ImportError: + print("Failed to call lame encoder. Maybe it is not installed? " + "On windows, run `python.exe -m pip install -U lameenc`, " + "on OSX/Linux, run `python3 -m pip install -U lameenc`, " + "then try again.", file=sys.stderr) + sys.exit(1) + encoder = lameenc.Encoder() + encoder.set_bit_rate(bitrate) + encoder.set_in_sample_rate(samplerate) + encoder.set_channels(channels) + encoder.set_quality(2) # 2-highest, 7-fastest + if not verbose: + encoder.silence() + wav = wav.transpose(0, 1).numpy() + mp3_data = encoder.encode(wav.tobytes()) + mp3_data += encoder.flush() + with open(path, "wb") as f: + f.write(mp3_data) + + +def main(): + parser = argparse.ArgumentParser("demucs.separate", + description="Separate the sources for the given tracks") + parser.add_argument("audios/tracks", nargs='+', type=Path, default=[], help='Path to tracks') + parser.add_argument("-n", + "--name", + default="demucs_quantized", + help="Model name. See README.md for the list of pretrained models. " + "Default is demucs_quantized.") + parser.add_argument("-v", "--verbose", action="store_true") + parser.add_argument("-o", + "--out", + type=Path, + default=Path("audios/separated"), + help="Folder where to put extracted tracks. A subfolder " + "with the model name will be created.") + parser.add_argument("--models", + type=Path, + default=Path("models"), + help="Path to trained models. " + "Also used to store downloaded pretrained models") + parser.add_argument("-d", + "--device", + default="cuda" if th.cuda.is_available() else "cpu", + help="Device to use, default is cuda if available else cpu") + parser.add_argument("--shifts", + default=0, + type=int, + help="Number of random shifts for equivariant stabilization." + "Increase separation time but improves quality for Demucs. 10 was used " + "in the original paper.") + parser.add_argument("--overlap", + default=0.25, + type=float, + help="Overlap between the splits.") + parser.add_argument("--no-split", + action="store_false", + dest="split", + default=True, + help="Doesn't split audio in chunks. This can use large amounts of memory.") + parser.add_argument("--float32", + action="store_true", + help="Convert the output wavefile to use pcm f32 format instead of s16. 
" + "This should not make a difference if you just plan on listening to the " + "audio but might be needed to compute exactly metrics like SDR etc.") + parser.add_argument("--int16", + action="store_false", + dest="float32", + help="Opposite of --float32, here for compatibility.") + parser.add_argument("--mp3", action="store_true", + help="Convert the output wavs to mp3.") + parser.add_argument("--mp3-bitrate", + default=320, + type=int, + help="Bitrate of converted mp3.") + + args = parser.parse_args() + name = args.name + ".th" + model_path = args.models / name + if model_path.is_file(): + model = load_model(model_path) + else: + if is_pretrained(args.name): + model = load_pretrained(args.name) + else: + print(f"No pre-trained model {args.name}", file=sys.stderr) + sys.exit(1) + model.to(args.device) + + out = args.out / args.name + out.mkdir(parents=True, exist_ok=True) + print(f"Separated tracks will be stored in {out.resolve()}") + for track in args.tracks: + if not track.exists(): + print( + f"File {track} does not exist. If the path contains spaces, " + "please try again after surrounding the entire path with quotes \"\".", + file=sys.stderr) + continue + print(f"Separating track {track}") + wav = load_track(track, args.device, model.audio_channels, model.samplerate) + + ref = wav.mean(0) + wav = (wav - ref.mean()) / ref.std() + sources = apply_model(model, wav, shifts=args.shifts, split=args.split, + overlap=args.overlap, progress=True) + sources = sources * ref.std() + ref.mean() + + track_folder = out / track.name.rsplit(".", 1)[0] + track_folder.mkdir(exist_ok=True) + for source, name in zip(sources, model.sources): + source = source / max(1.01 * source.abs().max(), 1) + if args.mp3 or not args.float32: + source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short() + source = source.cpu() + stem = str(track_folder / name) + if args.mp3: + encode_mp3(source, stem + ".mp3", + bitrate=args.mp3_bitrate, + samplerate=model.samplerate, + channels=model.audio_channels, + verbose=args.verbose) + else: + wavname = str(track_folder / f"{name}.wav") + ta.save(wavname, source, sample_rate=model.samplerate) + + +if __name__ == "__main__": + main() diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/tasnet.py b/lib/infer/infer_libs/uvr5_pack/demucs/tasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc1257925ea8f4fbe389ddd6d73ce9fdf45f6d4 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/tasnet.py @@ -0,0 +1,452 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +# Created on 2018/12 +# Author: Kaituo XU +# Modified on 2019/11 by Alexandre Defossez, added support for multiple output channels +# Here is the original license: +# The MIT License (MIT) +# +# Copyright (c) 2018 Kaituo XU +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .utils import capture_init + +EPS = 1e-8 + + +def overlap_and_add(signal, frame_step): + outer_dimensions = signal.size()[:-2] + frames, frame_length = signal.size()[-2:] + + subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor + subframe_step = frame_step // subframe_length + subframes_per_frame = frame_length // subframe_length + output_size = frame_step * (frames - 1) + frame_length + output_subframes = output_size // subframe_length + + subframe_signal = signal.view(*outer_dimensions, -1, subframe_length) + + frame = torch.arange(0, output_subframes, + device=signal.device).unfold(0, subframes_per_frame, subframe_step) + frame = frame.long() # signal may in GPU or CPU + frame = frame.contiguous().view(-1) + + result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length) + result.index_add_(-2, frame, subframe_signal) + result = result.view(*outer_dimensions, -1) + return result + + +class ConvTasNet(nn.Module): + @capture_init + def __init__(self, + sources, + N=256, + L=20, + B=256, + H=512, + P=3, + X=8, + R=4, + audio_channels=2, + norm_type="gLN", + causal=False, + mask_nonlinear='relu', + samplerate=44100, + segment_length=44100 * 2 * 4): + """ + Args: + sources: list of sources + N: Number of filters in autoencoder + L: Length of the filters (in samples) + B: Number of channels in bottleneck 1 × 1-conv block + H: Number of channels in convolutional blocks + P: Kernel size in convolutional blocks + X: Number of convolutional blocks in each repeat + R: Number of repeats + norm_type: BN, gLN, cLN + causal: causal or non-causal + mask_nonlinear: use which non-linear function to generate mask + """ + super(ConvTasNet, self).__init__() + # Hyper-parameter + self.sources = sources + self.C = len(sources) + self.N, self.L, self.B, self.H, self.P, self.X, self.R = N, L, B, H, P, X, R + self.norm_type = norm_type + self.causal = causal + self.mask_nonlinear = mask_nonlinear + self.audio_channels = audio_channels + self.samplerate = samplerate + self.segment_length = segment_length + # Components + self.encoder = Encoder(L, N, audio_channels) + self.separator = TemporalConvNet( + N, B, H, P, X, R, self.C, norm_type, causal, mask_nonlinear) + self.decoder = Decoder(N, L, audio_channels) + # init + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + def valid_length(self, length): + return length + + def forward(self, mixture): + """ + Args: + mixture: [M, T], M is batch size, T is #samples + Returns: + est_source: [M, C, T] + """ + mixture_w = self.encoder(mixture) + est_mask = self.separator(mixture_w) + est_source = self.decoder(mixture_w, est_mask) + + # T changed after conv1d in encoder, fix it here + T_origin = mixture.size(-1) + T_conv = est_source.size(-1) + est_source = F.pad(est_source, (0, T_origin - T_conv)) + return est_source + + +class Encoder(nn.Module): + """Estimation of the nonnegative mixture weight by a 1-D conv layer. 
+ """ + def __init__(self, L, N, audio_channels): + super(Encoder, self).__init__() + # Hyper-parameter + self.L, self.N = L, N + # Components + # 50% overlap + self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L, stride=L // 2, bias=False) + + def forward(self, mixture): + """ + Args: + mixture: [M, T], M is batch size, T is #samples + Returns: + mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1 + """ + mixture_w = F.relu(self.conv1d_U(mixture)) # [M, N, K] + return mixture_w + + +class Decoder(nn.Module): + def __init__(self, N, L, audio_channels): + super(Decoder, self).__init__() + # Hyper-parameter + self.N, self.L = N, L + self.audio_channels = audio_channels + # Components + self.basis_signals = nn.Linear(N, audio_channels * L, bias=False) + + def forward(self, mixture_w, est_mask): + """ + Args: + mixture_w: [M, N, K] + est_mask: [M, C, N, K] + Returns: + est_source: [M, C, T] + """ + # D = W * M + source_w = torch.unsqueeze(mixture_w, 1) * est_mask # [M, C, N, K] + source_w = torch.transpose(source_w, 2, 3) # [M, C, K, N] + # S = DV + est_source = self.basis_signals(source_w) # [M, C, K, ac * L] + m, c, k, _ = est_source.size() + est_source = est_source.view(m, c, k, self.audio_channels, -1).transpose(2, 3).contiguous() + est_source = overlap_and_add(est_source, self.L // 2) # M x C x ac x T + return est_source + + +class TemporalConvNet(nn.Module): + def __init__(self, N, B, H, P, X, R, C, norm_type="gLN", causal=False, mask_nonlinear='relu'): + """ + Args: + N: Number of filters in autoencoder + B: Number of channels in bottleneck 1 × 1-conv block + H: Number of channels in convolutional blocks + P: Kernel size in convolutional blocks + X: Number of convolutional blocks in each repeat + R: Number of repeats + C: Number of speakers + norm_type: BN, gLN, cLN + causal: causal or non-causal + mask_nonlinear: use which non-linear function to generate mask + """ + super(TemporalConvNet, self).__init__() + # Hyper-parameter + self.C = C + self.mask_nonlinear = mask_nonlinear + # Components + # [M, N, K] -> [M, N, K] + layer_norm = ChannelwiseLayerNorm(N) + # [M, N, K] -> [M, B, K] + bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False) + # [M, B, K] -> [M, B, K] + repeats = [] + for r in range(R): + blocks = [] + for x in range(X): + dilation = 2**x + padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2 + blocks += [ + TemporalBlock(B, + H, + P, + stride=1, + padding=padding, + dilation=dilation, + norm_type=norm_type, + causal=causal) + ] + repeats += [nn.Sequential(*blocks)] + temporal_conv_net = nn.Sequential(*repeats) + # [M, B, K] -> [M, C*N, K] + mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False) + # Put together + self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, + mask_conv1x1) + + def forward(self, mixture_w): + """ + Keep this API same with TasNet + Args: + mixture_w: [M, N, K], M is batch size + returns: + est_mask: [M, C, N, K] + """ + M, N, K = mixture_w.size() + score = self.network(mixture_w) # [M, N, K] -> [M, C*N, K] + score = score.view(M, self.C, N, K) # [M, C*N, K] -> [M, C, N, K] + if self.mask_nonlinear == 'softmax': + est_mask = F.softmax(score, dim=1) + elif self.mask_nonlinear == 'relu': + est_mask = F.relu(score) + else: + raise ValueError("Unsupported mask non-linear function") + return est_mask + + +class TemporalBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + norm_type="gLN", + causal=False): + super(TemporalBlock, self).__init__() + 
# [M, B, K] -> [M, H, K] + conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False) + prelu = nn.PReLU() + norm = chose_norm(norm_type, out_channels) + # [M, H, K] -> [M, B, K] + dsconv = DepthwiseSeparableConv(out_channels, in_channels, kernel_size, stride, padding, + dilation, norm_type, causal) + # Put together + self.net = nn.Sequential(conv1x1, prelu, norm, dsconv) + + def forward(self, x): + """ + Args: + x: [M, B, K] + Returns: + [M, B, K] + """ + residual = x + out = self.net(x) + # TODO: when P = 3 here works fine, but when P = 2 maybe need to pad? + return out + residual # look like w/o F.relu is better than w/ F.relu + # return F.relu(out + residual) + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + norm_type="gLN", + causal=False): + super(DepthwiseSeparableConv, self).__init__() + # Use `groups` option to implement depthwise convolution + # [M, H, K] -> [M, H, K] + depthwise_conv = nn.Conv1d(in_channels, + in_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=False) + if causal: + chomp = Chomp1d(padding) + prelu = nn.PReLU() + norm = chose_norm(norm_type, in_channels) + # [M, H, K] -> [M, B, K] + pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False) + # Put together + if causal: + self.net = nn.Sequential(depthwise_conv, chomp, prelu, norm, pointwise_conv) + else: + self.net = nn.Sequential(depthwise_conv, prelu, norm, pointwise_conv) + + def forward(self, x): + """ + Args: + x: [M, H, K] + Returns: + result: [M, B, K] + """ + return self.net(x) + + +class Chomp1d(nn.Module): + """To ensure the output length is the same as the input. + """ + def __init__(self, chomp_size): + super(Chomp1d, self).__init__() + self.chomp_size = chomp_size + + def forward(self, x): + """ + Args: + x: [M, H, Kpad] + Returns: + [M, H, K] + """ + return x[:, :, :-self.chomp_size].contiguous() + + +def chose_norm(norm_type, channel_size): + """The input of normlization will be (M, C, K), where M is batch size, + C is channel size and K is sequence length. + """ + if norm_type == "gLN": + return GlobalLayerNorm(channel_size) + elif norm_type == "cLN": + return ChannelwiseLayerNorm(channel_size) + elif norm_type == "id": + return nn.Identity() + else: # norm_type == "BN": + # Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statics + # along M and K, so this BN usage is right. 
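In the causal branch above, the depthwise convolution pads by `(P - 1) * dilation` on both sides and `Chomp1d` removes the same amount from the right, so the output aligns with the input and never sees future samples. A minimal sketch:

```python
import torch
from torch import nn
from lib.infer.infer_libs.uvr5_pack.demucs.tasnet import Chomp1d

P, dilation = 3, 2
pad = (P - 1) * dilation
conv = nn.Conv1d(4, 4, P, padding=pad, dilation=dilation)
x = torch.randn(1, 4, 16)
y = Chomp1d(pad)(conv(x))      # chomp the right-side padding back off
print(x.shape, y.shape)        # both torch.Size([1, 4, 16])
```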
+ return nn.BatchNorm1d(channel_size) + + +# TODO: Use nn.LayerNorm to impl cLN to speed up +class ChannelwiseLayerNorm(nn.Module): + """Channel-wise Layer Normalization (cLN)""" + def __init__(self, channel_size): + super(ChannelwiseLayerNorm, self).__init__() + self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.reset_parameters() + + def reset_parameters(self): + self.gamma.data.fill_(1) + self.beta.data.zero_() + + def forward(self, y): + """ + Args: + y: [M, N, K], M is batch size, N is channel size, K is length + Returns: + cLN_y: [M, N, K] + """ + mean = torch.mean(y, dim=1, keepdim=True) # [M, 1, K] + var = torch.var(y, dim=1, keepdim=True, unbiased=False) # [M, 1, K] + cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta + return cLN_y + + +class GlobalLayerNorm(nn.Module): + """Global Layer Normalization (gLN)""" + def __init__(self, channel_size): + super(GlobalLayerNorm, self).__init__() + self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.reset_parameters() + + def reset_parameters(self): + self.gamma.data.fill_(1) + self.beta.data.zero_() + + def forward(self, y): + """ + Args: + y: [M, N, K], M is batch size, N is channel size, K is length + Returns: + gLN_y: [M, N, K] + """ + # TODO: in torch 1.0, torch.mean() support dim list + mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) # [M, 1, 1] + var = (torch.pow(y - mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) + gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta + return gLN_y + + +if __name__ == "__main__": + torch.manual_seed(123) + M, N, L, T = 2, 3, 4, 12 + K = 2 * T // L - 1 + B, H, P, X, R, C, norm_type, causal = 2, 3, 3, 3, 2, 2, "gLN", False + mixture = torch.randint(3, (M, T)) + # test Encoder + encoder = Encoder(L, N) + encoder.conv1d_U.weight.data = torch.randint(2, encoder.conv1d_U.weight.size()) + mixture_w = encoder(mixture) + print('mixture', mixture) + print('U', encoder.conv1d_U.weight) + print('mixture_w', mixture_w) + print('mixture_w size', mixture_w.size()) + + # test TemporalConvNet + separator = TemporalConvNet(N, B, H, P, X, R, C, norm_type=norm_type, causal=causal) + est_mask = separator(mixture_w) + print('est_mask', est_mask) + + # test Decoder + decoder = Decoder(N, L) + est_mask = torch.randint(2, (B, K, C, N)) + est_source = decoder(mixture_w, est_mask) + print('est_source', est_source) + + # test Conv-TasNet + conv_tasnet = ConvTasNet(N, L, B, H, P, X, R, C, norm_type=norm_type) + est_source = conv_tasnet(mixture) + print('est_source', est_source) + print('est_source size', est_source.size()) diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/test.py b/lib/infer/infer_libs/uvr5_pack/demucs/test.py new file mode 100644 index 0000000000000000000000000000000000000000..4140914ddbff3543b4056ca0cb1b5e887434a40a --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/test.py @@ -0,0 +1,109 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
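The `__main__` smoke test above still calls the pre-refactor signatures (`Encoder(L, N)`, `Decoder(N, L)`, `ConvTasNet(N, L, ...)`), which no longer match this file after the multi-channel changes noted in its header, so it raises TypeError as written. A corrected minimal sketch:

```python
import torch
from lib.infer.infer_libs.uvr5_pack.demucs.tasnet import ConvTasNet

# Smoke test against the refactored signatures: sources first, then
# hyperparameters, with explicit audio_channels.
torch.manual_seed(123)
sources = ["drums", "bass", "other", "vocals"]
model = ConvTasNet(sources, N=16, L=20, B=8, H=16, P=3, X=2, R=2,
                   audio_channels=2)
mix = torch.randn(1, 2, 44100)      # [batch, channels, samples]
with torch.no_grad():
    est = model(mix)
print(est.shape)                    # torch.Size([1, 4, 2, 44100])
```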
+ +import gzip +import sys +from concurrent import futures + +import musdb +import museval +import torch as th +import tqdm +from scipy.io import wavfile +from torch import distributed + +from .audio import convert_audio +from .utils import apply_model + + +def evaluate(model, + musdb_path, + eval_folder, + workers=2, + device="cpu", + rank=0, + save=False, + shifts=0, + split=False, + overlap=0.25, + is_wav=False, + world_size=1): + """ + Evaluate model using museval. Run the model + on a single GPU, the bottleneck being the call to museval. + """ + + output_dir = eval_folder / "results" + output_dir.mkdir(exist_ok=True, parents=True) + json_folder = eval_folder / "results/test" + json_folder.mkdir(exist_ok=True, parents=True) + + # we load tracks from the original musdb set + test_set = musdb.DB(musdb_path, subsets=["test"], is_wav=is_wav) + src_rate = 44100 # hardcoded for now... + + for p in model.parameters(): + p.requires_grad = False + p.grad = None + + pendings = [] + with futures.ProcessPoolExecutor(workers or 1) as pool: + for index in tqdm.tqdm(range(rank, len(test_set), world_size), file=sys.stdout): + track = test_set.tracks[index] + + out = json_folder / f"{track.name}.json.gz" + if out.exists(): + continue + + mix = th.from_numpy(track.audio).t().float() + ref = mix.mean(dim=0) # mono mixture + mix = (mix - ref.mean()) / ref.std() + mix = convert_audio(mix, src_rate, model.samplerate, model.audio_channels) + estimates = apply_model(model, mix.to(device), + shifts=shifts, split=split, overlap=overlap) + estimates = estimates * ref.std() + ref.mean() + + estimates = estimates.transpose(1, 2) + references = th.stack( + [th.from_numpy(track.targets[name].audio).t() for name in model.sources]) + references = convert_audio(references, src_rate, + model.samplerate, model.audio_channels) + references = references.transpose(1, 2).numpy() + estimates = estimates.cpu().numpy() + win = int(1. * model.samplerate) + hop = int(1. * model.samplerate) + if save: + folder = eval_folder / "wav/test" / track.name + folder.mkdir(exist_ok=True, parents=True) + for name, estimate in zip(model.sources, estimates): + wavfile.write(str(folder / (name + ".wav")), 44100, estimate) + + if workers: + pendings.append((track.name, pool.submit( + museval.evaluate, references, estimates, win=win, hop=hop))) + else: + pendings.append((track.name, museval.evaluate( + references, estimates, win=win, hop=hop))) + del references, mix, estimates, track + + for track_name, pending in tqdm.tqdm(pendings, file=sys.stdout): + if workers: + pending = pending.result() + sdr, isr, sir, sar = pending + track_store = museval.TrackStore(win=44100, hop=44100, track_name=track_name) + for idx, target in enumerate(model.sources): + values = { + "SDR": sdr[idx].tolist(), + "SIR": sir[idx].tolist(), + "ISR": isr[idx].tolist(), + "SAR": sar[idx].tolist() + } + + track_store.add_target(target_name=target, values=values) + json_path = json_folder / f"{track_name}.json.gz" + gzip.open(json_path, "w").write(track_store.json.encode('utf-8')) + if world_size > 1: + distributed.barrier() diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/train.py b/lib/infer/infer_libs/uvr5_pack/demucs/train.py new file mode 100644 index 0000000000000000000000000000000000000000..6bd221279dc986a6df1a8d7b4d4444bb822a1cb3 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/train.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import sys + +import tqdm +from torch.utils.data import DataLoader +from torch.utils.data.distributed import DistributedSampler + +from .utils import apply_model, average_metric, center_trim + + +def train_model(epoch, + dataset, + model, + criterion, + optimizer, + augment, + quantizer=None, + diffq=0, + repeat=1, + device="cpu", + seed=None, + workers=4, + world_size=1, + batch_size=16): + + if world_size > 1: + sampler = DistributedSampler(dataset) + sampler_epoch = epoch * repeat + if seed is not None: + sampler_epoch += seed * 1000 + sampler.set_epoch(sampler_epoch) + batch_size //= world_size + loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=workers) + else: + loader = DataLoader(dataset, batch_size=batch_size, num_workers=workers, shuffle=True) + current_loss = 0 + model_size = 0 + for repetition in range(repeat): + tq = tqdm.tqdm(loader, + ncols=120, + desc=f"[{epoch:03d}] train ({repetition + 1}/{repeat})", + leave=False, + file=sys.stdout, + unit=" batch") + total_loss = 0 + for idx, sources in enumerate(tq): + if len(sources) < batch_size: + # skip uncomplete batch for augment.Remix to work properly + continue + sources = sources.to(device) + sources = augment(sources) + mix = sources.sum(dim=1) + + estimates = model(mix) + sources = center_trim(sources, estimates) + loss = criterion(estimates, sources) + model_size = 0 + if quantizer is not None: + model_size = quantizer.model_size() + + train_loss = loss + diffq * model_size + train_loss.backward() + grad_norm = 0 + for p in model.parameters(): + if p.grad is not None: + grad_norm += p.grad.data.norm()**2 + grad_norm = grad_norm**0.5 + optimizer.step() + optimizer.zero_grad() + + if quantizer is not None: + model_size = model_size.item() + + total_loss += loss.item() + current_loss = total_loss / (1 + idx) + tq.set_postfix(loss=f"{current_loss:.4f}", ms=f"{model_size:.2f}", + grad=f"{grad_norm:.5f}") + + # free some space before next round + del sources, mix, estimates, loss, train_loss + + if world_size > 1: + sampler.epoch += 1 + + if world_size > 1: + current_loss = average_metric(current_loss) + return current_loss, model_size + + +def validate_model(epoch, + dataset, + model, + criterion, + device="cpu", + rank=0, + world_size=1, + shifts=0, + overlap=0.25, + split=False): + indexes = range(rank, len(dataset), world_size) + tq = tqdm.tqdm(indexes, + ncols=120, + desc=f"[{epoch:03d}] valid", + leave=False, + file=sys.stdout, + unit=" track") + current_loss = 0 + for index in tq: + streams = dataset[index] + # first five minutes to avoid OOM on --upsample models + streams = streams[..., :15_000_000] + streams = streams.to(device) + sources = streams[1:] + mix = streams[0] + estimates = apply_model(model, mix, shifts=shifts, split=split, overlap=overlap) + loss = criterion(estimates, sources) + current_loss += loss.item() / len(indexes) + del estimates, streams, sources + + if world_size > 1: + current_loss = average_metric(current_loss, len(indexes)) + return current_loss diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/utils.py b/lib/infer/infer_libs/uvr5_pack/demucs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4364184059b1afe3c8379c77793a8e76dccf9699 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/utils.py @@ -0,0 +1,323 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
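The gradient-norm bookkeeping inside `train_model` above, shown on a toy module: squared per-parameter L2 norms are accumulated, then one global square root is taken.

```python
import torch
from torch import nn

model = nn.Linear(8, 4)
loss = model(torch.randn(16, 8)).pow(2).mean()
loss.backward()
grad_norm = 0.0
for p in model.parameters():
    if p.grad is not None:
        grad_norm += p.grad.data.norm() ** 2   # squared L2 per parameter
grad_norm = grad_norm ** 0.5                   # global gradient norm
print(float(grad_norm))
```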
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import errno +import functools +import hashlib +import inspect +import io +import os +import random +import socket +import tempfile +import warnings +import zlib +from contextlib import contextmanager + +from diffq import UniformQuantizer, DiffQuantizer +import torch as th +import tqdm +from torch import distributed +from torch.nn import functional as F + + +def center_trim(tensor, reference): + """ + Center trim `tensor` with respect to `reference`, along the last dimension. + `reference` can also be a number, representing the length to trim to. + If the size difference != 0 mod 2, the extra sample is removed on the right side. + """ + if hasattr(reference, "size"): + reference = reference.size(-1) + delta = tensor.size(-1) - reference + if delta < 0: + raise ValueError("tensor must be larger than reference. " f"Delta is {delta}.") + if delta: + tensor = tensor[..., delta // 2:-(delta - delta // 2)] + return tensor + + +def average_metric(metric, count=1.): + """ + Average `metric` which should be a float across all hosts. `count` should be + the weight for this particular host (i.e. number of examples). + """ + metric = th.tensor([count, count * metric], dtype=th.float32, device='cuda') + distributed.all_reduce(metric, op=distributed.ReduceOp.SUM) + return metric[1].item() / metric[0].item() + + +def free_port(host='', low=20000, high=40000): + """ + Return a port number that is most likely free. + This could suffer from a race condition although + it should be quite rare. + """ + sock = socket.socket() + while True: + port = random.randint(low, high) + try: + sock.bind((host, port)) + except OSError as error: + if error.errno == errno.EADDRINUSE: + continue + raise + return port + + +def sizeof_fmt(num, suffix='B'): + """ + Given `num` bytes, return human readable size. + Taken from https://stackoverflow.com/a/1094933 + """ + for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) + + +def human_seconds(seconds, display='.2f'): + """ + Given `seconds` seconds, return human readable duration. 
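`center_trim` above drops the extra sample on the right when the delta is odd; a one-line check:

```python
import torch
from lib.infer.infer_libs.uvr5_pack.demucs.utils import center_trim

# Trim length 10 to 7: delta = 3, one sample off the left, two off the right.
x = torch.arange(10).view(1, 1, 10)
print(center_trim(x, 7))  # tensor([[[1, 2, 3, 4, 5, 6, 7]]])
```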
+ """ + value = seconds * 1e6 + ratios = [1e3, 1e3, 60, 60, 24] + names = ['us', 'ms', 's', 'min', 'hrs', 'days'] + last = names.pop(0) + for name, ratio in zip(names, ratios): + if value / ratio < 0.3: + break + value /= ratio + last = name + return f"{format(value, display)} {last}" + + +class TensorChunk: + def __init__(self, tensor, offset=0, length=None): + total_length = tensor.shape[-1] + assert offset >= 0 + assert offset < total_length + + if length is None: + length = total_length - offset + else: + length = min(total_length - offset, length) + + self.tensor = tensor + self.offset = offset + self.length = length + self.device = tensor.device + + @property + def shape(self): + shape = list(self.tensor.shape) + shape[-1] = self.length + return shape + + def padded(self, target_length): + delta = target_length - self.length + total_length = self.tensor.shape[-1] + assert delta >= 0 + + start = self.offset - delta // 2 + end = start + target_length + + correct_start = max(0, start) + correct_end = min(total_length, end) + + pad_left = correct_start - start + pad_right = end - correct_end + + out = F.pad(self.tensor[..., correct_start:correct_end], (pad_left, pad_right)) + assert out.shape[-1] == target_length + return out + + +def tensor_chunk(tensor_or_chunk): + if isinstance(tensor_or_chunk, TensorChunk): + return tensor_or_chunk + else: + assert isinstance(tensor_or_chunk, th.Tensor) + return TensorChunk(tensor_or_chunk) + + +def apply_model(model, mix, shifts=None, split=False, + overlap=0.25, transition_power=1., progress=False): + """ + Apply model to a given mixture. + + Args: + shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec + and apply the oppositve shift to the output. This is repeated `shifts` time and + all predictions are averaged. This effectively makes the model time equivariant + and improves SDR by up to 0.2 points. + split (bool): if True, the input will be broken down in 8 seconds extracts + and predictions will be performed individually on each and concatenated. + Useful for model with large memory footprint like Tasnet. + progress (bool): if True, show a progress bar (requires split=True) + """ + assert transition_power >= 1, "transition_power < 1 leads to weird behavior." + device = mix.device + channels, length = mix.shape + if split: + out = th.zeros(len(model.sources), channels, length, device=device) + sum_weight = th.zeros(length, device=device) + segment = model.segment_length + stride = int((1 - overlap) * segment) + offsets = range(0, length, stride) + scale = stride / model.samplerate + if progress: + offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds') + # We start from a triangle shaped weight, with maximal weight in the middle + # of the segment. Then we normalize and take to the power `transition_power`. + # Large values of transition power will lead to sharper transitions. + weight = th.cat([th.arange(1, segment // 2 + 1), + th.arange(segment - segment // 2, 0, -1)]).to(device) + assert len(weight) == segment + # If the overlap < 50%, this will translate to linear transition when + # transition_power is 1. 
+ weight = (weight / weight.max())**transition_power + for offset in offsets: + chunk = TensorChunk(mix, offset, segment) + chunk_out = apply_model(model, chunk, shifts=shifts) + chunk_length = chunk_out.shape[-1] + out[..., offset:offset + segment] += weight[:chunk_length] * chunk_out + sum_weight[offset:offset + segment] += weight[:chunk_length] + offset += segment + assert sum_weight.min() > 0 + out /= sum_weight + return out + elif shifts: + max_shift = int(0.5 * model.samplerate) + mix = tensor_chunk(mix) + padded_mix = mix.padded(length + 2 * max_shift) + out = 0 + for _ in range(shifts): + offset = random.randint(0, max_shift) + shifted = TensorChunk(padded_mix, offset, length + max_shift - offset) + shifted_out = apply_model(model, shifted) + out += shifted_out[..., max_shift - offset:] + out /= shifts + return out + else: + valid_length = model.valid_length(length) + mix = tensor_chunk(mix) + padded_mix = mix.padded(valid_length) + with th.no_grad(): + out = model(padded_mix.unsqueeze(0))[0] + return center_trim(out, length) + + +@contextmanager +def temp_filenames(count, delete=True): + names = [] + try: + for _ in range(count): + names.append(tempfile.NamedTemporaryFile(delete=False).name) + yield names + finally: + if delete: + for name in names: + os.unlink(name) + + +def get_quantizer(model, args, optimizer=None): + quantizer = None + if args.diffq: + quantizer = DiffQuantizer( + model, min_size=args.q_min_size, group_size=8) + if optimizer is not None: + quantizer.setup_optimizer(optimizer) + elif args.qat: + quantizer = UniformQuantizer( + model, bits=args.qat, min_size=args.q_min_size) + return quantizer + + +def load_model(path, strict=False): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + load_from = path + package = th.load(load_from, 'cpu') + + klass = package["klass"] + args = package["args"] + kwargs = package["kwargs"] + + if strict: + model = klass(*args, **kwargs) + else: + sig = inspect.signature(klass) + for key in list(kwargs): + if key not in sig.parameters: + warnings.warn("Dropping inexistant parameter " + key) + del kwargs[key] + model = klass(*args, **kwargs) + + state = package["state"] + training_args = package["training_args"] + quantizer = get_quantizer(model, training_args) + + set_state(model, quantizer, state) + return model + + +def get_state(model, quantizer): + if quantizer is None: + state = {k: p.data.to('cpu') for k, p in model.state_dict().items()} + else: + state = quantizer.get_quantized_state() + buf = io.BytesIO() + th.save(state, buf) + state = {'compressed': zlib.compress(buf.getvalue())} + return state + + +def set_state(model, quantizer, state): + if quantizer is None: + model.load_state_dict(state) + else: + buf = io.BytesIO(zlib.decompress(state["compressed"])) + state = th.load(buf, "cpu") + quantizer.restore_quantized_state(state) + + return state + + +def save_state(state, path): + buf = io.BytesIO() + th.save(state, buf) + sig = hashlib.sha256(buf.getvalue()).hexdigest()[:8] + + path = path.parent / (path.stem + "-" + sig + path.suffix) + path.write_bytes(buf.getvalue()) + + +def save_model(model, quantizer, training_args, path): + args, kwargs = model._init_args_kwargs + klass = model.__class__ + + state = get_state(model, quantizer) + + save_to = path + package = { + 'klass': klass, + 'args': args, + 'kwargs': kwargs, + 'state': state, + 'training_args': training_args, + } + th.save(package, save_to) + + +def capture_init(init): + @functools.wraps(init) + def __init__(self, *args, **kwargs): + 
self._init_args_kwargs = (args, kwargs) + init(self, *args, **kwargs) + + return __init__ diff --git a/lib/infer/infer_libs/uvr5_pack/demucs/wav.py b/lib/infer/infer_libs/uvr5_pack/demucs/wav.py new file mode 100644 index 0000000000000000000000000000000000000000..a65c3b2ba5aacb1fcab3753f1f85ff7b8db7fc11 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/demucs/wav.py @@ -0,0 +1,174 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from collections import OrderedDict +import hashlib +import math +import json +from pathlib import Path + +import julius +import torch as th +from torch import distributed +import torchaudio as ta +from torch.nn import functional as F + +from .audio import convert_audio_channels +from .compressed import get_musdb_tracks + +MIXTURE = "mixture" +EXT = ".wav" + + +def _track_metadata(track, sources): + track_length = None + track_samplerate = None + for source in sources + [MIXTURE]: + file = track / f"{source}{EXT}" + info = ta.info(str(file)) + length = info.num_frames + if track_length is None: + track_length = length + track_samplerate = info.sample_rate + elif track_length != length: + raise ValueError( + f"Invalid length for file {file}: " + f"expecting {track_length} but got {length}.") + elif info.sample_rate != track_samplerate: + raise ValueError( + f"Invalid sample rate for file {file}: " + f"expecting {track_samplerate} but got {info.sample_rate}.") + if source == MIXTURE: + wav, _ = ta.load(str(file)) + wav = wav.mean(0) + mean = wav.mean().item() + std = wav.std().item() + + return {"length": length, "mean": mean, "std": std, "samplerate": track_samplerate} + + +def _build_metadata(path, sources): + meta = {} + path = Path(path) + for file in path.iterdir(): + meta[file.name] = _track_metadata(file, sources) + return meta + + +class Wavset: + def __init__( + self, + root, metadata, sources, + length=None, stride=None, normalize=True, + samplerate=44100, channels=2): + """ + Waveset (or mp3 set for that matter). Can be used to train + with arbitrary sources. Each track should be one folder inside of `path`. + The folder should contain files named `{source}.{ext}`. + Files will be grouped according to `sources` (each source is a list of + filenames). + + Sample rate and channels will be converted on the fly. + + `length` is the sample size to extract (in samples, not duration). + `stride` is how many samples to move by between each example. 
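The per-track example count `Wavset.__init__` derives below, worked for a 60 s track chunked into 10 s samples with a 1 s stride:

```python
import math

samplerate = 44100
track_length = 60 * samplerate
length, stride = 10 * samplerate, 1 * samplerate
# Same arithmetic as Wavset.__init__ below: one example per stride,
# plus the final (possibly zero-padded) chunk.
examples = int(math.ceil((track_length - length) / stride) + 1)
print(examples)  # 51
```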
+ """ + self.root = Path(root) + self.metadata = OrderedDict(metadata) + self.length = length + self.stride = stride or length + self.normalize = normalize + self.sources = sources + self.channels = channels + self.samplerate = samplerate + self.num_examples = [] + for name, meta in self.metadata.items(): + track_length = int(self.samplerate * meta['length'] / meta['samplerate']) + if length is None or track_length < length: + examples = 1 + else: + examples = int(math.ceil((track_length - self.length) / self.stride) + 1) + self.num_examples.append(examples) + + def __len__(self): + return sum(self.num_examples) + + def get_file(self, name, source): + return self.root / name / f"{source}{EXT}" + + def __getitem__(self, index): + for name, examples in zip(self.metadata, self.num_examples): + if index >= examples: + index -= examples + continue + meta = self.metadata[name] + num_frames = -1 + offset = 0 + if self.length is not None: + offset = int(math.ceil( + meta['samplerate'] * self.stride * index / self.samplerate)) + num_frames = int(math.ceil( + meta['samplerate'] * self.length / self.samplerate)) + wavs = [] + for source in self.sources: + file = self.get_file(name, source) + wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames) + wav = convert_audio_channels(wav, self.channels) + wavs.append(wav) + + example = th.stack(wavs) + example = julius.resample_frac(example, meta['samplerate'], self.samplerate) + if self.normalize: + example = (example - meta['mean']) / meta['std'] + if self.length: + example = example[..., :self.length] + example = F.pad(example, (0, self.length - example.shape[-1])) + return example + + +def get_wav_datasets(args, samples, sources): + sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8] + metadata_file = args.metadata / (sig + ".json") + train_path = args.wav / "train" + valid_path = args.wav / "valid" + if not metadata_file.is_file() and args.rank == 0: + train = _build_metadata(train_path, sources) + valid = _build_metadata(valid_path, sources) + json.dump([train, valid], open(metadata_file, "w")) + if args.world_size > 1: + distributed.barrier() + train, valid = json.load(open(metadata_file)) + train_set = Wavset(train_path, train, sources, + length=samples, stride=args.data_stride, + samplerate=args.samplerate, channels=args.audio_channels, + normalize=args.norm_wav) + valid_set = Wavset(valid_path, valid, [MIXTURE] + sources, + samplerate=args.samplerate, channels=args.audio_channels, + normalize=args.norm_wav) + return train_set, valid_set + + +def get_musdb_wav_datasets(args, samples, sources): + metadata_file = args.metadata / "musdb_wav.json" + root = args.musdb / "train" + if not metadata_file.is_file() and args.rank == 0: + metadata = _build_metadata(root, sources) + json.dump(metadata, open(metadata_file, "w")) + if args.world_size > 1: + distributed.barrier() + metadata = json.load(open(metadata_file)) + + train_tracks = get_musdb_tracks(args.musdb, is_wav=True, subsets=["train"], split="train") + metadata_train = {name: meta for name, meta in metadata.items() if name in train_tracks} + metadata_valid = {name: meta for name, meta in metadata.items() if name not in train_tracks} + train_set = Wavset(root, metadata_train, sources, + length=samples, stride=args.data_stride, + samplerate=args.samplerate, channels=args.audio_channels, + normalize=args.norm_wav) + valid_set = Wavset(root, metadata_valid, [MIXTURE] + sources, + samplerate=args.samplerate, channels=args.audio_channels, + normalize=args.norm_wav) + return 
train_set, valid_set diff --git a/lib/infer/infer_libs/uvr5_pack/infer_uvr5.py b/lib/infer/infer_libs/uvr5_pack/infer_uvr5.py new file mode 100644 index 0000000000000000000000000000000000000000..9b58f05ef69d1ea96ccf5d3d018b27acbb1c3b32 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/infer_uvr5.py @@ -0,0 +1,355 @@ +import os, sys, torch, warnings + +now_dir = os.getcwd() +sys.path.append(now_dir) + +warnings.filterwarnings("ignore") +import librosa +import numpy as np +from lib.uvr5_pack.lib_v5 import spec_utils +from lib.uvr5_pack.utils import inference +from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters +import soundfile as sf +from lib.uvr5_pack.lib_v5.nets_new import CascadedNet +from lib.uvr5_pack.lib_v5 import nets_61968KB as nets + + +class _audio_pre_: + def __init__(self, agg, model_path, device, is_half): + self.model_path = model_path + self.device = device + self.data = { + # Processing Options + "postprocess": False, + "tta": False, + # Constants + "window_size": 512, + "agg": agg, + "high_end_process": "mirroring", + } + mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json") + model = nets.CascadedASPPNet(mp.param["bins"] * 2) + cpk = torch.load(model_path, map_location="cpu") + model.load_state_dict(cpk) + model.eval() + if is_half: + model = model.half().to(device) + else: + model = model.to(device) + + self.mp = mp + self.model = model + + def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"): + if ins_root is None and vocal_root is None: + return "No save root." + name = os.path.basename(music_file) + if ins_root is not None: + os.makedirs(ins_root, exist_ok=True) + if vocal_root is not None: + os.makedirs(vocal_root, exist_ok=True) + X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} + bands_n = len(self.mp.param["band"]) + # print(bands_n) + for d in range(bands_n, 0, -1): + bp = self.mp.param["band"][d] + if d == bands_n: # high-end band + ( + X_wave[d], + _, + ) = librosa.core.load( + music_file, + bp["sr"], + False, + dtype=np.float32, + res_type=bp["res_type"], + ) + if X_wave[d].ndim == 1: + X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) + else: # lower bands + X_wave[d] = librosa.core.resample( + X_wave[d + 1], + self.mp.param["band"][d + 1]["sr"], + bp["sr"], + res_type=bp["res_type"], + ) + # Stft of wave source + X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( + X_wave[d], + bp["hl"], + bp["n_fft"], + self.mp.param["mid_side"], + self.mp.param["mid_side_b2"], + self.mp.param["reverse"], + ) + # pdb.set_trace() + if d == bands_n and self.data["high_end_process"] != "none": + input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( + self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] + ) + input_high_end = X_spec_s[d][ + :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : + ] + + X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) + aggresive_set = float(self.data["agg"] / 100) + aggressiveness = { + "value": aggresive_set, + "split_bin": self.mp.param["band"][1]["crop_stop"], + } + with torch.no_grad(): + pred, X_mag, X_phase = inference( + X_spec_m, self.device, self.model, aggressiveness, self.data + ) + # Postprocess + if self.data["postprocess"]: + pred_inv = np.clip(X_mag - pred, 0, np.inf) + pred = spec_utils.mask_silence(pred, pred_inv) + y_spec_m = pred * X_phase + v_spec_m = X_spec_m - y_spec_m + + if ins_root is not None: + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring( + 
self.data["high_end_process"], y_spec_m, input_high_end, self.mp + ) + wav_instrument = spec_utils.cmb_spectrogram_to_wave( + y_spec_m, self.mp, input_high_end_h, input_high_end_ + ) + else: + wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) + print("%s instruments done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + ins_root, + "instrument_{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) # + else: + path = os.path.join( + ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) + ) + sf.write( + path, + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path, path[:-4] + ".%s" % format) + ) + if vocal_root is not None: + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring( + self.data["high_end_process"], v_spec_m, input_high_end, self.mp + ) + wav_vocals = spec_utils.cmb_spectrogram_to_wave( + v_spec_m, self.mp, input_high_end_h, input_high_end_ + ) + else: + wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) + print("%s vocals done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + vocal_root, + "vocal_{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + else: + path = os.path.join( + vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) + ) + sf.write( + path, + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path, path[:-4] + ".%s" % format) + ) + + +class _audio_pre_new: + def __init__(self, agg, model_path, device, is_half): + self.model_path = model_path + self.device = device + self.data = { + # Processing Options + "postprocess": False, + "tta": False, + # Constants + "window_size": 512, + "agg": agg, + "high_end_process": "mirroring", + } + mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v3.json") + nout = 64 if "DeReverb" in model_path else 48 + model = CascadedNet(mp.param["bins"] * 2, nout) + cpk = torch.load(model_path, map_location="cpu") + model.load_state_dict(cpk) + model.eval() + if is_half: + model = model.half().to(device) + else: + model = model.to(device) + + self.mp = mp + self.model = model + + def _path_audio_( + self, music_file, vocal_root=None, ins_root=None, format="flac" + ): # 3个VR模型vocal和ins是反的 + if ins_root is None and vocal_root is None: + return "No save root." 
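+        # (The Chinese comment on the signature above means: "for these three
+        # VR models, vocals and instrumentals are swapped"; note that
+        # vocal_root comes before ins_root here, unlike in _audio_pre_.)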
+ name = os.path.basename(music_file) + if ins_root is not None: + os.makedirs(ins_root, exist_ok=True) + if vocal_root is not None: + os.makedirs(vocal_root, exist_ok=True) + X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} + bands_n = len(self.mp.param["band"]) + # print(bands_n) + for d in range(bands_n, 0, -1): + bp = self.mp.param["band"][d] + if d == bands_n: # high-end band + ( + X_wave[d], + _, + ) = librosa.core.load( + music_file, + bp["sr"], + False, + dtype=np.float32, + res_type=bp["res_type"], + ) + if X_wave[d].ndim == 1: + X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) + else: # lower bands + X_wave[d] = librosa.core.resample( + X_wave[d + 1], + self.mp.param["band"][d + 1]["sr"], + bp["sr"], + res_type=bp["res_type"], + ) + # Stft of wave source + X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( + X_wave[d], + bp["hl"], + bp["n_fft"], + self.mp.param["mid_side"], + self.mp.param["mid_side_b2"], + self.mp.param["reverse"], + ) + # pdb.set_trace() + if d == bands_n and self.data["high_end_process"] != "none": + input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( + self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] + ) + input_high_end = X_spec_s[d][ + :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : + ] + + X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) + aggresive_set = float(self.data["agg"] / 100) + aggressiveness = { + "value": aggresive_set, + "split_bin": self.mp.param["band"][1]["crop_stop"], + } + with torch.no_grad(): + pred, X_mag, X_phase = inference( + X_spec_m, self.device, self.model, aggressiveness, self.data + ) + # Postprocess + if self.data["postprocess"]: + pred_inv = np.clip(X_mag - pred, 0, np.inf) + pred = spec_utils.mask_silence(pred, pred_inv) + y_spec_m = pred * X_phase + v_spec_m = X_spec_m - y_spec_m + + if ins_root is not None: + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring( + self.data["high_end_process"], y_spec_m, input_high_end, self.mp + ) + wav_instrument = spec_utils.cmb_spectrogram_to_wave( + y_spec_m, self.mp, input_high_end_h, input_high_end_ + ) + else: + wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) + print("%s instruments done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + ins_root, + "instrument_{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) # + else: + path = os.path.join( + ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) + ) + sf.write( + path, + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path, path[:-4] + ".%s" % format) + ) + if vocal_root is not None: + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring( + self.data["high_end_process"], v_spec_m, input_high_end, self.mp + ) + wav_vocals = spec_utils.cmb_spectrogram_to_wave( + v_spec_m, self.mp, input_high_end_h, input_high_end_ + ) + else: + wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) + print("%s vocals done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + vocal_root, + "vocal_{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + else: + path = os.path.join( + vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) + ) + 
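+                # Note: the float waveform is scaled straight to int16 below;
+                # samples outside [-1, 1] may overflow instead of clipping.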
sf.write( + path, + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path, path[:-4] + ".%s" % format) + ) + + +if __name__ == "__main__": + device = "cuda" + is_half = True + model_path = "assets/uvr5_weights/DeEchoNormal.pth" + pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=is_half, agg=10) + audio_path = "雪雪伴奏对消HP5.wav" + save_path = "opt" + pre_fun._path_audio_(audio_path, save_path, save_path)
diff --git a/lib/infer/infer_libs/uvr5_pack/julius/__init__.py b/lib/infer/infer_libs/uvr5_pack/julius/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b4d96cc38ae6c0288a6bfa93f91e6f438af15e52 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/__init__.py @@ -0,0 +1,34 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2020 + +# flake8: noqa +""" +.. image:: ../logo.png + +Julius contains different Digital Signal Processing algorithms implemented +with PyTorch, so that they are differentiable and available on CUDA. +Note that all the modules implemented here can be used with TorchScript. + +For now, I have implemented: + +- `julius.resample`: fast sinc resampling. +- `julius.fftconv`: FFT based convolutions. +- `julius.lowpass`: FIR low pass filter banks. +- `julius.filters`: FIR high pass and band pass filters. +- `julius.bands`: Decomposition of a waveform signal over mel-scale frequency bands. + +Along with those, you might find useful utilities in: + +- `julius.core`: DSP related functions. +- `julius.utils`: Generic utilities. + + +Please check out [the Github repository](https://github.com/adefossez/julius) for more information. +For a verification of the speed and correctness of Julius, check the benchmark module `bench`. + + +This package is named in honor of +[Julius O. Smith](https://ccrma.stanford.edu/~jos/), +whose books and website were a gold mine of information for me to learn about DSP. Go check out his website if you want +to learn more about DSP. +"""
diff --git a/lib/infer/infer_libs/uvr5_pack/julius/bands.py b/lib/infer/infer_libs/uvr5_pack/julius/bands.py new file mode 100644 index 0000000000000000000000000000000000000000..ef2162440b69e960770aa7bf81b9aaec48a63243 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/bands.py @@ -0,0 +1,119 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2020 +""" +Decomposition of a signal over frequency bands in the waveform domain. +""" +from typing import Optional, Sequence +import torch + +from .core import mel_frequencies +from .lowpass import LowPassFilters +from .utils import simple_repr + + +class SplitBands(torch.nn.Module): + """ + Decomposes a signal over the given frequency bands in the waveform domain using + a cascade of low pass filters as implemented by `julius.lowpass.LowPassFilters`. + You can either specify the frequency cutoffs explicitly, or just the number of bands, + in which case the frequency cutoffs will be spread out evenly in mel scale. + + Args: + sample_rate (float): Sample rate of the input signal in Hz. + n_bands (int or None): number of bands, when not giving them explicitly with `cutoffs`. + In that case, the cutoff frequencies will be evenly spaced in mel-space. + cutoffs (list[float] or None): list of frequency cutoffs in Hz. + pad (bool): if True, appropriately pad the input with zero over the edge.
If `stride=1`, + the output will have the same length as the input. + zeros (float): Number of zero crossings to keep. See `LowPassFilters` for more information. + fft (bool or None): See `LowPassFilters` for more info. + + ..note:: + The sum of all the bands will always be the input signal. + + ..warning:: + Unlike `julius.lowpass.LowPassFilters`, the cutoff frequencies must be provided in Hz along + with the sample rate. + + Shape: + + - Input: `[*, T]` + - Output: `[B, *, T']`, with `T'=T` if `pad` is True. + If `n_bands` was provided, `B = n_bands` otherwise `B = len(cutoffs) + 1` + + >>> bands = SplitBands(sample_rate=128, n_bands=10) + >>> x = torch.randn(6, 4, 1024) + >>> list(bands(x).shape) + [10, 6, 4, 1024] + """ + + def __init__(self, sample_rate: float, n_bands: Optional[int] = None, + cutoffs: Optional[Sequence[float]] = None, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + super().__init__() + if (cutoffs is None) + (n_bands is None) != 1: + raise ValueError("You must provide either n_bands, or cutoffs, but not both.") + + self.sample_rate = sample_rate + self.n_bands = n_bands + self._cutoffs = list(cutoffs) if cutoffs is not None else None + self.pad = pad + self.zeros = zeros + self.fft = fft + + if cutoffs is None: + if n_bands is None: + raise ValueError("You must provide one of n_bands or cutoffs.") + if not n_bands >= 1: + raise ValueError(f"n_bands must be at least 1 (got {n_bands})") + cutoffs = mel_frequencies(n_bands + 1, 0, sample_rate / 2)[1:-1] + else: + if max(cutoffs) > 0.5 * sample_rate: + raise ValueError("A cutoff above sample_rate/2 does not make sense.") + if len(cutoffs) > 0: + self.lowpass = LowPassFilters( + [c / sample_rate for c in cutoffs], pad=pad, zeros=zeros, fft=fft) + else: + # Here I cannot make both TorchScript and MyPy happy. + # I miss the good old times, before all this madness was created. + self.lowpass = None # type: ignore + + def forward(self, input): + if self.lowpass is None: + return input[None] + lows = self.lowpass(input) + low = lows[0] + bands = [low] + for low_and_band in lows[1:]: + # Get a bandpass filter by subtracting lowpasses + band = low_and_band - low + bands.append(band) + low = low_and_band + # Last band is whatever is left in the signal + bands.append(input - low) + return torch.stack(bands) + + @property + def cutoffs(self): + if self._cutoffs is not None: + return self._cutoffs + elif self.lowpass is not None: + return [c * self.sample_rate for c in self.lowpass.cutoffs] + else: + return [] + + def __repr__(self): + return simple_repr(self, overrides={"cutoffs": self._cutoffs}) + + +def split_bands(signal: torch.Tensor, sample_rate: float, n_bands: Optional[int] = None, + cutoffs: Optional[Sequence[float]] = None, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + """ + Functional version of `SplitBands`, refer to this class for more information. + + >>> x = torch.randn(6, 4, 1024) + >>> list(split_bands(x, sample_rate=64, cutoffs=[12, 24]).shape) + [3, 6, 4, 1024] + """ + return SplitBands(sample_rate, n_bands, cutoffs, pad, zeros, fft).to(signal)(signal)
diff --git a/lib/infer/infer_libs/uvr5_pack/julius/core.py b/lib/infer/infer_libs/uvr5_pack/julius/core.py new file mode 100644 index 0000000000000000000000000000000000000000..6b750418424e76c9540663ac4b2a16005adaf422 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/core.py @@ -0,0 +1,122 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
+# Author: adefossez, 2020 +""" +Signal processing or PyTorch related utilities. +""" +import math +import typing as tp + +import torch +from torch.nn import functional as F + + +def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(x) / x + + __Warning__: the input is not multiplied by `pi`! + """ + return torch.where(x == 0, torch.tensor(1., device=x.device, dtype=x.dtype), torch.sin(x) / x) + + +def pad_to(tensor: torch.Tensor, target_length: int, mode: str = 'constant', value: float = 0): + """ + Pad the given tensor to the given length, with 0s on the right. + """ + return F.pad(tensor, (0, target_length - tensor.shape[-1]), mode=mode, value=value) + + +def hz_to_mel(freqs: torch.Tensor): + """ + Converts a Tensor of frequencies in hertz to the mel scale. + Uses the simple formula by O'Shaughnessy (1987). + + Args: + freqs (torch.Tensor): frequencies to convert. + + """ + return 2595 * torch.log10(1 + freqs / 700) + + +def mel_to_hz(mels: torch.Tensor): + """ + Converts a Tensor of mel scaled frequencies to Hertz. + Uses the simple formula by O'Shaughnessy (1987). + + Args: + mels (torch.Tensor): mel frequencies to convert. + """ + return 700 * (10**(mels / 2595) - 1) + + +def mel_frequencies(n_mels: int, fmin: float, fmax: float): + """ + Return frequencies that are evenly spaced in mel scale. + + Args: + n_mels (int): number of frequencies to return. + fmin (float): start from this frequency (in Hz). + fmax (float): finish at this frequency (in Hz). + + + """ + low = hz_to_mel(torch.tensor(float(fmin))).item() + high = hz_to_mel(torch.tensor(float(fmax))).item() + mels = torch.linspace(low, high, n_mels) + return mel_to_hz(mels) + + +def volume(x: torch.Tensor, floor=1e-8): + """ + Return the volume in dBFS. + """ + return torch.log10(floor + (x**2).mean(-1)) * 10 + + +def pure_tone(freq: float, sr: float = 128, dur: float = 4, device=None): + """ + Return a pure tone, i.e. cosine. + + Args: + freq (float): frequency (in Hz) + sr (float): sample rate (in Hz) + dur (float): duration (in seconds) + """ + time = torch.arange(int(sr * dur), device=device).float() / sr + return torch.cos(2 * math.pi * freq * time) + + +def unfold(input, kernel_size: int, stride: int): + """1D only unfolding similar to the one from PyTorch. + However PyTorch unfold is extremely slow. + + Given an input tensor of size `[*, T]` this will return + a tensor `[*, F, K]` with `K` the kernel size, and `F` the number + of frames. The i-th frame is a view onto `i * stride: i * stride + kernel_size`. + This will automatically pad the input to cover at least once all entries in `input`. + + Args: + input (Tensor): tensor for which to return the frames. + kernel_size (int): size of each frame. + stride (int): stride between each frame. + + Shape: + + - Inputs: `input` is `[*, T]` + - Output: `[*, F, kernel_size]` with `F = 1 + ceil((T - kernel_size) / stride)` + + + ..Warning:: unlike PyTorch unfold, this will pad the input + so that any position in `input` is covered by at least one frame. 
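+    A quick illustrative example (added here for clarity):
+
+    >>> x = torch.arange(8.).view(1, -1)
+    >>> unfold(x, kernel_size=4, stride=2).shape
+    torch.Size([1, 3, 4])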
+ """ + shape = list(input.shape) + length = shape.pop(-1) + n_frames = math.ceil((max(length, kernel_size) - kernel_size) / stride) + 1 + tgt_length = (n_frames - 1) * stride + kernel_size + padded = F.pad(input, (0, tgt_length - length)).contiguous() + strides: tp.List[int] = [] + for dim in range(padded.dim()): + strides.append(padded.stride(dim)) + assert strides.pop(-1) == 1, 'data should be contiguous' + strides = strides + [stride, 1] + return padded.as_strided(shape + [n_frames, kernel_size], strides) diff --git a/lib/infer/infer_libs/uvr5_pack/julius/fftconv.py b/lib/infer/infer_libs/uvr5_pack/julius/fftconv.py new file mode 100644 index 0000000000000000000000000000000000000000..1920e5369bb49b76eeea1832b7be2a0ddbc8db6b --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/fftconv.py @@ -0,0 +1,183 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2020 + +""" +Implementation of a FFT based 1D convolution in PyTorch. +While FFT is used in CUDNN for small kernel sizes, it is not the case for long ones, e.g. 512. +This module implements efficient FFT based convolutions for such convolutions. A typical +application is for evaluationg FIR filters with a long receptive field, typically +evaluated with a stride of 1. +""" +from typing import Optional + +import torch +try: + import torch.fft as new_fft +except ImportError: + new_fft = None # type: ignore +from torch.nn import functional as F + +from .core import pad_to, unfold +from .utils import simple_repr + + +# This is quite verbose, but sadly needed to make TorchScript happy. +def _new_rfft(x: torch.Tensor): + z = new_fft.rfft(x, dim=-1) + return torch.view_as_real(z) + + +def _old_rfft(x: torch.Tensor): + return torch.rfft(x, 1) # type: ignore + + +def _old_irfft(x: torch.Tensor, length: int): + result = torch.irfft(x, 1, signal_sizes=(length,)) # type: ignore + return result + + +def _new_irfft(x: torch.Tensor, length: int): + x = torch.view_as_complex(x) + return new_fft.irfft(x, length, dim=-1) + + +if new_fft is None: + _rfft = _old_rfft + _irfft = _old_irfft +else: + _rfft = _new_rfft + _irfft = _new_irfft + + +def _compl_mul_conjugate(a: torch.Tensor, b: torch.Tensor): + """ + Given a and b two tensors of dimension 4 + with the last dimension being the real and imaginary part, + returns a multiplied by the conjugate of b, the multiplication + being with respect to the second dimension. + + """ + # PyTorch 1.7 supports complex number, but not for all operations. + # Once the support is widespread, this can likely go away. + + op = "bcft,dct->bdft" + return torch.stack([ + torch.einsum(op, a[..., 0], b[..., 0]) + torch.einsum(op, a[..., 1], b[..., 1]), + torch.einsum(op, a[..., 1], b[..., 0]) - torch.einsum(op, a[..., 0], b[..., 1]) + ], + dim=-1) + + +def fft_conv1d( + input: torch.Tensor, weight: torch.Tensor, + bias: Optional[torch.Tensor] = None, stride: int = 1, padding: int = 0, + block_ratio: float = 5): + """ + Same as `torch.nn.functional.conv1d` but using FFT for the convolution. + Please check PyTorch documentation for more information. + + Args: + input (Tensor): input signal of shape `[B, C, T]`. + weight (Tensor): weight of the convolution `[D, C, K]` with `D` the number + of output channels. + bias (Tensor or None): if not None, bias term for the convolution. + stride (int): stride of convolution. + padding (int): padding to apply to the input. + block_ratio (float): can be tuned for speed. 
The input is split into chunks + with a size of `int(block_ratio * kernel_size)`. + + Shape: + + - Inputs: `input` is `[B, C, T]`, `weight` is `[D, C, K]` and bias is `[D]`. + - Output: `[B, D, T']` + + + ..note:: + This function is faster than `torch.nn.functional.conv1d` only in specific cases. + Typically, the kernel size should be of the order of 256 to see any real gain, + for a stride of 1. + + ..warning:: + Dilation and groups are not supported at the moment. This function might use + more memory than the default Conv1d implementation. + """ + input = F.pad(input, (padding, padding)) + batch, channels, length = input.shape + out_channels, _, kernel_size = weight.shape + + if length < kernel_size: + raise RuntimeError(f"Input should be at least as large as the kernel size {kernel_size}, " + f"but it is only {length} samples long.") + if block_ratio < 1: + raise RuntimeError("Block ratio must be greater than 1.") + + # We are going to process the input block by block, as for some reason it is faster + # and less memory intensive (I think the culprit is `torch.einsum`). + block_size: int = min(int(kernel_size * block_ratio), length) + fold_stride = block_size - kernel_size + 1 + weight = pad_to(weight, block_size) + weight_z = _rfft(weight) + + # We pad the input and extract the overlapping frames on which + # the filters are applied in the Fourier domain. + frames = unfold(input, block_size, fold_stride) + + frames_z = _rfft(frames) + out_z = _compl_mul_conjugate(frames_z, weight_z) + out = _irfft(out_z, block_size) + # The last bit is invalid, because FFT will do a circular convolution. + out = out[..., :-kernel_size + 1] + out = out.reshape(batch, out_channels, -1) + out = out[..., ::stride] + target_length = (length - kernel_size) // stride + 1 + out = out[..., :target_length] + if bias is not None: + out += bias[:, None] + return out + + +class FFTConv1d(torch.nn.Module): + """ + Same as `torch.nn.Conv1d` but based on `fft_conv1d`. + Please check PyTorch documentation for more information. + + Args: + in_channels (int): number of input channels. + out_channels (int): number of output channels. + kernel_size (int): kernel size of convolution. + stride (int): stride of convolution. + padding (int): padding to apply to the input. + bias (bool): if True, use a bias term. + + ..note:: + This module is faster than `torch.nn.Conv1d` only in specific cases. + Typically, `kernel_size` should be of the order of 256 to see any real gain, + for a stride of 1. + + ..warning:: + Dilation and groups are not supported at the moment. This module might use + more memory than the default Conv1d implementation.
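+
+    As a sanity check on the example below: with `T=1024`, `kernel_size=128`,
+    `stride=4` and `padding=0`, the output length is `(1024 - 128) // 4 + 1 = 225`.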
+ + >>> fftconv = FFTConv1d(12, 24, 128, 4) + >>> x = torch.randn(4, 12, 1024) + >>> print(list(fftconv(x).shape)) + [4, 24, 225] + """ + def __init__(self, in_channels: int, out_channels: int, kernel_size: int, + stride: int = 1, padding: int = 0, bias: bool = True): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + + conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, bias=bias) + self.weight = conv.weight + self.bias = conv.bias + + def forward(self, input: torch.Tensor): + return fft_conv1d( + input, self.weight, self.bias, self.stride, self.padding) + + def __repr__(self): + return simple_repr(self, overrides={"bias": self.bias is not None}) diff --git a/lib/infer/infer_libs/uvr5_pack/julius/filters.py b/lib/infer/infer_libs/uvr5_pack/julius/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..afabcc0158e4cf45d215174b4f946ca1b0e3acaa --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/filters.py @@ -0,0 +1,258 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2021 +""" +FIR windowed sinc highpass and bandpass filters. +Those are convenience wrappers around the filters defined in `julius.lowpass`. +""" + +from typing import Sequence, Optional + +import torch + +# Import all lowpass filters for consistency. +from .lowpass import lowpass_filter, lowpass_filters, LowPassFilter, LowPassFilters # noqa +from .utils import simple_repr + + +class HighPassFilters(torch.nn.Module): + """ + Bank of high pass filters. See `julius.lowpass.LowPassFilters` for more + details on the implementation. + + Args: + cutoffs (list[float]): list of cutoff frequencies, in [0, 0.5] expressed as `f/f_s` where + f_s is the samplerate and `f` is the cutoff frequency. + The upper limit is 0.5, because a signal sampled at `f_s` contains only + frequencies under `f_s / 2`. + stride (int): how much to decimate the output. Probably not a good idea + to do so with a high pass filters though... + pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`, + the output will have the same length as the input. + zeros (float): Number of zero crossings to keep. + Controls the receptive field of the Finite Impulse Response filter. + For filters with low cutoff frequency, e.g. 40Hz at 44.1kHz, + it is a bad idea to set this to a high value. + This is likely appropriate for most use. Lower values + will result in a faster filter, but with a slower attenuation around the + cutoff frequency. + fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions. + If False, uses PyTorch convolutions. If None, either one will be chosen automatically + depending on the effective filter size. + + + ..warning:: + All the filters will use the same filter size, aligned on the lowest + frequency provided. If you combine a lot of filters with very diverse frequencies, it might + be more efficient to split them over multiple modules with similar frequencies. + + Shape: + + - Input: `[*, T]` + - Output: `[F, *, T']`, with `T'=T` if `pad` is True and `stride` is 1, and + `F` is the numer of cutoff frequencies. 
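+
+    ..note::
+        The high pass outputs are computed as the input minus the corresponding
+        low pass outputs, so with `pad=True` and `stride=1`, `input == lows + highs`.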
+ + >>> highpass = HighPassFilters([1/4]) + >>> x = torch.randn(4, 12, 21, 1024) + >>> list(highpass(x).shape) + [1, 4, 12, 21, 1024] + """ + + def __init__(self, cutoffs: Sequence[float], stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + super().__init__() + self._lowpasses = LowPassFilters(cutoffs, stride, pad, zeros, fft) + + @property + def cutoffs(self): + return self._lowpasses.cutoffs + + @property + def stride(self): + return self._lowpasses.stride + + @property + def pad(self): + return self._lowpasses.pad + + @property + def zeros(self): + return self._lowpasses.zeros + + @property + def fft(self): + return self._lowpasses.fft + + def forward(self, input): + lows = self._lowpasses(input) + + # We need to extract the right portion of the input in case + # pad is False or stride > 1 + if self.pad: + start, end = 0, input.shape[-1] + else: + start = self._lowpasses.half_size + end = -start + input = input[..., start:end:self.stride] + highs = input - lows + return highs + + def __repr__(self): + return simple_repr(self) + + +class HighPassFilter(torch.nn.Module): + """ + Same as `HighPassFilters` but applies a single high pass filter. + + Shape: + + - Input: `[*, T]` + - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1. + + >>> highpass = HighPassFilter(1/4, stride=1) + >>> x = torch.randn(4, 124) + >>> list(highpass(x).shape) + [4, 124] + """ + + def __init__(self, cutoff: float, stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + super().__init__() + self._highpasses = HighPassFilters([cutoff], stride, pad, zeros, fft) + + @property + def cutoff(self): + return self._highpasses.cutoffs[0] + + @property + def stride(self): + return self._highpasses.stride + + @property + def pad(self): + return self._highpasses.pad + + @property + def zeros(self): + return self._highpasses.zeros + + @property + def fft(self): + return self._highpasses.fft + + def forward(self, input): + return self._highpasses(input)[0] + + def __repr__(self): + return simple_repr(self) + + +def highpass_filters(input: torch.Tensor, cutoffs: Sequence[float], + stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + """ + Functional version of `HighPassFilters`, refer to this class for more information. + """ + return HighPassFilters(cutoffs, stride, pad, zeros, fft).to(input)(input) + + +def highpass_filter(input: torch.Tensor, cutoff: float, + stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + """ + Functional version of `HighPassFilter`, refer to this class for more information. + Output will not have a dimension inserted in the front. + """ + return highpass_filters(input, [cutoff], stride, pad, zeros, fft)[0] + + +class BandPassFilter(torch.nn.Module): + """ + Single band pass filter, implemented as the difference of two lowpass filters. + + Args: + cutoff_low (float): lower cutoff frequency, in [0, 0.5] expressed as `f/f_s` where + f_s is the samplerate and `f` is the cutoff frequency. + The upper limit is 0.5, because a signal sampled at `f_s` contains only + frequencies under `f_s / 2`. + cutoff_high (float): higher cutoff frequency, in [0, 0.5] expressed as `f/f_s`. + This must be higher than `cutoff_low`. Note that due to the fact + that filters are not perfect, the output will be non-zero even if + cutoff_high == cutoff_low. + stride (int): how much to decimate the output. + pad (bool): if True, appropriately pad the input with zero over the edge.
If `stride=1`, + the output will have the same length as the input. + zeros (float): Number of zero crossings to keep. + Controls the receptive field of the Finite Impulse Response filter. + For filters with low cutoff frequency, e.g. 40Hz at 44.1kHz, + it is a bad idea to set this to a high value. + This is likely appropriate for most use. Lower values + will result in a faster filter, but with a slower attenuation around the + cutoff frequency. + fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions. + If False, uses PyTorch convolutions. If None, either one will be chosen automatically + depending on the effective filter size. + + + Shape: + + - Input: `[*, T]` + - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1. + + ..Note:: There is no BandPassFilters (bank of bandpasses) because its + signification would be the same as `julius.bands.SplitBands`. + + >>> bandpass = BandPassFilter(1/4, 1/3) + >>> x = torch.randn(4, 12, 21, 1024) + >>> list(bandpass(x).shape) + [4, 12, 21, 1024] + """ + + def __init__(self, cutoff_low: float, cutoff_high: float, stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + super().__init__() + if cutoff_low > cutoff_high: + raise ValueError(f"Lower cutoff {cutoff_low} should be less than " + f"higher cutoff {cutoff_high}.") + self._lowpasses = LowPassFilters([cutoff_low, cutoff_high], stride, pad, zeros, fft) + + @property + def cutoff_low(self): + return self._lowpasses.cutoffs[0] + + @property + def cutoff_high(self): + return self._lowpasses.cutoffs[1] + + @property + def stride(self): + return self._lowpasses.stride + + @property + def pad(self): + return self._lowpasses.pad + + @property + def zeros(self): + return self._lowpasses.zeros + + @property + def fft(self): + return self._lowpasses.fft + + def forward(self, input): + lows = self._lowpasses(input) + return lows[1] - lows[0] + + def __repr__(self): + return simple_repr(self) + + +def bandpass_filter(input: torch.Tensor, cutoff_low: float, cutoff_high: float, + stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + """ + Functional version of `BandPassfilter`, refer to this class for more information. + Output will not have a dimension inserted in the front. + """ + return BandPassFilter(cutoff_low, cutoff_high, stride, pad, zeros, fft).to(input)(input) diff --git a/lib/infer/infer_libs/uvr5_pack/julius/lowpass.py b/lib/infer/infer_libs/uvr5_pack/julius/lowpass.py new file mode 100644 index 0000000000000000000000000000000000000000..0eb46e382b20bfc2d93482f9f027986b863de6f0 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/lowpass.py @@ -0,0 +1,181 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2020 +""" +FIR windowed sinc lowpass filters. +""" + +import math +from typing import Sequence, Optional + +import torch +from torch.nn import functional as F + +from .core import sinc +from .fftconv import fft_conv1d +from .utils import simple_repr + + +class LowPassFilters(torch.nn.Module): + """ + Bank of low pass filters. Note that a high pass or band pass filter can easily + be implemented by substracting a same signal processed with low pass filters with different + frequencies (see `julius.bands.SplitBands` for instance). + This uses a windowed sinc filter, very similar to the one used in + `julius.resample`. 
However, because we do not change the sample rate here, + this filter can be much more efficiently implemented using the FFT convolution from + `julius.fftconv`. + + Args: + cutoffs (list[float]): list of cutoff frequencies, in [0, 0.5] expressed as `f/f_s` where + f_s is the samplerate and `f` is the cutoff frequency. + The upper limit is 0.5, because a signal sampled at `f_s` contains only + frequencies under `f_s / 2`. + stride (int): how much to decimate the output. Keep in mind that decimation + of the output is only acceptable if the cutoff frequency is under `1/ (2 * stride)` + of the original sampling rate. + pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`, + the output will have the same length as the input. + zeros (float): Number of zero crossings to keep. + Controls the receptive field of the Finite Impulse Response filter. + For lowpass filters with low cutoff frequency, e.g. 40Hz at 44.1kHz, + it is a bad idea to set this to a high value. + This is likely appropriate for most use. Lower values + will result in a faster filter, but with a slower attenuation around the + cutoff frequency. + fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions. + If False, uses PyTorch convolutions. If None, either one will be chosen automatically + depending on the effective filter size. + + + ..warning:: + All the filters will use the same filter size, aligned on the lowest + frequency provided. If you combine a lot of filters with very diverse frequencies, it might + be more efficient to split them over multiple modules with similar frequencies. + + ..note:: + A lowpass with a cutoff frequency of 0 is defined as the null function + by convention here. This allows for a highpass with a cutoff of 0 to + be equal to identity, as defined in `julius.filters.HighPassFilters`. + + Shape: + + - Input: `[*, T]` + - Output: `[F, *, T']`, with `T'=T` if `pad` is True and `stride` is 1, and + `F` is the numer of cutoff frequencies. + + >>> lowpass = LowPassFilters([1/4]) + >>> x = torch.randn(4, 12, 21, 1024) + >>> list(lowpass(x).shape) + [1, 4, 12, 21, 1024] + """ + + def __init__(self, cutoffs: Sequence[float], stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + super().__init__() + self.cutoffs = list(cutoffs) + if min(self.cutoffs) < 0: + raise ValueError("Minimum cutoff must be larger than zero.") + if max(self.cutoffs) > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.stride = stride + self.pad = pad + self.zeros = zeros + self.half_size = int(zeros / min([c for c in self.cutoffs if c > 0]) / 2) + if fft is None: + fft = self.half_size > 32 + self.fft = fft + window = torch.hann_window(2 * self.half_size + 1, periodic=False) + time = torch.arange(-self.half_size, self.half_size + 1) + filters = [] + for cutoff in cutoffs: + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * math.pi * time) + # Normalize filter to have sum = 1, otherwise we will have a small leakage + # of the constant component in the input signal. 
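+                # (A filter whose taps sum to one has unit gain at DC, so the
+                # mean level of the signal passes through unchanged.)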
+ filter_ /= filter_.sum() + filters.append(filter_) + self.register_buffer("filters", torch.stack(filters)[:, None]) + + def forward(self, input): + shape = list(input.shape) + input = input.view(-1, 1, shape[-1]) + if self.pad: + input = F.pad(input, (self.half_size, self.half_size), mode='replicate') + if self.fft: + out = fft_conv1d(input, self.filters, stride=self.stride) + else: + out = F.conv1d(input, self.filters, stride=self.stride) + shape.insert(0, len(self.cutoffs)) + shape[-1] = out.shape[-1] + return out.permute(1, 0, 2).reshape(shape) + + def __repr__(self): + return simple_repr(self) + + +class LowPassFilter(torch.nn.Module): + """ + Same as `LowPassFilters` but applies a single low pass filter. + + Shape: + + - Input: `[*, T]` + - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1. + + >>> lowpass = LowPassFilter(1/4, stride=2) + >>> x = torch.randn(4, 124) + >>> list(lowpass(x).shape) + [4, 62] + """ + + def __init__(self, cutoff: float, stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + super().__init__() + self._lowpasses = LowPassFilters([cutoff], stride, pad, zeros, fft) + + @property + def cutoff(self): + return self._lowpasses.cutoffs[0] + + @property + def stride(self): + return self._lowpasses.stride + + @property + def pad(self): + return self._lowpasses.pad + + @property + def zeros(self): + return self._lowpasses.zeros + + @property + def fft(self): + return self._lowpasses.fft + + def forward(self, input): + return self._lowpasses(input)[0] + + def __repr__(self): + return simple_repr(self) + + +def lowpass_filters(input: torch.Tensor, cutoffs: Sequence[float], + stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + """ + Functional version of `LowPassFilters`, refer to this class for more information. + """ + return LowPassFilters(cutoffs, stride, pad, zeros, fft).to(input)(input) + + +def lowpass_filter(input: torch.Tensor, cutoff: float, + stride: int = 1, pad: bool = True, + zeros: float = 8, fft: Optional[bool] = None): + """ + Same as `lowpass_filters` but with a single cutoff frequency. + Output will not have a dimension inserted in the front. + """ + return lowpass_filters(input, [cutoff], stride, pad, zeros, fft)[0] diff --git a/lib/infer/infer_libs/uvr5_pack/julius/resample.py b/lib/infer/infer_libs/uvr5_pack/julius/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..fd3b9b547d4c33ec7136d32e5f086420d0a72e14 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/resample.py @@ -0,0 +1,216 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2020 +""" +Differentiable, Pytorch based resampling. +Implementation of Julius O. Smith algorithm for resampling. +See https://ccrma.stanford.edu/~jos/resample/ for details. +This implementation is specially optimized for when new_sr / old_sr is a fraction +with a small numerator and denominator when removing the gcd (e.g. new_sr = 700, old_sr = 500). + +Very similar to [bmcfee/resampy](https://github.com/bmcfee/resampy) except this implementation +is optimized for the case mentioned before, while resampy is slower but more general. + +""" + +import math +from typing import Optional + +import torch +from torch.nn import functional as F + +from .core import sinc +from .utils import simple_repr + + +class ResampleFrac(torch.nn.Module): + """ + Resampling from the sample rate `old_sr` to `new_sr`. 
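+
+    Implemented as a polyphase filter bank: one windowed sinc kernel per output
+    phase, all applied with a single `F.conv1d` with stride `old_sr` (after
+    reduction by the GCD).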
+ """ + def __init__(self, old_sr: int, new_sr: int, zeros: int = 24, rolloff: float = 0.945): + """ + Args: + old_sr (int): sample rate of the input signal x. + new_sr (int): sample rate of the output. + zeros (int): number of zero crossing to keep in the sinc filter. + rolloff (float): use a lowpass filter that is `rolloff * new_sr / 2`, + to ensure sufficient margin due to the imperfection of the FIR filter used. + Lowering this value will reduce anti-aliasing, but will reduce some of the + highest frequencies. + + Shape: + + - Input: `[*, T]` + - Output: `[*, T']` with `T' = int(new_sr * T / old_sr) + + + .. caution:: + After dividing `old_sr` and `new_sr` by their GCD, both should be small + for this implementation to be fast. + + >>> import torch + >>> resample = ResampleFrac(4, 5) + >>> x = torch.randn(1000) + >>> print(len(resample(x))) + 1250 + """ + super().__init__() + if not isinstance(old_sr, int) or not isinstance(new_sr, int): + raise ValueError("old_sr and new_sr should be integers") + gcd = math.gcd(old_sr, new_sr) + self.old_sr = old_sr // gcd + self.new_sr = new_sr // gcd + self.zeros = zeros + self.rolloff = rolloff + + self._init_kernels() + + def _init_kernels(self): + if self.old_sr == self.new_sr: + return + + kernels = [] + sr = min(self.new_sr, self.old_sr) + # rolloff will perform antialiasing filtering by removing the highest frequencies. + # At first I thought I only needed this when downsampling, but when upsampling + # you will get edge artifacts without this, the edge is equivalent to zero padding, + # which will add high freq artifacts. + sr *= self.rolloff + + # The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor) + # using the sinc interpolation formula: + # x(t) = sum_i x[i] sinc(pi * old_sr * (i / old_sr - t)) + # We can then sample the function x(t) with a different sample rate: + # y[j] = x(j / new_sr) + # or, + # y[j] = sum_i x[i] sinc(pi * old_sr * (i / old_sr - j / new_sr)) + + # We see here that y[j] is the convolution of x[i] with a specific filter, for which + # we take an FIR approximation, stopping when we see at least `zeros` zeros crossing. + # But y[j+1] is going to have a different set of weights and so on, until y[j + new_sr]. + # Indeed: + # y[j + new_sr] = sum_i x[i] sinc(pi * old_sr * ((i / old_sr - (j + new_sr) / new_sr)) + # = sum_i x[i] sinc(pi * old_sr * ((i - old_sr) / old_sr - j / new_sr)) + # = sum_i x[i + old_sr] sinc(pi * old_sr * (i / old_sr - j / new_sr)) + # so y[j+new_sr] uses the same filter as y[j], but on a shifted version of x by `old_sr`. + # This will explain the F.conv1d after, with a stride of old_sr. + self._width = math.ceil(self.zeros * self.old_sr / sr) + # If old_sr is still big after GCD reduction, most filters will be very unbalanced, i.e., + # they will have a lot of almost zero values to the left or to the right... + # There is probably a way to evaluate those filters more efficiently, but this is kept for + # future work. + idx = torch.arange(-self._width, self._width + self.old_sr).float() + for i in range(self.new_sr): + t = (-i/self.new_sr + idx/self.old_sr) * sr + t = t.clamp_(-self.zeros, self.zeros) + t *= math.pi + window = torch.cos(t/self.zeros/2)**2 + kernel = sinc(t) * window + # Renormalize kernel to ensure a constant signal is preserved. 
+ kernel.div_(kernel.sum()) + kernels.append(kernel) + + self.register_buffer("kernel", torch.stack(kernels).view(self.new_sr, 1, -1)) + + def forward(self, x: torch.Tensor, output_length: Optional[int] = None, full: bool = False): + """ + Resample x. + Args: + x (Tensor): signal to resample, time should be the last dimension + output_length (None or int): This can be set to the desired output length + (last dimension). Allowed values are between 0 and + ceil(length * new_sr / old_sr). When None (default) is specified, the + floored output length will be used. In order to select the largest possible + size, use the `full` argument. + full (bool): return the longest possible output from the input. This can be useful + if you chain resampling operations, and want to give the `output_length` only + for the last one, while passing `full=True` to all the other ones. + """ + if self.old_sr == self.new_sr: + return x + shape = x.shape + length = x.shape[-1] + x = x.reshape(-1, length) + x = F.pad(x[:, None], (self._width, self._width + self.old_sr), mode='replicate') + ys = F.conv1d(x, self.kernel, stride=self.old_sr) # type: ignore + y = ys.transpose(1, 2).reshape(list(shape[:-1]) + [-1]) + + float_output_length = self.new_sr * length / self.old_sr + max_output_length = int(math.ceil(float_output_length)) + default_output_length = int(float_output_length) + if output_length is None: + output_length = max_output_length if full else default_output_length + elif output_length < 0 or output_length > max_output_length: + raise ValueError(f"output_length must be between 0 and {max_output_length}") + else: + if full: + raise ValueError("You cannot pass both full=True and output_length") + return y[..., :output_length] + + def __repr__(self): + return simple_repr(self) + + +def resample_frac(x: torch.Tensor, old_sr: int, new_sr: int, + zeros: int = 24, rolloff: float = 0.945, + output_length: Optional[int] = None, full: bool = False): + """ + Functional version of `ResampleFrac`, refer to its documentation for more information. + + ..warning:: + If you call this function repeatedly with the same sample rates, the + resampling kernel will be recomputed every time. For best performance, you should use + and cache an instance of `ResampleFrac`. + """ + return ResampleFrac(old_sr, new_sr, zeros, rolloff).to(x)(x, output_length, full) + + +# Easier implementations for downsampling and upsampling by a factor of 2 +# Kept for testing and reference + +def _kernel_upsample2_downsample2(zeros): + # Kernel for upsampling and downsampling by a factor of 2. Interestingly, + # it is the same kernel used for both. + win = torch.hann_window(4 * zeros + 1, periodic=False) + winodd = win[1::2] + t = torch.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros) + t *= math.pi + kernel = (sinc(t) * winodd).view(1, 1, -1) + return kernel + + +def _upsample2(x, zeros=24): + """ + Upsample x by a factor of two. The output will be exactly twice as long as the input. + Args: + x (Tensor): signal to upsample, time should be the last dimension + zeros (int): number of zero crossings to keep in the sinc filter. + + This function is kept only for reference; you should use the more generic `resample_frac` + instead. This function does not perform anti-aliasing filtering.
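+
+    A quick shape check:
+
+    >>> _upsample2(torch.zeros(100)).shape
+    torch.Size([200])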
+ """ + *other, time = x.shape + kernel = _kernel_upsample2_downsample2(zeros).to(x) + out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(*other, time) + y = torch.stack([x, out], dim=-1) + return y.view(*other, -1) + + +def _downsample2(x, zeros=24): + """ + Downsample x by a factor of two. The output length is half of the input, ceiled. + Args: + x (Tensor): signal to downsample, time should be the last dimension + zeros (int): number of zero crossing to keep in the sinc filter. + + This function is kept only for reference, you should use the more generic `resample_frac` + one. This function does not perform anti-aliasing filtering. + """ + if x.shape[-1] % 2 != 0: + x = F.pad(x, (0, 1)) + xeven = x[..., ::2] + xodd = x[..., 1::2] + *other, time = xodd.shape + kernel = _kernel_upsample2_downsample2(zeros).to(x) + out = xeven + F.conv1d(xodd.view(-1, 1, time), kernel, padding=zeros)[..., :-1].view( + *other, time) + return out.view(*other, -1).mul(0.5) diff --git a/lib/infer/infer_libs/uvr5_pack/julius/utils.py b/lib/infer/infer_libs/uvr5_pack/julius/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..944b973ad1a38700c1ba98ab7306c233cb87868d --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/julius/utils.py @@ -0,0 +1,101 @@ +# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. +# Author: adefossez, 2020 +""" +Non signal processing related utilities. +""" + +import inspect +import typing as tp +import sys +import time + + +def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None, + overrides: dict = {}): + """ + Return a simple representation string for `obj`. + If `attrs` is not None, it should be a list of attributes to include. + """ + params = inspect.signature(obj.__class__).parameters + attrs_repr = [] + if attrs is None: + attrs = list(params.keys()) + for attr in attrs: + display = False + if attr in overrides: + value = overrides[attr] + elif hasattr(obj, attr): + value = getattr(obj, attr) + else: + continue + if attr in params: + param = params[attr] + if param.default is inspect._empty or value != param.default: # type: ignore + display = True + else: + display = True + + if display: + attrs_repr.append(f"{attr}={value}") + return f"{obj.__class__.__name__}({','.join(attrs_repr)})" + + +class MarkdownTable: + """ + Simple MarkdownTable generator. The column titles should be large enough + for the lines content. This will right align everything. + + >>> import io # we use io purely for test purposes, default is sys.stdout. + >>> file = io.StringIO() + >>> table = MarkdownTable(["Item Name", "Price"], file=file) + >>> table.header(); table.line(["Honey", "5"]); table.line(["Car", "5,000"]) + >>> print(file.getvalue().strip()) # Strip for test purposes + | Item Name | Price | + |-----------|-------| + | Honey | 5 | + | Car | 5,000 | + """ + def __init__(self, columns, file=sys.stdout): + self.columns = columns + self.file = file + + def _writeln(self, line): + self.file.write("|" + "|".join(line) + "|\n") + + def header(self): + self._writeln(f" {col} " for col in self.columns) + self._writeln("-" * (len(col) + 2) for col in self.columns) + + def line(self, line): + out = [] + for val, col in zip(line, self.columns): + val = format(val, '>' + str(len(col))) + out.append(" " + val + " ") + self._writeln(out) + + +class Chrono: + """ + Measures ellapsed time, calling `torch.cuda.synchronize` if necessary. + `Chrono` instances can be used as context managers (e.g. with `with`). 
+ Upon exit of the block, you can access the duration of the block in seconds + with the `duration` attribute. + + >>> with Chrono() as chrono: + ... _ = sum(range(10_000)) + ... + >>> print(chrono.duration < 10) # Should be true unless on a really slow computer. + True + """ + def __init__(self): + self.duration = None + + def __enter__(self): + self._begin = time.time() + return self + + def __exit__(self, exc_type, exc_value, exc_tracebck): + import torch + if torch.cuda.is_available(): + torch.cuda.synchronize() + self.duration = time.time() - self._begin diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/layers_123821KB.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/layers_123821KB.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..926c8802fb5387b6aa47d8a54da8ac4a74cbdf97 Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/layers_123821KB.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/layers_new.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/layers_new.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e6e01c065a5cd8adc68c1501b78c0458b49c9fe Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/layers_new.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/model_param_init.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/model_param_init.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfdbd715834641ddc435e8865a221ab67d822d44 Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/model_param_init.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/nets_61968KB.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/nets_61968KB.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0e74e065da0c08e65ab8c48cd233fea3f4ce04e Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/nets_61968KB.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/nets_new.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/nets_new.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38e052f2bc9b175dd493bd3621141a9d7f2efde4 Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/nets_new.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/spec_utils.cpython-39.pyc b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/spec_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b523b9206e99d2703ff6fd73d2e74283d2d7ac1c Binary files /dev/null and b/lib/infer/infer_libs/uvr5_pack/lib_v5/__pycache__/spec_utils.cpython-39.pyc differ diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/dataset.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd01a174978d97180a897e40cb59ecadec1d12e --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/dataset.py @@ -0,0 +1,183 @@ +import os +import random + +import numpy as np +import torch +import torch.utils.data +from tqdm import tqdm + +from . 
import spec_utils + + +class VocalRemoverValidationSet(torch.utils.data.Dataset): + def __init__(self, patch_list): + self.patch_list = patch_list + + def __len__(self): + return len(self.patch_list) + + def __getitem__(self, idx): + path = self.patch_list[idx] + data = np.load(path) + + X, y = data["X"], data["y"] + + X_mag = np.abs(X) + y_mag = np.abs(y) + + return X_mag, y_mag + + +def make_pair(mix_dir, inst_dir): + input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] + + X_list = sorted( + [ + os.path.join(mix_dir, fname) + for fname in os.listdir(mix_dir) + if os.path.splitext(fname)[1] in input_exts + ] + ) + y_list = sorted( + [ + os.path.join(inst_dir, fname) + for fname in os.listdir(inst_dir) + if os.path.splitext(fname)[1] in input_exts + ] + ) + + filelist = list(zip(X_list, y_list)) + + return filelist + + +def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): + if split_mode == "random": + filelist = make_pair( + os.path.join(dataset_dir, "mixtures"), + os.path.join(dataset_dir, "instruments"), + ) + + random.shuffle(filelist) + + if len(val_filelist) == 0: + val_size = int(len(filelist) * val_rate) + train_filelist = filelist[:-val_size] + val_filelist = filelist[-val_size:] + else: + train_filelist = [ + pair for pair in filelist if list(pair) not in val_filelist + ] + elif split_mode == "subdirs": + if len(val_filelist) != 0: + raise ValueError( + "The `val_filelist` option is not available in `subdirs` mode" + ) + + train_filelist = make_pair( + os.path.join(dataset_dir, "training/mixtures"), + os.path.join(dataset_dir, "training/instruments"), + ) + + val_filelist = make_pair( + os.path.join(dataset_dir, "validation/mixtures"), + os.path.join(dataset_dir, "validation/instruments"), + ) + + return train_filelist, val_filelist + + +def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): + perm = np.random.permutation(len(X)) + for i, idx in enumerate(tqdm(perm)): + if np.random.uniform() < reduction_rate: + y[idx] = spec_utils.reduce_vocal_aggressively( + X[idx], y[idx], reduction_mask + ) + + if np.random.uniform() < 0.5: + # swap channel + X[idx] = X[idx, ::-1] + y[idx] = y[idx, ::-1] + if np.random.uniform() < 0.02: + # mono + X[idx] = X[idx].mean(axis=0, keepdims=True) + y[idx] = y[idx].mean(axis=0, keepdims=True) + if np.random.uniform() < 0.02: + # inst + X[idx] = y[idx] + + if np.random.uniform() < mixup_rate and i < len(perm) - 1: + lam = np.random.beta(mixup_alpha, mixup_alpha) + X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] + y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] + + return X, y + + +def make_padding(width, cropsize, offset): + left = offset + roi_size = cropsize - left * 2 + if roi_size == 0: + roi_size = cropsize + right = roi_size - (width % roi_size) + left + + return left, right, roi_size + + +def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): + len_dataset = patches * len(filelist) + + X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) + y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) + + for i, (X_path, y_path) in enumerate(tqdm(filelist)): + X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) + coef = np.max([np.abs(X).max(), np.abs(y).max()]) + X, y = X / coef, y / coef + + l, r, roi_size = make_padding(X.shape[2], cropsize, offset) + X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") + y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") + + starts = 
np.random.randint(0, X_pad.shape[2] - cropsize, patches) + ends = starts + cropsize + for j in range(patches): + idx = i * patches + j + X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] + y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] + + return X_dataset, y_dataset + + +def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): + patch_list = [] + patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( + cropsize, sr, hop_length, n_fft, offset + ) + os.makedirs(patch_dir, exist_ok=True) + + for i, (X_path, y_path) in enumerate(tqdm(filelist)): + basename = os.path.splitext(os.path.basename(X_path))[0] + + X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) + coef = np.max([np.abs(X).max(), np.abs(y).max()]) + X, y = X / coef, y / coef + + l, r, roi_size = make_padding(X.shape[2], cropsize, offset) + X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") + y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") + + len_dataset = int(np.ceil(X.shape[2] / roi_size)) + for j in range(len_dataset): + outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) + start = j * roi_size + if not os.path.exists(outpath): + np.savez( + outpath, + X=X_pad[:, :, start : start + cropsize], + y=y_pad[:, :, start : start + cropsize], + ) + patch_list.append(outpath) + + return VocalRemoverValidationSet(patch_list) diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers.py @@ -0,0 +1,118 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class SeperableConv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False, + ), + nn.Conv2d(nin, nout, kernel_size=1, bias=False), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) + + return h, skip + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + 
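# Dropout exists only when this Decoder was constructed with dropout=True. + 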
if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + bottle = self.bottleneck(out) + return bottle diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_123812KB .py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_123812KB .py new file mode 100644 index 0000000000000000000000000000000000000000..4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_123812KB .py @@ -0,0 +1,118 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class SeperableConv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False, + ), + nn.Conv2d(nin, nout, kernel_size=1, bias=False), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) + + return h, skip + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): + super(ASPPModule, 
self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + bottle = self.bottleneck(out) + return bottle diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_123821KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_123821KB.py new file mode 100644 index 0000000000000000000000000000000000000000..4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_123821KB.py @@ -0,0 +1,118 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class SeperableConv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False, + ), + nn.Conv2d(nin, nout, kernel_size=1, bias=False), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) + + return h, skip + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, 
activ=activ) + self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + bottle = self.bottleneck(out) + return bottle diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_33966KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_33966KB.py new file mode 100644 index 0000000000000000000000000000000000000000..9b127bc6427f5c60c8cf85603a3d8a093c3501c4 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_33966KB.py @@ -0,0 +1,126 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class SeperableConv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False, + ), + nn.Conv2d(nin, nout, kernel_size=1, bias=False), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) + + return h, skip + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], 
dilations[1], activ=activ + ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.conv6 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.conv7 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + feat6 = self.conv6(x) + feat7 = self.conv7(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) + bottle = self.bottleneck(out) + return bottle diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_537227KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_537227KB.py new file mode 100644 index 0000000000000000000000000000000000000000..9b127bc6427f5c60c8cf85603a3d8a093c3501c4 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_537227KB.py @@ -0,0 +1,126 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class SeperableConv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False, + ), + nn.Conv2d(nin, nout, kernel_size=1, bias=False), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) + + return h, skip + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], 
activ=activ + ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.conv6 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.conv7 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + feat6 = self.conv6(x) + feat7 = self.conv7(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) + bottle = self.bottleneck(out) + return bottle diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_537238KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_537238KB.py new file mode 100644 index 0000000000000000000000000000000000000000..9b127bc6427f5c60c8cf85603a3d8a093c3501c4 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_537238KB.py @@ -0,0 +1,126 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class SeperableConv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False, + ), + nn.Conv2d(nin, nout, kernel_size=1, bias=False), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) + + return h, skip + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + 
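# conv3-conv7 are depthwise-separable convolutions dilated to gather context at multiple scales. + # NOTE: conv5, conv6 and conv7 all reuse dilations[2]; dilations[3] and dilations[4] are never read. + 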
self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.conv6 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.conv7 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + feat6 = self.conv6(x) + feat7 = self.conv7(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) + bottle = self.bottleneck(out) + return bottle diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_new.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_new.py new file mode 100644 index 0000000000000000000000000000000000000000..44153b6a23399c6938affc61c71919eaa172bcee --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/layers_new.py @@ -0,0 +1,125 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import spec_utils + + +class Conv2DBNActiv(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, + nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(nout), + activ(), + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) + + def __call__(self, x): + h = self.conv1(x) + h = self.conv2(h) + + return h + + +class Decoder(nn.Module): + def __init__( + self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False + ): + super(Decoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + + h = self.conv1(x) + # h = self.conv2(h) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), + ) + self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) + self.conv3 = Conv2DBNActiv( + nin, nout, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = Conv2DBNActiv( + nin, nout, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = Conv2DBNActiv( + nin, nout, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = Conv2DBNActiv(nout * 5, 
nout, 1, 1, 0, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate( + self.conv1(x), size=(h, w), mode="bilinear", align_corners=True + ) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + out = self.bottleneck(out) + + if self.dropout is not None: + out = self.dropout(out) + + return out + + +class LSTMModule(nn.Module): + def __init__(self, nin_conv, nin_lstm, nout_lstm): + super(LSTMModule, self).__init__() + self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) + self.lstm = nn.LSTM( + input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True + ) + self.dense = nn.Sequential( + nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU() + ) + + def forward(self, x): + N, _, nbins, nframes = x.size() + h = self.conv(x)[:, 0] # N, nbins, nframes + h = h.permute(2, 0, 1) # nframes, N, nbins + h, _ = self.lstm(h) + h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins + h = h.reshape(nframes, N, 1, nbins) + h = h.permute(1, 2, 3, 0) + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/model_param_init.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/model_param_init.py new file mode 100644 index 0000000000000000000000000000000000000000..5d818dbee4d4490b2884b3346c20c9370c0810fc --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/model_param_init.py @@ -0,0 +1,68 @@ +import json +import pathlib + +default_param = {} +default_param["bins"] = 768 +default_param["unstable_bins"] = 9 # training only +default_param["reduction_bins"] = 762 # training only +default_param["sr"] = 44100 +default_param["pre_filter_start"] = 757 +default_param["pre_filter_stop"] = 768 +default_param["band"] = {} + + +default_param["band"][1] = { + "sr": 11025, + "hl": 128, + "n_fft": 960, + "crop_start": 0, + "crop_stop": 245, + "lpf_start": 61, # inference only + "res_type": "polyphase", +} + +default_param["band"][2] = { + "sr": 44100, + "hl": 512, + "n_fft": 1536, + "crop_start": 24, + "crop_stop": 547, + "hpf_start": 81, # inference only + "res_type": "sinc_best", +} + + +# json's object_pairs_hook hands over a list of (key, value) pairs; digit keys +# (the band numbers) are converted back to integers, since JSON stores all keys as strings. +def int_keys(d): + r = {} + for k, v in d: + if k.isdigit(): + k = int(k) + r[k] = v + return r + + +class ModelParameters(object): + def __init__(self, config_path=""): + if ".pth" == pathlib.Path(config_path).suffix: + import zipfile + + with zipfile.ZipFile(config_path, "r") as zf: + self.param = json.loads( + zf.read("param.json"), object_pairs_hook=int_keys + ) + elif ".json" == pathlib.Path(config_path).suffix: + with open(config_path, "r") as f: + self.param = json.loads(f.read(), object_pairs_hook=int_keys) + else: + self.param = default_param + + # Stereo/mid-side processing flags default to False when a preset omits them. + for k in [ + "mid_side", + "mid_side_b", + "mid_side_b2", + "stereo_w", + "stereo_n", + "reverse", + ]: + if k not in self.param: + self.param[k] = False diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json new file mode 100644 index 0000000000000000000000000000000000000000..72cb4499867ad2827185e85687f06fb73d33eced --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 16000, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 
16000, + "pre_filter_start": 1023, + "pre_filter_stop": 1024 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json new file mode 100644 index 0000000000000000000000000000000000000000..3c00ecf0a105e55a6a86a3c32db301a2635b5b41 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 32000, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "kaiser_fast" + } + }, + "sr": 32000, + "pre_filter_start": 1000, + "pre_filter_stop": 1021 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json new file mode 100644 index 0000000000000000000000000000000000000000..55666ac9a8d0547751fb4b4d3bffb1ee2c956913 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 33075, + "hl": 384, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 33075, + "pre_filter_start": 1000, + "pre_filter_stop": 1021 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json new file mode 100644 index 0000000000000000000000000000000000000000..665abe20eb3cc39fe0f8493dad8f25f6ef634a14 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 1024, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 1023, + "pre_filter_stop": 1024 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json new file mode 100644 index 0000000000000000000000000000000000000000..0e8b16f89b0231d06eabe8d2f7c2670c7caa2272 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json @@ -0,0 +1,19 @@ +{ + "bins": 256, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 256, + "n_fft": 512, + "crop_start": 0, + "crop_stop": 256, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 256, + "pre_filter_stop": 256 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json new file mode 100644 index 0000000000000000000000000000000000000000..3b38fcaf60ba204e03a47f5bd3f5bcfe75e1983a --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + 
"sr": 44100, + "pre_filter_start": 1023, + "pre_filter_stop": 1024 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json new file mode 100644 index 0000000000000000000000000000000000000000..630df3524e340f43a1ddb7b33ff02cc91fc1cb47 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 700, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 1023, + "pre_filter_stop": 700 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_32000.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_32000.json new file mode 100644 index 0000000000000000000000000000000000000000..ab9cf1150a818eb6252105408311be0a40d423b3 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_32000.json @@ -0,0 +1,30 @@ +{ + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 705, + "band": { + "1": { + "sr": 6000, + "hl": 66, + "n_fft": 512, + "crop_start": 0, + "crop_stop": 240, + "lpf_start": 60, + "lpf_stop": 118, + "res_type": "sinc_fastest" + }, + "2": { + "sr": 32000, + "hl": 352, + "n_fft": 1024, + "crop_start": 22, + "crop_stop": 505, + "hpf_start": 44, + "hpf_stop": 23, + "res_type": "sinc_medium" + } + }, + "sr": 32000, + "pre_filter_start": 710, + "pre_filter_stop": 731 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json new file mode 100644 index 0000000000000000000000000000000000000000..7faa216d7b49aeece24123dbdd868847a1dbc03c --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json @@ -0,0 +1,30 @@ +{ + "bins": 512, + "unstable_bins": 7, + "reduction_bins": 510, + "band": { + "1": { + "sr": 11025, + "hl": 160, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 192, + "lpf_start": 41, + "lpf_stop": 139, + "res_type": "sinc_fastest" + }, + "2": { + "sr": 44100, + "hl": 640, + "n_fft": 1024, + "crop_start": 10, + "crop_stop": 320, + "hpf_start": 47, + "hpf_stop": 15, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 510, + "pre_filter_stop": 512 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_48000.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_48000.json new file mode 100644 index 0000000000000000000000000000000000000000..7e78175052b09cb1a32345e54006475992712f9a --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_48000.json @@ -0,0 +1,30 @@ +{ + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 705, + "band": { + "1": { + "sr": 6000, + "hl": 66, + "n_fft": 512, + "crop_start": 0, + "crop_stop": 240, + "lpf_start": 60, + "lpf_stop": 240, + "res_type": "sinc_fastest" + }, + "2": { + "sr": 48000, + "hl": 528, + "n_fft": 1536, + "crop_start": 22, + "crop_stop": 505, + "hpf_start": 82, + "hpf_stop": 22, + "res_type": "sinc_medium" + } + }, + "sr": 48000, + "pre_filter_start": 710, + "pre_filter_stop": 731 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100.json new file mode 100644 
index 0000000000000000000000000000000000000000..d881d767ff83fbac0e18dfe2587ef16925b29b3c --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100.json @@ -0,0 +1,42 @@ +{ + "bins": 768, + "unstable_bins": 5, + "reduction_bins": 733, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 278, + "lpf_start": 28, + "lpf_stop": 140, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 256, + "n_fft": 768, + "crop_start": 14, + "crop_stop": 322, + "hpf_start": 70, + "hpf_stop": 14, + "lpf_start": 283, + "lpf_stop": 314, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 131, + "crop_stop": 313, + "hpf_start": 154, + "hpf_stop": 141, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 757, + "pre_filter_stop": 768 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json new file mode 100644 index 0000000000000000000000000000000000000000..77ec198573b19f36519a028a509767d30764c0e2 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json @@ -0,0 +1,43 @@ +{ + "mid_side": true, + "bins": 768, + "unstable_bins": 5, + "reduction_bins": 733, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 278, + "lpf_start": 28, + "lpf_stop": 140, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 256, + "n_fft": 768, + "crop_start": 14, + "crop_stop": 322, + "hpf_start": 70, + "hpf_stop": 14, + "lpf_start": 283, + "lpf_stop": 314, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 131, + "crop_stop": 313, + "hpf_start": 154, + "hpf_stop": 141, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 757, + "pre_filter_stop": 768 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json new file mode 100644 index 0000000000000000000000000000000000000000..85ee8a7d44541c9176e85ea3dce8728d34990938 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json @@ -0,0 +1,43 @@ +{ + "mid_side_b2": true, + "bins": 640, + "unstable_bins": 7, + "reduction_bins": 565, + "band": { + "1": { + "sr": 11025, + "hl": 108, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 187, + "lpf_start": 92, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 216, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 212, + "hpf_start": 68, + "hpf_stop": 34, + "lpf_start": 174, + "lpf_stop": 209, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 432, + "n_fft": 640, + "crop_start": 66, + "crop_stop": 307, + "hpf_start": 86, + "hpf_stop": 72, + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 639, + "pre_filter_stop": 640 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json new file mode 100644 index 0000000000000000000000000000000000000000..df123754204372aa50d464fbe9102a401f48cc73 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json @@ -0,0 +1,54 @@ +{ + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + 
"lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json new file mode 100644 index 0000000000000000000000000000000000000000..e91b699eb63d3382c3b9e9edf46d40ed91d6122b --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json @@ -0,0 +1,55 @@ +{ + "bins": 768, + "unstable_bins": 7, + "mid_side": true, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json new file mode 100644 index 0000000000000000000000000000000000000000..f852f280ec9d98fc1b65cec688290eaafec61b84 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json @@ -0,0 +1,55 @@ +{ + "mid_side_b": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json new file mode 100644 index 0000000000000000000000000000000000000000..f852f280ec9d98fc1b65cec688290eaafec61b84 --- 
/dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json @@ -0,0 +1,55 @@ +{ + "mid_side_b": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json new file mode 100644 index 0000000000000000000000000000000000000000..7a07d5541bd83dc1caa20b531c3b43a2ffccac88 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json @@ -0,0 +1,55 @@ +{ + "reverse": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json new file mode 100644 index 0000000000000000000000000000000000000000..ba0cf342106de793e6ec3e876854c7fd451fbf76 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json @@ -0,0 +1,55 @@ +{ + "stereo_w": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, 
+ "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json new file mode 100644 index 0000000000000000000000000000000000000000..33281a0cf9916fc33558ddfda7a0287a2547faf4 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json @@ -0,0 +1,54 @@ +{ + "bins": 672, + "unstable_bins": 8, + "reduction_bins": 637, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, + "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json new file mode 100644 index 0000000000000000000000000000000000000000..2e5c770fe188779bf6b0873190b7a324d6a867b2 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json @@ -0,0 +1,55 @@ +{ + "bins": 672, + "unstable_bins": 8, + "reduction_bins": 637, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, + "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "convert_channels": "stereo_n", + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v3.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v3.json new file mode 100644 index 0000000000000000000000000000000000000000..edb908b8853c6359d1e98ae381888d1a9906ca0f --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v3.json @@ -0,0 +1,54 @@ +{ + "bins": 672, + "unstable_bins": 8, + "reduction_bins": 530, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, 
+ "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/ensemble.json b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/ensemble.json new file mode 100644 index 0000000000000000000000000000000000000000..ee69beb46fc82f34619c5e48761e329fcabbbd00 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/ensemble.json @@ -0,0 +1,43 @@ +{ + "mid_side_b2": true, + "bins": 1280, + "unstable_bins": 7, + "reduction_bins": 565, + "band": { + "1": { + "sr": 11025, + "hl": 108, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 374, + "lpf_start": 92, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 216, + "n_fft": 1536, + "crop_start": 0, + "crop_stop": 424, + "hpf_start": 68, + "hpf_stop": 34, + "lpf_start": 348, + "lpf_stop": 418, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 432, + "n_fft": 1280, + "crop_start": 132, + "crop_stop": 614, + "hpf_start": 172, + "hpf_stop": 144, + "res_type": "polyphase" + } + }, + "sr": 44100, + "pre_filter_start": 1280, + "pre_filter_stop": 1280 +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets.py new file mode 100644 index 0000000000000000000000000000000000000000..42d7807ae4d72d8a5431b62e7ae468f92436e61f --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets.py @@ -0,0 +1,121 @@ +import layers +import torch +import torch.nn.functional as F +from torch import nn + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 16) + self.stg1_high_band_net = BaseASPPNet(2, 16) + + self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(8, 16) + + self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(16, 32) + + self.out = nn.Conv2d(32, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + 
self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_123812KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_123812KB.py new file mode 100644 index 0000000000000000000000000000000000000000..167d4cb2198863cf43e93440f7e63c5342fc7605 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_123812KB.py @@ -0,0 +1,122 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_123821KB as layers + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 32) + self.stg1_high_band_net = BaseASPPNet(2, 32) + + self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(16, 32) + + self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(32, 64) + + self.out = nn.Conv2d(64, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], 
dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_123821KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_123821KB.py new file mode 100644 index 0000000000000000000000000000000000000000..167d4cb2198863cf43e93440f7e63c5342fc7605 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_123821KB.py @@ -0,0 +1,122 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_123821KB as layers + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 32) + self.stg1_high_band_net = BaseASPPNet(2, 32) + + self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(16, 32) + + self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(32, 64) + + self.out = nn.Conv2d(64, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, 
aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_33966KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_33966KB.py new file mode 100644 index 0000000000000000000000000000000000000000..73a5b836177b706c306e27875f8391c1aed4b948 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_33966KB.py @@ -0,0 +1,122 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_33966KB as layers + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 16) + self.stg1_high_band_net = BaseASPPNet(2, 16) + + self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(8, 16) + + self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(16, 32) + + self.out = nn.Conv2d(32, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = 
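
At inference time the optional aggressiveness dict sharpens the sigmoid mask by exponentiation: a gentler exponent (value / 3) below split_bin and the full exponent above it. Since the mask lies in (0, 1), a larger exponent pushes values toward 0, i.e. suppresses more of the mixture in the high bins. A standalone sketch with illustrative numbers:

    import torch

    mask = torch.sigmoid(torch.randn(1, 2, 1025, 256))  # mask values in (0, 1)
    aggressiveness = {"value": 0.3, "split_bin": 512}  # illustrative settings
    sb, v = aggressiveness["split_bin"], aggressiveness["value"]
    mask[:, :, :sb] = torch.pow(mask[:, :, :sb], 1 + v / 3)  # mild below the split
    mask[:, :, sb:] = torch.pow(mask[:, :, sb:], 1 + v)  # stronger above it
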
torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537227KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537227KB.py new file mode 100644 index 0000000000000000000000000000000000000000..9bb1df1ee93d3af49725f60ac0b6052e057c6872 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537227KB.py @@ -0,0 +1,122 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_537238KB as layers + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 64) + self.stg1_high_band_net = BaseASPPNet(2, 64) + + self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(32, 64) + + self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(64, 128) + + self.out = nn.Conv2d(128, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, 
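
The nets crop their input to max_bin = n_fft // 2 frequency bins but must emit output_bin = n_fft // 2 + 1, so the missing top bin is restored by replicate-padding the frequency axis. With a 4-tuple, F.pad pads the last dimension (time) with the first pair and the second-to-last (frequency) with the second pair. A minimal check:

    import torch
    import torch.nn.functional as F

    n_fft = 2048
    mask = torch.rand(1, 2, n_fft // 2, 100)  # cropped to max_bin frequency bins
    output_bin = n_fft // 2 + 1
    mask = F.pad(mask, pad=(0, 0, 0, output_bin - mask.size(2)), mode="replicate")
    assert mask.size(2) == output_bin  # top bin copied from the highest row
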
self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537238KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537238KB.py new file mode 100644 index 0000000000000000000000000000000000000000..9bb1df1ee93d3af49725f60ac0b6052e057c6872 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537238KB.py @@ -0,0 +1,122 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_537238KB as layers + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 64) + self.stg1_high_band_net = BaseASPPNet(2, 64) + + self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(32, 64) + + self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(64, 128) + + self.out = nn.Conv2d(128, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + 
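
In training mode the forward pass also returns the two intermediate masks applied to the mix (aux1 * mix, aux2 * mix), a deep-supervision pattern that gives the early stages their own gradient signal. The training loop itself is not part of this diff; a hypothetical loss with down-weighted auxiliary terms might look like:

    import torch
    import torch.nn.functional as F

    # Hypothetical shapes and weights; the actual training code is not in this diff.
    pred, aux1, aux2 = (torch.rand(1, 2, 1025, 64) for _ in range(3))
    target = torch.rand(1, 2, 1025, 64)
    loss = (
        F.l1_loss(pred, target)
        + 0.2 * F.l1_loss(aux1, target)  # assumed auxiliary weight
        + 0.2 * F.l1_loss(aux2, target)  # assumed auxiliary weight
    )
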
aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_61968KB.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_61968KB.py new file mode 100644 index 0000000000000000000000000000000000000000..167d4cb2198863cf43e93440f7e63c5342fc7605 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_61968KB.py @@ -0,0 +1,122 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_123821KB as layers + + +class BaseASPPNet(nn.Module): + def __init__(self, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + + +class CascadedASPPNet(nn.Module): + def __init__(self, n_fft): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(2, 32) + self.stg1_high_band_net = BaseASPPNet(2, 32) + + self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) + self.stg2_full_band_net = BaseASPPNet(16, 32) + + self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) + self.stg3_full_band_net = BaseASPPNet(32, 64) + + self.out = nn.Conv2d(64, 2, 1, bias=False) + self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) + self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x, aggressiveness=None): + mix = x.detach() + x = x.clone() + + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat( + [ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]), + ], + dim=2, + ) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, 
self.output_bin - aux1.size()[2]), + mode="replicate", + ) + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode="replicate", + ) + return mask * mix, aux1 * mix, aux2 * mix + else: + if aggressiveness: + mask[:, :, : aggressiveness["split_bin"]] = torch.pow( + mask[:, :, : aggressiveness["split_bin"]], + 1 + aggressiveness["value"] / 3, + ) + mask[:, :, aggressiveness["split_bin"] :] = torch.pow( + mask[:, :, aggressiveness["split_bin"] :], + 1 + aggressiveness["value"], + ) + + return mask * mix + + def predict(self, x_mag, aggressiveness=None): + h = self.forward(x_mag, aggressiveness) + + if self.offset > 0: + h = h[:, :, :, self.offset : -self.offset] + assert h.size()[3] > 0 + + return h diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_new.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_new.py new file mode 100644 index 0000000000000000000000000000000000000000..1c0f4fa96d921e979fe31bd4151701b7783fbcea --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_new.py @@ -0,0 +1,133 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from . import layers_new + + +class BaseNet(nn.Module): + def __init__( + self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6)) + ): + super(BaseNet, self).__init__() + self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) + self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) + self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) + self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) + self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) + + self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) + + self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) + self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) + self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) + self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) + self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) + + def __call__(self, x): + e1 = self.enc1(x) + e2 = self.enc2(e1) + e3 = self.enc3(e2) + e4 = self.enc4(e3) + e5 = self.enc5(e4) + + h = self.aspp(e5) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = torch.cat([h, self.lstm_dec2(h)], dim=1) + h = self.dec1(h, e1) + + return h + + +class CascadedNet(nn.Module): + def __init__(self, n_fft, nout=32, nout_lstm=128): + super(CascadedNet, self).__init__() + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + self.nin_lstm = self.max_bin // 2 + self.offset = 64 + + self.stg1_low_band_net = nn.Sequential( + BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), + layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), + ) + + self.stg1_high_band_net = BaseNet( + 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2 + ) + + self.stg2_low_band_net = nn.Sequential( + BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), + layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), + ) + self.stg2_high_band_net = BaseNet( + nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2 + ) + + self.stg3_full_band_net = BaseNet( + 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm + ) + + self.out = nn.Conv2d(nout, 2, 1, bias=False) + self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) + + def forward(self, x): + x = x[:, :, : self.max_bin] + + bandw = x.size()[2] // 2 + l1_in = x[:, :, :bandw] + h1_in = x[:, :, bandw:] + l1 = self.stg1_low_band_net(l1_in) + h1 = 
self.stg1_high_band_net(h1_in) + aux1 = torch.cat([l1, h1], dim=2) + + l2_in = torch.cat([l1_in, l1], dim=1) + h2_in = torch.cat([h1_in, h1], dim=1) + l2 = self.stg2_low_band_net(l2_in) + h2 = self.stg2_high_band_net(h2_in) + aux2 = torch.cat([l2, h2], dim=2) + + f3_in = torch.cat([x, aux1, aux2], dim=1) + f3 = self.stg3_full_band_net(f3_in) + + mask = torch.sigmoid(self.out(f3)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode="replicate", + ) + + if self.training: + aux = torch.cat([aux1, aux2], dim=1) + aux = torch.sigmoid(self.aux_out(aux)) + aux = F.pad( + input=aux, + pad=(0, 0, 0, self.output_bin - aux.size()[2]), + mode="replicate", + ) + return mask, aux + else: + return mask + + def predict_mask(self, x): + mask = self.forward(x) + + if self.offset > 0: + mask = mask[:, :, :, self.offset : -self.offset] + assert mask.size()[3] > 0 + + return mask + + def predict(self, x, aggressiveness=None): + mask = self.forward(x) + pred_mag = x * mask + + if self.offset > 0: + pred_mag = pred_mag[:, :, :, self.offset : -self.offset] + assert pred_mag.size()[3] > 0 + + return pred_mag diff --git a/lib/infer/infer_libs/uvr5_pack/lib_v5/spec_utils.py b/lib/infer/infer_libs/uvr5_pack/lib_v5/spec_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..15a19363691cfd957a59bf15e6977400afc1f557 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/lib_v5/spec_utils.py @@ -0,0 +1,671 @@ +import hashlib +import json +import math +import os + +import librosa +import numpy as np +import soundfile as sf +from tqdm import tqdm + + +def crop_center(h1, h2): + h1_shape = h1.size() + h2_shape = h2.size() + + if h1_shape[3] == h2_shape[3]: + return h1 + elif h1_shape[3] < h2_shape[3]: + raise ValueError("h1_shape[3] must be greater than h2_shape[3]") + + # s_freq = (h2_shape[2] - h1_shape[2]) // 2 + # e_freq = s_freq + h1_shape[2] + s_time = (h1_shape[3] - h2_shape[3]) // 2 + e_time = s_time + h2_shape[3] + h1 = h1[:, :, :, s_time:e_time] + + return h1 + + +def wave_to_spectrogram( + wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False +): + if reverse: + wave_left = np.flip(np.asfortranarray(wave[0])) + wave_right = np.flip(np.asfortranarray(wave[1])) + elif mid_side: + wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) + wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) + elif mid_side_b2: + wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) + wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) + else: + wave_left = np.asfortranarray(wave[0]) + wave_right = np.asfortranarray(wave[1]) + + spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) + spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) + + spec = np.asfortranarray([spec_left, spec_right]) + + return spec + + +def wave_to_spectrogram_mt( + wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False +): + import threading + + if reverse: + wave_left = np.flip(np.asfortranarray(wave[0])) + wave_right = np.flip(np.asfortranarray(wave[1])) + elif mid_side: + wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) + wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) + elif mid_side_b2: + wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) + wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) + else: + wave_left = np.asfortranarray(wave[0]) + wave_right = np.asfortranarray(wave[1]) + + def run_thread(**kwargs): + global spec_left + spec_left = 
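
With mid_side=True, wave_to_spectrogram replaces left/right with mid = (L + R) / 2 and side = L - R before the STFT, and spectrogram_to_wave later inverts this with L = mid + side / 2, R = mid - side / 2. A quick numpy round-trip check of that encode/decode pair:

    import numpy as np

    wave = np.random.randn(2, 44100)  # stereo (L, R)
    mid = (wave[0] + wave[1]) / 2
    side = wave[0] - wave[1]
    decoded = np.stack([mid + side / 2, mid - side / 2])
    assert np.allclose(decoded, wave)  # lossless round trip
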
librosa.stft(**kwargs)
+
+    thread = threading.Thread(
+        target=run_thread,
+        kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
+    )
+    thread.start()
+    spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
+    thread.join()
+
+    spec = np.asfortranarray([spec_left, spec_right])
+
+    return spec
+
+
+def combine_spectrograms(specs, mp):
+    l = min([specs[i].shape[2] for i in specs])
+    spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
+    offset = 0
+    bands_n = len(mp.param["band"])
+
+    for d in range(1, bands_n + 1):
+        h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
+        spec_c[:, offset : offset + h, :l] = specs[d][
+            :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
+        ]
+        offset += h
+
+    if offset > mp.param["bins"]:
+        raise ValueError("Too many bins")
+
+    # lowpass filter
+    if (
+        mp.param["pre_filter_start"] > 0
+    ):  # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
+        if bands_n == 1:
+            spec_c = fft_lp_filter(
+                spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
+            )
+        else:
+            gp = 1
+            for b in range(
+                mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
+            ):
+                g = math.pow(
+                    10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
+                )
+                gp = g
+                spec_c[:, b, :] *= g
+
+    return np.asfortranarray(spec_c)
+
+
+def spectrogram_to_image(spec, mode="magnitude"):
+    if mode == "magnitude":
+        if np.iscomplexobj(spec):
+            y = np.abs(spec)
+        else:
+            y = spec
+        y = np.log10(y**2 + 1e-8)
+    elif mode == "phase":
+        if np.iscomplexobj(spec):
+            y = np.angle(spec)
+        else:
+            y = spec
+
+    y -= y.min()
+    y *= 255 / y.max()
+    img = np.uint8(y)
+
+    if y.ndim == 3:
+        img = img.transpose(1, 2, 0)
+        img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
+
+    return img
+
+
+def reduce_vocal_aggressively(X, y, softmask):
+    v = X - y
+    y_mag_tmp = np.abs(y)
+    v_mag_tmp = np.abs(v)
+
+    v_mask = v_mag_tmp > y_mag_tmp
+    y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
+
+    return y_mag * np.exp(1.0j * np.angle(y))
+
+
+def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
+    if min_range < fade_size * 2:
+        raise ValueError("min_range must be >= fade_size * 2")
+
+    mag = mag.copy()
+
+    idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
+    starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
+    ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
+    uninformative = np.where(ends - starts > min_range)[0]
+    if len(uninformative) > 0:
+        starts = starts[uninformative]
+        ends = ends[uninformative]
+        old_e = None
+        for s, e in zip(starts, ends):
+            if old_e is not None and s - old_e < fade_size:
+                s = old_e - fade_size * 2
+
+            if s != 0:
+                weight = np.linspace(0, 1, fade_size)
+                mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
+            else:
+                s -= fade_size
+
+            if e != mag.shape[2]:
+                weight = np.linspace(1, 0, fade_size)
+                mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
+            else:
+                e += fade_size
+
+            mag[:, :, s + fade_size : e - fade_size] += ref[
+                :, :, s + fade_size : e - fade_size
+            ]
+            old_e = e
+
+    return mag
+
+
+def align_wave_head_and_tail(a, b):
+    l = min([a[0].size, b[0].size])
+
+    return a[:l, :l], b[:l, :l]
+
+
+def cache_or_load(mix_path, inst_path, mp):
+    mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
+    inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
+
+    cache_dir = "mph{}".format(
+        hashlib.sha1(json.dumps(mp.param,
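
cache_or_load keys its on-disk spectrogram cache with a SHA-1 digest of the JSON-serialized model parameters; sort_keys=True makes the digest stable regardless of dict ordering. The same idea in isolation:

    import hashlib
    import json

    param = {"bins": 672, "sr": 44100}  # any JSON-serializable params dict
    digest = hashlib.sha1(
        json.dumps(param, sort_keys=True).encode("utf-8")
    ).hexdigest()
    cache_dir = "mph{}".format(digest)  # one cache directory per parameter set
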
sort_keys=True).encode("utf-8")).hexdigest() + ) + mix_cache_dir = os.path.join("cache", cache_dir) + inst_cache_dir = os.path.join("cache", cache_dir) + + os.makedirs(mix_cache_dir, exist_ok=True) + os.makedirs(inst_cache_dir, exist_ok=True) + + mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") + inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") + + if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): + X_spec_m = np.load(mix_cache_path) + y_spec_m = np.load(inst_cache_path) + else: + X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} + + for d in range(len(mp.param["band"]), 0, -1): + bp = mp.param["band"][d] + + if d == len(mp.param["band"]): # high-end band + X_wave[d], _ = librosa.load( + mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"] + ) + y_wave[d], _ = librosa.load( + inst_path, + bp["sr"], + False, + dtype=np.float32, + res_type=bp["res_type"], + ) + else: # lower bands + X_wave[d] = librosa.resample( + X_wave[d + 1], + mp.param["band"][d + 1]["sr"], + bp["sr"], + res_type=bp["res_type"], + ) + y_wave[d] = librosa.resample( + y_wave[d + 1], + mp.param["band"][d + 1]["sr"], + bp["sr"], + res_type=bp["res_type"], + ) + + X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) + + X_spec_s[d] = wave_to_spectrogram( + X_wave[d], + bp["hl"], + bp["n_fft"], + mp.param["mid_side"], + mp.param["mid_side_b2"], + mp.param["reverse"], + ) + y_spec_s[d] = wave_to_spectrogram( + y_wave[d], + bp["hl"], + bp["n_fft"], + mp.param["mid_side"], + mp.param["mid_side_b2"], + mp.param["reverse"], + ) + + del X_wave, y_wave + + X_spec_m = combine_spectrograms(X_spec_s, mp) + y_spec_m = combine_spectrograms(y_spec_s, mp) + + if X_spec_m.shape != y_spec_m.shape: + raise ValueError("The combined spectrograms are different: " + mix_path) + + _, ext = os.path.splitext(mix_path) + + np.save(mix_cache_path, X_spec_m) + np.save(inst_cache_path, y_spec_m) + + return X_spec_m, y_spec_m + + +def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): + spec_left = np.asfortranarray(spec[0]) + spec_right = np.asfortranarray(spec[1]) + + wave_left = librosa.istft(spec_left, hop_length=hop_length) + wave_right = librosa.istft(spec_right, hop_length=hop_length) + + if reverse: + return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) + elif mid_side: + return np.asfortranarray( + [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] + ) + elif mid_side_b2: + return np.asfortranarray( + [ + np.add(wave_right / 1.25, 0.4 * wave_left), + np.subtract(wave_left / 1.25, 0.4 * wave_right), + ] + ) + else: + return np.asfortranarray([wave_left, wave_right]) + + +def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): + import threading + + spec_left = np.asfortranarray(spec[0]) + spec_right = np.asfortranarray(spec[1]) + + def run_thread(**kwargs): + global wave_left + wave_left = librosa.istft(**kwargs) + + thread = threading.Thread( + target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length} + ) + thread.start() + wave_right = librosa.istft(spec_right, hop_length=hop_length) + thread.join() + + if reverse: + return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) + elif mid_side: + return np.asfortranarray( + [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] + ) + elif mid_side_b2: + return np.asfortranarray( + [ + np.add(wave_right / 1.25, 0.4 * wave_left), + np.subtract(wave_left / 1.25, 0.4 * 
wave_right), + ] + ) + else: + return np.asfortranarray([wave_left, wave_right]) + + +def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): + wave_band = {} + bands_n = len(mp.param["band"]) + offset = 0 + + for d in range(1, bands_n + 1): + bp = mp.param["band"][d] + spec_s = np.ndarray( + shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex + ) + h = bp["crop_stop"] - bp["crop_start"] + spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[ + :, offset : offset + h, : + ] + + offset += h + if d == bands_n: # higher + if extra_bins_h: # if --high_end_process bypass + max_bin = bp["n_fft"] // 2 + spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[ + :, :extra_bins_h, : + ] + if bp["hpf_start"] > 0: + spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) + if bands_n == 1: + wave = spectrogram_to_wave( + spec_s, + bp["hl"], + mp.param["mid_side"], + mp.param["mid_side_b2"], + mp.param["reverse"], + ) + else: + wave = np.add( + wave, + spectrogram_to_wave( + spec_s, + bp["hl"], + mp.param["mid_side"], + mp.param["mid_side_b2"], + mp.param["reverse"], + ), + ) + else: + sr = mp.param["band"][d + 1]["sr"] + if d == 1: # lower + spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) + wave = librosa.resample( + spectrogram_to_wave( + spec_s, + bp["hl"], + mp.param["mid_side"], + mp.param["mid_side_b2"], + mp.param["reverse"], + ), + bp["sr"], + sr, + res_type="sinc_fastest", + ) + else: # mid + spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) + spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) + wave2 = np.add( + wave, + spectrogram_to_wave( + spec_s, + bp["hl"], + mp.param["mid_side"], + mp.param["mid_side_b2"], + mp.param["reverse"], + ), + ) + # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") + wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy") + + return wave.T + + +def fft_lp_filter(spec, bin_start, bin_stop): + g = 1.0 + for b in range(bin_start, bin_stop): + g -= 1 / (bin_stop - bin_start) + spec[:, b, :] = g * spec[:, b, :] + + spec[:, bin_stop:, :] *= 0 + + return spec + + +def fft_hp_filter(spec, bin_start, bin_stop): + g = 1.0 + for b in range(bin_start, bin_stop, -1): + g -= 1 / (bin_start - bin_stop) + spec[:, b, :] = g * spec[:, b, :] + + spec[:, 0 : bin_stop + 1, :] *= 0 + + return spec + + +def mirroring(a, spec_m, input_high_end, mp): + if "mirroring" == a: + mirror = np.flip( + np.abs( + spec_m[ + :, + mp.param["pre_filter_start"] + - 10 + - input_high_end.shape[1] : mp.param["pre_filter_start"] + - 10, + :, + ] + ), + 1, + ) + mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) + + return np.where( + np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror + ) + + if "mirroring2" == a: + mirror = np.flip( + np.abs( + spec_m[ + :, + mp.param["pre_filter_start"] + - 10 + - input_high_end.shape[1] : mp.param["pre_filter_start"] + - 10, + :, + ] + ), + 1, + ) + mi = np.multiply(mirror, input_high_end * 1.7) + + return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) + + +def ensembling(a, specs): + for i in range(1, len(specs)): + if i == 1: + spec = specs[0] + + ln = min([spec.shape[2], specs[i].shape[2]]) + spec = spec[:, :, :ln] + specs[i] = specs[i][:, :, :ln] + + if "min_mag" == a: + spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) + if "max_mag" == a: + spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) + + return spec + + +def stft(wave, nfft, hl): + wave_left = 
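
fft_lp_filter is not a classical filter design: it linearly ramps the per-bin gain from 1 toward 0 between bin_start and bin_stop, then zeroes everything above, operating directly on the complex spectrogram (fft_hp_filter mirrors this from the other end). The gain curve on its own:

    import numpy as np

    bin_start, bin_stop, n_bins = 900, 1000, 1025
    gains = np.ones(n_bins)
    ramp = (np.arange(bin_stop - bin_start) + 1) / (bin_stop - bin_start)
    gains[bin_start:bin_stop] = 1 - ramp  # linear fade from just under 1 down to 0
    gains[bin_stop:] = 0.0  # hard cut above the stop bin
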
np.asfortranarray(wave[0])
+    wave_right = np.asfortranarray(wave[1])
+    spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
+    spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
+    spec = np.asfortranarray([spec_left, spec_right])
+
+    return spec
+
+
+def istft(spec, hl):
+    spec_left = np.asfortranarray(spec[0])
+    spec_right = np.asfortranarray(spec[1])
+
+    wave_left = librosa.istft(spec_left, hop_length=hl)
+    wave_right = librosa.istft(spec_right, hop_length=hl)
+    wave = np.asfortranarray([wave_left, wave_right])
+
+    return wave
+
+
+if __name__ == "__main__":
+    import argparse
+    import time
+
+    import cv2
+    from model_param_init import ModelParameters
+
+    p = argparse.ArgumentParser()
+    p.add_argument(
+        "--algorithm",
+        "-a",
+        type=str,
+        choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
+        default="min_mag",
+    )
+    p.add_argument(
+        "--model_params",
+        "-m",
+        type=str,
+        default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
+    )
+    p.add_argument("--output_name", "-o", type=str, default="output")
+    p.add_argument("--vocals_only", "-v", action="store_true")
+    p.add_argument("input", nargs="+")
+    args = p.parse_args()
+
+    start_time = time.time()
+
+    if args.algorithm.startswith("invert") and len(args.input) != 2:
+        raise ValueError("There should be two input files.")
+
+    if not args.algorithm.startswith("invert") and len(args.input) < 2:
+        raise ValueError("There must be at least two input files.")
+
+    wave, specs = {}, {}
+    mp = ModelParameters(args.model_params)
+
+    for i in range(len(args.input)):
+        spec = {}
+
+        for d in range(len(mp.param["band"]), 0, -1):
+            bp = mp.param["band"][d]
+
+            if d == len(mp.param["band"]):  # high-end band
+                wave[d], _ = librosa.load(
+                    args.input[i],
+                    bp["sr"],
+                    False,
+                    dtype=np.float32,
+                    res_type=bp["res_type"],
+                )
+
+                if len(wave[d].shape) == 1:  # mono to stereo
+                    wave[d] = np.array([wave[d], wave[d]])
+            else:  # lower bands
+                wave[d] = librosa.resample(
+                    wave[d + 1],
+                    mp.param["band"][d + 1]["sr"],
+                    bp["sr"],
+                    res_type=bp["res_type"],
+                )
+
+            spec[d] = wave_to_spectrogram(
+                wave[d],
+                bp["hl"],
+                bp["n_fft"],
+                mp.param["mid_side"],
+                mp.param["mid_side_b2"],
+                mp.param["reverse"],
+            )
+
+        specs[i] = combine_spectrograms(spec, mp)
+
+    del wave
+
+    if args.algorithm == "deep":
+        d_spec = np.where(np.abs(specs[0]) <= np.abs(specs[1]), specs[0], specs[1])
+        v_spec = d_spec - specs[1]
+        sf.write(
+            os.path.join("{}.wav".format(args.output_name)),
+            cmb_spectrogram_to_wave(v_spec, mp),
+            mp.param["sr"],
+        )
+
+    if args.algorithm.startswith("invert"):
+        ln = min([specs[0].shape[2], specs[1].shape[2]])
+        specs[0] = specs[0][:, :, :ln]
+        specs[1] = specs[1][:, :, :ln]
+
+        if "invert_p" == args.algorithm:
+            X_mag = np.abs(specs[0])
+            y_mag = np.abs(specs[1])
+            max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
+            v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
+        else:
+            specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
+            v_spec = specs[0] - specs[1]
+
+        if not args.vocals_only:
+            X_mag = np.abs(specs[0])
+            y_mag = np.abs(specs[1])
+            v_mag = np.abs(v_spec)
+
+            X_image = spectrogram_to_image(X_mag)
+            y_image = spectrogram_to_image(y_mag)
+            v_image = spectrogram_to_image(v_mag)
+
+            cv2.imwrite("{}_X.png".format(args.output_name), X_image)
+            cv2.imwrite("{}_y.png".format(args.output_name), y_image)
+            cv2.imwrite("{}_v.png".format(args.output_name), v_image)
+
+        sf.write(
+            "{}_X.wav".format(args.output_name),
+            cmb_spectrogram_to_wave(specs[0], mp),
+            mp.param["sr"],
+        )
+        sf.write(
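
The invert_p branch above builds a vocal estimate by subtracting a magnitude-capped copy of the mixture from the instrumental while reusing the mixture's phase: v = y - max(|X|, |y|) * exp(i * angle(X)). In isolation (np.maximum is equivalent to the np.where form used above; shapes are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.standard_normal((2, 1025, 64)) + 1j * rng.standard_normal((2, 1025, 64))  # mixture
    y = rng.standard_normal((2, 1025, 64)) + 1j * rng.standard_normal((2, 1025, 64))  # instrumental
    max_mag = np.maximum(np.abs(X), np.abs(y))
    v = y - max_mag * np.exp(1.0j * np.angle(X))  # vocal estimate with mixture phase
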
"{}_y.wav".format(args.output_name), + cmb_spectrogram_to_wave(specs[1], mp), + mp.param["sr"], + ) + + sf.write( + "{}_v.wav".format(args.output_name), + cmb_spectrogram_to_wave(v_spec, mp), + mp.param["sr"], + ) + else: + if not args.algorithm == "deep": + sf.write( + os.path.join("ensembled", "{}.wav".format(args.output_name)), + cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), + mp.param["sr"], + ) + + if args.algorithm == "align": + trackalignment = [ + { + "file1": '"{}"'.format(args.input[0]), + "file2": '"{}"'.format(args.input[1]), + } + ] + + for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): + os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") + + # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) diff --git a/lib/infer/infer_libs/uvr5_pack/mdx.py b/lib/infer/infer_libs/uvr5_pack/mdx.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc7c08b37bc371294f2f82b3382424a5455b7c2 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/mdx.py @@ -0,0 +1,228 @@ +import torch +import onnxruntime as ort +from tqdm import tqdm +import warnings +import numpy as np +import hashlib +import queue +import threading + +warnings.filterwarnings("ignore") + +class MDX_Model: + def __init__(self, device, dim_f, dim_t, n_fft, hop=1024, stem_name=None, compensation=1.000): + self.dim_f = dim_f + self.dim_t = dim_t + self.dim_c = 4 + self.n_fft = n_fft + self.hop = hop + self.stem_name = stem_name + self.compensation = compensation + + self.n_bins = self.n_fft//2+1 + self.chunk_size = hop * (self.dim_t-1) + self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(device) + + out_c = self.dim_c + + self.freq_pad = torch.zeros([1, out_c, self.n_bins-self.dim_f, self.dim_t]).to(device) + + def stft(self, x): + x = x.reshape([-1, self.chunk_size]) + x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True, return_complex=True) + x = torch.view_as_real(x) + x = x.permute([0,3,1,2]) + x = x.reshape([-1,2,2,self.n_bins,self.dim_t]).reshape([-1,4,self.n_bins,self.dim_t]) + return x[:,:,:self.dim_f] + + def istft(self, x, freq_pad=None): + freq_pad = self.freq_pad.repeat([x.shape[0],1,1,1]) if freq_pad is None else freq_pad + x = torch.cat([x, freq_pad], -2) + # c = 4*2 if self.target_name=='*' else 2 + x = x.reshape([-1,2,2,self.n_bins,self.dim_t]).reshape([-1,2,self.n_bins,self.dim_t]) + x = x.permute([0,2,3,1]) + x = x.contiguous() + x = torch.view_as_complex(x) + x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True) + return x.reshape([-1,2,self.chunk_size]) + + +class MDX: + + DEFAULT_SR = 44100 + # Unit: seconds + DEFAULT_CHUNK_SIZE = 0 * DEFAULT_SR + DEFAULT_MARGIN_SIZE = 1 * DEFAULT_SR + + DEFAULT_PROCESSOR = 0 + + def __init__(self, model_path:str, params:MDX_Model, processor=DEFAULT_PROCESSOR): + + # Set the device and the provider (CPU or CUDA) + self.device = torch.device(f'cuda:{processor}') if processor >= 0 else torch.device('cpu') + self.provider = ['CUDAExecutionProvider'] if processor >= 0 else ['CPUExecutionProvider'] + + self.model = params + + # Load the ONNX model using ONNX Runtime + self.ort = ort.InferenceSession(model_path, providers=self.provider) + # Preload the model for faster performance + self.ort.run(None, {'input':torch.rand(1, 4, params.dim_f, params.dim_t).numpy()}) + self.process = lambda spec:self.ort.run(None, {'input': spec.cpu().numpy()})[0] + + self.prog = None + + @staticmethod + def 
get_hash(model_path): + try: + with open(model_path, 'rb') as f: + f.seek(- 10000 * 1024, 2) + model_hash = hashlib.md5(f.read()).hexdigest() + except: + model_hash = hashlib.md5(open(model_path,'rb').read()).hexdigest() + + return model_hash + + @staticmethod + def segment(wave, combine=True, chunk_size=DEFAULT_CHUNK_SIZE, margin_size=DEFAULT_MARGIN_SIZE): + """ + Segment or join segmented wave array + + Args: + wave: (np.array) Wave array to be segmented or joined + combine: (bool) If True, combines segmented wave array. If False, segments wave array. + chunk_size: (int) Size of each segment (in samples) + margin_size: (int) Size of margin between segments (in samples) + + Returns: + numpy array: Segmented or joined wave array + """ + + if combine: + processed_wave = None # Initializing as None instead of [] for later numpy array concatenation + for segment_count, segment in enumerate(wave): + start = 0 if segment_count == 0 else margin_size + end = None if segment_count == len(wave)-1 else -margin_size + if margin_size == 0: + end = None + if processed_wave is None: # Create array for first segment + processed_wave = segment[:, start:end] + else: # Concatenate to existing array for subsequent segments + processed_wave = np.concatenate((processed_wave, segment[:, start:end]), axis=-1) + + else: + processed_wave = [] + sample_count = wave.shape[-1] + + if chunk_size <= 0 or chunk_size > sample_count: + chunk_size = sample_count + + if margin_size > chunk_size: + margin_size = chunk_size + + for segment_count, skip in enumerate(range(0, sample_count, chunk_size)): + + margin = 0 if segment_count == 0 else margin_size + end = min(skip+chunk_size+margin_size, sample_count) + start = skip-margin + + cut = wave[:,start:end].copy() + processed_wave.append(cut) + + if end == sample_count: + break + + return processed_wave + + def pad_wave(self, wave): + """ + Pad the wave array to match the required chunk size + + Args: + wave: (np.array) Wave array to be padded + + Returns: + tuple: (padded_wave, pad, trim) + - padded_wave: Padded wave array + - pad: Number of samples that were padded + - trim: Number of samples that were trimmed + """ + n_sample = wave.shape[1] + trim = self.model.n_fft//2 + gen_size = self.model.chunk_size-2*trim + pad = gen_size - n_sample%gen_size + + # Padded wave + wave_p = np.concatenate((np.zeros((2,trim)), wave, np.zeros((2,pad)), np.zeros((2,trim))), 1) + + mix_waves = [] + for i in range(0, n_sample+pad, gen_size): + waves = np.array(wave_p[:, i:i+self.model.chunk_size]) + mix_waves.append(waves) + + mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(self.device) + + return mix_waves, pad, trim + + def _process_wave(self, mix_waves, trim, pad, q:queue.Queue, _id:int): + """ + Process each wave segment in a multi-threaded environment + + Args: + mix_waves: (torch.Tensor) Wave segments to be processed + trim: (int) Number of samples trimmed during padding + pad: (int) Number of samples padded during padding + q: (queue.Queue) Queue to hold the processed wave segments + _id: (int) Identifier of the processed wave segment + + Returns: + numpy array: Processed wave segment + """ + mix_waves = mix_waves.split(1) + with torch.no_grad(): + pw = [] + for mix_wave in mix_waves: + self.prog.update() + spec = self.model.stft(mix_wave) + processed_spec = torch.tensor(self.process(spec)) + processed_wav = self.model.istft(processed_spec.to(self.device)) + processed_wav = processed_wav[:,:,trim:-trim].transpose(0,1).reshape(2, -1).cpu().numpy() + pw.append(processed_wav) + 
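
pad_wave reserves trim = n_fft // 2 samples of STFT context on each side of every chunk, so the usable payload per chunk is gen_size = chunk_size - 2 * trim, and the input is zero-padded up to a whole number of payloads before slicing. The bookkeeping by itself, with illustrative sizes:

    n_fft, hop, dim_t = 6144, 1024, 256  # illustrative MDX-style settings
    chunk_size = hop * (dim_t - 1)
    trim = n_fft // 2
    gen_size = chunk_size - 2 * trim  # usable samples per chunk
    n_sample = 44100 * 30  # 30 seconds of audio
    pad = gen_size - n_sample % gen_size  # zero-padding up to a whole payload
    assert (n_sample + pad) % gen_size == 0
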
processed_signal = np.concatenate(pw, axis=-1)[:, :-pad] + q.put({_id:processed_signal}) + return processed_signal + + def process_wave(self, wave:np.array, mt_threads=1): + """ + Process the wave array in a multi-threaded environment + + Args: + wave: (np.array) Wave array to be processed + mt_threads: (int) Number of threads to be used for processing + + Returns: + numpy array: Processed wave array + """ + self.prog = tqdm(total=0) + chunk = wave.shape[-1]//mt_threads + waves = self.segment(wave, False, chunk) + + # Create a queue to hold the processed wave segments + q = queue.Queue() + threads = [] + for c, batch in enumerate(waves): + mix_waves, pad, trim = self.pad_wave(batch) + self.prog.total = len(mix_waves)*mt_threads + thread = threading.Thread(target=self._process_wave, args=(mix_waves, trim, pad, q, c)) + thread.start() + threads.append(thread) + for thread in threads: + thread.join() + self.prog.close() + + processed_batches = [] + while not q.empty(): + processed_batches.append(q.get()) + processed_batches = [list(wave.values())[0] for wave in sorted(processed_batches, key=lambda d: list(d.keys())[0])] + assert len(processed_batches) == len(waves), 'Incomplete processed batches, please reduce batch size!' + return self.segment(processed_batches, True, chunk) \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/name_params.json b/lib/infer/infer_libs/uvr5_pack/name_params.json new file mode 100644 index 0000000000000000000000000000000000000000..7ac29002cd3c5cd070d4b4deebbcf6cc2e3dca00 --- /dev/null +++ b/lib/infer/infer_libs/uvr5_pack/name_params.json @@ -0,0 +1,263 @@ +{ + "equivalent" : [ + { + "model_hash_name" : [ + { + "hash_name": "47939caf0cfe52a0e81442b85b971dfd", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100" + }, + { + "hash_name": "4e4ecb9764c50a8c414fee6e10395bbe", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json", + "param_name": "4band_v2" + }, + { + "hash_name": "ca106edd563e034bde0bdec4bb7a4b36", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json", + "param_name": "4band_v2" + }, + { + "hash_name": "e60a1e84803ce4efc0a6551206cc4b71", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100" + }, + { + "hash_name": "a82f14e75892e55e994376edbf0c8435", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100" + }, + { + "hash_name": "6dd9eaa6f0420af9f1d403aaafa4cc06", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json", + "param_name": "4band_v2_sn" + }, + { + "hash_name": "08611fb99bd59eaa79ad27c58d137727", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json", + "param_name": "4band_v2_sn" + }, + { + "hash_name": "5c7bbca45a187e81abbbd351606164e5", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json", + "param_name": "3band_44100_msb2" + }, + { + "hash_name": "d6b2cb685a058a091e5e7098192d3233", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json", + "param_name": "3band_44100_msb2" + }, + { + "hash_name": "c1b9f38170a7c90e96f027992eb7c62b", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100" + }, + { + "hash_name": "c3448ec923fa0edf3d03a19e633faa53", + "model_params": 
"lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100" + }, + { + "hash_name": "68aa2c8093d0080704b200d140f59e54", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100.json", + "param_name": "3band_44100" + }, + { + "hash_name": "fdc83be5b798e4bd29fe00fe6600e147", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json", + "param_name": "3band_44100_mid.json" + }, + { + "hash_name": "2ce34bc92fd57f55db16b7a4def3d745", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json", + "param_name": "3band_44100_mid.json" + }, + { + "hash_name": "52fdca89576f06cf4340b74a4730ee5f", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100.json" + }, + { + "hash_name": "41191165b05d38fc77f072fa9e8e8a30", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100.json" + }, + { + "hash_name": "89e83b511ad474592689e562d5b1f80e", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_32000.json", + "param_name": "2band_32000.json" + }, + { + "hash_name": "0b954da81d453b716b114d6d7c95177f", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_32000.json", + "param_name": "2band_32000.json" + } + + ], + "v4 Models": [ + { + "hash_name": "6a00461c51c2920fd68937d4609ed6c8", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json", + "param_name": "1band_sr16000_hl512" + }, + { + "hash_name": "0ab504864d20f1bd378fe9c81ef37140", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", + "param_name": "1band_sr32000_hl512" + }, + { + "hash_name": "7dd21065bf91c10f7fccb57d7d83b07f", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", + "param_name": "1band_sr32000_hl512" + }, + { + "hash_name": "80ab74d65e515caa3622728d2de07d23", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", + "param_name": "1band_sr32000_hl512" + }, + { + "hash_name": "edc115e7fc523245062200c00caa847f", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json", + "param_name": "1band_sr33075_hl384" + }, + { + "hash_name": "28063e9f6ab5b341c5f6d3c67f2045b7", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json", + "param_name": "1band_sr33075_hl384" + }, + { + "hash_name": "b58090534c52cbc3e9b5104bad666ef2", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json", + "param_name": "1band_sr44100_hl512" + }, + { + "hash_name": "0cdab9947f1b0928705f518f3c78ea8f", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json", + "param_name": "1band_sr44100_hl512" + }, + { + "hash_name": "ae702fed0238afb5346db8356fe25f13", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json", + "param_name": "1band_sr44100_hl1024" + } + ] + } + ], + "User Models" : [ + { + "1 Band": [ + { + "hash_name": "1band_sr16000_hl512", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json", + "param_name": "1band_sr16000_hl512" + }, + { + "hash_name": "1band_sr32000_hl512", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", + "param_name": 
"1band_sr16000_hl512" + }, + { + "hash_name": "1band_sr33075_hl384", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json", + "param_name": "1band_sr33075_hl384" + }, + { + "hash_name": "1band_sr44100_hl256", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json", + "param_name": "1band_sr44100_hl256" + }, + { + "hash_name": "1band_sr44100_hl512", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json", + "param_name": "1band_sr44100_hl512" + }, + { + "hash_name": "1band_sr44100_hl1024", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json", + "param_name": "1band_sr44100_hl1024" + } + ], + "2 Band": [ + { + "hash_name": "2band_44100_lofi", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json", + "param_name": "2band_44100_lofi" + }, + { + "hash_name": "2band_32000", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_32000.json", + "param_name": "2band_32000" + }, + { + "hash_name": "2band_48000", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/2band_48000.json", + "param_name": "2band_48000" + } + ], + "3 Band": [ + { + "hash_name": "3band_44100", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100.json", + "param_name": "3band_44100" + }, + { + "hash_name": "3band_44100_mid", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json", + "param_name": "3band_44100_mid" + }, + { + "hash_name": "3band_44100_msb2", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json", + "param_name": "3band_44100_msb2" + } + ], + "4 Band": [ + { + "hash_name": "4band_44100", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100.json", + "param_name": "4band_44100" + }, + { + "hash_name": "4band_44100_mid", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json", + "param_name": "4band_44100_mid" + }, + { + "hash_name": "4band_44100_msb", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json", + "param_name": "4band_44100_msb" + }, + { + "hash_name": "4band_44100_msb2", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json", + "param_name": "4band_44100_msb2" + }, + { + "hash_name": "4band_44100_reverse", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json", + "param_name": "4band_44100_reverse" + }, + { + "hash_name": "4band_44100_sw", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json", + "param_name": "4band_44100_sw" + }, + { + "hash_name": "4band_v2", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json", + "param_name": "4band_v2" + }, + { + "hash_name": "4band_v2_sn", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json", + "param_name": "4band_v2_sn" + }, + { + "hash_name": "tmodelparam", + "model_params": "lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/tmodelparam.json", + "param_name": "User Model Param Set" + } + ] + } + ] +} \ No newline at end of file diff --git a/lib/infer/infer_libs/uvr5_pack/utils.py b/lib/infer/infer_libs/uvr5_pack/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9bab57c35df3d7249873bb4e5e743bcea5549936 --- /dev/null +++ 
b/lib/infer/infer_libs/uvr5_pack/utils.py
@@ -0,0 +1,124 @@
+import json
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+
+def load_data(file_name: str = "./lib/infer/infer_libs/uvr5_pack/name_params.json") -> dict:
+    with open(file_name, "r") as f:
+        data = json.load(f)
+
+    return data
+
+
+def make_padding(width, cropsize, offset):
+    left = offset
+    roi_size = cropsize - left * 2
+    if roi_size == 0:
+        roi_size = cropsize
+    right = roi_size - (width % roi_size) + left
+
+    return left, right, roi_size
+
+
+def inference(X_spec, device, model, aggressiveness, data):
+    """
+    data : dict of configs
+    """
+
+    def _execute(
+        X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True
+    ):
+        model.eval()
+        with torch.no_grad():
+            preds = []
+
+            iterations = [n_window]
+
+            total_iterations = sum(iterations)
+            for i in tqdm(range(n_window)):
+                start = i * roi_size
+                X_mag_window = X_mag_pad[
+                    None, :, :, start : start + data["window_size"]
+                ]
+                X_mag_window = torch.from_numpy(X_mag_window)
+                if is_half:
+                    X_mag_window = X_mag_window.half()
+                X_mag_window = X_mag_window.to(device)
+
+                pred = model.predict(X_mag_window, aggressiveness)
+
+                pred = pred.detach().cpu().numpy()
+                preds.append(pred[0])
+
+            pred = np.concatenate(preds, axis=2)
+        return pred
+
+    def preprocess(X_spec):
+        X_mag = np.abs(X_spec)
+        X_phase = np.angle(X_spec)
+
+        return X_mag, X_phase
+
+    X_mag, X_phase = preprocess(X_spec)
+
+    coef = X_mag.max()
+    X_mag_pre = X_mag / coef
+
+    n_frame = X_mag_pre.shape[2]
+    pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset)
+    n_window = int(np.ceil(n_frame / roi_size))
+
+    X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
+
+    if list(model.state_dict().values())[0].dtype == torch.float16:
+        is_half = True
+    else:
+        is_half = False
+    pred = _execute(
+        X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
+    )
+    pred = pred[:, :, :n_frame]
+
+    if data["tta"]:
+        pad_l += roi_size // 2
+        pad_r += roi_size // 2
+        n_window += 1
+
+        X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
+
+        pred_tta = _execute(
+            X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
+        )
+        pred_tta = pred_tta[:, :, roi_size // 2 :]
+        pred_tta = pred_tta[:, :, :n_frame]
+
+        return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase)
+    else:
+        return pred * coef, X_mag, np.exp(1.0j * X_phase)
+
+
+def _get_name_params(model_path, model_hash):
+    data = load_data()
+    flag = False
+    ModelName = model_path
+    # Defaults so the final return cannot raise UnboundLocalError when nothing matches.
+    model_params_auto = None
+    param_name_auto = None
+    for type in list(data):
+        for model in list(data[type][0]):
+            for i in range(len(data[type][0][model])):
+                if str(data[type][0][model][i]["hash_name"]) == model_hash:
+                    flag = True
+                elif str(data[type][0][model][i]["hash_name"]) in ModelName:
+                    flag = True
+
+                if flag:
+                    model_params_auto = data[type][0][model][i]["model_params"]
+                    param_name_auto = data[type][0][model][i]["param_name"]
+                    if type == "equivalent":
+                        return param_name_auto, model_params_auto
+                    else:
+                        flag = False
+    return param_name_auto, model_params_auto
diff --git a/lib/infer/infer_pack/__pycache__/attentions.cpython-39.pyc b/lib/infer/infer_pack/__pycache__/attentions.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8480fc3c96f753dd211c053e9a71a58807c9418
Binary files /dev/null and b/lib/infer/infer_pack/__pycache__/attentions.cpython-39.pyc differ
diff --git a/lib/infer/infer_pack/__pycache__/commons.cpython-39.pyc
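
When data["tta"] is set, inference runs the model a second time with the analysis windows shifted by half a window (roi_size // 2) and averages the two passes, a light test-time augmentation that smooths window-boundary artifacts. Schematically (random arrays stand in for the two model passes):

    import numpy as np

    pred = np.random.rand(2, 1025, 400)  # pass over the original padding
    pred_tta = np.random.rand(2, 1025, 400)  # pass shifted by roi_size // 2
    blended = (pred + pred_tta) * 0.5  # what inference returns, scaled by coef
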
b/lib/infer/infer_pack/__pycache__/commons.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb28013b97dbd99fce04b9dc231b213afc7f342f Binary files /dev/null and b/lib/infer/infer_pack/__pycache__/commons.cpython-39.pyc differ diff --git a/lib/infer/infer_pack/__pycache__/models.cpython-39.pyc b/lib/infer/infer_pack/__pycache__/models.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41e8b15fa33e37ad40503bb8d57e4b5f05875b8a Binary files /dev/null and b/lib/infer/infer_pack/__pycache__/models.cpython-39.pyc differ diff --git a/lib/infer/infer_pack/__pycache__/models_onnx.cpython-39.pyc b/lib/infer/infer_pack/__pycache__/models_onnx.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d8e21be478d4829ae8ce90aa2d895d39aecffab Binary files /dev/null and b/lib/infer/infer_pack/__pycache__/models_onnx.cpython-39.pyc differ diff --git a/lib/infer/infer_pack/__pycache__/modules.cpython-39.pyc b/lib/infer/infer_pack/__pycache__/modules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e3cd94f005188b18529a8259beb8b0d82a16fc0 Binary files /dev/null and b/lib/infer/infer_pack/__pycache__/modules.cpython-39.pyc differ diff --git a/lib/infer/infer_pack/__pycache__/transforms.cpython-39.pyc b/lib/infer/infer_pack/__pycache__/transforms.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f48e73bb6978a98207d6d8726279b5ad304ea2dd Binary files /dev/null and b/lib/infer/infer_pack/__pycache__/transforms.cpython-39.pyc differ diff --git a/lib/infer/infer_pack/attentions.py b/lib/infer/infer_pack/attentions.py new file mode 100644 index 0000000000000000000000000000000000000000..693966841d9b371ce3b4497d74d040db3e6aaa46 --- /dev/null +++ b/lib/infer/infer_pack/attentions.py @@ -0,0 +1,414 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F + +from lib.infer.infer_pack import commons +from lib.infer.infer_pack.modules import LayerNorm + + +class Encoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=10, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + window_size=window_size, + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__( + self, + hidden_channels, + 
filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + proximal_bias=False, + proximal_init=True, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + proximal_bias=proximal_bias, + proximal_init=proximal_init, + ) + ) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append( + MultiHeadAttention( + hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + causal=True, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + """ + self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( + device=x.device, dtype=x.dtype + ) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + self.emb_rel_v = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, 
x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert ( + t_s == t_t + ), "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys( + query / math.sqrt(self.k_channels), key_relative_embeddings + ) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." + scores = scores + self._attention_bias_proximal(t_s).to( + device=scores.device, dtype=scores.dtype + ) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert ( + t_s == t_t + ), "Local attention is only available for self-attention." + block_mask = ( + torch.ones_like(scores) + .triu(-self.block_length) + .tril(self.block_length) + ) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings( + self.emb_rel_v, t_s + ) + output = output + self._matmul_with_relative_values( + relative_weights, value_relative_embeddings + ) + output = ( + output.transpose(2, 3).contiguous().view(b, d, t_t) + ) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + ) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[ + :, slice_start_position:slice_end_position + ] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. 
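+ # Pad one zero column, flatten, pad l-1 more, then re-view and slice: this shifts row i left by i, + # so entry [i, r] of the [l, 2l-1] relative matrix lands at absolute column i + r - (l - 1), + # e.g. column l-1 (relative offset 0) becomes the diagonal of the [l, l] output.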
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad( + x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) + ) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ + :, :, :length, length - 1 : + ] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad( + x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) + ) + x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. + Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__( + self, + in_channels, + out_channels, + filter_channels, + kernel_size, + p_dropout=0.0, + activation=None, + causal=False, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x diff --git a/lib/infer/infer_pack/commons.py b/lib/infer/infer_pack/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..2618e3ad501d1d4745a34024c2bf1676546fae80 --- /dev/null +++ b/lib/infer/infer_pack/commons.py @@ -0,0 +1,164 @@ +import math +import torch +from torch.nn import functional as F + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + 
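# F.pad takes pad widths starting from the *last* dimension, so the per-dim + # [[before, after], ...] list is reversed and flattened here, + # e.g. [[0, 0], [0, 0], [1, 0]] -> [1, 0, 0, 0, 0, 0]. +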
return pad_shape + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """KL(P||Q)""" + kl = (logs_q - logs_p) - 0.5 + kl += ( + 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) + ) + return kl + + +def rand_gumbel(shape): + """Sample from the Gumbel distribution, protect from overflows.""" + uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 + return -torch.log(-torch.log(uniform_samples)) + + +def rand_gumbel_like(x): + g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) + return g + + +def slice_segments(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, :, idx_str:idx_end] + return ret + + +def slice_segments2(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, idx_str:idx_end] + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - segment_size + 1 + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size) + return ret, ids_str + + +def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( + num_timescales - 1 + ) + inv_timescales = min_timescale * torch.exp( + torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment + ) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = F.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + + +def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return x + signal.to(dtype=x.dtype, device=x.device) + + +def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) + + +def subsequent_mask(length): + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def generate_path(duration, mask): + """ + duration: [b, 1, t_x] + mask: [b, 1, t_y, t_x] + """ + device = duration.device + + b, _, t_y, t_x = mask.shape + 
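# cum_duration marks where each input step ends along t_y; sequence_mask turns those ends + # into prefix rows, and subtracting the previous step's row leaves a hard 0/1 monotonic + # alignment path with no conditional ops, e.g. duration [[2, 3]] maps step 0 to frames 0-1 + # and step 1 to frames 2-4. +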
cum_duration = torch.cumsum(duration, -1) + + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2, 3) * mask + return path + + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1.0 / norm_type) + return total_norm diff --git a/lib/infer/infer_pack/models.py b/lib/infer/infer_pack/models.py new file mode 100644 index 0000000000000000000000000000000000000000..d898604960f129fc37f464ee3669bb61cfa8f614 --- /dev/null +++ b/lib/infer/infer_pack/models.py @@ -0,0 +1,1142 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from lib.infer.infer_pack import modules +from lib.infer.infer_pack import attentions +from lib.infer.infer_pack.commons import get_padding +from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from lib.infer.infer_pack.commons import init_weights +import numpy as np +from lib.infer.infer_pack import commons + + +class TextEncoder256(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(256, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class TextEncoder768(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, 
hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + 
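# Each stage upsamples time by factor u with a transposed conv of kernel k while halving + # the channel width; e.g. a typical 40k configuration uses upsample_rates (10, 10, 2, 2), + # expanding 100 fps features by 400x back to the waveform rate. +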
self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class SineGen(torch.nn.Module): + """Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__( + self, + samp_rate, + harmonic_num=0, + sine_amp=0.1, + noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False, + ): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + if uv.device.type == "privateuseone": # for DirectML + uv = uv.float() + return uv + + def forward(self, f0, upp): + """sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0 = f0[:, None].transpose(1, 2) + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( + idx + 2 + ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 + rand_ini = torch.rand( + f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device + ) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 + tmp_over_one *= upp + tmp_over_one = F.interpolate( + tmp_over_one.transpose(2, 1), + scale_factor=upp, + mode="linear", + align_corners=True, + ).transpose(2, 1) + 
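# The Chinese comments above note that the % 1 wraps block algebraic simplification of the + # harmonic products and of the cumsum below. Wrap points are detected on the linearly + # upsampled cumulative phase, and cumsum_shift inserts a -1 at each wrap so the per-sample + # cumsum of the nearest-upsampled increments stays aligned with the fractional phase fed to sin(). +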
rad_values = F.interpolate( + rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" + ).transpose( + 2, 1 + ) ####### + tmp_over_one %= 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + sine_waves = torch.sin( + torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi + ) + sine_waves = sine_waves * self.sine_amp + uv = self._f02uv(f0) + uv = F.interpolate( + uv.transpose(2, 1), scale_factor=upp, mode="nearest" + ).transpose(2, 1) + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__( + self, + sampling_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + # to produce sine waveforms + self.l_sin_gen = SineGen( + sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod + ) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x, upp=None): + sine_wavs, uv, _ = self.l_sin_gen(x, upp) + if self.is_half: + sine_wavs = sine_wavs.half() + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None # noise, uv + + +class GeneratorNSF(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF( + sampling_rate=sr, harmonic_num=0, is_half=is_half + ) + self.noise_convs = nn.ModuleList() + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1 :]) + self.noise_convs.append( + Conv1d( + 1, + c_cur, + kernel_size=stride_f0 * 2, + stride=stride_f0, + 
padding=stride_f0 // 2, + ) + ) + else: + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = np.prod(upsample_rates) + + def forward(self, x, f0, g=None): + har_source, noi_source, uv = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +sr2sr = { + "32k": 32000, + "40k": 40000, + "48k": 48000, +} + + +class SynthesizerTrnMs256NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): + sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward( + self, phone, 
phone_lengths, pitch, pitchf, y, y_lengths, ds + ): # 这里ds是id,[bs,1] + # print(1,pitch.shape)#[bs,t] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) + pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) + # print(-2,pitchf.shape,z_slice.shape) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + nsff0 = nsff0[:, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): + sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward( + self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds + ): # 这里ds是id,[bs,1] + # print(1,pitch.shape)#[bs,t] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = 
self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) + pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) + # print(-2,pitchf.shape,z_slice.shape) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + nsff0 = nsff0[:, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs256NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=False, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, 
logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=False, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, rate=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate: + head = int(z_p.shape[2] * rate) + z_p = z_p[:, :, -head:] + x_mask = x_mask[:, :, -head:] + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + 
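# One scale discriminator (DiscriminatorS) plus one period discriminator per entry; the + # periods are primes, presumably so the period-spaced 2D views in DiscriminatorP overlap + # as little as possible and each sub-discriminator sees a distinct phase structure. +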
super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + # periods = [3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + # periods = [2, 3, 5, 7, 11, 17] + periods = [2, 3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 1024, + 1024, + (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + 
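# Reflect-pad t up to a multiple of self.period, then view as [b, c, t // period, period] + # so the (kernel_size, 1) convs below compare samples exactly one period apart. +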
b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap \ No newline at end of file diff --git a/lib/infer/infer_pack/models_dml.py b/lib/infer/infer_pack/models_dml.py new file mode 100644 index 0000000000000000000000000000000000000000..5806e7d919af976aec47cd974373be8dff2d272e --- /dev/null +++ b/lib/infer/infer_pack/models_dml.py @@ -0,0 +1,1122 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from lib.infer.infer_pack import modules +from lib.infer.infer_pack import attentions +from lib.infer.infer_pack.commons import get_padding +from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from lib.infer.infer_pack.commons import init_weights +import numpy as np +from lib.infer.infer_pack import commons + + +class TextEncoder256(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(256, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class TextEncoder768(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = 
torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + 
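# conv_post projects the final feature map to a mono waveform (tanh-squashed in forward). + # This models_dml.py copy appears to track models.py for DirectML backends; the visible + # differences are SineGen._f02uv always returning a float tensor and infer() trimming + # with max_len instead of a rate fraction. +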
self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class SineGen(torch.nn.Module): + """Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__( + self, + samp_rate, + harmonic_num=0, + sine_amp=0.1, + noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False, + ): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv.float() + + def forward(self, f0, upp): + """sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0 = f0[:, None].transpose(1, 2) + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( + idx + 2 + ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 + rand_ini = torch.rand( + f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device + ) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 + tmp_over_one *= upp + tmp_over_one = F.interpolate( + tmp_over_one.transpose(2, 1), + scale_factor=upp, + mode="linear", + align_corners=True, + ).transpose(2, 1) + rad_values = F.interpolate( + rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" + ).transpose( + 2, 1 + ) ####### + tmp_over_one %= 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + sine_waves = torch.sin( + torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi + ) + sine_waves = sine_waves * self.sine_amp + uv = self._f02uv(f0) + uv = F.interpolate( + uv.transpose(2, 1), scale_factor=upp, mode="nearest" + ).transpose(2, 
1) + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__( + self, + sampling_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + # to produce sine waveforms + self.l_sin_gen = SineGen( + sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod + ) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x, upp=None): + sine_wavs, uv, _ = self.l_sin_gen(x, upp) + if self.is_half: + sine_wavs = sine_wavs.half() + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None # noise, uv + + +class GeneratorNSF(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF( + sampling_rate=sr, harmonic_num=0, is_half=is_half + ) + self.noise_convs = nn.ModuleList() + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1 :]) + self.noise_convs.append( + Conv1d( + 1, + c_cur, + kernel_size=stride_f0 * 2, + stride=stride_f0, + padding=stride_f0 // 2, + ) + ) + else: + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = 
np.prod(upsample_rates) + + def forward(self, x, f0, g=None): + har_source, noi_source, uv = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +sr2sr = { + "32k": 32000, + "40k": 40000, + "48k": 48000, +} + + +class SynthesizerTrnMs256NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): + sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward( + self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds + ): # 这里ds是id,[bs,1] + # print(1,pitch.shape)#[bs,t] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) + pitchf = commons.slice_segments2(pitchf, 
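# A plain-NumPy stand-in (not commons.rand_slice_segments itself) showing the
# invariant the training forward() relies on: z and pitchf must be sliced at the
# same random offsets so the NSF decoder sees matching latent and F0 windows.
import numpy as np

def rand_slice(z, pitchf, segment_size, rng=np.random.default_rng(0)):
    b, c, t = z.shape
    ids = rng.integers(0, t - segment_size + 1, size=b)        # one offset per item
    z_s = np.stack([z[i, :, s : s + segment_size] for i, s in enumerate(ids)])
    f_s = np.stack([pitchf[i, s : s + segment_size] for i, s in enumerate(ids)])
    return z_s, f_s, ids

z, f0 = np.random.randn(2, 192, 50), np.random.rand(2, 50) * 300
z_s, f_s, _ = rand_slice(z, f0, segment_size=12)
assert z_s.shape == (2, 192, 12) and f_s.shape == (2, 12)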
ids_slice, self.segment_size) + # print(-2,pitchf.shape,z_slice.shape) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): + sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward( + self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds + ): # 这里ds是id,[bs,1] + # print(1,pitch.shape)#[bs,t] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) + pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) + # print(-2,pitchf.shape,z_slice.shape) + o = self.dec(z_slice, pitchf, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + 
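# The infer() methods sample the prior at a fixed 0.66666 temperature before
# inverting the flow; a hedged PyTorch sketch of just that sampling step:
import torch

def sample_prior(m_p, logs_p, x_mask, temperature=0.66666):
    # z_p ~ N(m_p, (temperature * exp(logs_p))**2), masked to valid frames
    return (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * temperature) * x_mask

z_p = sample_prior(torch.zeros(1, 192, 10), torch.zeros(1, 192, 10),
                   torch.ones(1, 1, 10))    # unit-variance prior scaled by 0.66666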
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs256NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder256( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=False, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class SynthesizerTrnMs768NSFsid_nono(nn.Module): + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + 
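# The four synthesizer variants differ along two axes only: encoder width
# (v1 -> TextEncoder256 for 256-dim HuBERT features, v2 -> TextEncoder768) and
# F0 conditioning ("_nono" = pitchless, plain Generator decoder). A hypothetical
# dispatch table making that pairing explicit (helper name assumed):
SYNTH_CLASSES = {
    ("v1", True): "SynthesizerTrnMs256NSFsid",
    ("v1", False): "SynthesizerTrnMs256NSFsid_nono",
    ("v2", True): "SynthesizerTrnMs768NSFsid",
    ("v2", False): "SynthesizerTrnMs768NSFsid_nono",
}

def pick_synth(version: str, if_f0: bool) -> str:
    return SYNTH_CLASSES[(version, if_f0)]

assert pick_synth("v2", True) == "SynthesizerTrnMs768NSFsid"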
gin_channels, + sr=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=False, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] + g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = commons.rand_slice_segments( + z, y_lengths, self.segment_size + ) + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, phone, phone_lengths, sid, max_len=None): + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], g=g) + return o, x_mask, (z, z_p, m_p, logs_p) + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + # periods = [3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + # periods = [2, 3, 
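# Each DiscriminatorP folds the waveform into a 2-D grid whose width is its
# (prime) period, so the 2-D convolutions compare samples exactly one period
# apart. A minimal sketch of that pad-and-view step in isolation:
import torch
import torch.nn.functional as F

def fold_by_period(x, period):
    b, c, t = x.shape
    if t % period != 0:                        # reflect-pad up to a multiple
        n_pad = period - (t % period)
        x = F.pad(x, (0, n_pad), "reflect")
        t = t + n_pad
    return x.view(b, c, t // period, period)   # (B, C, T/period, period)

assert fold_by_period(torch.randn(1, 1, 16000), 11).shape == (1, 1, 1455, 11)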
5, 7, 11, 17] + periods = [2, 3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 1024, + 1024, + (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap diff --git a/lib/infer/infer_pack/models_onnx.py b/lib/infer/infer_pack/models_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..e370d3736219568247a20a1ddf2f450b087bd329 --- /dev/null +++ b/lib/infer/infer_pack/models_onnx.py @@ -0,0 +1,817 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F +from lib.infer.infer_pack import modules +from lib.infer.infer_pack import attentions +from lib.infer.infer_pack.commons import get_padding +from torch.nn import Conv1d, ConvTranspose1d, 
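# The discriminators return per-sub-discriminator score lists plus feature maps;
# a hedged sketch of the least-squares GAN losses conventionally computed from
# y_d_rs / y_d_gs in VITS-style training (helper names assumed, not this file's):
import torch

def discriminator_loss(real_outs, fake_outs):
    loss = 0.0
    for dr, dg in zip(real_outs, fake_outs):
        loss = loss + torch.mean((1 - dr) ** 2) + torch.mean(dg**2)
    return loss

def generator_loss(fake_outs):
    return sum(torch.mean((1 - dg) ** 2) for dg in fake_outs)

real = [torch.rand(4, 100) for _ in range(7)]   # one entry per sub-discriminator
fake = [torch.rand(4, 100) for _ in range(7)]
d_loss, g_loss = discriminator_loss(real, fake), generator_loss(fake)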
Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from lib.infer.infer_pack.commons import init_weights +import numpy as np +from lib.infer.infer_pack import commons + + +class TextEncoder256(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(256, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class TextEncoder768(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + f0=True, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.emb_phone = nn.Linear(768, hidden_channels) + self.lrelu = nn.LeakyReLU(0.1, inplace=True) + if f0 == True: + self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, phone, pitch, lengths): + if pitch == None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def 
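# Both text encoders gate their features with commons.sequence_mask; an
# equivalent sketch of that mask builder (illustrative, not the original):
import torch

def sequence_mask(lengths, max_len=None):
    max_len = max_len or int(lengths.max())
    ids = torch.arange(max_len, device=lengths.device)
    return ids[None, :] < lengths[:, None]          # (B, T) boolean validity mask

m = sequence_mask(torch.tensor([3, 5]), 5).unsqueeze(1).float()   # (B, 1, T)
# m[0] zeroes frames 3..4 of the shorter item when multiplied into x and proj(x).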
forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class SineGen(torch.nn.Module): + """Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: 
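# The coupling layers form a normalizing flow, so reverse=True must exactly undo
# reverse=False. A toy affine coupling demonstrating the property that
# ResidualCouplingLayer's two branches implement (m and logs are random
# stand-ins for the WN(x0) outputs):
import torch

def couple(x1, m, logs, reverse=False):
    if not reverse:
        return m + x1 * torch.exp(logs)      # forward: scale-and-shift half 2
    return (x1 - m) * torch.exp(-logs)       # inverse: undo it exactly

x1, m, logs = torch.randn(2, 4), torch.randn(2, 4), torch.randn(2, 4)
assert torch.allclose(couple(couple(x1, m, logs), m, logs, reverse=True),
                      x1, atol=1e-6)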
sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__( + self, + samp_rate, + harmonic_num=0, + sine_amp=0.1, + noise_std=0.003, + voiced_threshold=0, + flag_for_pulse=False, + ): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.dim = self.harmonic_num + 1 + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + # generate uv signal + uv = torch.ones_like(f0) + uv = uv * (f0 > self.voiced_threshold) + return uv + + def forward(self, f0, upp): + """sine_tensor, uv = forward(f0) + input F0: tensor(batchsize=1, length, dim=1) + f0 for unvoiced steps should be 0 + output sine_tensor: tensor(batchsize=1, length, dim) + output uv: tensor(batchsize=1, length, 1) + """ + with torch.no_grad(): + f0 = f0[:, None].transpose(1, 2) + f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) + # fundamental component + f0_buf[:, :, 0] = f0[:, :, 0] + for idx in np.arange(self.harmonic_num): + f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( + idx + 2 + ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic + rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 + rand_ini = torch.rand( + f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device + ) + rand_ini[:, 0] = 0 + rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini + tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 + tmp_over_one *= upp + tmp_over_one = F.interpolate( + tmp_over_one.transpose(2, 1), + scale_factor=upp, + mode="linear", + align_corners=True, + ).transpose(2, 1) + rad_values = F.interpolate( + rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" + ).transpose( + 2, 1 + ) ####### + tmp_over_one %= 1 + tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 + cumsum_shift = torch.zeros_like(rad_values) + cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 + sine_waves = torch.sin( + torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi + ) + sine_waves = sine_waves * self.sine_amp + uv = self._f02uv(f0) + uv = F.interpolate( + uv.transpose(2, 1), scale_factor=upp, mode="nearest" + ).transpose(2, 1) + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__( + self, + 
sampling_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + # to produce sine waveforms + self.l_sin_gen = SineGen( + sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod + ) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x, upp=None): + sine_wavs, uv, _ = self.l_sin_gen(x, upp) + if self.is_half: + sine_wavs = sine_wavs.half() + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None # noise, uv + + +class GeneratorNSF(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF( + sampling_rate=sr, harmonic_num=0, is_half=is_half + ) + self.noise_convs = nn.ModuleList() + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + c_cur = upsample_initial_channel // (2 ** (i + 1)) + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + if i + 1 < len(upsample_rates): + stride_f0 = np.prod(upsample_rates[i + 1 :]) + self.noise_convs.append( + Conv1d( + 1, + c_cur, + kernel_size=stride_f0 * 2, + stride=stride_f0, + padding=stride_f0 // 2, + ) + ) + else: + self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = np.prod(upsample_rates) + + def forward(self, x, f0, g=None): + har_source, noi_source, uv = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + x_source = self.noise_convs[i](har_source) + x = x + x_source + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +sr2sr = { + "32k": 32000, + "40k": 40000, + "48k": 48000, +} + + +class SynthesizerTrnMsNSFsidM(nn.Module): + def __init__( + self, + 
spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + version, + **kwargs + ): + super().__init__() + if type(sr) == type("strr"): + sr = sr2sr[sr] + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + # self.hop_length = hop_length# + self.spk_embed_dim = spk_embed_dim + if version == "v1": + self.enc_p = TextEncoder256( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + else: + self.enc_p = TextEncoder768( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) + self.speaker_map = None + print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) + + def remove_weight_norm(self): + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def construct_spkmixmap(self, n_speaker): + self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) + for i in range(n_speaker): + self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) + self.speaker_map = self.speaker_map.unsqueeze(0) + + def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): + if self.speaker_map is not None: # [N, S] * [S, B, 1, H] + g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] + g = g * self.speaker_map # [N, S, B, 1, H] + g = torch.sum(g, dim=1) # [N, 1, B, 1, H] + g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] + else: + g = g.unsqueeze(0) + g = self.emb_g(g).transpose(1, 2) + + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) + return o + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11, 17] + # periods = [3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = 
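# construct_spkmixmap caches every speaker embedding so the exported ONNX graph
# can blend voices with mixing weights instead of one integer id. A plain-tensor
# sketch of the weighted sum the exported forward() performs (toy sizes):
import torch

n_speaker, gin_channels = 4, 256
speaker_map = torch.randn(n_speaker, gin_channels)    # stand-in for emb_g weights
w = torch.tensor([[0.7, 0.3, 0.0, 0.0]])              # mixing weights, sum to 1
g = w @ speaker_map                                    # (1, gin_channels) blend
assert torch.allclose(g[0], 0.7 * speaker_map[0] + 0.3 * speaker_map[1])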
nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class MultiPeriodDiscriminatorV2(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminatorV2, self).__init__() + # periods = [2, 3, 5, 7, 11, 17] + periods = [2, 3, 5, 7, 11, 17, 23, 37] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] # + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + # for j in range(len(fmap_r)): + # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 1024, + 1024, + (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = 
self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap diff --git a/lib/infer/infer_pack/modules.py b/lib/infer/infer_pack/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..1dda5f2364b71d7c5e98a22fbae92e5528babbe5 --- /dev/null +++ b/lib/infer/infer_pack/modules.py @@ -0,0 +1,519 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F + +from torch.nn import Conv1d +from torch.nn.utils import weight_norm, remove_weight_norm + +from lib.infer.infer_pack import commons +from lib.infer.infer_pack.commons import init_weights, get_padding +from lib.infer.infer_pack.transforms import piecewise_rational_quadratic_transform + + +LRELU_SLOPE = 0.1 + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + + +class ConvReluNorm(nn.Module): + def __init__( + self, + in_channels, + hidden_channels, + out_channels, + kernel_size, + n_layers, + p_dropout, + ): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 0." + + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append( + nn.Conv1d( + in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 + ) + ) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append( + nn.Conv1d( + hidden_channels, + hidden_channels, + kernel_size, + padding=kernel_size // 2, + ) + ) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class DDSConv(nn.Module): + """ + Dialted and Depth-Separable Convolution + """ + + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): + super().__init__() + self.channels = channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + + self.drop = nn.Dropout(p_dropout) + self.convs_sep = nn.ModuleList() + self.convs_1x1 = nn.ModuleList() + self.norms_1 = nn.ModuleList() + self.norms_2 = nn.ModuleList() + for i in range(n_layers): + dilation = kernel_size**i + padding = (kernel_size * dilation - dilation) // 2 + self.convs_sep.append( + nn.Conv1d( + channels, + channels, + kernel_size, + groups=channels, + dilation=dilation, + padding=padding, + ) + ) + self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) + self.norms_1.append(LayerNorm(channels)) + self.norms_2.append(LayerNorm(channels)) + + def forward(self, x, x_mask, g=None): + if g is not None: + x = x + g + for i in range(self.n_layers): + y = self.convs_sep[i](x * x_mask) + y = self.norms_1[i](y) + y = F.gelu(y) + y = self.convs_1x1[i](y) + y = self.norms_2[i](y) + y = 
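# This LayerNorm normalizes (B, C, T) tensors over the channel axis by going
# channels-last, calling F.layer_norm, and transposing back; the same trick in
# isolation on random data:
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 5)                               # (B, C, T)
gamma, beta = torch.ones(8), torch.zeros(8)
y = F.layer_norm(x.transpose(1, -1), (8,), gamma, beta, 1e-5).transpose(1, -1)
assert y.shape == x.shape                              # normalized over C per (b, t)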
F.gelu(y) + y = self.drop(y) + x = x + y + return x * x_mask + + +class WN(torch.nn.Module): + def __init__( + self, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + p_dropout=0, + ): + super(WN, self).__init__() + assert kernel_size % 2 == 1 + self.hidden_channels = hidden_channels + self.kernel_size = (kernel_size,) + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if gin_channels != 0: + cond_layer = torch.nn.Conv1d( + gin_channels, 2 * hidden_channels * n_layers, 1 + ) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") + + for i in range(n_layers): + dilation = dilation_rate**i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d( + hidden_channels, + 2 * hidden_channels, + kernel_size, + dilation=dilation, + padding=padding, + ) + in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_channels + else: + res_skip_channels = hidden_channels + + res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, x_mask, g=None, **kwargs): + output = torch.zeros_like(x) + n_channels_tensor = torch.IntTensor([self.hidden_channels]) + + if g is not None: + g = self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + if g is not None: + cond_offset = i * 2 * self.hidden_channels + g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] + else: + g_l = torch.zeros_like(x_in) + + acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) + acts = self.drop(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, : self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels :, :] + else: + output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + if self.gin_channels != 0: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(l) + + +class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ) + ), + ] + ) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + 
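# commons.fused_add_tanh_sigmoid_multiply (called inside WN.forward) is the
# WaveNet gated activation; a hedged reference version of the computation:
import torch

def gated_activation(x_in, g_l, n_channels):
    acts = x_in + g_l                                  # add the conditioning signal
    t_act = torch.tanh(acts[:, :n_channels, :])        # "filter" half
    s_act = torch.sigmoid(acts[:, n_channels:, :])     # "gate" half
    return t_act * s_act

x_in = torch.randn(1, 2 * 192, 100)                    # 2 * hidden_channels, as in WN
out = gated_activation(x_in, torch.zeros_like(x_in), 192)   # -> (1, 192, 100)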
weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + ] + ) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + ] + ) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False, + ): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=p_dropout, + gin_channels=gin_channels, + ) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = 
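# The Log flow above returns logdet = sum(-y): its Jacobian is diag(1/x), so
# log|det J| = sum(-log x). A quick numeric confirmation of that identity:
import torch

x = torch.rand(1, 3, 4) + 0.1
y = torch.log(torch.clamp_min(x, 1e-5))
logdet = torch.sum(-y, [1, 2])                  # equals sum(log(1/x)) over (C, T)
assert torch.allclose(logdet, torch.sum(torch.log(1.0 / x), [1, 2]), atol=1e-5)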
torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + def remove_weight_norm(self): + self.enc.remove_weight_norm() + + +class ConvFlow(nn.Module): + def __init__( + self, + in_channels, + filter_channels, + kernel_size, + n_layers, + num_bins=10, + tail_bound=5.0, + ): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) + self.proj = nn.Conv1d( + filter_channels, self.half_channels * (num_bins * 3 - 1), 1 + ) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] + + unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( + self.filter_channels + ) + unnormalized_derivatives = h[..., 2 * self.num_bins :] + + x1, logabsdet = piecewise_rational_quadratic_transform( + x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails="linear", + tail_bound=self.tail_bound, + ) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1, 2]) + if not reverse: + return x, logdet + else: + return x diff --git a/lib/infer/infer_pack/modules/F0Predictor/DioF0Predictor.py b/lib/infer/infer_pack/modules/F0Predictor/DioF0Predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..01a7f2586e85fed9e87d1b22ddb6e1ec87180c8b --- /dev/null +++ b/lib/infer/infer_pack/modules/F0Predictor/DioF0Predictor.py @@ -0,0 +1,90 @@ +from lib.infer.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor +import pyworld +import numpy as np + + +class DioF0Predictor(F0Predictor): + def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): + self.hop_length = hop_length + self.f0_min = f0_min + self.f0_max = f0_max + self.sampling_rate = sampling_rate + + def interpolate_f0(self, f0): + """ + 对F0进行插值处理 + """ + + data = np.reshape(f0, (f0.size, 1)) + + vuv_vector = np.zeros((data.size, 1), dtype=np.float32) + vuv_vector[data > 0.0] = 1.0 + vuv_vector[data <= 0.0] = 0.0 + + ip_data = data + + frame_number = data.size + last_value = 0.0 + for i in range(frame_number): + if data[i] <= 0.0: + j = i + 1 + for j in range(i + 1, frame_number): + if data[j] > 0.0: + break + if j < frame_number - 1: + if last_value > 0.0: + step = (data[j] - data[i - 1]) / float(j - i) + for k in range(i, j): + ip_data[k] = data[i - 1] + step * (k - i + 1) + else: + for k in range(i, j): + ip_data[k] = data[j] + else: + for k in range(i, frame_number): + ip_data[k] = last_value + else: + ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 + last_value = data[i] + + return ip_data[:, 0], vuv_vector[:, 0] + + def resize_f0(self, x, 
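# interpolate_f0 above fills unvoiced (f0 <= 0) gaps by linear interpolation
# between neighbouring voiced frames and also returns a V/UV vector. A compact
# np.interp sketch with the same intent (edge handling simplified, not the
# original loop implementation):
import numpy as np

def interpolate_f0_simple(f0):
    voiced = f0 > 0.0
    vuv = voiced.astype(np.float32)
    if voiced.any():
        idx = np.arange(len(f0))
        f0 = np.interp(idx, idx[voiced], f0[voiced])
    return f0, vuv

f0, vuv = interpolate_f0_simple(np.array([0.0, 100.0, 0.0, 0.0, 120.0]))
# f0 -> [100., 100., 106.67, 113.33, 120.] (rounded); vuv -> [0., 1., 0., 0., 1.]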
target_len): + source = np.array(x) + source[source < 0.001] = np.nan + target = np.interp( + np.arange(0, len(source) * target_len, len(source)) / target_len, + np.arange(0, len(source)), + source, + ) + res = np.nan_to_num(target) + return res + + def compute_f0(self, wav, p_len=None): + if p_len is None: + p_len = wav.shape[0] // self.hop_length + f0, t = pyworld.dio( + wav.astype(np.double), + fs=self.sampling_rate, + f0_floor=self.f0_min, + f0_ceil=self.f0_max, + frame_period=1000 * self.hop_length / self.sampling_rate, + ) + f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) + for index, pitch in enumerate(f0): + f0[index] = round(pitch, 1) + return self.interpolate_f0(self.resize_f0(f0, p_len))[0] + + def compute_f0_uv(self, wav, p_len=None): + if p_len is None: + p_len = wav.shape[0] // self.hop_length + f0, t = pyworld.dio( + wav.astype(np.double), + fs=self.sampling_rate, + f0_floor=self.f0_min, + f0_ceil=self.f0_max, + frame_period=1000 * self.hop_length / self.sampling_rate, + ) + f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) + for index, pitch in enumerate(f0): + f0[index] = round(pitch, 1) + return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/lib/infer/infer_pack/modules/F0Predictor/F0Predictor.py b/lib/infer/infer_pack/modules/F0Predictor/F0Predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..f56e49e7f0e6eab3babf0711cae2933371b9f9cc --- /dev/null +++ b/lib/infer/infer_pack/modules/F0Predictor/F0Predictor.py @@ -0,0 +1,16 @@ +class F0Predictor(object): + def compute_f0(self, wav, p_len): + """ + input: wav:[signal_length] + p_len:int + output: f0:[signal_length//hop_length] + """ + pass + + def compute_f0_uv(self, wav, p_len): + """ + input: wav:[signal_length] + p_len:int + output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] + """ + pass diff --git a/lib/infer/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/lib/infer/infer_pack/modules/F0Predictor/HarvestF0Predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..abaf6cc777bc5173d858cdbefc9a12464c03532f --- /dev/null +++ b/lib/infer/infer_pack/modules/F0Predictor/HarvestF0Predictor.py @@ -0,0 +1,86 @@ +from lib.infer.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor +import pyworld +import numpy as np + + +class HarvestF0Predictor(F0Predictor): + def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): + self.hop_length = hop_length + self.f0_min = f0_min + self.f0_max = f0_max + self.sampling_rate = sampling_rate + + def interpolate_f0(self, f0): + """ + 对F0进行插值处理 + """ + + data = np.reshape(f0, (f0.size, 1)) + + vuv_vector = np.zeros((data.size, 1), dtype=np.float32) + vuv_vector[data > 0.0] = 1.0 + vuv_vector[data <= 0.0] = 0.0 + + ip_data = data + + frame_number = data.size + last_value = 0.0 + for i in range(frame_number): + if data[i] <= 0.0: + j = i + 1 + for j in range(i + 1, frame_number): + if data[j] > 0.0: + break + if j < frame_number - 1: + if last_value > 0.0: + step = (data[j] - data[i - 1]) / float(j - i) + for k in range(i, j): + ip_data[k] = data[i - 1] + step * (k - i + 1) + else: + for k in range(i, j): + ip_data[k] = data[j] + else: + for k in range(i, frame_number): + ip_data[k] = last_value + else: + ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 + last_value = data[i] + + return ip_data[:, 0], vuv_vector[:, 0] + + def resize_f0(self, x, target_len): + source = np.array(x) + source[source < 0.001] = np.nan + target = np.interp( + 
np.arange(0, len(source) * target_len, len(source)) / target_len, + np.arange(0, len(source)), + source, + ) + res = np.nan_to_num(target) + return res + + def compute_f0(self, wav, p_len=None): + if p_len is None: + p_len = wav.shape[0] // self.hop_length + f0, t = pyworld.harvest( + wav.astype(np.double), + fs=self.sampling_rate, + f0_ceil=self.f0_max, + f0_floor=self.f0_min, + frame_period=1000 * self.hop_length / self.sampling_rate, + ) + f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) + return self.interpolate_f0(self.resize_f0(f0, p_len))[0] + + def compute_f0_uv(self, wav, p_len=None): + if p_len is None: + p_len = wav.shape[0] // self.hop_length + f0, t = pyworld.harvest( + wav.astype(np.double), + fs=self.sampling_rate, + f0_floor=self.f0_min, + f0_ceil=self.f0_max, + frame_period=1000 * self.hop_length / self.sampling_rate, + ) + f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) + return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py b/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..29b2d78eec2b4de5e617a21120abd5fb5a716ee5 --- /dev/null +++ b/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py @@ -0,0 +1,97 @@ +from lib.infer.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor +import parselmouth +import numpy as np + + +class PMF0Predictor(F0Predictor): + def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): + self.hop_length = hop_length + self.f0_min = f0_min + self.f0_max = f0_max + self.sampling_rate = sampling_rate + + def interpolate_f0(self, f0): + """ + Interpolate F0 across unvoiced frames + """ + + data = np.reshape(f0, (f0.size, 1)) + + vuv_vector = np.zeros((data.size, 1), dtype=np.float32) + vuv_vector[data > 0.0] = 1.0 + vuv_vector[data <= 0.0] = 0.0 + + ip_data = data + + frame_number = data.size + last_value = 0.0 + for i in range(frame_number): + if data[i] <= 0.0: + j = i + 1 + for j in range(i + 1, frame_number): + if data[j] > 0.0: + break + if j < frame_number - 1: + if last_value > 0.0: + step = (data[j] - data[i - 1]) / float(j - i) + for k in range(i, j): + ip_data[k] = data[i - 1] + step * (k - i + 1) + else: + for k in range(i, j): + ip_data[k] = data[j] + else: + for k in range(i, frame_number): + ip_data[k] = last_value + else: + ip_data[i] = data[i] # possibly an unnecessary copy here + last_value = data[i] + + return ip_data[:, 0], vuv_vector[:, 0] + + def compute_f0(self, wav, p_len=None): + x = wav + if p_len is None: + p_len = x.shape[0] // self.hop_length + else: + assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" + time_step = self.hop_length / self.sampling_rate * 1000 + f0 = ( + parselmouth.Sound(x, self.sampling_rate) + .to_pitch_ac( + time_step=time_step / 1000, + voicing_threshold=0.6, + pitch_floor=self.f0_min, + pitch_ceiling=self.f0_max, + ) + .selected_array["frequency"] + ) + + pad_size = (p_len - len(f0) + 1) // 2 + if pad_size > 0 or p_len - len(f0) - pad_size > 0: + f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") + f0, uv = self.interpolate_f0(f0) + return f0 + + def compute_f0_uv(self, wav, p_len=None): + x = wav + if p_len is None: + p_len = x.shape[0] // self.hop_length + else: + assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" + time_step = self.hop_length / self.sampling_rate * 1000 + f0 = ( + parselmouth.Sound(x, self.sampling_rate) + .to_pitch_ac( + time_step=time_step /
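# parselmouth typically returns a few frames fewer than p_len, so the PM
# predictor centre-pads the track with zeros; the padding arithmetic in
# isolation (toy numbers):
import numpy as np

p_len, f0 = 100, np.random.rand(97) * 200 + 50        # track is 3 frames short
pad_size = (p_len - len(f0) + 1) // 2                  # 2 frames on the left
f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
assert len(f0) == p_len                                # exactly p_len frames now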
1000, + voicing_threshold=0.6, + pitch_floor=self.f0_min, + pitch_ceiling=self.f0_max, + ) + .selected_array["frequency"] + ) + + pad_size = (p_len - len(f0) + 1) // 2 + if pad_size > 0 or p_len - len(f0) - pad_size > 0: + f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") + f0, uv = self.interpolate_f0(f0) + return f0, uv diff --git a/lib/infer/infer_pack/modules/F0Predictor/__init__.py b/lib/infer/infer_pack/modules/F0Predictor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/infer/infer_pack/onnx_inference.py b/lib/infer/infer_pack/onnx_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..cbf6f71ce63bdfa9ff4f6b1a02f8feec9ab6ea92 --- /dev/null +++ b/lib/infer/infer_pack/onnx_inference.py @@ -0,0 +1,144 @@ +import onnxruntime +import librosa +import numpy as np + + +class ContentVec: + def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None): + print("load model(s) from {}".format(vec_path)) + if device == "cpu" or device is None: + providers = ["CPUExecutionProvider"] + elif device == "cuda": + providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] + elif device == "dml": + providers = ["DmlExecutionProvider"] + else: + raise RuntimeError("Unsupported device") + self.model = onnxruntime.InferenceSession(vec_path, providers=providers) + + def __call__(self, wav): + return self.forward(wav) + + def forward(self, wav): + feats = wav + if feats.ndim == 2: # double channels + feats = feats.mean(-1) + assert feats.ndim == 1, feats.ndim + feats = np.expand_dims(np.expand_dims(feats, 0), 0) + onnx_input = {self.model.get_inputs()[0].name: feats} + logits = self.model.run(None, onnx_input)[0] + return logits.transpose(0, 2, 1) + + +def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kwargs): + if f0_predictor == "pm": + from lib.infer.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor + + f0_predictor_object = PMF0Predictor( + hop_length=hop_length, sampling_rate=sampling_rate + ) + elif f0_predictor == "harvest": + from lib.infer.infer_pack.modules.F0Predictor.HarvestF0Predictor import ( + HarvestF0Predictor, + ) + + f0_predictor_object = HarvestF0Predictor( + hop_length=hop_length, sampling_rate=sampling_rate + ) + elif f0_predictor == "dio": + from lib.infer.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor + + f0_predictor_object = DioF0Predictor( + hop_length=hop_length, sampling_rate=sampling_rate + ) + else: + raise Exception("Unknown f0 predictor") + return f0_predictor_object + + +class OnnxRVC: + def __init__( + self, + model_path, + sr=40000, + hop_size=512, + vec_path="vec-768-layer-12", + device="cpu", + ): + vec_path = f"pretrained/{vec_path}.onnx" + self.vec_model = ContentVec(vec_path, device) + if device == "cpu" or device is None: + providers = ["CPUExecutionProvider"] + elif device == "cuda": + providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] + elif device == "dml": + providers = ["DmlExecutionProvider"] + else: + raise RuntimeError("Unsupported device") + self.model = onnxruntime.InferenceSession(model_path, providers=providers) + self.sampling_rate = sr + self.hop_size = hop_size + + def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): + onnx_input = { + self.model.get_inputs()[0].name: hubert, + self.model.get_inputs()[1].name: hubert_length, + self.model.get_inputs()[2].name: pitch, + self.model.get_inputs()[3].name: pitchf, + 
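# Inputs bind to the exported graph positionally; this order matches the input_names list used at export time (phone, phone_lengths, pitch, pitchf, ds, rnd). + 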
self.model.get_inputs()[4].name: ds, + self.model.get_inputs()[5].name: rnd, + } + return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) + + def inference( + self, + raw_path, + sid, + f0_method="dio", + f0_up_key=0, + pad_time=0.5, + cr_threshold=0.02, + ): + f0_min = 50 + f0_max = 1100 + f0_mel_min = 1127 * np.log(1 + f0_min / 700) + f0_mel_max = 1127 * np.log(1 + f0_max / 700) + f0_predictor = get_f0_predictor( + f0_method, + hop_length=self.hop_size, + sampling_rate=self.sampling_rate, + threshold=cr_threshold, + ) + wav, sr = librosa.load(raw_path, sr=self.sampling_rate) + org_length = len(wav) + if org_length / sr > 50.0: + raise RuntimeError("Reached Max Length") + + wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000) + + hubert = self.vec_model(wav16k) + hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) + hubert_length = hubert.shape[1] + + pitchf = f0_predictor.compute_f0(wav, hubert_length) + pitchf = pitchf * 2 ** (f0_up_key / 12) + pitch = pitchf.copy() + f0_mel = 1127 * np.log(1 + pitch / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( + f0_mel_max - f0_mel_min + ) + 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > 255] = 255 + pitch = np.rint(f0_mel).astype(np.int64) + + pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) + pitch = pitch.reshape(1, len(pitch)) + ds = np.array([sid]).astype(np.int64) + + rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) + hubert_length = np.array([hubert_length]).astype(np.int64) + + out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() + out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") + return out_wav[0:org_length] diff --git a/lib/infer/infer_pack/tensorlowest.py b/lib/infer/infer_pack/tensorlowest.py new file mode 100644 index 0000000000000000000000000000000000000000..2ba93af65689b6b105681ea3ada071b065ac41c6 --- /dev/null +++ b/lib/infer/infer_pack/tensorlowest.py @@ -0,0 +1,123 @@ +from tensorboard.backend.event_processing import event_accumulator + +import os +from shutil import copy2 +from re import search as RSearch +import pandas as pd +from ast import literal_eval as LEval + +weights_dir = 'logs/weights/' + +def find_biggest_tensorboard(tensordir): + try: + files = [f for f in os.listdir(tensordir) if f.endswith('.0')] + if not files: + print("No files with the '.0' extension found!") + return + + max_size = 0 + biggest_file = "" + + for file in files: + file_path = os.path.join(tensordir, file) + if os.path.isfile(file_path): + file_size = os.path.getsize(file_path) + if file_size > max_size: + max_size = file_size + biggest_file = file + + return biggest_file + + except FileNotFoundError: + print("Couldn't find your model!") + return + +def main(model_name, save_freq, lastmdls): + global lowestval_weight_dir, scl + + tensordir = os.path.join('logs', model_name) + lowestval_weight_dir = os.path.join(tensordir, "lowestvals") + + latest_file = find_biggest_tensorboard(tensordir) + + if latest_file is None: + print("Couldn't find a valid tensorboard file!") + return + + tfile = os.path.join(tensordir, latest_file) + + ea = event_accumulator.EventAccumulator(tfile, + size_guidance={ + event_accumulator.COMPRESSED_HISTOGRAMS: 500, + event_accumulator.IMAGES: 4, + event_accumulator.AUDIO: 4, + event_accumulator.SCALARS: 0, + event_accumulator.HISTOGRAMS: 1, + }) + + ea.Reload() + ea.Tags() + + scl = ea.Scalars('loss/g/total') + + listwstep = {} + + for val in scl: + if 
(val.step // save_freq) * save_freq in [val.step for val in scl]: + listwstep[float(val.value)] = (val.step // save_freq) * save_freq + + lowest_vals = sorted(listwstep.keys())[:lastmdls] + + sorted_dict = {value: step for value, step in listwstep.items() if value in lowest_vals} + + return sorted_dict + +def selectweights(model_name, file_dict, weights_dir, lowestval_weight_dir): + os.makedirs(lowestval_weight_dir, exist_ok=True) + logdir = [] + files = [] + lbldict = { + 'Values': {}, + 'Names': {} + } + weights_dir_path = os.path.join(weights_dir, "") + low_val_path = os.path.join(os.getcwd(), os.path.join(lowestval_weight_dir, "")) + + try: + file_dict = LEval(file_dict) + except Exception as e: + print(f"Error! {e}") + return f"Couldn't load tensorboard file! {e}" + + weights = [f for f in os.scandir(weights_dir)] + for key, value in file_dict.items(): + pattern = fr"^{model_name}_.*_s{value}\.pth$" + matching_weights = [f.name for f in weights if f.is_file() and RSearch(pattern, f.name)] + for weight in matching_weights: + source_path = weights_dir_path + weight + destination_path = os.path.join(lowestval_weight_dir, weight) + + copy2(source_path, destination_path) + + logdir.append(f"File = {weight} Value: {key}, Step: {value}") + + lbldict['Names'][weight] = weight + lbldict['Values'][weight] = key + + files.append(low_val_path + weight) + + print(f"File = {weight} Value: {key}, Step: {value}") + + yield ('\n'.join(logdir), files, pd.DataFrame(lbldict)) + + + return ''.join(logdir), files, pd.DataFrame(lbldict) + + +if __name__ == "__main__": + model = str(input("Enter the name of the model: ")) + sav_freq = int(input("Enter save frequency of the model: ")) + lastmdls = int(input("Enter how many lowest-loss models to keep: ")) + ds = main(model, sav_freq, lastmdls) + + if ds: + for _ in selectweights(model, str(ds), weights_dir, lowestval_weight_dir): + pass + \ No newline at end of file diff --git a/lib/infer/infer_pack/transforms.py b/lib/infer/infer_pack/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..a11f799e023864ff7082c1f49c0cc18351a13b47 --- /dev/null +++ b/lib/infer/infer_pack/transforms.py @@ -0,0 +1,209 @@ +import torch +from torch.nn import functional as F + +import numpy as np + + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = {"tails": tails, "tail_bound": tail_bound} + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + + +def unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails="linear", + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + 
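# tail_bound sets the [-B, B] box on which the spline is defined; inputs outside it pass through the "linear" tails unchanged, with zero log-determinant. + 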
min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == "linear": + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError("{} tails are not implemented.".format(tails)) + + ( + outputs[inside_interval_mask], + logabsdet[inside_interval_mask], + ) = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, + right=tail_bound, + bottom=-tail_bound, + top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + ) + + return outputs, logabsdet + + +def rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0.0, + right=1.0, + bottom=0.0, + top=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError("Input to a transform is not within its domain") + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError("Minimal bin width too large for the number of bins") + if min_bin_height * num_bins > 1.0: + raise ValueError("Minimal bin height too large for the number of bins") + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (inputs - input_cumheights) * ( + input_derivatives + 
input_derivatives_plus_one - 2 * input_delta + ) + input_heights * (input_delta - input_derivatives) + b = input_heights * input_derivatives - (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + c = -input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta + ) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2) + ) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * ( + input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta + ) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta + ) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2) + ) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet diff --git a/lib/infer/modules/ipex/__init__.py.py b/lib/infer/modules/ipex/__init__.py.py new file mode 100644 index 0000000000000000000000000000000000000000..9f53b2d3f7025b2d71369dababa4e6f2a4affc48 --- /dev/null +++ b/lib/infer/modules/ipex/__init__.py.py @@ -0,0 +1,165 @@ +import os +import sys +import contextlib +import torch +import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import +from .hijacks import ipex_hijacks +from .attention import attention_init + +# pylint: disable=protected-access, missing-function-docstring, line-too-long + +def ipex_init(): # pylint: disable=too-many-statements + try: + #Replace cuda with xpu: + torch.cuda.current_device = torch.xpu.current_device + torch.cuda.current_stream = torch.xpu.current_stream + torch.cuda.device = torch.xpu.device + torch.cuda.device_count = torch.xpu.device_count + torch.cuda.device_of = torch.xpu.device_of + torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard + torch.cuda.get_device_name = torch.xpu.get_device_name + torch.cuda.get_device_properties = torch.xpu.get_device_properties + torch.cuda.init = torch.xpu.init + torch.cuda.is_available = torch.xpu.is_available + torch.cuda.is_initialized = torch.xpu.is_initialized + torch.cuda.is_current_stream_capturing = lambda: False + torch.cuda.set_device = torch.xpu.set_device + torch.cuda.stream = torch.xpu.stream + torch.cuda.synchronize = torch.xpu.synchronize + torch.cuda.Event = torch.xpu.Event + torch.cuda.Stream = torch.xpu.Stream + torch.cuda.FloatTensor = torch.xpu.FloatTensor + torch.Tensor.cuda = torch.Tensor.xpu + torch.Tensor.is_cuda = torch.Tensor.is_xpu + torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock + torch.cuda._initialized = torch.xpu.lazy_init._initialized + torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker + torch.cuda._queued_calls = 
torch.xpu.lazy_init._queued_calls + torch.cuda._tls = torch.xpu.lazy_init._tls + torch.cuda.threading = torch.xpu.lazy_init.threading + torch.cuda.traceback = torch.xpu.lazy_init.traceback + torch.cuda.Optional = torch.xpu.Optional + torch.cuda.__cached__ = torch.xpu.__cached__ + torch.cuda.__loader__ = torch.xpu.__loader__ + torch.cuda.ComplexFloatStorage = torch.xpu.ComplexFloatStorage + torch.cuda.Tuple = torch.xpu.Tuple + torch.cuda.streams = torch.xpu.streams + torch.cuda._lazy_new = torch.xpu._lazy_new + torch.cuda.FloatStorage = torch.xpu.FloatStorage + torch.cuda.Any = torch.xpu.Any + torch.cuda.__doc__ = torch.xpu.__doc__ + torch.cuda.default_generators = torch.xpu.default_generators + torch.cuda.HalfTensor = torch.xpu.HalfTensor + torch.cuda._get_device_index = torch.xpu._get_device_index + torch.cuda.__path__ = torch.xpu.__path__ + torch.cuda.Device = torch.xpu.Device + torch.cuda.IntTensor = torch.xpu.IntTensor + torch.cuda.ByteStorage = torch.xpu.ByteStorage + torch.cuda.set_stream = torch.xpu.set_stream + torch.cuda.BoolStorage = torch.xpu.BoolStorage + torch.cuda.os = torch.xpu.os + torch.cuda.torch = torch.xpu.torch + torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage + torch.cuda.Union = torch.xpu.Union + torch.cuda.DoubleTensor = torch.xpu.DoubleTensor + torch.cuda.ShortTensor = torch.xpu.ShortTensor + torch.cuda.LongTensor = torch.xpu.LongTensor + torch.cuda.IntStorage = torch.xpu.IntStorage + torch.cuda.LongStorage = torch.xpu.LongStorage + torch.cuda.__annotations__ = torch.xpu.__annotations__ + torch.cuda.__package__ = torch.xpu.__package__ + torch.cuda.__builtins__ = torch.xpu.__builtins__ + torch.cuda.CharTensor = torch.xpu.CharTensor + torch.cuda.List = torch.xpu.List + torch.cuda._lazy_init = torch.xpu._lazy_init + torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor + torch.cuda.DoubleStorage = torch.xpu.DoubleStorage + torch.cuda.ByteTensor = torch.xpu.ByteTensor + torch.cuda.StreamContext = torch.xpu.StreamContext + torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage + torch.cuda.ShortStorage = torch.xpu.ShortStorage + torch.cuda._lazy_call = torch.xpu._lazy_call + torch.cuda.HalfStorage = torch.xpu.HalfStorage + torch.cuda.random = torch.xpu.random + torch.cuda._device = torch.xpu._device + torch.cuda.classproperty = torch.xpu.classproperty + torch.cuda.__name__ = torch.xpu.__name__ + torch.cuda._device_t = torch.xpu._device_t + torch.cuda.warnings = torch.xpu.warnings + torch.cuda.__spec__ = torch.xpu.__spec__ + torch.cuda.BoolTensor = torch.xpu.BoolTensor + torch.cuda.CharStorage = torch.xpu.CharStorage + torch.cuda.__file__ = torch.xpu.__file__ + torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork + #torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing + + #Memory: + torch.cuda.memory = torch.xpu.memory + if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read(): + torch.xpu.empty_cache = lambda: None + torch.cuda.empty_cache = torch.xpu.empty_cache + torch.cuda.memory_stats = torch.xpu.memory_stats + torch.cuda.memory_summary = torch.xpu.memory_summary + torch.cuda.memory_snapshot = torch.xpu.memory_snapshot + torch.cuda.memory_allocated = torch.xpu.memory_allocated + torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated + torch.cuda.memory_reserved = torch.xpu.memory_reserved + torch.cuda.memory_cached = torch.xpu.memory_reserved + torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved + torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved + 
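# XPU exposes a single peak-stats reset, so the three CUDA reset helpers below all map onto reset_peak_memory_stats (an approximation of CUDA's separate counters). + 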
torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats + torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats + torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats + torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict + torch.cuda.reset_accumulated_memory_stats = torch.xpu.reset_accumulated_memory_stats + + #RNG: + torch.cuda.get_rng_state = torch.xpu.get_rng_state + torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all + torch.cuda.set_rng_state = torch.xpu.set_rng_state + torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all + torch.cuda.manual_seed = torch.xpu.manual_seed + torch.cuda.manual_seed_all = torch.xpu.manual_seed_all + torch.cuda.seed = torch.xpu.seed + torch.cuda.seed_all = torch.xpu.seed_all + torch.cuda.initial_seed = torch.xpu.initial_seed + + #AMP: + torch.cuda.amp = torch.xpu.amp + if not hasattr(torch.cuda.amp, "common"): + torch.cuda.amp.common = contextlib.nullcontext() + torch.cuda.amp.common.amp_definitely_not_available = lambda: False + try: + torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler + except Exception: # pylint: disable=broad-exception-caught + try: + from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error + gradscaler_init() + torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler + except Exception: # pylint: disable=broad-exception-caught + torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler + + #C + torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream + ipex._C._DeviceProperties.major = 2023 + ipex._C._DeviceProperties.minor = 2 + + #Fix functions with ipex: + torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory] + torch._utils._get_available_device_type = lambda: "xpu" + torch.has_cuda = True + torch.cuda.has_half = True + torch.cuda.is_bf16_supported = lambda *args, **kwargs: True + torch.cuda.is_fp16_supported = lambda *args, **kwargs: True + torch.version.cuda = "11.7" + torch.cuda.get_device_capability = lambda *args, **kwargs: [11,7] + torch.cuda.get_device_properties.major = 11 + torch.cuda.get_device_properties.minor = 7 + torch.cuda.ipc_collect = lambda *args, **kwargs: None + torch.cuda.utilization = lambda *args, **kwargs: 0 + + ipex_hijacks() + attention_init() + except Exception as e: + return False, e + return True, None \ No newline at end of file diff --git a/lib/infer/modules/ipex/attention.py b/lib/infer/modules/ipex/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..b555579a17ba03db21599b902fd249e5460cada6 --- /dev/null +++ b/lib/infer/modules/ipex/attention.py @@ -0,0 +1,127 @@ +import torch + +# pylint: disable=protected-access, missing-function-docstring, line-too-long + +original_torch_bmm = torch.bmm +def torch_bmm(input, mat2, *, out=None): + if input.dtype != mat2.dtype: + mat2 = mat2.to(input.dtype) + + #ARC GPUs can't allocate more than 4GB to a single block, Slice it: + batch_size_attention, input_tokens, mat2_shape = input.shape[0], input.shape[1], mat2.shape[2] + block_multiply = 2.4 if input.dtype == torch.float32 else 1.2 + block_size = (batch_size_attention * input_tokens * mat2_shape) / 1024 * block_multiply #MB + split_slice_size = batch_size_attention + if block_size >= 4000: + do_split = True + #Find something divisible with the input_tokens + while ((split_slice_size * input_tokens * 
mat2_shape) / 1024 * block_multiply) > 4000: + split_slice_size = split_slice_size // 2 + if split_slice_size <= 1: + split_slice_size = 1 + break + else: + do_split = False + + split_block_size = (split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply #MB + split_2_slice_size = input_tokens + if split_block_size >= 4000: + do_split_2 = True + #Find something divisible with the input_tokens + while ((split_slice_size * split_2_slice_size * mat2_shape) / 1024 * block_multiply) > 4000: + split_2_slice_size = split_2_slice_size // 2 + if split_2_slice_size <= 1: + split_2_slice_size = 1 + break + else: + do_split_2 = False + + if do_split: + hidden_states = torch.zeros(input.shape[0], input.shape[1], mat2.shape[2], device=input.device, dtype=input.dtype) + for i in range(batch_size_attention // split_slice_size): + start_idx = i * split_slice_size + end_idx = (i + 1) * split_slice_size + if do_split_2: + for i2 in range(input_tokens // split_2_slice_size): # pylint: disable=invalid-name + start_idx_2 = i2 * split_2_slice_size + end_idx_2 = (i2 + 1) * split_2_slice_size + hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = original_torch_bmm( + input[start_idx:end_idx, start_idx_2:end_idx_2], + mat2[start_idx:end_idx, start_idx_2:end_idx_2], + out=out + ) + else: + hidden_states[start_idx:end_idx] = original_torch_bmm( + input[start_idx:end_idx], + mat2[start_idx:end_idx], + out=out + ) + else: + return original_torch_bmm(input, mat2, out=out) + return hidden_states + +original_scaled_dot_product_attention = torch.nn.functional.scaled_dot_product_attention +def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False): + #ARC GPUs can't allocate more than 4GB to a single block, Slice it: + shape_one, batch_size_attention, query_tokens, shape_four = query.shape + block_multiply = 2.4 if query.dtype == torch.float32 else 1.2 + block_size = (shape_one * batch_size_attention * query_tokens * shape_four) / 1024 * block_multiply #MB + split_slice_size = batch_size_attention + if block_size >= 4000: + do_split = True + #Find something divisible with the shape_one + while ((shape_one * split_slice_size * query_tokens * shape_four) / 1024 * block_multiply) > 4000: + split_slice_size = split_slice_size // 2 + if split_slice_size <= 1: + split_slice_size = 1 + break + else: + do_split = False + + split_block_size = (shape_one * split_slice_size * query_tokens * shape_four) / 1024 * block_multiply #MB + split_2_slice_size = query_tokens + if split_block_size >= 4000: + do_split_2 = True + #Find something divisible with the batch_size_attention + while ((shape_one * split_slice_size * split_2_slice_size * shape_four) / 1024 * block_multiply) > 4000: + split_2_slice_size = split_2_slice_size // 2 + if split_2_slice_size <= 1: + split_2_slice_size = 1 + break + else: + do_split_2 = False + + if do_split: + hidden_states = torch.zeros(query.shape, device=query.device, dtype=query.dtype) + for i in range(batch_size_attention // split_slice_size): + start_idx = i * split_slice_size + end_idx = (i + 1) * split_slice_size + if do_split_2: + for i2 in range(query_tokens // split_2_slice_size): # pylint: disable=invalid-name + start_idx_2 = i2 * split_2_slice_size + end_idx_2 = (i2 + 1) * split_2_slice_size + hidden_states[:, start_idx:end_idx, start_idx_2:end_idx_2] = original_scaled_dot_product_attention( + query[:, start_idx:end_idx, start_idx_2:end_idx_2], + key[:, start_idx:end_idx, start_idx_2:end_idx_2], + value[:, start_idx:end_idx, 
start_idx_2:end_idx_2], + attn_mask=attn_mask[:, start_idx:end_idx, start_idx_2:end_idx_2] if attn_mask is not None else attn_mask, + dropout_p=dropout_p, is_causal=is_causal + ) + else: + hidden_states[:, start_idx:end_idx] = original_scaled_dot_product_attention( + query[:, start_idx:end_idx], + key[:, start_idx:end_idx], + value[:, start_idx:end_idx], + attn_mask=attn_mask[:, start_idx:end_idx] if attn_mask is not None else attn_mask, + dropout_p=dropout_p, is_causal=is_causal + ) + else: + return original_scaled_dot_product_attention( + query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal + ) + return hidden_states + +def attention_init(): + #ARC GPUs can't allocate more than 4GB to a single block: + torch.bmm = torch_bmm + torch.nn.functional.scaled_dot_product_attention = scaled_dot_product_attention \ No newline at end of file diff --git a/lib/infer/modules/ipex/gradscaler.py b/lib/infer/modules/ipex/gradscaler.py new file mode 100644 index 0000000000000000000000000000000000000000..3c265ddb37453f02870afb481360c9cc30b05d81 --- /dev/null +++ b/lib/infer/modules/ipex/gradscaler.py @@ -0,0 +1,179 @@ +from collections import defaultdict +import torch +import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import +import intel_extension_for_pytorch._C as core # pylint: disable=import-error, unused-import + +# pylint: disable=protected-access, missing-function-docstring, line-too-long + +OptState = ipex.cpu.autocast._grad_scaler.OptState +_MultiDeviceReplicator = ipex.cpu.autocast._grad_scaler._MultiDeviceReplicator +_refresh_per_optimizer_state = ipex.cpu.autocast._grad_scaler._refresh_per_optimizer_state + +def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16): # pylint: disable=unused-argument + per_device_inv_scale = _MultiDeviceReplicator(inv_scale) + per_device_found_inf = _MultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be hundreds of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. + per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated] + # sync grad to master weight + if hasattr(optimizer, "sync_grad"): + optimizer.sync_grad() + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + if param.grad is None: + continue + if (not allow_fp16) and param.grad.dtype == torch.float16: + raise ValueError("Attempting to unscale FP16 gradients.") + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled fp16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). + if param.grad.dtype is torch.float16: + param.grad = param.grad.coalesce() + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + # -: is there a way to split by device and dtype without appending in the inner loop? 
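+ # The foreach non-finite check below is invoked on CPU tensors by IPEX, so every grad is staged to "cpu" first; stock GradScaler keeps grads on their own device.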
+ to_unscale = to_unscale.to("cpu") + per_device_and_dtype_grads[to_unscale.device][ + to_unscale.dtype + ].append(to_unscale) + + for _, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + core._amp_foreach_non_finite_check_and_unscale_( + grads, + per_device_found_inf.get("cpu"), + per_device_inv_scale.get("cpu"), + ) + + return per_device_found_inf._per_device_tensors + +def unscale_(self, optimizer): + """ + Divides ("unscales") the optimizer's gradient tensors by the scale factor. + :meth:`unscale_` is optional, serving cases where you need to + :ref:`modify or inspect gradients` + between the backward pass(es) and :meth:`step`. + If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. + Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: + ... + scaler.scale(loss).backward() + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) + scaler.step(optimizer) + scaler.update() + Args: + optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. + .. warning:: + :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, + and only after all gradients for that optimizer's assigned parameters have been accumulated. + Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. + .. warning:: + :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. + """ + if not self._enabled: + return + + self._check_scale_growth_tracker("unscale_") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.UNSCALED: # pylint: disable=no-else-raise + raise RuntimeError( + "unscale_() has already been called on this optimizer since the last update()." + ) + elif optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError("unscale_() is being called after step().") + + # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. + assert self._scale is not None + inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device) + found_inf = torch.full( + (1,), 0.0, dtype=torch.float32, device=self._scale.device + ) + + optimizer_state["found_inf_per_device"] = self._unscale_grads_( + optimizer, inv_scale, found_inf, False + ) + optimizer_state["stage"] = OptState.UNSCALED + +def update(self, new_scale=None): + """ + Updates the scale factor. + If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` + to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, + the scale is multiplied by ``growth_factor`` to increase it. + Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not + used directly, it's used to fill GradScaler's internal scale tensor. So if + ``new_scale`` was a tensor, later in-place changes to that tensor will not further + affect the scale GradScaler uses internally.) + Args: + new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor. + .. warning:: + :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has + been invoked for all optimizers used this iteration. + """ + if not self._enabled: + return + + _scale, _growth_tracker = self._check_scale_growth_tracker("update") + + if new_scale is not None: + # Accept a new user-defined scale. 
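+ # A float fills the internal scale tensor in place; a 1-element FloatTensor is copied, so later in-place edits to the caller's tensor never reach the scaler.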
+ if isinstance(new_scale, float): + self._scale.fill_(new_scale) # type: ignore[union-attr] + else: + reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False." + assert isinstance(new_scale, torch.FloatTensor), reason # type: ignore[attr-defined] + assert new_scale.numel() == 1, reason + assert new_scale.requires_grad is False, reason + self._scale.copy_(new_scale) # type: ignore[union-attr] + else: + # Consume shared inf/nan data collected from optimizers to update the scale. + # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. + found_infs = [ + found_inf.to(device="cpu", non_blocking=True) + for state in self._per_optimizer_states.values() + for found_inf in state["found_inf_per_device"].values() + ] + + assert len(found_infs) > 0, "No inf checks were recorded prior to update." + + found_inf_combined = found_infs[0] + if len(found_infs) > 1: + for i in range(1, len(found_infs)): + found_inf_combined += found_infs[i] + + to_device = _scale.device + _scale = _scale.to("cpu") + _growth_tracker = _growth_tracker.to("cpu") + + core._amp_update_scale_( + _scale, + _growth_tracker, + found_inf_combined, + self._growth_factor, + self._backoff_factor, + self._growth_interval, + ) + + _scale = _scale.to(to_device) + _growth_tracker = _growth_tracker.to(to_device) + # To prepare for next iteration, clear the data collected from optimizers this iteration. + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + +def gradscaler_init(): + torch.xpu.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler + torch.xpu.amp.GradScaler._unscale_grads_ = _unscale_grads_ + torch.xpu.amp.GradScaler.unscale_ = unscale_ + torch.xpu.amp.GradScaler.update = update + return torch.xpu.amp.GradScaler \ No newline at end of file diff --git a/lib/infer/modules/ipex/hijacks.py b/lib/infer/modules/ipex/hijacks.py new file mode 100644 index 0000000000000000000000000000000000000000..855e5cb9ec4791ed771808dfa52607aae047b840 --- /dev/null +++ b/lib/infer/modules/ipex/hijacks.py @@ -0,0 +1,195 @@ +import contextlib +import importlib +import torch + +# pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return + +class CondFunc: # pylint: disable=missing-class-docstring + def __new__(cls, orig_func, sub_func, cond_func): + self = super(CondFunc, cls).__new__(cls) + if isinstance(orig_func, str): + func_path = orig_func.split('.') + for i in range(len(func_path)-1, -1, -1): + try: + resolved_obj = importlib.import_module('.'.join(func_path[:i])) + break + except ImportError: + pass + for attr_name in func_path[i:-1]: + resolved_obj = getattr(resolved_obj, attr_name) + orig_func = getattr(resolved_obj, func_path[-1]) + setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) + self.__init__(orig_func, sub_func, cond_func) + return lambda *args, **kwargs: self(*args, **kwargs) + def __init__(self, orig_func, sub_func, cond_func): + self.__orig_func = orig_func + self.__sub_func = sub_func + self.__cond_func = cond_func + def __call__(self, *args, **kwargs): + if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): + return self.__sub_func(self.__orig_func, *args, **kwargs) + else: + return self.__orig_func(*args, **kwargs) + +_utils = torch.utils.data._utils +def _shutdown_workers(self): + if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or 
torch.utils.data._utils.python_exit_status is None: + return + if hasattr(self, "_shutdown") and not self._shutdown: + self._shutdown = True + try: + if hasattr(self, '_pin_memory_thread'): + self._pin_memory_thread_done_event.set() + self._worker_result_queue.put((None, None)) + self._pin_memory_thread.join() + self._worker_result_queue.cancel_join_thread() + self._worker_result_queue.close() + self._workers_done_event.set() + for worker_id in range(len(self._workers)): + if self._persistent_workers or self._workers_status[worker_id]: + self._mark_worker_as_unavailable(worker_id, shutdown=True) + for w in self._workers: # pylint: disable=invalid-name + w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL) + for q in self._index_queues: # pylint: disable=invalid-name + q.cancel_join_thread() + q.close() + finally: + if self._worker_pids_set: + torch.utils.data._utils.signal_handling._remove_worker_pids(id(self)) + self._worker_pids_set = False + for w in self._workers: # pylint: disable=invalid-name + if w.is_alive(): + w.terminate() + +class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods + def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument + if isinstance(device_ids, list) and len(device_ids) > 1: + print("IPEX backend doesn't support DataParallel on multiple XPU devices") + return module.to("xpu") + +def return_null_context(*args, **kwargs): # pylint: disable=unused-argument + return contextlib.nullcontext() + +def check_device(device): + return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int)) + +def return_xpu(device): + return f"xpu:{device[-1]}" if isinstance(device, str) and ":" in device else f"xpu:{device}" if isinstance(device, int) else torch.device("xpu") if isinstance(device, torch.device) else "xpu" + +def ipex_no_cuda(orig_func, *args, **kwargs): + torch.cuda.is_available = lambda: False + orig_func(*args, **kwargs) + torch.cuda.is_available = torch.xpu.is_available + +original_autocast = torch.autocast +def ipex_autocast(*args, **kwargs): + if len(args) > 0 and args[0] == "cuda": + return original_autocast("xpu", *args[1:], **kwargs) + else: + return original_autocast(*args, **kwargs) + +original_torch_cat = torch.cat +def torch_cat(tensor, *args, **kwargs): + if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype): + return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs) + else: + return original_torch_cat(tensor, *args, **kwargs) + +original_interpolate = torch.nn.functional.interpolate +def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments + if antialias or align_corners is not None: + return_device = tensor.device + return_dtype = tensor.dtype + return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode, + align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype) + else: + return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode, + align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias) + +original_linalg_solve = 
torch.linalg.solve +def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name + if A.device != torch.device("cpu") or B.device != torch.device("cpu"): + return_device = A.device + return original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device) + else: + return original_linalg_solve(A, B, *args, **kwargs) + +def ipex_hijacks(): + CondFunc('torch.Tensor.to', + lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs), + lambda orig_func, self, device=None, *args, **kwargs: check_device(device)) + CondFunc('torch.Tensor.cuda', + lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs), + lambda orig_func, self, device=None, *args, **kwargs: check_device(device)) + CondFunc('torch.empty', + lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), + lambda orig_func, *args, device=None, **kwargs: check_device(device)) + CondFunc('torch.load', + lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs), + lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location)) + CondFunc('torch.randn', + lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), + lambda orig_func, *args, device=None, **kwargs: check_device(device)) + CondFunc('torch.ones', + lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), + lambda orig_func, *args, device=None, **kwargs: check_device(device)) + CondFunc('torch.zeros', + lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), + lambda orig_func, *args, device=None, **kwargs: check_device(device)) + CondFunc('torch.tensor', + lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), + lambda orig_func, *args, device=None, **kwargs: check_device(device)) + CondFunc('torch.linspace', + lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs), + lambda orig_func, *args, device=None, **kwargs: check_device(device)) + + CondFunc('torch.Generator', + lambda orig_func, device=None: torch.xpu.Generator(device), + lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu") + + CondFunc('torch.batch_norm', + lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input, + weight if weight is not None else torch.ones(input.size()[1], device=input.device), + bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs), + lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu")) + CondFunc('torch.instance_norm', + lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input, + weight if weight is not None else torch.ones(input.size()[1], device=input.device), + bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs), + lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu")) + + #Functions with dtype errors: + CondFunc('torch.nn.modules.GroupNorm.forward', + lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), + lambda orig_func, self, input: input.dtype != self.weight.data.dtype) + CondFunc('torch.nn.modules.linear.Linear.forward', + lambda orig_func, self, input: 
orig_func(self, input.to(self.weight.data.dtype)), + lambda orig_func, self, input: input.dtype != self.weight.data.dtype) + CondFunc('torch.nn.modules.conv.Conv2d.forward', + lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)), + lambda orig_func, self, input: input.dtype != self.weight.data.dtype) + CondFunc('torch.nn.functional.layer_norm', + lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: + orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs), + lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs: + weight is not None and input.dtype != weight.data.dtype) + + #Diffusers Float64 (ARC GPUs don't support double or Float64): + if not torch.xpu.has_fp64_dtype(): + CondFunc('torch.from_numpy', + lambda orig_func, ndarray: orig_func(ndarray.astype('float32')), + lambda orig_func, ndarray: ndarray.dtype == float) + + #Broken functions when torch.cuda.is_available is True: + CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__', + lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs), + lambda orig_func, *args, **kwargs: True) + + #Functions that make compile mad with CondFunc: + torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers + torch.nn.DataParallel = DummyDataParallel + torch.autocast = ipex_autocast + torch.cat = torch_cat + torch.linalg.solve = linalg_solve + torch.nn.functional.interpolate = interpolate + torch.backends.cuda.sdp_kernel = return_null_context \ No newline at end of file diff --git a/lib/infer/modules/onnx/export.py b/lib/infer/modules/onnx/export.py new file mode 100644 index 0000000000000000000000000000000000000000..f7a35562f4a15c9024ac6df47b213c6a0cf6c7a0 --- /dev/null +++ b/lib/infer/modules/onnx/export.py @@ -0,0 +1,52 @@ +import torch + +from lib.infer.infer_libs.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM + + +def export_onnx(ModelPath, ExportedPath): + cpt = torch.load(ModelPath, map_location="cpu") + cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] + vec_channels = 256 if cpt.get("version", "v1") == "v1" else 768 + + test_phone = torch.rand(1, 200, vec_channels) # hidden unit + test_phone_lengths = torch.tensor([200]).long() # hidden unit length (apparently unused) + test_pitch = torch.randint(size=(1, 200), low=5, high=255) # f0 (unit: Hz) + test_pitchf = torch.rand(1, 200) # nsf f0 + test_ds = torch.LongTensor([0]) # speaker ID + test_rnd = torch.rand(1, 192, 200) # noise (adds a random factor) + + device = "cpu" # device used during export (does not affect model usage) + + net_g = SynthesizerTrnMsNSFsidM( + *cpt["config"], is_half=False, version=cpt.get("version", "v1") + ) # fp32 export (supporting fp16 in C++ would require manually re-laying out memory, so fp16 is skipped for now) + net_g.load_state_dict(cpt["weight"], strict=False) + input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"] + output_names = [ + "audio", + ] + # net_g.construct_spkmixmap(n_speaker) exports a multi-speaker mix track + torch.onnx.export( + net_g, + ( + test_phone.to(device), + test_phone_lengths.to(device), + test_pitch.to(device), + test_pitchf.to(device), + test_ds.to(device), + test_rnd.to(device), + ), + ExportedPath, + dynamic_axes={ + "phone": [1], + "pitch": [1], + "pitchf": [1], + "rnd": [2], + }, + do_constant_folding=False, + opset_version=13, + verbose=False, + input_names=input_names, + output_names=output_names, + ) + return "Finished" diff --git a/lib/infer/modules/train/extract/extract_f0_print.py b/lib/infer/modules/train/extract/extract_f0_print.py new file mode 100644 index 
0000000000000000000000000000000000000000..b2754e0d056f8af560e3beb18b72f6aa8d61499a --- /dev/null +++ b/lib/infer/modules/train/extract/extract_f0_print.py @@ -0,0 +1,301 @@ +import os +import sys +import traceback + +import parselmouth + +now_dir = os.getcwd() +sys.path.append(now_dir) +import logging + + +import numpy as np +import pyworld +import torchcrepe +import torch +#from torch import Tensor # Fork Feature. Used for pitch prediction for torch crepe. +import tqdm +from lib.infer.infer_libs.audio import load_audio + +logging.getLogger("numba").setLevel(logging.WARNING) +from multiprocessing import Process + +exp_dir = sys.argv[1] +f = open("%s/extract_f0_feature.log" % exp_dir, "a+") + +DoFormant = False +Quefrency = 1.0 +Timbre = 1.0 + +def printt(strr): + print(strr) + f.write(f"{strr}\n") + f.flush() + + +n_p = int(sys.argv[2]) +f0method = sys.argv[3] +crepe_hop_length = 0 +try: + crepe_hop_length = int(sys.argv[4]) +except (IndexError, ValueError): + print("crepe hop length (echl) was not passed as an argument; defaulting to 128") + crepe_hop_length = 128 + +class FeatureInput(object): + def __init__(self, samplerate=16000, hop_size=160): + self.fs = samplerate + self.hop = hop_size + + self.f0_method_dict = self.get_f0_method_dict() + + self.f0_bin = 256 + self.f0_max = 1100.0 + self.f0_min = 50.0 + self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) + self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) + + def mncrepe(self, method, x, p_len, crepe_hop_length): + f0 = None + torch_device_index = 0 + torch_device = torch.device( + f"cuda:{torch_device_index % torch.cuda.device_count()}" + ) if torch.cuda.is_available() \ + else torch.device("mps") if torch.backends.mps.is_available() \ + else torch.device("cpu") + + audio = torch.from_numpy(x.astype(np.float32)).to(torch_device, copy=True) + audio /= torch.quantile(torch.abs(audio), 0.999) + audio = torch.unsqueeze(audio, dim=0) + if audio.ndim == 2 and audio.shape[0] > 1: + audio = torch.mean(audio, dim=0, keepdim=True).detach() + audio = audio.detach() + + if method == 'mangio-crepe': + pitch: torch.Tensor = torchcrepe.predict( + audio, + self.fs, + crepe_hop_length, + self.f0_min, + self.f0_max, + "full", + batch_size=crepe_hop_length * 2, + device=torch_device, + pad=True, + ) + p_len = p_len or x.shape[0] // crepe_hop_length + # Resize the pitch + source = np.array(pitch.squeeze(0).cpu().float().numpy()) + source[source < 0.001] = np.nan + target = np.interp( + np.arange(0, len(source) * p_len, len(source)) / p_len, + np.arange(0, len(source)), + source, + ) + f0 = np.nan_to_num(target) + + elif method == 'crepe': + batch_size = 512 + audio = torch.tensor(np.copy(x))[None].float() + f0, pd = torchcrepe.predict( + audio, + self.fs, + 160, + self.f0_min, + self.f0_max, + "full", + batch_size=batch_size, + device=torch_device, + return_periodicity=True, + ) + pd = torchcrepe.filter.median(pd, 3) + f0 = torchcrepe.filter.mean(f0, 3) + f0[pd < 0.1] = 0 + f0 = f0[0].cpu().numpy() + f0 = f0[1:] # Get rid of extra first frame + + return f0 + + def get_pm(self, x, p_len): + f0 = parselmouth.Sound(x, self.fs).to_pitch_ac( + time_step=160 / 16000, + voicing_threshold=0.6, + pitch_floor=self.f0_min, + pitch_ceiling=self.f0_max, + ).selected_array["frequency"] + + return np.pad( + f0, + [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], + mode="constant" + ) + + def get_harvest(self, x): + f0_spectral = pyworld.harvest( + x.astype(np.double), + fs=self.fs, + f0_ceil=self.f0_max, + f0_floor=self.f0_min, + frame_period=1000 * self.hop / 
self.fs, + ) + return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs) + + def get_dio(self, x): + f0_spectral = pyworld.dio( + x.astype(np.double), + fs=self.fs, + f0_ceil=self.f0_max, + f0_floor=self.f0_min, + frame_period=1000 * self.hop / self.fs, + ) + return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs) + + def get_rmvpe(self, x): + if not hasattr(self, "model_rmvpe"): + from lib.infer.infer_libs.rmvpe import RMVPE + + print("Loading rmvpe model") + self.model_rmvpe = RMVPE( + "assets/rmvpe/rmvpe.pt", is_half=False, device="cpu" + ) + return self.model_rmvpe.infer_from_audio(x, thred=0.03) + + def get_rmvpe_dml(self, x): + ... + + def get_f0_method_dict(self): + return { + "pm": self.get_pm, + "harvest": self.get_harvest, + "dio": self.get_dio, + "rmvpe": self.get_rmvpe + } + + def get_f0_hybrid_computation( + self, + methods_str, + x, + p_len, + crepe_hop_length, + ): + # Get various f0 methods from input to use in the computation stack + s = methods_str + s = s.split("hybrid")[1] + s = s.replace("[", "").replace("]", "") + methods = s.split("+") + f0_computation_stack = [] + + for method in methods: + if method in self.f0_method_dict: + f0 = self.f0_method_dict[method](x, p_len) if method == 'pm' else self.f0_method_dict[method](x) + f0_computation_stack.append(f0) + elif method in ('crepe', 'mangio-crepe'): + f0 = self.mncrepe(method, x, p_len, crepe_hop_length) + f0_computation_stack.append(f0) + + if len(f0_computation_stack) != 0: + f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) if len(f0_computation_stack)>1 else f0_computation_stack[0] + return f0_median_hybrid + else: + raise ValueError("No valid methods were provided") + + def compute_f0(self, path, f0_method, crepe_hop_length): + x = load_audio(path, self.fs, DoFormant, Quefrency, Timbre) + p_len = x.shape[0] // self.hop + + if f0_method in self.f0_method_dict: + f0 = self.f0_method_dict[f0_method](x, p_len) if f0_method == 'pm' else self.f0_method_dict[f0_method](x) + elif f0_method in ['crepe', 'mangio-crepe']: + f0 = self.mncrepe(f0_method, x, p_len, crepe_hop_length) + elif "hybrid" in f0_method: # EXPERIMENTAL + # Perform hybrid median pitch estimation + f0 = self.get_f0_hybrid_computation( + f0_method, + x, + p_len, + crepe_hop_length, + ) + return f0 + + def coarse_f0(self, f0): + f0_mel = 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( + self.f0_bin - 2 + ) / (self.f0_mel_max - self.f0_mel_min) + 1 + + # use 0 or 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 + f0_coarse = np.rint(f0_mel).astype(int) + assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( + f0_coarse.max(), + f0_coarse.min(), + ) + return f0_coarse + + def go(self, paths, f0_method, crepe_hop_length, thread_n): + os.system('cls' if os.name == 'nt' else 'clear') + if len(paths) == 0: + printt("no-f0-todo") + return + with tqdm.tqdm(total=len(paths), leave=True, position=thread_n) as pbar: + description = f"Thread {thread_n} | Hop-Length: {crepe_hop_length}" + pbar.set_description(description) + + for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): + try: + if ( + os.path.exists(opt_path1 + ".npy") + and os.path.exists(opt_path2 + ".npy") + ): + pbar.update(1) + continue + + featur_pit = self.compute_f0(inp_path, f0_method, crepe_hop_length) + np.save( + opt_path2, + featur_pit, + allow_pickle=False, + ) # nsf + coarse_pit = self.coarse_f0(featur_pit) + np.save( + opt_path1, + coarse_pit, + allow_pickle=False, + ) # ori + 
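# Two arrays are saved per clip: 2b-f0nsf keeps the raw Hz contour for the NSF vocoder, 2a_f0 the 1-255 coarse bins consumed by the pitch embedding. + 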
pbar.update(1) + except Exception: + printt(f"f0fail-{idx}-{inp_path}-{traceback.format_exc()}") + + +if __name__ == "__main__": + # exp_dir=r"E:\codes\py39\dataset\mi-test" + # n_p=16 + # f = open("%s/log_extract_f0.log"%exp_dir, "w") + printt(sys.argv) + featureInput = FeatureInput() + paths = [] + inp_root = "%s/1_16k_wavs" % (exp_dir) + opt_root1 = "%s/2a_f0" % (exp_dir) + opt_root2 = "%s/2b-f0nsf" % (exp_dir) + + os.makedirs(opt_root1, exist_ok=True) + os.makedirs(opt_root2, exist_ok=True) + for name in sorted(list(os.listdir(inp_root))): + inp_path = "%s/%s" % (inp_root, name) + if "spec" in inp_path: + continue + opt_path1 = "%s/%s" % (opt_root1, name) + opt_path2 = "%s/%s" % (opt_root2, name) + paths.append([inp_path, opt_path1, opt_path2]) + + ps = [] + print("Using f0 method: " + f0method) + for i in range(n_p): + p = Process( + target=featureInput.go, + args=(paths[i::n_p], f0method, crepe_hop_length, i), + ) + ps.append(p) + p.start() + for i in range(n_p): + ps[i].join() diff --git a/lib/infer/modules/train/extract/extract_f0_rmvpe.py b/lib/infer/modules/train/extract/extract_f0_rmvpe.py new file mode 100644 index 0000000000000000000000000000000000000000..751b62d38df837bf6f1f21d45c485dd8e5f5d113 --- /dev/null +++ b/lib/infer/modules/train/extract/extract_f0_rmvpe.py @@ -0,0 +1,138 @@ +import os +import sys +import traceback + +now_dir = os.getcwd() +sys.path.append(now_dir) +import logging + +import numpy as np + +from lib.infer.infer_libs.audio import load_audio + +logging.getLogger("numba").setLevel(logging.WARNING) + +n_part = int(sys.argv[1]) +i_part = int(sys.argv[2]) +i_gpu = sys.argv[3] +os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu) +exp_dir = sys.argv[4] +is_half = sys.argv[5] +f = open("%s/extract_f0_feature.log" % exp_dir, "a+") + + +def printt(strr): + print(strr) + f.write("%s\n" % strr) + f.flush() + + +class FeatureInput(object): + def __init__(self, samplerate=16000, hop_size=160): + self.fs = samplerate + self.hop = hop_size + + self.f0_bin = 256 + self.f0_max = 1100.0 + self.f0_min = 50.0 + self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) + self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) + + def compute_f0(self, path, f0_method): + x = load_audio(path, self.fs) + # p_len = x.shape[0] // self.hop + if f0_method == "rmvpe": + if not hasattr(self, "model_rmvpe"): + from lib.infer.infer_libs.rmvpe import RMVPE + + print("Loading rmvpe model") + self.model_rmvpe = RMVPE( + "assets/rmvpe/rmvpe.pt", is_half=is_half, device="cuda" + ) + f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) + return f0 + + def coarse_f0(self, f0): + f0_mel = 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( + self.f0_bin - 2 + ) / (self.f0_mel_max - self.f0_mel_min) + 1 + + # use 0 or 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 + f0_coarse = np.rint(f0_mel).astype(int) + assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( + f0_coarse.max(), + f0_coarse.min(), + ) + return f0_coarse + + def go(self, paths, f0_method): + if len(paths) == 0: + printt("no-f0-todo") + else: + printt("todo-f0-%s" % len(paths)) + n = max(len(paths) // 5, 1) # print at most 5 progress lines per process + for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): + try: + if idx % n == 0: + printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path)) + if ( + os.path.exists(opt_path1 + ".npy") + and os.path.exists(opt_path2 + ".npy") + ): + continue + featur_pit = self.compute_f0(inp_path, f0_method) + 
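# thred=0.03 (passed to infer_from_audio in compute_f0) is RMVPE's decoding threshold; frames whose salience falls below it are returned as 0 Hz, i.e. unvoiced. + 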
np.save( + opt_path2, + featur_pit, + allow_pickle=False, + ) # nsf + coarse_pit = self.coarse_f0(featur_pit) + np.save( + opt_path1, + coarse_pit, + allow_pickle=False, + ) # ori + except: + printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc())) + + +if __name__ == "__main__": + # exp_dir=r"E:\codes\py39\dataset\mi-test" + # n_p=16 + # f = open("%s/log_extract_f0.log"%exp_dir, "w") + printt(sys.argv) + featureInput = FeatureInput() + paths = [] + inp_root = "%s/1_16k_wavs" % (exp_dir) + opt_root1 = "%s/2a_f0" % (exp_dir) + opt_root2 = "%s/2b-f0nsf" % (exp_dir) + + os.makedirs(opt_root1, exist_ok=True) + os.makedirs(opt_root2, exist_ok=True) + for name in sorted(list(os.listdir(inp_root))): + inp_path = "%s/%s" % (inp_root, name) + if "spec" in inp_path: + continue + opt_path1 = "%s/%s" % (opt_root1, name) + opt_path2 = "%s/%s" % (opt_root2, name) + paths.append([inp_path, opt_path1, opt_path2]) + try: + featureInput.go(paths[i_part::n_part], "rmvpe") + except: + printt("f0_all_fail-%s" % (traceback.format_exc())) + # ps = [] + # for i in range(n_p): + # p = Process( + # target=featureInput.go, + # args=( + # paths[i::n_p], + # f0method, + # ), + # ) + # ps.append(p) + # p.start() + # for i in range(n_p): + # ps[i].join() diff --git a/lib/infer/modules/train/extract/extract_f0_rmvpe_dml.py b/lib/infer/modules/train/extract/extract_f0_rmvpe_dml.py new file mode 100644 index 0000000000000000000000000000000000000000..f10cfe7018e97821e8c78d2c776d10fc347fb0fd --- /dev/null +++ b/lib/infer/modules/train/extract/extract_f0_rmvpe_dml.py @@ -0,0 +1,136 @@ +import os +import sys +import traceback + +now_dir = os.getcwd() +sys.path.append(now_dir) +import logging + +import numpy as np + +from lib.infer.infer_libs.audio import load_audio + +logging.getLogger("numba").setLevel(logging.WARNING) + +exp_dir = sys.argv[1] +import torch_directml + +device = torch_directml.device(torch_directml.default_device()) +f = open("%s/extract_f0_feature.log" % exp_dir, "a+") + + +def printt(strr): + print(strr) + f.write("%s\n" % strr) + f.flush() + + +class FeatureInput(object): + def __init__(self, samplerate=16000, hop_size=160): + self.fs = samplerate + self.hop = hop_size + + self.f0_bin = 256 + self.f0_max = 1100.0 + self.f0_min = 50.0 + self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) + self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) + + def compute_f0(self, path, f0_method): + x = load_audio(path, self.fs) + # p_len = x.shape[0] // self.hop + if f0_method == "rmvpe": + if hasattr(self, "model_rmvpe") == False: + from lib.infer.infer_libs.rmvpe import RMVPE + + print("Loading rmvpe model") + self.model_rmvpe = RMVPE( + "assets/rmvpe/rmvpe.pt", is_half=False, device=device + ) + f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) + return f0 + + def coarse_f0(self, f0): + f0_mel = 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( + self.f0_bin - 2 + ) / (self.f0_mel_max - self.f0_mel_min) + 1 + + # use 0 or 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 + f0_coarse = np.rint(f0_mel).astype(int) + assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( + f0_coarse.max(), + f0_coarse.min(), + ) + return f0_coarse + + def go(self, paths, f0_method): + if len(paths) == 0: + printt("no-f0-todo") + else: + printt("todo-f0-%s" % len(paths)) + n = max(len(paths) // 5, 1) # 每个进程最多打印5条 + for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): + try: + if idx % n == 0: + printt("f0ing,now-%s,all-%s,-%s" % 
(idx, len(paths), inp_path)) + if ( + os.path.exists(opt_path1 + ".npy") == True + and os.path.exists(opt_path2 + ".npy") == True + ): + continue + featur_pit = self.compute_f0(inp_path, f0_method) + np.save( + opt_path2, + featur_pit, + allow_pickle=False, + ) # nsf + coarse_pit = self.coarse_f0(featur_pit) + np.save( + opt_path1, + coarse_pit, + allow_pickle=False, + ) # ori + except: + printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc())) + + +if __name__ == "__main__": + # exp_dir=r"E:\codes\py39\dataset\mi-test" + # n_p=16 + # f = open("%s/log_extract_f0.log"%exp_dir, "w") + printt(sys.argv) + featureInput = FeatureInput() + paths = [] + inp_root = "%s/1_16k_wavs" % (exp_dir) + opt_root1 = "%s/2a_f0" % (exp_dir) + opt_root2 = "%s/2b-f0nsf" % (exp_dir) + + os.makedirs(opt_root1, exist_ok=True) + os.makedirs(opt_root2, exist_ok=True) + for name in sorted(list(os.listdir(inp_root))): + inp_path = "%s/%s" % (inp_root, name) + if "spec" in inp_path: + continue + opt_path1 = "%s/%s" % (opt_root1, name) + opt_path2 = "%s/%s" % (opt_root2, name) + paths.append([inp_path, opt_path1, opt_path2]) + try: + featureInput.go(paths, "rmvpe") + except: + printt("f0_all_fail-%s" % (traceback.format_exc())) + # ps = [] + # for i in range(n_p): + # p = Process( + # target=featureInput.go, + # args=( + # paths[i::n_p], + # f0method, + # ), + # ) + # ps.append(p) + # p.start() + # for i in range(n_p): + # ps[i].join() diff --git a/lib/infer/modules/train/extract_feature_print.py b/lib/infer/modules/train/extract_feature_print.py new file mode 100644 index 0000000000000000000000000000000000000000..e328f64b38c0cd5a443221b28b25887a1978e4f4 --- /dev/null +++ b/lib/infer/modules/train/extract_feature_print.py @@ -0,0 +1,152 @@ +import os +import sys +import traceback +import tqdm +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0" + +device = sys.argv[1] +n_part = int(sys.argv[2]) +i_part = int(sys.argv[3]) +if len(sys.argv) == 7: + exp_dir = sys.argv[4] + version = sys.argv[5] + is_half = sys.argv[6] +else: + i_gpu = sys.argv[4] + exp_dir = sys.argv[5] + os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu) + version = sys.argv[6] + is_half = sys.argv[7] +import fairseq +import numpy as np +import soundfile as sf +import torch +import torch.nn.functional as F + +if "privateuseone" not in device: + device = "cpu" + if torch.cuda.is_available(): + device = "cuda" + elif torch.backends.mps.is_available(): + device = "mps" +else: + import torch_directml + + device = torch_directml.device(torch_directml.default_device()) + + def forward_dml(ctx, x, scale): + ctx.scale = scale + res = x.clone().detach() + return res + + fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml + +f = open("%s/extract_f0_feature.log" % exp_dir, "a+") + + +def printt(strr): + print(strr) + f.write("%s\n" % strr) + f.flush() + + +printt(sys.argv) +model_path = "assets/hubert/hubert_base.pt" + +printt(exp_dir) +wavPath = "%s/1_16k_wavs" % exp_dir +outPath = ( + "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir +) +os.makedirs(outPath, exist_ok=True) + + +# wave must be 16k, hop_size=320 +def readwave(wav_path, normalize=False): + wav, sr = sf.read(wav_path) + assert sr == 16000 + #feats = torch.from_numpy(wav).float() + feats = torch.from_numpy(wav) + if is_half: + feats = feats.half() + else: + feats = feats.float() + if feats.dim() == 2: # double channels + feats = feats.mean(-1) + assert feats.dim() == 1, feats.dim() + if normalize: + 
with torch.no_grad(): + feats = F.layer_norm(feats, feats.shape) + feats = feats.view(1, -1) + return feats + + +# HuBERT model +os.system('cls' if os.name == 'nt' else 'clear') +print("Starting feature extraction...\n") +printt("Loaded model {}".format(model_path)) +# if hubert model is exist +if os.access(model_path, os.F_OK) == False: + printt( + "Error: Extracting is shut down because %s does not exist, you may download it from https://huggingface.co./lj1995/VoiceConversionWebUI/tree/main" + % model_path + ) + exit(0) +models, saved_cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task( + [model_path], + suffix="", +) +model = models[0] +model = model.to(device) +printt("Using %s" % device) +#if device not in ["mps", "cpu"]: +# model = model.half() +if is_half: + model = model.half() +else: + model = model.float() +model.eval() + +todo = sorted(list(os.listdir(wavPath)))[i_part::n_part] +n = max(1, len(todo) // 10) # 最多打印十条 +if len(todo) == 0: + os.system('cls' if os.name == 'nt' else 'clear') + printt("An error occurred in the feature extraction, make sure you have provided the audios correctly.") +else: + printt("- %s" % len(todo)) + with tqdm.tqdm(total=len(todo)) as pbar: + for idx, file in enumerate(todo): + try: + if file.endswith(".wav"): + wav_path = "%s/%s" % (wavPath, file) + out_path = "%s/%s" % (outPath, file.replace("wav", "npy")) + + if os.path.exists(out_path): + continue + + feats = readwave(wav_path, normalize=saved_cfg.task.normalize) + padding_mask = torch.BoolTensor(feats.shape).fill_(False) + inputs = { + "source": feats.to(device), + "padding_mask": padding_mask.to(device), + "output_layer": 9 if version == "v1" else 12, # layer 9 + } + with torch.no_grad(): + logits = model.extract_features(**inputs) + feats = ( + model.final_proj(logits[0]) if version == "v1" else logits[0] + ) + + feats = feats.squeeze(0).float().cpu().numpy() + if np.isnan(feats).sum() == 0: + np.save(out_path, feats, allow_pickle=False) + else: + printt("%s-contains nan" % file) + # if idx % n == 0: + # printt("now-%s,all-%s,%s,%s" % (idx, len(todo), file, feats.shape)) + pbar.set_description(f"Processing: %s - Shape: %s" % (file, feats.shape)) + except: + printt(traceback.format_exc()) + pbar.update(1) + printt("\nFeature extraction completed successfully!") diff --git a/lib/infer/modules/train/preprocess.py b/lib/infer/modules/train/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ace7fd8e64d16a2062f14ce2cfabf9b8b60913 --- /dev/null +++ b/lib/infer/modules/train/preprocess.py @@ -0,0 +1,147 @@ +import os +import sys + +from scipy import signal + +now_dir = os.getcwd() +sys.path.append(now_dir) +print(sys.argv) +inp_root = sys.argv[1] +sr = int(sys.argv[2]) +n_p = int(sys.argv[3]) +exp_dir = sys.argv[4] +noparallel = sys.argv[5] == "True" +per = float(sys.argv[6]) +import multiprocessing +import os +import traceback + +import librosa +import numpy as np +from scipy.io import wavfile + +from lib.infer.infer_libs.audio import load_audio +from lib.infer.infer_libs.slicer2 import Slicer + +mutex = multiprocessing.Lock() +f = open("%s/preprocess.log" % exp_dir, "a+") + + +def println(strr): + mutex.acquire() + print(strr) + f.write("%s\n" % strr) + f.flush() + mutex.release() + + +class PreProcess: + def __init__(self, sr, exp_dir, per=3.7): + self.slicer = Slicer( + sr=sr, + threshold=-42, + min_length=1500, + min_interval=400, + hop_size=15, + max_sil_kept=500, + ) + self.sr = sr + self.bh, self.ah = signal.butter(N=5, Wn=48, btype="high", 
fs=self.sr) + self.per = per + self.overlap = 0.3 + self.tail = self.per + self.overlap + self.max = 0.9 + self.alpha = 0.75 + self.exp_dir = exp_dir + self.gt_wavs_dir = "%s/0_gt_wavs" % exp_dir + self.wavs16k_dir = "%s/1_16k_wavs" % exp_dir + os.makedirs(self.exp_dir, exist_ok=True) + os.makedirs(self.gt_wavs_dir, exist_ok=True) + os.makedirs(self.wavs16k_dir, exist_ok=True) + + def norm_write(self, tmp_audio, idx0, idx1): + tmp_max = np.abs(tmp_audio).max() + if tmp_max > 2.5: + print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max)) + return + tmp_audio = (tmp_audio / tmp_max * (self.max * self.alpha)) + ( + 1 - self.alpha + ) * tmp_audio + wavfile.write( + "%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1), + self.sr, + tmp_audio.astype(np.float32), + ) + tmp_audio = librosa.resample( + tmp_audio, orig_sr=self.sr, target_sr=16000 + ) # , res_type="soxr_vhq" + wavfile.write( + "%s/%s_%s.wav" % (self.wavs16k_dir, idx0, idx1), + 16000, + tmp_audio.astype(np.float32), + ) + + def pipeline(self, path, idx0): + try: + audio = load_audio(path, self.sr) + # zero phased digital filter cause pre-ringing noise... + # audio = signal.filtfilt(self.bh, self.ah, audio) + audio = signal.lfilter(self.bh, self.ah, audio) + + idx1 = 0 + for audio in self.slicer.slice(audio): + i = 0 + while 1: + start = int(self.sr * (self.per - self.overlap) * i) + i += 1 + if len(audio[start:]) > self.tail * self.sr: + tmp_audio = audio[start : start + int(self.per * self.sr)] + self.norm_write(tmp_audio, idx0, idx1) + idx1 += 1 + else: + tmp_audio = audio[start:] + idx1 += 1 + break + self.norm_write(tmp_audio, idx0, idx1) + println("%s->Suc." % path) + except: + println("%s->%s" % (path, traceback.format_exc())) + + def pipeline_mp(self, infos): + for path, idx0 in infos: + self.pipeline(path, idx0) + + def pipeline_mp_inp_dir(self, inp_root, n_p): + try: + infos = [ + ("%s/%s" % (inp_root, name), idx) + for idx, name in enumerate(sorted(list(os.listdir(inp_root)))) + ] + if noparallel: + for i in range(n_p): + self.pipeline_mp(infos[i::n_p]) + else: + ps = [] + for i in range(n_p): + p = multiprocessing.Process( + target=self.pipeline_mp, args=(infos[i::n_p],) + ) + ps.append(p) + p.start() + for i in range(n_p): + ps[i].join() + except: + println("Fail. 
%s" % traceback.format_exc()) + + +def preprocess_trainset(inp_root, sr, n_p, exp_dir, per): + pp = PreProcess(sr, exp_dir, per) + os.system('cls' if os.name == 'nt' else 'clear') + println("Starting preprocessing...\n") + println(sys.argv) + pp.pipeline_mp_inp_dir(inp_root, n_p) + println("\nPreprocessing completed!\n\n") + + +if __name__ == "__main__": + preprocess_trainset(inp_root, sr, n_p, exp_dir, per) diff --git a/lib/infer/modules/train/train.py b/lib/infer/modules/train/train.py new file mode 100644 index 0000000000000000000000000000000000000000..3e47dd7471a248db88506c5a5400e3a790c1426a --- /dev/null +++ b/lib/infer/modules/train/train.py @@ -0,0 +1,723 @@ +import os +import sys +import logging + +logger = logging.getLogger(__name__) + +now_dir = os.getcwd() +sys.path.append(os.path.join(now_dir)) + +import datetime + +from lib.infer.infer_libs.train import utils + +hps = utils.get_hparams() +os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",") +n_gpus = len(hps.gpus.split("-")) +from random import randint, shuffle + +import torch +try: + import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import + if torch.xpu.is_available(): + from lib.infer.modules.ipex import ipex_init + from lib.infer.modules.ipex.gradscaler import gradscaler_init + from torch.xpu.amp import autocast + GradScaler = gradscaler_init() + ipex_init() + else: + from torch.cuda.amp import GradScaler, autocast +except Exception: + from torch.cuda.amp import GradScaler, autocast + +torch.backends.cudnn.deterministic = False +torch.backends.cudnn.benchmark = False +from time import sleep +from time import time as ttime + +import torch.distributed as dist +import torch.multiprocessing as mp + +from torch.nn import functional as F +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.data import DataLoader +from torch.utils.tensorboard import SummaryWriter + +from lib.infer.infer_libs.infer_pack import commons +from lib.infer.infer_libs.train.data_utils import ( + DistributedBucketSampler, + TextAudioCollate, + TextAudioCollateMultiNSFsid, + TextAudioLoader, + TextAudioLoaderMultiNSFsid, +) + +if hps.version == "v1": + from lib.infer.infer_libs.infer_pack.models import MultiPeriodDiscriminator + from lib.infer.infer_libs.infer_pack.models import SynthesizerTrnMs256NSFsid as RVC_Model_f0 + from lib.infer.infer_libs.infer_pack.models import ( + SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0, + ) +else: + from lib.infer.infer_libs.infer_pack.models import ( + SynthesizerTrnMs768NSFsid as RVC_Model_f0, + SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0, + MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator, + ) + +from lib.infer.infer_libs.train.losses import ( + discriminator_loss, + feature_loss, + generator_loss, + kl_loss, +) +from lib.infer.infer_libs.train.mel_processing import mel_spectrogram_torch, spec_to_mel_torch +from lib.infer.infer_libs.train.process_ckpt import savee + +global_step = 0 +import csv + +class EpochRecorder: + def __init__(self): + self.last_time = ttime() + + def record(self): + now_time = ttime() + elapsed_time = now_time - self.last_time + self.last_time = now_time + elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time)) + current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + return f"[{current_time}] | ({elapsed_time_str})" + +def reset_stop_flag(): + with open("lib/csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: + csv_writer = csv.writer(STOPCSVwrite, delimiter=",") + 
csv_writer.writerow(["False"]) + +def create_model(hps, model_f0, model_nof0): + filter_length_adjusted = hps.data.filter_length // 2 + 1 + segment_size_adjusted = hps.train.segment_size // hps.data.hop_length + is_half = hps.train.fp16_run + sr = hps.sample_rate + + model = model_f0 if hps.if_f0 == 1 else model_nof0 + + return model( + filter_length_adjusted, + segment_size_adjusted, + **hps.model, + is_half=is_half, + sr=sr + ) + +def move_model_to_cuda_if_available(model, rank): + if torch.cuda.is_available(): + return model.cuda(rank) + else: + return model + +def create_optimizer(model, hps): + return torch.optim.AdamW( + model.parameters(), + hps.train.learning_rate, + betas=hps.train.betas, + eps=hps.train.eps, + ) + +def create_ddp_model(model, rank): + if torch.cuda.is_available(): + return DDP(model, device_ids=[rank]) + else: + return DDP(model) + +def create_dataset(hps, if_f0=True): + return TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) if if_f0 else TextAudioLoader(hps.data.training_files, hps.data) + +def create_sampler(dataset, batch_size, n_gpus, rank): + return DistributedBucketSampler( + dataset, + batch_size * n_gpus, + # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s + [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s + num_replicas=n_gpus, + rank=rank, + shuffle=True, + ) + +def set_collate_fn(if_f0=True): + return TextAudioCollateMultiNSFsid() if if_f0 else TextAudioCollate() + + +def main(): + n_gpus = torch.cuda.device_count() + + if torch.cuda.is_available() == False and torch.backends.mps.is_available() == True: + n_gpus = 1 + if n_gpus < 1: + # patch to unblock people without gpus. there is probably a better way. + logger.warn("NO GPU DETECTED: falling back to CPU - this may take a while") + n_gpus = 1 + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = str(randint(20000, 55555)) + children = [] + for i in range(n_gpus): + subproc = mp.Process( + target=run, + args=( + i, + n_gpus, + hps, + ), + ) + children.append(subproc) + subproc.start() + + for i in range(n_gpus): + children[i].join() + + +def run(rank, n_gpus, hps): + global global_step + if rank == 0: + logger = utils.get_logger(hps.model_dir) + logger.info(hps) + # utils.check_git_hash(hps.model_dir) + writer = SummaryWriter(log_dir=hps.model_dir) + writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) + + dist.init_process_group( + backend="gloo", init_method="env://", world_size=n_gpus, rank=rank + ) + torch.manual_seed(hps.train.seed) + if torch.cuda.is_available(): + torch.cuda.set_device(rank) + + if hps.if_f0 == 1: + train_dataset = TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) + else: + train_dataset = TextAudioLoader(hps.data.training_files, hps.data) + train_sampler = DistributedBucketSampler( + train_dataset, + hps.train.batch_size * n_gpus, + # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s + [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s + num_replicas=n_gpus, + rank=rank, + shuffle=True, + ) + # It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. 
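+    # below, the f0-aware collate also batches the pitch/pitchf tensors;
+    # the plain variant simply drops them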
+    # num_workers=8 -> num_workers=4
+    if hps.if_f0 == 1:
+        collate_fn = TextAudioCollateMultiNSFsid()
+    else:
+        collate_fn = TextAudioCollate()
+    train_loader = DataLoader(
+        train_dataset,
+        num_workers=4,
+        shuffle=False,
+        pin_memory=True,
+        collate_fn=collate_fn,
+        batch_sampler=train_sampler,
+        persistent_workers=True,
+        prefetch_factor=8,
+    )
+    if hps.if_f0 == 1:
+        net_g = RVC_Model_f0(
+            hps.data.filter_length // 2 + 1,
+            hps.train.segment_size // hps.data.hop_length,
+            **hps.model,
+            is_half=hps.train.fp16_run,
+            sr=hps.sample_rate,
+        )
+    else:
+        net_g = RVC_Model_nof0(
+            hps.data.filter_length // 2 + 1,
+            hps.train.segment_size // hps.data.hop_length,
+            **hps.model,
+            is_half=hps.train.fp16_run,
+        )
+    if torch.cuda.is_available():
+        net_g = net_g.cuda(rank)
+    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm)
+    if torch.cuda.is_available():
+        net_d = net_d.cuda(rank)
+    optim_g = torch.optim.AdamW(
+        net_g.parameters(),
+        hps.train.learning_rate,
+        betas=hps.train.betas,
+        eps=hps.train.eps,
+    )
+    optim_d = torch.optim.AdamW(
+        net_d.parameters(),
+        hps.train.learning_rate,
+        betas=hps.train.betas,
+        eps=hps.train.eps,
+    )
+    # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
+    # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
+    if hasattr(torch, "xpu") and torch.xpu.is_available():
+        pass
+    elif torch.cuda.is_available():
+        net_g = DDP(net_g, device_ids=[rank])
+        net_d = DDP(net_d, device_ids=[rank])
+    else:
+        net_g = DDP(net_g)
+        net_d = DDP(net_d)
+
+    try:  # auto-resume from the most recent checkpoints if they can be loaded
+        _, _, _, epoch_str = utils.load_checkpoint(
+            utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d
+        )  # loading D is usually fine
+        if rank == 0:
+            logger.info("loaded D")
+        # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0)
+        _, _, _, epoch_str = utils.load_checkpoint(
+            utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g
+        )
+        global_step = (epoch_str - 1) * len(train_loader)
+        # epoch_str = 1
+        # global_step = 0
+    except Exception:  # nothing to resume on the first run: load the pretrained weights instead
+        os.system('cls' if os.name == 'nt' else 'clear')
+        epoch_str = 1
+        global_step = 0
+        if hps.pretrainG != "":
+            if rank == 0:
+                logger.info("Loaded pretrained %s" % (hps.pretrainG))
+            if hasattr(net_g, "module"):
+                logger.info(
+                    net_g.module.load_state_dict(
+                        torch.load(hps.pretrainG, map_location="cpu")["model"]
+                    )
+                )  # deliberately not loading the optimizer state
+            else:
+                logger.info(
+                    net_g.load_state_dict(
+                        torch.load(hps.pretrainG, map_location="cpu")["model"]
+                    )
+                )  # deliberately not loading the optimizer state
+        if hps.pretrainD != "":
+            if rank == 0:
+                logger.info("Loaded pretrained %s" % (hps.pretrainD))
+            if hasattr(net_d, "module"):
+                logger.info(
+                    net_d.module.load_state_dict(
+                        torch.load(hps.pretrainD, map_location="cpu")["model"]
+                    )
+                )
+            else:
+                logger.info(
+                    net_d.load_state_dict(
+                        torch.load(hps.pretrainD, map_location="cpu")["model"]
+                    )
+                )
+
+    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
+        optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
+    )
+    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
+        optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
+    )
+
+    scaler = GradScaler(enabled=hps.train.fp16_run)
+
+    cache = []
+    for epoch in range(epoch_str, hps.train.epochs + 1):
+        if rank == 0:
+            train_and_evaluate(
+                rank,
+                epoch,
+                hps,
+                [net_g, net_d],
+                [optim_g, optim_d],
+                [scheduler_g, scheduler_d],
+                scaler,
+                [train_loader, None],
+                logger,
+                [writer, writer_eval],
+                cache,
+            )
+        else:
+            train_and_evaluate(
+                rank,
+                epoch,
+                hps,
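+                # same step as the rank-0 branch above, minus logger and writers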
+ [net_g, net_d], + [optim_g, optim_d], + [scheduler_g, scheduler_d], + scaler, + [train_loader, None], + None, + None, + cache, + ) + scheduler_g.step() + scheduler_d.step() + + +def train_and_evaluate( + rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, cache +): + net_g, net_d = nets + optim_g, optim_d = optims + train_loader, eval_loader = loaders + if writers is not None: + writer, writer_eval = writers + + train_loader.batch_sampler.set_epoch(epoch) + global global_step + + net_g.train() + net_d.train() + + # Prepare data iterator + if hps.if_cache_data_in_gpu == True: + # Use Cache + data_iterator = cache + if cache == []: + # Make new cache + for batch_idx, info in enumerate(train_loader): + # Unpack + if hps.if_f0 == 1: + ( + phone, + phone_lengths, + pitch, + pitchf, + spec, + spec_lengths, + wave, + wave_lengths, + sid, + ) = info + else: + ( + phone, + phone_lengths, + spec, + spec_lengths, + wave, + wave_lengths, + sid, + ) = info + # Load on CUDA + if torch.cuda.is_available(): + phone = phone.cuda(rank, non_blocking=True) + phone_lengths = phone_lengths.cuda(rank, non_blocking=True) + if hps.if_f0 == 1: + pitch = pitch.cuda(rank, non_blocking=True) + pitchf = pitchf.cuda(rank, non_blocking=True) + sid = sid.cuda(rank, non_blocking=True) + spec = spec.cuda(rank, non_blocking=True) + spec_lengths = spec_lengths.cuda(rank, non_blocking=True) + wave = wave.cuda(rank, non_blocking=True) + wave_lengths = wave_lengths.cuda(rank, non_blocking=True) + # Cache on list + if hps.if_f0 == 1: + cache.append( + ( + batch_idx, + ( + phone, + phone_lengths, + pitch, + pitchf, + spec, + spec_lengths, + wave, + wave_lengths, + sid, + ), + ) + ) + else: + cache.append( + ( + batch_idx, + ( + phone, + phone_lengths, + spec, + spec_lengths, + wave, + wave_lengths, + sid, + ), + ) + ) + else: + # Load shuffled cache + shuffle(cache) + else: + # Loader + data_iterator = enumerate(train_loader) + + # Run steps + epoch_recorder = EpochRecorder() + for batch_idx, info in data_iterator: + # Data + ## Unpack + if hps.if_f0 == 1: + ( + phone, + phone_lengths, + pitch, + pitchf, + spec, + spec_lengths, + wave, + wave_lengths, + sid, + ) = info + else: + phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info + ## Load on CUDA + if (hps.if_cache_data_in_gpu == False) and torch.cuda.is_available(): + phone = phone.cuda(rank, non_blocking=True) + phone_lengths = phone_lengths.cuda(rank, non_blocking=True) + if hps.if_f0 == 1: + pitch = pitch.cuda(rank, non_blocking=True) + pitchf = pitchf.cuda(rank, non_blocking=True) + sid = sid.cuda(rank, non_blocking=True) + spec = spec.cuda(rank, non_blocking=True) + spec_lengths = spec_lengths.cuda(rank, non_blocking=True) + wave = wave.cuda(rank, non_blocking=True) + # wave_lengths = wave_lengths.cuda(rank, non_blocking=True) + + # Calculate + with autocast(enabled=hps.train.fp16_run): + if hps.if_f0 == 1: + ( + y_hat, + ids_slice, + x_mask, + z_mask, + (z, z_p, m_p, logs_p, m_q, logs_q), + ) = net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid) + else: + ( + y_hat, + ids_slice, + x_mask, + z_mask, + (z, z_p, m_p, logs_p, m_q, logs_q), + ) = net_g(phone, phone_lengths, spec, spec_lengths, sid) + mel = spec_to_mel_torch( + spec, + hps.data.filter_length, + hps.data.n_mel_channels, + hps.data.sampling_rate, + hps.data.mel_fmin, + hps.data.mel_fmax, + ) + y_mel = commons.slice_segments( + mel, ids_slice, hps.train.segment_size // hps.data.hop_length + ) + with autocast(enabled=False): + y_hat_mel = 
mel_spectrogram_torch( + y_hat.float().squeeze(1), + hps.data.filter_length, + hps.data.n_mel_channels, + hps.data.sampling_rate, + hps.data.hop_length, + hps.data.win_length, + hps.data.mel_fmin, + hps.data.mel_fmax, + ) + if hps.train.fp16_run == True: + y_hat_mel = y_hat_mel.float() + wave = commons.slice_segments( + wave, ids_slice * hps.data.hop_length, hps.train.segment_size + ) # slice + + # Discriminator + y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach()) + with autocast(enabled=False): + loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( + y_d_hat_r, y_d_hat_g + ) + optim_d.zero_grad() + scaler.scale(loss_disc).backward() + scaler.unscale_(optim_d) + grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) + scaler.step(optim_d) + + with autocast(enabled=hps.train.fp16_run): + # Generator + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat) + with autocast(enabled=False): + loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel + loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl + loss_fm = feature_loss(fmap_r, fmap_g) + loss_gen, losses_gen = generator_loss(y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl + optim_g.zero_grad() + scaler.scale(loss_gen_all).backward() + scaler.unscale_(optim_g) + grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) + scaler.step(optim_g) + scaler.update() + + if rank == 0: + if global_step % hps.train.log_interval == 0: + lr = optim_g.param_groups[0]["lr"] + logger.info( + "Train Epoch: {} [{:.0f}%]".format( + epoch, 100.0 * batch_idx / len(train_loader) + ) + ) + # Amor For Tensorboard display + if loss_mel > 75: + loss_mel = 75 + if loss_kl > 9: + loss_kl = 9 + + logger.info([global_step, lr]) + logger.info( + f"[loss_disc={loss_disc:.3f}] | [loss_gen={loss_gen:.3f}] | [loss_fm={loss_fm:.3f}] | [loss_mel={loss_mel:.3f}] | [loss_kl={loss_kl:.3f}]" + ) + scalar_dict = { + "loss/g/total": loss_gen_all, + "loss/d/total": loss_disc, + "learning_rate": lr, + "grad_norm_d": grad_norm_d, + "grad_norm_g": grad_norm_g, + } + scalar_dict.update( + { + "loss/g/fm": loss_fm, + "loss/g/mel": loss_mel, + "loss/g/kl": loss_kl, + } + ) + + scalar_dict.update( + {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)} + ) + scalar_dict.update( + {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)} + ) + scalar_dict.update( + {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)} + ) + image_dict = { + "slice/mel_org": utils.plot_spectrogram_to_numpy( + y_mel[0].data.cpu().numpy() + ), + "slice/mel_gen": utils.plot_spectrogram_to_numpy( + y_hat_mel[0].data.cpu().numpy() + ), + "all/mel": utils.plot_spectrogram_to_numpy( + mel[0].data.cpu().numpy() + ), + } + utils.summarize( + writer=writer, + global_step=global_step, + images=image_dict, + scalars=scalar_dict, + ) + global_step += 1 + # /Run steps + + if epoch % hps.save_every_epoch == 0 and rank == 0: + if hps.if_latest == 0: + utils.save_checkpoint( + net_g, + optim_g, + hps.train.learning_rate, + epoch, + os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), + ) + utils.save_checkpoint( + net_d, + optim_d, + hps.train.learning_rate, + epoch, + os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), + ) + else: + utils.save_checkpoint( + net_g, + optim_g, + hps.train.learning_rate, + epoch, + os.path.join(hps.model_dir, "G_{}.pth".format(2333333)), + ) + utils.save_checkpoint( + net_d, + optim_d, + hps.train.learning_rate, + epoch, + os.path.join(hps.model_dir, "D_{}.pth".format(2333333)), 
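+                # 2333333 is a fixed sentinel step: "latest-only" mode keeps
+                # overwriting one G/D checkpoint pair instead of accumulating files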
+ ) + if rank == 0 and hps.save_every_weights == "1": + if hasattr(net_g, "module"): + ckpt = net_g.module.state_dict() + else: + ckpt = net_g.state_dict() + logger.info( + "saving ckpt %s_e%s:%s" + % ( + hps.name, + epoch, + savee( + ckpt, + hps.sample_rate, + hps.if_f0, + hps.name + "_e%s_s%s" % (epoch, global_step), + epoch, + hps.version, + hps, + ), + ) + ) + + stopbtn = False + try: + with open("lib/csvdb/stop.csv", 'r') as csv_file: + stopbtn_str = next(csv.reader(csv_file), [None])[0] + if stopbtn_str is not None: stopbtn = stopbtn_str.lower() == 'true' + except (ValueError, TypeError, FileNotFoundError, IndexError) as e: + print(f"Handling exception: {e}") + stopbtn = False + + if stopbtn: + logger.info("Stop Button was pressed. The program is closed.") + ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict() + logger.info( + "saving final ckpt:%s" + % ( + savee( + ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps + ) + ) + ) + sleep(1) + reset_stop_flag() + os._exit(2333333) + + if rank == 0: + logger.info("Epoch: {} {}".format(epoch, epoch_recorder.record())) + if epoch >= hps.total_epoch and rank == 0: + logger.info("Training successfully completed, closing the program...") + + if hasattr(net_g, "module"): + ckpt = net_g.module.state_dict() + else: + ckpt = net_g.state_dict() + logger.info( + "Saving final ckpt... %s" + % ( + savee( + ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps + ) + ) + ) + sleep(1) + os._exit(2333333) + + +if __name__ == "__main__": + torch.multiprocessing.set_start_method("spawn") + main() diff --git a/lib/infer/modules/uvr5/__pycache__/mdxnet.cpython-39.pyc b/lib/infer/modules/uvr5/__pycache__/mdxnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fbc2c93f242f9f66626c90655463cbc79949a0d Binary files /dev/null and b/lib/infer/modules/uvr5/__pycache__/mdxnet.cpython-39.pyc differ diff --git a/lib/infer/modules/uvr5/__pycache__/mdxprocess.cpython-39.pyc b/lib/infer/modules/uvr5/__pycache__/mdxprocess.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ecf0b674ebd58de5d3c12f0764799f7d480f8e7 Binary files /dev/null and b/lib/infer/modules/uvr5/__pycache__/mdxprocess.cpython-39.pyc differ diff --git a/lib/infer/modules/uvr5/__pycache__/preprocess.cpython-39.pyc b/lib/infer/modules/uvr5/__pycache__/preprocess.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f6733b308b418bf984795057a937ceeabfde70 Binary files /dev/null and b/lib/infer/modules/uvr5/__pycache__/preprocess.cpython-39.pyc differ diff --git a/lib/infer/modules/uvr5/mdxnet.py b/lib/infer/modules/uvr5/mdxnet.py new file mode 100644 index 0000000000000000000000000000000000000000..86a066893ad99cfed77788027a9deb8ed486a7f2 --- /dev/null +++ b/lib/infer/modules/uvr5/mdxnet.py @@ -0,0 +1,246 @@ +import os +import logging + +logger = logging.getLogger(__name__) + +import librosa +import numpy as np +import soundfile as sf +import torch +from tqdm import tqdm + +cpu = torch.device("cpu") + + +class ConvTDFNetTrim: + def __init__( + self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024 + ): + super(ConvTDFNetTrim, self).__init__() + + self.dim_f = dim_f + self.dim_t = 2**dim_t + self.n_fft = n_fft + self.hop = hop + self.n_bins = self.n_fft // 2 + 1 + self.chunk_size = hop * (self.dim_t - 1) + self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to( + device + ) + self.target_name = target_name + 
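+        # note that dim_t is stored as 2**dim_t, so chunk_size = hop * (2**dim_t - 1) samples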
self.blender = "blender" in model_name + + self.dim_c = 4 + out_c = self.dim_c * 4 if target_name == "*" else self.dim_c + self.freq_pad = torch.zeros( + [1, out_c, self.n_bins - self.dim_f, self.dim_t] + ).to(device) + + self.n = L // 2 + + def stft(self, x): + x = x.reshape([-1, self.chunk_size]) + x = torch.stft( + x, + n_fft=self.n_fft, + hop_length=self.hop, + window=self.window, + center=True, + return_complex=True, + ) + x = torch.view_as_real(x) + x = x.permute([0, 3, 1, 2]) + x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape( + [-1, self.dim_c, self.n_bins, self.dim_t] + ) + return x[:, :, : self.dim_f] + + def istft(self, x, freq_pad=None): + freq_pad = ( + self.freq_pad.repeat([x.shape[0], 1, 1, 1]) + if freq_pad is None + else freq_pad + ) + x = torch.cat([x, freq_pad], -2) + c = 4 * 2 if self.target_name == "*" else 2 + x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape( + [-1, 2, self.n_bins, self.dim_t] + ) + x = x.permute([0, 2, 3, 1]) + x = x.contiguous() + x = torch.view_as_complex(x) + x = torch.istft( + x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True + ) + return x.reshape([-1, c, self.chunk_size]) + + +def get_models(device, dim_f, dim_t, n_fft): + return ConvTDFNetTrim( + device=device, + model_name="Conv-TDF", + target_name="vocals", + L=11, + dim_f=dim_f, + dim_t=dim_t, + n_fft=n_fft, + ) + + +class Predictor: + def __init__(self, args): + import onnxruntime as ort + + logger.info(ort.get_available_providers()) + self.args = args + self.model_ = get_models( + device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft + ) + self.model = ort.InferenceSession( + os.path.join(args.onnx, self.model_.target_name + ".onnx"), + providers=[ + "CUDAExecutionProvider", + "DmlExecutionProvider", + "CPUExecutionProvider", + ], + ) + logger.info("ONNX load done") + + def demix(self, mix): + samples = mix.shape[-1] + margin = self.args.margin + chunk_size = self.args.chunks * 44100 + assert not margin == 0, "margin cannot be zero!" 
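+        # every chunk is cut with `margin` extra samples of context on each side;
+        # demix_base trims that overlap off again so chunk seams stay clean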
+ if margin > chunk_size: + margin = chunk_size + + segmented_mix = {} + + if self.args.chunks == 0 or samples < chunk_size: + chunk_size = samples + + counter = -1 + for skip in range(0, samples, chunk_size): + counter += 1 + + s_margin = 0 if counter == 0 else margin + end = min(skip + chunk_size + margin, samples) + + start = skip - s_margin + + segmented_mix[skip] = mix[:, start:end].copy() + if end == samples: + break + + sources = self.demix_base(segmented_mix, margin_size=margin) + """ + mix:(2,big_sample) + segmented_mix:offset->(2,small_sample) + sources:(1,2,big_sample) + """ + return sources + + def demix_base(self, mixes, margin_size): + chunked_sources = [] + progress_bar = tqdm(total=len(mixes)) + progress_bar.set_description("Processing") + for mix in mixes: + cmix = mixes[mix] + sources = [] + n_sample = cmix.shape[1] + model = self.model_ + trim = model.n_fft // 2 + gen_size = model.chunk_size - 2 * trim + pad = gen_size - n_sample % gen_size + mix_p = np.concatenate( + (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1 + ) + mix_waves = [] + i = 0 + while i < n_sample + pad: + waves = np.array(mix_p[:, i : i + model.chunk_size]) + mix_waves.append(waves) + i += gen_size + mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) + with torch.no_grad(): + _ort = self.model + spek = model.stft(mix_waves) + if self.args.denoise: + spec_pred = ( + -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 + + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 + ) + tar_waves = model.istft(torch.tensor(spec_pred)) + else: + tar_waves = model.istft( + torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0]) + ) + tar_signal = ( + tar_waves[:, :, trim:-trim] + .transpose(0, 1) + .reshape(2, -1) + .numpy()[:, :-pad] + ) + + start = 0 if mix == 0 else margin_size + end = None if mix == list(mixes.keys())[::-1][0] else -margin_size + if margin_size == 0: + end = None + sources.append(tar_signal[:, start:end]) + + progress_bar.update(1) + + chunked_sources.append(sources) + _sources = np.concatenate(chunked_sources, axis=-1) + # del self.model + progress_bar.close() + return _sources + + def prediction(self, m, vocal_root, others_root, format): + os.makedirs(vocal_root, exist_ok=True) + os.makedirs(others_root, exist_ok=True) + basename = os.path.basename(m) + mix, rate = librosa.load(m, mono=False, sr=44100) + if mix.ndim == 1: + mix = np.asfortranarray([mix, mix]) + mix = mix.T + sources = self.demix(mix.T) + opt = sources[0].T + if format in ["wav", "flac"]: + sf.write( + "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate + ) + sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) + else: + path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) + path_other = "%s/%s_others.wav" % (others_root, basename) + sf.write(path_vocal, mix - opt, rate) + sf.write(path_other, opt, rate) + if os.path.exists(path_vocal): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path_vocal, path_vocal[:-4] + ".%s" % format) + ) + if os.path.exists(path_other): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path_other, path_other[:-4] + ".%s" % format) + ) + + +class MDXNetDereverb: + def __init__(self, chunks, device): + self.onnx = "assets/uvr5_weights/onnx_dereverb_By_FoxJoy" + self.shifts = 10 # 'Predict with randomised equivariant stabilisation' + self.mixing = "min_mag" # ['default','min_mag','max_mag'] + self.chunks = chunks + self.margin = 44100 + self.dim_t = 9 + self.dim_f = 3072 + self.n_fft = 6144 + 
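+        # dim_t/dim_f/n_fft have to match the spectrogram layout the bundled
+        # FoxJoy dereverb ONNX model was exported with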
self.denoise = True + self.pred = Predictor(self) + self.device = device + + def path_audio(self, input, vocal_root, others_root, format): + self.pred.prediction(input, vocal_root, others_root, format) diff --git a/lib/infer/modules/uvr5/mdxprocess.py b/lib/infer/modules/uvr5/mdxprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..d2012ee1d27c862fe1884ae30d24138563a97664 --- /dev/null +++ b/lib/infer/modules/uvr5/mdxprocess.py @@ -0,0 +1,188 @@ +import gc +import requests +import subprocess +import sys +import os, warnings, librosa +import soundfile as sf +import numpy as np +import torch +import json + +folder = os.path.dirname(os.path.abspath(__file__)) +folder = os.path.dirname(folder) +folder = os.path.dirname(folder) +folder = os.path.dirname(folder) +now_dir = os.path.dirname(folder) + +import sys +sys.path.append(now_dir) + +import lib.infer.infer_libs.uvr5_pack.mdx as mdx +branch = "https://github.com/NaJeongMo/Colab-for-MDX_B" + +model_params = "https://raw.githubusercontent.com/TRvlvr/application_data/main/mdx_model_data/model_data.json" +_Models = "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/" +# _models = "https://pastebin.com/raw/jBzYB8vz" +_models = "https://raw.githubusercontent.com/TRvlvr/application_data/main/filelists/download_checks.json" + + +file_folder = "Colab-for-MDX_B" +model_request = requests.get(_models).json() +model_ids = model_request["mdx_download_list"].values() +demucs_download_list = model_request["demucs_download_list"] + +# Iterate through the keys and get the model names +model_ids_demucs_inpure = [name.split(":")[1].strip() for name in demucs_download_list.keys()] + +# Remove duplicates by converting the list to a set and then back to a list +model_ids_demucs = list(set(model_ids_demucs_inpure)) + +# Remove some not working models +demucs_ids_to_delete = ["tasnet_extra", "tasnet", "light_extra", "light", "demucs_extra", "demucs", "demucs_unittest", "demucs48_hq", "repro_mdx_a_hybrid_only", "repro_mdx_a_time_only", "repro_mdx_a", "UVR Model"] + +# Add some models that are not in the list +demucs_ids_to_add = ["SIG"] + +# Add the new ID to the model_ids_demucs list + +for demucs_ids_to_add in demucs_ids_to_add: + if demucs_ids_to_add not in model_ids_demucs: + model_ids_demucs.append(demucs_ids_to_add) + +# If the ID is in the list of IDs to delete, remove it from the list of model_ids_demucs +for demucs_ids_to_delete in demucs_ids_to_delete: + if demucs_ids_to_delete in model_ids_demucs: + model_ids_demucs.remove(demucs_ids_to_delete) + +#print(model_ids) +model_params = requests.get(model_params).json() +#Remove request for stem_naming +stem_naming = { + "Vocals": "Instrumental", + "Other": "Instruments", + "Instrumental": "Vocals", + "Drums": "Drumless", + "Bass": "Bassless" +} + + +os.makedirs(f"{now_dir}/assets/uvr5_weights/MDX", exist_ok=True) + +warnings.filterwarnings("ignore") +cpu = torch.device("cpu") +if torch.cuda.is_available(): + device = torch.device("cuda:0") +elif torch.backends.mps.is_available(): + device = torch.device("mps") +else: + device = torch.device("cpu") + + +def get_model_list(): + return model_ids + +def get_demucs_model_list(): + return model_ids_demucs + +def id_to_ptm(mkey): + if mkey in model_ids: + #print(mkey) + mpath = f"{now_dir}/assets/uvr5_weights/MDX/{mkey}" + if not os.path.exists(f'{now_dir}/assets/uvr5_weights/MDX/{mkey}'): + print('Downloading model...',end=' ') + subprocess.run( + ["python", "-m", "wget", "-o", mpath, _Models+mkey] + ) + 
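+            # note: downloading via `python -m wget` relies on the `wget` pip
+            # package being installed in the current environment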
print(f'saved to {mpath}') + return mpath + else: + return mpath + else: + mpath = f'{now_dir}/assets/uvr5_weights/{mkey}' + return mpath + +def prepare_mdx(onnx,custom_param=False, dim_f=None, dim_t=None, n_fft=None, stem_name=None, compensation=None): + device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') + if custom_param: + assert not (dim_f is None or dim_t is None or n_fft is None or compensation is None), 'Custom parameter selected, but incomplete parameters are provided.' + mdx_model = mdx.MDX_Model( + device, + dim_f = dim_f, + dim_t = dim_t, + n_fft = n_fft, + stem_name=stem_name, + compensation=compensation + ) + else: + model_hash = mdx.MDX.get_hash(onnx) + if model_hash in model_params: + mp = model_params.get(model_hash) + mdx_model = mdx.MDX_Model( + device, + dim_f = mp["mdx_dim_f_set"], + dim_t = 2**mp["mdx_dim_t_set"], + n_fft = mp["mdx_n_fft_scale_set"], + stem_name=mp["primary_stem"], + compensation=compensation if not custom_param and compensation is not None else mp["compensate"] + ) + return mdx_model + +def run_mdx(onnx, mdx_model,filename, output_format='wav',diff=False,suffix=None,diff_suffix=None, denoise=False, m_threads=2): + mdx_sess = mdx.MDX(onnx,mdx_model) + print(f"Processing: {filename}") + if filename.lower().endswith('.wav'): + wave, sr = librosa.load(filename, mono=False, sr=44100) + else: + temp_wav = 'temp_audio.wav' + subprocess.run(['ffmpeg', '-i', filename, '-ar', '44100', '-ac', '2', temp_wav]) # Convert to WAV format + wave, sr = librosa.load(temp_wav, mono=False, sr=44100) + os.remove(temp_wav) + + #wave, sr = librosa.load(filename,mono=False, sr=44100) + # normalizing input wave gives better output + peak = max(np.max(wave), abs(np.min(wave))) + wave /= peak + if denoise: + wave_processed = -(mdx_sess.process_wave(-wave, m_threads)) + (mdx_sess.process_wave(wave, m_threads)) + wave_processed *= 0.5 + else: + wave_processed = mdx_sess.process_wave(wave, m_threads) + # return to previous peak + wave_processed *= peak + + stem_name = mdx_model.stem_name if suffix is None else suffix # use suffix if provided + save_path = os.path.basename(os.path.splitext(filename)[0]) + #vocals_save_path = os.path.join(vocals_folder, f"{save_path}_{stem_name}.{output_format}") + #instrumental_save_path = os.path.join(instrumental_folder, f"{save_path}_{stem_name}.{output_format}") + save_path = f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.{output_format}" + save_path = os.path.join( + 'audios', + save_path + ) + sf.write( + save_path, + wave_processed.T, + sr + ) + + print(f'done, saved to: {save_path}') + + if diff: + diff_stem_name = stem_naming.get(stem_name) if diff_suffix is None else diff_suffix # use suffix if provided + stem_name = f"{stem_name}_diff" if diff_stem_name is None else diff_stem_name + save_path = f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.{output_format}" + save_path = os.path.join( + 'audio-others', + save_path + ) + sf.write( + save_path, + (-wave_processed.T*mdx_model.compensation)+wave.T, + sr + ) + print(f'invert done, saved to: {save_path}') + del mdx_sess, wave_processed, wave + gc.collect() + +if __name__ == "__main__": + print() diff --git a/lib/infer/modules/uvr5/modules.py b/lib/infer/modules/uvr5/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..a2d4404a145bc915e6d96ee27ac0fcc408b5e90c --- /dev/null +++ b/lib/infer/modules/uvr5/modules.py @@ -0,0 +1,107 @@ +import os +import traceback +import logging + +logger = 
logging.getLogger(__name__) + +import ffmpeg +import torch + +from assets.configs.config import Config +from lib.infer.modules.uvr5.mdxnet import MDXNetDereverb +from lib.infer.modules.uvr5.preprocess import AudioPre, AudioPreDeEcho + +config = Config() + + +def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): + infos = [] + try: + inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + save_root_vocal = ( + save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + ) + save_root_ins = ( + save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + ) + if model_name == "onnx_dereverb_By_FoxJoy": + pre_fun = MDXNetDereverb(15, config.device) + else: + func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho + pre_fun = func( + agg=int(agg), + model_path=os.path.join( + os.getenv("weight_uvr5_root"), model_name + ".pth" + ), + device=config.device, + is_half=config.is_half, + ) + if inp_root != "": + paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] + else: + paths = [path.name for path in paths] + for path in paths: + inp_path = os.path.join(inp_root, path) + need_reformat = 1 + done = 0 + try: + info = ffmpeg.probe(inp_path, cmd="ffprobe") + if ( + info["streams"][0]["channels"] == 2 + and info["streams"][0]["sample_rate"] == "44100" + ): + need_reformat = 0 + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0 + ) + done = 1 + except: + need_reformat = 1 + traceback.print_exc() + if need_reformat == 1: + tmp_path = "%s/%s.reformatted.wav" % ( + os.path.join(os.environ["temp"]), + os.path.basename(inp_path), + ) + os.system( + "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" + % (inp_path, tmp_path) + ) + inp_path = tmp_path + try: + if done == 0: + pre_fun.path_audio( + inp_path, save_root_ins, save_root_vocal, format0 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + try: + if done == 0: + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + infos.append( + "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) + ) + yield "\n".join(infos) + except: + infos.append(traceback.format_exc()) + yield "\n".join(infos) + finally: + try: + if model_name == "onnx_dereverb_By_FoxJoy": + del pre_fun.pred.model + del pre_fun.pred.model_ + else: + del pre_fun.model + del pre_fun + except: + traceback.print_exc() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + logger.info("Executed torch.cuda.empty_cache()") + yield "\n".join(infos) diff --git a/lib/infer/modules/uvr5/preprocess.py b/lib/infer/modules/uvr5/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..784f46e0bf28f536f381356c117904dda9934e6f --- /dev/null +++ b/lib/infer/modules/uvr5/preprocess.py @@ -0,0 +1,346 @@ +import os +import logging + +logger = logging.getLogger(__name__) + +import librosa +import numpy as np +import soundfile as sf +import torch + +from lib.infer.infer_libs.uvr5_pack.lib_v5 import nets_61968KB as Nets +from lib.infer.infer_libs.uvr5_pack.lib_v5 import spec_utils +from lib.infer.infer_libs.uvr5_pack.lib_v5.model_param_init import ModelParameters +from lib.infer.infer_libs.uvr5_pack.lib_v5.nets_new import CascadedNet +from lib.infer.infer_libs.uvr5_pack.utils import inference + + +class AudioPre: + def __init__(self, agg, model_path, device, is_half): + 
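+        # agg (0-100) sets how aggressively vocals are isolated; it is rescaled
+        # to 0.0-1.0 below before being passed to the separation network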
        self.model_path = model_path
+        self.device = device
+        self.data = {
+            # Processing Options
+            "postprocess": False,
+            "tta": False,
+            # Constants
+            "window_size": 512,
+            "agg": agg,
+            "high_end_process": "mirroring",
+        }
+        mp = ModelParameters("lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v2.json")
+        model = Nets.CascadedASPPNet(mp.param["bins"] * 2)
+        cpk = torch.load(model_path, map_location="cpu")
+        model.load_state_dict(cpk)
+        model.eval()
+        if is_half:
+            model = model.half().to(device)
+        else:
+            model = model.to(device)
+
+        self.mp = mp
+        self.model = model
+
+    def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
+        if ins_root is None and vocal_root is None:
+            return "No save root."
+        name = os.path.basename(music_file)
+        if ins_root is not None:
+            os.makedirs(ins_root, exist_ok=True)
+        if vocal_root is not None:
+            os.makedirs(vocal_root, exist_ok=True)
+        X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
+        bands_n = len(self.mp.param["band"])
+        # print(bands_n)
+        for d in range(bands_n, 0, -1):
+            bp = self.mp.param["band"][d]
+            if d == bands_n:  # high-end band
+                (
+                    X_wave[d],
+                    _,
+                ) = librosa.core.load(  # in theory librosa can mis-read some audio and ffmpeg would be safer, but that was judged too much hassle and dropped
+                    music_file,
+                    bp["sr"],
+                    False,
+                    dtype=np.float32,
+                    res_type=bp["res_type"],
+                )
+                if X_wave[d].ndim == 1:
+                    X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
+            else:  # lower bands
+                X_wave[d] = librosa.core.resample(
+                    X_wave[d + 1],
+                    self.mp.param["band"][d + 1]["sr"],
+                    bp["sr"],
+                    res_type=bp["res_type"],
+                )
+            # STFT of wave source
+            X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
+                X_wave[d],
+                bp["hl"],
+                bp["n_fft"],
+                self.mp.param["mid_side"],
+                self.mp.param["mid_side_b2"],
+                self.mp.param["reverse"],
+            )
+            # pdb.set_trace()
+            if d == bands_n and self.data["high_end_process"] != "none":
+                input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
+                    self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
+                )
+                input_high_end = X_spec_s[d][
+                    :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
+                ]
+
+        X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
+        aggressive_set = float(self.data["agg"] / 100)
+        aggressiveness = {
+            "value": aggressive_set,
+            "split_bin": self.mp.param["band"][1]["crop_stop"],
+        }
+        with torch.no_grad():
+            pred, X_mag, X_phase = inference(
+                X_spec_m, self.device, self.model, aggressiveness, self.data
+            )
+        # Postprocess
+        if self.data["postprocess"]:
+            pred_inv = np.clip(X_mag - pred, 0, np.inf)
+            pred = spec_utils.mask_silence(pred, pred_inv)
+        y_spec_m = pred * X_phase
+        v_spec_m = X_spec_m - y_spec_m
+
+        if ins_root is not None:
+            if self.data["high_end_process"].startswith("mirroring"):
+                input_high_end_ = spec_utils.mirroring(
+                    self.data["high_end_process"], y_spec_m, input_high_end, self.mp
+                )
+                wav_instrument = spec_utils.cmb_spectrogram_to_wave(
+                    y_spec_m, self.mp, input_high_end_h, input_high_end_
+                )
+            else:
+                wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
+            logger.info("%s instruments done" % name)
+            if format in ["wav", "flac"]:
+                sf.write(
+                    os.path.join(
+                        ins_root,
+                        "instrument_{}_{}.{}".format(name, self.data["agg"], format),
+                    ),
+                    (np.array(wav_instrument) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+            else:
+                path = os.path.join(
+                    ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
+                )
+                sf.write(
+                    path,
+                    (np.array(wav_instrument) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+                if os.path.exists(path):
+                    os.system(
+                        "ffmpeg -i %s -vn %s -q:a 2 -y"
+                        % (path, path[:-4] + ".%s" % format)
+                    )
+        if vocal_root is not None:
+            if self.data["high_end_process"].startswith("mirroring"):
+                input_high_end_ = spec_utils.mirroring(
+                    self.data["high_end_process"], v_spec_m, input_high_end, self.mp
+                )
+                wav_vocals = spec_utils.cmb_spectrogram_to_wave(
+                    v_spec_m, self.mp, input_high_end_h, input_high_end_
+                )
+            else:
+                wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
+            logger.info("%s vocals done" % name)
+            if format in ["wav", "flac"]:
+                sf.write(
+                    os.path.join(
+                        vocal_root,
+                        "vocal_{}_{}.{}".format(name, self.data["agg"], format),
+                    ),
+                    (np.array(wav_vocals) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+            else:
+                path = os.path.join(
+                    vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
+                )
+                sf.write(
+                    path,
+                    (np.array(wav_vocals) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+                if os.path.exists(path):
+                    os.system(
+                        "ffmpeg -i %s -vn %s -q:a 2 -y"
+                        % (path, path[:-4] + ".%s" % format)
+                    )
+
+
+class AudioPreDeEcho:
+    def __init__(self, agg, model_path, device, is_half):
+        self.model_path = model_path
+        self.device = device
+        self.data = {
+            # Processing Options
+            "postprocess": False,
+            "tta": False,
+            # Constants
+            "window_size": 512,
+            "agg": agg,
+            "high_end_process": "mirroring",
+        }
+        mp = ModelParameters("lib/infer/infer_libs/uvr5_pack/lib_v5/modelparams/4band_v3.json")
+        nout = 64 if "DeReverb" in model_path else 48
+        model = CascadedNet(mp.param["bins"] * 2, nout)
+        cpk = torch.load(model_path, map_location="cpu")
+        model.load_state_dict(cpk)
+        model.eval()
+        if is_half:
+            model = model.half().to(device)
+        else:
+            model = model.to(device)
+
+        self.mp = mp
+        self.model = model
+
+    def _path_audio_(
+        self, music_file, vocal_root=None, ins_root=None, format="flac"
+    ):  # for the three VR models, the vocal and instrumental outputs are swapped
+        if ins_root is None and vocal_root is None:
+            return "No save root."
+        name = os.path.basename(music_file)
+        if ins_root is not None:
+            os.makedirs(ins_root, exist_ok=True)
+        if vocal_root is not None:
+            os.makedirs(vocal_root, exist_ok=True)
+        X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
+        bands_n = len(self.mp.param["band"])
+        # print(bands_n)
+        for d in range(bands_n, 0, -1):
+            bp = self.mp.param["band"][d]
+            if d == bands_n:  # high-end band
+                (
+                    X_wave[d],
+                    _,
+                ) = librosa.core.load(  # in theory librosa can mis-read some audio and ffmpeg would be safer, but that was judged too much hassle and dropped
+                    music_file,
+                    bp["sr"],
+                    False,
+                    dtype=np.float32,
+                    res_type=bp["res_type"],
+                )
+                if X_wave[d].ndim == 1:
+                    X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
+            else:  # lower bands
+                X_wave[d] = librosa.core.resample(
+                    X_wave[d + 1],
+                    self.mp.param["band"][d + 1]["sr"],
+                    bp["sr"],
+                    res_type=bp["res_type"],
+                )
+            # STFT of wave source
+            X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
+                X_wave[d],
+                bp["hl"],
+                bp["n_fft"],
+                self.mp.param["mid_side"],
+                self.mp.param["mid_side_b2"],
+                self.mp.param["reverse"],
+            )
+            # pdb.set_trace()
+            if d == bands_n and self.data["high_end_process"] != "none":
+                input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
+                    self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
+                )
+                input_high_end = X_spec_s[d][
+                    :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
+                ]
+
+        X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
+        aggressive_set = float(self.data["agg"] / 100)
+        aggressiveness = {
+            "value": aggressive_set,
+            "split_bin": self.mp.param["band"][1]["crop_stop"],
+        }
+        with torch.no_grad():
+            pred, X_mag, X_phase = inference(
+                X_spec_m, self.device, self.model, aggressiveness, self.data
+            )
+        # Postprocess
+        if self.data["postprocess"]:
+            pred_inv = np.clip(X_mag - pred, 0, np.inf)
+            pred = spec_utils.mask_silence(pred, pred_inv)
+        y_spec_m = pred * X_phase
+        v_spec_m = X_spec_m - y_spec_m
+
+        if ins_root is not None:
+            if self.data["high_end_process"].startswith("mirroring"):
+                input_high_end_ = spec_utils.mirroring(
+                    self.data["high_end_process"], y_spec_m, input_high_end, self.mp
+                )
+                wav_instrument = spec_utils.cmb_spectrogram_to_wave(
+                    y_spec_m, self.mp, input_high_end_h, input_high_end_
+                )
+            else:
+                wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
+            logger.info("%s instruments done" % name)
+            if format in ["wav", "flac"]:
+                sf.write(
+                    os.path.join(
+                        ins_root,
+                        "instrument_{}_{}.{}".format(name, self.data["agg"], format),
+                    ),
+                    (np.array(wav_instrument) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+            else:
+                path = os.path.join(
+                    ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
+                )
+                sf.write(
+                    path,
+                    (np.array(wav_instrument) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+                if os.path.exists(path):
+                    os.system(
+                        "ffmpeg -i %s -vn %s -q:a 2 -y"
+                        % (path, path[:-4] + ".%s" % format)
+                    )
+        if vocal_root is not None:
+            if self.data["high_end_process"].startswith("mirroring"):
+                input_high_end_ = spec_utils.mirroring(
+                    self.data["high_end_process"], v_spec_m, input_high_end, self.mp
+                )
+                wav_vocals = spec_utils.cmb_spectrogram_to_wave(
+                    v_spec_m, self.mp, input_high_end_h, input_high_end_
+                )
+            else:
+                wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
+            logger.info("%s vocals done" % name)
+            if format in ["wav", "flac"]:
+                sf.write(
+                    os.path.join(
+                        vocal_root,
+                        "vocal_{}_{}.{}".format(name, self.data["agg"], format),
+                    ),
+                    (np.array(wav_vocals) * 32768).astype("int16"),
+                    self.mp.param["sr"],
+                )
+            else:
+                path = os.path.join(
+                    vocal_root,
"vocal_{}_{}.wav".format(name, self.data["agg"]) + ) + sf.write( + path, + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + os.system( + "ffmpeg -i %s -vn %s -q:a 2 -y" + % (path, path[:-4] + ".%s" % format) + ) diff --git a/lib/infer/modules/vc/__init__.py b/lib/infer/modules/vc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/infer/modules/vc/__pycache__/__init__.cpython-39.pyc b/lib/infer/modules/vc/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b44806cd2b0f20f3945bc94986345a2549b35bd Binary files /dev/null and b/lib/infer/modules/vc/__pycache__/__init__.cpython-39.pyc differ diff --git a/lib/infer/modules/vc/__pycache__/modules.cpython-39.pyc b/lib/infer/modules/vc/__pycache__/modules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..306c0ca3a3e71d74a8059d660031320108110ab2 Binary files /dev/null and b/lib/infer/modules/vc/__pycache__/modules.cpython-39.pyc differ diff --git a/lib/infer/modules/vc/__pycache__/pipeline.cpython-39.pyc b/lib/infer/modules/vc/__pycache__/pipeline.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59b959b02214a23268c55b2f57ff5036dc981c73 Binary files /dev/null and b/lib/infer/modules/vc/__pycache__/pipeline.cpython-39.pyc differ diff --git a/lib/infer/modules/vc/__pycache__/utils.cpython-39.pyc b/lib/infer/modules/vc/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06a1f60d9c993b8d381d9ec911e90bbcf2256ab5 Binary files /dev/null and b/lib/infer/modules/vc/__pycache__/utils.cpython-39.pyc differ diff --git a/lib/infer/modules/vc/modules.py b/lib/infer/modules/vc/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..9338160b00595fa24e2991e06a65d48a2d92e7c4 --- /dev/null +++ b/lib/infer/modules/vc/modules.py @@ -0,0 +1,699 @@ +import os, sys +import traceback +import logging +now_dir = os.getcwd() +sys.path.append(now_dir) +logger = logging.getLogger(__name__) +import lib.globals.globals as rvc_globals +import numpy as np +import soundfile as sf +import torch +from io import BytesIO +from lib.infer.infer_libs.audio import load_audio +from lib.infer.infer_libs.audio import wav2 +from lib.infer.infer_libs.infer_pack.models import ( + SynthesizerTrnMs256NSFsid, + SynthesizerTrnMs256NSFsid_nono, + SynthesizerTrnMs768NSFsid, + SynthesizerTrnMs768NSFsid_nono, +) +from lib.infer.modules.vc.pipeline import Pipeline +from lib.infer.modules.vc.utils import * +import tabs.merge as merge +import time +import scipy.io.wavfile as wavfile +import glob +from shutil import move +sup_audioext = { + "wav", + "mp3", + "flac", + "ogg", + "opus", + "m4a", + "mp4", + "aac", + "alac", + "wma", + "aiff", + "webm", + "ac3", +} +def note_to_hz(note_name): + SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} + pitch_class, octave = note_name[:-1], int(note_name[-1]) + semitone = SEMITONES[pitch_class] + note_number = 12 * (octave - 4) + semitone + frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number + return frequency + +class VC: + def __init__(self, config): + self.n_spk = None + self.tgt_sr = None + self.net_g = None + self.pipeline = None + self.cpt = None + self.version = None + self.if_f0 = None + self.version = None + self.hubert_model = None + + self.config = config 
+ + def get_vc(self, sid, *to_return_protect): + logger.info("Get sid: " + sid) + + to_return_protect0 = { + "visible": self.if_f0 != 0, + "value": to_return_protect[0] + if self.if_f0 != 0 and to_return_protect + else 0.5, + "__type__": "update", + } + to_return_protect1 = { + "visible": self.if_f0 != 0, + "value": to_return_protect[1] + if self.if_f0 != 0 and to_return_protect + else 0.33, + "__type__": "update", + } + + if sid == "" or sid == []: + if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 + logger.info("Clean model cache") + del ( + self.net_g, + self.n_spk, + self.vc, + self.hubert_model, + self.tgt_sr, + ) # ,cpt + self.hubert_model = ( + self.net_g + ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None + if torch.cuda.is_available(): + torch.cuda.empty_cache() + ###楼下不这么折腾清理不干净 + self.if_f0 = self.cpt.get("f0", 1) + self.version = self.cpt.get("version", "v1") + if self.version == "v1": + if self.if_f0 == 1: + self.net_g = SynthesizerTrnMs256NSFsid( + *self.cpt["config"], is_half=self.config.is_half + ) + else: + self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"]) + elif self.version == "v2": + if self.if_f0 == 1: + self.net_g = SynthesizerTrnMs768NSFsid( + *self.cpt["config"], is_half=self.config.is_half + ) + else: + self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) + del self.net_g, self.cpt + if torch.cuda.is_available(): + torch.cuda.empty_cache() + return ( + {"visible": False, "__type__": "update"}, + { + "visible": True, + "value": to_return_protect0, + "__type__": "update", + }, + { + "visible": True, + "value": to_return_protect1, + "__type__": "update", + }, + "", + "", + ) + #person = f'{os.getenv("weight_root")}/{sid}' + person = f'{sid}' + #logger.info(f"Loading: {person}") + logger.info(f"Loading...") + self.cpt = torch.load(person, map_location="cpu") + self.tgt_sr = self.cpt["config"][-1] + self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk + self.if_f0 = self.cpt.get("f0", 1) + self.version = self.cpt.get("version", "v1") + + synthesizer_class = { + ("v1", 1): SynthesizerTrnMs256NSFsid, + ("v1", 0): SynthesizerTrnMs256NSFsid_nono, + ("v2", 1): SynthesizerTrnMs768NSFsid, + ("v2", 0): SynthesizerTrnMs768NSFsid_nono, + } + + self.net_g = synthesizer_class.get( + (self.version, self.if_f0), SynthesizerTrnMs256NSFsid + )(*self.cpt["config"], is_half=self.config.is_half) + + del self.net_g.enc_q + + self.net_g.load_state_dict(self.cpt["weight"], strict=False) + self.net_g.eval().to(self.config.device) + if self.config.is_half: + self.net_g = self.net_g.half() + else: + self.net_g = self.net_g.float() + + self.pipeline = Pipeline(self.tgt_sr, self.config) + n_spk = self.cpt["config"][-3] + index = {"value": get_index_path_from_model(sid), "__type__": "update"} + logger.info("Select index: " + index["value"]) + + return ( + ( + {"visible": False, "maximum": n_spk, "__type__": "update"}, + to_return_protect0, + to_return_protect1 + ) + if to_return_protect + else {"visible": False, "maximum": n_spk, "__type__": "update"} + ) + + + def vc_single( + self, + sid, + input_audio_path1, + f0_up_key, + f0_file, + f0_method, + file_index, + file_index2, + index_rate, + filter_radius, + resample_sr, + rms_mix_rate, + protect, + format1, + split_audio, + crepe_hop_length, + f0_min, + note_min, + f0_max, + note_max, + f0_autotune, + ): + global total_time + total_time = 0 + start_time = time.time() + if not input_audio_path1: + return "You need to upload an audio", None + + if (not 
os.path.exists(input_audio_path1)) and (not os.path.exists(os.path.join(now_dir, input_audio_path1))):
+            return "Audio was not properly selected or doesn't exist", None
+        if split_audio:
+            resultm, new_dir_path = merge.process_audio(input_audio_path1)
+            print(resultm)
+            print("------")
+            print(new_dir_path)
+            if resultm == "Finish":
+
+                file_index = (
+                    (
+                        file_index.strip(" ")
+                        .strip('"')
+                        .strip("\n")
+                        .strip('"')
+                        .strip(" ")
+                        .replace("trained", "added")
+                    )
+                    if file_index != ""
+                    else file_index2
+                )  # guard against a common user mistake: swap the "trained" index for its "added" counterpart automatically
+
+                # Use the code from vc_multi to process the segmented audio
+                if rvc_globals.NotesOrHertz and f0_method != 'rmvpe':
+                    f0_min = note_to_hz(note_min) if note_min else 50
+                    f0_max = note_to_hz(note_max) if note_max else 1100
+                    print(f"Converted Min pitch: freq - {f0_min}\n"
+                          f"Converted Max pitch: freq - {f0_max}")
+                else:
+                    f0_min = f0_min or 50
+                    f0_max = f0_max or 1100
+
+                try:
+                    dir_path = (
+                        new_dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+                    )  # Prevent leading/trailing whitespace and quotes
+                    paths = []
+                    try:
+                        if dir_path != "":
+                            paths = [
+                                os.path.join(root, name)
+                                for root, _, files in os.walk(dir_path, topdown=False)
+                                for name in files
+                                if name.endswith(tuple(sup_audioext)) and root == dir_path
+                            ]
+                    except:
+                        traceback.print_exc()
+                    print(paths)
+                    for path in paths:
+                        info, opt = self.vc_single_dont_save(
+                            sid,
+                            path,
+                            f0_up_key,
+                            None,
+                            f0_method,
+                            file_index,
+                            file_index2,
+                            # file_big_npy,
+                            index_rate,
+                            filter_radius,
+                            resample_sr,
+                            rms_mix_rate,
+                            protect,
+                            crepe_hop_length,
+                            f0_min,
+                            note_min,
+                            f0_max,
+                            note_max,
+                            f0_autotune,
+                        )
+                        if "Success" in info:
+                            try:
+                                tgt_sr, audio_opt = opt
+                                output_filename = os.path.splitext(os.path.basename(path))[0]
+                                if format1 in ["wav", "flac"]:
+                                    sf.write(
+                                        "%s/%s.%s"
+                                        % (new_dir_path, output_filename, format1),
+                                        audio_opt,
+                                        tgt_sr,
+                                    )
+                                else:
+                                    path = "%s/%s.%s" % (new_dir_path, output_filename, format1)
+                                    with BytesIO() as wavf:
+                                        sf.write(
+                                            wavf,
+                                            audio_opt,
+                                            tgt_sr,
+                                            format="wav"
+                                        )
+                                        wavf.seek(0, 0)
+                                        with open(path, "wb") as outf:
+                                            wav2(wavf, outf, format1)
+                            except:
+                                print(traceback.format_exc())
+                except:
+                    print(traceback.format_exc())
+
+                time.sleep(0.5)
+                print("Finished processing segmented audio, now merging audio...")
+
+                # Merge the segmented audio back together
+                merge_timestamps_file = os.path.join(os.path.dirname(new_dir_path), f"{os.path.basename(input_audio_path1).split('.')[0]}_timestamps.txt")
+                merge.merge_audio(merge_timestamps_file)
+
+                # Calculate the elapsed time
+                end_time = time.time()
+                total_time = end_time - start_time
+
+                merged_audio_path = os.path.join(os.path.dirname(new_dir_path), "audio-outputs", f"{os.path.basename(input_audio_path1).split('.')[0]}_merged.wav")
+                index_info = (
+                    "Index:\n%s." % file_index
+                    if os.path.exists(file_index)
+                    else "Index not used."
+                )
+
+                return (
+                    "Success.\n%s\nTime:\ninfer: %s."
+ % (index_info, total_time), + merged_audio_path, + ) + + print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") + f0_up_key = int(f0_up_key) + if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': + f0_min = note_to_hz(note_min) if note_min else 50 + f0_max = note_to_hz(note_max) if note_max else 1100 + print(f"Converted Min pitch: freq - {f0_min}\n" + f"Converted Max pitch: freq - {f0_max}") + else: + f0_min = f0_min or 50 + f0_max = f0_max or 1100 + try: + print(f"Attempting to load {input_audio_path1}....") + audio = load_audio(file=input_audio_path1, + sr=16000, + DoFormant=rvc_globals.DoFormant, + Quefrency=rvc_globals.Quefrency, + Timbre=rvc_globals.Timbre) + + audio_max = np.abs(audio).max() / 0.95 + if audio_max > 1: + audio /= audio_max + times = [0, 0, 0] + + if self.hubert_model is None: + self.hubert_model = load_hubert(self.config) + + try: + self.if_f0 = self.cpt.get("f0", 1) + except NameError: + message = "Model was not properly selected" + print(message) + return message, None + + file_index = ( + ( + file_index.strip(" ") + .strip('"') + .strip("\n") + .strip('"') + .strip(" ") + .replace("trained", "added") + ) + if file_index != "" + else file_index2 + ) # 防止小白写错,自动帮他替换掉 + + try: + audio_opt = self.pipeline.pipeline( + self.hubert_model, + self.net_g, + sid, + audio, + input_audio_path1, + times, + f0_up_key, + f0_method, + file_index, + index_rate, + self.if_f0, + filter_radius, + self.tgt_sr, + resample_sr, + rms_mix_rate, + self.version, + protect, + crepe_hop_length, + f0_autotune, + f0_file=f0_file, + f0_min=f0_min, + f0_max=f0_max + ) + except AssertionError: + message = "Mismatching index version detected (v1 with v2, or v2 with v1)." + print(message) + return message, None + except NameError: + message = "RVC libraries are still loading. Please try again in a few seconds." + print(message) + return message, None + + if self.tgt_sr != resample_sr >= 16000: + tgt_sr = resample_sr + else: + tgt_sr = self.tgt_sr + index_info = ( + "Index:\n%s." % file_index + if os.path.exists(file_index) + else "Index not used." + ) + end_time = time.time() + total_time = end_time - start_time + opt_root = "assets/audios/audio-outputs" + os.makedirs(opt_root, exist_ok=True) + output_count = 1 + + while True: + opt_filename = f"generated_audio_{output_count}.{format1}" + current_output_path = os.path.join(opt_root, opt_filename) + if not os.path.exists(current_output_path): + break + output_count += 1 + try: + if format1 in ["wav", "flac"]: + sf.write( + current_output_path, + audio_opt, + self.tgt_sr, + ) + print(f"💾 Generated audio saved to: {current_output_path}") + else: + with BytesIO() as wavf: + sf.write( + wavf, + audio_opt, + self.tgt_sr, + format="wav" + ) + wavf.seek(0, 0) + with open(current_output_path, "wb") as outf: + wav2(wavf, outf, format1) + print(f"💾 Generated audio saved to: {current_output_path}") + except: + info = traceback.format_exc() + return ( + "Success.\n%s\nTime:\nnpy: %.2fs, f0: %.2fs, infer: %.2fs." 
+ % (index_info, *times), + (tgt_sr, audio_opt), + ) + except: + info = traceback.format_exc() + logger.warn(info) + return info, (None, None) + + def vc_single_dont_save( + self, + sid, + input_audio_path1, + f0_up_key, + f0_file, + f0_method, + file_index, + file_index2, + index_rate, + filter_radius, + resample_sr, + rms_mix_rate, + protect, + crepe_hop_length, + f0_min, + note_min, + f0_max, + note_max, + f0_autotune, + ): + global total_time + total_time = 0 + start_time = time.time() + if not input_audio_path1: + return "You need to upload an audio", None + + if (not os.path.exists(input_audio_path1)) and (not os.path.exists(os.path.join(now_dir, input_audio_path1))): + return "Audio was not properly selected or doesn't exist", None + + print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") + f0_up_key = int(f0_up_key) + if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': + f0_min = note_to_hz(note_min) if note_min else 50 + f0_max = note_to_hz(note_max) if note_max else 1100 + print(f"Converted Min pitch: freq - {f0_min}\n" + f"Converted Max pitch: freq - {f0_max}") + else: + f0_min = f0_min or 50 + f0_max = f0_max or 1100 + try: + print(f"Attempting to load {input_audio_path1}....") + audio = load_audio(file=input_audio_path1, + sr=16000, + DoFormant=rvc_globals.DoFormant, + Quefrency=rvc_globals.Quefrency, + Timbre=rvc_globals.Timbre) + + audio_max = np.abs(audio).max() / 0.95 + if audio_max > 1: + audio /= audio_max + times = [0, 0, 0] + + if self.hubert_model is None: + self.hubert_model = load_hubert(self.config) + + try: + self.if_f0 = self.cpt.get("f0", 1) + except NameError: + message = "Model was not properly selected" + print(message) + return message, None + + file_index = ( + ( + file_index.strip(" ") + .strip('"') + .strip("\n") + .strip('"') + .strip(" ") + .replace("trained", "added") + ) + if file_index != "" + else file_index2 + ) # 防止小白写错,自动帮他替换掉 + + try: + audio_opt = self.pipeline.pipeline( + self.hubert_model, + self.net_g, + sid, + audio, + input_audio_path1, + times, + f0_up_key, + f0_method, + file_index, + index_rate, + self.if_f0, + filter_radius, + self.tgt_sr, + resample_sr, + rms_mix_rate, + self.version, + protect, + crepe_hop_length, + f0_autotune, + f0_file=f0_file, + f0_min=f0_min, + f0_max=f0_max + ) + except AssertionError: + message = "Mismatching index version detected (v1 with v2, or v2 with v1)." + print(message) + return message, None + except NameError: + message = "RVC libraries are still loading. Please try again in a few seconds." + print(message) + return message, None + + if self.tgt_sr != resample_sr >= 16000: + tgt_sr = resample_sr + else: + tgt_sr = self.tgt_sr + index_info = ( + "Index:\n%s." % file_index + if os.path.exists(file_index) + else "Index not used." + ) + end_time = time.time() + total_time = end_time - start_time + return ( + "Success.\n%s\nTime:\nnpy: %.2fs, f0: %.2fs, infer: %.2fs." 
+ % (index_info, *times), + (tgt_sr, audio_opt), + ) + except: + info = traceback.format_exc() + logger.warn(info) + return info, (None, None) + + + + + + + def vc_multi( + self, + sid, + dir_path, + opt_root, + paths, + f0_up_key, + f0_method, + file_index, + file_index2, + index_rate, + filter_radius, + resample_sr, + rms_mix_rate, + protect, + format1, + crepe_hop_length, + f0_min, + note_min, + f0_max, + note_max, + f0_autotune, + ): + if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': + f0_min = note_to_hz(note_min) if note_min else 50 + f0_max = note_to_hz(note_max) if note_max else 1100 + print(f"Converted Min pitch: freq - {f0_min}\n" + f"Converted Max pitch: freq - {f0_max}") + else: + f0_min = f0_min or 50 + f0_max = f0_max or 1100 + try: + dir_path = ( + dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + ) # 防止小白拷路径头尾带了空格和"和回车 + opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + os.makedirs(opt_root, exist_ok=True) + try: + if dir_path != "": + paths = [ + os.path.join(root, name) + for root, _, files in os.walk(dir_path, topdown=False) + for name in files + if name.endswith(tuple(sup_audioext)) and root == dir_path + ] + else: + paths = [path.name for path in paths] + except: + traceback.print_exc() + paths = [path.name for path in paths] + infos = [] + print(paths) + for path in paths: + info, opt = self.vc_single_dont_save( + sid, + path, + f0_up_key, + None, + f0_method, + file_index, + file_index2, + # file_big_npy, + index_rate, + filter_radius, + resample_sr, + rms_mix_rate, + protect, + crepe_hop_length, + f0_min, + note_min, + f0_max, + note_max, + f0_autotune, + ) + if "Success" in info: + try: + tgt_sr, audio_opt = opt + if format1 in ["wav", "flac"]: + sf.write( + "%s/%s.%s" + % (opt_root, os.path.basename(path), format1), + audio_opt, + tgt_sr, + ) + else: + path = "%s/%s.%s" % (opt_root, os.path.basename(path), format1) + with BytesIO() as wavf: + sf.write( + wavf, + audio_opt, + tgt_sr, + format="wav" + ) + wavf.seek(0, 0) + with open(path, "wb") as outf: + wav2(wavf, outf, format1) + except: + info += traceback.format_exc() + infos.append("%s->%s" % (os.path.basename(path), info)) + yield "\n".join(infos) + yield "\n".join(infos) + except: + yield traceback.format_exc() diff --git a/lib/infer/modules/vc/pipeline.py b/lib/infer/modules/vc/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..e38097b4f89a6669052370c0cc41452d3049c814 --- /dev/null +++ b/lib/infer/modules/vc/pipeline.py @@ -0,0 +1,741 @@ +import os +import sys +import traceback +import logging + +logger = logging.getLogger(__name__) + +from functools import lru_cache +from time import time as ttime +from torch import Tensor +import faiss +import librosa +import numpy as np +import parselmouth +import pyworld +import torch.nn.functional as F +from scipy import signal +from tqdm import tqdm + +import random +now_dir = os.getcwd() +sys.path.append(now_dir) +import re +from functools import partial +bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) + +input_audio_path2wav = {} +import torchcrepe # Fork Feature. 
Crepe algo for training and preprocess +import torch +from lib.infer.infer_libs.rmvpe import RMVPE + +@lru_cache +def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): + audio = input_audio_path2wav[input_audio_path] + f0, t = pyworld.harvest( + audio, + fs=fs, + f0_ceil=f0max, + f0_floor=f0min, + frame_period=frame_period, + ) + f0 = pyworld.stonemask(audio, f0, t, fs) + return f0 + + +def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 + # print(data1.max(),data2.max()) + rms1 = librosa.feature.rms( + y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 + ) # 每半秒一个点 + rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) + rms1 = torch.from_numpy(rms1) + rms1 = F.interpolate( + rms1.unsqueeze(0), size=data2.shape[0], mode="linear" + ).squeeze() + rms2 = torch.from_numpy(rms2) + rms2 = F.interpolate( + rms2.unsqueeze(0), size=data2.shape[0], mode="linear" + ).squeeze() + rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) + data2 *= ( + torch.pow(rms1, torch.tensor(1 - rate)) + * torch.pow(rms2, torch.tensor(rate - 1)) + ).numpy() + return data2 + + +class Pipeline(object): + def __init__(self, tgt_sr, config): + self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( + config.x_pad, + config.x_query, + config.x_center, + config.x_max, + config.is_half, + ) + self.sr = 16000 # hubert输入采样率 + self.window = 160 # 每帧点数 + self.t_pad = self.sr * self.x_pad # 每条前后pad时间 + self.t_pad_tgt = tgt_sr * self.x_pad + self.t_pad2 = self.t_pad * 2 + self.t_query = self.sr * self.x_query # 查询切点前后查询时间 + self.t_center = self.sr * self.x_center # 查询切点位置 + self.t_max = self.sr * self.x_max # 免查询时长阈值 + self.device = config.device + self.model_rmvpe = RMVPE("%s/rmvpe.pt" % os.environ["rmvpe_root"], is_half=self.is_half, device=self.device) + + self.note_dict = [ + 65.41, 69.30, 73.42, 77.78, 82.41, 87.31, + 92.50, 98.00, 103.83, 110.00, 116.54, 123.47, + 130.81, 138.59, 146.83, 155.56, 164.81, 174.61, + 185.00, 196.00, 207.65, 220.00, 233.08, 246.94, + 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, + 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, + 523.25, 554.37, 587.33, 622.25, 659.25, 698.46, + 739.99, 783.99, 830.61, 880.00, 932.33, 987.77, + 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91, + 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53, + 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83, + 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07 + ] + + # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device) + def get_optimal_torch_device(self, index: int = 0) -> torch.device: + if torch.cuda.is_available(): + return torch.device( + f"cuda:{index % torch.cuda.device_count()}" + ) # Very fast + elif torch.backends.mps.is_available(): + return torch.device("mps") + return torch.device("cpu") + + # Fork Feature: Compute f0 with the crepe method + def get_f0_crepe_computation( + self, + x, + f0_min, + f0_max, + p_len, + *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. + **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full + ): + x = x.astype( + np.float32 + ) # fixes the F.conv2D exception. We needed to convert double to float. 
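+        # Robust peak normalization: dividing by the 99.9th-percentile magnitude
+        # brings the signal into roughly [-1, 1] without letting a single outlier
+        # sample dictate the scale.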
+ x /= np.quantile(np.abs(x), 0.999) + torch_device = self.get_optimal_torch_device() + audio = torch.from_numpy(x).to(torch_device, copy=True) + audio = torch.unsqueeze(audio, dim=0) + if audio.ndim == 2 and audio.shape[0] > 1: + audio = torch.mean(audio, dim=0, keepdim=True).detach() + audio = audio.detach() + hop_length = kwargs.get('crepe_hop_length', 160) + model = kwargs.get('model', 'full') + print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) + pitch: Tensor = torchcrepe.predict( + audio, + self.sr, + hop_length, + f0_min, + f0_max, + model, + batch_size=hop_length * 2, + device=torch_device, + pad=True, + ) + p_len = p_len or x.shape[0] // hop_length + # Resize the pitch for final f0 + source = np.array(pitch.squeeze(0).cpu().float().numpy()) + source[source < 0.001] = np.nan + target = np.interp( + np.arange(0, len(source) * p_len, len(source)) / p_len, + np.arange(0, len(source)), + source, + ) + f0 = np.nan_to_num(target) + return f0 # Resized f0 + + def get_f0_official_crepe_computation( + self, + x, + f0_min, + f0_max, + *args, + **kwargs + ): + # Pick a batch size that doesn't cause memory errors on your gpu + batch_size = 512 + # Compute pitch using first gpu + audio = torch.tensor(np.copy(x))[None].float() + model = kwargs.get('model', 'full') + f0, pd = torchcrepe.predict( + audio, + self.sr, + self.window, + f0_min, + f0_max, + model, + batch_size=batch_size, + device=self.device, + return_periodicity=True, + ) + pd = torchcrepe.filter.median(pd, 3) + f0 = torchcrepe.filter.mean(f0, 3) + f0[pd < 0.1] = 0 + f0 = f0[0].cpu().numpy() + return f0 + + # Fork Feature: Compute pYIN f0 method + def get_f0_pyin_computation(self, x, f0_min, f0_max): + y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) + f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) + f0 = f0[1:] # Get rid of extra first frame + return f0 + + def get_pm(self, x, p_len, *args, **kwargs): + f0 = parselmouth.Sound(x, self.sr).to_pitch_ac( + time_step=160 / 16000, + voicing_threshold=0.6, + pitch_floor=kwargs.get('f0_min'), + pitch_ceiling=kwargs.get('f0_max'), + ).selected_array["frequency"] + + return np.pad( + f0, + [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], + mode="constant" + ) + + def get_harvest(self, x, *args, **kwargs): + f0_spectral = pyworld.harvest( + x.astype(np.double), + fs=self.sr, + f0_ceil=kwargs.get('f0_max'), + f0_floor=kwargs.get('f0_min'), + frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, + ) + return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) + + def get_dio(self, x, *args, **kwargs): + f0_spectral = pyworld.dio( + x.astype(np.double), + fs=self.sr, + f0_ceil=kwargs.get('f0_max'), + f0_floor=kwargs.get('f0_min'), + frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, + ) + return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) + + + def get_rmvpe(self, x, *args, **kwargs): + if not hasattr(self, "model_rmvpe"): + from lib.infer.infer_libs.rmvpe import RMVPE + + logger.info( + "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"] + ) + self.model_rmvpe = RMVPE( + "%s/rmvpe.pt" % os.environ["rmvpe_root"], + is_half=self.is_half, + device=self.device, + ) + f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) + + if "privateuseone" in str(self.device): # clean ortruntime memory + del self.model_rmvpe.model + del self.model_rmvpe + logger.info("Cleaning ortruntime memory") + + return f0 + + + def get_pitch_dependant_rmvpe(self, 
x, f0_min=1, f0_max=40000, *args, **kwargs): + if not hasattr(self, "model_rmvpe"): + from lib.infer.infer_libs.rmvpe import RMVPE + + logger.info( + "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"] + ) + self.model_rmvpe = RMVPE( + "%s/rmvpe.pt" % os.environ["rmvpe_root"], + is_half=self.is_half, + device=self.device, + ) + f0 = self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max) + if "privateuseone" in str(self.device): # clean ortruntime memory + del self.model_rmvpe.model + del self.model_rmvpe + logger.info("Cleaning ortruntime memory") + + return f0 + + def autotune_f0(self, f0): + autotuned_f0 = [] + for freq in f0: + closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)] + autotuned_f0.append(random.choice(closest_notes)) + return np.array(autotuned_f0, np.float64) + + + # Fork Feature: Acquire median hybrid f0 estimation calculation + def get_f0_hybrid_computation( + self, + methods_str, + input_audio_path, + x, + f0_min, + f0_max, + p_len, + filter_radius, + crepe_hop_length, + time_step, + ): + # Get various f0 methods from input to use in the computation stack + methods_str = re.search('hybrid\[(.+)\]', methods_str) + if methods_str: # Ensure a match was found + methods = [method.strip() for method in methods_str.group(1).split('+')] + f0_computation_stack = [] + + print("Calculating f0 pitch estimations for methods: %s" % str(methods)) + x = x.astype(np.float32) + x /= np.quantile(np.abs(x), 0.999) + # Get f0 calculations for all methods specified + for method in methods: + f0 = None + if method == "crepe-tiny": + f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") + f0 = f0[1:] # Get rid of extra first frame + elif method == "mangio-crepe": + f0 = self.get_f0_crepe_computation( + x, f0_min, f0_max, p_len, crepe_hop_length + ) + elif method == "mangio-crepe-tiny": + f0 = self.get_f0_crepe_computation( + x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" + ) + # elif method == "pyin": Not Working just yet + # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max) + # Push method to the stack + f0_computation_stack.append(f0) + + for fc in f0_computation_stack: + print(len(fc)) + + print(f"Calculating hybrid median f0 from the stack of: {str(methods)}") + f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) + return f0_median_hybrid + + def get_f0( + self, + input_audio_path, + x, + p_len, + f0_up_key, + f0_method, + filter_radius, + crepe_hop_length, + f0_autotune, + inp_f0=None, + f0_min=50, + f0_max=1100, + ): + global input_audio_path2wav + time_step = self.window / self.sr * 1000 + f0_min = 50 + f0_max = 1100 + f0_mel_min = 1127 * np.log(1 + f0_min / 700) + f0_mel_max = 1127 * np.log(1 + f0_max / 700) + + if f0_method == "pm": + f0 = ( + parselmouth.Sound(x, self.sr) + .to_pitch_ac( + time_step=time_step / 1000, + voicing_threshold=0.6, + pitch_floor=f0_min, + pitch_ceiling=f0_max, + ) + .selected_array["frequency"] + ) + pad_size = (p_len - len(f0) + 1) // 2 + if pad_size > 0 or p_len - len(f0) - pad_size > 0: + f0 = np.pad( + f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" + ) + elif f0_method == "harvest": + input_audio_path2wav[input_audio_path] = x.astype(np.double) + f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) + if filter_radius > 2: + f0 = signal.medfilt(f0, 3) + elif f0_method == "dio": # Potentially Buggy? 
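+            # DIO is a fast f0 estimator but can octave-drop on breathy or noisy
+            # vocals; stonemask below refines the raw track and the median filter
+            # smooths isolated spikes.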
+ f0, t = pyworld.dio( + x.astype(np.double), + fs=self.sr, + f0_ceil=f0_max, + f0_floor=f0_min, + frame_period=10, + ) + f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) + f0 = signal.medfilt(f0, 3) + elif f0_method == "crepe": + model = "full" + # Pick a batch size that doesn't cause memory errors on your gpu + batch_size = 512 + # Compute pitch using first gpu + audio = torch.tensor(np.copy(x))[None].float() + f0, pd = torchcrepe.predict( + audio, + self.sr, + self.window, + f0_min, + f0_max, + model, + batch_size=batch_size, + device=self.device, + return_periodicity=True, + ) + pd = torchcrepe.filter.median(pd, 3) + f0 = torchcrepe.filter.mean(f0, 3) + f0[pd < 0.1] = 0 + f0 = f0[0].cpu().numpy() + elif f0_method == "crepe-tiny": + f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") + elif f0_method == "mangio-crepe": + f0 = self.get_f0_crepe_computation( + x, f0_min, f0_max, p_len, crepe_hop_length + ) + elif f0_method == "mangio-crepe-tiny": + f0 = self.get_f0_crepe_computation( + x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" + ) + elif f0_method == "rmvpe": + if not hasattr(self, "model_rmvpe"): + from lib.infer.infer_libs.rmvpe import RMVPE + + logger.info( + "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"] + ) + self.model_rmvpe = RMVPE( + "%s/rmvpe.pt" % os.environ["rmvpe_root"], + is_half=self.is_half, + device=self.device, + ) + f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) + + if "privateuseone" in str(self.device): # clean ortruntime memory + del self.model_rmvpe.model + del self.model_rmvpe + logger.info("Cleaning ortruntime memory") + elif f0_method == "rmvpe+": + params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min, + 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, + 'crepe_hop_length': crepe_hop_length, 'model': "full" + } + f0 = self.get_pitch_dependant_rmvpe(**params) + elif "hybrid" in f0_method: + # Perform hybrid median pitch estimation + input_audio_path2wav[input_audio_path] = x.astype(np.double) + f0 = self.get_f0_hybrid_computation( + f0_method,+ + input_audio_path, + x, + f0_min, + f0_max, + p_len, + filter_radius, + crepe_hop_length, + time_step, + ) + print("Autotune:", f0_autotune) + if f0_autotune: + f0 = self.autotune_f0(f0) + + f0 *= pow(2, f0_up_key / 12) + # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) + tf0 = self.sr // self.window # 每秒f0点数 + if inp_f0 is not None: + delta_t = np.round( + (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 + ).astype("int16") + replace_f0 = np.interp( + list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] + ) + shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] + f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ + :shape + ] + # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) + f0bak = f0.copy() + f0_mel = 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( + f0_mel_max - f0_mel_min + ) + 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > 255] = 255 + f0_coarse = np.rint(f0_mel).astype(np.int32) + return f0_coarse, f0bak # 1-0 + + def vc( + self, + model, + net_g, + sid, + audio0, + pitch, + pitchf, + times, + index, + big_npy, + index_rate, + version, + protect, + ): # ,file_index,file_big_npy + feats = torch.from_numpy(audio0) + if self.is_half: + feats = feats.half() + else: + feats = feats.float() + if feats.dim() == 2: # double channels + feats = 
feats.mean(-1) + assert feats.dim() == 1, feats.dim() + feats = feats.view(1, -1) + padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) + + inputs = { + "source": feats.to(self.device), + "padding_mask": padding_mask, + "output_layer": 9 if version == "v1" else 12, + } + t0 = ttime() + with torch.no_grad(): + logits = model.extract_features(**inputs) + feats = model.final_proj(logits[0]) if version == "v1" else logits[0] + if protect < 0.5 and pitch is not None and pitchf is not None: + feats0 = feats.clone() + if ( + not isinstance(index, type(None)) + and not isinstance(big_npy, type(None)) + and index_rate != 0 + ): + npy = feats[0].cpu().numpy() + if self.is_half: + npy = npy.astype("float32") + + # _, I = index.search(npy, 1) + # npy = big_npy[I.squeeze()] + + score, ix = index.search(npy, k=8) + weight = np.square(1 / score) + weight /= weight.sum(axis=1, keepdims=True) + npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) + + if self.is_half: + npy = npy.astype("float16") + feats = ( + torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + + (1 - index_rate) * feats + ) + + feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) + if protect < 0.5 and pitch is not None and pitchf is not None: + feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( + 0, 2, 1 + ) + t1 = ttime() + p_len = audio0.shape[0] // self.window + if feats.shape[1] < p_len: + p_len = feats.shape[1] + if pitch is not None and pitchf is not None: + pitch = pitch[:, :p_len] + pitchf = pitchf[:, :p_len] + + if protect < 0.5 and pitch is not None and pitchf is not None: + pitchff = pitchf.clone() + pitchff[pitchf > 0] = 1 + pitchff[pitchf < 1] = protect + pitchff = pitchff.unsqueeze(-1) + feats = feats * pitchff + feats0 * (1 - pitchff) + feats = feats.to(feats0.dtype) + p_len = torch.tensor([p_len], device=self.device).long() + with torch.no_grad(): + hasp = pitch is not None and pitchf is not None + arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid) + audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy() + del hasp, arg + del feats, p_len, padding_mask + if torch.cuda.is_available(): + torch.cuda.empty_cache() + t2 = ttime() + times[0] += t1 - t0 + times[2] += t2 - t1 + return audio1 + def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g): + t = t // window * window + if if_f0 == 1: + return self.vc( + model, + net_g, + sid, + audio_pad[s : t + t_pad_tgt + window], + pitch[:, s // window : (t + t_pad_tgt) // window], + pitchf[:, s // window : (t + t_pad_tgt) // window], + times, + index, + big_npy, + index_rate, + version, + protect, + )[t_pad_tgt : -t_pad_tgt] + else: + return self.vc( + model, + net_g, + sid, + audio_pad[s : t + t_pad_tgt + window], + None, + None, + times, + index, + big_npy, + index_rate, + version, + protect, + )[t_pad_tgt : -t_pad_tgt] + + + def pipeline( + self, + model, + net_g, + sid, + audio, + input_audio_path, + times, + f0_up_key, + f0_method, + file_index, + index_rate, + if_f0, + filter_radius, + tgt_sr, + resample_sr, + rms_mix_rate, + version, + protect, + crepe_hop_length, + f0_autotune, + f0_file=None, + f0_min=50, + f0_max=1100 + ): + if ( + file_index != "" + # and file_big_npy != "" + # and os.path.exists(file_big_npy) == True + and os.path.exists(file_index) + and index_rate != 0 + ): + try: + index = faiss.read_index(file_index) + # big_npy = 
np.load(file_big_npy) + big_npy = index.reconstruct_n(0, index.ntotal) + except: + traceback.print_exc() + index = big_npy = None + else: + index = big_npy = None + audio = signal.filtfilt(bh, ah, audio) + audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") + opt_ts = [] + if audio_pad.shape[0] > self.t_max: + audio_sum = np.zeros_like(audio) + for i in range(self.window): + audio_sum += audio_pad[i : i - self.window] + for t in range(self.t_center, audio.shape[0], self.t_center): + opt_ts.append( + t + - self.t_query + + np.where( + np.abs(audio_sum[t - self.t_query : t + self.t_query]) + == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() + )[0][0] + ) + s = 0 + audio_opt = [] + t = None + t1 = ttime() + audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") + p_len = audio_pad.shape[0] // self.window + inp_f0 = None + if hasattr(f0_file, "name"): + try: + with open(f0_file.name, "r") as f: + lines = f.read().strip("\n").split("\n") + inp_f0 = [] + for line in lines: + inp_f0.append([float(i) for i in line.split(",")]) + inp_f0 = np.array(inp_f0, dtype="float32") + except: + traceback.print_exc() + sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() + pitch, pitchf = None, None + if if_f0: + pitch, pitchf = self.get_f0( + input_audio_path, + audio_pad, + p_len, + f0_up_key, + f0_method, + filter_radius, + crepe_hop_length, + f0_autotune, + inp_f0, + f0_min, + f0_max + ) + pitch = pitch[:p_len] + pitchf = pitchf[:p_len] + if "mps" not in str(self.device) or "xpu" not in str(self.device): + pitchf = pitchf.astype(np.float32) + pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() + pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() + t2 = ttime() + times[1] += t2 - t1 + + with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar: + for i, t in enumerate(opt_ts): + t = t // self.window * self.window + start = s + end = t + self.t_pad2 + self.window + audio_slice = audio_pad[start:end] + pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None + pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None + audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) + s = t + pbar.update(1) + pbar.refresh() + + audio_slice = audio_pad[t:] + pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch + pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf + audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) + + audio_opt = np.concatenate(audio_opt) + if rms_mix_rate != 1: + audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) + if tgt_sr != resample_sr >= 16000: + audio_opt = librosa.resample( + audio_opt, orig_sr=tgt_sr, target_sr=resample_sr + ) + audio_max = np.abs(audio_opt).max() / 0.99 + max_int16 = 32768 + if audio_max > 1: + max_int16 /= audio_max + audio_opt = (audio_opt * max_int16).astype(np.int16) + del pitch, pitchf, sid + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + print("Returning completed audio...") + return audio_opt diff --git a/lib/infer/modules/vc/utils.py b/lib/infer/modules/vc/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a1cb0ff84097d1c7eb82373ccf19db061f595096 --- 
/dev/null
+++ b/lib/infer/modules/vc/utils.py
@@ -0,0 +1,42 @@
+import os
+import re
+from fairseq import checkpoint_utils
+
+
+def get_index_path_from_model(sid):
+    sid0strip = re.sub(r'\.(pth|onnx)$', '', sid)
+    sid0name = os.path.split(sid0strip)[-1]  # Extract only the name, not the directory
+
+    # Check if the sid0strip has the specific ending format _eXXX_sXXX
+    if re.match(r'.+_e\d+_s\d+$', sid0name):
+        base_model_name = sid0name.rsplit('_', 2)[0]
+    else:
+        base_model_name = sid0name
+
+    return next(
+        (
+            f
+            for f in [
+                os.path.join(root, name)
+                for root, _, files in os.walk(os.getenv("index_root"), topdown=False)
+                for name in files
+                if name.endswith(".index") and "trained" not in name
+            ]
+            if base_model_name in f
+        ),
+        "",
+    )
+
+
+def load_hubert(config):
+    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+        ["assets/hubert/hubert_base.pt"],
+        suffix="",
+    )
+    hubert_model = models[0]
+    hubert_model = hubert_model.to(config.device)
+    if config.is_half:
+        hubert_model = hubert_model.half()
+    else:
+        hubert_model = hubert_model.float()
+    return hubert_model.eval()
diff --git a/lib/model/.gitkeep b/lib/model/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/lib/model/.gitkeep
@@ -0,0 +1 @@
+
diff --git a/lib/tools/LazyImport.py b/lib/tools/LazyImport.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bdb05ddd5a546a43adba7274b4c3465bb77f2f5
--- /dev/null
+++ b/lib/tools/LazyImport.py
@@ -0,0 +1,13 @@
+from importlib.util import find_spec, LazyLoader, module_from_spec
+from sys import modules
+
+def lazyload(name):
+    if name in modules:
+        return modules[name]
+    else:
+        spec = find_spec(name)
+        loader = LazyLoader(spec.loader)
+        module = module_from_spec(spec)
+        modules[name] = module
+        loader.exec_module(module)
+        return module
\ No newline at end of file
diff --git a/lib/tools/__pycache__/LazyImport.cpython-39.pyc b/lib/tools/__pycache__/LazyImport.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa5a84878686420fea4da61042c1a7dc4a5c913a
Binary files /dev/null and b/lib/tools/__pycache__/LazyImport.cpython-39.pyc differ
diff --git a/lib/tools/__pycache__/audioEffects.cpython-39.pyc b/lib/tools/__pycache__/audioEffects.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed9ca1a98af306aa1913b2146527330b171df8d0
Binary files /dev/null and b/lib/tools/__pycache__/audioEffects.cpython-39.pyc differ
diff --git a/lib/tools/__pycache__/model_fetcher.cpython-39.pyc b/lib/tools/__pycache__/model_fetcher.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc9b3c9572896a0e40c522560175831c33209bde
Binary files /dev/null and b/lib/tools/__pycache__/model_fetcher.cpython-39.pyc differ
diff --git a/lib/tools/app.py b/lib/tools/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..d45dc69e925169b5238987ce57876035fb9abcfe
--- /dev/null
+++ b/lib/tools/app.py
@@ -0,0 +1,148 @@
+import logging
+import os
+
+# os.system("wget -P cvec/ https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt")
+import gradio as gr
+from dotenv import load_dotenv
+
+from assets.configs.config import Config
+from assets.i18n.i18n import I18nAuto
+from lib.infer.modules.vc.modules import VC
+
+
+logging.getLogger("numba").setLevel(logging.WARNING)
+logging.getLogger("markdown_it").setLevel(logging.WARNING)
+logging.getLogger("urllib3").setLevel(logging.WARNING) +logging.getLogger("matplotlib").setLevel(logging.WARNING) +logger = logging.getLogger(__name__) + +i18n = I18nAuto() +#(i18n) + +load_dotenv() +config = Config() +vc = VC(config) + +weight_root = os.getenv("weight_root") +weight_uvr5_root = os.getenv("weight_uvr5_root") +index_root = os.getenv("index_root") +names = [] +hubert_model = None +for name in os.listdir(weight_root): + if name.endswith(".pth"): + names.append(name) +index_paths = [] +for root, dirs, files in os.walk(index_root, topdown=False): + for name in files: + if name.endswith(".index") and "trained" not in name: + index_paths.append("%s/%s" % (root, name)) + + +app = gr.Blocks() +with app: + with gr.Tabs(): + with gr.TabItem("在线demo"): + gr.Markdown( + value=""" + RVC 在线demo + """ + ) + sid = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names)) + with gr.Column(): + spk_item = gr.Slider( + minimum=0, + maximum=2333, + step=1, + label=i18n("请选择说话人id"), + value=0, + visible=False, + interactive=True, + ) + sid.change(fn=vc.get_vc, inputs=[sid], outputs=[spk_item]) + gr.Markdown( + value=i18n("男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ") + ) + vc_input3 = gr.Audio(label="上传音频(长度小于90秒)") + vc_transform0 = gr.Number(label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0) + f0method0 = gr.Radio( + label=i18n("选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU"), + choices=["pm", "harvest", "crepe", "rmvpe"], + value="pm", + interactive=True, + ) + filter_radius0 = gr.Slider( + minimum=0, + maximum=7, + label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), + value=3, + step=1, + interactive=True, + ) + with gr.Column(): + file_index1 = gr.Textbox( + label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), + value="", + interactive=False, + visible=False, + ) + file_index2 = gr.Dropdown( + label=i18n("自动检测index路径,下拉式选择(dropdown)"), + choices=sorted(index_paths), + interactive=True, + ) + index_rate1 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("检索特征占比"), + value=0.88, + interactive=True, + ) + resample_sr0 = gr.Slider( + minimum=0, + maximum=48000, + label=i18n("后处理重采样至最终采样率,0为不进行重采样"), + value=0, + step=1, + interactive=True, + ) + rms_mix_rate0 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), + value=1, + interactive=True, + ) + protect0 = gr.Slider( + minimum=0, + maximum=0.5, + label=i18n("保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"), + value=0.33, + step=0.01, + interactive=True, + ) + f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调")) + but0 = gr.Button(i18n("转换"), variant="primary") + vc_output1 = gr.Textbox(label=i18n("输出信息")) + vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) + but0.click( + vc.vc_single, + [ + spk_item, + vc_input3, + vc_transform0, + f0_file, + f0method0, + file_index1, + file_index2, + # file_big_npy1, + index_rate1, + filter_radius0, + resample_sr0, + rms_mix_rate0, + protect0, + ], + [vc_output1, vc_output2], + ) + + +app.launch() diff --git a/lib/tools/audioEffects.py b/lib/tools/audioEffects.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d12584eb31f2b19d5e66cdc1a69ab73d5b6f60 --- /dev/null +++ b/lib/tools/audioEffects.py @@ -0,0 +1,33 @@ +from pedalboard import Pedalboard, Compressor, Reverb, NoiseGate +from pedalboard.io import AudioFile +import sys +import os +now_dir = os.getcwd() +sys.path.append(now_dir) +from assets.i18n.i18n import I18nAuto +i18n = I18nAuto() + +def process_audio(input_path, output_path, reverb_enabled, compressor_enabled, 
noise_gate_enabled, ): + print(reverb_enabled) + print(compressor_enabled) + print(noise_gate_enabled) + effects = [] + if reverb_enabled: + effects.append(Reverb(room_size=0.01)) + if compressor_enabled: + effects.append(Compressor(threshold_db=-10, ratio=25)) + if noise_gate_enabled: + effects.append(NoiseGate(threshold_db=-16, ratio=1.5, release_ms=250)) + + board = Pedalboard(effects) + + with AudioFile(input_path) as f: + with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o: + while f.tell() < f.frames: + chunk = f.read(f.samplerate) + effected = board(chunk, f.samplerate, reset=False) + o.write(effected) + + result = i18n("Processed audio saved at: ") + output_path + print(result) + return output_path \ No newline at end of file diff --git a/lib/tools/calc_rvc_model_similarity.py b/lib/tools/calc_rvc_model_similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..42496e088e51dc5162d0714470c2226f696e260c --- /dev/null +++ b/lib/tools/calc_rvc_model_similarity.py @@ -0,0 +1,96 @@ +# This code references https://huggingface.co./JosephusCheung/ASimilarityCalculatior/blob/main/qwerty.py +# Fill in the path of the model to be queried and the root directory of the reference models, and this script will return the similarity between the model to be queried and all reference models. +import os +import logging + +logger = logging.getLogger(__name__) + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def cal_cross_attn(to_q, to_k, to_v, rand_input): + hidden_dim, embed_dim = to_q.shape + attn_to_q = nn.Linear(hidden_dim, embed_dim, bias=False) + attn_to_k = nn.Linear(hidden_dim, embed_dim, bias=False) + attn_to_v = nn.Linear(hidden_dim, embed_dim, bias=False) + attn_to_q.load_state_dict({"weight": to_q}) + attn_to_k.load_state_dict({"weight": to_k}) + attn_to_v.load_state_dict({"weight": to_v}) + + return torch.einsum( + "ik, jk -> ik", + F.softmax( + torch.einsum("ij, kj -> ik", attn_to_q(rand_input), attn_to_k(rand_input)), + dim=-1, + ), + attn_to_v(rand_input), + ) + + +def model_hash(filename): + try: + with open(filename, "rb") as file: + import hashlib + + m = hashlib.sha256() + + file.seek(0x100000) + m.update(file.read(0x10000)) + return m.hexdigest()[0:8] + except FileNotFoundError: + return "NOFILE" + + +def eval(model, n, input): + qk = f"enc_p.encoder.attn_layers.{n}.conv_q.weight" + uk = f"enc_p.encoder.attn_layers.{n}.conv_k.weight" + vk = f"enc_p.encoder.attn_layers.{n}.conv_v.weight" + atoq, atok, atov = model[qk][:, :, 0], model[uk][:, :, 0], model[vk][:, :, 0] + + attn = cal_cross_attn(atoq, atok, atov, input) + return attn + + +def main(path, root): + torch.manual_seed(114514) + model_a = torch.load(path, map_location="cpu")["weight"] + + logger.info("Query:\t\t%s\t%s" % (path, model_hash(path))) + + map_attn_a = {} + map_rand_input = {} + for n in range(6): + hidden_dim, embed_dim, _ = model_a[ + f"enc_p.encoder.attn_layers.{n}.conv_v.weight" + ].shape + rand_input = torch.randn([embed_dim, hidden_dim]) + + map_attn_a[n] = eval(model_a, n, rand_input) + map_rand_input[n] = rand_input + + del model_a + + for name in sorted(list(os.listdir(root))): + path = "%s/%s" % (root, name) + model_b = torch.load(path, map_location="cpu")["weight"] + + sims = [] + for n in range(6): + attn_a = map_attn_a[n] + attn_b = eval(model_b, n, map_rand_input[n]) + + sim = torch.mean(torch.cosine_similarity(attn_a, attn_b)) + sims.append(sim) + + logger.info( + "Reference:\t%s\t%s\t%s" + % (path, model_hash(path), 
f"{torch.mean(torch.stack(sims)) * 1e2:.2f}%") + ) + + +if __name__ == "__main__": + query_path = r"assets\weights\mi v3.pth" + reference_root = r"assets\weights" + main(query_path, reference_root) diff --git a/lib/tools/diffq/__init__.py b/lib/tools/diffq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0eebdb931a873fc818f3774335417ee940ef6ab0 --- /dev/null +++ b/lib/tools/diffq/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# flake8: noqa +""" +This package implements different quantization strategies: + +- `diffq.uniform.UniformQuantizer`: classic uniform quantization over n bits. +- `diffq.diffq.DiffQuantizer`: differentiable quantizer based on scaled noise injection. + +Also, do check `diffq.base.BaseQuantizer` for the common methods of all Quantizers. +""" diff --git a/lib/tools/diffq/base.py b/lib/tools/diffq/base.py new file mode 100644 index 0000000000000000000000000000000000000000..9bd5276b51fbed3d4b898a45b93479ff19e62a7b --- /dev/null +++ b/lib/tools/diffq/base.py @@ -0,0 +1,262 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from dataclasses import dataclass +from concurrent import futures +from fnmatch import fnmatch +from functools import partial +import io +import math +from multiprocessing import cpu_count +import typing as tp +import zlib + +import torch + + +class BaseQuantizer: + @dataclass + class _QuantizedParam: + name: str + param: torch.nn.Parameter + module: torch.nn.Module + # If a Parameter is used multiple times, `other` can be used + # to share state between the different Quantizers + other: tp.Optional[tp.Any] + + def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False, + exclude: tp.Optional[tp.List[str]] = [], detect_bound: bool = True): + self.model = model + self.min_size = min_size + self.float16 = float16 + self.exclude = exclude + self.detect_bound = detect_bound + self._quantized = False + self._pre_handle = self.model.register_forward_pre_hook(self._forward_pre_hook) + self._post_handle = self.model.register_forward_hook(self._forward_hook) + + self._quantized_state = None + self._qparams = [] + self._float16 = [] + self._others = [] + self._rnns = [] + + self._saved = [] + + self._find_params() + + def _find_params(self): + min_params = self.min_size * 2**20 // 4 + previous = {} + for module_name, module in self.model.named_modules(): + if isinstance(module, torch.nn.RNNBase): + self._rnns.append(module) + for name, param in list(module.named_parameters(recurse=False)): + full_name = f"{module_name}.{name}" + matched = False + for pattern in self.exclude: + if fnmatch(full_name, pattern) or fnmatch(name, pattern): + matched = True + break + + if param.numel() <= min_params or matched: + if id(param) in previous: + continue + if self.detect_bound: + previous[id(param)] = None + if self.float16: + self._float16.append(param) + else: + self._others.append(param) + else: + qparam = self._register_param(name, param, module, previous.get(id(param))) + if self.detect_bound: + previous[id(param)] = qparam + self._qparams.append(qparam) + + def _register_param(self, name, param, module, other): + return self.__class__._QuantizedParam(name, param, module, 
other) + + def _forward_pre_hook(self, module, input): + if self.model.training: + self._quantized_state = None + if self._quantized: + self.unquantize() + if self._pre_forward_train(): + self._fix_rnns() + else: + self.quantize() + + def _forward_hook(self, module, input, output): + if self.model.training: + if self._post_forward_train(): + self._fix_rnns(flatten=False) # Hacky, next forward will flatten + + def quantize(self, save=True): + """ + Immediately apply quantization to the model parameters. + If `save` is True, save a copy of the unquantized parameters, that can be + restored with `unquantize()`. + """ + if self._quantized: + return + if save: + self._saved = [qp.param.data.to('cpu', copy=True) + for qp in self._qparams if qp.other is None] + self.restore_quantized_state(self.get_quantized_state()) + self._quantized = True + self._fix_rnns() + + def unquantize(self): + """ + Revert a previous call to `quantize()`. + """ + if not self._quantized: + raise RuntimeError("Can only be called on a quantized model.") + if not self._saved: + raise RuntimeError("Nothing to restore.") + for qparam in self._qparams: + if qparam.other is None: + qparam.param.data[:] = self._saved.pop(0) + assert len(self._saved) == 0 + self._quantized = False + self._fix_rnns() + + def _pre_forward_train(self) -> bool: + """ + Called once before each forward for continuous quantization. + Should return True if parameters were changed. + """ + return False + + def _post_forward_train(self) -> bool: + """ + Called once after each forward (to restore state for instance). + Should return True if parameters were changed. + """ + return False + + def _fix_rnns(self, flatten=True): + """ + To be called after quantization happened to fix RNNs. + """ + for rnn in self._rnns: + rnn._flat_weights = [ + (lambda wn: getattr(rnn, wn) if hasattr(rnn, wn) else None)(wn) + for wn in rnn._flat_weights_names] + if flatten: + rnn.flatten_parameters() + + def get_quantized_state(self): + """ + Returns sufficient quantized information to rebuild the model state. + + ..Note:: + To achieve maximum compression, you should compress this with + gzip or other, as quantized weights are not optimally coded! + """ + if self._quantized_state is None: + self._quantized_state = self._get_quantized_state() + return self._quantized_state + + def _get_quantized_state(self): + """ + Actual implementation for `get_quantized_state`. + """ + float16_params = [] + for p in self._float16: + q = p.data.half() + float16_params.append(q) + + return { + "quantized": [self._quantize_param(qparam) for qparam in self._qparams + if qparam.other is None], + "float16": float16_params, + "others": [p.data.clone() for p in self._others], + } + + def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any: + """ + To be overriden. + """ + raise NotImplementedError() + + def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor: + """ + To be overriden. + """ + raise NotImplementedError() + + def restore_quantized_state(self, state) -> None: + """ + Restore the state of the model from the quantized state. + """ + for p, q in zip(self._float16, state["float16"]): + p.data[:] = q.to(p) + + for p, q in zip(self._others, state["others"]): + p.data[:] = q + + remaining = list(state["quantized"]) + for qparam in self._qparams: + if qparam.other is not None: + # Only unquantize first appearance of nn.Parameter. 
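+                # bound parameters share one underlying tensor, so the data is
+                # restored exactly once, through the first qparam that owns it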
diff --git a/lib/tools/diffq/diffq.py b/lib/tools/diffq/diffq.py
new file mode 100644
index 0000000000000000000000000000000000000000..b475ec7f55227417b014c69b5cf55033182113e1
--- /dev/null
+++ b/lib/tools/diffq/diffq.py
@@ -0,0 +1,286 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Differentiable quantizer based on scaled noise injection.
+"""
+from dataclasses import dataclass
+import math
+import typing as tp
+
+import torch
+
+from .base import BaseQuantizer
+from .uniform import uniform_quantize, uniform_unquantize
+from .utils import simple_repr
+
+
+class DiffQuantizer(BaseQuantizer):
+    @dataclass
+    class _QuantizedParam(BaseQuantizer._QuantizedParam):
+        logit: torch.nn.Parameter
+
+    def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
+                 group_size: int = 1, min_bits: float = 2, max_bits: float = 15,
+                 param="bits", noise="gaussian",
+                 init_bits: float = 8, extra_bits: float = 0, suffix: str = "_diffq",
+                 exclude: tp.List[str] = [], detect_bound: bool = True):
+        """
+        Differentiable quantizer based on scaled noise injection.
+        For every parameter `p` in the model, this introduces a number of bits parameter
+        `b` with the same dimensions (when group_size = 1).
+        Before each forward, `p` is replaced by `p + U`
+        with U uniform iid noise with range [-d/2, d/2], with `d` the uniform quantization
+        step for `b` bits.
+        This noise approximates the quantization noise in a differentiable manner, both
+        with respect to the unquantized parameter `p` and the number of bits `b`.
+
+        At evaluation (as detected with `model.eval()`), the model is replaced
+        by its true quantized version, and restored when going back to training.
+
+        When doing actual quantization (for serialization, or evaluation),
+        the number of bits is rounded to the nearest integer, and needs to be stored alongside.
+        This will cost a few bits per dimension. To reduce this cost, one can use `group_size`,
+        which will use a single noise level for multiple weight entries.
+
+        You can use the `DiffQuantizer.model_size` method to get a differentiable estimate of the
+        model size in MB. You can then use this estimate as a penalty in your training loss.
+
+        Args:
+            model (torch.nn.Module): model to quantize
+            min_size (float): minimum size in MB of a parameter to be quantized.
+            float16 (bool): if a layer is smaller than min_size, should we still do float16?
+            group_size (int): weight entries are grouped together to reduce the number
+                of noise scales to store. This should divide the size of all parameters
+                bigger than min_size.
+            min_bits (float): minimal number of bits.
+            max_bits (float): maximal number of bits.
+            init_bits (float): initial number of bits.
+            extra_bits (float): extra bits to add for actual quantization (before roundoff).
+            suffix (str): suffix used for the name of the extra noise scale parameters.
+            exclude (list[str]): list of patterns used to match parameters to exclude.
+                For instance `['bias']` to exclude all bias terms.
+            detect_bound (bool): if True, will detect bound parameters and reuse
+                the same quantized tensor for both, as well as the same number of bits.
+
+        ..Warning::
+            You must call `model.train()` and `model.eval()` for `DiffQuantizer` to work properly.
+
+        """
+        self.group_size = group_size
+        self.min_bits = min_bits
+        self.max_bits = max_bits
+        self.init_bits = init_bits
+        self.extra_bits = extra_bits
+        self.suffix = suffix
+        self.param = param
+        self.noise = noise
+        assert noise in ["gaussian", "uniform"]
+        self._optimizer_setup = False
+
+        self._min_noise = 1 / (2 ** self.max_bits - 1)
+        self._max_noise = 1 / (2 ** self.min_bits - 1)
+
+        assert group_size >= 0
+        assert min_bits < init_bits < max_bits, \
+            "init_bits must be strictly between min_bits and max_bits"
+
+        for name, _ in model.named_parameters():
+            if name.endswith(suffix):
+                raise RuntimeError("The model already has some noise scale parameters, "
+                                   "maybe you used a DiffQuantizer twice on the same model?")
+
+        super().__init__(model, min_size, float16, exclude, detect_bound)
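To make the `[-d/2, d/2]` noise range in the docstring above concrete, here is a standalone sketch of the uniform-noise variant of the surrogate used in `_pre_forward_train` further down (the tensor and bit width are arbitrary toy values):

    import torch

    p = torch.randn(64)                  # toy parameter tensor
    bits = torch.tensor(8.0)             # current (soft) bit width b
    scale = p.max() - p.min()            # dynamic range of p
    unit = 1 / (2 ** bits - 1)           # quantization step d for b bits
    noise = scale * unit * (torch.rand_like(p) - 0.5)  # uniform in [-d/2, d/2], scaled
    noisy = p + noise                    # differentiable stand-in for quantized p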
+    def _get_bits(self, logit: torch.Tensor):
+        if self.param == "noise":
+            return torch.log2(1 + 1 / self._get_noise_scale(logit))
+        else:
+            t = torch.sigmoid(logit)
+            return self.max_bits * t + (1 - t) * self.min_bits
+
+    def _get_noise_scale(self, logit: torch.Tensor):
+        if self.param == "noise":
+            t = torch.sigmoid(logit)
+            return torch.exp(t * math.log(self._min_noise) + (1 - t) * math.log(self._max_noise))
+        else:
+            return 1 / (2 ** self._get_bits(logit) - 1)
+
+    def _register_param(self, name, param, module, other):
+        if other is not None:
+            return self.__class__._QuantizedParam(
+                name=name, param=param, module=module, logit=other.logit, other=other)
+        assert self.group_size == 0 or param.numel() % self.group_size == 0
+        # we want the initial number of bits to be init_bits.
+        if self.param == "noise":
+            noise_scale = 1 / (2 ** self.init_bits - 1)
+            t = (math.log(noise_scale) - math.log(self._max_noise)) / (
+                math.log(self._min_noise) - math.log(self._max_noise))
+        else:
+            t = (self.init_bits - self.min_bits) / (self.max_bits - self.min_bits)
+        assert 0 < t < 1
+        logit = torch.logit(torch.tensor(float(t)))
+        assert abs(self._get_bits(logit) - self.init_bits) < 1e-5
+        if self.group_size > 0:
+            nparam = param.numel() // self.group_size
+        else:
+            nparam = 1
+        logit = torch.nn.Parameter(
+            torch.full(
+                (nparam,),
+                logit,
+                device=param.device))
+        module.register_parameter(name + self.suffix, logit)
+        return self.__class__._QuantizedParam(
+            name=name, param=param, module=module, logit=logit, other=None)
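The inverse-sigmoid initialization in `_register_param` can be checked in isolation: with `param="bits"`, `_get_bits` maps a logit through a sigmoid onto `[min_bits, max_bits]`, so the starting logit is simply the sigmoid inverse of the desired fraction of that range. A standalone verification using the class defaults:

    import torch

    min_bits, max_bits, init_bits = 2.0, 15.0, 8.0
    t = (init_bits - min_bits) / (max_bits - min_bits)  # position inside the range
    logit = torch.logit(torch.tensor(t))                # inverse sigmoid
    s = torch.sigmoid(logit)
    bits = max_bits * s + (1 - s) * min_bits            # same mapping as _get_bits
    assert abs(bits.item() - init_bits) < 1e-5          # recovers init_bits exactly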
+    def clear_optimizer(self, optimizer: torch.optim.Optimizer):
+        params = [qp.logit for qp in self._qparams]
+
+        for group in optimizer.param_groups:
+            new_params = []
+            for q in list(group["params"]):
+                matched = False
+                for p in params:
+                    if p is q:
+                        matched = True
+                if not matched:
+                    new_params.append(q)
+            group["params"][:] = new_params
+
+    def setup_optimizer(self, optimizer: torch.optim.Optimizer,
+                        lr: float = 1e-3, **kwargs):
+        """
+        Setup the optimizer to tune the number of bits. In particular, this will deactivate
+        weight decay for the bits parameters.
+
+        Args:
+            optimizer (torch.Optimizer): optimizer to use.
+            lr (float): specific learning rate for the bits parameters. 1e-3
+                is perfect for Adam.
+            kwargs (dict): overrides for other optimization parameters for the bits.
+        """
+        assert not self._optimizer_setup
+        self._optimizer_setup = True
+
+        params = [qp.logit for qp in self._qparams]
+
+        for group in optimizer.param_groups:
+            for q in list(group["params"]):
+                for p in params:
+                    if p is q:
+                        raise RuntimeError("You should create the optimizer "
+                                           "before the quantizer!")
+
+        group = {"params": params, "lr": lr, "weight_decay": 0}
+        group.update(kwargs)
+        optimizer.add_param_group(group)
+
+    def no_optimizer(self):
+        """
+        Call this if you do not want to use an optimizer.
+        """
+        self._optimizer_setup = True
+
+    def check_unused(self):
+        for qparam in self._qparams:
+            if qparam.other is not None:
+                continue
+            grad = qparam.param.grad
+            if grad is None or (grad == 0).all():
+                if qparam.logit.grad is not None:
+                    qparam.logit.grad.data.zero_()
+
+    def model_size(self, exact=False):
+        """
+        Differentiable estimate of the model size.
+        The size is returned in MB.
+
+        If `exact` is True, then the output is no longer differentiable but
+        reflects exactly an achievable size, even without compression,
+        i.e. same as returned by `naive_model_size()`.
+        """
+        total = super().model_size()
+        subtotal = 0
+        for qparam in self._qparams:
+            # only count the first appearance of a Parameter
+            if qparam.other is not None:
+                continue
+            bits = self.extra_bits + self._get_bits(qparam.logit)
+            if exact:
+                bits = bits.round().clamp(1, 15)
+            if self.group_size == 0:
+                group_size = qparam.param.numel()
+            else:
+                group_size = self.group_size
+            subtotal += group_size * bits.sum()
+            subtotal += 2 * 32  # param scale
+
+            # Number of bits to represent each number of bits
+            bits_bits = math.ceil(math.log2(1 + (bits.max().round().item() - self.min_bits)))
+            subtotal += 8  # 8 bits for bits_bits
+            subtotal += bits_bits * bits.numel()
+
+        subtotal /= 2 ** 20 * 8  # bits -> MegaBytes
+        return total + subtotal
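Tying `setup_optimizer` and the differentiable `model_size` estimate together, here is a minimal training-loop sketch; the model, data, and penalty weight are placeholders rather than part of this patch, and the import path assumes the repository root is on `sys.path`:

    import torch

    from lib.tools.diffq.diffq import DiffQuantizer

    model = torch.nn.Linear(256, 256)
    optimizer = torch.optim.Adam(model.parameters())  # create the optimizer first...
    quantizer = DiffQuantizer(model)                  # ...then the quantizer
    quantizer.setup_optimizer(optimizer)              # registers the bits parameters

    penalty = 1e-3  # weight of the model-size penalty, to be tuned per task
    for _ in range(10):
        x = torch.randn(32, 256)
        task_loss = model(x).pow(2).mean()            # stand-in for a real loss
        loss = task_loss + penalty * quantizer.model_size()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()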
+    def true_model_size(self):
+        """
+        Naive model size without zlib compression.
+        """
+        return self.model_size(exact=True).item()
+
+    def _pre_forward_train(self):
+        if not self._optimizer_setup:
+            raise RuntimeError("You must call `setup_optimizer()` on your optimizer "
+                               "before starting training.")
+        for qparam in self._qparams:
+            if qparam.other is not None:
+                noisy = qparam.other.module._parameters[qparam.other.name]
+            else:
+                bits = self._get_bits(qparam.logit)[:, None]
+                if self.group_size == 0:
+                    p_flat = qparam.param.view(-1)
+                else:
+                    p_flat = qparam.param.view(-1, self.group_size)
+                scale = p_flat.max() - p_flat.min()
+                unit = 1 / (2**bits - 1)
+                if self.noise == "uniform":
+                    noise_source = (torch.rand_like(p_flat) - 0.5)
+                elif self.noise == "gaussian":
+                    noise_source = torch.randn_like(p_flat) / 2
+                noise = scale * unit * noise_source
+                noisy = p_flat + noise
+            # We bypass the checks by PyTorch on parameters being leafs
+            qparam.module._parameters[qparam.name] = noisy.view_as(qparam.param)
+        return True
+
+    def _post_forward_train(self):
+        for qparam in self._qparams:
+            qparam.module._parameters[qparam.name] = qparam.param
+        return True
+
+    def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any:
+        bits = self.extra_bits + self._get_bits(qparam.logit)
+        bits = bits.round().clamp(1, 15)[:, None].byte()
+        if self.group_size == 0:
+            p = qparam.param.data.view(-1)
+        else:
+            p = qparam.param.data.view(-1, self.group_size)
+        levels, scales = uniform_quantize(p, bits)
+        return levels, scales, bits
+
+    def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor:
+        levels, param_scale, bits = quantized
+        return uniform_unquantize(levels, param_scale, bits).view_as(qparam.param.data)
+
+    def detach(self):
+        super().detach()
+        for qparam in self._qparams:
+            delattr(qparam.module, qparam.name + self.suffix)
+
+    def __repr__(self):
+        return simple_repr(self)
diff --git a/lib/tools/diffq/uniform.py b/lib/tools/diffq/uniform.py
new file mode 100644
index 0000000000000000000000000000000000000000..f61e9129c04caaa33c66f726bf2433d51689cfa5
--- /dev/null
+++ b/lib/tools/diffq/uniform.py
@@ -0,0 +1,121 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Classic uniform quantization over n bits.
+"""
+from typing import Tuple
+import torch
+
+from .base import BaseQuantizer
+from .utils import simple_repr
+
+
+def uniform_quantize(p: torch.Tensor, bits: torch.Tensor = torch.tensor(8.)):
+    """
+    Quantize the given weights over `bits` bits.
+
+    Returns:
+        - quantized levels
+        - (min, max) range.
+
+    """
+    assert (bits >= 1).all() and (bits <= 15).all()
+    num_levels = (2 ** bits.float()).long()
+    mn = p.min().item()
+    mx = p.max().item()
+    p = (p - mn) / (mx - mn)  # put p in [0, 1]
+    unit = 1 / (num_levels - 1)  # quantization unit
+    levels = (p / unit).round()
+    if (bits <= 8).all():
+        levels = levels.byte()
+    else:
+        levels = levels.short()
+    return levels, (mn, mx)
+
+
+def uniform_unquantize(levels: torch.Tensor, scales: Tuple[float, float],
+                       bits: torch.Tensor = torch.tensor(8.)):
+    """
+    Unquantize the weights from the levels and scale. Return a float32 tensor.
+    """
+    mn, mx = scales
+    num_levels = 2 ** bits.float()
+    unit = 1 / (num_levels - 1)
+    levels = levels.float()
+    p = levels * unit  # in [0, 1]
+    return p * (mx - mn) + mn
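A quick round trip through the two helpers above shows the reconstruction error shrinking as the bit width grows (a standalone check with arbitrary values, assuming the same repository-root import path as before):

    import torch

    from lib.tools.diffq.uniform import uniform_quantize, uniform_unquantize

    p = torch.randn(1000)
    for b in (4.0, 8.0):
        levels, scales = uniform_quantize(p, torch.tensor(b))
        p_hat = uniform_unquantize(levels, scales, torch.tensor(b))
        err = (p - p_hat).abs().max().item()
        # max error is about (max - min) / (2 * (2**b - 1)), so roughly 16x
        # smaller at 8 bits than at 4 bits
        print(f"{b:.0f} bits -> max abs error {err:.4f}")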
+
+
+class UniformQuantizer(BaseQuantizer):
+    def __init__(self, model: torch.nn.Module, bits: float = 8., min_size: float = 0.01,
+                 float16: bool = False, qat: bool = False, exclude=[], detect_bound=True):
+        """
+        Args:
+            model (torch.nn.Module): model to quantize
+            bits (float): number of bits to quantize over.
+            min_size (float): minimum size in MB of a parameter to be quantized.
+            float16 (bool): if a layer is smaller than min_size, should we still do float16?
+            qat (bool): perform quantization-aware training.
+            exclude (list[str]): list of patterns used to match parameters to exclude.
+                For instance `['bias']` to exclude all bias terms.
+            detect_bound (bool): if True, will detect bound parameters and reuse
+                the same quantized tensor for both.
+        """
+        self.bits = float(bits)
+        self.qat = qat
+
+        super().__init__(model, min_size, float16, exclude, detect_bound)
+
+    def __repr__(self):
+        return simple_repr(self)
+
+    def _pre_forward_train(self):
+        if self.qat:
+            for qparam in self._qparams:
+                if qparam.other is not None:
+                    new_param = qparam.other.module._parameters[qparam.other.name]
+                else:
+                    quantized = self._quantize_param(qparam)
+                    qvalue = self._unquantize_param(qparam, quantized)
+                    new_param = qparam.param + (qvalue - qparam.param).detach()
+                qparam.module._parameters[qparam.name] = new_param
+            return True
+        return False
+
+    def _post_forward_train(self):
+        if self.qat:
+            for qparam in self._qparams:
+                qparam.module._parameters[qparam.name] = qparam.param
+            return True
+        return False
+
+    def _quantize_param(self, qparam):
+        levels, scales = uniform_quantize(qparam.param.data, torch.tensor(self.bits))
+        return (levels, scales)
+
+    def _unquantize_param(self, qparam, quantized):
+        levels, scales = quantized
+        return uniform_unquantize(levels, scales, torch.tensor(self.bits))
+
+    def model_size(self):
+        """
+        Non differentiable model size in MB.
+        """
+        total = super().model_size()
+        subtotal = 0
+        for qparam in self._qparams:
+            if qparam.other is None:  # if the parameter is bound, count only one copy.
+                subtotal += self.bits * qparam.param.numel() + 64  # 2 floats for the overall scales
+        subtotal /= 2**20 * 8  # bits to MegaBytes
+        return total + subtotal
+
+    def true_model_size(self):
+        """
+        Return the true quantized model size, in MB, without extra
+        compression.
+        """
+        return self.model_size().item()
diff --git a/lib/tools/diffq/utils.py b/lib/tools/diffq/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..be6ab5253c38564140bc202077292bb99f9f397b
--- /dev/null
+++ b/lib/tools/diffq/utils.py
@@ -0,0 +1,37 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import inspect
+from typing import Optional, List
+
+
+def simple_repr(obj, attrs: Optional[List[str]] = None, overrides={}):
+    """
+    Return a simple representation string for `obj`.
+    If `attrs` is not None, it should be a list of attributes to include.
+    """
+    params = inspect.signature(obj.__class__).parameters
+    attrs_repr = []
+    if attrs is None:
+        attrs = params.keys()
+    for attr in attrs:
+        display = False
+        if attr in overrides:
+            value = overrides[attr]
+        elif hasattr(obj, attr):
+            value = getattr(obj, attr)
+        else:
+            continue
+        if attr in params:
+            param = params[attr]
+            if param.default is inspect._empty or value != param.default:
+                display = True
+        else:
+            display = True
+
+        if display:
+            attrs_repr.append(f"{attr}={value}")
+    return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
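Because `simple_repr` compares each attribute against the constructor signature, only arguments that differ from their defaults are printed. A tiny illustration with a throwaway class (hypothetical, for demonstration only):

    from lib.tools.diffq.utils import simple_repr


    class Box:
        def __init__(self, width=1, height=2):
            self.width = width
            self.height = height

        def __repr__(self):
            return simple_repr(self)


    print(Box())         # -> Box()
    print(Box(width=3))  # -> Box(width=3)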
diff --git a/lib/tools/dlmodels.bat b/lib/tools/dlmodels.bat
new file mode 100644
index 0000000000000000000000000000000000000000..5d80f50369b1f3ed37c045d07a9e2ce8954f09d4
--- /dev/null
+++ b/lib/tools/dlmodels.bat
@@ -0,0 +1,348 @@
+@echo off && chcp 65001
+
+echo working dir is %cd%
+echo downloading requirement aria2 check.
+echo=
+dir /a:d/b | findstr "aria2" > flag.txt
+findstr "aria2" flag.txt >nul
+if %errorlevel% ==0 (
+    echo aria2 checked.
+    echo=
+) else (
+    echo failed. please download aria2 from its webpage!
+    echo unzip it and put it in this directory!
+    timeout /T 5
+    start https://github.com/aria2/aria2/releases/tag/release-1.36.0
+    echo=
+    goto end
+)
+
+echo envfiles checking start.
+echo=
+
+for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch
+:endSch
+
+set d32=f0D32k.pth
+set d40=f0D40k.pth
+set d48=f0D48k.pth
+set g32=f0G32k.pth
+set g40=f0G40k.pth
+set g48=f0G48k.pth
+
+set d40v2=f0D40k.pth
+set g40v2=f0G40k.pth
+
+set dld32=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth
+set dld40=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth
+set dld48=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth
+set dlg32=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth
+set dlg40=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth
+set dlg48=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth
+
+set dld40v2=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth
+set dlg40v2=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth
+
+set hp2_all=HP2_all_vocals.pth
+set hp3_all=HP3_all_vocals.pth
+set hp5_only=HP5_only_main_vocal.pth
+set VR_DeEchoAggressive=VR-DeEchoAggressive.pth
+set VR_DeEchoDeReverb=VR-DeEchoDeReverb.pth
+set VR_DeEchoNormal=VR-DeEchoNormal.pth
+set onnx_dereverb=vocals.onnx
+
+set dlhp2_all=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth
+set dlhp3_all=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth
+set dlhp5_only=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth
+set dlVR_DeEchoAggressive=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth
+set dlVR_DeEchoDeReverb=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth
+set dlVR_DeEchoNormal=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth
+set dlonnx_dereverb=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
+
+set hb=hubert_base.pt
+
+set
dlhb=https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt + +echo dir check start. +echo= + +if exist "%~dp0assets\pretrained" ( + echo dir .\assets\pretrained checked. + ) else ( + echo failed. generating dir .\assets\pretrained. + mkdir pretrained + ) +if exist "%~dp0assets\pretrained_v2" ( + echo dir .\assets\pretrained_v2 checked. + ) else ( + echo failed. generating dir .\assets\pretrained_v2. + mkdir pretrained_v2 + ) +if exist "%~dp0assets\uvr5_weights" ( + echo dir .\assets\uvr5_weights checked. + ) else ( + echo failed. generating dir .\assets\uvr5_weights. + mkdir uvr5_weights + ) +if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy" ( + echo dir .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy checked. + ) else ( + echo failed. generating dir .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy. + mkdir uvr5_weights\onnx_dereverb_By_FoxJoy + ) + +echo= +echo dir check finished. + +echo= +echo required files check start. + +echo checking D32k.pth +if exist "%~dp0assets\pretrained\D32k.pth" ( + echo D32k.pth in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0assets\pretrained -o D32k.pth + if exist "%~dp0assets\pretrained\D32k.pth" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking D40k.pth +if exist "%~dp0assets\pretrained\D40k.pth" ( + echo D40k.pth in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0assets\pretrained -o D40k.pth + if exist "%~dp0assets\pretrained\D40k.pth" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking D40k.pth +if exist "%~dp0assets\pretrained_v2\D40k.pth" ( + echo D40k.pth in .\assets\pretrained_v2 checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d %~dp0assets\pretrained_v2 -o D40k.pth + if exist "%~dp0assets\pretrained_v2\D40k.pth" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking D48k.pth +if exist "%~dp0assets\pretrained\D48k.pth" ( + echo D48k.pth in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0assets\pretrained -o D48k.pth + if exist "%~dp0assets\pretrained\D48k.pth" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking G32k.pth +if exist "%~dp0assets\pretrained\G32k.pth" ( + echo G32k.pth in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0assets\pretrained -o G32k.pth + if exist "%~dp0assets\pretrained\G32k.pth" (echo download successful.) else (echo please try again! 
+ echo=) + ) +echo checking G40k.pth +if exist "%~dp0assets\pretrained\G40k.pth" ( + echo G40k.pth in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0assets\pretrained -o G40k.pth + if exist "%~dp0assets\pretrained\G40k.pth" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking G40k.pth +if exist "%~dp0assets\pretrained_v2\G40k.pth" ( + echo G40k.pth in .\assets\pretrained_v2 checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d %~dp0assets\pretrained_v2 -o G40k.pth + if exist "%~dp0assets\pretrained_v2\G40k.pth" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking G48k.pth +if exist "%~dp0assets\pretrained\G48k.pth" ( + echo G48k.pth in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0assets\pretrained -o G48k.pth + if exist "%~dp0assets\pretrained\G48k.pth" (echo download successful.) else (echo please try again! + echo=) + ) + +echo checking %d32% +if exist "%~dp0assets\pretrained\%d32%" ( + echo %d32% in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0assets\pretrained -o %d32% + if exist "%~dp0assets\pretrained\%d32%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %d40% +if exist "%~dp0assets\pretrained\%d40%" ( + echo %d40% in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0assets\pretrained -o %d40% + if exist "%~dp0assets\pretrained\%d40%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %d40v2% +if exist "%~dp0assets\pretrained_v2\%d40v2%" ( + echo %d40v2% in .\assets\pretrained_v2 checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40v2% -d %~dp0assets\pretrained_v2 -o %d40v2% + if exist "%~dp0assets\pretrained_v2\%d40v2%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %d48% +if exist "%~dp0assets\pretrained\%d48%" ( + echo %d48% in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0assets\pretrained -o %d48% + if exist "%~dp0assets\pretrained\%d48%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %g32% +if exist "%~dp0assets\pretrained\%g32%" ( + echo %g32% in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0assets\pretrained -o %g32% + if exist "%~dp0assets\pretrained\%g32%" (echo download successful.) else (echo please try again! 
+ echo=) + ) +echo checking %g40% +if exist "%~dp0assets\pretrained\%g40%" ( + echo %g40% in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0assets\pretrained -o %g40% + if exist "%~dp0assets\pretrained\%g40%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %g40v2% +if exist "%~dp0assets\pretrained_v2\%g40v2%" ( + echo %g40v2% in .\assets\pretrained_v2 checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40v2% -d %~dp0assets\pretrained_v2 -o %g40v2% + if exist "%~dp0assets\pretrained_v2\%g40v2%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %g48% +if exist "%~dp0assets\pretrained\%g48%" ( + echo %g48% in .\assets\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0assets\pretrained -o %g48% + if exist "%~dp0assets\pretrained\%g48%" (echo download successful.) else (echo please try again! + echo=) + ) + +echo checking %hp2_all% +if exist "%~dp0assets\uvr5_weights\%hp2_all%" ( + echo %hp2_all% in .\assets\uvr5_weights checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2_all% -d %~dp0assets\uvr5_weights -o %hp2_all% + if exist "%~dp0assets\uvr5_weights\%hp2_all%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %hp3_all% +if exist "%~dp0assets\uvr5_weights\%hp3_all%" ( + echo %hp3_all% in .\assets\uvr5_weights checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp3_all% -d %~dp0assets\uvr5_weights -o %hp3_all% + if exist "%~dp0assets\uvr5_weights\%hp3_all%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %hp5_only% +if exist "%~dp0assets\uvr5_weights\%hp5_only%" ( + echo %hp5_only% in .\assets\uvr5_weights checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5_only% -d %~dp0assets\uvr5_weights -o %hp5_only% + if exist "%~dp0assets\uvr5_weights\%hp5_only%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %VR_DeEchoAggressive% +if exist "%~dp0assets\uvr5_weights\%VR_DeEchoAggressive%" ( + echo %VR_DeEchoAggressive% in .\assets\uvr5_weights checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoAggressive% -d %~dp0assets\uvr5_weights -o %VR_DeEchoAggressive% + if exist "%~dp0assets\uvr5_weights\%VR_DeEchoAggressive%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %VR_DeEchoDeReverb% +if exist "%~dp0assets\uvr5_weights\%VR_DeEchoDeReverb%" ( + echo %VR_DeEchoDeReverb% in .\assets\uvr5_weights checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoDeReverb% -d %~dp0assets\uvr5_weights -o %VR_DeEchoDeReverb% + if exist "%~dp0assets\uvr5_weights\%VR_DeEchoDeReverb%" (echo download successful.) else (echo please try again! 
+ echo=) + ) +echo checking %VR_DeEchoNormal% +if exist "%~dp0assets\uvr5_weights\%VR_DeEchoNormal%" ( + echo %VR_DeEchoNormal% in .\assets\uvr5_weights checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoNormal% -d %~dp0assets\uvr5_weights -o %VR_DeEchoNormal% + if exist "%~dp0assets\uvr5_weights\%VR_DeEchoNormal%" (echo download successful.) else (echo please try again! + echo=) + ) +echo checking %onnx_dereverb% +if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" ( + echo %onnx_dereverb% in .\assets\uvr5_weights\onnx_dereverb_By_FoxJoy checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlonnx_dereverb% -d %~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy -o %onnx_dereverb% + if exist "%~dp0assets\uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (echo download successful.) else (echo please try again! + echo=) + ) + +echo checking %hb% +if exist "%~dp0assets\hubert\%hb%" ( + echo %hb% in .\assets\hubert\pretrained checked. + echo= + ) else ( + echo failed. starting download from huggingface. + %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0assets\hubert\ -o %hb% + if exist "%~dp0assets\hubert\%hb%" (echo download successful.) else (echo please try again! + echo=) + ) + +echo required files check finished. +echo envfiles check complete. +pause +:end +del flag.txt diff --git a/lib/tools/dlmodels.sh b/lib/tools/dlmodels.sh new file mode 100644 index 0000000000000000000000000000000000000000..5fba0edef345c0a4384aa9402cfd5e93e29efdc3 --- /dev/null +++ b/lib/tools/dlmodels.sh @@ -0,0 +1,566 @@ +#!/bin/bash + +echo working dir is $(pwd) +echo downloading requirement aria2 check. + +if command -v aria2c &> /dev/null +then + echo "aria2c command found" +else + echo failed. 
please install aria2 + sleep 5 + exit 1 +fi + +d32="f0D32k.pth" +d40="f0D40k.pth" +d48="f0D48k.pth" +g32="f0G32k.pth" +g40="f0G40k.pth" +g48="f0G48k.pth" + +d40v2="f0D40k.pth" +g40v2="f0G40k.pth" + +dld32="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth" +dld40="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth" +dld48="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth" +dlg32="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth" +dlg40="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth" +dlg48="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth" + +dld40v2="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth" +dlg40v2="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth" + +hp2_all="HP2_all_vocals.pth" +hp3_all="HP3_all_vocals.pth" +hp5_only="HP5_only_main_vocal.pth" +VR_DeEchoAggressive="VR-DeEchoAggressive.pth" +VR_DeEchoDeReverb="VR-DeEchoDeReverb.pth" +VR_DeEchoNormal="VR-DeEchoNormal.pth" +onnx_dereverb="vocals.onnx" +rmvpe="rmvpe.pt" + +dlhp2_all="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth" +dlhp3_all="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth" +dlhp5_only="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth" +dlVR_DeEchoAggressive="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth" +dlVR_DeEchoDeReverb="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth" +dlVR_DeEchoNormal="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth" +dlonnx_dereverb="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx" +dlrmvpe="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt" + +hb="hubert_base.pt" + +dlhb="https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt" + +echo dir check start. + +if [ -d "./assets/pretrained" ]; then + echo dir ./assets/pretrained checked. +else + echo failed. generating dir ./assets/pretrained. + mkdir pretrained +fi + +if [ -d "./assets/pretrained_v2" ]; then + echo dir ./assets/pretrained_v2 checked. +else + echo failed. generating dir ./assets/pretrained_v2. + mkdir pretrained_v2 +fi + +if [ -d "./assets/uvr5_weights" ]; then + echo dir ./assets/uvr5_weights checked. +else + echo failed. generating dir ./assets/uvr5_weights. + mkdir uvr5_weights +fi + +if [ -d "./assets/uvr5_weights/onnx_dereverb_By_FoxJoy" ]; then + echo dir ./assets/uvr5_weights/onnx_dereverb_By_FoxJoy checked. +else + echo failed. generating dir ./assets/uvr5_weights/onnx_dereverb_By_FoxJoy. + mkdir uvr5_weights/onnx_dereverb_By_FoxJoy +fi + +echo dir check finished. + +echo required files check start. + +echo checking D32k.pth +if [ -f "./assets/pretrained/D32k.pth" ]; then + echo D32k.pth in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. 
+ if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d ./assets/pretrained -o D32k.pth + if [ -f "./assets/pretrained/D32k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking D40k.pth +if [ -f "./assets/pretrained/D40k.pth" ]; then + echo D40k.pth in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d ./assets/pretrained -o D40k.pth + if [ -f "./assets/pretrained/D40k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking D40k.pth +if [ -f "./assets/pretrained_v2/D40k.pth" ]; then + echo D40k.pth in ./assets/pretrained_v2 checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d ./assets/pretrained_v2 -o D40k.pth + if [ -f "./assets/pretrained_v2/D40k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking D48k.pth +if [ -f "./assets/pretrained/D48k.pth" ]; then + echo D48k.pth in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d ./assets/pretrained -o D48k.pth + if [ -f "./assets/pretrained/D48k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking G32k.pth +if [ -f "./assets/pretrained/G32k.pth" ]; then + echo G32k.pth in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d ./assets/pretrained -o G32k.pth + if [ -f "./assets/pretrained/G32k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking G40k.pth +if [ -f "./assets/pretrained/G40k.pth" ]; then + echo G40k.pth in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d ./assets/pretrained -o G40k.pth + if [ -f "./assets/pretrained/G40k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. 
+ exit 1 + fi +fi + +echo checking G40k.pth +if [ -f "./assets/pretrained_v2/G40k.pth" ]; then + echo G40k.pth in ./assets/pretrained_v2 checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d ./assets/pretrained_v2 -o G40k.pth + if [ -f "./assets/pretrained_v2/G40k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking G48k.pth +if [ -f "./assets/pretrained/G48k.pth" ]; then + echo G48k.pth in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co./lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d ./assets/pretrained -o G48k.pth + if [ -f "./assets/pretrained/G48k.pth" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $d32 +if [ -f "./assets/pretrained/$d32" ]; then + echo $d32 in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld32 -d ./assets/pretrained -o $d32 + if [ -f "./assets/pretrained/$d32" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $d40 +if [ -f "./assets/pretrained/$d40" ]; then + echo $d40 in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld40 -d ./assets/pretrained -o $d40 + if [ -f "./assets/pretrained/$d40" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $d40v2 +if [ -f "./assets/pretrained_v2/$d40v2" ]; then + echo $d40v2 in ./assets/pretrained_v2 checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld40v2 -d ./assets/pretrained_v2 -o $d40v2 + if [ -f "./assets/pretrained_v2/$d40v2" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $d48 +if [ -f "./assets/pretrained/$d48" ]; then + echo $d48 in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld48 -d ./assets/pretrained -o $d48 + if [ -f "./assets/pretrained/$d48" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $g32 +if [ -f "./assets/pretrained/$g32" ]; then + echo $g32 in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. 
+ if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg32 -d ./assets/pretrained -o $g32 + if [ -f "./assets/pretrained/$g32" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $g40 +if [ -f "./assets/pretrained/$g40" ]; then + echo $g40 in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg40 -d ./assets/pretrained -o $g40 + if [ -f "./assets/pretrained/$g40" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $g40v2 +if [ -f "./assets/pretrained_v2/$g40v2" ]; then + echo $g40v2 in ./assets/pretrained_v2 checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg40v2 -d ./assets/pretrained_v2 -o $g40v2 + if [ -f "./assets/pretrained_v2/$g40v2" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $g48 +if [ -f "./assets/pretrained/$g48" ]; then + echo $g48 in ./assets/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg48 -d ./assets/pretrained -o $g48 + if [ -f "./assets/pretrained/$g48" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $hp2_all +if [ -f "./assets/uvr5_weights/$hp2_all" ]; then + echo $hp2_all in ./assets/uvr5_weights checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp2_all -d ./assets/uvr5_weights -o $hp2_all + if [ -f "./assets/uvr5_weights/$hp2_all" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $hp3_all +if [ -f "./assets/uvr5_weights/$hp3_all" ]; then + echo $hp3_all in ./assets/uvr5_weights checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp3_all -d ./assets/uvr5_weights -o $hp3_all + if [ -f "./assets/uvr5_weights/$hp3_all" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $hp5_only +if [ -f "./assets/uvr5_weights/$hp5_only" ]; then + echo $hp5_only in ./assets/uvr5_weights checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp5_only -d ./assets/uvr5_weights -o $hp5_only + if [ -f "./assets/uvr5_weights/$hp5_only" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. 
Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $VR_DeEchoAggressive +if [ -f "./assets/uvr5_weights/$VR_DeEchoAggressive" ]; then + echo $VR_DeEchoAggressive in ./assets/uvr5_weights checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoAggressive -d ./assets/uvr5_weights -o $VR_DeEchoAggressive + if [ -f "./assets/uvr5_weights/$VR_DeEchoAggressive" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $VR_DeEchoDeReverb +if [ -f "./assets/uvr5_weights/$VR_DeEchoDeReverb" ]; then + echo $VR_DeEchoDeReverb in ./assets/uvr5_weights checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoDeReverb -d ./assets/uvr5_weights -o $VR_DeEchoDeReverb + if [ -f "./assets/uvr5_weights/$VR_DeEchoDeReverb" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $VR_DeEchoNormal +if [ -f "./assets/uvr5_weights/$VR_DeEchoNormal" ]; then + echo $VR_DeEchoNormal in ./assets/uvr5_weights checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoNormal -d ./assets/uvr5_weights -o $VR_DeEchoNormal + if [ -f "./assets/uvr5_weights/$VR_DeEchoNormal" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $onnx_dereverb +if [ -f "./assets/uvr5_weights/onnx_dereverb_By_FoxJoy/$onnx_dereverb" ]; then + echo $onnx_dereverb in ./assets/uvr5_weights/onnx_dereverb_By_FoxJoy checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlonnx_dereverb -d ./assets/uvr5_weights/onnx_dereverb_By_FoxJoy -o $onnx_dereverb + if [ -f "./assets/uvr5_weights/onnx_dereverb_By_FoxJoy/$onnx_dereverb" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $rmvpe +if [ -f "./assets/rmvpe/$rmvpe" ]; then + echo $rmvpe in ./assets/rmvpe checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlrmvpe -d ./assets/rmvpe -o $rmvpe + if [ -f "./assets/rmvpe/$rmvpe" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. Please install aria2c and try again. + exit 1 + fi +fi + +echo checking $hb +if [ -f "./assets/hubert/$hb" ]; then + echo $hb in ./assets/hubert/pretrained checked. +else + echo failed. starting download from huggingface. + if command -v aria2c &> /dev/null; then + aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhb -d ./assets/hubert/ -o $hb + if [ -f "./assets/hubert/$hb" ]; then + echo download successful. + else + echo please try again! + exit 1 + fi + else + echo aria2c command not found. 
Please install aria2c and try again. + exit 1 + fi +fi + +echo required files check finished. diff --git a/lib/tools/get-pip.py b/lib/tools/get-pip.py new file mode 100644 index 0000000000000000000000000000000000000000..ca93fe3cee809982109cfbcefb1400956b1f4c69 --- /dev/null +++ b/lib/tools/get-pip.py @@ -0,0 +1,32635 @@ +import sys + +this_python = sys.version_info[:2] +min_version = (3, 7) +if this_python < min_version: + message_parts = [ + "This script does not work on Python {}.{}".format(*this_python), + "The minimum supported Python version is {}.{}.".format(*min_version), + "Please use https://bootstrap.pypa.io/pip/{}.{}/get-pip.py instead.".format(*this_python), + ] + print("ERROR: " + " ".join(message_parts)) + sys.exit(1) + + +import os.path +import pkgutil +import shutil +import tempfile +import argparse +import importlib +from base64 import b85decode + + +def include_setuptools(args): + """ + Install setuptools only if absent and not excluded. + """ + cli = not args.no_setuptools + env = not os.environ.get("PIP_NO_SETUPTOOLS") + absent = not importlib.util.find_spec("setuptools") + return cli and env and absent + + +def include_wheel(args): + """ + Install wheel only if absent and not excluded. + """ + cli = not args.no_wheel + env = not os.environ.get("PIP_NO_WHEEL") + absent = not importlib.util.find_spec("wheel") + return cli and env and absent + + +def determine_pip_install_arguments(): + pre_parser = argparse.ArgumentParser() + pre_parser.add_argument("--no-setuptools", action="store_true") + pre_parser.add_argument("--no-wheel", action="store_true") + pre, args = pre_parser.parse_known_args() + + args.append("pip") + + if include_setuptools(pre): + args.append("setuptools") + + if include_wheel(pre): + args.append("wheel") + + return ["install", "--upgrade", "--force-reinstall"] + args + + +def monkeypatch_for_cert(tmpdir): + """Patches `pip install` to provide default certificate with the lowest priority. + + This ensures that the bundled certificates are used unless the user specifies a + custom cert via any of pip's option passing mechanisms (config, env-var, CLI). + + A monkeypatch is the easiest way to achieve this, without messing too much with + the rest of pip's internals. + """ + from pip._internal.commands.install import InstallCommand + + # We want to be using the internal certificates. 
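The `main()` bootstrap further below decodes `DATA` into `pip.zip` and then simply prepends that archive to `sys.path`; this works because Python's built-in zipimport machinery can import modules directly out of a zip file. A self-contained illustration of that mechanism with a throwaway module (not pip itself):

    import os
    import sys
    import tempfile
    import zipfile

    tmpdir = tempfile.mkdtemp()
    archive = os.path.join(tmpdir, "bundle.zip")
    with zipfile.ZipFile(archive, "w") as zf:
        zf.writestr("hello.py", "GREETING = 'imported from a zip'\n")

    sys.path.insert(0, archive)  # the same trick get-pip.py uses for pip.zip
    import hello                 # resolved by zipimport, no extraction needed

    print(hello.GREETING)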
+ cert_path = os.path.join(tmpdir, "cacert.pem") + with open(cert_path, "wb") as cert: + cert.write(pkgutil.get_data("pip._vendor.certifi", "cacert.pem")) + + install_parse_args = InstallCommand.parse_args + + def cert_parse_args(self, args): + if not self.parser.get_default_values().cert: + # There are no user provided cert -- force use of bundled cert + self.parser.defaults["cert"] = cert_path # calculated above + return install_parse_args(self, args) + + InstallCommand.parse_args = cert_parse_args + + +def bootstrap(tmpdir): + monkeypatch_for_cert(tmpdir) + + # Execute the included pip and use it to install the latest pip and + # setuptools from PyPI + from pip._internal.cli.main import main as pip_entry_point + args = determine_pip_install_arguments() + sys.exit(pip_entry_point(args)) + + +def main(): + tmpdir = None + try: + # Create a temporary working directory + tmpdir = tempfile.mkdtemp() + + # Unpack the zipfile into the temporary directory + pip_zip = os.path.join(tmpdir, "pip.zip") + with open(pip_zip, "wb") as fp: + fp.write(b85decode(DATA.replace(b"\n", b""))) + + # Add the zipfile to sys.path so that we can import it + sys.path.insert(0, pip_zip) + + # Run the bootstrap + bootstrap(tmpdir=tmpdir) + finally: + # Clean up our temporary working directory + if tmpdir: + shutil.rmtree(tmpdir, ignore_errors=True) + + +DATA = b""" +P)h>@6aWAK2ml*O_EvJ33*7hs003nH000jF003}la4%n9X>MtBUtcb8c|B0UO2j}6z0X&KUUXrd;wrc +n6ubz6s0VM$QfAw<4YV^ulDhQoop$MlK*;0ehK(> +3ZJA0oQV`^+*aO7_tw^Cd$4zs{Pl#j>6{|X*AaQ6!2wJ?w>%d+2&1X4Rc!^r6h-hMtH_d5{IF3D`nKTt~p1QY-O00;mZO7>Q7_pjHy0RRA2 +0{{RI0001RX>c!JUu|J&ZeL$6aCu!)OK;mS48HqU5b43r;JP^vOMxACEp{6QLy+m1h%E`C9MAjpBNe- +8r;{H19{ebpf{zJ27j)n8%0=-6Z#elILRo@w9oRWWbO{z8ujDS!QAC@3T%nJCf;1rX6ghzu#Z}R@K&*?Hgj1WFD91+adaM4G`4Xs@*hA^t@nbDYdL)-aOjsW~3}QVVby(8=@7U$ +Fzj5Y{w!2hUUH`?e9j7WDA;>-1aos>7j{2$~BfyL8p@__Y98dsP#Bs7^lWF +=e_gr;(4^?am?Cp93+7b-!?~nb}-$cPSR1zckA*zNp!)$;YjlZrfn&RWNM}=QA7*cb8A{(9@{5!vBfq +rEMoeu5FvJZngI@N#4#(2v$WnMGCAVD?b9t8W^qDfcFBe5ZZF%dPAPaq#>ikclG~yPvCg`JUGb_W2#PdCXxx}7!|T*xc9qdnTILbO-nAJaF2 +~0snMFDU<%E01X4*yW9@|}F2;vY~;0|XQR000O88%p+8eg`F$&;kGeqy+!~6#xJLaA|NaUte%(a4 +m9mZf<3AUtcb8d3{vDZrd;nz56RT_b?l9y`mj3AXtV0MT+&(WJ!7$x|Xq_^eh*q` +qYNbl$TgcX!{RW4b=Vw*pI`moV*K|DJ2bY*KQViviHGglIK{X_)>pN=IEr427|<0g`vfCSX-CrF6hnx- +fU6^LzLVM{GttvQ!RX(K-@qvQ<9nZh3{TwCd*xxj~wep|+d4YrpRGd3uJ(;$x#MJ^wO(dX9-I(W~SOL +|!j@ev4#PBd+t2O-3Y4TDlA%@&y9h}l?d7(gvc*a&O+atWdOv5| +XtFg8N1I1Eg2~6T^Prn{|GZSIw2~Ql9c?>!a3=lwO6eT!TZzV{RAoH`=gPAEk0OKF^-L_LxAV)%Ld>V +rC7Ea!84dqJ@cSb~%=6Dm=^V^deci#%k)qhs`k`mikNs;GRv|TRB1+w&XWHK8?pSmvO+Mn5HP0Rg& +0e2!{O&s!2A%Oz`W5|6)QOoeMptG0vVbf-p%MA<(l*rGUrRG$G|nf0000U0RR9D +0001RX>c!ac`kH$aAjmAj&U%1)EvFVxRjSzi=C>J@cM +k87yJyz4~-Qcqlg}hXv}1CF`fEox?~SG{pae%Dy$pBG>tnWs3{>FohpTZSG@fe-hAmws@4PFv7Mv`H@ +JnAXTbgKqwrl)IWaYE>+%OsO9KQH0000802@m7R+hDJp-}+<06hW#02u%P0B~t=FJEbHbY*gGVQep7U +ukY>bYEXCaCvo+F;B!W42Adn3hP*|5~K?fa1xA6Cs^1JI)&D4jnX98E~x(=w{RdN$P(+xdH(X;aUMbE +La7HDOJ;>ViJroJQOYSq=f31Z#UCgsvZ;PjisC7~V50}YW@1zhN!C_4fs|i^>lX7r-W?|$V(y(g0ZOD +x-5bTWf^iasXM`rih^?Sk#%z{jZl{Ri-7?Gn9_p +NH(fR_VZQx#ZustU5xCHVj%1=)fT*F;XSi#wiQR~iuoy}(RFp&L9pfC#Zn^7Axz>2yIKB7|@~y3-1&J5eC&FySna4hw0fjd92G^LTzc+Br>7Y7w1=({ +s_3<|LwzLQl3jT^=CKlyadUgD7M{+)3>-rRJjjOO9KQH0000802@m7Rtz49V_OUW00Srh02%-Q0B~t= +FJEbHbY*gGVQepAb!lv5UuAA~E^v9(T1#`|xDmeVS714ZB@>eSIHgphB=gYhxH9p$W;~m0sZ?Bwghq@ +hk_(WwwJ!hnbT%XT~X$2UELOWbx^D5} +p)=7nt84rjpQ!t=bvqBu6SXjxf*{)}V#v6kjnleUMl*qKLJw7ma)>Zw|O-`#U0v?-c6x#d+}i#X$=E%t?3;qCzPO{p3XDn-l0g8$MLf}@FhxjzhJPffk$LZTb= 
+DATA = b"""
+[... base85-encoded zip payload of the bundled pip distribution -- many lines of opaque data, decoded at runtime by main() above ...]
+Gg&Mui$3jq{A%}R-=AMG4>Jn0(4qhDRwD>lO$1N+bHNTU!Fo$e$8Bo{i@I+|iW9Erqny}BR(rV>*s!J +tc8sH5yKmYX06Ldcv9Co%acTBqWVmyA~Zi}9DF%&B4lhviTVRpF`YfX76L~d}e1|GFq-1y!0a`U2*o< +c9BF;r^#oiK#GI2EmGy}~U`t`C!m2d6dJD@qbp%&nufJJZ6M^z&XQ3lphl{#Ms791FskT!%e7?wFWa_ +E`K>`FxllF7>;$+)yp%Faur8YjF53-CqdaD19f?5Pi8>Fx&j!luu;)4|S>93UHf*l>sc`@gx$;t34>) +_+1@MWc{+(>qDnwIhFOPW}*CwJ9=GN7AlP+{E^iElOX%(S(yOqMJz=Q^GzvGz3^e4{hu4W{E;U%O +yV9+oba%!IsoeRNccw&t-bi$VvBCPVx4fXBpLB9`4ebatp^r(po^Nbjo~l$4%(veA$ub;r}v)Tn+~dJ +>FN1*)F$T!TMfT{b!wCQj8CF9=RfSJ_GQDR5BWL84z5CQomo)RV9xs$xZV62KvEV1a_?BV@2DK4Xwid +K<=o9hxfH^2pY`3oN6N2K@JK|Jb4RXgAU|P#uqi{?efs+&A<_@LeZH!2E(ve&pW=R5jm0|d2xRvNC6L}hw<(~dkCHp^g<|lW4w|lyda9&ZlFtGhk(8Wyxi|)>&9!8b` +egVgw+Q9e;Uz$jJSdF;;Bb&~*t^;bO%406hsy?qaDQ)9v`m=oZ!7+#Zvt_V*i@j!&nPRI>Trr}AuZow +B$)|IS37Y*If1o@*(kjICK8Cgy@D%p3Du{dYS{ +6UzyOKx2a!g1=nzokIvc$AN8)+m&NQIjZDX_t~=uy20UeJrwRhhr^z+Qa4y>(*0)Gi=1W +-cRmc_ew#@55_1?kY`BWgNme$Yayc$hPC~`sRI}ed`I~9Fii5F|aBJ!v5|1L|Dx5yy-i7W;!U>27er_ +508DJ)PZcj>u?n9HNi`VJiiRQ(qCH3eeYP%tr4;0i{I&DA@6aWAK2ml*O_Eyt%4K6zY003G8001Wd003}la4%nJZggdGZeeUMWNCABa +%p09bZKvHb1z?CX>MtBUtcb8d6iPlYQr!LzWXUecUTKu9-xQ8U_10Q=mt9mCsA5a$8Ke3p>IDWX*Q1- +N)0*4^7o@pso6DfqEnWW4aioTtRf$niZ+n5J}b0GpyuQ_z)K+=U53}_g{p5c_`aMdK3vNs-bc_K5Y0s +J)fcz$jOYH73m>%k#cs!Phv$|U8?-?wRbdT{K4Kn==C$Jqo%7%Or2;uoW0N3Vs}(#wC)3&t_{Cze!_c +zzpdoI9$U-@tJI-q!LTqVJRxUrSyYO>j6V;psyNQPHu@6aWAK2ml*O_Ewaha2N*y006}V001KZ003}la4%nJZggdGZeeUM +WNCABa%p09bZKvHb1!0Hb7d}Yd4*KlZrd;nefL)od6@$={(t}-mhJ-dVZ|`)-JmTx)+$S?L@({%kFsh +f?t;!h087-lkUFs*ybBE2v_;VcH-Oi^sL+e7Imd#7Hosrhr-RO@2RXP@FTP>=|=Ih->IBD)75|4h>%gy2qO?0!tS^lk0 +9oBt|zI1<;h#adm)Ix$V!Z~s1L9|5%_`@!-3;LT)~)YlN4c>BMy59B4gE78SDxoYTUS>?~ce* +Gahmzz-=RqN;sPEMh5&Ya6_Pwiu!56Qj9%lefuzt`ShP)h>@6aWAK2ml*O_Er~{`FBeJ007wn001Ze0 +03}la4%nJZggdGZeeUMWNCABa%p09bZKvHb1!Lbb97;BY-MCFaCvP~%WA_g5WM><7M~JG^#M5)T1ugp +QcCY;ZLQ;_vZQKdsQV-)S +J0$WYL%47457pmIn7#59>eET|I-W%>IqQ;2Jz;iNi^QXX_aO9Nj{K}jRI +(a7X|qDs*0KT)v +7~xxKON?)YZQIgOF*D@bGw3LJl;o)C8rLCZPk7Wwes1^@tF82|tz0001RX>c!JX>N37a&BR4FJx(RbaH88b#!TOZgVelWNCABE^v9(SW$1 +>HVl5hUm>^{-~(K5eP}lX$bfC_hGJb>tR41H6t2$ZUDWt2Pm+@!g8cW9l4VP_@4B|@fCUH=TNFipBtK +FX`&KH$YPs8qX1CfZxo76k;{QyzrKbiI-_($?mtZpXs%vXlIl)%6c_A7UXn37(I#E{zG}nITkM!b6b1 +jr?_RyH6O_k6_iA`sOY;?Zi8vO1bHn_q|^Ei-L(g5#EXzljF;zk=@*PLEHQA(<%ZKA2*ectl&itnJ<( +xdVUUbweE+m}n5_h5L%4Iey3yFX+nF4Q{L-KJHt1U*=*x~2hD+z4}7Xat@f_PCA)tX32kwCmglDBpp3 +X5Xbn(eOPK#d@_`l{H6&FJ-67$c}Ug#P!*V;ZKqzzeB4)V`MbrOxxWo3`oARlCLg-pl5M~$+5tMp4OAHebQc}#G-cY0CokB;2j4R3U(o&=Iw#OZii +@Y<=fob@64-Ui<(X0Ehs`4QVsQX&F+6ci;+WteiWXd&@GhA75qgQTOH|oJiz%6k?!HdbWlY)vf;^P)m +fZRl8$1^1!>c5?|9b +`QAdql2(R6**yns8L}tC@e^!$Hbk8jKI3qhRfQ?24^xRw)h?oKt-KUKm@?_rR-1&4|Iazp?0Z912E$U +E6}#0g_i8)O*KVrP=M#fI!0&)W6@UL3}-VKxyh;)jyM8;HW)d;6?|_*IZ+sRhSiLEO&&>D4+@_-&hB1 +e>QZAp(fAXE9FOP5!3Pm~2qsj*I5HN-*O2a7A`)$*`smR&SVj}Im)sV(FC;88|5R>wIo3C7E_u^PL!q +fKtr&1s3sk;=l4AhajjQvCVoV;ZX8|!>x&|e-2eNc9vF{Os7Y*8iCr{Lh(GCif%0e@4I6{Hi`Lz^7$c +QxD`C(8dYRQ2bnYBLT?6I^h&M-s}7_YTtH&R_8PD*wZaB|CFy=l){)E;Afe_Z@}@%+z=9Es^Z5fhbHW +BHtP6N~i5=mJTxPXLsGnjwpFq*{e#CDmb~ms)}B%os32X^huA$EZqtQV-x3nm5863QHsDG}E32sa_qh +lZbKwz@>cd5%1kWX+2A3pU8aha2TUbKbt9Y0aj +cfwq8)Di7oEFtiwPhS*UJyRPWpAV|pQ%NBzTk-ibVk4jngW%|{s4w&LX$yx2XT_MJyiX--Gi<@Rvjh& +==(ek}iNdW}L6><2mQ>%odEwSR#?}Ykt7IxI(e>WM13Jplr0`-z$h=ODKY4i +!1$wqfZsYkz(jar>`GA_|CUAh)MHUSsW_~H{<6wTv6Ycmw579kI9^$DOuxz&yL?;={(7n%MN#ECm7;` +wz#{1Jr5f+xIEM?l+@{YJOgC5YLdAUycqd;V@m}^^203Qny&AG%dR9&bN&vyc)I6piVT2BsckR@8du1DL>Jl+@lz4tW^{BOpHkzotD8Hd!}=`e{;T<+537ci6|aYShzw;)<`k~ln`g+uQg8B`jx{9fQ7;tSPYY*z>i(?fG~QpE{LWMJPWl1p{mk+oqCd~6)8X(YCMT-re;A$t`7lBYBi=`M)4 
+o^o1~2aKjp7S~p1bIT(*+p_I&7Zz-0fuF>G4irJG2HQS-kEdipVqCeR7DXFwha5>UH&A0^LUu5PP{mR +>EK3<9p~r20zkpJdAOG?LA^h1iXTqaXPnkp^wY^D^qa)4q`RrUmW)_U_J*=R@OWK~-W#zVX_AoqcTuh&bOI`&hI +kV@*NdvTT52=aQFl4@(70!^Ld?%H5>9C*CT@TDMc%@6OmQUiwNU>=)a+d}$s8`ef^Q8f}Yg&D~RMZ(z +~+@{}_*Tn*gfM#nTwc%(jL? +}Sr?!~V~y%itH0D39H}V!4XR8aXHW~Nd~m8|q%~f&=xN1&ZaITp_`0jO#h{F&KW|M?)5oJ*IlkxULki +vkj}%a1z!gvyz~2{cV}EU(q7O=wUwokPX&}-$c%jg|#$zK7fT}2pPNVmbiuY{gEoId?(Vb4GA2d1gWP +%5TL=AgtQKZ6OgYqCPc`!DBA(DV*0ftWKFct#(4EJ#q&=S))>VjTiHqkDUV!bnSO6rrI)s%yz(v$8h` +0(kSJGt{Wrbmm?Ll_7)Ym@CUlEw6FBk%w$SB%yz+_wZrVRnWUZ(REu7>xmD&uUeulVV4D;gy4wJWyM= +aX9)tv*wY_6#FK(f1VHV)T1=P#X?)&*Cx_56NaIN(@zS~NH#Tq?GOIS*~Jc|eKnd@-)D`>&s0x&80d) +S^}OVaA +|NaUukZ1WpZv|Y%ghUWMz0SUtei%X>?y-E^v8MQd08FOG&Lz$jmEAElNx-$;{7FNX}15fna-6*(6LcwG&xBDz-;u$-9+OR2T?oiWq?a2Mvhkx}4vhck~H>mS*Q;R8wQH(f#` +MyN^Z`MYGF|6x*z<#jbC6jgq2Si*8dYv2BXJk^-K(M#wMp?tNL`oiYRhf7E^bXGE#^0ApkALVp9R85)5Im24%(k+)?007NPdg-LR`IV3JXD?QciyzmHOz`;iRl>{M^c4?_nM6$ex_3-Za%5AA~$KoOb)y(>Clj4FDRAFl6eQx)GN6n=~YqWWDn$ +<;FWSvy6;q^J_ZcN0!plPwV>C39nW&qTG9V*0=@oZPtM_z@b4RHgYTHK%phhScsGVu)2op +t{FG5ZSR}rP5=|dUr#RtF9RagU@)cl{ONr%qlqB@eG4+(k!gipWZ^A{1+4$r)F6XsX=N*)DN#2(Gch| +c&0r=k!c73xMDNK6;UB;NypyWg13eZA2v^&wec5g1Jar*}cR1y$lC>GTMsU#&%gDjOzn1XUJI=fj#1_ +bwSD8{G{n&Irp1%VX?PT4}&x{T)7#PtOSo)D)$fC>$FLlonFuj_A0LLrL{JI+KOw!w +Eqw+#=X9@h5^L_&>vmJ`9-X|1@GEYt^?h&30wFA=#R@TGW=-Iu4LG>^t4a{EnnR6YJy>5n?dpRS7oDF +hB`eaK}ny;sW%fVvK>{b~htsXi=2-IpoLBTB^*U1<{~H3F`pleO8r)d3S>ELWvtt&R~@=&I)yJ9{Hz< +ViA3=@9Jh>M~m4AMp_N%ZzOf5g*Tk+%dh3sJP^{TlV7^$=)|Kqd;yAHm3fA;`24;uk{uYEL=g>>`Nj^8!HTMx@z(mXxnXK0X +w#b7`#+=yTx^@Af8O%hXPUQkkVFmwQ0s}!JdANHeI(veZYTJ^v#nKD;vaJttUuP*hh1WR1CobL?G3*?j|IRDfNpnykR6B{Q(9ti1(+g;*?MA)k=(KsNKNMj^S8;2k0E+LpB}taKj{@Uk-^Z4k +^n6j`MIl!zbmt_OKJ2=J947h6qiwl5kxL`-5V+PU6^h*D$^@QWeq>wlL?L2xTyAGMVgP>MCRZ%<{I +n(CTYKtDx;)2`SAKGWGzJ@*r6=>H*OM#{$`K2lA(81x~)o6s4{x)~@Tb818{CDx|>P7PM7zFEgdESeu +iPyo%BAoC<{0$VKsh@RXpVj1SZXkUtKzP|uK$x6o6fHxpk)cNztbfi3KZlG%=2;IygK@e;5V--0tY +N!mc!pl#cH8XB35fja1swn+6^(G+oDjU2sy;wIgUA4c>JT}I&vl&#vO~_6Y-ItI&z*v%4?qBU%5C{~h +%MS|y)JV!-sDE}41 +7TX>JJQxsT@6)EKtHWSQW&OLoB +pLftC%hnwYf%3je!`o@NbN#PNdv&elJN1%RCfq?j>`{;J*=(5|e7!*JZ$MkkIlO>C4^cD`9t{^6#4I_s +;Gct1bMk@zox4K8_A2tz;OH9GXh2ut7S5@G#erl1lY=c%IfZaF+s-3B968{a*z(|Xg{5of`u*Z)orSa +?QzNXmX@riJSWtGrK)kC9`9;r-CQ2#*SAxaH*$G+t6zivW4s;4(ZG^;eMRRA~w(?iIDFtv#U8tkoZkn +bVrpKUlkLP*yRfh0+^9-+-l**OEJV125kZNA1N%VI&`+|MvYe*@MO%&Lt_*q~BIN*P#)2A5cI*uyvG2 +muW`mlQRqX9~5xuR1ZmlLxRq5Q>xt~xg!w5$|o9tLn$ltqA1ll1oT +2y6d{l^M7Y|>sDTKLN{vk1f2iL0sO-wiJ1Iw5QExKAFn~#9Hg|l%Pz2Lf^i;Tp{WFeWwt02E2suv;tr +ZO7n^M@!wcq)SzOmbW}SpynH +S(Y(YblF>N)QXd0y#J9jKGo`u*apL8;F=wGfvR#2yA9%QDDm=ge`5J$OB${Fru**w>i&bLL=xE)e+1_ +EAtRI`s1`8(cPoD@khQsS>5?0cVL`J{xQ+C0qPu%MS6`j!Uo7ptCfH)4gEqEu6e0lav8Jg*(Q%k!%m@B0Dai76M?Ylc3=(D*AsW6Ve`#cE2!il4^+f +WO`%Rf|XozZTdoVUGs2{DLIt}Eh7^)CY70X+pTb-ECBeTHb8R>lBPi>kW?H>tk6Dvc!P;3VuJp@yo2ERaUuL1dH%^n% +Anyw#DT?gmqQkzb8{~^F1wR*$6*o|pV3-~x1Y{llZ(s9GgHc2RoX_e4b`shnaH}XEpJ>IVOpa(G!2^7 +5BE(Wo$XRgIa;|k4TTXW2;dYujNKckK1`*IC75fBEr&mN@r7IO!OCk~dv)zjw*eACBu4JI%Wm}{;Yqh +AkTeD+U3HlRIzbGqWn>hCMtF+IZuM$VU}>MF;y0}l6F(F$i*D-XwP(31 +B_nqbsc7xMyWs!>@u3R)zYbiaXM6RA7?p{2l5dyE~mp28OfIy^s4BII%VXncCDeFSBwk~-rh0oO9%^{ +KEs!5$2?en}2H))8%{uv0Y_{Wuz2KLHcb~O25NB|?;*=|@xHs +0-pDHcHw&7ADo>D!C>7Y5}5DCXCtI^D(N%O#zyUc)a#JB{zP!JhIRD*fZ^7(VbQd4o)l-yh5?s?Oex( +Q?Y{;6sCr~KbUtDhdGTK#C{a7%V0jY<$wQYzSM9^N%YFn_|gXdUPHu<#PSCn75fY<-j!>Z5|VV?(ACs +H#JMY@h%P><<#fv26z7!PbMZF0ogkB;8o>JsyHVA;juE=eEzIL7Iu;`U9NidE^1S{OQ$COVDu+VtQZ! 
+wt}!_rQ_1N%&C-%jW5l8hW>^U<9%rzJzr-vAQIQ0Ul56v ++tuI}2yGO(z|fKOcKnCN=rD(?Q$-82_Ds%x&`#7!I~TZ;)encO}F7YrD3Ah;WMuETn8(Zfi=RR3&mqn94PB +0IQxuIAA%khhw*Y7*k#tQ>3`k@`=5#0DXI>{`Zl0jh!vMUPuJ66klQew`h8|%W^B5zI!bA2s^Cc{d&{ +!a@V(+MC7`s0$MawQ_<^+!lv=~cba4TZHX){^tUJOTM6yxz&RFNx?b8(C9Z4;=WefGb9;{w#Ujgt(+v +*6$1wWRnyC%wSiLki8_l;cAYpzvTeob$6_E(ug2OGqda>T?vK-J6mzf(|_6rS=3G`oBsG@n)-ABa0iP +8r>e7h7~3|0t7AM5D%-zzm7DIQJ)$o@ +5h?C>XD~t_^RAefBNIC0y__4tkSw+;kKC<7ueh(qd-2Q?^ZW0F33cU3|M*V68E&pE3q%P!hz1VUtT@hTN9($b4rHYY@Rr-D5E;mDz?Y;{sggO~Py=&#Th`;%^2-i(Ng*j+c@8Z^Z4|yu%nWTHT1_}7TFtRKIO# +&uk=pQQ?hA#3w@9dxth~EiPNDvvu*P*yAK=V*;`bTjnWhPP`<~k5FzT$vvU>3vjI;Sd^VQjZett8)*F +bC;Uu|DUe-vN&OWtqM_$}V#BzJ4R3QzJ4n6UYn>G4=a7bAFB%=5c7+z?7k7vBsA>#iStlCSucB~4g-v +*vrQ7U;ZL=*7elBRu52Y!!c=u@+x_(f_s#mtZVKTO5yoqW`INB*VCPtkX7qlt3Nx`+T2hMC--TD1a7> +8R2`~{{v7<0|XQR000O88%p+8Z2uE)ks|;A@sj`mB>(^baA|NaUukZ1WpZv|Y%ghUWMz0SaA9L>VP|D +uW@&C@WpXZXdF?%GbK6Fe-}Ngd^rj3Z6xL2|>u$NB_Z(YJTn)6>(_)30evCX*kVeP7kG6Rq5oyryDc~zh5KU;aUZkw&>uXgZMsHdm@`QoH_^!>@BzdU{Z-Ap`bn_p%9QuZ_P6 +yTL-8#xotst*4CVb@npU2bNAUZ3uF`0l3~9v(5|UA4=Lcd}kJZN4j)Z_9IMpl&CZ)w+^xH_|xo%k$2l +R-TVF1-fla+htidb+s%v)vvOs%dH&i`3{f(taabw+p?2?<7YSMwkz9Cin8Nh3iZ|{MO6a{fnvFQzm!B +hgKu^u;OAS}^=jaUn^u;qtEY9>mz#}T&8Wd+pnO}M?XmCkrhn4xYxU-7{SGF(`uRf2&68>)anAhar@G +tka1hCFXGhZ%PQ+gMvVp;Xk6Mk`v#NgkNI#sZxozNKmjbeFR&vwjK>by<0`}EV)HF)O`;mS*0$|;=+p +;f~P2IQ6Mnic*PapBak=}@&i~mT|PL}QRqUe^*&LI1ep1s6Jqdk!(FrQ-A%5`TutH&?kscdDvlw*|Y% +l2IM#qO%Vu$**C4`0x;k-nF>nz|1yZK`oAe+gFMNBPUXYUNheeX0ZPqr3oVq5Av_cDVT(SPle< +G<5j+`;(Jr#mRr1o;-i~^oQreJuk}c!gl+f{u*wrWSDM_Ok4Me>u{HC)h%`VvwgK$6}9X)&2qR$B&(J +ea{0DsnQon0t>OOr8X-9SU;gys#ScH8o;F`k-rw!Zc6D@A6tJ +#EAr|7*#M>p4nV6tdF`Mp3+3hzyK7K*7luwX`?d0{*(b0w$8Gx7t&k7QrMKL`(`W9&iG8o>IkbgxI|5 +Y}9t%Uz8Aj3?6fO`G<$c*_n8<)#C9Iu*wc=n=0Kp2hg@kFwvlwSW!TUjPB&f8xA?fl&k8ekV$<^umQa`bn@}A09@@#+ZBQ$x6WFslTy+uGfk$v7y1ssKnsX +y-~vl~unWA=fAJ#ex^$+f7P-Y4ihMHZm)B%kE}XsSBfmb(o0`OG8Pbn11}fYa>bHFf9&0MD9EX3=}17 +;bB!LBZBpRckt9dtoTn@u^c&~cv59(tmG%~zbUW2RT39u$mHd|Z!#vGSy)i6nty;om1uKS)^C9 +z@e8Fq?ycTC+(>V|xWNvzRcus%{s1kP7l}3jrG}ciq&Pu2Sb{D+KMIbJZU<*O$unF%$?b6EN!zA}CF< +XOwcKyYRw&9?DM`NoD%$jbO$UgyGRK__%#*c~i$<>zy5pv=U4ZR|M>dP)89?h;@uIC8x+7*z1 +-|qV9C@1l#1rEP=to#w*=Ke333qo+w!dx-5y4fCCnL&t&IZIf$a%`9Hs-RMQ6@M1qdu#2}eF`VCYIi$ +W<;*FW_{9?Ez;{xret3RDV(SF!Zw0C)AmQfdM`Fez&QX72-=r3J#CdT7q&sgCQ$G=|XOH8qiKQcuE2; +wLsHKpN`m`X-N77a75J?h24pu=dUfZE25KDG)G;>zA`3f>Ls&VH@61$D^!$h1J4kcdnSNwt)I+ElQdtSw7{@AI&8NR(4rDfMi;t05WoQGa +N5q216U$0!iFczMz6H!Eb164tEu#1m;+ch9Q2Y_IML=z6zn_U%&A?`I9X1SYEb2!(J_01Dymp3uYw0CLwwKSbZX0UG$^MaHddlMFDtM$QG|Sd`qyVl#TAWWMe*X~h4{n0 +uYGk0j)L-7v+wavDZpjV4v&~W{LP@%QN*$X3DKgY0dgprlPM_ydU5B^0Muqe=Z{}aNg(ZHyRCZVc{`K +#m|_uJu?-zzbGVue^3`Zs9wv$^LzE!7+u?#3ns&K(%u>1YAi-dyY2?X$4VqaRTVz9D98mLg+eN6%prF +hP@5oqqixjlIzt@Olmsu^qF +Oy-!#OR8AESH$gH4)~RBcH-F&#b1;ZO3k+3;}T!+^DhW?%^qf#8 +$9dMHF&j{1NyDeE$L!56Ad?lgSQJr(dEUJ0G`9~P=lquxp=zvW>$6lVkW*|e7^vh4)X4dtWIA_>P{eV +v@}DfX?^}DznTA2l#RvZ7qgDxBtU<~bTFU#_;TtpnHGXbOzv2 +7dw0Un|`39*AaprdEK;7u!?<{F&!i(6bjAkF31~W0i?j;=@lL$19&xTdfi8nlC(pntDBiUg+ajh71OT +!u~83xM**nLcxchM{+tols!RD7X`Tu5|YgkIa!^b{wqC(q${M6eYcO7_r1cKQ&>$&ZuUtql(s&u3dMo +9_MndlQ@uaR(fz7w$|Z`LES(a$Ap*#%QURVc^idYXpK$KV +2u`yg{!Fx)%!q4T2V5)h6o6(?KrSO#In7yown!=kL-R8{c-b~^3~3($oyZyTc!C3`F$=oHGBk-?50(o +f0ET4>ic>C9+C+}O;_EC)7sQq+VSr^m*lC&ycYq_wSAn$wXBn_-vj? 
+5DyJ(u8EosNbfJ3tb^)pY4DPRaq0t_v%myoB0(`P=z37MAJG$x0)xWnwbp3rgEtmBzyAItzBS)F&>#A +ya#h*`U@G3p}74=0X~l_L_*dLYQ93x^gZ4Qyym0gpw#r4b&p$^_HX#Fce}J8VLB#{S*!#Mzayc7Pq5w +*`xTwhCtgtSNXSc}2xn|9bD9T~!pWY#U#5SbTPk{PPbA26^Li;znmg=+ah%8ggc1K&>rdTW?RR6y*8k!e`2S*7q1#8NP= +_IcJpG)+72U0YGufR@h3LO*dG)^AEQ9Vhg9t0I?f()z$bXzi?d89bB1lY&pQ*x;&2I*S{RN$8XVp+aB +XXfQ^PR?HaiF^<*+O`&4HKHx|HUWOZ6(59;2MY+|lvla*Bj{oA5`Ib72ImAG&@*h>B+4yPmGg3q1Q&Y +6cvm7#C18nb8rTNKuMfULG-fc0C@+7Krjbpe5f6^!WE=d|RU)z?&cgoCrAD438~(6TW!cuOkl|r +XY`Ic#anlR^ggZ(gYT>0BmH?$k&UF?7f)40sA|xGYO*LzRgc`ivNI2F}^=T!gm0(jQ>)M|*$6_u?&1f +_^6_0{ddM0J9-B0LoCD*b#@HGJ`a)6Xv0o+)+0U!NV}UtTX#^>c4b1a7w=35q~vdwo7>z{ +*SY4%~GG0fUxCel`j%VEZm$f3P&yj6ir*yO|h!ZWrxm|SA0a@5i0GEpo4C6tQm%a%PJ3Rw{LeKDwJJN +qX<^53?OSpPQGa>h%u7t$LzU$> +j0%Ouze78kXZimmKAj0i49c2{C5*}j&+lCgoCVvBuEqU3!eL3Jql0|&H3A`ZX$8jD6bENN>xV +#%+ot(Mu$T;$Qv)sHDAL&#ToIi?(jybdgMjnf#H{?2Gs<^`}N9%f=Rgn*-iH{0Jg!+B=x8~&Nt{S+V? +UTDVx3#TcCHdx&_nR$Y>x2!u*US-q$0+E)QXjzwB`f;u0e3mG8=GgE;$fcZ>-}9G +#vYCTZALo;?x(2q%d>w+%?vgyyY-g&-%qm>&L+CXyZ#&ec|i_`5^_Zj*Q!M)p~9K2!0{g5E~Qs~Xv#L +^^`62BA1tYLF4FMs*EGsRsc0ySzv?LJ2X7h4H7^4ueAkh%Ew4?e@akP$-iq7Ehe2?=y{>$!fi*Em=Tq +vp>HG46NK;QFT<|cv(1;X^4-Z!>2*3ALRXU4AZO^pR$K0FD8%DflL}s890M8f)hj<%%cZ8ll=K;k(1L +h5Saf?h_Ng+Bwe-=g{+mBo9>{*y&%iW$T~L%&rh6VfS>XZCx$lJ;}!=e#~iOom<00}pCLa{obfk_*K^ +{r)gvOLC=&~>v>rFJF%ijHXEv!Cch7-7hKfK$jjdKDMvP&f +)IS@!%}S!`kMqDaaT!BNqiic*6|;#zTP}AaCq_L1!(^I=+(RAvn9&gQdKS@foI9-Nn#Xj9gFIk +UEFnC;uW^>KF-=q+H@>o=4csG!4kUUWfsrWU~Ep+fW +a$@2Aqibe9N-_dhmL{{c+%xd=MuGB}okd>5ML*nXxQfwYB_^#DW!^xr)!>r7I`RWgR9@$(R6=7v +o^p?ZOJ5Ux^=vrSprl!aO4Rc+!U=xp6S;cQKvsBap8uKr~+i@!_Y&)Fw=(kMn?yyaiIN?2TTvRVBq#B +Z%E+}i`9a?F(b_k3j5h|hD0ffN@K)Y&M%9w_^DkF`&^gDGdl;oGEp?t8GAk#)#^QKNUQfVJyT4?BxNtm4l&(7+Wn3qF{#si665l)08_}!>W<`~ipmR6C +Id++cz4846e1_ZpsLq+(Zn3>&H)4{%F?mEc-#0}X>6l~7N5u7KEs#us9q}`8zNVs%K5K=fMjsB2ASjS@Cg~nc5UKIOQM8I(zvzXmR~WeA^pJkPqv8>KD|4o$8YxtrOaY4 +uBjoB*FB$=<_HWONR;tX9;qnR=N@uTyCBV_`b9J5Kvb@4+0jfv=CJOC;*z#Kdg$eeokS;IE`aCrob`F +#3c?tZ~H#>l(!&n~bC=1_J=p~97!NMa$2vZ;w80agS%X)^;fz`#Tg%MPguBWUcV3lc{ZpD6QO&sNN_9 +G~P(;9zbjbc!nhy)7b{}8>-GgPEuXi|EeTf<0S_-7?S`=nT%;wD_iaeowsiI%BJkQ(VvwK5qO-D``Q_ +6ZL<8%9I%`p5QIB;h1AF87Qbj_NJq12j_7@JH#a9p?a!PirRL$}0sJI~4GztMBP&>gIvtl|3dMYm-kA+{%27q1!JnI4(_ +pcgEQOIGvLj+o}!60_?8UE%bdxvNhX^a_M(?#NtKERt7tqYCu3NkDzeUZv?E= +ff^4K7PwY=lcIVWVP}7%xHFLjS3%e4|h_cSLFVJC8mD$|M#TK&ks&(H$;DFyIH5Y5>u~NsWYAE|kTPLd;9CF +TmffjfX}pTC%2tG<)Xe1E^(7|^Xy&*VZzJ_wU+(xItimVxtLh#}R*K#Q +fs>p)3aNzO|@JAA;@(Q5;GzTiN)?Q2(r3)qmA;M8Mryf3^Kj*Ve*F7W5tlnR|8K`ZBbZSIhs(w3k~CN +Cp+jSs%zkh~)SmKXFrPG@V5T3y3g?Xt?otfOqoPN#P +$$w>kUwt{v@-L>po6JIJ@k~?%ivc-f&BWwfY<=ECT%M{%Z@1;&OCX1Ep4=Y4TQa#E0#Nc~P#`i`QiVgJc?%tIRrgGw>)Zxa0#2b66N)6@b$ePr6J=gAT&?pUio3O*s8p2MXflsBAlR1()*^mU +#2ksuLz=4SUbPaucdZfnA~iD^1a*aBP)qm&(#}7TNQYM3LET`98bOf(ZZ)1#<_|D?Zu!3?`n0ba|v*h +;B;sn?(0FW^o4c00xX#BG9lqFPRk(QJ#RL7BLpM!h&%C+N=*3#B@5Qvh?EqpT2+%XLRY%Z$p+9EW#EH1Tc +Bc(w|B8=&O*9N+XzZE|>wv$p&oClbacMvWw&3A@v^mRZEq(cE%%O#<{9y>TcPzQ9yrZ|bx~7aRvqN_FQYjw25&Ugw7DnKI;D@!*eS38Yq~T8E>hqr_pOMKasdjrs;vV;A8AK+Ic=3 +=^xSGs>@;PNU?dn^ZChFb$pr?SES>6JHLf>In8h(E28oO|GGzf8|u7XC31VT%L|om95jJFD_a_6W-jNc%A$hP;r_p78B_7*iBJa~BSCrNd9twaZ^@zJR3yI$Z8{6d&)|( +DZiMNncWKEAQpd`tidnuCOU*BmWB>P(oj!RQ(2zbbB)fBS!K4Uz`ICpnLURa9i`V7w`qIKgvpYnT9VBjgPdiWl@z{ghDnn2%{ESVQ*YE +YI=H1*Zp!lsc+-JG_UDNJ$u1V$-;3Z20 +5Xl6i@A(>@TqX06J|@JyMA{zArcB<>+PI6D=^hH^#^MmdBp$>+W>bXU+?-M0!qJzZNu4cDDD8G@M0ff +-=dQcd5Yb|NQLpF3q*+@Mn0Tn&!aML>$&ufglDf<0wFb52>CCzoC>e~XOswX^FeE*5UZBsu)Ui_W58Q +p0B$He3u=iCQRbOq~cGp~4*TLl`VNJS?x>I8XeM;vdJMNOk<2e68k?8~GtzKj+k*53c-o!jVi0`u}vm 
+FYSis;%(}aEhPn9NBDRagp&(DP!+^stnjWZYwFDs>>ZPY3dUBr%+4z<@kdwwhD$tBhv$h@S&O>J1;)_ +0jywpdt6z00G4!)sWo`zBq-x-n4NT~6z3y)nGe*BLQo +!>{lHlku%Qb57|1lUp`C12!Dq_k`)*+|PJZRZnWu+@G84^*DDzFbhYhj3klD6;-+@`MK!Qzu?}WE&x@ +#ug{tb@)8Hb1!G!J1gbLm5sKX;0W39fXOMgm#F%aoP=Mhng{P +WfkP-tlc|3>(mQ4-62BQczBZ94I{CbAsldlx^r+aaNx4_JPAt--8&~UFtX8?6&rqVh}FVi5$AHj7DF@ +DHGXrx>RN;{L-9nC%qWkC~+kpnhEX$2hR$tU72sN=F5w+s&hiVQb(naE|Vt@z+~}dAx&<)E)lN5Kw~= +F5-lndT6T_~gcuEwvT`h(vKx%WLcFw1Zc7Ra;h%#FgioBEGbVG#*vMY=uyjIo66J_opwBAJXlPCHVOn +)3HBxW&1h%QF$d*4o`Sz#prb<%zwNZleqrV7LAx$o?(8nkc{80&(eok=?n~ZLlNl{DAbl|YC`02-I5s +1M;>4apY^W-LmKxXOg2)k$&=!yb=Z1X9!Gu|?$p5RXC0+7c9$J~CH#M>PDQHhh=y`dl?oMl&68$;iApAY$RVZUG4e3V +9#U=OIJ=>n(2t6adb&F>-;BR(d!aLNfUMJX4-O!+Ss9sq2}suoZawGS%t) +6X}>FUo3z*XG)>tA={Q0{n0adKsIUw9XRH{=t$APn78>S`J7*x1Im*U0Tf-@i8(Q%2YM%JU_e|_}=WV +%?oalKwH{fY|#r#1Zc0&8aUN(erEkm5V-=Pl%AI=<`$H9j&V4w^r2dM@z|Oo4oK2)AjtXO^VTlNboM&ew*^477?kvxaDh +h!Tw$}jmC1LFJJGT}9q(iEzBD&-!3OkrnJW8dx6Lz(#l%7!($#lbBRuycebPf-_Te7Zv7nqRCY<-4PafASX{xmf5_zyaQkR +WMt}I7VEe@p0${hQNnk4w1ki3@k6`el3Rc-oD&ETSNxs9M4`VeBXiY!H2E6{kYbD-VrJG)NObApc+z+KvRw_dpG0BiK~5U{nhE8#1W*p)I0poO!!z;1hokqRco3r*eRxNm?X&@slNylyW2@q$ +llUao_-EAdXL1tUn#Zg03aJb>fwPg^O*C+`mg{)!cgKBmOq6!!m)=n{$B-Q9W@+NbZG&6L$<%;PKo|< +8`?JNh9iU8-jJK!90TI>PbIv$%CR|}k(*ne4R?ADt^{K8CpogyEjby5=7aNP#1eu1EIVvQ@{xeGp=(CPJv@#RZhE6D1`ovG;OW(=y4{M^7&9pjM>mlJ%YxCH*{4Fn9DhqsbL;PtnmX^=&HrH +7!JzOU826EK36qd9z1wpW(S7F-F4Fx#aY=N-_P&eH7%mVMr?Ns&1{ovm>^t#=CXg&3xC90@P+%;O5Ad +K!)(F8q?!%P7ko1jHA<95XheXGmkS-_RM48`a^LD(D{E>ayFcj~KRUCiMk}Kx`BnACG3Bj=f=6a37n- +O#RVT=xq-~A}b)>v9S12iH#(KLgUTD4(KX;|~60>k7$AL~{zLVm%jFL<>#8>m`HGN|wVAYgdtd8uKw? +#X$_2h$w_R(@8-wgiXgQZU-vp^XCj^KfU!Fnl8zATi9Z{x?ud0|XQR000O88%p+8mqlXa?ganM69Pvf?~eES97(Y+m9|W(?N$`qK)BDn*mHXK}*9r1 +|?P{FcvZR!FL=R;=auqHerw>2+(s}vsvxJc+2gA#=;~i(mhX-aS&XnLy64pw|We^0JOpPIz{-F*!_B&i<)Z|cd0crR%>8$k8WmDKD*4V9Cna&IiR(1 +de{Y_HUSz9)~O2ni~R1NSD5g3Fvy7rH+pR6Hd042XF>!=Gt%V^j+q3zenv0;D|v8iUxN^aLaUHZZUjD +XsyQbUIlAA-}`9K4AqS^9@VcpO*`0^~@Z~QUU@&cY@u~!C1EDFzd>D3bKJNb{x*3Eeo@X#9^f?N@NlO +6yShc$a*5Fmf8qyZ0zvsEL?(K)%K=`K#VwRFZo_s{>0M#V##`?`k$>Avf#GfqfeQ)W&f&Wzp_~6*GT9Gm_LJaUh+rzF%0P}zMdBw#43Q1e(+ng}h2O;yso#0#qCG6nP*3RMMt^4=1NIn7+`CSRj3 +8eXyjD6H&SlGxkSQ}^+;@Ns;&7f;2<`lE$qlG`<~)GrR{8QNl#j5mClt}PZe6 +y-IH~!yH9NG790lL@XK;Db9*$w +)VRBv0!HPi7XnZqS@)tI2v$U?GSfkn0@6aWAK2ml*O_Ev%-NU|;x000F%001KZ003}la4%nJZggdGZeeUMY;R*>bZKv +Hb1z?CX>MtBUtcb8dCfa*bK5qW-}NhSbUQ2U%2b?W)5KHd&Pm+HyN=_Dlct@VhoU4%LQRo6g0!ve^uO +=(0)QYz$#&YCyG)x%Ao$>Ue}&X?mF0z)mFc9&vP5?jFC^J~uF|>vnz;w7xL71=Y`^NY_C8Lsbf)H=S) +MILv0ecHVICf(>%MrSCPiQTC{vloMV9x)`&FT`G*0^BW4TIXhu>VORT!zXkU2aGm)TS%I@FVeTuKY>; +`Q6(cgN8`kAJ!7iwk}E`-#(SATux%d%)Von-jbKyAFj?!*Nz@JYS#~-5CM?iJ-=Hx@4*594gQ?mu27aRFLZTPkzeSYn;1tvHhHYK=U*=m>;`%TA|*+AedT(5QU~uuf^M8$~IwD~f;-o#_1Z=; +P_hE1>#N=mKAWjd4-rT@Ybk1P1M?0$N3Y*dqUcU(v-c7q8!+{c!SQ^yc`7qfe(F;azwqG_-><%`)+FD +7sJj;@}Y;HkC8bj73%^Q{-NyB;B6ae<{Xbt%poa5PV!H%{GimJC&fAz*?zAv5?|APD&{lw-8X32*eF$n&PD=y8lToh6l@mM9Q0Ph4u2m^ +#peP5MAT+V!GGiq_=C7DecE)J&vV(SXG)-V-Ordx4C|M~Ni+k +OIbJutla>IbVQeV_HL&reZ0VS-uATOaW-Dl6VXPVx}2LF)3Cq6>>C5m~{;o=y2?G_QWd*L@5nG#~EUg +vIkcrDFyloRFit4y@4E;8tj}H6rkuzjc4x(PXa`Y7V>tLsEI0o2iXdhQ6PLoo!;>{g}BOi8X|+Y7%;x +JjrY;JlZ$spA78(XPLF>&KJ`4y=c5m2Cucu)Ag0N?_P#lO_320J3X&|l(ov$WWHer7(bK2TpFL`I1rk +`Tv!o=cf~aBlKJ6AaDoMm-5vOxwp2*o8G@ua1a~Y>_A*J}u^Qp9%lKRt&0G@tXlzz)Ugmvat +S;Njo7WIPtGnr9-W>>9KtX7z#?B)exXd&Ed>l427o1vx4r~!Q*imk`rY7?V948vTovM`EK;Q9>Np5$0=UaejrR5A6 +TcfR3IJrsrGj!6M9WdD~-S;HhRqe%>Vy36=htAEMi8F=d6R+*uEHVx+kknuLA1Q$SQ$5mzYgVPFj7Wj 
+RUEhRe^d53iQd=Ab1KIwPE8irBGEVWT*T8UNa7~L4~jDgqft8i^LQKpHW-=T~(c`}#>~s9sN9VY2__L2BA!kE6 +1Ay`-HUP7Uz!}C8xCREb(}yTPNDxMSn+iENR;L{t&|skg;+EuO?UXVG5m|=fHW299C>C*6Ud+4quT!@zDg47gb +RB+>}No0n>D$_RM>SG=aY#I`{UTeQWI*Ll6xw_OmB#CM%9FU#Cz@ApJ46Bjz{TRq7oi`H!7 +CH8jR4mr;XQd^$V%c^{!01u+WsHoK8fBliV^b167v37eWa8l?s3kpUnuxkoEA4iJIEYx4|i7tHk0*-n +yj3Qik%0=lGfs@s9FiG+O0IzlxE%ao*SrtAw{-cPI;yn`C$^S=Nvp~4;bMxMi>Wxl%&l|by4Uggx*N+wedv&6%O*RdpAKoniokmYwAY5Q1+Oc` +P9Lje70VZuL&JI+~eNKoz+AO=?|6dl$cj;zG+{*eCpawxiw#J)&mT2a)yY>NCz`?WWC8zM+EP +-6Ybn(M;3P`EEX)gV>k19)DPEt4uHKvOU<$qM>2_PNZs|$DV_XD<9$hb8a47}*s-cQt?L{5IID>Wi)L +vc=S3q(eV5%ny<&wc-`epzK8!GEXD7aszU+7qYz)f3irZuy4rPq)#4>03OB1^tTPz5w14x16aGbJfi8 +_ba(ug&9yA0D)-~zP3G5wuzOFBFIDdzvk|+m-?W{I7dfO$lHP?_19`Ja7fsK3<()t$6D^tO>@Xi!3Oe +5j763eyPk7uxNS#60m;zZG=k)OR8@8c?L*V*r^l3~4aK*)zlb<*EFz*Ci#D~bmM=SZ>JaE&5dI30nLT +)~Pt5BJZX4h^eM7cYrfQU(&JrANwZXoJaCid8;sG6`hIK9K_;l51tJsXB4T(o?38 +@+q!zx1nLWab>%F`O$hxSADh*2NO3;Jw6{22SUgA#=(lXr_vN3qKJX +aF=>C@&IxkjO_+0V37MRq2^MI08iyL>&-TLVl|D#cDoKD3tD1ysjOf5z#f|&mt{z7`Tfkd(JfW5XeSs)0Gy;(>ApPNaK)j{wEGtf*Bl&@+po;EdVleFFXg*!F`)4Xxe-IU!GX`mr*Qwe>& +d6@c`ckb4rO})5-_Pe#@eQF!0gblN)S7|^9W( +STHD;(*r!>@y$EvG0ABp9Uo(TMGh{nqS{Bv(B%a5Jblrx+YAm&>?QR@bive&RU{y$65b``k&gQ+4hww`o*lM1F~fT-FKV*dAw*p^Rj-&0nN{?ja=L$j+azQ01vx6>~;zcvA~hK#Lm9K5pm(BO(Y&y-qu&<3T>xo7W-23JFONTLliEzHam4caAUwS3Hh +sO4_#1`rqTJfL@SWiRDS@M;mJNOk!p=v2+*ZFh)55p_x5z&lWOgT9lCJNUHix?Z=pBb_PqmxIG6muS# +ks5ANA7tJ>|U&6MI3tUhVikISGODIQCSu*Q@1BUN5x8<+M?Ix7_Mefl5?ny4DmJiKX5|_kcBpAEs_JNTPJr5h3not627N>->nP)y#~o@b#X0N<=~g{?1;G+Q^e!qf!ed4sS8-5a3MNiH>J?j|m +9AH@UL+D7(^-oZG@#)IdR#|JN-9!#h4Y;dZ^gQ~~0TJN16eD{3+6rR`RZvX((6*Y1;tkyt)QpMldrO- +p|f+03f|EL)o#kUr1zwT0*8s>K?Otb$J^6iN(yMaC%Ki>Z`qr~5?eh+M`#>`!sAU0ZJ0Lo5HUQ=Lp*D +!H5;&Nxqt>Y4BCgZi|S&dfARD$*E$33OWQ% +j5TIcUtYh!OY>8taIk}N3_w|bD!?p->bLw>Rr9fhF-Ie=k1_*tewpfKS=8uEz3AvcRl34Tv2#$bn99h +eqM=J{g$C_3Q!K|EFIPt@lWvDT!W#XbFf!n_L^Lp#(nLA7QrSsx)yG6sQh?C^u{%%5dOuiA%NtB!sN$wC>`wcTrP4$Rui*j8lTy>9u?=L==9Vn*!NPplhkzvQtn0gH&mJGtVfF +eAE>oI}O}Jszez$+-umagb<4JFaA)aU2S*|u+M6wEJjymI8DtP(O&Cwv&A4A}+rkHg%VW!pFJwn^~ss +4U7f~rlEs;;=#4fnd>^%i%pcEyg!cB7K&w>m;Z#OrPtqoq7z3tPOAmE`C2e^a|OR?6p60j-BZ{Bp_Y^T8#@@oWar!Nj{ivCo;V +1?pBqeYPD2JoZHJ>m~)~-Lsyq)WlBlcXyl0j5NBJ*V;9og%dxmBBHsz1ey3(Ms&16x?Z{6_p_X(Xk^t +hBM_?dbGK1IwegPe14k-4yjNqT_;2lR~ANcv~2Jg68dND2WFAZ^D0fr{s8(=F#u{W3Nc0$)=-Q^UgC$M+ll%v?za2+N~W~-}lC;TaXW)s?d3 +=>^VYyS3Ucq&&EJ={&1(&~QPH$U##*9!>m(}zgFp7Kx$Z2wWxcKK|a^?xrPQ@`F@KK}+#O9KQH00008 +02@m7R&}Hfv=Ipa0Mr)%03`qb0B~t=FJEbHbY*gGVQepKZ)0I}X>V?GFJEM7b98ldX>4;YaCy~OZExJ +T5&pivf{;;Io61@Q1@4M1+~Sb<+MsCy)G3m}Fa)kd?K0MiI+F5w8zjH|p5cpL)^@Jww+4Y%B8S77;WN +(+l_beqRdOp;YuI5Yv|tCp8l~=7tu!lDTg$DpEk?mPwK<7H90kPC&A0)Iqje$nW=}wa +n~P-&AZPm==3=FDk|{sP=+&N@8BN%+kW%_FCjnvp;snHMDD4+6&c@J-oU6nKwqzE^lhq$?n{+t)gwLV +!aWpk}${0hU^>mY?Y;bd*cMLNvXOc9>@S;`_3xW7}`;8cb4t=y?`~JMQLFqOjB-+Xz!&~E%}neMQIrR +SE?0kbM!7n?QtzMnw{Lkk{ZUq(l`O{Fn28k+gaP0^=h@1cGqum#A?+YJHCQvD``wG%<7Ln{_xTTo>VI +>w-R9>Kg-@2#DwvdcGBDjNI2QUj{KY0c)$jTF!bgNtB>mi!@r`C`%YRm7^o?6_rM1!-`^n;THoX<@ed)D!+?d6}YX)q#FnAkZm5ER0Xco0r`|-r`a*uHP*;fD!{9@d&*K +t0KJzC|5^9aYF;ri9zuKsaV{Qc_R*Z<{;_=-KpScJi%e0p}9VNPm3u?)CeR*UyUs-?j&W ++WWXf_iYQ)i@vRdF2?y_V!q)F?yNp=KA^FygrA5*5y(o(1VzH=1J&C( +_G_?ly_5AAEH^MyeUq3{%g(K*aAf~EyGs?BH0kcc}2 +U#tA7rPvEB0pxl=XDfaF#+Znkti +%!7jn+$kWcd*O5-=%5uKk2Ul%APtxO*xnr(GQ5OxOjXNPE_sc_5xh{E?W5s+sL8$tM(q0Kt49Lqo#4{6TI25$Rig-mPJ)H0J#149u>oh4K|JzXs`Rwp!e` +Gx*|4a1jT{O{M~kpk!3i2@qC{6c@9Q6O`-@8u9nKM+%R8sRa?WbSofSrXL@BjLfR^3OrS*>~yBz|GQA +m?L_}ocnQe^c3cvh`*Z&aR$?QVxt4R6GuY&q=^`<_VPCnAwUDa99>W12>Q<=9H=8;rW2* 
+CrRywx_nCtYW2YJ^{)IsKJW|WHeK9EUjVLlbmAS~NzqR}hyoBhKqC$Qn9`^)8Wp}JY7ft(7)8Z9$z#~ +SMIYZi5fwnZC8itH|!J7j=nipgj-toG&zSe8e+5NQiYDHt1i7lUFr8|va2@wo;jXs`lZKz56O+ +4-14E7Wfly^nw`x}{VGb_Rl`t{?t$47*^_Sms+3*c6xqjEs1IAl4G~tXK{&JY(PfE48jIMW^d!NW>f! +ZH)TX1D;;TrzF?!x|5Yr`OP2ToY>fB&;(kK^uccLh0iWWy9wid33onbj-&NJ6Z0SqGB{Gm(a~M^}ZLR +zM3i+;Rn)}_r#g5WXvuERF6>AD&-O#Cr@pT9?bloGWwhpTg +Q^OjyO0{?>x`X=?f%i379ikP-hgiFReiLfm1H2<3f$1k0m>IRca)P76MI&x6dauIVl5iK{{?d+*re>m +a$Sf*N&$}B-C1#T!)B_SJX4M>19(XuqR+(*f}(uqv2v70Z?@ybgI8ht0W~&R4yI>B9r<}m-Bl08t +VnslfUlU^Yv(n|{gX^j{detbza0M}c2e!x$t}C|Z<@;+^6Zs#MzKh+h37URta1OZLlB#d3w{oMoKCa% +Xtywueq@KQ8x)geaV^)pGdDpo$_fj0j`-M9L;2X>KH|rlzzi_314F|Y0xy#(FUxvBW^%2eIe%LixpE{ +k)Jxivk_PyARcn;Zg0_LD%{-H1>V?GFJE(cb7OCAW@%?GaCx;^U2oes7Jc`xAk>H0Gm5gE1 +@<987RYqdo$O?rplPSOC<>XD=$MTx3L+K9DEi;`T#}L~TTU{&vq78K7VjbNz2|<>a$Tv~utMG4$?|R@ +-CL#GS9%-1%v8CQ-72%KNM^FE)tXhZN>W)GQJ1_(#A7BZBUP!HUwm0U@IvOd*-EU%8?|1@lGB2iebl1 +9EVbc9VL$OJ$6(hg7llr=1!<9Q-~4j%&x`cei+|t7%;As8Hbz49LDr_>g(x3d@wzEXsF5~hF6y*@?)6 +F*Tz6Y#D_-7-v{uT*>`s_;&A+NTUCR=`A4ILKLQr?uf;~y_mc^kSJdpRwQ~a5EBo0rO-n3G)7 +fM)dBxr-BQ#Sb!?~9@&JS$al$kYTg2x}rC63X+a-!I>`J9fBizROglRBeJXY)DBWnLZ`CLX0WY#|^c* +q@Wve~Ot_%&bH$jx;M305^QgjAC5h<8vHLvqIhr7HwACKo45Xj?$_YOZj-jyxnm;EY_@ozkt1t682#w +v;barT4rj8T~VI`UxXOo#`o0TjC<5{Mf(%=LPh>YjQE0qDS-GYt_^DEd`~cwZ0L{VfJZHHQ4u07buxj_TM~vA<92Esyf}(7}GZ>4E$Jx#G?d89`S(3 +>lJ^$_e^6LEOtBcf;@)j=F!c03Pa=iDH^}C+V;P~|B{l(jh_wO(M=H=4!n@fmdF*l~31_y}Ct +U+@VO0?DOIBSa&_z2!Q2(Fi01tGYk6#q0d;$!FD{&yh7+9Cm!{#Gv(*7YLrU0HKKu*bJf;EZB5x%UMh +u&%k`^71EA&QCdExEqV-X92gRW%ov5J-)lbisi5^8&Y~JqHRcH)CG2O9r)8Ph{aQVw*ZG0Vf-ewyPU| +5<_j5{Y(VQYaFKClvLskm}Y33rvVeK6_CR6BUjr}|~92(h!64e6UqocrlYYzQ6#i2&Jz-9|!HbRt7@D +T*FaJxoLMr~IM)6|sk4}*$ +Zu2PARIDVZc3lrgQQM06WlW%A#49SZP?Vlu@6RdiU@H(J9ScYBx1#(S~#5zmhKif|G3LPL!(o&L3ZeJ +%?DQ*mJ&$Z2T5C6h(Kh?tU-E%k=y(q#w`+!&|PXc?<{wiA%yy5qVn49r>ofJq0lyDNzS}!7;GbJRg$y +YRCGD4E>ShTHnCOfxqi@Kk$xa_aR{WlRQ`)`ibUcJJ8nSyQJRseIA2`Pg!4F8g$}-Tm)ykW)Q +|@Dq@B9Ofuq*xuM+~jvU};YMSml^ONlh5`UTigOL`u6T5Qcj73{i*)nxv9R`pdN+(s9<$&V0 +Ue#vDUKcI0}@D167UnVXS1|6<@nLhB^1(SuEwcRmIQ_e8`u@hcZqRhkPo* ++S^W@V_yu+!ZsQN^8zgih_u1@+%zmJsf9GR~>LNlC8ll?g9CL+lII(J|aIx^Z~HGN~2JBJ#1FoHd>Sv +HW&-C>_P^-@gmj4Mcw8L?;Y(>q?b*F!;z4a|x?BB@iQO?fZdK`$=dQT5J=$ZxJdq1pLlU0H7Qiya(S= +clp%yA01g`X`ipq5o4crUB%5R+|gMbv5A?2E>-~F_UbN$hSR5)aUL0$u>mk5P7bCXJ`x|b?!KJKq}5L1K7;mfGk^cfM?aEw*nvDIz+>JIpd-3GJdDf>hd% +f=_$+6`(Z`1pH7|bXMqV~!q8_OP{)$1{;CN +Fe-MaIUN58}YsUVLD_52=E(r}%PmE`Dcf<94;sIYL>_%1$If$~IyTt(`s1;~ubeFHK2BiDQ7_D2hpW2 +JterBg>*o_0u(D$XiQ0bGS-R?PoM}S<&PoqLu^gDt(%Kg_WkXXUw|icuCa?f4d!n3~Ei;branl-A5Gmo&`SQeW5z@%}sw{Mtvkw +$eBN%e;a*?`xQ+s!WHgqxWacKX9I7>#1Nudy8ClI +m0iFs;sMc(EF)J~I)Z;beeIv0+}g|glPgzNPNMxgq2Zwh7q +Me0c^0@eFoNgNjs>2^>9`l-IL7`yoR;O2{{T=+0|XQR000O88%p+8sh&krfdl{m{0RU69{>OVaA|NaU +ukZ1WpZv|Y%gqYV_|e@Z*FrhVqtS-E^v9BR@-jlHV}Q+R}AC_%dI2tVjl_^NYNnc#Rf&z3n#(0NCJVD +Xq#)9R7uL02Ko0MQWwjc1T7i_iFlcrGiPoTn?h+zi&CamsoVsj+EDG65tg^#2h;gXm0XBrvtbW~kjo% +gXtkk26mcS?$Xf|k6q(TG&3o}yXj`(J%WWf~ODO>drwJ_^m@y%yXAcQ=z+a(Qw2d6Im*n$dY3oawi-1U +Fl>aB>rx>HgF~y(Alp}%{UhL}?Y4u{0!Wn8KJ +5dRVqgz@qIT^OI((UqO>={bIS4r8#eVK({+4#l-Cgl(Z3C(;#@OP*+@Y$W;4e|Qpx0$tiZ1u<)wvP7P_d66wl;0qyDfKrTI}H=}PtW@=}=@HIV+j1ieo13Vm< +an=>3;)JrE5mcc|Br_I_v6Pw#Re+m6u-0`@KL6Hyq(|xW!C65Xa&}vMK4i7q1(KF_vjTqJcOqh-63@O +I{*^=ckC_ZuG^X+mtMBV=)~qCypwCGc9II*aYZ(!r`ql32N|AR4lkMXL92?59DoWnN@OASF2%|p)Wt^ +?-F{A}}R;7LQ=ZRe6t09ico^61?May%iR +<}&f_=-rejF0KAgtzZCqHuaoC6kC%`;7Kh;o*K0>D&dk7(=4594y)t%D5EoAk;InW9{cFxfZaE{<*uC 
+Hc1=ay%u2*D5E3mb*E&{sJ`2UQZ&96)%AiiTngrK$m>KIW7wL|nrHL=c!btaGkMK1^s+>GXzvrq1j!HXIEU#&faeG;-XHtQr-*1&!uHj^emG8ybGmIs9ZhlJ8QjjZKg4z#hnV +gV#;TW=OhZYdSXM42T`NQ3W_$n$mS&yJEI&t35(bFS)yperpBGQDIr@RfWw@cV}f>zhmO5f3UFp9!dV +_W-<=rSKvg0piYDfi=I6%E$`$*tGChM-xoJmw-fmKTl?yYb6r-Ci)h34M*PjszbJ_=NcX&mqYcLY#H7wU;n)QJpJ-%c5yxVHl6(*4hF$LP)h>@6aWAK2ml*O_Evl&QQ=_)00 +6iU001HY003}la4%nJZggdGZeeUMZDn*}WMOn+FJE72ZfSI1UoLQYwO8A2<2Dd|*H;YkQaM!w1dA4h( +Jh)}oo%q2#Aw|XMG(leL`Q5SQ6MR2-LLNqNy(BgNp`pOi=`nsmvhL&ktl1a45YF${F5ofA`s59Y^u~q +S?JavyC=QY!BWW*%(@oU19Ox5u6&zy72;F_0Gt^JGr3j( +nm<~)+lTab&td5VQ42`h>uLCc+;acJ=_9W)wB83nfI%S9xonCoX*5q9IJj++k{gyW!@?1q`~*as1L^39Q +Z+eOU?kugz-O4xyOnXfay|#CaHM{GNEFwyQ7PAG^clo~^>@AoUui@;h&<|p?kbqBO^$zW;?vV_BVbi# +ooof6D3Vfq5}L;_%Z0{`TET*YTydpDmT`Pr1H*&T)CHQC@Wj@7>~cDWt0nXRh80tSEefhLnO;qk-+!A +VSJ%_qUuPe$&XeoO?Cjmy>vIs%C4D7^8W0gGf-uCi<`5wF)G}h8^Ni5nJ%|+!x4V-9InLSzWuuK{g+zUcdFPuT +T_GJCd5b?5Wv3MdhU<3`_5!2uScE;(1c7iw@$B=NqCyj!w*q8wl3U>FEt +Zs}vcG;BA#btt5RE01dR|^+J3pVRo@2VH +^EEaV=6q?rf}X$TmKeL4{kavFCJ+{}MkzIx3xM564mP`UCYM)m?u`v7ycCzu?&2tl4JVc1gBFs9?SW# +C%BZ9-uD#6BWU>5~pSo-7;HrL?1KQupp&s)itMwek;LXI+?=v-~ABxGuV)E>7Tlr+#+lBbyuf!{MCDu ++<)m7?Qo;?JMh8|gB@I>O)uul+*pR$JNqd37f?$B1QY-O00;mZO7>PkZ6D*(0{{Rc3IG5f0001RX>c! +JX>N37a&BR4FKuOXVPs)+VJ}~5b8l`gaCwbZTaTMM5Ps)ZSnNYV0#0tmvVC81Ah=Yr2Pi^Rj(xEleXB-}kCHZz8f_sfoxF) +A;?unc$wcd|1sF^VNL&`03$k@l5W?+9SAb$X8lHM3!}Fm@FZF(clz;zwDxj{|2QI#E-}$Ej5HuXJB_u +gx8E04W|`LQNB|3N&LN}CKsw2%b#m$dM|HmIBvPCqI-_YnC&Id6v!}@T+DDbiB +Z_xvKo$pu^L=zUh%Cpg1ij*Q~@(NKKcUvFgaOcCVsWwU^xbw$q{nGtq85&&wbuKeganEISA=;xd~NZB +$6&zwGbKVJI-Q;>4+wQoMo+!bE{AX$pZKBc#9e!w#;}`>hdMiyGknS+{jXJ)1v#nk5zUA< +R$HmiP+gp5WGNv($CM|zQ));BsFYT0?7~ts}l=5!QGyNv1e<_nPIwb4nZ+%nZ52C2_C&UkIgQY6Q&v0 +hH?z3h`PQLo-FK2i<%#_?FpMRIns{o8q9A*fMcI>eTJX+te_Zyb;rJKxW6Yp!gHiYe79r9pPJicRC8H +j*A00Fu&6Jq$M}vQH4pl#vGX`-oecci|l +()C)AJ8@47!^X04povqD;HMGPzFFx$UjMwlcJV1kIjkp^Q0o|$RC`Ve>&~w7k(2hU*Z?kzDcEdF$) +G>5%9XlVBJ(w*m%kk-Uok#-M1TXl32ae%8$PVrt4~Fl#vdVhxfiYF|qz +&xn@TwNA1N2zHKQi8x+^!B$QB*279>8uE(oGeqEJ*}0!wqO~9SHbZT?FII1#f%P%L>@yh*66@6aW +AK2ml*O_Ez9u{xpOA=`0guhumgNhHu{H2RGOl +1-79wOCdkjuQT=HZn=WjjAfSR_5Vqn=R`+Pb>4_=PJ+4_cULxlWc8%7P7udlZE+NS|EQ*idB-Tqg9!2 +M7=MdkI=71M*{xNUQg)r;=j*Nqo;p7efsCKS3mHhlWfmlp5|$)mUWV6=66=hBvU2-e3mS0{&rTYQZ7> +E{Hk-^>P1rNR!EHRU~rhE>G(pe<#PWcKb#2t#h +rhW0abxmq58N~L^x;EOubaKjAboP%Q7$fx~2J2dcCToRuLnls)^WEs*LbiU*}C8tF#JZpzX4bwq@$`( +zCNSr%x}UcW+)qFHbK{o}FBrM6XU>o*Fj6Uhm4Z_47xhIN?M{<9hcDP4o?y`36|sqF%?q^igrWu7r3X +E?z%-JrghU4@$&pm1GIR6j!QL;V?wCUPoz?T^pF@cZ4DpVYjH-SM+Nb$R??l20&6pNaJ)OGQ$No^tRk +!=OtGvQXlJ54tE0L+a-kWQJwQB+#`%oS(x{mYK`wne;g$%U)E=|8a?{qyv*x-nWsmv&$p$*x6lyPoGPK2)_37r)i3DTD2g(Wtt +bNRaI{QiRf#{yN}XnPxj)Z=SR;e%m~sElqRb0b)_eK|3?i`qs8Z=_ET;bs%v5qi{3~TrDG;doYy;W|^ +@xgzJ4Y~FKQvGr+ph?2UFO^3OTZmr?`WB4pjv9+nyQwdz2gYY0P`^mt3pc6Slu%H!!W#L*-T)4dS3aM +1VQkWM-ft_wA8{5H>Kgi`Xk#Wcv0s9)km2LP!g}-Jfr6!Pygad83-#B8WiokR28+Uz?`L^A3&@DMh#6 +7J;6#SFZUC%yMn>+4FS`m+y;Cj}6q)ELOy97qB_8ZiBp#JNt1ZJDVl( +W1>V<6^pxu}S3#`C!Vqk_QZbHo~KZ`tG|Y=pUxnWpjkZ!g91u?WI2Xv7pSK_b#h0`c+l{q#vsLnCtHUysEIUrom +LU88)xmd6Zk2Jo7ugwwIc=*f)VFl(ULj;w;k)qI2iCgKsb9yeN4`%V>U(oy59X*KG&TQda8x&&=i^K& +E1YgO`ZS2?@3qNARv*+$h@d2E#+nO`deqh$&lYJU=ZQb|D@xIl3W44uUJ>ACpsx4(yZh_%Wv!a#YJBB-uFdfg93~5U6#TG5GE?jZIaS=#9S_G)I$V*uj($J^iccarWvLb74a~lz +;`=!V=gs>|pUOtF{ID?8;zr<_-*NEd}9;=>Rs=5SXBYq~exo;TXf%ygPFP`JNFnRTK?5uWXN2!6$qlm +U2go`dRRA9T0yOBmQmT)z9O=9Rfh!cZ6Wb^)^E`;7x`Co#9K5Ie)_Nvdf)7a^wNjGr*?~$Ql$b +U^{dXGPT6qPno3PQ;-gTn6p+Ja-P6>NwTFsK-H*wnIUU9qlp~QDMgiU%OytiWRzsNNWeyee*i21zm`` +ZI29Tz%o@z+j>V8!)jJ`OAc;<8l1(?@n|X%vSx)l+Wb!OoQqapvhNnqNOB+Ym32`0B5SZ4&tF`)<=$b 
+C_G+7FCP(hObFbP%w-C$H)rFIdme@yP?Le)E^8l|7q3ym0>#&ZBlNu9Xk0Pt1PTWO|75vMWjXOP7dl-KkH4BK$MEe=9Pq-Rt-ZeRJxuxUNW{x_HV4+D +74N)FHVMp%wcvd2E~32IW$>ao$8~7z`xn}EOd3g(QES3lBO`jg8N$5Q$s^6t3uF`w3vidpfJ>8WW5E+fKFkp2u)$?P05Pt +svRyAK-;5G~(!3|ii@4ximdVrGxpxQ>tS +ePV>6Lc4Q^vF)SkYJ5Rc?wEHXqj#nItLt;toCMSqJIh6)E^@J-Dz$OvC85KnROJPX$o#ZXKWocyo*GS +aMiw&&^dNer>{Fw_hqZWhO#Sj^c+?kRtmQm&03*#(wTS`#;JZWRUb`yV7ieRI1RCK!c?cqKM6#yHHvX~av6Xd~>Z%(v?+lq+UUvzauW2nq)G+Es^sRlPfmNEIflz$craM5A4I +X$L`64uD6 +b>)DXn*CnwRbi{1)RBaMuGnMj0`BwmIzq3R5{O})H`t8XD*1cJ13s-2h8rgzeDNX${v8F=U?PHO*y=V +(mY&edmJay=*y{yWrNK<>d$a4*QaR4_-|)uyaN6riqWdc5PSywOXy#UsNydg0sK&aU$9i49q2v^8JRw8~B*PzhKg7ez>a5{iD_L6 +;ZcQ>(O{{VQ#R!<$!AFtfTEfJk({X;Oy1gi<1{GPTvIG1%3UN2`+xbAX|U$6dLyH_#nW1-gB9x5Y>SK +!5nYm*T-P_b=Z +fbq+N5B!M8j^JLskn(w*E_;-cJ=_T8z65FW5&fqRzR7rG9(IV5+LnONyPqZDj^7ih?4?qvOu8LeTzZi972OA%nm{%Kozu9@9{t+|u1{^#4(?-}3Bp02|gegev=l7>qY +I%)WZHizpM?#fWn$ugk-Nb~X;hlHR0pqd>H=}9^N{h=l9O9d*`fdXHIRr&cEd7xCR}TqYjAELK5!($! +xoL;VvMohCB*qZ`>zzUjw;RAP)4Yr^?T}Zh9Z)rjptTzU(0Cj-%$jIg@iwPiUI}h=kMe`YunjJ7+m(I +;T?`_1^}s%=#ursI*8K{NfN?u5 +mGlmz*k3?fIdxd*9!x(DR>77+EOa+ur-2qd-EPEf|&i&E{AiFhW*4wVs2WIPV#7YBbh=iB(s%~smU&3j?< +SGn}$H5Hq;y9B|FWvg2=B8e>mze8JyvKRD(4o+J-R=TjfFC{s1%0tJ5D(-=G28B$Y{!QX%V1Tr0MCIN +hYCJ?5)?=fK0Yo`@CrAe1*X%J(|UcUhAq6r2XTnM62hFzE~JhiRqtX~b5cA(?xYaDaBE`hI#&eGie*b +K+=Im2Ki;G*N0uug{KfiTHX ++8i$F1C&*@+Vf$KeE|U}I`OLfo=vhYo0Q^@U{k@V=W=nsh6*>1VO|~Isx9xaD;h7ngEfXDuO!&A*HFM4dlM;hzjmExr=p4 +lLb=)%tDi_QQ=V-AArCWmH)k02eH3uItFn9szW>ia9%-e%xnXqVB!ur_#_MS5+h7U;$J{;4l$9e9k{! +?lxOi$cM|3^^foX4xCH2yD3nn-iiGL{ +6cp~ZW`C_nZ&b&WBpRa`DI6u1=k;*K2p^j}ycJ3ioU9cWyB#o5g!C{8b$o^O%AL*4acBz4z^5GH$Q># ++N%t8tp1}s~yL%wCIS6_XJ9p0(x`%uy(*k~!RF7^8%5byS^6;T!Z~n!YN1wE?@lBRx1Ap*N#;nEvci? +$UX45NWckEZ@cqi&V1DaDWUr`Zrfnb_cwN05@VDmYvtNGjyY(g4F*XB6|2R*}BSP<92IQEEaM{+T<$C +ZPD=BHdRr5+pgMQAk#nRHe3hzq| +U_?dDd9;8;vp+q7^?728l98LDsaHZN6*CT!7lJG*NXJP5`!4k0(l1>cKg4O+wvc3mna*;ci|cR;=m# ++2Hn7fn&rr5@?T%ElG>c-@hRk`ob48d^H@3*G}HvX^v*brj5k?kSjFWcP_mJ|j_($wt +|$~Yc5Ajdt_cU})tvlDaQf~H=a<^niKW=Fr}SeLC#S&Vj9v`betSG+Wu)Y6^qL!eCZx4(BRYg@C1;su +}oIJ83Okkm%iVp)*mzz^wKD&iehr&N9kv->Eq0Mo+TVXqw@9dN6dp)et>Z&_O< +1$7$p$4gQ;JWLv}r?j4@{muB(BAte6BL791jQ}tgxbHp{S{Km3U#b)rj*3J$qn`%|}IsAaTpAQ^&;E4 +6@J_rl$B5EVE4Ujs$R#ar$nVYQGiwijryXSqb&kA|27RdUEw}^vP}dnN_j2O<_=s{Jf +kHo<-?*;zGIl0YRK-x!6y?#N^ChE{oteRGVCY{5dpT+m$lO|ChAsB)SK>WKi_K{7Tccne75d(coS( +Jp!_L~ZKK!)FK8RUs2t-X=F7M7Y>&Dxg!A>&IyVo%r>On7B<-+>9%rWs?)0z;3-djcBpWLQ3UMbSt8{ +!P^TpMzaYfC8Cz}J@0*IAQAz$+{{gK+Qh>54a4NS7K~2K>uW?BLy7Qd&L$-Gt&tG`ta@V_ffEvIAEM= +$lVAaxZilNs|)@>Ak&-hb3#Xt^8w|4eRQ&$9aCeE$*K)c2Vw$iEpA%JJWL;Vu0DCk_?<@vmur=xYBvX +?Sj}zyBl-`499;>uxIWP6YsRqF18Ciatdxndu~)^ugg8OU(it0j0_%G94E{hxFZCh@P<7J4wgN3<%^n +xrw6LgZ@;erGrMsu+DmbGCC~>MSPgqKlg##*1%saGO12%1SeIG385j@6mYRDYUJjBP!PCnRiu0s+Sc#MPXSoD=UZ_p+%+?8}3W8rOJ;!sB?b{yoIp+VBb&=-eg1V=aOy%;n&?o(1y5 ++X0wS2ZK_pi#gZsh3yX+WIp1pV9l1>0iE`{@dl>%@_R{M +$^kj=GpjPdP?o-#(|p(=vS#+SI2FE-LAbu2Z74H10^>(mKF@3yJyfJTtnys~=IJ^J&{gXIoun;pK +dtbLbqT)ewi)%fKc5SgyBZ7~F*EBVheU`u?QrXb)KL9)K%5gKor0Xc_Pzlz$7PV +kn_FDz9>De;tM+^^-^{8RTF5HWw+}XsoVkur-$i)ayQg{Z=k^vFMuYyZap3Fw@cKo4f8WGMt0%E#?yCS7O*ZSo=sFv8(*|f@&=fh6r=~dU3~8+#5Ux;|IE0yNTtu=DD6Yb8uO6{JWMYk0Vzt}3dXZ^8*P0W7!Q8o> +)dLi2tyXmy;Ljqv(WvD-Ig65TCsQKp4T6S4mbVM;jyy0FmOB3<;4?kYpERY?r~QruH~RAt|z +;I`s%SnigU&PGYG}^yzR9+{P(bS?q#-Dj7)xK=RGp~`*6|2ftwxXh?qJ^b%ke;~Gz>p^%Wy?x!9t +!(4W>&NyCJ;c*f2%kN!xM7TJ^|AD;{)dnNKLlY>A}+-#ZyORVL5w<{XaK046uz +39sRp}=EBBdZe8p9ZoQx^e(&p}VF$()kr@oyh3@G=u|EnaHL%8157=Yx7*}Et3VAn*o+@N|2*Hm|ZaV 
+y@8dtU|33Q1~NUr`cvDh<9i9XsT^ChTYRv)(S+Mrynn2>-H3RJQ1}Yw+*PW6>XI>qwoA#FPr8+w08z_s#;>NAKSMo{JKwSS&_9R +NpoD>;KQ8SWa32GxZE!8ougl1)bgJf+tkK0Iv+N}UFJw-q;S+`fo;$J(;F@n)0s0qxQD +mAo+m#2tcA?-Cu(@KJW6$;7iW%*D=`7jhpULPAzxYEixwj8cg?k+B2#{b;cnO3(l-h%_#HDyQQZ@mv; +{Bjn+5+5<$OuruAkIUD{{v7kknj>=Nuhs22zNQDgJ(;Bz{nmVfWQxL=`%$I3^qIk{EEFEc4+xv9~>zy +hXz6b?LLV9ftIBbVAV`+=?#r0PpK;fc;GnVWXT@u#dPB%$O-!J9gQl_y$6xXXwDk6h?lsINDjYj80>1Ezr}bP5n=&3O(!-33r;XbW8_(e) +OHc?o`Gz-mZQC;h5}Fyq5b92m24khLLf#cMb=a+SyCNNh6Upd}Z$!D*NaY>dXcZCpe$x3^sa^kW?M6;&mtawuV;-|`ZoVcft>T +Q|IxePYzH~z=v~XtpNhaz=4uw-{?J*%7uY?O +B47y)#t)Xx^37otf6*C +__}xfe$^7YY5AR^On^TJGUNZ0~jnz*og09AJvJc`d1W57KylUOyp@qwqfhFV}{zsS%^{dG}Vxw&XKVP +&7@Pji49DKdNIjy1UYGXc9s_~&P_kAl!ZP_gT)m?5Q6J4#`4S=;Q +O-6mN{VC>ue-EgkpMZqH2zzqGKg=-;qNT +wl?@kp+foQS7@WNHV!3$#2d7fgPM}y5}`1RZq3LMsuJ!TO9Ni$AU|<`@}+*_X(#T6bJ`n@qJ=!%X@1T +23tN$kHCOI8K%&i +_)bkQYWehr`A(__@_MEi4m&Q6XR;+<6^HBO{Q>4bN@t$AAsKbr-Gs#33`TSCeuKw7p;16Xuh7ey~DTP ++AIiDGkR)(d55TFllJ(~=!H(`5@`<#{yAHhX{o$Y0OA)x6Kl5=1_{+|Y*1I6vF(XVR^`e$)=sNHDJEE +6nGa@cm+~N(ZAWCV!)qjc&M3PiYT&Jfr<6)gu_!ue?7wp*^6e4*JXh!rm$1^uZ4qB6_CP7u3)x83#i~ +0lIi`(t&=U&7^}K*P-6`GxNJ|q$7xfiVYptONB3(T;!aqlg0yK(Istk)SeO%nwP4 +2NXVGIKJYR*Ll9(;YthzpF&Jg5ec4wJrUT&Y{vP4p-y_lZ&VxMjFxaGfL_?apZTt?JpYIaD(8D}IFhX +mefwp434D&!Ffp%>8L-`~?a&il_q}swfnFwNee?Ole(>4L@dD;M^xn`m(Qa-RWGJxeI`H-`Y@m-B5m? +jZxl5`I;TBAZqg+Fbw<;$D0;X!92?UsHgu5B_?U?mG|e^EC3ugsMMs!OqTH3D%_B%;=zFny=%HQi +AlZ?n@O+o!RH|i1Ik>3-xRv?C5I}qo6_mgdinEw1Z3!8=ygPz&pLioXB8u>iH(0pX+#d*A1WOGqmJ-y +*53QrX;zhO`DPTU3H~C&ZaofP9V}eLN&PlgIfsQ>?mt4UwQOvB9-OyO^o&aRnfn)R+3P|H)DsnW4oi_7OE+w}0&E8WdSB+oS958I +DTCR>0fJkPoQAlUWP8QX@#F&r2he9R&O++ifGNKCMGs4sbS`&6c~Tuhm=;{MF;Nmz?y!P)h>@6aWAK2 +ml*O_Evy0b3{%6003(M001li003}la4%nJZggdGZeeUMZDn*}WMOn+FKKOXZ*p{OX<{#5UukY>bYEXC +aCu8B%Fk8Mi%-ccE-6;X%q_?-Dp7C&a*8sON=q{H^SII=3R3gR;PS3{Wtm0!dAX^1C0tzb@rgM(@$m| +_3eif)8kKYul#moE#c}}vP)h>@6aWAK2ml*O_Ev$8*Y#Ng003GC001ih003}la4%nJZggdGZeeUMZDn +*}WMOn+FKKOXZ*p{OX<{#5V{dJ6VRSBVd5u*~kJ~m7z57=TbXj?!3N3nDAVD|%N>L=;L(?9L!X8WGHM +Z;-Z{7?|*ZYvjQ4Cs^QbFN?mW#0PqOV?ZJ5K!9e@^bk(w4MV#yWkZkZxmk$yHJ#S +Noby}Us>O<_C?wLZ|mf6r>_0$6!!146Mr(OecH)rJqWJP?F`^5UYy^W0B8CzOeK4L`~9ysW8s2W1C?Q +pPYD5{59Yv0&9UkyH>NR!)S7dKV|ZvDdh)x1V)>z>=%2H^(N+n{O!=Q|DUZE}o|?`%6fv0WH`X*Yy@H +c^(_iV~Rz|l2$=f!!gL)8OHx@puG9ZG(J*_H~9Zh`jrllPpbV%Yo$T1iMQL}gKhRK>Wy@ngMdkfbBpk +X)iv!*jnHOY9eVAW_AgnJ;Oj(igi^i=f%pA{{>J +>0|XQR000O88%p+8BE&_;v;nqrXFTiyHAI`0lLs29#dhQiC!;oo-^0JXcMdZd_8~*Q|8Hy4qY1 +avGz~w`{qR8RA&od)gx2iKt)Ld3+Ewtu$;z-&>`=^q(@WSwd8}8mH?S8e~Y+#3be3We|D{)kIs%B=_% +I2;o!{Q?b|9(_eC2}KGv!ds7nH&4-CnGw3TM6&UsLnpU=Pi=54S|ueM^Sv>wOGO3jcR00OIZU;hbVm!odzk +`pvF!M#2MKWCI!jsZ;Tg3(~;A$WP4|xU~3fBSf`gzczZ?;<~UaOEjE4(_0SL1_t^8R?ek1oxI +(SX2R9Cjp=q53WzwfABWIHhLjS^Linnsf&>FGVIEOvm>hGN +Nv?LU!)VB|M*l*>iBT-Tq_uC_!MU?B43S6TGm$GpEvw{NEBwvs~36hdYZ}B*m~hVSo{Bte+em)YuJP*Otf1j*L#ZeTUHSFf*h21wd#4sb?I3vE(BcgCtnT^`sZI+23&l5sCM=#W{zYlrZbaP{EE^TC0 +6j0odqibfAVo}8(cM&U9hpK+1StCTDS$r=reqI^&Nr;x2jbQ(k?K@N@(%bN+s0353Xy_=u2g+bNR>@K +rS;NbcmJ5$hrfz=+<0ie3mm4`_OF+KssvRjt`*fY~K(KcZ25zBn3lXbDZP&Dcqbmj+B7cyygti(s8*W +l(AIm`@N|u3pNt0zZ-0+_kRk<3TP?>t|Qr=acVAG3jCoYth57vQhcW}@3cuGRdWtGZFsx@0aMY|Jr@( +;*ejaUZ^q~y4oomvLMcL8&T+uDbuoqQ_Y8d&&e{n-Oeo?RVF4b)%O&q +b?Y&uGYm=5N7ARUD+_s9uO7?V;H)R`Av>xqR(WV|@`HhDVX66!O?#0O>Gsclod?K;)d9u-0cHcpe3-{?l4g>tqA=dME3!(z~FS7KA19XSM1u!a}i0Jd0J&9*D?vvBRGpRiar6&7L +5m`((%{(8aAD}>h0;ZU(FZ*9pO|TrU7m$29$9Uw?VHp|R#vzxW;y{i9FkgJd{=GE^*+YxNpUIoUC1<* +K$_gErF1}Q#h6szDH%2FpMx3G9MQ9yUzMO+3kh6Q{v+iL@r!a`M=_d-9S7g7N_16>u +;#NqZ{yu-WZfTT~K4nS~PGb_!uxHhJ}ehL735L__$BA&y7$0HS1gIfpn5ay4FTA=!N=@c_KUHRf0ybN 
+J8`Ol%g!anLBlpJY>u=)SY>j5UGweedxofX?}Wj7r1#6I^^+KC5qvtVkHQP6!g|ImY9Lc +b3A52)B2h|O5vsW0~X)QBAo6zupwvwE9vSjnO9$3X-{2EuP2C#L?T|1(p=ef5gzZCT0(NJ2_eeX(t)g +Z@)r4Le)pz#eauLF^hwDXQ}X4?!7=QgL@@AChlyJyE;f_r&(T-(BzXyJ191794FBj#9_F$waOlIb{8W +0W=Rb#3R6*r>W_rHnxh|!*xH69ob!&e6FW=1cO|U88`X$`&n(<@xA@*ty0zO0*|EE|9Oq4CNq=Gg4UV +N&}1{yc_&+|FFlGkw9jD#9kVkw9CayVaAIEF^POf<2}==+dk+)oUCO|#uyv{va9`z-lQ^-1qyEV5m8J +~Jg82A&$#{UySPZ*~xJS+(M8Z|lpq&BffWilq$eW!fgB4!~4LvcgNxCK%OHs87SXqpe5dni+<{McR;~ +XH2(BA==Q4)-LW}+l@^Nk7VofJTasHJvn?5sNN{})^cG(1x40pk!^d!5Kvj61ZPunrC?rr~EPO}tXz= +Y@=b>eLr+D{&Qmfe74k4pPC3#=Xn}VE`RWb1h9Ng`RxSNDl;;&zDz=1{{k6<&oduLt6UY{`HxChXveeS6eD}kZchs0|`vKE#Fm&7jcrZ-$L%-T4lSl$>f~~@)kEj-MzZ@C&QD3FIv$Qq +RC~S=2wR=h;A*S~d^{sFb+>~ipyKLAim0|XQR000O88%p+8Y)E<&7YP6W3mpIeDF6TfaA|NaUuk +Z1WpZv|Y%gtPbYWy+bYU-PZE$aLbZlv2FJEPDc5^OpdA(U%Z`{Tee&1g)-5@YGSj|K8=0ZgpR7MTBP7 +oV#5kVkk$>Hu)Tn@__O5Ozh@BPjhUWOMbG7?mPWiEH-JNK*1xwefLMO$Uw+R|-IcoF_9%|V{ESGn@4| +E9FZqp6PlM>|}pl|SJ^|IuBBZ+^0wKFcgFd>_@Va3R;t{K5Bfi? +t};ze_~K(*12RC=rk3f0u5#PRT4nc&IFzk7rR#Cu+vcoF^NY^kt1~b%RoTj9{3@;RDuyx@xB$^XczIA +x_fX&C`R`z^F^86z+w+^7>cKR&I_t`>BH3J*PClNrE_9qQB9<()rWZNY&7(9z_ +52kDHr!>CL}B-n{?t_J{YKpfb(u88#cG%KzC(8wC0h_S}o3_Gu1bSx4ccsS2BxHgML1(V2l_tewqaXGeKYhIs_%Vg3EO}%&BH;k;(fCs`l +Xw@7(@&#)+f?F>DxHootdr!w(l}DzK=lPT*@(feDcwyQEzinlr=-Uu-ytza)aPKk7hTMNp|pD(uR~c` +shksOtmrqU%5@Wvx7%&_D7+2&(ISt|I=SIcryMV@#j5J|-qXae7 +5rSHNqUO9bXlwk?CZAK-D8X*UQnRrP+Mr;^Iyws|hyMj2{MN)h3YR@_(iQRvSq^W?dR15Lh6=9xv*p5 +7xoS<*s-At)OW62Vb?IS!r!=Z`&euT@SNA#cxhAIc?g31-DP2RKSGM4!m4oSWD^AT4%Dzxvv3_fsMELd!-g +M`dIV2>YW3ucYKX{QfJN(+^wLcsxj;@Y!y?PTe;FT(ds5|jG9xf$;9_(cynm*IF`baR{&pgncYI% +%O*X@6s<&KAs1hM!HM$i_$V2E~_QKNVpWpr{RFj>|gO)L^g_F-Di!?ms$tPkO=9yh}vm8B~>@!I^>S& +q#;Td#&>dWYAUV?|a6JF3bHPlEitr?pkSy8}#1${TQH}YL|uYb4Mf1>mh>s=p@R|GAnTXWopbj{qtIm +qgI%`duye$cT`{6$#0@MzGia3>UCxw4$7p~ijVBG)L +V7-yM-U9HLq0L;68D{gkA5kqL^G>Hw=bNSn}uBz4v}OT6~>ES43(tA@%GEP!hsLUwDFd(F>>_TCOa$BO>f3Kl^&E<68!H2#hv;g{SJoO*r{&CC)Vc1~nIg5_|K{t ++zG$z0bLidAO0R~A*;dnj{2Bp1mk2F^aF@d&)k^PBwQI98!rlop?o`i=+zh2dmRL1SyBuA~e42Mos65 +@2d+6z3li2D)(q<1tJe}cZ}3#xwRwot4LYg4iqQA#75B`eK3Q +0`{Sm~o|?L`z8;5U-2vJF9ZCD4?nT`n8ArFPUq+DYyIRZe6NO(;obKixHGIY_!uOyGA>bVPzHHJ%TIf +>U#UcrDzGR5F22xu)tFXg%??q)Vtfa5k_SV +U6QW=&FT+qc+0T-YpkMqkg?os1<)13~r;hAP|J+=r*t`izLqeGLgz6v$s+=7jx74QJs+l4JOCF56NXq +(n@6aWAK2ml*O_EvkzDE>PD002J#001BW003}la4%nJZggdG +ZeeUMZEs{{Y;!MPUukY>bYEXCaCuWwQgT!%NKDR7OixuP$w(|wNY2kINzBYER>;jyNzEyS2o|Ll6r~o +Y=9MS_ab`(oYOx-dl9Cb^08mQ<1QY-O00;mZO7>P2FrZI)0RRBr0{{Ra0001RX>c!JX>N37a&BR4FKu +sRWo&aVV_|M&X=Gt^WiD`ewNkro!!Qio{S~Yx8*udjL7kzUiav&}MIi_=9SD&nK~jPSLH@mz<426R=u +{0dZSu%Fo*QplaLRFUKvP@Ko!so?R+%k7pbyF#P_45*f|EWV$Z!cnpUtvn?4V3UZ=~j}tx*TAmDvsGS +BjTP_fb*h1u)dU;PFugteFG_|T_#C!=-tTLd(82doyF`$V7Bsd!> +Qc?rk40r~O^`Nd=bAn|ZUWlM=3_?H!pUlV>SqZF{pTZ=QeO-!cYzUx?ibCW8byGq@tRO^RLJ#WS5^mK +krhxl3yjz18#_1$1Pf(hg9`nRtO*JPesT-E42LJ#Q8vpc!JX>N37a&BR4FKusRW +o&aVWNC6`V{~72a%?Ved9@i`Z`(HT-M@m<2F%(NqkS1L1Kw(^*-&8ZhQQ5$xt_{MRLn(|G?H@a1^Ms0 +BSngo?4&I+Kcv+0c;8=SqbMT3pZ`w2d;W|>Q8eMZrWT6)E$hmz1+%MGlqJ8KHB?A8DMY;^>d^4&nsoc +i>X49^JW~mIrI?^sB}>R}jpB7h%Y>{B4Ksf)sYuA%3gDATn$ogNQ?elI34veqk_(opx1x-a0pOCBJf} +$RT!>nD`>(2^wx3IpZTX)0pvz2lpPR{K1*|$DrqL>jCzF@2F3x{irEf2OO<$j{mM@p9W%_3M`Wz6WTr +*EW^%)u^nw-D6OfQ!g=>={JHp}W=!%H?5(O>VrdUv;8p8ZMB{(W}#{aL#C_TBw-eheD__!TmqWF?i7o +KkRl{vl)9eDPc>(f~G`%2`3uYo?|en9sG%*HVE|!3CRyun!FoWO)kyVRNnRdIj(sc9;{52P9})s=0Fy +?1MgU>WWQba`uA{H@qP_eqoC2Y1uNOou30RKVtDNka;Kjl5#@Jvpj43?L3uI2JS`l7>$Qp=0!Bi_ +MD~|>n77LQE;mF1w*GDCErW$FWY3z}uT(Z-Kb$D8{yD!8&*-}Z`Dv+KOwIFx*Wa^-I_i;1>=#H 
+w8hLc+xc_3GS8BfAhX8?WJ^KB~FKP`x#|DR4?%>#q$n92u~JB2!0v2}+z!=N0)gMFZxDi0~}N$XD@2=()~*K_`SeMwGq}*c)eDG6vjbWgfYiZ;iiOdqI +e%V=?YNX!DS;-b3qnneN|7YkmWyt+&!k>OgCGG3N>&-&WAX!e{<&h*f)TZ))#nNV1&E2x6zgXj-&Vc@ +?Y{M4!AtO0S^?Vhn{RGzskjCoLL6&Hg^}&b&1-tYbdiC8%*r-L`ynF}9@5p8j_2LYob0t*Z$N%@tKl= +_itXey)=}#VeD4Zy*B=b$itaEOqTU2115pnEaFwRLRV6En#C^dLlX?yU-C+kOcs_t*b~A}NCfFXzT3v +Zr3Ya!e%?WL#Fwv0l4EO;a{0+fl;<{B0f#D%1Dd4)bQT>Is(vGQZo%goD9+0AYrdcTytWZvY(4m{Kp} +20m>9qmZ7RBSR5vgh8^NRnnGU!Pt{Q&SFFI=h+lA= +!-xTO(tINt*qq$*vLr<-04_*VH*HB3>&}O{)X`j=-0Zfx^lC)iKvg|B>)>7=5`h&)5us_O9yMIfjL2h +xi7B1~(;hL7-{Nh-3^VgfvTRj-<_uxY1&ZUSr)ttR*MjB@m$+H6TViG>Lg6I*%QKk$53>Q;KcMQ@)z^ +w#(d;IqU$SeO9gJ6yiJn6Z(~yc1hfrD@FewIRVwQI9(f`h5Q*t$p7EwZ;$H7^q$8d6qXu7M8Nq9WKK6(&*IWl=XNBK(QFlz)wUicW^5_KgN_U|il3}qXbGI99b3=#!IgAe%k6mh +33Z(1T;Z5LCs%b{0+F9-DIb|?>>0qe%m2m0_TAtqylVv=&l0FNc+(<8?kcs=rg%E{IuSx6!aR&ZqigM +b2n?$qU)ElXC)VYOorX8#_lfyO`@IF^aN@Sp=5u?3=EtA5B`Q=eUn>guHlBy4~v +uc>3Kr1T@3{p`qb70SjALXXI&39A6h{z7W^B;Q1l|Qb+SV}7E5NUHyPZlQhedD^uevE1A?dqYWviSuu +lQku*-OFCC(tuw_ydw*BggpE)|gN*IY#-G*6x(O8hZOf|&gS+6-D;ww*efA4MU61|~W+O+g}3{hR?o( +x+D7Gq_4(9NluY)hf=B-fb;%(op>biqQbtS0w}0Ile^c77Z{w)0ewy&qZDBScNZTf@20sxHJ>lu#6WR +@F6C;2iF91)6jbV9!&ZEy_YO{+{vgkw{!yAmKO0=+WP;I*<+(E>lTNi>}qwR)oWS-wqfId(6X3k>0TaQ$TzCq|uTV!qJJ=# +^qO(iVF=7FhysvebQw-wq1q(61tgstI4(=Seg*}>l$9>X+V4ByR0GuQg)Q0ldWnb%qevq^BPiwErOuW +Kj|kZ}XWDH2_|dVqv`r8!1o6F`n%!9StrVi7)l0Y}$%_v*ghJmAk!H%g91yP_=#67&0{{Tn2><{l0001RX>c!JX>N37a&BR4FKus +RWo&aVW^ZzBVRT<(Z*FvQZ)`4bd977TZ`?KzzVEM?vKK9)RgtRz7X^YE?WH;7(1T$Rv^48wO_2&odE* +%V?;TPvQr?{6=x;3`yMe +`u;$+jB2RBlFSgf?0>LdmblwdgQ4mD-~v%nH9W81Wao52ovcZvV8_*xhc|DB&Dj8`}y0!Dw%ds&_yiW +9%Ggac_hh`!Dz*_=SYND%`kxI>WId7%`x6@ly`8s|L7mxMAAxJE?_zVkI!6)zf^u +9kp9Ll7-aLb6!MA_#CQZxttb^ddc7oCGf_1dwWY>{xKR??ZnA<_4jc2Js#`DTpm|{Al>FRqeD;*eX&n +%6YrO;Q-#?+i@Qqkm;bp+4w9VGq~d2vY0M-Te?nV>x%&?rAA`O#bM=>?JrR!e)D0HVq8z<(9$Sa=eIA +NMKUhsM`oFtgVTaU;?JfnK%;$#qn7Bq +&0w7Y!~mMgaGA`T-En(?B3nD%f-!!u*Q27Du7;cMstk$S* +SJAQk3y>nFdb{8K4^T@31QY-O00;mZO7>RYm#NLd0RR971ONaX0001RX>c!JX>N37a&BR4FKusRWo&aVX>Md +?crI{xg;Py$qc9M?^D9Q)3rH!sN6OxNsiIYHtCi)#P~1AU|ok1-^MAvt +HC>Sz*SQ520cL~t<8t9izI-{Qxv0C|*Be=sH_^h{vp2pB`a_!eRcZ?;7OG-NS>X&MwN4uHOm!3s`Dij +hrga)C)}*u!5XC8!N3kL*H|EwabxP@nzr6zyzxR{X>vNp9VSqp@E7n>btSgx7k@T9<^b+vMJMK%6$Pf +*VgNxcdzc!Q<3rerSl8%BsF_Ro5e7t%X#omhaL%RX?3v2AYE@I?JUP)j4K~yY?#B(UWc6g`}h5zB~^@ +{a3|2ly5-!SRfb7FL>-NfxJ=|L+C_8#=HS0ouC&nVIXX?T2E5qD_Bo#oTD6iI34b9nBpWo!Fz9;UgjP +}Up^H#0y-+o+X5+%70z7NFM=5Uei5sBBRr<*wQvq +aTDCdpBE-()JP;uBCy0|XQR000O88%p+87j9e000aO4F$w?xD*ylhaA|NaUukZ1WpZv|Y%gtZWMyn~F +KKRbbYX04VRUJ4ZeMa`aBp&SE^v8;R&8(FHW2>qUvW^r*nlL*D4Mkf_6BqV1`KPFt)GHGV5F19nI>71 +vMUt%?>kbKEHQ2-Ac#dCpSyeR;hk!u4&XY?8(XVAi2VC;5G_2H$H$5oHz9}kfi*C61>tH_IR^p_Cj0bQf2OI +#EC!l|Mn&X=XI6e6u`ucW(z?59wg2kT7gEA@B@jT|76}nI!U2eMJ{TULY@4jV73OeGVsmYJ|`oBL +7&C9@!dZ+@aeZtaJRXIf31??m}_tZgU|PW=5y$rnX=(W>|YvY8-hKsE`ohRU{98kf>VPsfKGUW6SD)K +SV7Q;Hz8XTP_nYY2#?7sScE+w2-5`+t#nJeAdshyj6jJBP0-?5f^>?JY#IC4INK4~v{|F^N`>XA;bjwp%2KoyF^&~?>`Bbf>?#dB)lA +s$d5iFdT!tdBpoh1&Z^%=HUx^yXvYLKpg3G;)=owQSs2LYxGJF(heVP7;46Cu^HSg=`4O)^tEZLq#h) +!t`cqF+@AYuY|thAnP)YRpOjBQ@*UgXMrtaoVlEDHi0gDZO~b127((siBgFNMhs;WK(9}FhFWFVVWLO=d-JzpO +T9S1>Yz3KL0)HNVd7fWn80rv~@vAe;ZleT#_nqci38_E&vI}zVt8)=fkCS?LOm=f6Qfqs-P+s(=$s@H +pkF&HiFtA!q!da~*ZPB$xdWgl?yN65d=fSI8`FwcZRFiY-Au^LqGOu+)t*N-|PbOt#ch)S~Jj^!y^I} +Ym^TnsQ3c~TfNcgXev>EJUEXh}u|MvFX`eyP8ke$C0B8-1Ne)zbBvNhDvoaoZ?jkiir*8XPOe%kck6f +gm#3IHCNXsNYFpi`@}+Z3$o`#g1*lmCD6e!^EpL+v!DVH-TvEtLyNL-8cAjcNsnR(K$`r{*Ck0#=r)B 
+{xd@4-aZiZQ1x1l4U(1*)U_2iSWpRrV`cRkb*|jFm0igel&$JXg#zZInAa}1ZQ|f`y55}^LGlenYfvw8n+E~5!PN$JYh+jMX}Fa85iO9KQH0000802@m7R>$3tSh^Sh08>!_02}} +S0B~t=FJEbHbY*gGVQepLZ)9a`b1!UZZfh=ZdDT2?cigs>-}NiF)F$ENjA%=V(?s2A?K(>B*0FsoXS2 +Inm4b#C4I>UYAxKFRNB@2A15c78DQQk~R_$>(AaHSUao@OLv0B$n8!ftO-qv-gb_#ANTG<2*Wqol`R2 +SxFZC|WwRotvayEF}rG*4Yqmc@Cx7LAfSi>6*hxoE|_6iP`I=}xwZd$gN1Ok-XqI}!Z7|L?a)+4C1i& +;S1N^U<)>GL(of%tH3H!uufP&tf+tv +&^hheqEu-v*NvPD1YptEO)}p;Y*CaF*5%gx{fnccSJ}~jy+3+={PNA~?IV{$E#0se^lN+XRiWk~<<_E +Tt_!7C*vMSW+e~!rGD9F4Y!JUu>$31)RI)AWx%W=qv`CumQ`fmHEuw#yo4*Jp^XK&nHeY^Q+!WRJv4D +jgiHwxBHDQMQgU`fVJH;a8gF`}Y1ZYv^@+O_vWhsHD8w2?zHb1YSSJm$9AUn$@ni;-MFJ$|Q+LA1*fM +v1_7-45Odeh|4ZzWoQuBMR`O<6#L-k7Dhl97mz1!OL(xs2*7cv}KL*ggxu;d&`r0Cp{->!K_H;rV6M) +={Tqx|1D$_xA0ZckhqB&R#q_esOdRSS7JqiYNOA@ifAx&%XRzKOH>z!aN +WHo!=baJvoO&}xHC2|oHFOFkuP>9N$DFZ9r@T%nMT;{pVX&KR#C_Aar=>4)#5yK*}hOx^US3v;JkaB0 +ys#9%r4uq*T(F)--QN4)H1`9pY!#tpKQQ=6y2IvSvg{X-t=Y2kV*36g1m3&z(>a#TZehG3Fd2JE$K$Y +Wq2rdBn+wq&%drCHiD2t!OIrQhX!ltb7@$J#u=!^ZwQ&@(>0Z1W=3YR94KZ-f-p_Zpcp?VTb67-ctC1ibt2Gdhl?49nMz8;Ji2Rw?QB}7Hs{l@tj3^R9Nx(9X +E}FVqM@jnWBm#m7?ielQjRz_(E~IKx5INOKkxvn>0x>K%(WR7Yh6!krqOUyC_bCQu$O9`ba94~PL{e5 +n!aYk}fKy2j#9T8f{9g)%@(P5_<#~63b5|bBbyI^%1pWcsa!{Y5$!UK +R0_c4W4tVD@>ss#v@c*S%?o>`Y-4nku_H1Wyq$EV4M`LZtNQr%8w$;m%HKK*nOrwBu|Zlj1@xZ5oLIZ +l75izq;KlVQ9!fw +c^`!>nsfODQ)lyxWCT1!TKC&*>u@lz%C{$aifZ#sM(ud5P%u7W!f>@rgGq(LCHi|Ajb=ZQ~ +9E1LzFHy#r`;bP0c};bzNR$_K17f#wk_}3|MlD!(?Ye7O#z+7SRM;JWI0FV&>8Qk|iY85<*%2J984m; +&4Qa}Ty6u{ZUf6cTXsQ*>j0dMAj4E+p^+3QS!TvLE(2V}|c37w^2j|J{ExJ-CK$s~~$D^AD`oV7VA@4 +W=ybS;{t2>Y^s=*e#V`##NF^aT6%Xz&lfF8UTQC)#R;_jfx%sY^?isdYzy~d0oB|d^D4#U%1Z_#~iir +uDKLJuAPFvL?@!#VJ1umbz6mx2ZsH?)r`DM+3tI^RTRJj+=+3Jb;?J#hy|d?QLF`^)EX14Ch{F-0Agu +@0L#y4>$Ad@Eal9lilmVLgVI6{{rlfyFt91qx6NhGnIDN9gXNYN+Q0`o(O>1gcjMy7LZ4IukY^eaMBU +zfdCq8EJ)%mT1j}^wgRQXq9YLT*QCotp^nehX|=|jh>~<4cp(hmQY9aUNU~Wz*MXHihZZ9vQscaaS*| +A5-{6};~X%w3pS#q1kVkZtYL-2C8*23X1Rmy`dxLuS@fYgjINkCpj(hQh67u<0&Rpqt5J&HnmKV(mOb +6jpD*BUP4a9GORPx4QltR>SexKK&IZ7LEw9plpW_hVzZm<)=!uHxyLYcB)QFNIm1!gv=&w_t)1p}b){ +dXXpo>HU-T^rNlQaOi8+|WfcYf?@@Q~I`QIms#KXEiXuEZr+`VOQ5R=U)#NnUs8fYPWu_1EYJ$|=21f ++jr`t#mR8X@Sya3Gy&nAnz=SMq#*xv3x)ga?Z%JQR|wWX?P9MpeQ-wg89@29J`fI!5+xpji9^Hvr^Sl +obY?_8tQBI;t5m2Y*kz=TjS!RR%*cc1Hr-gj!lq}on_sXIdST_og;cvVGPyXp!W&L_@kBfo%MKmgZT( +H3UvwiXn-CtSaXApXKgv4kv~G1Wk8_QS`&nZu;zv1#ADo}$ELVe)LIA7p7h?q&ja(`aTB<~vYCU~+SA +_7VKn@Ev;)2jD9tO-cesyg5KP3$)1N;&_+}ELnn_>&_4S)~N6()fA5F%EH^G01SEp!}iNi8(Lz8msC0q3j@_|$}_Eo$iDPNgJ^L~`v7(cI9(j5#@F@g~f8c~gfyU|@1diOmhB +03D(af-sFv;`nqj>CwwfkbN5K1<|g8xMn#EC!ODGfut1(b7Q1^AOB67 +FvPmX-#adiq9W(_)Esx3ywM@n$nrRWbL!*9c6%!~)2s%(NExHo+##c+<09ECUSYfge+42+kUp)tB(T0 +VvxA3Or{Lne_4aJDsdx}3gcpP(#hCvCoq36X4aL?$;no_ZPB~jV +N)s-5jVUHH0(DO~}JXk2pj42pQ^+00i_2&iH3gGtFk#L_cRnPpPhKfyY9l)|B+%0;64aWI)Fga0=g7> +vl8clz>6JkqeX}XP5dmiDwN&A0|gl&X}YOUyBUA?CU1o5ubb7X%_sNY&GNXqpNs<8d#?Hl%#M*qmk`g +W-b}an@%jhy^|#F6>b=3-*RR-atKBAR?IKOh2%`CO7Z +i)B@TA593+Mi^wfybOM(vBmkN&tTLPWRh;Oa0CQ@LohL%N*#omh%Fh$(&;5`LoZT=u9Q1-9hWS9_O1d +UZr!gJ%iRBqZNskLuzTv$cy*izTzdf;sO(v0hAmJD)e^Z&Y9MMu^fX0xI0Co +VXstsX~y}&2CDso&Sh2qZJM|fn$46jAUG9(r4#8(I8d#x~)QusL*&WIb<;M2#!rj02DN)Z?q1nQvh=f +=WgKYu&=D^uNk=|&fBeVSoKbJ4AszzdsWL2BoSq73YLIcwdXikqS4aNOeP2g!yZhLhs0Swc9t+N2x~T{W9HAU*$7yX7r?9 +(Qb7P+m9@yjoTcZlzo+olDCB8hZ}O%NnbWcl#WNl}%KOs3oNprU$Uw2ohX+N1(*fMSz_fRPhr13M2D3 +l{JftZ%sgE~WA$`^ef#d7v;8Jt8KrMZQfVZJ%3PuLpWc0d|_&+?5|--Oj1U<3RhvGlMSA{;b-Zx-M&om9o(HA2~$q9U$80=Uk +zHC&lZ{x`+@LP?vMkY9=BZx*9ksN8Wb|F}ylEfI11#)4#-g}Vf$2%DDj%GRnokVF8L# +C;n%1hX+H=1{cnD$&%59>Z3_XcWzuCGak9ZfU}dS&A4c66uyu>biXM+7jE>QTT*aBcw)bnw7x&&b0x@ 
+S2qNAFt5SV$$sUt0&7Z(Snl=P5J2nV03o=WIvavJ2E~?lT1&f+E}U9kdt0Ap6FH8zMc9&DY~Ji1}e33 +-=G1V3GoufV7hWcrDX?%SCD&@Y<)a+>7{g)fHc6$zXJ+Kn>N|24R%x41EiS;n1_n=Seyw?E|#+7i-n=p$P48{mL`2#l{dn2ur<3+b1`S)T;KbUN?SC+@PB{~ +3)xfh2`JTYpINBdw=sB@_y7@%VDkI7KCR%ghN&v0)Qjjm|~;A@)Ax-?lu6N96h;n4{%? +v>3=x$?@;L3LMpaR5|8t{lMuqp25+1bz7Gm-ohPveO2Ss%ZIQq&Qq5^^Wg7V`hO~Ni|ay_b$u=HQK`c +YNdvH-rZ2$R_i;t!3PTayi~kqa6ApQ3ZxGuLkiTGH-`)^>3rof7vc!G_D<>c&9AcnZH_dj!w$$-KeWF +Uk1UAO#R(zUZe*QxTPcpsnrIcrnwnZRy++1Tj)Z}~sInGLRZ0E5?2&kf0z)nC6g>r +W(m-?eJ&b+iehTChlCVRa|jOlsFztBoE-fgim}#;j9~@Cd$44LV|`c>KnR$h`)DponT?XH$OCFtX^Zn2zW?+BstQY=)B6bB)m;@H84>Qc9c)<>ykqw+~alI`%*TjNC1POA4BCQS(H_LO +VrXcxS0^?=POSK+fWhei9d!N*yGjt-q6zBNWPw`Mnf1h-+>j;h@5aRl#m;;cuk?KX5c!~e+`ue_x$RMB230x3(D;Z=dI*>DbXEeBhAy?gq*^ib5vrveD3 +!WFB-?>@FsFbK+iO3N~G`20!xz4;J3H_Pp7<;safki|3L +gvTk#`KQ#nB-(jj+4hT?RLu169OU#9B+J7ke8VgvurxKePFWNX+)kERkmSpJUsC<*8|B;(1VchE-^m> +Ia5OykwndZO>FAA6{TCyDJ9?eh!O1+Vjm&ODnYLkfypJ7u1Eipy2Y`BxP(8z9wp#K21Ca7_m;NasQl7 +psVQ6D?&b?~ICYAF677bV9rkd$L<-jeS>FM&Fs%(dI}rmswS5tx_UP>paqgE|h6@%B)C{o}juoIm}~* +`OoP`7{N=#!+Jl5aYNKxQ2)H=JIRpRw3-AZOUAo8=5Ax(udHjHMAB_KvH4REAtQoBy@%N)OS0O-!p!+ +LX?dx@H})pQA3{orp|OlJaadzTRbLlrpqb1QHih6&^~{D`@J*i^x>X9IUo!3*@&q|hsaJR5I2xygthA +{vAX)#e~Z${Csho8e^N#8=|ZlL9Vmo4C$qYAeeJ8np;t83ggfiEkS;eQ4rt9UI-m9Tt}Pf^1IO*TH$ps<@+a;^eE +B@dxv?;JSHW|n`pUBPlwJvxTW3=>d%OrpbyeGH9xp|@2}w#9m&g2b#CH;sM>;Kf)CU!OxsGoA>#Wqp? +ADpv?a2JCj6+@9YVroH6c%?7&We&UJYj(m=5t+MGsOzO69!%aTR@zz3HUu6(J@@m(G$EhA;nBL&+I|O +6pH9`Ne9h0^nni>=7{9YRo5r^q1RmD@X9$fy!;J=)b~{!Fmqp8;rkvq(f=q-_(caRC13nBkGtCopXf+ +ldiKYsujXzq+~;*OGn;_xfnU7VW*H?(i*V9PjXMg7U4>3u9`WufCCWVxH^1F)!8cjjiJ2h>Mn(mr!3(_@OGreuli2 +hUdPPcXMtWH?Lo66eXz&PhN<$! +weH%#4h}fc(qs6}HwZ8teEMYE7{%{&FP!R?TsY@qL~~AnI`Q{OC~=MR3-hyysJl6@J!(dho~m8=3pFf +QZxbm{)R~VzN2DCS&3$Yl=1m+;u-3&R@i*UU=>C?kY~Ln9^B8v7Auv*8f$4@Yb)Hq>=8liWQ-pNmKc|?1ru*Y+%25J;_J0@}fuL +EA7`h_o2MZ=PxsB(XLb91bU_04=8;tz4_5<|qlTwv%Exy4=@nPpdUb!nAAJ7N52gI;tM2UOb>sOUg$@ ++~&E!?4=zxX$T2#i=I1L{|3)6SZ$4gVhx*uX&x8^cw$C1GpFN)PY1}7zj$`8FTvFYg7i%-p-!m>2^Jc +zR*9?ce+G*F7fQDz!DO`+0{VX{>*o_tq5W;Kp%@}JKbk@KNfw_4s!1Fh|6H6)ehU)2v^$EMt_9tQBoP +bJQOYDX$5~HgI0FHO#NrZ<4j-Ut9UcYdOC=a!yt|3cp%_9 +Sb*YQ4XL64>nVt{naBo3%nYd?3u!b~+b^tp*A9tYzuQK}O5^BEpKiRjcjI&;8X`l?D|mb(nz5{j!oT= +G2G?{LjUi{@|<-JV<%MJuxRiR_Gyp;Z__PD>^)IcbYRt1tc!P)h>@6aWAK2 +ml*O_ErD@0006200000001ul003}la4%nJZggdGZeeUMa%FRGY;|;LZ*DJgWpi(Ac4cg7VlQ7`X>MtB +Utcb8c>@4YO9KQH0000802@m7R>J@y-$(@j0Av&Z04o3h0B~t=FJEbHbY*gGVQepQWpi(Ab#!TOZZC3 +Wb8l>RWo&6;FJfVHWiD`e)mZIr+cp&aucshXV2}Y#4`3iefug|z1nH2b+YiH#8R;r!Ba;eAC3B0u`z} +93QMThHn}cCNg4mSq*ExsxQfj2w5WVdfzawnZ38~4s6yJb<2Az`&4U*orkdsfHW`ff;CpUd(7QS%&o+ +OR2?^suqkHBjoi;hs)LF(45S)7e;!uQ`7z60vo;uA3Av#y`v&zD-C*E~a=}?;mr9OH$2>h^b +j)+`bqRO0+WzhqbK*b5RWObvW1YoD)X=J;W&A!^LML225ygM$nA_1YLq +*gRuwt1s46ts#K$tNoTFA1JR`O*LE!ao>03%+awJ=vk!Uj&wP)w%CunT#n8>4(QFYx;PBeGD(se8qyd +-w1qzFr1%Pzo)e{IPT)^1shW=pTHx|>Mt=UqT%RWde^^7f(8H~E$^1yrz2wCEv&(_#(%p>dDZ~QmY)= +9i%uHGY2;<)MEQ#Ev-+wH=|5&Dl%Y8^YCuyD*Uj^gYZm`)5tF2n96!{1}vU|h>TGM$yyS7|O}tsQ_NMb~rPgyfO^VM$m{qV)d&!`%>iB>JxKoQK1Ia+FA4U8vKh9lnAR>Vb*W|U#*X@B}CrI +1~dh=LOOBrK@_ZRl*_+Ll0HQ|x3tf2!uMDmK^vxA#BPfV +{%KSA-q+*D~(7+!&muUIB8QBIPR8CRawG8*Csa5(TD=Oh`t>FD|kP)h>@6aWAK2ml*O_Evl4EVRU74E^vA6Ty1aLM +iTz6U$LbTSOQpHlcKnT?o>3FIv3+|i7#=kC=7$4#FfRG;<8-Ij%wt;-^}bgNy$#)v_+nwf4s{qf`|ee&$+$v=-@{3B+Mt8L7lHsTjvz2_8lk|#LxoDC)TU=(iezIwcQkvynh-RIY#VFy(jS~7a$d@$EUHU0?K%7&Gi;G5|9MkfWhJb+X +Y%!m^U}=z0Y3~4Zurj$ZlP^9i&iupu@p)UDU3>Sa?10frE?$d2KM+4G2lWtPR}vxXi2}qEj4_-V@sqf +Ne&|xZMtd7bj8b>J8r!m!^h=rI_p9%%*@ItUaGYS1}}xyon#JYWkx>9s=R=M1nsoHYE_Gw$A8S0NV8R 
+HJ1Ul;Elo*siu^J|nQzSsPu0iK2u+kXY-VgVx!~<{Y8$6SHccm^5h{}91O}7goe}xyaoO-J-)iad+>V +RX$wf!{AbTCCXMW@PXmr3HUx^}TpeM_+ShVOSu8VfX+7)Lf+mmCaY>~6*WZSMp6(yrrNB@3v{Oag+dh ++AxvzIT@?O+NaYnyh4_1uDN(jo(JF;Ze?)ER!joO1dySj_q$#cQ6z7<+s)~o +duFQS0@lf(J8RsY^t_cb7p*qSh8Sr0^?|WxM;d4Gby=*%_vL3pPfRF?U0_>T~{D@)jUyFwq%7Qv#A!| +ZsQ#+O{SJ)!`n?$F*{`BHNFrP*CJhKmpmP^OXd5gY628{R}ftKS1A({gG9V|~b-^`tR(Ba1GVPPYG2YZ$NY}MjxKMvfh{YC{pg9Yr>el(Ac`tMNFk +VC^IYWE*eUE#K07p>iNO6bPZtl|FOxw?O#rl!NHngf_rN`SvjdjRuORQ6+iuRo8Br#o>O-5|G#1^R?* +-m~x6O)wGl`F*t8qwkecPw^_H8nviKi>TAq}qqfxk(T2;`A>cM^0H2fRo^!;;nZkiOM4~agx+) +q`srUfcVHM}OWUYWHY{}cj3fRK-!hpwwAv{HfF<3`Z!~8PhQ`Q)Ug5$&BgRKSd2S^rIE;)d8(Omu&J{ +8KdC~5L{wLk=$)ezp+O+nbJQn3pnVc-qt18q@)8ELsp>#WJv`a(nw=?bp#YRk=(9fIv`XwRmu3kVUxA +c$$T1`sDf(Gt1m-x8lODK^c5ryc%?ic^C~!-?nSgf~o0XXSmk{Hb&?YR_ +`a>~@|8yMfy3_Pr!gx3vp!j+mj&nS7hjD0_8K#?2ai$Cl_Qy#j2+z1^GPg$fR;mGx&SD(6wE({*efq9 +H)+;Hvpy&2t@J)A?AAby+%n@!R#!gM{}R`DWqpE~1AkUul~>-3vT2|ZodgL{S^$|Hn5CR&}DjfVYrh8lc=kL(PL+r7&jC=a(q+yK(E%&@9n4aG6A`T5dVLlEI%TW())=}4 +)7@dj3dxR8U-(Q**{q8(?!I&Tj`+3;eD%g-tdOQjSd+A(eUN6icOlLLWzm#Jl{5FFJpYfiJ>1w*!W$D(z#QkQ?z(5b_Ub2MCGOaTJVO +9>*4wo8sQMll>4?6_y98(`pxQ%r$O>dVOC@KiMJqDah*BH;KJTQ)40^?1QBbZ`xGbh`Q{59dOUtFf9E +Rfm)!eXoa$ncFla7BUgKS5#y`aC*lJH~EE*AZEI8BRL=y_SPu7T2PgUe^~2qIV3YafKG+MxnO{tjn8nOGQbw2pS;@(XlB8jw;{;JRw*vs>7D$o4N!Gz>u!(@cV9&80pMd^qPaR&&8Dr9Muv@G_ZvZ0^Q@pP!oD!g +aUi3enrchH0L!)7^W0lYR`u;HKt{#^fM&{rP5Qn+)Fc#4bSXwZjS +b^@Vn3tk!*p>cge>AA)+rjkpt=8G{}W;2OBEpaQ|MoOkN)xdnxW4F7`D9^|*cqlj~&|i> +{2})Mu+tV`|;KB`U^0fC8b?(b={%*s|AbbqsMoI^yz`GwcVhvbjwNtNu?2(CS7r_PRQ}V92P?c4+Im! +8ab1{zINc4F$G1ut{CGrO{ihY7f7<8Dkq4I7xC`bW7|DJkd8I)KYbzM5~ias5gVR&Ua5dtdCDbps}un +W;)C;>Hird~;m-C!IU%G8cFF+}g7%9y&7{xnnnq{bo8T^B#-W%WJ{(&$ao+qLs%A)y!|1hd~@mk=xr#|q}h66nwi +A~*9h8GW!hbXCo2)2*XWUAiLnZ2@1bC{##_8PqwDpW$j}|3qj}Wue+5+@6aXU`MU8|L|$4TwGVB$Z~o +b)3I+wBt#fNoyPneu;=_DgV=;-Gj#|A>2kpp3#$xZh$5K$Bu!#BBogw1s=qGaCPHkMHH%eo#i@tN6hL +-~cWa-{;I@Vy5e=(g;B@idlrpKld!7v+{5}Ph8yGj9B>fu@e<;Zn2C^MiWW&{@Ss7c$zR26#u2VHN%fwnrPo@Lo3~9%QI_JGzWb4FL8pnz!vsu&4lQGws`z=snYur8^&Sm55bCLViM7F(Xa +4gtqexh1for@)d_+_wA^`O=I=8%7QEeHZj(VlQ#-ju?cooR@u;HlKq&UIDfkPSUfLS)1puo{uisjax& +~;X+(IHx^vX`x%s}AkWg_fbX}j9JV4TpMHXu`1lAapfwq3Uwguo??Hr`23FOS=o8RRV~bY? 
+9*#{Te~0lPAmBb`CFiMrCSD$(pe*Q-rlDfxaz(hIv!>+JkNWG2zny9 +hu2+b!+TzJ(K0=%6SQ(>%v>}r&&Q$ec5GrW!mu#wE+Gan;!nD#(8bUVqnCSpX-Nz8UR!?F=*8u~90w- +C@UvZycfcIVQH58`0UIO4sqo4AqqzZaDnQ7^0`nTxMfWkp{-DNv_19fFA0B{pvTpdYhw4dGg;Gec09) +7Y%c2%%<5;&cz(vu)?*oBO6ILrOz+s>p+yX??9gZA0LJtLAfnUi}dk=dCkFQ8ioT%ZOnL +hSRsHpZ3HeYa9@DC-6$WsAgj9Qfqhf8&C3jM&~I%_I8>wZZsdLY>Y>FR(6kvimk_&^@+dI?C355ET7{ +f-oWi1gqKIkQU_mv^KS*xsI1vj_hZn1i=czbvcl%^Py>nSlee1{G0g +?*=xZ!fv#J*h^irti;&To9gg+Kddm}${_?X_ysKg4c~zY#e*Zzac$;hx`*->=5G(EKRs~gl8S;bbZB> +}sVJ9SZ-YNP;nm;h c@H~3hRBC-GkQbZKvHFLGsbZ)|pDY-wUIW?^G=Z*qAqaCz-LYj5L5lHdI+dO66MN@Py5zyWt +U==QRcnav>C$sqA=E^rKnl3EsHid5K?JzAgt`&CuHACxq=i~F<$l2~MSb#-^WtGZKfcU?cQW!Db;!_d +@=i(0-}@7v|jb&b$3n(pqdZtpJEeYa)9VF!OR^(nt#@Ld1&{&o54>({TodHe29Q+oAPy&U9UZ~E?Uy# +0ZzpKk}=SBr)_zlN@tzrL-7YW3%8hr`Gp@2V|dUGI0g(|5a}?%Jx6zkcAq?RmT8`j>*>w7)*=_+KkE# +GZ~FhB-vEZkdhUn1 +F`eFZ{T4?38(-lXOab|}YwIPwDqDCJL+ZKcng>GthP?RqPQs%a|x^1AQ!_Wzj=)v6k*w=EE*fZn +jk^2=XZvELzRzIuNcHeLHySXt;CeI-nBdlPup?TUX;E6vL_{_Hiz@8#P!^fJ|Xt8mjD{#Xg_PH@WZ_) +xC+y4p8G$=ipz@7jP$EU<3H8&NcMd#~Yr3x6MJ`iS$=ZE2FJ#@!CN33sB{^?XP><{`F&)f&yMyIh)J;7c%AvY5^T)k4G&T-kQ;7{XoxY}D2Ihy#5sfeXLy +I(MZ-Zmk404R@zzWLw-g9d6}n7;=XuG$QC%*{d +x-mItQmsCPCU;Wt6N{IKfl>D~Mw$(i^9T7b{BK>SHQYj_r{FyPSZmg=mwM}9AB`txd2WY$i9Y7Pi_Qz`h|71(4RAgD~y*4auvbC~j|oa^7{Vk^~cW^`G=}=50Wswf +|i0fd5r5JABrE2Hl;tgGSD;pq-qIl{e6ML~`Z93?~7kaF3-LMl%y&h985z-u6Lr?{~N`{%+z5`EP>u> +FtAV=?@n{_r<<$R^_^?n>{p|NvrcFvJY`rpPqxhWP_Nc)s!qDcq${=n=1u01|d>VNhpS_V=z`mjB3X; +Vx$5`;F-4gIHYf!EFAInb{Z;D(@za)*>BC-Ziu^#)fdi~d~M+2P!|yY)iW2foZ78-W00#B*rzx@V^#IJ0B!5o*)-~9UupJW@Dz +z&NM!73Ui=pa~JS3B&q9^k?pG=Mpy%l#TV6rD~)Mr);nnT6RXbMY^EW-xA?m#xcI&O>1u5nd^i5t3tI +SeszmY5`E$YFtYW@tRl0;845PP2g1tdEm}*|Ha)dt7@azf?+HSD@tkO6p8E(t~x0bfa +C1>C81BuUZzya2TT#TD#j*}<=%w;5W6#d%|!G>6Hq}7n`#w;fMHjBM)XXdGGvfl^3MdINp8_BU568ji +!5cyQ|Q;OgwA?D89C^f*S`p6OV)J$p0*3$l*lNdTG+Ri}|hDHoR$)Z*`!PkCsPw1uj&ZoBHM1dk=j3} +%?{6`W3K2j+eW7ChvInb2p<}?jm>syDq;zqZ2Jzucv@4osD%n%HQuu5(|Gh1i5Xv*L(H0D@6;N}cqj8 +aGmUOUjVp~KcqbqA*6u-p`${+D5>Hf@o4+x&w4$PGroe1Hv!V=GY>9K^E146s^tT#9mnS_Htp>e@^A) +B)l>vKRWS)g5?UMT)pk&3Sn19W3+=ePs2_$h>J!S$&bs5wW{VdgFj@a7<*K$bQU|(C}$I!i^Bo>^<7T +W&80~NXm2k*LV{}#q$R@x*9%{=&cfBsqihU1WPO~J~BK|ptCc+G#NMY!HV(QPsGj +3+Znq#@TTb=@h1>0A8xbp2_9eRBP&DFWd4+GtNwn!%VzAl-}C2l7YLsecNt>g$u3F4$0uQ+SR>&mVIH +x5ll+qOu|AepQb5eqJx}eQJmNq92Y@B)o%pywk|x>hGgK(f>;!f3$IgKO64)NazO(SZh8H8=R4#TC&>MRVMMGWOKV#= +ufOhge7$d)13`P=cfa!Gz$kTxwh>!)eUER*SlZTtj4%|kH25B-&n-BOFUHVtv2H%MQo9m@_k*>3;QN$ +iHgCa*tk6xSWGCC=L>GY4>-X$mKmRxDEu)N+u9}}u&Rx)ji~*c5snLU{7a&)hSP;f+5PzIzV$;=3YLz +Gazsj!X|MDEYG(Dn(+_MSH!l#!UDMxLBM&o~5iI$Wb +4d^`7XicJB@KXdt?__G#9^R#_830CpiV;P{wiru%}W9w)yO6?cU_SHkQzV#|MP$ne4Lgjb%0;U6Bv^* +S%gTmOrY0i?OttWD;Pr+)viIdT{p$ZlI#*)4Y%Hn^@1qb1l2zBabC$&|1#Pl`9HaCC8jifN0Zvx9A-X +~BuEk@NFBpZpi4Cq%o%%7tHkxwQU!QPVW|M%a|*iTh^4~(Wsno*GpWiM7>_cmMz;-<6$cmWhAlk}jgc +R-y$BAPw(hq_{y^Z6wMSquN2Urqp0HIN2hgaSZx4F?!1KM!Ls4gl-mA?H&T=H}G*IsV9TAqpL*S6@hL +;($nYxKP1D5ZQ+bhEqKPRhK}?|QgtTd?S2hVs2{0mQyP`oyscsad?_#ZV%jAHILOn1fT$WQ1^ZW7QLXUV}tbk677q{{*I!pkNC#N)l?~>MGzJ(=F +Z&xby2>v?XB|VRQJTPw$oF?5q+Of3ZPR_E45W4p{tO$of^8)D3)2;8Lq$mVEfM8($gpyaw1*(ck{b52 +Zyn*DY?~iYL~5Hy|PqLs#_p)327}N$HE$7PzCJ6WdnO+iKoLd3TTu`+R8Ahfuv_sxZpZ6v}M2ut?%l# +YDgub$Bt}FI(n2Fbt2DBhID1XjmkwhS!;q{CTWRq)E)E_wfadbZKbfC_|Xw3@OTkA;cJuO2|%RN2jA_=T +?Xm8_)H07Dz6-^$Rh5!>z>Sl&ojA~I&}72o+GOBHLUO5sGD9opaAdJL!3pNqkb-K)Symok+?q93CHLj +fOCS7NF~wUR-M2%HSb9s@_KPyi`_TSZY$FE0Of?FFmsY?%&3DW(P;x>}_Kt^ +P4lpwSG&V5#CHyw1QDKRMa+S1vrw%-c4$HskzDU7&sfFb!`jVwp-B7$dn>&no!qEYp{VP#N~tky;;01>57n&#^(;5Ao= 
+eQcO_7cqDu1j2E(Tf9SR*MDI~>Ur<_~}br5oir0YuMB>WFC=m^M4JBpv6#k}*iOtyY`?*)H!99WXDkc +aP*vY$v^#e1d51Jz`Yjp!<#m%^2dIkP2J+0d8drkmzy!A&@rDff@iiimPO(&$E(hD0gNO|9edxmrDNo}IM(iA1}BO)?|pL~1MgNZ8wHl?r|1b0WMF?$4+o)-7BsvqF_Nl>RQE{KzBV>2Q6nM17c?CFW-Ob6}YH+VOc4LmA3&rr}Uu03kg8S${JqwWedGpT}L3~?}OaKiDnGI{j$uXEf+Hvthbep +vYCcFLZf@t>e#X=;j=c)Q8zILJ^2^;e@2sK%EMn+n-Bx{!}N!NQy*bNU-u4-^Z|HK~nt#33~+=@D3k*|}4JmRcS*jINr9PMa +p$ICV{@5JQ(|EFA5Wodn68t%^f9=b(-jET1sO@xGxmccT9gT+Sejd;h-wqP{q1fznA;7m5{9S$M*Ks2Q-xhUA1eUF5-r6YqLwyYYM4hSQL1_o&T6$cryty{ +1b>!r*FVAyH3=jf#%qF8!wqG)WWw_`O7hC|#7J(4=>2ysRbRAwhZV)#%YQ|ycCd&x#BkE~ye2kYnFg; +cIa4#*wLZp)zq$J)K3Agx|}st%7>Cr+q-*>e?r#~s9*^|+96|CaNWkcTKtRox%Vk;p$7NXH7cDF$iqE +ZjO(&nuP}Lj<)UHr9q7v6;PhYGifBF;glU=}m>VEN;h>CQY}f8ip6B6teDEC&b)=(3EvRp_knHm;ozb +a-l}g62yZ73lbF#6aqgr?CeIcU)TozkicWeI7^Pg89WYTN(nWV#qs4ZNt&lCIZm#K!KVOey@hdQd?AT +t5$<16w}iw$3I|z7;g{ySDFFwP;ya`uy8&_@KT;3{rLEpI3IfRLphqeWYPh?>IAF^kB_7_k0CJhwR3J +YyEIDU?1oY%@ME;Rv`eWC-QE|XV-Q(s)`h+eF<>;-kZa;X7Id%_RE(<;NTI2jmHS@sx0a+<p^Bt8VtC^ZrnN(2dokuHZHjWv?s!`uO3Tv7f7pKp~k|seD;9_A3y8AI2y&qs_Q +U3Kc8|L1Kk2cBS^_ZLCAkU3v9C+khNy!rgrb%qM3==pqNQuWa6$-V?t)ht+EtkdfCaDsR)g+hp@7RBjjd&`Ih3FR#R5WJfO%2eANpj>tXOfl_Ju~>CO +9?jr$BI_jBv5BFnV`#T4X|!t8mHh0Jy4TDqJkKzKF2!?jdg6-5p)!{eoP^d*FeBX*V-Q@7H3E|Y{ZW( +EF+#>q}Cr|0^|u^FfJI(vd&uykZLKRINM>t;P@Kv*P?@u0WqBr!2yk+z7$Jyo}si$mg4HV8O}6ao0mFP_sWQ5P1kXp}`F7N$23M-a +OtGU{jaZCi<`$PvL?WY>{Rhj6U%R@^$LByvfecgQ*;r=n=)^#WpjjbL31TwUh;*8cPN<47>kb$P6Urp +1>E*yVO#+-Wefn>+sh+0po1BzqgKK7>b3$H_Z1^x_ACv$5`t7^x8T+Z*Q$-J2$J-sn9K0eAEe#t?@l& +B~yqlLYM-`*Pa#`X7E3c=zHnd~ygYA*-{So6!P{!-qg;~<0|v^7QN+Q{5YQCNgbKqR3EO- +ZYKoxSU1A{epqj-1Mq)5w7&Z?eE0bZye4Rb!>u42_UDjdkNmTq|HRHw<{{PF1%IG&I#Bn}`?2Dsmg2Y +Z6hdic_jWIYFTkq+oSZ-VmjI_Q=U^RqC4kckz=msTYy~p&b=)Qz00RTsS-Ci&Kd;icD{h>9zO9)BSq3 +UJHze3)(xdGh-z_9gmO2fLsplKsl1Qr-=q6%#R4_`BAul<|Gd*S +D)elN=qf;ga3H-84L3jPj~by5_VtEl +q7TPvH88Oe1y&}uEXJ%Z{$0t)SDu5Qc*PsP&x8F={x-3KmARU-}dDX__>Oa~j}$ybd2y6&6Cjx!E5lv1RlGTiB{wCtV +98DS66S%oxFSet%agL(@MZ_f*T%^>UU&9kC`m8l*-D)*$Cxw;Qurgj&xxzXsg-xuiQ3?rr+CBK1G3rd +>I)e0GnSD!$>1;f9$x#$+gDtGk+a|>s3*GhJGvx%1aX?L==3282f2gFy)l&QKB2}XePHTdAWFmodwTPS+{`)hW94%v-P2!>4zJX9-p-fR*Az0*yritCd +>skj~ElFjgvpB(DxafE{F_q-TOBKfgF#_cju;87)BeyNl_%eQkKNDEz7Se=00jOsRG(DeS40z}Lrmgz +9&BB-0m<{|OGg1feD3|105B&jrw^^e77X=MSoy&SO_z@mf1YSya+sNWYbP0E5qy}@s+Fj# +^AXBX|@;!Ej>M34{_C)=)B%^lMbW3F>;ufE3Tm_zA{jg;FrBSJy5ggt=z3xlxPW*By2_VOkE-)eS;T~ +*ZGOWwZ3idiwd9J*b-6ff;GuI&G{=Vbut#fn#$(YZp0?#k(+XD-}{|1VHW0|XQR000O88%p+8Db-rPy +aoUONfrPAHvj+taA|NaUukZ1WpZv|Y%g+Ub8l>QbZKvHFLGsbZ)|pDY-wUIW^Z+FWM5-pZe(d>VRU74 +E^v9}SWR!+Mi9O8S1go6q(C7Mm!%75uag|ztkY)f +YMOk~2u{`+l*1t-U>4psX_*m+;g+b1h%I+)mf&YNHah`hScSMtc +Out!bfpmGbH~ZkW-cx&R-39hw7I#stSd!PP*}+-PPSC$+Qe0w-KIBeXKQ?OPx2*7_yHJE=%aURj57JDtphk^tX}If_-h3dMKYD&1A|));LT;_2%* +XFsFTo(jGC6J~Oy1;a`_*`35u&-&Z@lX +@g1}Iga(R_#KT8Cs%9{1q4UZ2mtWP2HKOhX(X>9t7%C7>-Ow8_DL%m|IO{=F$Xn|RV*aJ7!AW{Id-a? 
+@G33HEN=a$+*!TSDag@BBgqb+ugX>+huam1%HySyOkI4N}G0n_C;LqqgxLg!dV3J4q_DEbQr~+Y8KJ{9b%*7E1ivA-A8M53q(Y4UY;F~{efb_hxJ{rfIJI?!yOYUxUbz{<; +V%$T$x8n?SZPKer75beGF^6O!jdh`k4F`iqf$H__ftA~RMipq{R#w%}#SUZkfeINxJm6sL^)ayjyVuu +<|MOKB0m4DX{f}#)!!>Z(fOfAUTJ9ZE>=3o7VZE+4CFRHA7Q~^@SdRv7zl +PNEuUN~G?(I_&NvO6DxP#%o%=))(>>;B9=Un4XoeCDl@P?(;B|9<)hK8K$Gj@^Pq|H1m<;;+5dYAXd0 +N;vhhdAOosS+1lypbt3vNSC#Ch?_pz#<&4!+J?VsU}(;^EZ?)FDn!S|i4{Kg3Rt6zPHD*_T8d$3!pS3 +42fZAX-8g9}FQ+LyzQgKax@8r)9CnAJc=uaMm%;>#)MvcH9PKR7P(lC}X&@=@G#Ui?(!rOaK)%6C7j% +VT1|RbIEl9Hc2)qHd>f7yXFOPsIcwJZP9l%&qAGTn_4*uju1|6xCXJ!hJ7&2GP1Tw0a(kctJogTK?Vc +0>!wT_sxou6U!r-JxP}D;p!U(POrz-($DO}$b<^tBQ;)SuyEh@74JXz9)qSLU6M?@BwA9%y!& +=TE@@iIXnA|AzbyO52R-V6b+4CeIz%#%UtvogDrz@5MTyL9ASh(JQ8HVM9Eg+7aMWV=c~7Pf(8&Y!A7 +lZu9&A|f1Q4Cww(tr2OMsME? +Yez7Xll_OrbN5ewF>`utSEHy@I+gHMv(MWF)f;26x0m=mjUoBh+)=Ku}P&R(48CD?HAJNB-iCrhXvdh +=43VkB~Lyf9*++z0UIIBdx;;juitdXSs(LnH^-bDZl%2@Qqc7YWE`V!f1BAeshQt~_gOk#Z#N)*PHn@ +8)694B(7n4k4$PlVvsugb4`X_iE{r3?Y@sZ_BE){z{ihczQzuypho+nK-pY@3@?tg!6I0qSPW*i|FMW +z~ytj{mx5?+F8O#sOSz7MGjnThQO9KQH0000802@m7R<*OmdWZ}F0AMBn051Rl0B~t=FJEbHbY*gGVQ +epQWpi(Ab#!TOZZC3Wb8l>RWo&6;FK}{ic4=f~axQRromyLS +z(RAc_sCAv?PpT@6&8%{!d{U)eWwkX=x=_}&++D6*W97QJ2`|F+Y$cs)VP4Cs(1mQ&OuUA`fY((s6Ys +!`wrZ!UCR|##fF1gGSO5O$`rXz0{O0G|?|yunU%$C}dwc!u_0_v@WdqvA9$NTs{m;=zZ~7{mvyi_@v> +U57%96T1c{e=9yNPVq)y7yD7wR1`qKG$nB^GDy+$G1esLFefD{uAw4e5JvWR5epX98gNBUL&z|13PC( +JS%RkYT`T^Pq%PC9~%qbS+k@c~;85Xw^95U%@;x0m^g$G@X(8%trpix|Gf-myKQl>^8wAv=dSkfLk}D +;gaFK-pnTO(;CC0$q)ukSW^;7TyKmh@D?^=@XMQP{41OEp88k=e<1%3(a5l}63@myif7$CAZ&?vqsoJ +jK|^Akh%jcE#+`^`lGnh}HJFhurB)c_X5>%e0WS#10&%N@C?#bAumY-&n~CC+nDgXjVzqCC!XpUE65{P-4dZExN3VB0K!iGC=2FIDb^h5?9wSvUWT68M@)%TB?LLX23@DaR%$)un?<5>TZ@_Kd7=G>q^o`t!#5>y|UF$ +z|1nb8d0N>tp#G-<0Tt^(u!Uc0VAPu%0B6}a^5b+MSD-=8s{zwOEw71)>gqh%1*PJ;9xX@fj9?XfrCJKf^7(ejxX&OoQwfLxecPB{1iisuE_z#9!OV1EWIBH>iZ{_R5iSF^g`RiOZtUl +-=N-9mBF9eqmBsI>UsBj1&mPz2^#_=lm#v%`U%&bb^M#;I-tdX2%e@P@yluQtA^zRP%>lt&%}8@_Fj5=JloTu7AkkQrC2ojhQ{ +^jEp+^PPA}H@q=UXZNv?Fh&1qT +fwBYu2T@ppoI&~pPEELZV6DG^s-eJ*-GkBjm+#(fh0Cz$RiqPZJ!M&kuxHLQ(-sp)-@V$cp^k +Ze7D`KE|=-FaZ8rj^lNZoLng(xooH7Vt8RmeF{s+q1f)94r>?;IxV4{0Zt8=H51+C0?yGcygF1iTz2p +>y~$a`DIobt!c>U7d5%lTC>yhUBwnxd;7BWHIsRZde@vxAXU1yp=k49PLisLtl6|J~<(dQ!FclIE^2V +&;^VQ7TVGN?Cr$s6OeJ#H6>_kiapAqM9oA&MF+(BVT-w`X9OM}3YrC@xgF=rlR2TeE)A8nFz*dYR8LPEZC#TQd +4gmLp)F3(p%xdIZS9T+(22sdQ0#dWtBl~uknBZM2~bX<3RX>P*Cc;&^iTy-sq8%|5Q;JAl$U@?Iri`Ltr99Qau`DUoN=>5@&FPC3;X8hYZD|%LCSwF~C*Gu!fNLoQsuhs` +K!Qfdt=ytW=Zskgsx;adEruMmwa#s%<58YPrTxH +S}$2<>ueW-#R%c?IJoWpN`E+*Hk_vS&mT=%wS-(nn!3XdDD^EYnoUd5b%xEz4=@B3MkD)+=@RA4&Ra{ +p>F^_?p<;K(`W)Y(J2j*B?>(=UYmS6xrO48*1{$aCTnucNnjz3Z*h+pLA@y(URIkes?ph +%_NWS$E7pbj0E^4>UaDVR{}H!S)b$p!bpOu;E)Iiyii;hDGeae9iL7BpuPBajk($X#0&qS~x+W_3#UwN^yuj`nrc+(m(R|G2^pS*ll2Wp-j0r*$dGydntzRohP$p`T^m0%yhA +3J2$5#(`bKEqt87^UEEpe-!SL#kkyTI@pa59yi8H0-qMGF967C>`X>WP7!{SPenvuXcZq&0TtfFrQ%~ +6DaJv7B0du1LT@ho +r;!tgxiYoI>ijpM;oqsUHaiJsJj`uTqh@gpt?^FF`0_mrT6I;vA68LC8_~|^CN)?x(nL-& +mOr~i*9B|s(Rc4$cvYSqaMP`tE7e^;2i^b +s6^6@@$E_%zyWHs64=@U8kcn_HkDRr2p{e@(@=?(0hCE6|V(vlRpVCQR#Jv@?A$+=zgkoP-3xbY+w7p +SxX=(E@(GNlyMi<&U5{ewP|>$U&xo@#&C+ng=Yzh{RTt83ppB-pUsf|y4XhRL!=eSW6Zn +@6aWAK2ml*O_ExMKo!sXG000~e001ul003}la4%nJZggdGZeeUM +a%FRGY;|;LZ*DJgWpi(Ac4cg7VlQ%KaBp&SWpXZXdEHiBYveW*eV<=(aS6<@Q-{!(Vn|_|un;KQvMrR +7E=FTt&!~|luOufOmi+fU^0yOD+O+hgCd_0c-E+?UlnZN0%8XK+d1^)Zt1#3xDE*Ip-NV9de1sHHn!}Hx@RSK_%|4_9k{<2jz4sZooT$oTEBlKbS +`t@c}pMoQ!Oo*tU;n0L3BBaTc@cOC8z0TwOVCLI7dIps>fbWi8jfK@W5|MQ>mp-)3sw&Z0PIn=)KY0? 
+uREVL-ijBS(=$z`*e3qE0NuaJttGYZ=JVWlG=ZSs7XZESc0M%URttq0C1`B=@cBMpNFI2xkzX|TL@2t +;F5Gmler6-5S~h~$x1QZBeqR$O+lYKolRUG$s3BEW95v1lu|Q29AtLr0Rnzz%1U^-1MGwJ2QvPEkS}1 +^F*7cvUk;6Jt#b0>j!6V$#qr#5vz`6?_9aK1mrDJw+`af_(ccCOe*g-eDx*V@`-9L_yIQE2elsa>H?nSyv`QkEGkr%9MjLtbnw1%$@zI`^y +*lNSeo+(VKsES^Bpn)$gJ_AFpE*#*XV1V0q&-tBiworXaEQhyU_SPgaV9PIEZecD{mFOp{vtzeOwJ)- +S!m7)2yE#O@nVKvl2cd<4HTYhW%t5HLn~4w*y8TD(@q`xulD+ls_h&O1fTi0*LG)|rXm5{bms}PFTQt* +{j4kCghgv{w77AsWj9CxmgbqxDo)D5Dqm9BKC1m$WBtD^xM8k187{`-7-j1v7xtaY}bfaB`qdD!4^iE +{n*kg>F2Nuo?r5etZtu=OL7@S2lM8E5%y6H=h(5O`;&!6-zq*irrfvwQbCtcI6}moDHair!NHN)W~z-bD;M!nkX)3x+1$4 +3CfD6Zw-K<;yOH?bz=~f(`9{>K31sa4#YrtTxf+IaX@X26;C&Bu40;Nx0r$jo#s$*-g~pOvsSF(^T7_ +B}^tvs=rM;Lp-w`#%b=3^&c~JjV~ce}la*xpO4m2&%X4^H$pTsMyj5nOb8P;@n_^kv63r- +apqj8uG~#tzkD2;cI(n#%cO?4)P)h>@6aWAK2ml*O_Ev(dFx~6~002%E001)p003}la4%nJZggdGZee +UMa%FRGY;|;LZ*DJgWpi(Ac4cg7VlQ%Kadl~OWo>0{baO6ndF5AaZ`(Ey{_bCKRli7>ETkU>3ymSw~+I(sl%zx=wE78)u_y>zXC0M!K{tWaE);~4L +5pCJ0KBrFx4o>U%EKR2f6<344iijVA3+r +ovUt0KQXBPbi9A>JQjlraL-fEu3!Fx23o(@}; +8IzM3Y$(f@IkZg4v{aFu-2_D3kC!}pq2i|k)F9%W +ASY{GaA?<#p`{gG8=$)*}>rl_`Z@W%m3oEE%|;nx_FB#Gf1s0QJ;z=Lr$P_i4;j_!JCDue|$P*w^dST +-e-bT5{(g}@+&dv2ChV>}$i^$pDwB$h3iyrv#6p+qL_oB`qQb+4 +?O&czkYW(A7XmI5f>s(YdvR4A}bIeG-mWrlF!=3zYNGDq +bhF<}#aWrtPa3~Vc8Kmk!*^DK4|y{{UPyF`Y93%Wg3;<~u +`=!(Odw*0-Pd4u#L4DEGzU@9QW^!VKu~&z}+co%j`5kOjJL28MzjLVVASZkmc)WM)xSzPp&IVmM^n1b +of!zHl9SHujSobg41*5WG-CR&;|K$8~I0h)&_63Q^v6lD0{puq1%MIG)r6B;M~08mQ<1QY-O00;mZO7>RL>tC;k4gdgpEdT&700 +01RX>c!JX>N37a&BR4FLGsbZ)|mRX>V>Xa%FRGY<6XAX<{#OWpi(Ac4cxdaCx;_e~;TX68+zwf^~4HU +3j%=f#Lw;RG{6Yxu8KCY|<8oy@ghmXq#ABQbZ~nH^_ItH$zgQWOZg}KzgUT9Dl;qbN@@K6m)fYROpBFxBk}c +ZTNiRAvQ(Ryl~k)bO5V#dubQY%v-^~`i&~=!?ZszlS(PeF3-yOg%Jd*lS2j{t#d}$(jbHRbs^zt$KV` +EL*EHUeDlajtwb~S)CFR;aX+)~U^(+7Fv6YcFO`Dlc`Ddz;nUVR6Caw43nNW#i>q2TDVab=btCJvk^L#(wNUdQOY_=}ObTe^JNgd~v`LG?uYPI22 +0|WNtfiUQ7T}m(S=Z!SKX^;Br|(`*2)hoRRGKTs?u9Nz1W2y +glbzwM_U(tGLSDnXd6A3=4$s?{>Ve@&v5!4?}3cR#3hW!1aYnjUr%0QFMC?fB#&S(k^wdVAr)Ew#72)oVx6W +fGP<%k5v$=WQRu+sG1ER(FIxTc5OL6e`aMAz|@!;EV;>cz3==*T +;7$x!e!Q}K2XyWO|Ba;oW5Rbn1lb1s$O+XzEn|L~0b^?p|1S!J>(4I&yz^G+G56zxn9HWl-MKo1q$OM +Q)DvxeXgZvi7(Ozcv2~f?cAy3%dE_N;U#l?4{37ya6qRJ!I2wCybgY~UUXV50|IpUFb-4U2> +VuW9PTpoXNFn>hC~SKxYH{No=zesbXa`Oza!v}+!jyz_as;vwu98rD$s8@Fa+4CQMeK99msdOaE_o@L+4tDdk!Fo`bTJxTi2LXe@^Kx2}T|2SFXY+MXu|AroeLlCzcSK_2oD-@L ++NS%{G{>RBg*t$i_tis^RR>V|homo`z+z>xk0(}M2#pYF8<~sf +itp;L)6Uq;ntj%OkK_%13U$SYcW*m$tgFJt7NuKbmiG!d~TFj?*I^)q>R6cUKdc-1U!*4k>eyUK1TII +QRl(PU~wvLZSEVP)Wu6X)!zaP>MHF@>~6h+q6(IKs=&5Yis1%h$092yq;zJm(g!qBK8!7YXdL-6uSrJ +1wW6Q(MSD%;?^^m!t#=oGZv>?QS1TBJoI)BGsXQiRSFE%#-ZCNxT1L3QqGBn(OmGGQvVMY>}PU@=xf% +ns?xRFKj&TJSLTwWVz|JdIDnNsfdv1v7e{m@4VSEjo*x8fGZ%tcjk8x84S2g?PYZz77bDaB+b#v#00b +VWJaZN&lOtrDt@;m6Z`pvCUS6K+d`1!4j(PHQKf5RK58CQ1Eo^f@w#h3WbEn3kk8Q^zDy +C0!jsML5qC;})Kp-BJ+4rpGk%){(&*talxgsLBdPd?`Jr$8~H9sOYeMHy*4i%HJlRH1ndYzF87^gJsK +g-Fd(|LeC1LJF^N&ve~VV)46{|!FJRo{7q7`5>1SEn3nhG`_Dic0MM{9MDyOL833>H;`=vO +FJGcfNgJ>*P_PoemMW`qIc+GHigQkKuet3L-<;y}pm#XNG%V+&OGQ +*$R56H@sV)xMe`|;zwp@p=>GV0PFQNhN3!Lo0)X=&In{RYQNC`F#FD5{Yr9>I11`+0asCj^S2O?;lGWbR0=6 +17>v0K@k~9L5N@p0^Gzxc{j5yGXy%EUCsI*u +lP5^I`X!riqsT{k1m*e{&$~Q754xG9#^9JO;o9HE3xgNY69Esi-dET2c*sL@CTFeJ8R&Tl+S-Q}oOOW +9F2^n%tFQ9pJ#-vCusfVkeQ%8!x?Dtx$UD>%2z>#UT2Uc=GJk`t}VqZOoc`ju=XQDx@c`jkn8WG4LJy +O|J?!bG7Lzg4cNA9*YL~g_b6p3}Cs)l6&pDOc$5xg7^LX&j0J@VQy+lUQ>H`H}ojwodMo?;xepQ9nHI +9M0m!Vy!TQ^e|5 +@#IoI6i1+^rSg?e6ERsARaFZOxM<=os!+Sb!k85FIF2jEfd_0)J9VtP$5iqL)=(K=4ou0E5%Oy1TuI3 
+2D)PQH>UVI_4L=x5)ihkla)+4|13p(274U$qxqZM{$k;@Oukx$INsoEVf1s&MzFC3_3+GKk_*x|z+#wMS8;GGYI`v2bdU|xN86*O>`x|+yMj`wXw@k +yX`k;RoW2u0CbtK98~YN=vzc9{BVgd_@o-Zm01NjwggAud;BZ_aI~{^$+;ME&q!Ed@J#X}BoRrlq?*v +Lsu}#~S%LWg?(`EhtykljJtn^jR|(X|rSal8cvQXViRKiBcl(Lzgf~Z=(hOmVnSqu#{6|DE@vY956hBl~yv?lQa?DV19_WV3!Ig} +4SRH#wx7AH2K!ooHW!(8;}=?V~0a$f@72{jj3lY#W;Zr8buu623Gm3Z2#;|6bNfc$qzKP=9OS`kPWq` +xmNo`^SLHgVzZ6Nuwe1|#$3s49(|49_q2qS{?*BPgsqEfyLHup*BC=IPT8_oFTZ~J?YDoO +b?AIh03#Jql>ZTuxu +{V5fsjOG>vBiI4@HtYZs&#%&eV-d4ovU60G(6r7~oxUjdw6U;$O$jQssJ|{PwL+zf3tF*{kdb1KxtWU +pQjZp~Eua#gmwj}sXd_C|U*3d2gq>gmMM#G;K_Vk%_s1IZkb=yUKaHebKoZtkuR0sn1VBGg4YAbF@E);1X_de?SHu) +SzJwT{#g{ae}LCtNQ2)tA&3zkx3W1e$=&MQ9ofZ_O3;-Yj6nx{oGaX_sr#R?ueXt*Y-@cUbg3<2W$J| +SxUQ8zj<^&Ci~OOzOwC4@Z%~P>$;wwz&{=7w%R(7RO#+k2UkJkyCPTU{NVc|eluWonhWhmHN+ZYqqD7qgBXC~H*xO_CJ-Jp>B29CowsszB +?ggy{v2mfN;qz@T6o)myAaeEJ*Z1^Mje>FsjD-#0V?{@~v-{|it{0|XQR000O88%p+8000000ssI200 +0009{>OVaA|NaUukZ1WpZv|Y%g_mX>4;ZUtei%X>?y-E^v7R08mQ<1QY-O00;mZO7>RwCX#E|1pojS4 +FCWm0001RX>c!JX>N37a&BR4FLiWjY;!MPYGHC=V{cz{Wq5QhaCya6+fw635PjEIbeRY2iXu{ZB$cfQ +TP)fb+a-%76i_gh#@6i08r6&pIA5RBBiq>QC7Y)Rsx0;NoYS}I=}xC}zN)fD>B`c~=+&ZVNIywKbEB8 +^Nf?popph?4;CX2wt)%%*Ul>W1Zpg0cT3eY@qiH5-#jnq*DO5#|wW@;$RW;I7qI?Xf#lTLlkY&w0-=T +lp<9}pyS&3OG=}A_(HmX`sC7-47GF>ZaNzcLedGTPR)hm-p`{>NjtA+~k#Oi=(_r8G^RZgn0z;6^x%V +iBMS!HX=bR~ZytCy1f0YV{Q;mYjUU}dwaNm&<*Y-J2kO6(N1Ggi;N0{A6V5yPR%WQ7PASsR_NGK9*x3 +aEubh^RFkaV+)osps`|y~Yy4@*dUP-b9+EvKFyp@zE^ +2_a_~J!^!j*m?mM>$L|>IA}pXZ=N}^91>xz&XfQi`W0cFc52dVJh>S?fbBR~ZNR{*Do^aWW0>AHNW4PqYi-)|^u|p-z!wos$LHyFm_!s$X_AaT# +)D`;op6fpj!)NddNICA2^vW_N^fX8{a#R(-C8yj}r=M5+-TfzZ!-KO|Fv3cp8Cz0N7DHI#0k7T}Go +c04rn^eZ&V%FT&xFsl4zC)+cNq^~aN&Bz}L9(#3c +z1(e*{dBIXP6-%sQ5h~4OqM`?mjA6_QuZtHbCjeH7|cnVX!^JENo4kvbu9SHV~q85ciO$Ql)3V&Zsqa +7d(qHqYTsfW$`%7NGEbUaTjIo)j&)w;A>fkMM=1s+c1oE|D&%}P;y?&6g6PCde7(6>xQ5V-1doxCvt3 +duBj~#qqQPZWwS*t~p6%Kl>WSKvpy-C|;T5ft<2DXu3Zdiw&M5%Ienp~WM>G+JGfVUe_ +q^$q$-8<;nrn?V*eBA4~)!X}TE8QN?wLbRgcqYv8{dU8F@H-4pyQ^5~zizy%+}0!LF~Hw+l<6h+%da5 +qE&iJ$tugH`_k(cVRs8pL-wri5@Y^J8^1UmK09S*m0=|A8w(;*SMZIr^zspR|Rl?p`~=uXdsN@i_*!b%$DqdXb2mn}LpXo8z +ua%a@ky%+W`TvZ! 
+d;1cyg>#~j8(MiTDKJT3W(b17E${`+1Cz*5vs4?INtw4}pL~rPk6WU&7)R01Wd0*TT;MD_%kpx)XW2r +#?`<`L6eA2~M!<>#!{Rymv&*U@e?lVNaEWKbGH3@OH7o%VB#tY$R=Q{q9=BrUs +pS-Zt_jZ40oiOn#mV#cp9Sz}&pKUGDBWy;l`}8nZqW@Z%sI+w%7G=U?yJ#?kH3PZurw4`v*`<0|XQR000O88%p+8N_aUx)&T$j_X7X`8vp4;ZUu%)`W@SGXz2P9 ++wuJK%|DZ6+3jGR?!+g8)NLzN%h(+rghC7Z@GIP-fxWD)g@ZQWjtJX$%iq!Lw8flrUTmR5chn7MOeo> +1Q7|u{BPG}&>}x5pp&iK})anG9#0+%086;(N$J&KQ35K`mo?<4JWf=)~y*6%QJs1LXr=IpO{`##>C23 +g4*pIUkK;k>fz`tECN9--=!S#cqmpgPHhzjo3H-8=ot%1kQ{WE)8zuY{(uHh$KT{Fg_32!kvg5?P{9* +Y$NT3K=y;&lm)chz&&FlP5y%Yel~Ov)wfMB^C(Ou*tvI=&~xB$Zo~hY~v5uE09Vwx~^OSJ8~c;_t_FE +MWNyHdO*azetF0ss +Jk1^@sa0001RX>c!JX>N37a&BR4FLiWjY;!MRaByU4a&s$>G)$KoUT9-kORk$R{#Lv9L!twYP;Yl3I$3gf5%w6@TWp*2O{5 +O@P@h6qQ#cO5)q(-Q+aQn_g@dcBdb(zqcg@13L0gVlDdutBCoNl60Tc{$Ri11T?g9c`WVFPNUOlFXIE +qQ|D$FW`yD>aH3xdt%REk$Iu8D}bb9Kg2=djFlM4ilrw9@^MZkZ9-mSS5c9uNVqHATH`zzp5J)Ss&u9 +WzaNTCpYJt$H693=BpU^qPi=!DR$GoOYzFx~=hb=9Ey^G=IEvuPee +hNgAQH-(qjW~o#>>EG!+l0t0L}pX#(F-D)yO=dRRKA4Gjs7&cyFNQ!%n4$3Jm?uhJ1Zs*N}U7a6AS(S +Oof6tJDH6NE1_AS+v+a5Q1fjY9jcvDIrHo*u)j}SsjI{4C^Y9ibMT&{3SAK{Bg=v~$_@6aWAK2ml*O_EzL$jcert003ME0012T003}la4%nJZggdGZeeUMb#!TLb1!3WZE#_9E +^v8WRojl+HV}Q+R}8cdmH|iJW}60eu?14>rf^}{i**1u2m&oFjU-|zQY9(7LYjZ?kd%ChUrdUe%QgZpSOBG(smkgGF?*(P%p@;gjyrf5 +U%sY%MIkt4FPsKKpN(F~PFbs)3fbX?8$aD4lj%5Xiffm=j|j%Wd}bdGqwR-fmaHYzTIJ;Ne}8mbk?Ns +1hD5mo}u}%HjT>gBN#eVt@?N;Qc|NAR>KCif~yM3CgX5RKKn-h|G+X*rM +_3qsbX&~>66Gx`s-QB*wy}b+mp9IUXAZc*}WhZkd5~nS4(>k?v2v(hxmQylx{$csIlbCn#mzUlwh||= +n69pgtL1e<2*PQIB+R9j +yVtXf`kxQiGn)vG#w~%$c=nEl52KAcp1UITp3@p;mPnJex$>r{~@d|_E>8Yu%=3D!U#u_>NWkq_UeFF +_E*+P8GmzezhCL(H`s9@JPkt^x=dZ5u}$LXWsi^EgH0 +}V3vg%4O(}>_elP7qLvfS)J>DY42ww$p|>akO)ATC-xawxC2<#MQ8!$Y(Vo!^qwXSCZepiGgNg#&M%XY_T=l!%wg)^{<65f7?#= +i@ylpqZN8V(1Xfl@6aWAK2ml*O +_ExsEiLrAA0003M001cf003}la4%nJZggdGZeeUMb#!TLb1!3WZE#_9X<}(?X>@sCbYW+6E^v9ZSZ#0 +HHW2>qUvX1DOhO$cPTRCK;11g`3V>qOw{^=0N2bV!ch5a{JUR +%1UwO%Era7=OgRQKuVJq|+KK=3uUR}I~pJh=oEv`f^^bS6=m5N6}5R68msAR37=A&gT3(&htlq+aQe- +lcN;bWzRELlE=&rOx{k)2tIDo!@M%w!!`EWKtcTo`La?pB8*z`svBy_V%)yjBP@Hbc0*eMnoYa-Pt_c +Ca!loRJmzCQ@fal$zI7&G9oSS-}rSH|~Z7OOq;RdMWDy5u(v3k?JO4b-GT_0GOfwF&QIr9@aq^zlf6X +?`-(QfdE&HGQNZsIT7VjCR!$?%%l$8(WxbBj`oZpj^jlHr|&`OdTOjWfsbXrgGO;^D$o-2n!{4mN`u} +?D4XI6!D2HPooVV4f*}4XMHv>X3K3`Q88M$;ETYc3#Pc;I7pJtfF@fSa$1jU%ON&m|=)6ZpY(~w($g; +Y$6YU!MGmPWlSBaJsqNIN;)eXxH3L=^X@v4?hrNXFRNVc_>wKFzgq-%5#QidVNfn7gi4@Sv?>op}{F0OfJSlTEpizJBQgGLH>aK&LIWk%Ae +c_BCZ)*yrerl7&(Oq+sZT4aE(WgHDmsYnP-_88qYC@>y-2*K@La(frV#2eG04L1d2p7SzntxAFjri;j +1Ncs?{zEN@Q3e?DTSUEZtB=qU3Row7OdRmqHJ!Snkv +UOuo+`QZs|!BuezsZNlKWRZ&)8|EvSfw0n*lGm9rjZGm3U=)W!MaRp7GFznPr5lg?qg=ExY-O^Nb{LM +ja&3aFjZyig97kvv77-=y4Bc96>s2o-`Aodk%(W!lt=tJoYOuiw3+6w+Kh!!EU!O`efnQ&}n$eC(yDtH)j>E6n&hYqhV(!_ayBpIeY4vc51ijcj@BL1`LIJ8;Hxdmskyg +o41$A<*VRs>WSP+A!Zd(6?#!S58y?wg#OVWM^D0-JO;q?M}%{Jk8mE7%6TW1ULX$4Go|uo@{m*>5(gZ +U(eowoX7W@?O!oEguOoA`+^o3nO3pJ1&9@en*q(LdX;-a-XnI&IkMs)ns0`C$zE_{d`?^~k+u@OV8yz +Y7Pp;+srov`7VR0|Gj?Qr+EG|a4Fg1PS8K!Xvcwjc1{CMi#wf1CP6DdQshgZH>< +l^&DS>pT`)i{kDe2r7>|F&f +E;*-uiD_ehuM!dnl25Vkd++0~M}Du5Zu+2rtr^X8gy6rQ9b9h0KDoQz`2b^Rs*l`(La#tAgIj;cyW4i +&UC?qw6se}7OB~NEGY|S~FIf*~oi~*XcnFGJiAPX|(utSJHg)kn`$t7?nF5xjgEP^%rhya>j9cyNHsj^Q)T>$cvCJg +3u-DTO_{>E>H&K{hq~_|u2jHK3z++SG<(Y7+U=*3_7@v-rAJ&M`LKpXrd3U=ps$Pl!olC3;qD0X4@T#gA^*) +GS2O?X_+UEeWkw%|uX-0}&W}d!gyFzEb_6}_g!^OGp%V^oGw1pLHUEBt_kN)B?R^H}Pjc%)yRD^uBhu +SF`Ug-;0|XQR000O88%p+8Y!f}Mm;e9(@&Et;9{>OVaA|NaUukZ1WpZv|Y%g_mX>4;ZWMOn=bZKp6E^ +v8Wj=>IsFc3uV`xTR(AklsRi5&e6Qem}?(2}xB_N#Q#GgcR6fh6>v`zORR1 +}BWdNSN4?F@`8ibIPKd-6f)d@8xQ6TNjpYEPn39yI5`Pa)S`%gegVwhc2M18s`?bl*WcoMiX%P1>0R% 
+b*oy5M|hV{{|Ie43N*VWg)_@5$0N1<-qHt9O9KQH0000802@m7R?xP^j!Xpr04ojv03rYY0B~t=FJEb +HbY*gGVQepTbZKmJFJxtKa%E#-bZKvHE^v9RSKDqIHxPZ_uNZ6|Di!h?=%Y{tFk-hgkj6pcA}xYoLG6 +wtHWaxfxpD+U|K1sLFO)15El|HmBIkN$&J3OBd3FO^Q@0AiZ-fWAZ4_=%dk5YaCH0y<0kv3?1PZr@Ct +)=mI}l6LdZ@9nSP3tUc14zx!HjGe>p_|IngsiwydM(=0v(6AAqX7GsL{QU!c?i-xJZ +<>y?sMg4)D0ir9V~a*qUj=(8c4!^R^wt_-9gYh9aane9WeLJT)mlq=_J;tk;N8RH-5fkR%Dxf4+7Kqv +ffidVvh4Qm{fE0ZH;?xZ@5*04{#??ppYDH$1?0)i90-7{Qo=cSFPqyDoli0UN%uLOXFz`q36er-tfW+ +gD&59Bbx_S27qb7(Xi6u01)qB8f4q7PZzH^LuRX49aFNxxg4Dt!_}HRzVvX~N +>ed4Rl^=q^`MAww`{g?L1Lf!fSLYCp=%hYEg<1^m**KI$YQ?0mSN0wu&>BPene3<6_sd~(rUo<-}tH2 +<9ypLD3qhU8~Mrn4@=coKh2gGp56UOLw +weysA*_kP07?YC*c?EZIBAM1g0{g&_Ak|Nt!iA|u&OE12ZbYkmexYVF5KTi9X7OlS4vQ_Vm01dPKGUp +k*^6Cy+>z(S7D=>;?Oy5JCFvZ;5diGViFG9Z_mO>;X1CD^g)KHpRAJPgurl7SDQIevvv(^LzO`7clvr +P)bO42jl5(DU?jq-DbNqs;p`N9EGb91{O*+rXu3BA1Gt%#iEKO=aiN{V$b&BV};Al2S1Y+vhZe=}f_H +nQu*(B1r!O{{@Cr}pfabsFl(>d|CGTcrSoFnj1S6IA2^hHRnr9O6{M=ys(`&XzyOD}gL=a0iAtv!)Sv +ubfvs?jMK?$;X458i>bp($}DX&3XN=|J&s&5p*A)Z-ZWAfzKj@`l#ZjFG|Gcfqfl!S10>!cpd;>b;FOR+ZL{_DB-J4bMc&-IE4AfdpqS!+fbp-w9eR>gXlf~1l?| +z6hCFdPQ9h~MNEfk6HuaB?U3INg27+43PQ!%`{nDasHUYj(0GMZ|hZZi= +1)SvTHpslDu$ad_GwMPnII-fjVom^x!F6oFpvLkm#hVuib!^qH37a?!U*x+h~W{9KxxJcp+6dePlG1q~IONmWsB*NYJ4dslL*7>Ke29AyH9=irOmm4Fv6H+3D(bj37 +Wt7PwJN9C<$rX=`wQ}qlBf&O{4AOb(9r7BygOan@Vk-NpN}D33_fB1Ndh;}7%Dxlt3-%>Js9o~P +Z2w`}eip>mVAh;4?mO79Kw(-7k^Xu!i^qY2BP>k2 +jz7f|{p6g)BT|XaQg7xL7Dful9dGV)_dJKk#hwvpWry_ox?1XA@nhHQTnkBMKrlVF*-=C@237LYS^fc +&AdtQfm8YW0GQBS&=ek7Zxe_eF-7-yCZrq2J5F}&7}-$DH4<-=@o;Tc2^vFfYB9je6-)9i0hO9KQH00 +00802@m7Rwj=1UF`z^0EP?z04V?f0B~t=FJEbHbY*gGVQepTbZKmJFJx(QWn*+-b#iQ9Xk~10WpZ;aa +Cx;>O^>8D5WV-Wuox);4M92VsU=n$ZM4!zQF2JKm(>cnp%|P2o7kp1t5*N_R2eXa?w(bYgbQ|2pU+j# +)LM1Se(8mLW}@qrw(OVQ3MF~F+tu{37kyR;X+fhb>r@4;$tt0tw8fy?+3E*fHsT0RvQ{bk(S811w{AJ +gK9+_SuQ+Xme3{+-gzHh!=D36JCK%)5bz95oaquMJMvak+(Mi%_P|8Vgm +j{m*+yU`X7Y9zo}FpvHFv48M;5VnZ1h}f{!HHte*E;=iO8oB(pbK(F%#i2C1X+B5RW;F&X>pY<=y!bW +wp{Bx3SmU)Itsk|1=a11mRYxR9z=*Au8`GaVu!Ql5Nm+_cLBc_gbR9%A?_?*{{m+0A{@JK~~!ooXMQ5 +fyI~`L#^;y^3!M}{~+&m&Sp4_JEgxYk*`vYLcKh~2D#%SnPYet_q*VM{6ybG*&j;C*c&8MSK2|#(B^v +Zuz3oogyn}8VxtCK0*&A@S$>B0p-(Se+77+t0Sg4ql5{z3L^_`kkLS4gZ+d`9DY=m%(n&$AG{&OU-Z4 +|k!h~nS{Dhf*#zFg8Nl2Y`Aq+B6acThCqU2?R#BornMWqiM+hupFP{f~XGqlCwSxx{q#etVEc%*2LyX ++b4EOBre4A4RBtjBe-?Z?1Lh+pc?3XFvc3Gj$rt7OWGG0tq@;t?*WQE-gO2ivTt--RZXldz2%Y%w>+X +0YjbU>}rbAK(A#tdJ#6z$8FLlvxWb673m=VZ5rCXNWAE-#ZwuQhH!dHIgSR75bRyRckQ4l9d^W+Sykd +d*?c3!mZBZN;Rbk4M-zH~`2gt&BZM49+d?(Gua +Rl>BzI!rYgZ+~$vG;gY7h(NXz=$Yv|0sXMNlV5Q{`0sG<=)in{W&cH$GkfL(MR9S#67Ig!JhP$qqb7$ +_Jr)p4m7zCn6eKOg;3PmHzh7?OKF}&$<@K*gsNlqsf_F90^9p51~+nJdW3YjS&)_c504{;mK=St|fY` +Un|>hxgrJj{T83eqbrpKtD0*GkuioZsMym07ahuCAVum;#BXhjhaa?&6;cZwtTGA3qpqT!u3E+4(sUb6)ySNySYUF6G*}TR!o6%_xtm&5&IiZO9KQH +0000802@m7RxA2Smkt8}07eJ^03QGV0B~t=FJEbHbY*gGVQepTbZKmJFJ)(EUuReo{Dtct| +tu$b6+L2bk?O_=FqNF~>K?ETJ-X#APLw2%EJv&!X@J<59pQV +*DIfctm|WxD&Gjnv8DmW{7W2TxfI9@0@dNpG0$;P-0LpYGS^)@X5@?q1<plR8p6Lbfhu=d4GM!?!P}= +XB3PcPody=k+yi+Oxj`bf5>By%MN5K-1|`W!rR5%J0_*kr_^sKXmso@=*yJ0qf=EY0YGF#)NTuMpalRK56}gfYH?V?Ob{xy&-l3%2Rrq +KG-OvP{MxBf|C=_hoVjA$FR1xe%3^iR^j%J9V*x5)^6uxWvXa(mWz#XB(mmB9PP3xV_^a- +*?wXsVFa+Xl+3^W@+-`2$c(0|XQR000O88%p+8#v9NV4gvrGkput$9{>OVaA|NaUukZ1WpZv|Y%g_mX +>4;ZWo~0{WNB_^E^v8`l;3OHFc8Pz{Z}07i?t=DNgxzM_RuaJgRBi*_vDx$+j1i6NJf$g`R{j<9XDYy +Oi%XR=YD;4wm8&ETgX+xa}$X6tx`Fw`1wuuPv&HTQmX^lQ!V5UI`c{xJA(J7#+cyo_1Ev%n-XtHvXkX +z1jgz#g#{!5;0fD;5z^Z~@6Qh-AdM}@4}^|x`6u%Zn9K)>?c=hC#u*>xRu^0~#LcE1G@A|*pA~1*;fl +zuF1WU08U)Lir`PX4Uw&-gmMDwnQLYZPsCcL|f*DZXBwnN&^Ce8in`4xIrGy4SQ1B91W7QUGV4bjFmc 
+`&jrZE2IbdG%+gpj8_&p&{*UgvR_Rw|7qY!0l#d)J!hwmLzIJ&-1i +a@W^y429g_97lsAAr!GA0V%8LIGhu|7B?5qd6Z-)kd0Z>Z=1QY-O00;mZO7>PfWM%2m1ONc|3jhEj00 +01RX>c!JX>N37a&BR4FLiWjY;!MVZgg^aaBpdDbaO6nd7W0@Z`(Eye%D`d&>ms~iu}}v2Izpa3)TW%8 +Z0T&Aq!Mmq9S$@DUg&MBgp^0?@0Mq>~-A?1XifK@4mbH?r7EZ#(Gj-T5nA25)}{3=E*gKSMBD%l}l=C +I`XSt=?5B*KPu-l`qX=6v}luP(yKl%lm;};^0HNVCzM{h@bfV{j^^^xbiMFv+uIM9zkIsAPLkr{%f-9 +TH|K9ZUKE$-zg=9@8@fvfpA;x*79MMUege~cA*}-=fY%1WzZv`dBR2Vw7fMIx`$W}iK4$@{5N&IgvMS +`GmzrH1SUsg_Mrke|W%?Y={*ij1pXT3;(vx0T@1~pM+E@xl#D!ZoDetYRhSEWl9Nq)ZSXIJe# +Vw{rIQ&)QBq|G-9Tx&IrDc`VTUm<1NisDl=LYElmaxjP+JaoJ&TrmlEW{iNX)@}D)~mjibhYvg(#^;> +QdtU$5+$dHGodT$jPn5x=&rF=>wNN~?Omj+xU9mu9IzGTnmJLba@PLG|DVD`}iTZWwQgKlP6BRVxLQ;-sX;``(?+W)I3Y!cxh)t4+i>xK^SRI_%3Zv(s^qA-`fSoi8|QDOl$P5-gZm0lY~VU`+I8fNxm +GhHCY|wKane2{Q&H7+#GD#BtcugwzXVjqdQAuVpb;TG&+)c^iPgq4YZR*`>5ylO$X8DTVc(TW=xMNBE +dH$#DUOxejl#1N>3;$$B?3xWkWpn*k*c?5K?R#R@um +PEinwl84L_HGY;$r-Tg!F91UjpopPv8@{T#LGWlHmYnUYpCg-*T`_sZLgzyDP(jU1!+w74vrBzFyN)S +jwK&);kd~Go#}VI9GKLqm50HP5y*~O9fUkI+b1zSJ1`iBd|jNi&_6pFwE6Y4`%2q6A4M*$JaO3hlxnQ +BbjKjy-5hoL^!;5C*41SCOmSDGKeS**r~kcUc9%2}|271`J8(EEuLC7<;2jjwY;0u%rMApigXaA|NaUukZ1WpZv|Y%g_mX>4;ZW@&6?b9r-gW +o<5Sd6iggkJ~m9{_bDFb3a6GwaRAOw7msZ$xz?=z`nb8$9Xcc8%1lUg{^kOWDs-L*(sM%6=7ESniEY*>BSMlzq_B=#-V*`B~I&WRvCh0-o +B;3(ZUEcfB<$yoEjIO_TH53!7I$aopK*>GN7Xh-zv@Vi#6(m9<_fmz8FNX!N#L!sis6yp`4CIveTAUZ +XY^an7CmQ_N6zgS_6x|}h5zG81{wiT>0`k7~IelHk@(>vx(G{XE^F +j3c{^b!x@6Eih#O2-;&O5t2eBD%A-70;Anx2^MxJJ)Um9%5@)0e%Eqm9(&B>@Eu`vZj1zjpClR!In!l +-%9=M#he+-v>~S)_ueK3E6ouc0Uzdz=stZA`_^i9V_WQ=ZeU?C)>tMTFI)(~B0g`dxR<7NY{}jkEyBI ++nktBd*guwo#JC9D?CmM>fC}j>juLVIuR_cq9ZVreTPr;;V7u6{vb9!d&szAG?I}j{-R6=7Md5tkbTt +3I5vAk`G0~NxYDUbMH743*QX^;;{vdlnL{O3%Vb$)0G1RtzgKJ?$P^^rD#06djHy5V^3S`^81D>CyoTVhvTXU+@6qt*0J4A4Ke2>)o^exrR$IR) +qh7nkG1>KNMnYVY|sqFf$G-4pUHNo?o-sewitS@?~8zkv)R87Adb43SGT-G;pBePp6@WQ_sZPS +$R&XyW<^&2|s2xKE(tV*Ft}D_kL}&=WCtX$2-NlY2$P7{Fc8{oWz>j0nlYZ5$7I%v31jZB;GIV;d+jB +s64E_PhVAUEGY|ttJ;PC5v!MKOsE;>ewD(mAE0RkOyf?ATHH~{!)-(|>iI^LCbd9pS}5mXciVvs7(m8 +Zm9Ta(y4LptW;?LPhS&I0Ad>M1t5R(Vt@@mMmJLp_%GQ77-DP*tZnJgA|n)PZD_$!gWQb+X^w*iG$rx +=eZ*#7diF0SKOb#cq1~v|JJfT@!r<2q2h5$#dwWK!-+2$f-^+yx8WB{ +2dyPBL*(Y=AwefE7LlWuBT*;k*~Lq8w6h9(`wM`HvhGIQju+17J<#V!bEy3M=yncbW86}eHxh*#$o-9 +yK82A`V6;Kr0(250s +=4EEmjZ_WRZQx1YfMR*J6m10NyJjvjxM7?iedzlp-Mu{Q(&1cf6Q45XS;omCMCl)<8aA|5C+Oc5lTP3 +W4YgOTO?txh<(Q(is+&-4j&9VH{4@0WRj&D{m|1t+34w75s;!ETJGGi3#cQfhcEyjxirMc#y&r!h`G%#Ws;M;U6$EvGB3A9+GmJRx#=EF2adGPE`JZj#wcw)VcceEz&>SBUNoMXNJo +SI;E~PF%da__4D@y#Tf=IT@h|qX+B1uyFC>{D)woM??g09(@X%iGaHi=vuc%^ud-cFEM#?TjjCdxN6q +C5I$%YB?7Tp%iBA((Wd#sI^W)JA9jD9zqt7JyO-bp@Z&$D7R^*S%~Cj)tVHtSd{R-JAA?KZptiGH&}9 +|qwLX^CXm~922*`(u2gI54E*it +%3iorujtePMRx0O*97pBhmvq0L^$L>Bg>LbK+Xx_ypSUo)BfkzDKtQlHH0pD?Dur$Wf84L$|6n%q80B +v&PDMBw~dPgc&|ocy>UvCr04o*%YY`u$k1=5gStg^<5~I#VOn4@u}OXn}QuS8Yi*{X_1Y)sXdv0z+i} +j3C(_(v?@uY9C)Zdp$vzhA!qX*m;q*#{pzw?ir8wpp)%rL}JGv9usK7 +Kw_#0cx(sb%@mQ}lSob1AUf;y!0kSy0MytfER{qXMXr!(hw@ZPN{=__T5-odPAwm9Zoj((y+KYtnEgp +Oz_%hS#y8GwDu)!iXug>?lTO}lp|G&y0VkT^F4W-;N3FkfM2#+aP!K}lS+ju2GSqMh}1|r>NUWzgEK?H#Frh9d5Yi5sj11k +kz`+I(OVpf@^9>%B$1@^?EEmtu;%_c&mkpU&5>A;KLC01wi_Hy<+an^@8;mwH_J&P2QzzFpS&yTBJ^Y +wo{H)TMTGYMP)h>@6aWAK2ml*O_Ew|SPc2vh007Ja001BW003}la4%nJZggdGZeeUMb#!TLb1!CTY-M +zLaAk8YaCwbVQES355Pr|EIPzqf3(QAh12-xP!woIlP^3mpHIODHNvG?t-_=@d=Yz{ja+mME`@Z8Sr9 +-KN|H%YMofw;PFO6n|Qc6f-O$zuY^$uj38S5d+GbPSHnao%$wHH=%#d0r|W2tnT8tXD8y*PS8K1X5r8 +i!v|IGK&#Ch!Q99rlWn=Xvjvp7ksGh4Z!xjIkvGJCu2D*}CY_E>`(XtxfVZ`F7s +>?%w-CIT`azu%Tv$PDHEu^EtPF#VssT~WEKZ*I}$VEQr|e}oMNENIJtJ7|v(6YZv$XibX{vK0wP7jwJ 
+W%Wn}UxBwl^Q_%-h7Qp=ztc_9qY8aGGh0m>q$mX~s39L%!3JHM2#%L$E!ip_$j5VA&q2y%c4wW3Z0w} +!RhEuCx@vvmd9E9xzCkzsoFsb3%b)}>{_9BHe;vrW~kY7+s0|XQR000O88%p+86k2`I1qA>ADGUGr8~ +^|SaA|NaUukZ1WpZv|Y%g_mX>4;ZXKZO=V=i!ctybS}+cpq>_g`^P2Ur3W+Hw}8&5!|WniT0~t%&gI>1v_+HZXqt;be1bgMGjy4 +PO#Z*RI)so07! +)Ywtc8b5kkV;3LLHm}v{#4>M`@M&=`wS9b7+<}KBOD~ez(la@LgP}6vE56X}T95?cAv;^G1Up==BD3s +hnLyct`?2bwZDU(m@HGwNTH$a6^Aeb>A_#G2NM6(J?2BlVaOuGHI+_=C5zU~kmgHX%1gNxJ~Rs)v^&v +2c}n(7*{N{yR}V4a}0QM!dz)xBV6ad~HLXU-OjEw{VA#O&KeH`tw$iz@yvg(??K)fDw|d0aL>H_KwVt +UlDs538bSj_cDUD~i==MT_#ZSgd)u*oPwB@NQk4o*b=<_s2dS_5k)FIplJF4#@+T$EzgdGinFdGk(qL +;ql{E_M7pE!1<`=X>xJ9`F#Ctv%dOxvH5=a<@W04I>`>s82yDm7SBa`j4+kL^TT~5Q1rv|jLq+l6OzD +-6)8?f+uY>vxQFLy5z`57(~vs*yJDS%^f!y3WpCM~)=KAaa~mJNQ_+XT>})z;PoIgLJtOHo<${)iL5f +z`uC#`G2bmE3)l(h}aj%(A2GG$S +q_q<}C)O@Yg^sRL5j`{cXL@azMrbHJ*OqLj-E6?C2IbNe7cda@9b?1(UR8zVXmGcTR!0ArT}pYD6u(1 +(L*lI-nuNJFT{wwn_^jQy|2o +mF@jjeWgPS#LpX-yXk1fuxnqGX_~KFW}kER1sa($93S1&<{!8D?a-D=;C7jjbBp%eSl4Q}dCjnZEO!N +>xOdm=zBn}&WP}hdqqiF30?RSEmpqp773QzVNw^xOe2HepRq(Nr)u!4}z3IHmsOzIbJO_ySfZ9H+V1d +8Kfco7bdrFS-5sx9>yByEbT@4jWN65L$ccdCmgO1)v%_>#1G@+*A97T0P1e_fT!Q)*+_SvH0wii@mPw +gw^GR2O?+YZx{hDpPbS>Y@YB0?GhmjrJ-?3#A)KG*`7_2C9_%sQLBLl1y|eu8jkt;i`HRWOf2us8k%` +woemzDO_4vbje{=RW>ChdE-*v!}4ftwY$DvHcMcvr6nZWxf5h3zFy2RRN96~*ZQa0F=?l}@ARK_=La_Q)J+sVnfa$_L8_l(j9B%*`4#lmm3 +UgZHE$D=&Hs&y+JAXAnQa;4-4%tRfh#{kU?gMDG;+3|tH1s+0Dh;`^HA&fV*f>#`7fFGGmtM9dI>#XJ +_*k=zfhKlY8hzfqFpD-LrgdNnqfZgHc2$4}#$kd*aOo_?Bl|aeO5sVo@wFqti7l1_U&mU5-?2?yk6p{!-Sc#@JML(1TmZa3s6e~1QY-O00;mZO +7>Q2YVO4#2LJ&66951m0001RX>c!JX>N37a&BR4FLiWjY;!MYVRL9@b1raswODO$+cpsX?q5NuA1VWm +fVFEec*w9UP1|5=Qlwdj!7yZ6rffEnsFGA%qv(I%-I02;l&-@z0|_LGynF8Mx#N*CFO{}z$IUL2>q)9 +r&g`L-#g<9;>GsdJ7xDS)i}PPEuU^gAFS6kJaCtprFJxk8?9vL&*O{2H-=u-}>(WY9@ND8_m9mUtS-> +P1i^Qiy(05Zye+`P?NRxA$>}K_w0reocFqUT-$N3jpD?KK$vNAJKE=^KP)Pir~WLFh;W-^g!AOC#9;7 +4-2$@Q`rbx-K?BX*^3gspVJK#x|NDiIraWKW~#(UTwj3K&vFc3`Q}=rO^Pok;G`aaOUM!%Fz`@&GeSR +9=EDYne%VV0&qIOl2FPS&6o@d!Y?EMy2fS!R{2PYvJ~TQUjb!CXd+dPJq=QKNrOYV4-DfipRi_3WnD_x!ZHS>6;TxUY@0f4nWP=oj4dwOSd4BR^%p}_9wg +id&BR59f(++|rk#uNiQ?A6+CQC00vDP^R92uBfx2_jKHUm*3CR8k1W@&f?X+lnFAHE0@MJUQ!Y~`Bnqg@gcHNm`h!SN4TB|83# +f4-FTo+9j6?(w$l)D9rWA=xB^U}k2TP|j{9nh>RWD3Xr^`uq0U)PD#|AoYj2VO8I-ZSVqmxNY*Wha$* +h?c^;r93_Z%ZfYuAgO7%xF;C* +D5{2$y4Rj$n!jwMcwf2Dhahk2Joa;`6;w5QXc+5KIZjknSfVYl6l%fzSeU8uFcU@SlYdIU{L27O(w{EkHc<9>_Z-I<-I66)d`tigXz{MZ%wOah75qqgB4~uUZKxX4x@dc-c6iLx8vcCV_lL-FAnqBslYA07PaA)M{PRXa7 +=`5`{fdcsCKq7Jz!qg1L;IQm`SP +3YSs8SZ%!otG<`U&KglHY2Cm-!>+I-kQ_*PtcMc~ZYM(x6CN$SOZzevrC9ZFwJ4cjIZE+TOU$3EkfHs +$W^Uxj|jkuKHv}nLtF=&cadem&Tk;eA&o2o7^4t0;=i?5GFXLs$o@re2T3jDQh4`M{-9d96`!p6#j!+ +one`>Y>($ulFm{TmwX8@O+B!X?)8<&| +Z#{e;)(ea~Wh#BgsZ^m>->FvyoR0^2wik~fmvScrDjRJ-WFfftV+CD2#?gi4($e9*vef)S1|}9xJZ_2 +4(#{$>vr@^1eC=M)kgVoH2aeThPZtyx4~8fN(CO&!4zc4!`vqM%s5JF0+iK=`>^fh%Omy{J*X +&s6Nu|KeOg7}UkchJe`CJQe@6aWAK2ml*O_Ey(_5u>63000{U0 +01oj003}la4%nJZggdGZeeUMb#!TLb1!LbYGq?|Uvp(+b#i5Na$#*qR8||Hdi +?D6kqBw`_dqiQ@{$gYCvpE46KLP9U^2xEgi{HF*RR01dn281xi^-8o!ABtRcT4~%ob_-DW%9>&)ypd2 +C}U_lHAA;3NI5{#U!7$vR2G*KWQ!L0xF5>0qAiZlKJ1yt;7k{nN$gk8SQRC%!y6ZBKjS${9`d5^v*Zo +mfNBt_DV@U{XcGuhr0`?da{Rgxr1oq9e7rBsd0tY3zSD4vKGOL%_;natyp0$CT3k4%gX_r}LF%T%<@s +Hb~pVnf`)yGuxF!w2h=$J^r)@qxXX^2o=3g%eS5(-uBd*hjdYb~nt#uWMYu*A}b$0AhI_z*?tzeP#+y +=RY=?6w05Q+u}OU%Ixm@m(49)T|)Y>PSaHWZ~wvAFm=$|UYC3HHL)JpIlKF)EAo-;?%c%lDL!pz@uZ^ +_Rya|@Jr4FCXeEdT%>0001RX>c!JX>N +37a&BR4FLiWjY;!McZ)ay|Zf7oVdCeMYZ`(%lyMD!jhEXz==_WlKdO}s4lQ^kysU6t9`U;&vti+YI7m +CzzNh?C(|9&&G4=yRvc6#mM6vQHTXJ=>UJ+qSQO;J{CQRJ2QTxD`Tl6pjxWnS2qthl?A`CaEVUhzE5M 
+5*j};hxmaJyxqy@Dx`s%VNz^Uhzf7l@f{>2sfl5WML<+fuysGo04xfqSTXSYYXvCW|f_)b{lWzB;SqM +8^roEFURb?$VJJk0v7zpHDAeUtFDz*`2690EsPfI_nn2dcz$-H)rQ>FK(`e!B +ep*#e$=3?3%ZyA1_beoLrw>oF_kBy-(on-Pu2eQLLp}*tH^8bt#fmmZK4#AW;>s1e-R>4m@7)ER05~o +^Q%xA(TQuI1@m*^MwKK)PNLa5RFD7G6wd#EIx_+s!EHRzya%NSr%n@`gtKp&7uiuN)QBEW4KfzWpZhj +ev}(QD_F5u)TLrec&t`}DZ&X~KiB!p)uq8mMNCZ(JJ}(j?aza)!d(D+_RNam +l%uiTrFk(kPuz68rnq;8S;b2mA3D!XPDMeM6IlC@v&>|c-U>7;Nk)Uo{HD;V?;!t9OEqKno6Q~#$ZO#H#1}sR@v17r2H}_m-l|~CzG>XS8BKtR0z1~Q4!no8SvO)F0#L +vqjwF>_i)2ZYz?b0gm!re=(P4Uh_~Yd8r^(?}FtXq*0di%PB%wQ|64`R>-d^zX4y+xcsvF~JN?QQw>& +=9$8@3@e_ZGhRwC$`VW}u2SJhj5snznZ?G+szA=3=Kv%RG&ax_TLp9Hz;Jm*C(!be?ER0f)qhkUNFVW +De>G4y4XfR9y%eOZi!(4j+EDvRWYxvIH?MiZcBQNV3S|-UA^g;}#LXrq+7{T0^E2MzIqI@qO&L7&Q_@ +6*D%3%2QS$NsKKG)6xk#_%X}Gy~y%{%$_(8qy3D()M}|C1jl@CNiPngZ?`N&U$-b~(n*AKcr@S8{^5} +LB{(<={nd`PupVNsIf&a@uPheKC=(P()#Wc#qoX4%qcd=o8yx@eCc!j;$boaC4 +NHu^YkCBLAvlTaI}UdbbXytSCOgAwLNgSknC*W5Qzi1W%u4pA)B-mlv1)d~iDjjn>;ay^MKAt{G+YL~ +HNF?E)#rB@I2UByYeODyt)T%FER8ACzqlW<(J|TPScdvs&cojzgv`ZoUHxEE%LIB_)}jQzd5fD@~)7Q +)f2jfcj{W18tcJf}=t1+mFL&nG+jXM7^7~v3}jmjk=aF?TC2aA+)9ImMw!{309-N255V>2Nra9flwB} +zZU}muuFar#Q!Q}9?tA?k_LrodVs9MGptP}E0R`-saSLyb=A!~{phbvMd!yZkv5f&4TSE!Qx)liik6Y +*@*@>i8HuUL_97ND6BAdSu|n5mlTMA+#3H}=nt14_%2B{8gEQ9jk^#jH?QVytqu5hhDQW&Cnt>K?ODv +A(1k+Xr%0(=yP|KJ|(EN#dfOV}5s2)S~&)lmvun2_w`-*A;Qw@e*WoSI--%fwIxww3LHF34|%vRDfEU +ahXIBsum;pns5p6o)4NEaRxSA)-V2@ZurUVvaTC;%u%_aq8?0kU;KBd4hmbhw1dW~z5LWY$DS?IKr(zr=-4}-H%pRW2Qy|R7Usn+zEy_<=18oJ%1=u@y;5ER+P9&IhKPwV+_>c9xCpoI`QQQdDIKBgRw#2=8DcGI7hZxIQD*0V +rs7fvvRgp+C_#OqiIJOTMSt-Nv{8!`tv1o6^M&K>d+cW%LJMs=9DlT4+b030(Ppzv!c2%B2Aw*Ta@k9jcV9z%W$r(?8D$cOdre6> +_>OPrn(6hYUdM_A`PNFaFIQ_NpCXz=r%i%WyLm#8kx81TUv)~v!QKMLX-_#K@FVE5^zid33giwC`T7% +u`w=9?>eQwo=Bv`R55_-K +dn<<@AbCYke5R5@WZLk1KPr++OUgP|~J|?v#7|PI+q|e0Br7LhA+3s<&JWL$|JuYIrbA_`ewb5#%Ujf +`EF$9MY4d#WC)s7_IP1VP&hNOo&nP|UWhKL10yZwo7y2zLE4pdkf`v+EH)4tU%pesjOp*vH$?NEw;*H +AR;ZWM`)1V6@X7bSw%Hpq&5p4Flwl>_!xz&Y1gYtbg8pSn?Pv2oLX2AAfJjG_2dBOh9pE@HE|&!SpTv9+rRmWFd(IDs`Y>X6+m{|4%q*_4_hx<_m$EmnK&Y`#r`Vo2#8Eir-%8q +!xb5<}Hcp0OZQ7)!gh#Xw;d%=hv!(MBt8kwL>OJDcsTVb?RIWKuBh0o=2YL=JQE=jGs +N(+#f^xLsw($-Nt~@=sw32N87);9iUzBhRiq&ZR0}q9JG(&=1vs5XU)CB|C%rYZryCa8tkopG2^Y1>S +w$4T7$=PrbW47KMXl*fay5znRVE=LmFl>Us&o|$^DSd^LtTfW+TrK`o>Ak`mAW&w-JNj|&i{r|^njMcx9*S|Yo51`sKEPz5rZGg)XQ!ly!vOSoBvRM0*L;E|K9$flokC^ecenMsv<9JxA +&vD9sqy_yZSv_ABe5nqE$=}~%Tq5+et?1v_VMxkn&k1@6aW +AK2ml*O_ExpAbZ9pl002i<000{R003}la4%nJZggdGZeeUMb#!TLb1!Xab7L-WdF?#?bK5qSzx%JiGB +;;3OVb}|+O4+ACQY5LXVN5-xZCY{JuD?c7Hf)B3DUCa@&A7B0|3DfIqBYZZg%c68Hpl*hlhvvjR$9|b +y?M7UKX{yt@CWQm+>1}RYhrjT*-Q!C}n;w6SZ`@X0~5mUR`CymHAoP5$jTAx9g-{nip!>)LCwS)k$ri +x5{}0#1@$a*i<>p8?BQ{$-PBYu3&DV+dkA4OnvrNzprIgmu0R@^CHXP)gsB23`H&Bwzg{MJp3HKoe=&8xON!)5R{ZXpY+m!zx3Z +8`QkS~n4A5+c(l6WgI#YVk+jX6lMUwNcbAWE9K5y1$FrwZ*{VmZ@zAs=XKh6^b80_%^*V#IXZ)A~{Rk +Yr&tMX?#uj6G|UR$z!-DG+CHIM~*{S_K;p|TnduVsF)LVlByZ=E$Y8O1S*}(|k)Bo+P)wZOa>snjluu8Ko2)8}l`QHpczHQbI^clq +<5lu=S;ecYfKQFh*jVId70t^PFu582!`Yi}-v01mcXz<1ZjxLUHv{O_2hL+V;J@IEID9v6zY +r+BvUU)s>VU@1vJyY7O*^QO3+W@AR`oL|S%?kwyNZLMj4N% +{h(8Rt~!ce%HRdP-J6ORcaX!1Xn~^Wzv75yv1C=g>h5j_;j&!QScZ{H$1%_!@=m0vj*BPLw<~oOl_E& +hN0RM=tmN#w^qU16&ab^KrTNM%KWI)MUG;8u*zXjpXAyn6C5l64tht$^~&QaGc>-KM)7cK=`T&Q3+et +*H=h$n4we=?qc+FnH9eI0Zm8;AO|Q;Bu6N`DDwC-cmPT=Q~2#Wtw2ib4=SLk3gMxE-`xw63i$Td+j?0 +R@ECXzb0Mo%{O4$Z(7tp)w=Z|dsvmHdKoh3rTmv|RtpFT4zbEd_L{W4}^c&kPCaC9mM$rS9S|p;T?JF +19Nx{Zh4hj9P1CfbDl72@Cxm&`TF$e@7R +X|{MdExZHQaepfUUogA}O{SH-iyxW(NiCz=(@%A%5!N`;*8NOjI1;Nd>8y@55b@I-&{R;!C8477eK?N{y}%r|c*7vmpJ3}nlNJ+iKaf2 
+Yi4w9M@<{xUfu-Ll;*u(X4Fqm#s;q33IDPZ>{Peqb=RcwwSyn0%AAlzweg4_W=k3M=MP)>8ruf5@{mq +Dx1b$0L+6>SZ2_hs;?W#R6IX!*XSu3_X`f3L1nlv3an^96--CP`91}=`&KP!aq#h_p^&_v)Hp&~q(dq +EEs-qVG5hrr~moHsR^&U4)uog^qq +tI$$$#Upf-{3^hO~j!zsCa%>W{Gxa8!c7BDZyl>}I?#?ekS*74?7~SFS^*;d|pT@!OIV9;cu?1a#-kj +K2Ck3<7hxqJ=_h;v)W17@eqFKEUYW0^U +yFov{CSO2nCV30dp0a&ep7#tV572U<&b1!`(6IS%Q-QDVYWw*TkR{B+F?<nJP7;FMUR<%N|!rowuU>;!fKU!c)t~;8VX7VBry5NrsbTFKeft +Xyns+{sCOsF=rb2K4DMf5~KXUVlUAlxeojRZ11QV;DOdTt5Ov4T*)1j_*ALEvN&pbHZdU3@L6ev+n0Q +k)>9&l8xro(G121x$aexxNOU1^H!)&-8Ay%+OoxusALONMO4=G%CQmF?_6AU6> +;;L9r32~}|xKUHCCSNF;>XMI`i`gGk9IQZcHUYQG_uieoi^+HS5WjeNe)jeadI%n<1q`!&XGOp +)4ir7_m?yx!+?Bx^4Bi%HV*_FijEfR<`MRk)>l +zu_vfw`#6Ezv8h&SC3-67YWrNP-Ed(Dm`t<+df&(9<4*o2DX{16B6LF{4a^Jd7LA$H}#7@GxUZ`tgZ# +`I0Vg@OGBhgr)3_I*^lmu*qS_5>c3~891k6Qxl*aue+G6>+{kM}T-pjxVI?FSKVOyzg{Q`;Sl(k`A_S +FpnFZV)AYESsu@;NShBx({e`5SR{1kq&mRECnXQ8-lyLw;{ag2x5Rq15Pezo((!9+h04qVgt+(<{fO_ +-(-|p&Dic1Ti$+fzx3?A7FVGtwJEFg)6htS>Hj~#MQ7q8{oTh3=B-O~fqk%uEjlotMW@YCD9V_(Sp%} ++wBuES{9ob&aK&+eDFaHKLqr_t6ac#LY;uSsQeVZkUgE-#ow`W^=mJu8L%H5okU)sAfnq$0?6#Q16k +bgqun7Dv3;aCpC_CPg<2FNJ!BFP{gL!X3AGjO>v!LHcBXEv(IY-W7{e_nmFiV!|O=8FA4!%HiJ;Mf0G +S{DNYn0YSUM6*yHvwU46wk!*(b183OkV@Myt_$syto(NeQkxChaFP?=tw^}nUWUlZlL$#x&?^Z$pspW +0($_t>s`o=#b6E&)OmUkdjpQ6^dSUst%`lXc*;!=;t`Eb&C(+#3EM2;du_=@4s9rEJTYlj%%vc +ow~g~MZ&roCZB$z7UID|)+hhe(O(y*6*|TRYnim)Qi?W<06$s4<5CG7)IJ!+C2$)H^xYVf)-}EAKU_T +VKCto~y`sK-=o<99@ABPDJ~=#YT#} +EXl~QLKV5ZO6FUhhEG8uNJ6iFwiQ{53F5^TF*A5X7+OOVxnB#iK}0bVQgf&I;!4cqJi(x_Q97tH5wwA +WsB-XqlVn6af~0Zh_=@p|wSn&hp*(8;B*!QfX$F=FL;+|^Q?_ +a$-eII`~`>#{Z-Pm*H3sP|*g#`TBDySbbnWti%$vH?ZsGu~d6F#a*XySuBy9TdthLjNvFj6i~-@b~v{ +xB}5T_fQRxZ&^!SK}kb9Um4qk-BxgzZvwtwr{qC8g&~S=5!MVm=#iGej{VGUF8{2&gKOXwSk95->qLn +;tbwTR#S4jEV6l$hnT9D2X@L7oRS$#($vx(o$Pf0_A&|JLW6b(@u{552>b6{RQm}60|1DTx2cB_v)1e +1$yB)9HOOsn5A8wfbK#!KbB-a=xLMM@CU!9S4Lvp#Gt4j~*tA%Ps7^3_P#VHu%AK+mcs^nZL!!>amO9 +wiIBO7srkq~!5TxruF)-|Waq!vXk`n;772liY$-!qG{cn-6;zozqewnA5mkPPr5k{NEn=t_|oO7^>LD +5m%yn+Z)r1B;Mv9iMQNSvV^Us>0CR)UWQkv9vR3z*g35`+hA0;pDJn>oV(NLrd;i4B8F~y02$~xj!ZjWC&j7};8d=ILc6=;=~HHU<=Aa3;m9Ad*7W_p)w(8mhn5Rj +3tR*|BY=o#k==&ax{>>s$>zcgbRiPa)@4N2fO9t;gS%Zq%%{*C94CQepuh&Bo%0CY$YUTclZF!>Wb|# +W71r0Rbh?v>S!#@z5T=7uSF~!7vznB==&Ht1tGTPz#Q(T`>Zdz+I6QRv>pa@ob$z=6Im8{pG8cgk%#vBvYv7#OmT^q$Deoli|61*0s6$MLJV#2-sd5 +%Xe!{LOT*O}DoPzhuJ#`>LncjN#<8EJp@=?h60boZR5J}a`0%Ug@LS(+k^fZlL=fOSYf@jG%|gCCG&-~pJtwko~XYp?CcW3P6^xiye&D5 +^dpz@RKzc~SV*%B1EAWRR#Ec}#45qDh~Cwk=rEzs_cds##+J*kKHzM^@7rGb$1U{8HC(z-(n#kTq!$Ti~xNS>u +5aN?#}(3&iUs~Jlxr|*63{sAM1CkDZ5+WZud(b$_G@$L# +Q}WGchb3JR5yEoGCHF*0DF7NZnt`JTLdHv=gI)W&xQw{ROnBuf-H-LlG=`20Z2;#eVRwV*p!3(iIj@S +J;h_5t#Y6kdeNzcw<1IPsRjhBupNO57~;c6pD@>ie{zjTAG#rSjk@+3`{X?$)@2kvPIe&2dcKibPFSg +!!WUsX7k$r72^CB0{H(}Erh8C=WP%K7OL@w(Q`-F@Q$`=BCh3@B3Aegb#5(VR8<5Tas?XxzMcKT(-7h +nkPW2UyQHK&z3U_y0CnFxg)v?zkQr#W*`66zD#its{rSsM)!Okyewefcewaxb4_&C#5mXF=FyR_PI^g +Q-B&+%!09S4+$K*Qr;!xAkn|MeA!pru1B;I0}zX5ik78W7}ybmHbtFY=4f+srg&uE&ncQ>Eg(KG@ALh +s@ZAMY$YO%7^@=N=*|Y$S$c4{WNgGDTEV=<3$~NAQCle0b=@w%k2JJlLw +}7l@M^;X2jDJILUa;dYn%4k6MRI&O4ZeRzu4%loUv=( +0L{RGCV{D5m_d=OKxw9DH1gy`4oE7-fufXMpzT0@yhtOiwx&Z;Ea3VIw*37?~g>r*7*b7Bw60)fFPYX +=)hIYZ-{Vsg)%h1ohCw9oC7`^v0~)UYN9k~nO+UsJZ?F+ieF9Ba1OOuD7f&>Jbz(`pFMmCPQUa=G};W +cmoHCL^Ruu9B?i5zK+b>DJvuqaAf=Wyd0C0atBc@wieQp2ONLow^yMUVmmH<{@kvNMZ3yQ`V9gxGja* +aGh`6Gc5#lbohGHtq|`B2BKpN-DH}~>sh898# +Xt2q!JVmHAZd9GVAVDt1Mlx|0`opu*OGKb(n)g?HBJ7A0tx1}~cLK*D=gnm%T*}bAo-Q@&uU%RdL?7H +7wa|Tn;Nk;1a(Bkc2wLU}ixv39l(oSKI@UZv&o#}P|+?u+|Vm?NVxL9*jp`Xme?0?&>Y&wflrLuYCSH3ql&NALtzS1d 
+Xj!(bUE75ZmN1KJqvi$kM<|giPLHUb0-`5E-*8E+k5Zj**$+VU?o+Kdi&8R9t +pI`(vYmFc^AtdNJnOSWj99qW8)7s>WPE5|Wg~p4sd!U +5>z&E1w4&(8MDgUA&MBbtq|tO*J&vgmS@1&lJDdJ->>j8MCd@mA{Y(_l1 +1kd*P5W!+gCA-OH}01FNG0WP59zTJc6$%QXx9eb>Jj713SV`%mo`jWpmjTm#_ +JCL*L1l$VSaqj6AWl!-#xo?Z9#70IsYvJBz^ZbAYL8BJTUD8mMYkpkM(^I;%KV;bpN11w| +v_>FZBu$Q*aP+iB%5BAaob2#3FuVAdSc5AI05BP-MZGf;tEFU1i$#SYgLqe19k*FR7|*vy`Y~2*G(%R +Ty2!_HQzvPHv&;^SQJ=V#i@W{dD^G^V9clUc7$&$ZLC)^ePuS__DygpJ;6}dvBPbEwZpHI;JXHbT-@{A!LvnaFyzkb=QDqv>F+RV +!ys>+|d^*VJ7*K8T1=6$3W$aY9`=j-e6i({iZY}&@dR~t{t;-5GqeS~+TVP#DU2lDBR}!_PKUv^bgU! +G{x^~Otj>T7BiIXEI8ldpJbD>{5u0BEx_2qUhC8g>MPRi-OgcGS{a`NKX;u8Veya86nR6|g$xfD11Eo7(mD91cC#AE$J6oqBa;dKoqA6w8L4Hgg1I)@+XqGO9N9G +hO-Y6hq^o9MRJJwVDfus8k?5dSHMcQT8~`@L;-4A#`6I!Ge#0>wR?sPfzISGkAI$gahMe@WiS}c(izT +R_T@tH0(>4Ye)C&T(HUY2>P-TJmol}$CLk{g{dFb#ZCHREafZT +vQ7l{0qi(Dp@}fV0|O-Rs#@voai|os8CrcxW2;Dtoi0%E=)~x{D3F>kB(e3N_N<5DmvCaA_TNNQ|{n? +(a5+<4M6Skom67qQ=r$>7^<8p+?D_JLaIN(n90FehF=Uh>2++Tgv +!*LLW=`9+i#Rvt${-URI2p@*XPlvBu53aif3S8`Z*A`IY@4f}U6G3@py(Ns%a6l7KyTs%F&zU>O@%u9 +;db}0>b%aW)7~G(Ex5D@r5WJ@6aWAK2ml*O_EtNz46t_r005~30012T003}la4%nJZggdGZeeUMb#!TLb1!XgWMyn~E^v9Rl +TA*;Fc5|JKE=qZRth~pC6Is>LR2hZ6-AbtOcD#n6KsRX?XjKyBx#rR!VAyL_uiAnobykwwDJla)MTN; +S{Y>;=%_;N80VZZ)%9dOP>)u6vaGh$fgk$Vcc?v@A%kbBwcKmW;A7~KF;;5n9K7Q2Bpp`I)b-NhoBC5 +`%OIRUf4B6js-Rw`s&ZtYl)c;^UHMjeP!9GH0ii143y0(@B7|_7yb}WMA?ISGpolFQaB<8dk`-QzL*X +?HhJqJjsPM;=fbId1=a%g=_1(KF>`O?8v`#y~YlW$BHcjj4;ZaA9L>VP|P>XD)DgeN|15+cpr +r>sJh%!^((Fv<-Twfi1cSS|C8N36ec^v5{zLBy(3HRg&_q^XvOaeOPhQ_#oT-cr)|n&68?7V?9;9F1# +^~OVqH?n7UTFp8i@+Dr;Kurw*$$uHN$V-KpaZz3;p-S~eTH>$?UEg>*g{u5_vs*^zZ=tmtI%B$ +p&rV(T_^#1kk#xFszA+bhd0YwsqAkLR@{xiz+xjrzjnpD*4@x}I`OR~K8W4LNVuNfIh$i<4u?qGtbhT +uZY|%a*eYr;?H@Dh^ycWn&U9>rVb)bea%87mK@@l6ypXn2XDluFP&=>0W8SAtA&=vZn3tvb_6U$ilyEhN{ddkJJ?=5J`?`+Tc4{LPND~CE{6)pY +q74^>tvDM1(wqPpC^XqkJ(p5WBaU-Pzb`jIE>--uRs5?j$?&^%NA`7Rb>nB1&&nSt1jqi=z*qDE)d!6)Lemh|gRoeY*#5170yz$K_f;Nl2h&FkF%>%P{eg&5(xuw&K +EaR2Q~KIE<`nL3@~Q?4f{etltc?=U{bX{c)COasxUBA^7V6^)16rEH(1+yS{J3otXbG +Oz<fPR@A;4jG=_Zsv^=JUvGKdJ=GAsGrcX?<+(uhKBxf7 +@Zq)!MlmoxcL&9?3RZ{vg+w`k41C4cRGyOw8yVMi)+qfnu72%XV~fK3IlbUJc&BjvK3&~^zS;gGw_kq +R{wB5$ukor}Wq3q#*CJM%!WRAmP)h>@6aWAK2ml*O_Euc^VO#tK002`F001Wd003}la4%nJZggdGZee +UMb#!TLb1!pcbailaZ*OdKUt)D>Y-BEQdCgaCZ`(Ey{_bCKQ&Gh3EMfcVH3fzu+X{3`(WKaiG*D>iWO +I>8jil^&!~Xm3NJ*9ywGFy23xZhIy^!}j_ZGRUv~ggMb|%B4!v}XTqM8*(@4y`^sW#Ba-b(A1a9KI2l +_-~Rg@4wlGMvqx!PQohx(2odi>|Jc>IhQpbtYIJ4$^Idz-e7tGWktA1h*9q(zK8zrYT4ZTcPq2bJ$A( +?x5lCZ_GdNf?mFW=Vr-Apt7}TEGyx*7PKOFze|f-vtyX(u#w^!-aFK>Q@^A43H#~hWL&wzfSDDt29fZ06LyNY6%pPilgm(SpOOVQBp%Su +-9DeyQGWr^k$#4!j6Uz%8C1+_)XF`8O+CfZacnUR&_TiwOkT@vUNdo6S8<%KRwePHJYVHAC?I==urp=5#9r8{wtX4L@%oab;BXJ!G;gU5z8WUAU2n(L{R&sHw{P_)F>-6TFMPkfl#ksURA9WxUt5q}oNbmqx1{6=4!B2U-m6Ic +JDaBZb0+my>CCGKp<$?if-7?)s^5}Q) +5MVV?p#_Xz_XE*IQyCKzEX8$*>FxV)aQfD?4n=0gH1#3Ex6n+v!LeQwyx-JSdn5VjSRqe(~3y5F&3F_ +Ui1{36GbOV<3jv7c#lWbFSrrTzCay#HDRJEjrc5)iBN#JEw2_tr60IsQs_=y{5H=ya6#$n?$>06Is`Z +3RC0ICr|BN|wZfqGNGwW%o!Xr6F!Tc~KtdM~U8L9aH=^?RNs0PUQKylKKGC9s51wr!?D$nO?EC<*ptf +HF$N(qB$O+-y>*3jKs +E>{NXVpGcSKRqjg%P23jh +G{BLDy)0001RX>c!JX>N37a&BR4FLiWjY;!Mjbz*RGZ)0V1b1rasty=AGOxeb4idlt@WQZru#zY)KEF_m>~3lly}8`$$ +wsyb1=Lcl#F0K!?)QVT`>XU+t`D8Y6@(=Qmua82N+T=zSyL`{heRBv330v@kh-sw)nZPYCDw$i5qJ*) +5{G+?RLYgfv#GL7aP-{;%#zlNC1nJlExKg%8Bk|_P)?1gx=urkuEi0RQ=La4?T;9CC{&+c$+`UGUaUC +3sy|hJG|4x>rP`Bc9J(! 
+OPxd-TxiRKU!UMXR=hd6;T#k6L1v)++70IMS^G;Brl!kD~Au=?#|g=Y9pja`&|VaG+eKRQy%SBs&H~h#kT{yui!19=J +4uf>v!Ygn=)=uV|9eVZMq3~-hZ@a630jqe%Xr}9~acTFt(-=n+vTVi2jyd8w!DSWXYLXU%(3WPiov`2m!|>j# +y-OD4_yH5#nj^&fHuhI%#TB4BWN}_t-_Apca4SpoYT}%du~DT7IjKW^xhvc=_Ui$wa(W&kJgoV +%gl6OF>Yetm4>h@H%Pu+PdEXq%43nDzJNtfe)zU#l6$QVE{#oBYl^7)@c*j?m%-OSiyRLo4YnchcgM+ +n=rkz?f>OmI{$cVC1YvMQSzO@u|cZ(glS}UEnuW5(hv#i7241$8Y2h*<(a{$rF;Yl$o^MV+6emZk%ei +iMUmHgP{#Y4+$5&Zb~X&Lr!(6Zl*`7JHOexhYKMSmH!54a1l!CnlE!I$U*5?g}p0>HCg!FhPHL1e=#C@F{LoXVzdCXDo7R&N(67hWSI}JH}n;+PZ5jVg(iL%l0d&qD$#~Y7ve>{C}Z%N +F$tGOuo#$H{z*`asRgVMqc%jaL{;GN;zyydg}0I6;%ix^N;Zof2U!kb#9z@e`D*bzr?}r~S>*lSpk?z +fwCqL2^q*;&#iF`k{*Ti#?O&i}f(ALT;yUVF*ZR@25^6<7Novg1&JT}`)MPL^B7S+cj%AyP#t!CBv)m +2}Y6g0i33*kwI4kwWhqIa#rR>d)A=7N!baQ<9t5b_;C@f^ +-hWWoJ3fjg^i9X*~259S*C*QQYOz9m&rGa*`+QI=f8Mp%r=vFVp4Nw^a5x#}His5@6{=aS0DFrytf7v +l9eUyLDPVIIQR-kLQEHDAOcLDJC4&8~M0B?;DHZgK!+6!OaD!3e?>t&dU_7+mZtE)T}BY-j`W8aJrye +l}pHD53Z#|X4je3;aZ?nKe1iBx1ib|NviIHfI8?sAa+-62aur~q*hJ>p_4kGErnh!ed$RzC`wz`I-#zi>Goi)Qk!b2%juBvS7Fbp=_PmKXHR +!aB%S2AHEwoUp7t|FrTXqL&4cZz^ghdvUKrKXN={=E@@{W>SeKPeC!@OB4YWGiHjS3g)WaJmjoD@Clq&lkry=Z>)^wRg+!z&pJ1DU79l|{bIZ3ANQbz*0**GbPm#00-5sAM8*k!ZFTXL3BM1B3d`ebQivdPFz0)5E^y +1vC~6KsYHH<*x?>$?Pd9>b0jRMSC4cZX*1#3@-As+%{JfH_YOc)KH8xI6|$ue3&0+??7{1XvS!F3{S> +z!fxGdOx)>l5U(Vv6i>938YJ{{umudKeWveYXth_-aXsQ9*=Sg)-gYQ89}f>!Wd`I{8u}HvIj%7{f}< +)=@Vd~&ihU-{WDEAGnkcX6`=+QElHwkKw8~jia&k0^i!TbCk;#acI$+xi +{w^)}fxj`Sz-@lipm|)rBDQ3idMIo+c$5Zl|jox_r=*esaFw=&~v8A%@rC?LgeKn61etBzT-S29%}Da +r(d=dPgV^a!XbPxXpM-U&9HB@7Kq6owf04Jsp@+ScX@Q4==|~^9c$n>1{c=WpIqR)+*GVy7!(Hg@zG{ +Em_j19HCk>&O64>c$gGC$7|%;tczxwnwm;>(7ZiAvb8&Um+LW<(!$brCO;C$j(Y^d8V-#0b{DTZ>VnF +6aLQaGGn0sKA|)=4cnDu0cS2x8-TU6N)FTGD4YIl&4jjCr_H7yxoJ^frvvYH$>r<-}o3h8fM{nPE`_3 +$$3BU!l$|`BR%t&*s5PT2houxl&mNiy-0B@2eZ$zmP_YY6ehoC(7Vp$+gQfGNTjCXBqSn}G3R%mzglN)026PY^I~z~OP +ggqekT0|HF`WvGzgE)KzR_uV1*Wtb3W{IaR;Y(exOjq^S>)uRnNf(65w^Fzvqc8W&zBMSFPSiY+xuQ- +zvxA^~hD0Yb-5NLrfQ!p%X+~$$40pfYEv{4g`C1-?im$u8#ZIFBUdTD=mWzg@I_Pdq+nu7}cJ_(hdyE +UZc(rtoLOeEjX~dyW2Y>FYd_E@$wkkXdddceSTPzkZcoA=iO5F?4fIP4&DZd5h- +@J8xBGr5Ed(LD(M2Rm&&1UrLuG|!aID|g!(&yW==wQ2y$jHBd*m^+F+H|Ek|!=4EYjXH@ZF;?t3)P|Z +GavRyqLC-ePQtHj?vrU;#l-5sQJb)tm^dCF~1(Ep&aE$S5Uh^R7#_s#%^v_b8-z%n&C{VD*ST-c +4zuL*nZDk1vY}kpJy*hB?Ff2xs8#Q2*S(h!&zrTo@998|OPv*;`t=y7Hl&)up0?TvP22F~1k`OHuZDF +Na3#+t@3O^a&roi;Pqy4{g?Mg+lAV=CMPsk}?YZe8^ORPaCM9&=SkvV2w(o$)=-DaJ6E+@*wVn_A%g6 +wX^M;uWAJdsFq&>zA^|)upf5F#vHiP>1?X7~rpM +yt+J`4MY%n}5q5SV6QnualwKnR#28I+-61jSJdqi_O7aEACg@dVkxZ*DSd$p%5-#ZCi21po{aFJKkFA +d5kl_>vg;IzpCSAciJ4Zvin(w0sVT4UQ}UHc`4Hj{-}YTbn&7k#FFG0k-vexC|N)^2CxY)i7NoH +z%rI-TL5yzg0V5Wbcu?kqs0S=zOq6i_~og4IRxQ&$$X_>`7i**1N5)sV3r_?XYm1hhOL? 
+%#a-@*|I`sb9b#{lwO_`@@Enx=lH##M%x*7i^ckahenrIMQ7!e+Rs=cRXf4sZhSY?Or{3}37DDZ-b*Enf0fbrj;F>`vw*g-q9>#aEc^o0*& +bhwf<5cANB}y({hiB${_Ohx&gDUWc71>6@}M8D@9$TJ;60Ti87VxG5Jo53?UbMgw!hJof4KAGLot!a) +am?u#xL=aNYd8a^<>a*Nh;&=u&9y2_c60Qd;AgF`KT;2btL$CALnf29?YuSGP`4fJorZ|*{`r>l^63hVxpZxKFj)fZ5fspdF9@*&&cZ{c-6w_r@2*FTGgMD-uf;or9rqGMZASd?xss +3!C_+SLmKlCGCpfYH8ir#|W3#JL8oxPLZb}2&294C5NGd+9g?&Dl~p>K1>Ti6;?%j-cc+*3kKL%CJO4sNf9G$IDgzgk2~owhe&m2SQKA`TsU7IWw$#uNaQ=PXuKF&-4r%zfD;z~?E%obh`)hdAy(vN@)F8%~_KI7!NvQk11Rn% +LP%inWi6LyOR-W56n$QK0 +9YQyo}Ss!j6Z$sGK&EM)if0^;wKKk^oLi}^#Q1PO%jA*(dbnf}ATc5QGaBl95 +$g1viHM^ag?c{CXuj^zCg`n;rpWq7AV}ufW9IGHWx%ifp^pKLo0Puw^$YW7?+f&IUYI|7U!bpE81PWB +8auu-g70kL_xLNQdz#wsj<+o>c0tbJ(;+6=najn>O(Isw#>YcSC4VSJyL5BmSZ2@u +F`bKjTwrJ9sHnI*Y&B=tOn-5<9u+8%U8Q`<3?$1t^?Ro{PZkMn?t%y28me$P5DvOHUJuYDBA$4U&cy% +&cLl7O~Reka@NiwQCO3Zl@go+RELPa^;j~8AzHU_kgSC(PzBip +O&H#{3C_T#M7@+5p`jCcdMk=$JMYsufxHo|Gsg!58AXys`#6gMESL;nyg5=?}+*R_`d|g|DDr*8k7Gx +=C7d`pe`OPa2LMPFBV%e^QzsDydochSDI(M#6REwuD{MU4PIFvNVJ5JhVX)yNxI-I34j+{5D@?`f-k1 +t_?3$a^e;fikoc^d_{WF|e5uEj2Cr}lY_S0Yy-tx!O91{4FU1`gVo4n(h(&KF_{&E9im(WR1(Siyj?m +W?QA>CRioDWO(xqYrLH{Kb3risDko*-CH(%^574%u$C)XkyGxMnSZ@&9170IkTG=GHXbMVosil$F&Th +iG+uB^pBN8|L)R1#z6n-(!qY0{oV#`yyUL(E +_X4lyR68wTyu)s3!GG9G@(;;Cdf*(Zi0b2?t&DewL^~fmKPSiRUW(YCXgbnQq6faS1N2FyPdaA7%JNh +g3=<|aP1n2^|j=JdeLEV?ybPq7L7*bKYA=(#s0Ir`>JW$

gcjP?dlKCf$vSlk}dQwhdz +uMPS3wndybnQ&0febQcyxHlWerf*X@C3|#Hh$*uEc0b56oNNYTi4i^NAC^hElSx}H5fOfcDm-%#M+hc +hg(ShIA{6p{<@-|y~c%Xx*MP?Oi*6vA{aCQzWVt4kh>b6PXtJ$&;;^LNm(i4p6(_^xB87$<<3rQs`92 +s@oa{2{F`rA;fRk2~^md!+=!MBp8Z24bp(CagmnsX1Q`Cx`lFh|H!;Dvjw5Hrt}Z|NZyPLX#2rnlL9f +9T5rq}1KyW(v1*B*h#9lbY-uud+lgHSyYjSm>>lxX&=RNZNm~y`JyerYks_2rnA1GYP>Jb7Rz>-oFmQ3OmJq8?tx +nOF&lP^XCh2{wSt}#Lf(QqZR38AIY~JUS!Ni0Lmw?F-V^Tmd?T2&jPpe6tB^t>f61kV(>h~!;Q6~j&P +nxH{&>fqs<3nVEw7`R;od6leyNn;#`UV5S7F7#)~OYlpda+PI9gYIum~jDi5}z{M^$N0$E)rnyFRJ<- +q?j{ff<}6*EvH7^=>T?X@~Q4DiOxtH=7GZRxHr?jRkFO5`R;KAIb)Q9{s^+9fP(Aw0&V2|1VGW0mXkZ +(N8vT3L$U`p%@HDP?(@8nnV$jq8OY(aF{}Hlz{0kQ$guh8DBVDQc!?Q25^cqiwQhI78^D7N&*EiyObH +mHWd0LY{SGWI`Rej@T)L!{E9k!V**Rbq+}rmak@bDG8417eR049u;0KomMmbr^m{95oGdH=rR}Vevvi08JtO+VlI$Ke>J8pBTa+%?LB~%z-=-u0 +YFcc07EY9tvRlkAUcC+$WT4rd(YEd)Y*cHw*9gaKyd%p82XzQ>t4vu2SPls=e@k8 +sW@Am#nUaIB1pZRl>1;Em=jCvq&yRj1&iKGr?S-ixObmB<+KB|H|4WQkg)r)+)X*ctJ2D +ZFPJfshm+QXguB$pKFuzCEa&a2rc0KkJdLuGeAq_q9amewatcliDXtc`dJU(kd@03Tqg#|Te+Q!QAZG +SM6xVCOnlZ50=zdF(^A#LL@${k-rtPMb^$cbsc4G1YZKeu=!v^)$5h}%)r-IU~JTtCvy<_;PlNUi`x-`LZw*OOXG9q_3mls~iJZDp>ZCWg0Kt;+Mf3NMFzg!*fd;ZX-$>JYsA3>UFFK)f};z!RP&$bxKy-o8^Yr}d479M(9L#V&PmvU +P6`&FNGWk={Hx9mBdjoGkWs;K)mVx*$n^UR_GL#!ZVdze1Le{>o> +D{rApMy|Q3IjX#xE?r5cL26X1RRGP`~)?(K}0Py=q +eBQ1Hi`ycX{X<5hfvXPkPNIhxr#=zJ?$B`UZS@!=)4HL`m3vruhu@^G7?|>Ljw)vsSa~{K@O +Zvqa7_21Q9?ZUnkXJ+c#4q*geUIlv8`G0)JBE!yk+nihj&LUydl2pgfaea0OhvG@;6}@T%+Wh==?We= +6`X{Kfvd2=liL+FF{~948s(OBQOx648l+-PGK;G;TT0D;1K;~09K5@Dhwq{z8VoNLAwk5VXr7EVyPYw +5euRMe8hkI%W;U8jW&6?fiY}>UV>SWEQl8$Fdn^^{0FlF4T%7-MHWblBLH>3D^a-A?qA?Iec75LWRb` +?xkyrqS}+sHWiU(f3hcsPwH2^%p##CIGJgy&9Rl*@k*1b7WinY#4-g!omt?`e0@CN=Uc7()avX!bKO? +nn_J;ZZTlRHoORtK*;==;h{Z9dDH|{>N;U@J*Xj8!FMiHes!klTTcC@%(C8ziSroLAde10g-cmMA5m$ +oq!e#y6fRa~@K>$Bpb_#-A(4{!bnVb(Q|Lc;-O;H8^V$Gm*GbInR`Sp1KAA2I@7;D4=UT0Wsg?^J^&UR# +%-t-)RW`)Co1>i$2Wkd#^I4vWOMidVXZld)$NcD3@@l1>J9I8i_!w5aDr(S^&p9U@_HI);>2=%aA)`l +G$qbhUx1}D_%!l65QEB@3Yb(R7BsF4BH5Q48yBa_Sq?APEc?oAYe4RLxisjYLVeAJ{!I3>ZSG5KQTpp +Cc_Go|`Vx4!=6Gm>d6ZeUK5HTp9IhQ-wX3v9dbC8IGR}^e6(JBgL$3DjCuG!s%qUCFgA5cH8Jd0~ic9 +J7-V6$BYiQSWXp!nXl3j9QW6#mmMfPH;YN9d1XQqY%iQhRYVqNT9fGg~%3d1~G$?1Q=Z{2;vha${vQJ +dF!j=ubO;QI7Ae8Xp_US#ZYmB!aH=fVC$$fZcQ(Ur!L(4lFYkkpm4|;k})}o7C`%2U7 +2VN$*uvf?#N;E3O5uJ$Quu^=3Lo0V%7iP&cw|dseNT<<8-%Gc8Ix5#|R_riNz!7JwxuQ=Y7YWKw%t1Q +_nPz&I>5vw_>CF>G2QtYrEA5sP^Z!TOdvvRcx8I_DK1DU}8Qyawhqp%#5+p0*bjV0h6b0eaUw-VoYwg +;*-hboPs9NPhq6?YNe8QZver7M4Zcp+_I$-Z(bLdPlI1#ivUMCbfFBir+ID3LWKls5t#WvR0md=V&j? 
+c>3ZGzM6%GdMA-}rE1M1*JG=-wq$KFS)I&!JHqPQ1H==Qkn5Mx$u7;+~GhT}|4X*iZCT-{pIiOFQ&vQ +euAB-0XT2iiK4*CNT3J+F8v@Bu$Lun8P2VtUFzXVRLYkZ=jON{`J5Al7WQJw>Mb%ClvkDZN3&tKV1I@ +j)~!*+n*pvgoa6mK~Va`>M4o?Xvvr@J&J%{;uTSfcvG#hUwRuPZLso6~*ef>U|_BPd@XqlIx?UEv&Ox +|oi{Z2u4Wn8U^#V2#|(e?@@mz5==TltYwdAM{I^;&R`h<%y;`xD*M5Jj%f1&&@3<>s|Fx)yimTgdXYL#J|&+x3BZG418K57Jqmg0v7 +itrZxU32yy@-iaNQBzI0x2)^c71ocg`I^IPTQ3ra$ui5l`jBjyFua`o9mOG*%fNty1!`G+^H?@BC?0VWOoe`sf(YHyE?gxP~Ne0{CpLM-I8OaxK +A3a_0>dw9#)Dc3ew!(4fLr#ibMSZ)*$@-X7w2@`Hw7k&sqi<-Tl +*+9oatPkBJ38#en}<2^W9B9X`HAC)>9o9-H)9 +R8Ft_n$>EJ-C%TvjA1!T%)@-rM0d?R<}~h@cES5B&vrekogcFg_r9VWlgadr+Zs#JYFO=p6ss;H$;^- +>H&&nff^fZ9+NI)YHr3A;){d4nbhtOd!rFM=C8MfZNG$K`!lSnW89n2ce39=O<{ +NwQOz4C2?$$qC^jHS*tH|I33>Mt~ozU+O%sQL{3Tp38>z_#6DvoR}M>#1xuC#^!jah@0KUx<8D?8=q3 +3q{>jvh;KWm{G!n+t7m>w(s5cHqhqOuVnGJLOsDTz*k2gmg@6Q<+)fle6CQ%`5%%{a?WMe+yIHkDItQ +DIh*4A)Ih%I;L2xDmo&o114~%a_>@0fJ-^NE3z;ik5&~H<9p79Qb673MV@$KXSL;$E*9jUABrzXFWm2 +UGSBp3a%Lh`eBihx5|@KgU$K%Z579X(b4D*87jC_e4E5bCm(Yk~0IBOjTiR^ +I9$Y}O#6-JYj{zZiqPb2vtz81WX-aSM|2cCzL=}w8mMRDiU**W<0O&I&@^I2%!(?wnKKs%o7sBmyk^8 +J{Ro%|wSdht}Vcd>iz<`I&Q{@pKFqSEppk?~}jM=_H*LQB|##_WsJQzaHSVC-frhIz{0X_U>aPu{e5- +`(~xL{W$7sO^-5Qy%q*R&PQNa0N~;Tk!5!1lYX{vNXZxW)HD8$rP^M +q@C6AqYh<3_)TanhS2OfH49bbEyo>TCFdN6jTIK7!;i;Vk-*JAi<7&9J4VAXfw-+&-2Z|uMhz_dmP&e +=sAeifOSv-q>+owu6K}+~0ZY(NnZmtH#&&wvr`KiJD-<}QhLs0k8+59GM10N#)C2o5kWu +;(D+$d6fE^49kE&9FBX>*rZY0~R887xZhesu?22uK_^VPk}(1ukZTxQT{@KdMGJyaiHMI)Qmzh*)m}x +^%>Txe|NU)s<1ccy2+QPcQPh3ojy-JfO2j*@>xiyM3Qi4hmoIEE1BkJMX#msYgnD>Fm4Zc_O2L@eU6=xiR3#2NKo=~nCqTbJZ;>G=2kQx +P#1%7_OrrqOna5~xvr;r|BbO}-2*;stJ6?t0fNG2P6P|ZD*(u!cg<-IKFs-tS-E}VpldkMor?s_fgS? +{i~N-3i074Dqw5nPGnf}aT~k{gkqG|0Gd2j%e`O>^#M3|KQGe7Hb-;cMeUC4>7=reP6V0UP*Z^?bRvR6R0 +DZK^`t2mrimA*^Uyfx6n33J|Rk(rWny(PprpQr*uu=bn0ueYRkXHZ6B +=*eOdV*;x^%*;x=fLnf@QdZ4j#*GJxQ@3UuY*M1|s~S00sB#It%yyuRect!wAn +Xrj5a8FHJG@@uJC)|+!BiK?vrP3F*E{}ohShk?a{J`Grnj`?$f-YT;vTY${v@&}WAJ;W&m-<;-e24~k +8cO3O+a=)?Z}sD5tC*ejN{!TqW&S-k`s+Zr0iTQCNs2(WE(aI>RK(*K}1I53uh+7d}^iap!;gJ2KeM0>U})f$oJn@=^6f&>bXzpX#LK +tG79)t|~W0{fEmI(;5?Z@;)R+TR?WKr8io4PV&cOFm9$KTXg_ +!6zT7pUO!S7|RFEjMgV#3j%mrKJap6XUSJn=3%fDCsa}?-lHEre!T>t_U3RKjcndNS&_Iv(W~3xXKOsSvj!Ja6Qc5hbE0EAQGUJWxZI8rT3-0BoYe8^nWbBT43SpWCOe#OphsqSV^+3<9%eoviQSTy|XkIw0vS8E@T7r`df1Bh7wgptQrgDUMkSLoz2FR=aE5!mpBZA-Wk&AA2kO60d+1 +_q>*d=1MK2F=z1|G6hEN;XHnmf=+oozS%-uJ)Y}o)`Pw(51v&_V%7EUkBh8mu(tP;*3B>aGoYWt5xYL +1(s&d11(CVfL)aIYVRJ}qlk{Y=pRFpubp5hT*O!oXKG#2aReT<;5572_ZPWEW;(Rz=4}BZ2|0fq_;px +GTDinG*ZC^-#H(U7-Q@;1aATq-4Fu|m_7_|9>9m1V9`TD`B<^NziBnFAB1$dypXhi^kjc~HytDp&5PHCa +UWifopaOb+`3-pQ8R0Y(sUE|H2>7I=T3P( +5!flQa0Pp+@ZQw-_A$~KIg+{7?dV!Poagft6QcuRDlmMK2_)3*+g*U8DZme*5hs%u?u^K|5%;%AxhCO +Ox)R~T^r&v51@lz>FHD#}q0@IVHUAC?|8T<}vGChQ-=iTzfgVN@wEtrWOdte-qZo=H3{E3BK{6CgFds +H!te9176Y2nMa6tW6cq9o>@WB}Xj{s`o|0Km$QTn*65sLxDQCs$6g)Sfh2LqEyfK?Tpuksl<3R?a_=?Mqxr8K}dF!h%JHJuewfdj=RV@$xH=7lnw6Gj3q0ZBo61^_7ubmM@a9RkiQ+ +prk;YcRhB!*K&c_bV83$obM+E8D%bCc_<7`dHulk4FIx|8p?>)-$-Gn)Df~-0#$1e(3UNF~e(MqgL@x +f^f|nGHTFd(I7_6f@!_Ozda-9=TrKVGx}a~2Kr40|7g0@`60i9*pA+WW8T(>!Vxv5%GuZNvvAR%i+$| +Jyw9E1&@gF1!dHzs?u{oGsY2#v@eE2S*c(=qJJRUuN{RBZri*m;E+NefF|?PIXKW;Pa(8IWlwbC634P +gk;YR}5_m}GlZeDF}5C-a6%H1g%Q@b8@J=!-i5yYtm)mK$gZe!-CA#Xo9ZQ_uUHpP>!ogU2n#5MP$G~ +HA6%pxVn@nIg}D_f5H=Xh%!@2WydcOsg7c&7tB?V^(D&*xYW{AITum}Ad@BFHR7-p(eh}r9n4`mhoQFA8!e8+t@&b!?xVECvrfoiUJ9$c|iz5qMzWDc3(nqqhEQOcs(kn0Ymh8}Ek5 +s8dGsF=eo1QT`NG0G-K&pN(X1n3Zn-{{`AAClLPYxQv0|}HaSBVQ +zQ=_FD_4&C}lOd8`;TC-uEDou9AP>nRd}2r|@iI%_Nyae+?HYPTxups}n=NQ>dVIq2t~UuZmd`>+#4B 
+FD!Q-*LXRI1V?>o2ZBz3Gr4wD-}^W@Ood}9nTl0TNT=Z+10ru3mSFvZEZmnBer1aIU+v(lHR3;R{Noe +=ju9(NVkAzZII}_}gRgXwCSZE~L1Q>h;so_!0f0zB!+i#Plt3~a2$iNFi_d&=L`=6ias)6!8vYMOd^Ge9 +YY+uTIz^Dt&4UW<(Cu)7fY;<_mis$?cE^<7CFev-97Cf!P{Mxf%?#gj``+{WUV)m{*&R69$KOPZl=H_9ro_=y_yh;+4dpeum2+ +T&S{^f`DoFu*EI#QgpLQ6tm^8PDb8eF=n=s2mhcXlCPB^WQ}UbH?m9qI?O&`SZK{$&a5m~lAk;om7Tb+bkguxxkw`T-g(wI9sm9p3I?A|ci)(%DXzZm2DjGQ9R|Ie}e-|yrn{Qid>e2?KcLQphHV>C&@G)XcjiP0-AGAKgO3~Hdfh)`Pt6W=05RorDSN-gk_HW_)Qxm{{0pB3y +_SJ4p{RZEX4I6(bcT-Nr!lIuc{L7sDy3iBxl|&%d7o?P4r@?aI0uHW?B|hG6&c?rJU#M3oAkk;8^rZ8EvU8&Zv#c?9{1|%4RwB+9Y@kQja +;syuMu{){i0|MN6m<ycMd;VLO`jJZUNj%Pez5lRFU9f-dQjgsAz6PoPPZscOE5>W=o`(z^pNzNYeNtKg9_jp-m +>nf5PzhVZOcwjBl8G3eF`2$Lwf9ZBSSVRz9#pQX)2Rug8rnRMt)&C+w1Hn??hv%%>FZwnD$0soH#)d`?;S+f~OE? +|6yghvJN)-{cQ~HV(7oUjmxrO_s^wqOy&=xhkl +A-&Z$O&dL3e750Y~G4BEKjCZkcW{M6kJoPccHeEqUK(>B&w8XI<56^xi7f^N5m3bLlF`}o+L!*!nqaCa|ACE7jWq4+$hpY7rxB$U?CH$hQ_ +<$n!|MepVa4@ie`Lz1U+H9oA`u%A +pUf%$H2VYBJSv<%D6x3da;L(sA6+UAi5Ui8fujKKuTz{I)$rAQd3J}jys8z#Xpn2lP|5#FYVu?z&_MZV!40T2(gaI7)<#{`37C2xR_||rkZ!IAt13FR)_*V+m@FstWZxucaU?;%Bt%sS|L^shbTOcmR+|e6yI4(hC#$S^A(ZCsv{w?s=fE9~af~pNndKu +!!~gK=~9vms4z`6_Rw>EkngJkb@x#L6(9Ok{KV?cp8Qx9)GX&<@Q;O!%)M(cH!ph#P9D?e$bZl6T}(l +1#ShlI@@GIeqLrs_)jt+&dG3%2~zFPk4UZ;VkLM*Xoa0!AWZmfLHz@?q3D;X^c1JjYw>RIpZUQwRZ+m +{kn4V(ul{Fo?O${oL*I{yed#vFUH?eku;e0`NJOJg1%pYiLo;}$eHu04c@nF*F7B1YDv&!z3GbVO8WH +!|^aUp5`kG!Q^<@{tKD(>6jOpBI#*a7~H^+U2p5&+V;ClG|RIh9WU20`+F<0XqwNo_efI5du>n$-E=6 +n9YH8>OTPxv&pdk0DZk~Z0opaHWNS5IPaa)*)mzN4LTj4U^tvBwU_``|g1 +>aRL(SzZtrCQfOPvu!fAdkBaQ4u}fc34qO>LCxK(g@q;pG{*?_ciD+WkXnD*sMz%SxEZ5=x=c2z+5z9Dn8WFVD5Jj9Kcp+-yd;Nu+UICQr_C^<3V` +L<8;M~lD;j9doS?H?>uSc8dGMca?80~ke;suD<36Vx~EybbN4SF3Ke80de++>vrEF&;WD+i-6i0<+hIU^1f@|kC4Ik&R=9p*hx>vueg-SSGq8m~Rm%v1vZYHh`*3XA55JVpOJQjX8}tRnQ1S`=n(po9e)(RY^J{6-vd>~P-;|XP1C97ew>QCF4RB?lGfrA`yN_3r{5f=J3MfAzJ(+i+=`$xi{ZJ@$Pg87pKAU0v8Y`SvnUcwQCsU;{IYoT9U8@}yO%b0`30B8-IG&l^LpD1SB3|u*Z25zL%AWF4pK`8Ftq{5=n?yO +1j!KnL8cEXi2Ft2kcllO5NW_DdADqp;d=9#y2B!LN@vS|b&JnGYnB*F=eN{9Q)o!MZ@YGfC^b@{o*f)z=yYIzeD<6sIz&#x%%-`pHY5VEl$i=}oo{7y +PJx!XlJ@rozFYPmeG%PBMWX=O@+aP^6s8?Xnf$L9$j^t#Jx_4$&jX@sGCbfgFS3^#*u6d7gZ;MI0^(-XibQDkyF?Znj<;q6MPWu{s>rdngiick|JQUB3`g`=(CU|q +~SXW+;w!$Zvt@)+qObXhBvv`}z+0qULywzo4u&*2j2%Mo?`Xc{qlEvTvih*WM25>#4LGSO1i0e6l8Y& +I|65l$2ncq8hNTBvPeidJnU0)oCl@a{>AmOJK7ghQCvu?H;c;9m89LSwtgXVnV+7z?DpCmQ}k!UmVJ) +Y5_bkPW2c|P#I^1l%MS0GETiRzb$MBr?&s^r=};!tWP%6zDN_4hP|vO+2`Mj*=IjH|>g4&h*?^3_1!HXIa +$^O!#|k9)W#Z(aABO1YNIBe0CX+uQ33Nd`Z*jty3j~MW*x$#7ZLELV}3Vwu%gEiDz&*pK74ccXeK +G{gBX2$!>1mrpGQ{643g0pT2$4z-5+)&pso-wrNacyH~|TYEFMY>hJoZpzj-_-n^5i6rm@MC)^6JZVB +!Mvg}H|ydtJ;aPEF6=-V{kjh_7^@zkma$zp9@}g>kAeI4+p0XGW)9L)9rqzi+AV9kg8`>u`Ic3zCxc$ +iEL-b_>hK`5}(3(N6Aho?c`2kqV9SxRJbQQv@efzf}fc{d7}`gJs&%t%UHov^u9%|d8o2vZ|>NmYhM# +q27OV7i++Rs$zE9ZdogleVCBuXdZpXqN9yHV{bOllIri6P&!aDsZzqmg{5@(wKC=@L;{0=*sDB;3IXw +4NHHG(RqJ5h$Cr(tkIFE>q<}Q*`LCGi+@&P$K3X-#V%h&f$q~PhBi>gg|R&^#+-il1(XRm&gMSr<@&- +nbb{3!J*qCR(^7;bnLb3ouIyDZcy#z{8d;Qy`U2G((7b=gv6n^#^(CWb0b@* +MDIGn#(Tj+UCR`>?>wX#;|G@y3>o{n+3E4ZMumBMq0p(8@x%DcnX6Jbe4M|nv)Lr}D~m$b4}0A>#MAj +Mdvr9Om%*tskBJ=WUyR-;w`Tm0Be41d^W6w{kAMgbH~DH@ +1!;22z5r6-G3XaYfm7ti@@^zR#dS|844B}Gp(_lofD8J3@NN3D0AnvJdXqUQr4uyxxspVJ{? 
+(i*0|+FwY09mzOCX?5v2xjT6E^*1mI0%HS`3lE51saTi7#*^CV#Avd>ee`w(VN}~z8O##>#=`JK +{uo6>T4B%XLsdqB|>mTBB9e>VB{85wXvDkMFw~vkM`Z*)vW&Z37<-%99bhHe5lYMX!Ddf#c1qKEw@{Q +nbxtZ{r*wbe9`-$X{D*+~?yHi;3QTi4p?aWVf%NJK;Mg6sB*;+bofB9^T!dR0qBK(snzaCr$V}ynIF% +0s07D-XH1Qf38XkO%far)TLtDygUy))QO^6i!+Tr$Cvhg@dVmS6c_6r9+otvO=EBmUqk;&azehK)b(& +7!{Oa5B;^)&UBw`3MUIv_zn)!sMGld+UukS~igWw2Pt|UYy}cGoUiVJ3^x@~#Q_tmOOLDOh~0@MidO8BELtJ1dz8j{ARcKaSi$WQwjOt3*rO%q7cnj#>MTV=^Wx8wT^yADZY9arU +rNW1PCl3NS`O#K5Im6g<2cl93-}+r?t5qmy;>@s3~Nhr$ty8`hKAyl|N=Zo8JCThWjAS+F_-YRq~xzG +4wqZ{K5pkde1u3B!4x-Fuy1nd={z#_Dc@P%Zh&tKj2xmKjrt{nQhJ+bx#`>=z`pQC-$xlmpB+wxT?c9 +(jF@w=DFy1UQ!&30i#L*?Va;JK;}Z7_NPg@W?UHJlS)Gs^LovbY)zxty>EfUu)1tLg3EB=f0EA%WjWF#=Rsf^yL`ElO`(z$dEo^c^ +cB#d3_Vvsd;E*@x9bI>jZ +_HJ_3B=itMNE?oyg1Y6uyeL(a32M6TSE4%%=76@??|VW%&?A2b95*#aI2~3J4YL?=JRd_<_~Q7JkyR+ +*hVY9op(?lkh0j-JRd@MRa;H_|`lH{jGcIr)kcAvJ3{q24W9Kt3nwec~gPJ>M#E=1~GdsW2Ei!-Hy=a{aXzmkez +6L!c6UwBRGD;joNNCEHtUG>q7P--^M_T$PIwePtae3^Oatd|d%_K>Y+v|9ayuaP-d`{(ww4jNlZ4QWS|W1V!Wc$Agk9j^GSvH-a%BT!Z6) +Q_vU~X#}Qzn|vb50Hx?6UE$_qnH>h?BUivHasU;@X4H$v0QLa1Y-YgJ*4ewVBnANnhA;x)gwB6uGMED +S0ia5q|0mu|f)g5yYpkcS;#j;PXP$zZT)Nft^3DFC*v2AP4i;0Orzx$QVvwmcE89fxGJ&s4H$;eI{FHs+w(<54uuQu5tM8o7RT8>3kB7ypLdx*zVC}sS7 +6DrhuRu`;ZUoZP-3Z=<4rjFFDxa}MLjm~!^R8T#y2yZ-gelNqS5fv$4iPan={sTshy!AZeRe3F&op2v +2RCWz^1jP<>{HAhMaEJ!4+Lu=wSID#@ja{*uB>$$f2eyA-k?Zpv_>*a~D| +W^ty?9p$`F5nr>O~A%0cYiTvEi=VPcH=8J^ZigKc&F!eu;J;_7(r&PVDIdofLL+G>+*Dae1qfulMfo7 +~oj=*{$%~y3hwJC!hGDC`v1LEFG>-x@ff5J1_nG#nQV9`I|@v_?~|%PYN1r6S@9883Q)$=KaL%C-~Mz +hB)i{csX^rO$bx3QzQMNE2z^KACX|0c@aD%aKKulpGSGTjJG|HnfK%C_Ocbcc(hKH=fjH^ij%sxj4Op +_LoPQ^mc2^6wf!NvrH9x)Gu45-Rkn{WA~K7@{<&kNtSvZ2y3RX&-%xIFIjP=MT=8O{C<1!UqO+`@ba3 +UbqNe8Z;&RFzP36H5j~2|IvvbakW?|im`LLIyYUeW#Q&9rkWnz*_A@taxGOA7d0;999&&D$&&w1ja3% +160SFK}~Bow_H|lESVx8Gl_1Ez@SB$*x;S~Pn`X#6*<{ +*mz(QhvI+E{6eu`?_G1K`A-#EqENwBF6QCmzvZCZIL0~Ug@pWpYvoPcX#^VP +Lb=h*EZP7NxLKm6^{4s!pKCwvmkfj)PcoE-E$o;zvyp7DLLjqq#ZEiSC{B|XSDpPl?96Ce=&-J8Y_Z? 
+5lsuDmsqv55E=1H#YRKHD%y8dzOy^j#dr2ulIQ&$!}|nty34ui#vYidfIc(c!w;8Ge|qyXq5bK-A +0ZF=XFsH=&3caDuH{7`L2&{7~Wi4?^1BjbT_9%7uv{zJp`3>&sO<9$rSR +^;nFGRrd!F%G3u>-qR;%rAtLl}4pQu)*gmFsrS&3aeJ0Z#!&BqlXIN_mjopPmkc8LQw=l(*46<#!v<4 +pTNu?i}ovqheNd|@fvRyZp>(=6|JNY7lyiE&Wrvi3jM6Gq +ub`l`iIAEW|xs6A|G=6j%rI*v*UTaF=P3_rpw%oU8Z*=%cHNJeRxcvyaoKePtoj9(wUfJb@@PvA(@OG +s%wLAQp+Or_LLkRwAd$uPoJLz?+mZVsdvnJE6Z7Uyjl3N_|-(WxBWoHQ)}H9jXywS!DY^E4zN>yK5(~ +D$taT*GOpesVv=RvLCbbDnuFw@XsbXOJ*Ekt%)7OW+%-=oXA>cH +~fxrq2Z<>y~HQD^Msf#UH~H`+D9Umn_cy%JOfy#uVflrOTs>`^T-whxJ#W5vNLju_Q`jI84$%q169mK +VQJ=zuw39a0aGmPzpyG3I+1582zzpAccX*Its3wz)lNN=9=)F!<{t~Bvix;r>%e4zn!f_(weI~s6FAf8`<9c7#4E{1}B6^sM +`N28#vb4Ak?h1c@{CQ3Nauv=dP**qLr@e-R2aQ<7@Kv@FM-TnMkc;DYm6g9qPSNbr|Gx^?6!uw8Oyul +Fm7s_P$VC;pNhod>ENTf~nIN*!EWua-E% +?;%DTp()0@BxohV+xYZWDNhuTJ)-$wEO8S}@!W26CEnXqS=f1EIQasoFETrE2B)q&-)<+Eq(28$ZH6z%P^)EnepEY9%h5*p5qJWTK66m`!t-o_fw +p{TiM?l9S@_j`7Fov)WGay!Jw>*Z0$keCc7J?b)Xc4US=T~KXj6?A^gWF#J5)y%}pOXK*bzQWOdMq}q +HDPyA-uSrQ5W$!@;m41i8=*{+_drJBt=?luHeO2pr2Sy*6>wLKjSs>AGgH*Db%~ghrmML##_mxkh1d( +RLU4$lX@u;ygpQ@(!Ygg{&(J%dRX-z~UxaVybDxnoNOc$)-)jQU3jH_Sl#SbdTa*v7(yS0@2=}uFp*H +;N4Wi(V}FgW%5$UrsNGl&c}3!{${$UkTfIqj0a9;Sn{$Ls$`-J3Nzifu`w@BE5>=YB2bq0Rb%c?KaSi +8tm!2qXqE{Q3*Z%&N+)%u`i;`nzV=?3{=!g#Zxy}V@%hFK +Us`E^Vao6_C+0inrt|1lDwY2OyV1W9_2yJ6q{D!s^PI3Sm;KMjp +YEEijjDi!w+FF_itvVB$Z(f7)XrYs+b`D|&s@hjSGYi&qqB=qFmPUj%}SSAd$PSEo&>K{t!ABHy^37X +d%bo@9M7Z1H&OU_;#k9jciq6nhHH8r@ofh{qZ98ISgtmhiRzX~ngN@bq_h8Vh!nC|w6~~xwSGC#imrN +lHQGqRa@XDky&4IoU!xrhtwBpA!^zV +W7zt^IkZ*kQ=fODfAE{g_fC>c;LeL=L4;?fu>5rzvz0P=pG=8810b!d-@3SMfqR*6ThJY1ymVdJTk&- +23+%<%rGxzF4Njb@o}_&YH_>oMoJLhBl}Dyy@ENZ!i|^5z}Jl%{8e +>&(RFBPoY2?bs|(>qbQgBjqPi3cd_)LIN><;Af%5w6lu%f4k&Vwyb?-M_BZKSpThrf_WDH!}(9<$Fql +j@w9IR(nm +do0gwDN7N>CoWxVvao +~-9^jZ*d^jcE-6TulDb>h)_i9e#~PbUWX{Np&?z>glwUf$B5Lvi$DAa%?!VaE^<{Ff!6NFD8I`tf^^w +9cWy{(EyJPvv?Fba-hW`Jax+2a(|!?8h-{(c1*?D{dTP*1F?AMYmx^MKpGr`&}Eu;O-3pgaJN>&d1^W +Yab&y6vW%n@3;7`9>v?&$M|A3Hw6xA2K(`h;~(Go+l_(a&PQqYH!C|8LVJy0h~bIFo%rxyz>p~M#Z{L +kZ9zeN8$FmcnJt~Gy$U4Ou*JThnCVDE)onMjA5H$spiU21Pni(V4}@gD +Ax?VqyX+`C_0-wXtx$cm<}&(jLZnur^n0Jn@;gSMJR5;w^q&E(BBDRja%U@6B!Zy~%eX$o3v~%)#yj+ +TMfB11slK^OcXSldbB$-8$O*#u8^Z54C>3FRtY#HE9|wm@(=_fX&e0OWuFUO23X0aRvdk_k4=TsgiA< +q$VljUJsYbd8<+pVqdMn(J_Zsndot`CX%(9b!nip9V1=ymT-yVa#jLmhcbBsAjkm`hZVhhek +>fd@WJF&VX$XxK>6teT6QuJCepNw0HcIpaPZ3XM6YIqd6 +%_#~WOwoR$`08pyKS$p=5HqtMYJ=UC}4S{nSk&?Gtu?g#4X!=d>bQ&y}KM+*P?6)-nTz4@P|LHqglv1 +-mN%B_7-C^(4EoEy07j$K&|yAe14&b!gxW%!>aU|Rjg<1EYEaUY3Eplw-t-MvFul)p}S$-MI@AjowfN +p8H1ZpYM}jMM|gY_udH}n@U}C%Fc1pEwf7Y;^M1hvFgP}dzR_tNrkRg13O-yR$T4u`PsC&Zy7w26 ++Yd~#IfGp&En5`U%;_xFgA-li#c$fv{CUrv(7{#wn#pCWiBa{dD}Oa|N)Q80WVec!a!$J_*4r0z-7xA##Cp|p(IKYuxQOn;+TV+_OnM5t6{p*fzpDEr7IB0VKr}yIOVv)1( +6ola+_PE;EPn*4%RHgMtoFIG%s17V;y3>8saNTq7+W)lAYVdfn>#=?CrG>o6Mw4+y*)E;zmjoJUkR9; +0`p!!Gj7M&+w{7`iV^=h;l}gI5w#+0v+qUdo;&^EgdBWh~Tzk`<>lP%th}m +7b1A1x2CWAjR=+TF2ZeIoyZ?CF-Bch!pM6@HJH|u831b>~`RC>LwbG$P8D?cL~GN>`&)xq@OU*l8-*N~6tFR4DnUZ8$avj8p +fjkA2I>TYmTj44-17NQ=)(p(TzukGOz22sKANuFhgm(N*PX!G2v5~_|`uy196Bq5j6#qaPDRuwBM`^n +L=#P{A!N2os%%;?Ro_W-9Kb1%Ro0t0SEbMPy=8v`HPnGzGflc&DPeP8fs*E^#fK+-^Vta9gdX#uG_|ffy;*U=qdCY(x@f +e>x_ZT})+0swG(_T!{9}95wBl?7>qfjKWqftqqN7wUm>!j98vW%V*m4w#FXVUKE7W(+h5#_A4bi6_Yc< +FiZG|_esLl`-1jcVEPSbtWk8sp?!I&=mCgGtSehSvx#bwTl%e!7L4L>1Z@SULX5>o)cE{H4f>6cR_u; +2rM4|JYr{@99vhnpe4)}Ys?DTRg>IH1>5WT!rNjsy3(A)?|;{kz)MMW@J3v(B|^VN3EUvy{3?u&yEl) +EjP!Vb0vvJg4Z=jU06@3XS+7?L0MSjWh~>1+h)jlsR^y8+z*Nd@9r-xL?uZ>N`S@!(4XtI~hvG^WqwE +fkrnMhsTRsy2CP=M~^{g9$5Lse@)TyZIvNxjU3gG4eIRbejdyu*A|kW1Yq{H%ba5ncgEDXD=2ms_mD6 
+ba&XATc0+5t5ISmim21sM5p)@phhvMQ+OJnM!w{b!Y4sSg{L#p3~4q2^q +6M?bDDp% +mXy;nZ+P)M3n^fW5Y$#D~@ZTRO7^6O~_FuKXfpxz>*Ugf$?V7Hrel=k5zjxW+1`hu2CI1pmkC+Mi&`> +7CXC5It&f$np4EFxNBswO?vh>iJ-pkJmB@B?NK!))YGcjVb|xAYTk2< +1EE{;Kbi90z(yd?0H6nUgytqVUmr`I2%Se{!~Q_*ip-9&>VOdfautS(+WIA*heC6F%w!3O!Kw5SGTDW +5B<(x8uYIH>fhj07ES1ORUc8{4Tkg-@FTwd30@s`~(y{?bBvo%GkYgFFNN +78$Ido|3{q*sX;?h80wNiz>!9($6h38YPzr40tnRu%Zqhc=wp)6+m`lvkE~=i8V_T|jAiIsSHegzlxA +%sQAGWc0%d^F|1e!`M?tTVci4elVDYk0dpNbACSy+~2W|0sabaMR_)i5^NEdJuNcab%@q=3DL9g&>nC +L{2XtYOOSVsJDcxDy_1g;^Uz0sy%tHqCeRsgp#4#gyP*%uDd4hFSiWQ%(aPjG@8yMEk-h3UX~0uI+ +0E>0Z>zg$jAp<63&uI(0?{HZr}tpcn(~}78pG+-g1&VwxQN^~GDV)!Ij_UO!+7567J89SHcSu~H``05 +Mlu{0-jr{5|K;?gr%YBEw9x+mZoduI{olgf|Gn$~8G!%pwf_={Ve;?_+X-NFWJ4V@bvumhMDl=5`Y>_ +X>0$P9Ov#TlAvpUH!tItJmzi&fVd!C8@_2BIa?v`_S{*`$3@kKV-zBy5x)rz5pgoW^JpKt}jRDxvfGd@&8JoYQ7>NM +cUL_9(lML3J}kDx#oy~il>{|$tJ>%tRB(@nj}%QLsHzuWyPJ(JD24*P=SWoN#(Wm-={KL%_|GMGs1~C +6{4OJr;`Y0x35v_lOp#JDr-DvF0Ze5p1v9fA5!&nmUVYsY^g3&c0Iq~4S3B{oZ8EHi8HIw)?*m4fE3E +V%w$a{=d?bdZ`&5pjmw_5&fBOuQAkuir*oXG>^%i4fj{ToEj=xyj3wkrq2ChN!M^*tb2yY-E;R|AVwl +vNhm +ofV2m^H{g6Mt0n2~PY>_TiuTbqYIdKGVa+?B^z-pCpv{o%&($NAoMdk5r54rN${`Pj0c=!$BTDRhu~voKl_-%e{vy@n;nZF4t$P2*iV0Y+{dgX^cjxGkJUt +f83e_<$5vVY7JjUxXd>;k|8uy`KOKy?@IPou4;&T0nrD1MMj-LA@9t~tU)J%!zs+Xz0k!!HsQvcXXC=Uoi2hGk0(@J_pH}iu99rmdJco1Acb +6;7R=O_F(u$#`$lfoiu8*pEaf5BJ(AT=0m;264#hegcY0OXB8ITu3lA`Q2ItYH95p8~fqItcqdORezI +$sBW)Xqt7CjRMZ_k>3nbp|@B5*l&yqHaA9N03OJd!M7g2+vvjsSMR4U +fn+0+naPNY?4@UBe-ksMS0As~6|<@57;w7#1e;s*1G)Qs3tNhG7R>97U}s%5PpEtW$&R|-(qy@=4dKA +RDnD_=({|2FC#L~>xPyk?t$(rjs@-c>Xk6@hLkWMVp7FJ5@x!K_#$slar1ImvdBxp4xw2C? +|8#*H!j-gyJfe0^}0fmdWG69$KRxGWU9GiIP66XKfojZ4q+IYPdCA>^1aZi-XjoFjRLhhnZ`d(CQDmOCx2$V +X0^Q5OYvhZUK_t>aLeiLzVF^e2#sO{fw8A*>`tqG?F%wWR#k562h=>ETx>xTQr8Vw!%`NKhA65Pl0L* +NhyZ*j!pnn@;{>4jv1~I=s{X2-EaT28v3L%k0xki%sPsbJxr!V~IiSCfFZ`$-{x&Zqe)KSS1Pj$@Yr5 +}OUPUL=`p0)F!eP2gDtxPID#?X)M>L;&iCuv`Tu2T5pbe$f=>dP7AVe9oPh6Y7P`}TmS=s>`oeUYCjy +!~bS>vtxWqX%~2=^>}1j|AWYV9w +$!W-R&`)-ZFGwS5G_XBeTxk6Xqrb4qjtU{S7!?qCFsg8OBP2h`Jz)jiF=2Q~=So;@3d_}i+c+v-=Z<` +3t-5`ml2)J`*|L8L%RT4vPK)kI`;sqN$s*lL|Y3}=IWymk3md(qncQoQ*3sW3i^>=l7_|8QKY{6M1Ix +pw$%Ah54I%f#Bm(Pm=pOZHdTw}V$ZCNt&81Z3)06P(D6`G`OR}iUEGO}O_3+6*UV{Iz!7=CLyH +9moKLeMq&;28^kQ9xQ2c|$I +iXBM@@K1Yx33lwy(8o>Xh(b(`(K8e}@&KTZ*yZQ|Qin?kweygllb4kGa2@m@CK+^0!0jv{h7UpYPI`{ +8#{8JNCO(O@JB}eAnzP>r`4xE~KOQM)eoV2U*8EIZnTRCd +UwcR<{sXU8@KKTzrDW+I;mheMt7m+I%s(Fj_J-;gD~&EBP#9^8ceh%Cl|EcTBBaeuv^|L&;8v*w?W#Z +Dgn7+wAGPpIND+ZWj+(VHD3d2uW_3;ec$|i$?qTTQn +RIUqgRw9w{P}hMi2R_eiC)K2htq)OtWzcB-bQ?X5pGo4rA!j;16)Le|OLQ4%>f+jHGkNX#cqz#dl&v? +J#Jp3Zf`PWS0i}V@d>Ya4Q6^iVoybME(s +w#kQ2KTM+Vb`1b0tWNGl}XlJc2Ia<+BShVa+z((nfurcIb0c#ea=B^ivrA!gni)5<}6L|uO*Q*1~4H? 
+>pHa@-|iT_?!qoVsy_GT#K{Xu&{|J(}?VXuNN(Ng~%yBOg2nfmWLjy&^alz_3h&4(cScE<=@+g=m9zK +!av3{Vq!;er5~0^_eHl|=dqd+!LnScP)Ysat6(dSRfV@P-)Q_RA8IlhJXqv*F&>^EkH^jWwOI^RE%7c +;xO@nh%@LLU|5x-Wy~cm=Ss&34#I#$Tq90$bIa*p8RLNh240`SO&ZHInDx<6|BkoDHOHI5gdG@;mEkv +AYu1h!ix*2KfK3PG0{O5D**aiRes%MuAk$IbZ}J(8G6X<>o +Er6`rb$6Epx6g6ufYA`xlC?xw;!u?@%FuThI1&dtymPLwzP~+ +VRz_cd#47K3FHqrq8*|F{_X*=znqL3BL-JGzZQFoIw-L{bF2e?o|#7Db1R8+1%n;rPdo8a@ +gPnm(v&{0X3r;!l)ydI)&2^yktf>M-QqE0+(0KDw|+4MiN|S3840rpc1S2`9pi0_Sjpi;wfsLs9fArE +i=blVnFc_Q%e9r`v~DYko+Eciv8XJPp#%2-?od_x0~37fFB8LaEObT6WOt!}K6Jn#*{0G^LMdDC8)j5 +cIE#BKC9?MYrJhj;@+C)sURB#-6$2rW{4lRzv&#Y0Q@&qV9peEJc1dpxH~2^t*!Ss6>EorHCTY0pCOEj(iU{%&6#dcMYATCCH?;B3uNo +d^hj`oG6|5{N8s7T7e)Md~XWC|)ltlx>01fy(ucRrgCmxz~v=&_Y4e`XKb47oRa4&Vsl54KDVQ%XMdy +b8^KI+4o;Rm-eC+gRRm{|cfo$z8XJRiMr29C(!%zp{o_-0tmx(t8d*x^+T{3HuOV~gVJ_rBxYY_<%~- +kF-Ya=9f}A{+J8{s#55d_DOC5Or~}a4CfIr3=sgN!r|cslaV5?v#n6SDY**Do0AJ6?Q0A!xB1sUJ4S> +d|!isP=LHi=vJh;=;XroAIm}nf(v8s6#-p?--x`BTK*a>kjKI>%YC +W+Oo0xPMPtUNgddK|mdS3W2Ccjbt#k<)kb7!EtH-_v<`Wj!>G=4PN@J;Ihx67ko^fS_i}H9& +!1`A}o0k?v59nBTlADmfg@qJ;%G!;;TYh2awo590v$GT`}O|KzpD?Uo^N0ZLHK?8LLaMt)XGe3M3x3% +(R$*z@z^)FlNKuH&qAr-&5E61oAn*Ku8PAy{Pi=++y-q)pSPzzFq6n;syd;3=QugX|%*<*eRhATJ(Eh&sp+#A5%Q&E?@ +s&~G8BPZSfF)VA~I|v7?g%@5&dJQf>6XBS{8@%eSmGq!Y7f|vJAbY(a)Zibr)gqGkV9*?@Z80KYT0#BwBQR2Dn#-tP)>gEoA-Q)v}8b_!;r +>R1g7o8saO1Hb6@ORJDT1R31d7X^UOTifmwkA1}|$+ymnp7KXAv`poa+vEZm=WU5BkLKfNO_6mr(-7{67Z-djI{^uJQ-e{5LN14Hp0M5`USS5PeeZsr;zFbNW +zA#GjOeogVDS0%wO*;uz?Qj{F0X{wXrO-QXd;13dyRu*Bne;bPOvCAB?VFPqCwhPnne1== +l>ZtXzk%JyK*HdY)#Qy0_~InNd__ACwp)M8P+GWhOYTO6LlhSLFU50Y$8dcgWT0E +Z`)o0J;Edt+oF$q$Vy-PtXi@;vQ*3RX^9uzhiL0JYg^s477IE|^4DqC7V>4obm#9`Udp05*jF5jwOIJ +ypNaUV4Sk;te0^R3)Cq{y=?aLMP;--bR0FEXt*;;BRzP|fO!g-}^s&zO8`9u1FmqIEOB_$DWL~%^_(>hFJ0=Icio=>eSf=ch$Iz0}P>8?#yr}uc?Z0_aXTqw>#1jd706O=}qcxoea{@nu6@RVekQmsU0i>=SebE +y!(Zc`@h}1vi{o{Z89WCD9V)`9{RS*w@_3N%Q_KJDYke2|^UEj +xrR1MTJ}sg2=s)eCuv4m?jK#=7(o*6;kmG0#IdmK8&a-}QdW3&$C#mGXz#U``)`fnWPSnw8NhmX_)T@4P?<2ZHP8^F1i9oBTf-b{NPma +&f08K`s+#dk=H%H#S=;;|BuzYRtdW%^62e*i@)7MW +vys`Rn(t@`+APpToLS^l)Z@%Wr4Mp*Ub_f!=W^*}uQV(o#1Ex<91YHap<=!V76P~^S_{%sEF&aL-S3^um-AB)7>W +$3EJqB1T?lw@G>}zLz62qr5F5_2s19bw)uuAL2cVqhd7ANHKV=gauDdWiacIk;S_qBT@x8fLBKv%bUq +nES17N_d8FxQ90pB3XJJ)3@`@i402RJ_Dj`2=f%HCjb^BYG||ypuG-jxz(%Y}&H}QQ`;-CYMBMKK$}{ +OJrW+>zX!fSW>nkV?3S=fOQr$lHbn%3h@5i9{jyj0e%qF8`jGddc6^cBy;nj(7C&whgO%6O*3%%OT8k +!<~)Ne>nM3ipCXWS>cZE(vq@k(YC=7oZugcNuBUE&Y=iC~)M}n&nPS$KjYVkN;U5XzNsX5h$xuA6i-wRmg)8>&i%c?#+UGHc!>^#A|FXlJHnBWbOK-`nuqd1KxRY1z_*a(S_d7?5WSqN8m^8iBx+0;pHdO@3>dt> +QytziSxL;BL~U~gnqpJt1>gPa!F2rQ@0n^yX#N#h8B^Z)>^k+JE6<9lvcrk={pGh`v)+|7&X=Kp*vYS +{t+*}G#|e`!THbUe=`4hg7!Zx3P&KCAb&iT`M0k0+hNVWyvldZ&Ikz~@jxVkA^S%Nhp3+-8vO|nNIxF +TA1bwTpB;GeFWl!dj~0K9T=K*2`R6M94^HhUc1)TPhpZ8yKBpwn2jkFBD>3`DG2;V#b}T#W`I4O~{R+ +|WLm9b)6!kfJp^2m8Nqm6t5b}Q$bnX|0;v=Ay_G4q_+feGD$<+(%RhxUX44*iIBz=uSAQNS$wMb=M>c&*IFH|hTOkA1iN&#}|Lv;1TA +z`wKnWA(s4k>*yKbEt$p~goGv9#cPC3`~>U=LSZcvhrEl>A2kSC8hp%=K5=Zc5-?W59otcIg +=9NxM6lL=s-QevgO0$8+CilHr-VqU7p;aIXG*m`0vGEwuBbJ_fQLeFnmzWXxjWH`r2`LhyI+7m_{+Gs +%2(4u-l)hLEd-GyF_o4l4{_@%9CgR_V&XK4Y>nQN-?5mXwBHsyu*jIDmBva50e3NG1#nLw*zN!cw?R% +COsds1EatGF5SRFBY$j8i%+;QZzbm4A8Hm+4%eQwF|0`xxjKR|yE&s4nmQry5h8I^;!^%JS^?-S%+UC +m2HH$tCWz-Lfs%bV+uWu1L2O`@43t0UWl$7ym?50c4frTJy7x-SLa8=x-2aNH@~kbdg^|F`2F}w<&^e +4pn5a-bd7VLPQ|Ws7A=Ot8`aO8hTv`Wm*I#C&HVw?tp!!x9udI-xB4&^dM8XUY;CCz&sajM7mfqKoh~ +4;AwHA!1n_=k)>&(!P9^Dsw1=$LCwPJ>^#rdg_A48^Gi>_Y{W9)T$sbr#6;jt7*b1B@Rr#*yd)BTefg +>AnGxIXC_Gl73Xhl?6fjAbitW%JXyFJdjH5Ii=uUHe5}*X1q 
+Hko34&WW4{axYI-hF4Lr@{RlR$memf1(rCo>uv{aB@Q0JqR@wMA|0#qf8vg{a>uzE@hbe2_cM2C}W38 +{^I~b!`7KVd9JWbqDKW7CXlC#!9hwbA)p@2TCdE^(w{Bqd*HRnb9gVVT}8S>ce)0ibH +i~LmtwS*EB3lZA6N#th@@jZ31;A3Vl7y;DWN%GH|2r2O%CCCJiM?|#9`}t1+Eabb8VM6RcnpNw`dZO) +e^uMEMes)-yhifOutetB4qCDT>AizA&no0k;0G@v6o~(p1kGeDtE}sq?Sa-QdJQoY$yMm>e_lVobC&< +ohHv17`R1mr+5nN_{BZcmxk#&c>y%+N}i^N=nUujF6q9rN_axc48)dTc(xTv +d#X2NobdVbr)G8NC_%VDplxyX>=2A%6>u`j(rzh~*@l4|G+324guz5K`p^!EG;s}kETkoP9Iv%V6T-j +S=Q^ac_86o{-TuU*ULp9B-L)B22t;Xk0dc7)6plP3Oa-|d!$3ft81f8Tu1Tje;X9Cz~YI6T}jt)xl37mot;Z1}cZG0# +@ls)k08^r!H+ffV-2OvoLy$SKB;^EVTuzqMlSnejIfWZ#mTGXE1`gkIV+fkx;D}-CTvFw}lX4J_aL*) +$-QsL~%mI-PcQ5ehTAI2YQnBO$WF0-14%koyt2K-Hf{M}5f-;xySx{31R%h&%IKhdS}K+2(~J-=a35 +#vJBnAJ8!i$)|HmheUrnw=NYTPO9{GS&fDO?lqL?8@K{W91R1ohDc5_`8{k +)u~|q02FE8*cu0QBsQQadt`GYnvYo14@<}%?l+&=c$Yj^mK;T4P0Ha}Sw>?4S^E{NMWxa*zbY(<~-Ox +l-ULN;yU&(3Ey{vHAr6%~d&_llM=k8Y0q#XkGBdxA&UUbcM7{}%*(jL)6?&u6bhRa=>TRsdEy{=|?9? +gBsLx}6Xs$#ni6V=>VAnFry+JDLNZos6iesE0|o-?c@v@^`Jaaw^hR6pmJ%rL%)!CgVCbGsDEb`d)`X +?nmLP~p;@k}*HueALK@(BA9vlZ)1MHj3vVx~x?`IA@$4=RVgeFn^=sn$%viIi+4p!KW +jSZ9J{u-0d2Scs!0_$fEj~HLOaJiln9eQK$Z2cb-Ul&_>24*sJN +A7$u&uo2SU35np3`G%N*19$bO +cQwydaCxl?`24VE+T{prKsh~zRJObW8qz0={#bK8LFofyPBzv$-td2{(N6M^wyss!lxHCv5>># +8=#46l+DR*-YNHc#^TlrN{OjH&SyAmHM~XJUHHvYHCIxRCvF#t38p7zCOUhj=fRHFhLq2vG)|px2r$d +(lRuS3B03{%sFI8p?uFZ;CHG4dYpqFIsvn=m=QNG&d$LV3446gFMJ-OuJ+pTxiDQsQ$6`KAEC8E5ii8 +kda}9(R{`u)(+@mFM>J%%!evT8+zDa=}kI9k2496QBL$ay~z?M`a;#JGU?MdG5E^;t_##@?6ubI*6tq +WB%mcBdF2p%MIvM*%2;S5akLxo)|T_>)x2){E#=$0aZ37^s<7*fCnu`SdV5v01)LCoR~$}P*xtT%%4| +(dM1!F)Od9MH`&%j(YEdALaV0EPfKO|{>$yt2>7K6NM6*KP0LYZ1cm$5}<0DFkz|vWyp2q87yh!Ht+E +D1a|B}(RjNNNg>4^EM(yy%}*5ScJ-KyJw0#07gKA)%!o@_B2BDk@U$O^H51Z!Kw)z$FPrTOCZP? +(l3RSvM`tqBCY{B!G`J6;~2B2L@3;EhaSla_70z^4<;et@bY|}U$|zRo+r8l +(6bo1!MrNadFr)#BJZ8Uxl5WP8kprhS0Y8qo!}%&W(iWCw6ES@t9y^6Xy+#P^9I~tI0`J~jV@uRSSe#x@o}qP4QgfSak;&;ia+TVWOVej(7hr`kB +?-0oCjeaMc!U}9fpp`$EpcC?u};02s1(-{~2W;pNjauX!N5S{FG8y`u~?=Y5f<)QeD87_(QSu<+1cS!u!$qU7Ae(cM2bH=3p}t1+yZ5uSvTygf(rFAkr*ZMJ?ul-W +n6Q(nNEG~J$Twr>jhzVTSBt%}Yd6ygDgY*$bnd0P;Qh(>-teMd84#ypp_fE(|-}He7#;xV~LX;mq|V3EEZ5e%o+|Z*yveu5qT{QK_ +)OA70}9^K^lEWoZ$Yo5>dM%TwJ)GS4ghzBGO4FD +HNnRISPA{ycf$mqdP70Pf`A;9QOun$_&MpQtR~HktzxIL-EbwA}-nMXe))DSSkS({p9`&}D;AH(7^%Er2VWmZ9DAjQLZ|xW1*+CC6 +ghXrPw{3{+N;N3F(|5nF)uL2krP$lx&puZ;H#n&?P*-D4xmW=aN-nJGeBulnCz9zm#4d}VsFZAzBvUa +3oFgai6E70XM`NLj8p@otN9TNP*jT_cXeMq{Qr{oUdxVZTd?3eUvb_Qw}m(74TKloL-Y$ +C1VRWS5MSS*GG|wvs%>uHHqjkX6-x_YCR%7_jxoo`Tn7P+1rwADqvS*vva%>Zkh;^WduzqwyJh)HajD +D`REuK_=$MU&{-v9ut-Jbd;P;wK5d+6e?tH;(MQYD4fD$nM%TZ_ +*#R4-ywYhc`@`94}^~tgZo9Fr_kqx8@6MnfCH^!faiI~GKn5!L>z|@Va#}jmJpfo)J^js1%wQ3OTyxo +MjKc7>KJL1d?>-Ji;`qH1pIZFRQA?N>a@;0~g^j}}R#lO&&uN+3--~Lw3!gbS!9gO*Z1S*AlA|BN(|J +m!S>{fp4{By16|D{*@UJm*vSNY3I&@}$!a=;@~g?@ykDfn0<+$l_vA4`Jp#}F1KKJ}o#mSZ{esqw>fe +5XO_m+tiRqv3)a<(`K_EqJ7#sO;-1Aq4$9M-E<=LBHW*MRu%S?i?>eK04Isu@qWB2fac*%4Fc@0!Ql@ +esq$7AFGIr`gHc^M-Lj2A95`Fg^7=L%b_-w9Xi+W&hP$`i?Pe6M`HeyO3<%P5I>O~i*r}ty3zeN)+4p +%^oNQWKIyDB?U;8QXsyH_y#j}Xqqh^CYzlWU6gN8i+TJy){Er8J*Wfx9=%PUSL-C8Mio7{_f;UE<)&k ++1`DT0mjm&+cQGe)@px1-M>5dD3sT@_mHlZC$g`d*VU*F%~xjo>|?(gs19`I-P_h+~Ft<)6wt6Edqyi +i$WEMYCbE63Lp6PUlSnNKPqnq?WuEPwuUmIp+IHO@@&gmOW*)!|0k&bv$F&ujN{=9!*8ka +-uO^N8SOuoB>ODJBBX|&IoF+7hTFwsaqn+36H#mu+Y_d*0lF2FDu44M)dWh;{C21GeMdTLdIa2OaTQT14DYfK6 +6JBr9|_gchaSij=XTh>4^!m&yB%Fd>IFlUFLom7D9Xf@%?^0|s3#dI*(ueNq7@M)A*9UNR+F{+zJ22_z@HJAU#@ulP-<#J0DaD29TqNeb$trFG0<sz +l_zibl3(D2)81^$oKZ<%xN!DAgysz-93yFS%ifSes@SHvGvgv`j +*Pl!lzu524QU#(iKvA?78iI_+KyxQak?bJA4rBK)Roh^Pe934GaJLxF2hA5FA8)35nUK*$?|Duw=)INS 
+q$Q%YfL{&k*Q5G%ohb0@LQ(x~j}&g5DCqt+aJ@eJF6YadGYB +#DI>8I`cI17iTd5(^A!Pw?+jyi;9DMPiT&#*BD +#Gg;(bu;Zs4S&}!VHO;*I$Wf-I$ALJ_k_c2$z1>(YOku3bg#wYqj3Izr=HIPj@wTWc|t(qIE(qHnH`m +s&k*SC!QF=nCfOB`*?K(8t3Y`!>k@SS6v2+8d;`KA9)bdxJi5znjkhOl($}>0Ey2Ev4ctaO?Y{S1nY6 +2zJK5l@RyOafyBIO7pjI3!geQVkrR?LSz*e +8$y&G^CJLWdlD8ZMfO}S%jcA;431|3hJ?RlCjJJRyyA#>mAS9Wu3Q{V1;f0Q-m^Gkiyb4f?#*ywD~_s +92m&w`=azO1a_TD$@OD3yNb55NihwZQAX_9h}3AK;#zeCk{5b_^?3UjDyF3s0-q|Rg0(fI31h><|=J8Twb)J#?nz6l*GnqkA +dX+mRt}5SQ5!Y13ThzR(fjBL;R2xe^OiZ@8oCcbQDiu3*Abg%+n&=GUD&}$?YxS**iq;fZTdTzrsMsh +^^&s#-&Mrb+Li)MBVt5>7+j>9zV3xh1#coPRPN%^xhOH5r8@xCX2Cd;)2;7z+)bj1ixeJ%M7{jJT`zpW$J? +bU7+7hikyh&b>1@WxAV=+~p74<4kPIX5@Kz>vUbb4BwgJbXUVkYe%1UmtTpzZWHN&hQ=n)CioVfAkh| +7*DZdgy;4_AeX#cT7w1qlN+gh=IV_C;CK>HjNZKkP!WpF@W&#BKdW}>>fy_)W?B(k0n2bgAjPwRqxnY +>~ZL)Hv@)`Dv3QV#=&D_LVbi#kl!sRhig}*^l310FZwH3xQ)Q-m)uchU)XuscR+h9WvXKI)|R(>xC4ugbW +E{C>HCV@`jURJ3pHH%@kB+K?9LOZt_}PsPuHY0E}04fh+{zEBvzZ%^+Z(}4qwk`MffZ~dj3@6vucJBL +)=v23e`cTv&uPYh0$qY(te!r2zZ)~%I9ktv&nQ3XM{>l4&jGuiEcvb*rHPv&jFpSo3;98jMf^2B4mX` +pbcsY+*&|SVE6vpjr0e?O^`cYzdtDLg`d|dOJfR0Qr6|0O3(rOpT~JLcVq`Fhr0dX_z}o$cBj5dOkiVt$ +PRIFXz602UgYCmFX5hg6-}_9^ +AQ4QlakbrACu1&fMTIa2-Q%wlt0V75N)~?z_FANdX$2H~zCl$$3c? +7bH1jnZ%N*{#W{@b=#~Uff}mt;f3j0;bmC!}R6%_>0}TiU<3nk5Jo5jK}Z<- +Wri8XkG7CK_B{ZU_~;7|AI)#<2w4w7*7$&K@Y65}{<=SN4{UMhSews3jgVP>#I`$*qRA1Y?`C#IA8Ri^5qIWd@NW5mB<$dtL=K}knKlD*D01vo7`Y@?O6CvBV!ydR7pXmByH-RHZ%6n +%YWPD_|4`T}Q=-2`OQh|(*53qs0ulCEdeBEc?d8tSK_G^%qz9S*PezGCDq<0?Nr-D6Pi2i6%bl +uc}^gB&~%a=T_bmVytNy_>` +n)D@$`M`!I-bgMBqUV5?%BvUP%n6?aS7(`mYfd2dLdInUlZVkvvu@Qnd+wuBDap;BSZ)+iphy<+g`#xmq+7sIY%lS +Jr?V&4SF;Uu>Ly7Vs8PoB-SlH&z#G6Q^EkxYL>ynBLhTV +O`czu$y+>fqC=mv<$#u0-K`u*ZUPunaIb;u!6ZtJ3p5_s_*TmSCX2;n +d>H+^V{lK%e)ObU<$Z56lbBmLUW7Sdf}h)`ax^94{c~S+u~?!OJNL3>Ahg +3((f*k`0mxQCK#7deJh-ifGVQd~g#`&f!_&Q5($pPy%tz^l4y8O2l~ +t@~Fn(9jm@N$9PIsdR>^7|bOb|HvR$7ZV?=B +xt=ZY9dbURJZtIlVvwf2Q?JKKh_QhR^2u&38mYi7%B;-7^PEi}zBQ*`x>hQ#g;)RjAk`ZCmp7M|>SYI +0a8Uv@iwUx(Ia?ck@`HWAjOj?uXPEFpd51aa%@JXmSGX=ZOH%r?I!)s7AHg)1N4<&I6IQ+D}^KyO7CC +W5)CCthboc1q;SH@?Q*2^xqPh=w|H1RrbxAiWH+=P%ipV4i+vlgJUUlUVAf`xC>Vz91$QbwYWl7`WAu +?=yYU|;Pp`_d}lWTOk2V0-UN1tBX4&69)xVWpZi%ir>&GeDUWH;na6T +aWGzXkIp|O5AFn!>;MQ@X{383Ha-S*k=zj9adSLp@x#o|Pxoq*FkQDN)7^hhoGzhn9YXb&K(tQCSw!> +^*i^m6ee!zir*>&C{1oe@yDF*|W)+DFGsi{M%QdT6m8eXmPBI(dBAx2@4XocYxdICM9=QQ|nU@=R_48 +t^G$X1V&Rx4+tQ(IwMbjvfQ*=hQgqa}gX|#4#^u3lUf4_tHa`7x$ilC##cYl2>vm00Gc=>aO@SpDay# +x5$UO%KONSMUHofn}5O5zkwg1`2T1`j526hP)jB7f%=Aa!(Sz&pc$j+(<`*&9Am1SI@#@HdW?$Z;qUA +LWw}_Q{15M;+z_$J4f*RNS;9Op~iIdwtOf~k{{d(qK>M_ooem8 +CnG*yEuVT$7(RI3PJp1#u;DM_i=Y1B3_7}&k8;X=D1S+hL~y%YnB;rzLJCG5=^O3b>+~=EP|VwZG>rg +yS!C1ISg(~C+k31(X&RX}*fJ)l`Kk#nm+zSb>+8000sc^4$-CbKT0cq@?xG*wkz01;E4Hr!t>3q)oV0 +Q`+62C3MlLSDhZK=*x^2938T0Fm&0Us7bh+uvC~usm(%;KBxnmpP{k-t*9ZTDPoAD&TGoH_(`wWKnDT +w+W3@A2gUx=~C`gdX_3u`kj*3~HP6Ix83lkvgYYZm{S$x^-DA0Z%4wE+Qqf+Tgqpr5f-lTGP;7;BN+%+zx4k>QfBu(Z6#Ok`Zh_dly%~23flRI7QJ_gdD +Z{leOX+hQY2~DO#uxO;_&7Z5yByP6f>q3|%Rb~#)+>hXzqgH$|Td3^gyf<24cE`Y^PwO;v5Yn7eRMAI +$-J=i5x|%Q^Nh;l^=0Hc^7|;GVzmL@Am@b7GO2Bmq4?$9K|E_bX!@N{(5iXbg3uC%Gv(djuc<{Fn3Fj +zKU(vlCfyn2Z#BXN>7qDo(mlXz$zsydv`PhuCW4T +NQ67>QMbmzpOfy%>Ft))M@;1p9A=sDJ@U5>5np~&wg^60`UC11Kvw9wGe +ewVah(h|)ywBQ~;+2bdWmJImWp=5emQ0dUzndbgGxiB+*p=r0=57a?AFX{3)L1Z33mwmteC&$r%x$I} ++!~Kl(dS%KZeN4{@UxrYr}hJYXx`{{mqXix&7&w79*K+dA>Czx=1xPL{%^BzH&N~*E}?f3OK)3Ih5&Xw +;yQl-|q#!KXMDZ+~lCInf!fc(Dn5$BwhDT1E&RSWlr(RMXAq=tG^`a9-QT;9H-_`waPcG$mgdlb1zgj +-t$d%6R(dgiWyx(5NcAUJqNRQIbu`3n)ERCgI@HY=%UR!KHNyq4c;^qE4!V$#%&if?((0x$0*i*d({I 
+}Jx0kiJZVEJLr3!0~17pXKDvE!Tbt3fY)7$TpK;)HYaj>-0f>R~;=wPI}*EBkGQ(Nd^ndW^dxe9zj2~Lv +Th3Q46e3d?Q$575vunLgyt?zAH<-&{WKd<%dcJ`8OR(}Fl&VJ`YoXn^fREs_sZ0IyRk=MmF$UWN?h;g +HV3R-D+e0v5IlveiUMyv?dq0l~*JL}bJdh!$P{tnbz#eXuPV +jdje6=;be~WIcOy$d|Afh5g%nl_Lp0BL#eNvXKoRGOTpa(1hz2W)h8b^v@H{M`^v%n$mR1QmKE?ZnA{ +SfK7BC%L-@-!}d)@VrlGr8C=1-XCdsQSACv48i#A0%OaJm`yv;64!CXlbq +Pt?b#>d;Itj=EXuHw&lfkrqYCBcwR=h`!hZBS*Bczs|{{32J|R*hGMb;Y5Lb`pJub-BM2#K5EAX%A|s;>gn;7LnKyJYmT(LKHg!Ci1gPihq=iaOh~Fr^tWfpQ8?A#=kUU#6XYBz<#yB;^nt!DKt1AAdorz0T=J7?1XOA)fkI#;L?AHp{^{I35Baz6woz+Xfs3zNMh9NoC&@u&zC(>js6ijcezK78rqf(IaQ)h)ME +OzsMCH)|A98SGiUsLX3X9HnTjVRzD7pNI;B-!CtIUG->m=Ig?V&17!?NGpYFMBV_0`uoI);e{O3TLZb +b$D=$~1V4u1EGWM9_O0zjM_dC>;W=;E1Fr2zt8(3ut^mp4%5iIPD#9>0U-#_G}r_&RuDUHyvf=8hc7~0k@CrkP+UUn&qr~&Ci +}EsCnW=3wWk3DZVV;~O56=u!i1tmqr{ykJWT3Q^L}7vXsg@VlPM%n3Gn0+Gw4_oe5#@) +CGk29(d-BtYFGe|J%Ffsrb}MGa)^Y9B-~6>}ZfdqDg-ytc^eXRX<646k1NkKl^S6 +_v9PkY7oBBM>#C;5E;<}#iTh&N!=_zN@HdfmEtO^^n-(z!K#R@|d6fdz}(wW!7OOtd5P=~h)M@iY`Yj +pBf7s>gJgXJ&WE`js*`%bH1vzcr@`C=BdQ`THU)by7aFH>IVC~y_Ev{V4-T~c)*M$enZ;FXyqmz{eF> +qQ9gBKF48oK7`34O}5-$}M3Z`*S|`e#70(r`6Wr>rEuDH(wBK|ToeK}tZR3*J!aK_f7*3E^l9>sM13(80nL3v&B{dJnTAdftr8L?v +1R-yK_2VhCCLkZUMVBzda2do}k(6AfJbM7R#Sy!`+QkOI{PVWtEkcdHAr9{2L?o6G`FyaJsb(>QpZ-v +Y`hs*h`1U_PdwS%1_Y<*Z+$+u-|tms`L$p#W9Q>H}(wr;XI>8a^g0EC%w%_^(Znf37W?=U0xj&QlrGd +d8Ob!*Bgf7KKm<2o!4Z-b`FE4hc})&OGR?E3(|SvPKfmfAv?h|@VO`AWjw8O87Zh!<~qN&?9$?&A`jq +dD*ES|T{oomuXQ+-ubFfo@cWa;=Ma7!z5bw^ZH6oo;V|)?!6|+CybRmPoZBD&HO**mB;n@baTgW{hWl +#cse7LsUhpBq!E_fONOjFy4&pmP4&yRz<~W;d;hyVS{=(zOJ~LWnN)mUUeloV$1yvhy~HK>bRFYxmWU +WUQZLUJwtpl)y+W?3xigd_7EjE9GO+l2KPV%#nv9Jo)72e{k;d`iOHzoKj^f|+d8%%T|UI$sm$w0rT* +cKpV`mfJLEg>`Q`3EEK^eyh(Z*O6DR~yAOsQ=j_#Xal7JzCAaN3eFzlB)jECXMCw~cjvYQxwEL+3)R| +b>9M;G|l<|*vo=&$?KQRLt}$-$nEl<3jojvw{kDRi`3;UBl@L)Ygc#gCw$nseev^&-DZ%^pg@AOGtjK +a#`+dDMav#BoL#IdaDFM>Y8P75+Dj9t;Zp(62*QI6wGaOdNM{c+?=DDs}i+M?YL}kbjfZU$P_pNGZH8 +-`J5w%l?UfvWU_oYrfqt>FcR%(J$RB7C_rLS7oGc$xh{H>4|;bN#H$By>ox)4^maWwc12zD;K8rNK&p +2x&eIU8)bI;X8Dq?JNrQodzT|U`=ymh`f=NhM!wl(0bhNx+{j=1(9>SY-Ft@)SA5Oi9EynkPow$|1+y +6#P=C{Z{>%1r^?JhJ#UjPV7Tb|m^au}lR%`_Z@{$tDDaFSG_JSI2;zC@UWx)eG?G`gBOk_F*_d@KRH^ +slyI3`Wcg+>Gq0*Fhm1*9NSHq%~F;bCpKEMe}#_skjRQ_O?XdMEDD)2RhATAMuT2F0O@!16;%_I}f4mSLEL_JB?K) +^qjsu5JLCgWYBJ2~7vZyM`qO>_T7jarA-n66o2^%-wpeOl0IfZmr)lr@a7OZHtlOO)Ab|rNN#vN+OEi +Q-4xk+I0;+hR+JSJyOrVgG~XAYuB>me4i*>ayz%gUCmiA=Yd0zCz{P@pd}qI5%;k&-NT0m +V28>SgD_%W4vP892v|4J8jeDf#}FZxkEE{C5jOkTh4AFwf84y;VL$DmMg*a(kqX|6d8et?BFM~rfCJ)c0`Z_;`tmBh2u&>NA%cqjk+Yumt$oYbXzF*RcgMmBaI|mzx^>H0Zcu~LiUHl +Qr_XA^aO&nKe<~gB51m`U4=w>aBYMqNxil`S^tUM4M&t+s +_@5R+s38TH_NzKBdvu#E`iNEY9#V3F=;st_rV$er^I5xIoU_tPsE2_l*M3V_1YDaz%6?zg3F0bKEGis +IBIH}LLlLMC3qT=t4A0@a+g8_sMS$;yMmp>n&+kaGhZvoF)TqBUmW_sLnh`>$zby89D6@^jux&E?rn% +X9Xi@ANzDQ8t08nsLxITQnxmfDkV8__VQ37^B=qzXCVZ_RrN5@IGRPHocjVGz7L7kOisk5hZt0M9IW+ +ltDE#WeC7TeRB=_aJ{?NoG;#Mtr~RJls1JLC+x~+cAuswH067+^PVw&C?kd5F@N-06TUqqK!3Na)#AY +wa yY4)7^r-548=BgA1_;_fH^xi3E>As0ar#td^8(1y8*UBEOs3!t=d9NW0kp3d!g8E*uY-#&G?8p +(hm=zKm;QLCLBTl8uSjzQa-Zy9wYir!X41QX<4@@MfHLqD0uJ_Q+g#3JGxU?29AxAEhBd)qzOU=cCJS +LvDmxIQZ)#$~1o0Wbs)wBad<;;1lfroHllIx=LFK?*YN)t4vVMDy_yxBs;uyOZD?fQB}6ZVFnIWgh9j +Ox`}0s9Ig_*6x;W}B}y-r)Q``abWznK%9(#VeXLr6ZAegWlVR`aXl_2qFoq%+_A4frkOwh3O8ty|#~SXj@Ak4oal%LvbpL=*L?)5p?}K4Nwh6Yf0fXw3Uqpkh0d`<*=FF_Pk-gz%?NF#A+2?q +l1}xNpa@W05`mbQ#7U0+)iH1_$)WWbNFPL=IQLJpME$QXe^R>}Y>DOa%||#=l~6f_*Hqv;R@lj_>8E< +Avl6{a4!llkX~%|EfW>Y}F)>oj)~*EFCe)eX0&7w}ZmzTbsBBx|1gcT!-u|j`J?oo4Ek-PQccP>JL}H +$e%vef$_Z^cdFM*V$@1p)#rDp&4hI)VNGe;1mD%@D&Oel>j*|wbdNK8-G`-pU&`1=fPA#8wf$s66!g7 
+nmy7&eV2I)s&w_yHGn~IL?E6v$xv8wWXrF4x2K=IdByGU4QQf|8{NV%oJD(5mXCKht`Fwys`+)w==L7 +uN2lRJ7pMN9@j=tP;vv`*H@M@38@XOf^0S49Q;z^MdWnW|5yP4Y^v(E*)-fh{4%-NF|TX!1K247iaW^ +hHm&kE+Ln+SJCvWftURm(|w^I~V~%^gfkO^DcJf|9ZBV{D)+R(QP|L)oWs5Z~!C0+0O?X)u? +Tw!N$Sn!JUo;uoYS4%iCV@(jMZW51CR&Kn)Z?0%mS)?E=&_L*;-^h*((HwmDCKGEOoW45UGw8dFtDbO +G5%BJYXw$`FoDgwm?-D4FxHK>V|RsUQy88#wKNWTNl~=zAIyZEj8xQMmd+RE`t)A1FSP}Q6Y#lK0eLC%!9PokeC1#y^Q*t@826APpNPqSaTwX)G +C|T!J4wU``JL2WW?A^5|uc_PsaLE~oO6=|!H!O4CCbr7U3c9}+1-}qCn2=>qUfrFux)pGNqIP}`A8yFJVdQD8#vg=wh?+3&9C{If!S +p!bo$zG6-u~xNMqgUDqJi;bm(jrH1es3&{Pag7$kS7%#`>29*++T&oGVW_dkWKT|#bE&NEKWXBShe^G +y$~&kzunprpCzx<|mRijI580-_*wZjHG+ouDoYL(2eL+vi};F&-y6PH@K}T*##4c;28CAPDVZks#q&s +xZ=Ba`p{af0Wm|Po2;dOy8ghwHERFm3PG*-J=+urs*3q*?n^O^BUuH|Fh +R=d)cD!qYFJBwj9DF5v)Q`<+kUlmyb4jw?|ME;leM#2aOSQs%Hs#NcNV_iKgot{!-MQcU@FN!iKy!82RQ-`qD#13r{@@yipS*>l=sP5%4;nnHWjp6M_?!v)L{wY +p+hMtW2aNIMp~LAU4?Rq#mC4znFC?$PCbw^GU)|1iW>)VeXGC;Uqe)4+*qnQ@2los00#bl*;r!kd*in +sU!IRHtDAS1y}kT(oOJRN;e%#vdOV#o1;hn3Z5RlDIj`u +PPk6crl9SuMzb}ZE<=||f9qpAs_pXBRD;{x8oLe +>iwF(9+tPw=#FM*=^2LL(LjkK6Lubzvk?b8X(q!h`nO921>6EXg{!6u+Lpt-F>6?&K=}dQ}bdY`uxKuBFLaTT6Qde>cQ +CQ`gqBX88$H@2;!AFRS4Bx=6D~5&??@k!7L1QyF`mtEUcoB(x6lOB3HGo7>sotqWioLc9JX7^RjA;lw +?rK>S}N-E;r?F%R@~KL)VBzz+6^rKbP%RDFT$-48Ud(IMLc!ERdgu;HFV7Pa +94MjSS*TK9TdzJL3>__f?roE&T=MEOqRZW({0ZKhQ3PB--G5>G)J-z-0c8B+tO?jd?)T9=DE8S>c +5H!vLWmMzqq^n`7Led_0qnKM;;#URtn&)c@1Hhm{?jy71W0Wfy1b2vXQ|HOQQo*rK@E6+Zd1cT701!C +X+bBDyA@A|z(;#a%<&@O?a2uTnqMUW7JKsXBivSKODKhB%zfl6p{C?OG_R0)nBh_}Ptj!j8&R4?tXv0 +qm#9qE=&vIO}gOThH#S2&tCp(B%X2sRVP${dOxS2%XS59x^gQt)r}3Xx-VFv&k_Y3Ol${An3YkYj8Za +zGsQ>6IYyug+^b+>sxt3glD9bS&e2Gzt%_CJ#Nu0zF{-u>JgN=jxx_KBa&)l(1flar>`zs!t}zV7Xjn +mhbHno*5h^&|fEV!E3&(cpaVT-1>8yL?%$%A4RAEZjtvveQ%RMjx{-rby0O={#c!~|97!ZaT2?%;*Cz +fYb)_C4g@o!pV}jik^bzBwu9T>Hksbh!>gP*NZt=ZxxI}3rS^cfe>sKga(Yf3>cs +R(Xmac&4+XggV@6Xv@;BI_#ed)Ys1#)~P +1<^@GLaw6vxsP6V*;p5j9$zk((@`Zv^!b|42nuyU;Xg$9*U|t`%O#h(Hva(DUT%l0hwp5%Qt}(*$GsD +w1@ZUJrrAqe6TNs&s@rZIy;^&=6ox!)`lefeZ{;?UQsC#7p35oj#Rw1DhrY~Yao9t-{GTh7~&^H=z4R +HSgxTQ>V(yv+GQ9PLh!dla(-(({ZmQ&HE)@L8~d^?iAf0X;4;71F-o+tQkn9LIFm%1){a}pbFX|_2WV +kYOqJW8{a@ggApD8jCwyt9K0N6>zdOIBgPw~6A^g>UQw~d(Dou;5Z +zCws=>Ky_1s>P#$a>;q14)i^3L1#QQhatkC%6m!W?OAdjNaJ8UtW^e#oq(l8P;q$4s2?#PCIVEry|TB +2U!G`zwhBaLL~?HJFC31a+h>?S>}_j`b;Uk465X5fnBJ9@O?ctJ=sixGq7M)_Fm_sR$sLj42#?b|fFx +PiJjCHT0eeEOg^xeSNR+G!$gid{ubGB{6+dJNgI^J*r1=7{_r6#y|o&EImPlz(E|tP#i`u>nw +=6qEsPjD36*G*eeNMqE;6eVsH{rAem(NM71?2m|GHsBv*1Rf5@^H2gsRdg6*a%-RXEg1Rc2Zp|N>p8s +lhn@aY+ghqNBdXi?>f0TXq;(Hj5RJ7Cd;@`>H>zCulWdW!{Xuf!%B_r*`IH~}*5SIT0DqJaR+Zk&trwJ=dyna1PiivPh!BEp96z7EH2sJ4b2|uQjGUICZ4n`47i!1=W6rC7@nc!}V-PSmy&(EL-+-kC?NW#) +mmBRtvu_K!M32c{AyCa3lfi7D+k6ECEb6etcSMeyf6u$X#Xlm6@4ei&N=F6EgsF&y3b_wY#RS^t*Ov1* +LPq~Kvg8sng|M*yQbPeMg696gKG*E^&&QVDVM-w_kf#-5uPHoGv|ni@OTqa8+1yFUvQ@O?5a32XsqZ`BkDa7}5HUd($SB}Xb_WGQKA%HB`v-! 
+&!WIpu^pgOU%CPrg2uB}4Bm)g~UXE0vGYiH^ZsVowQ8@^M$@N{wpN +L#hCUu!7@Rewy&>&3ty_+yWH+fB?ag285%jRE1MoP79J^3V2O)|y4-lLNCtlArEK=e~xTNsN;RqIJ_O +w-d7BYX@fQV-+~4^MvIEByfPi8!}4ynp1|YyzflwY5W=)4%R<)M%}_KEiO&YQ~D696>_uWTV6$jjJ&h +<0Rv=ZfWwKFSD1SyMp1S#PE^g~k-Uj{5<9J74R%BloGO^HGX?Y1<0jRw?+q;CWj!~5W-|`iIy(;**uG +S1+1bzUNK?@1rrSc7RYfWLBw|8nW>74f8z=4*$zfkvBKuG621MnRewi+=gLz8O^xZLh$xTiV2_Zv4Ee$@j{2*n9;-6M>Ys9ZeflRL<}XSNOc5F*t3bG*UegHSxUE157_^CRt5jhv8vx6_FGm3 +|HP^g3WP8mBq$UkDQss|1d8IH6acl;tbO}Vw?G&tVU&Qe--sy0A4dT4Q<9N<$zf+lVqoV`dyEgkA491 +4cySP`UuOkC>S%t#Kg~mAa{SBEBU6oGM@&v&hj;-9e-&HKp(7n#VE<0Ze?q#_^P{i?{d7$2oGU>OUIvl-J^o`>W$`H}^D|RNBS +elkdfld^e=C}F^PY-E3_vP5SbRuBCImdFvsTJGbQ)ty8N%5fZa5UKt39U0P+-h`(Al84a^8GuJ>82*tfk6iuw<`TD9D$WX34oojXL+Uvc8W+{3okd}|y^$AF+5Mnp>7D0h{hw)K}E +3a={0lcU?1Aou&~ffD;#Vrjk~uW? +mS&Vtkc5teK1Z4mZ&gDCAF%^nTi28N8xxv0(9z$x_HuPD#+ZtV#)+Jn{N#eqjwlXj!eCm>-_rne^-`k +z8oM3%nN`f&{OS@y`NPKj|5+EYAp;NPGsS#(7`Iw<-CLqGNzB`_TC#kf +}hWJOF^yb<2XP^y|{%d#7#3K|14jlj+>V4XA!)bGm(!^Wt>j7J#`#<^UAtVE&6ybPU7PtDouJYS)jj{ +l>rr4{vedjRkeo=O)rZMgUB58%gm`ykui@)l(_JwR4HDoo?#DeylkOW13X25cMQW*P-Sj#MI2*9efpl +=!80{uu*8wvlF`!5mGxH{WYXe1Xff-JxWbBa6%m?&wm|)Y8jTx^4a&_zJ&R0j%Cy)8d`~L`>Qh5}T~q +I2i_CfR|JY0PkFvO2&c*4Ji+7m^4(?$LYUkF}N?DsVzclfi4(O6hRXJnaf7?7133k6Qz;K`iou!KoGZ +!>jOD$Z9X`Y!md$;y3ViB*#gU>G~zfSzNkY1!X~|0-~!gd(gyGm3Y2X=pZVZoVkeeGB}Yh8%H=u630B +ryUaaS*e~=Aa4#ZbPh{ELBNXtg9yV7nELr4UWT%Xf94YFBK~rxj!C!kFsm5@y^vr8Gq&=wr991Fz3{` +#qvhSiQvDQtJk33laqTkjevJf?w%ye_uE7HTTuHg=ecWTA#a^dYA^ewUr^OPBmn*zRee7AA4XN`FQck;nNTA{iH9I(ty>_pQ5nppT#dJ8378bO#Wqhi=qCElOt`c2ip4E +Fx2t|PA&yhn%4YUTiWLs51oMv??Wn%AMn%HZ0CttMsQW@Q1xYO#JfJ5jrF?emU^VpeJN;+P42cf91@_9AUYLqJzbID@p!*RK4KG%2tE5tt7J#Xyp4&3Q`4MqkuIc56y5gO*QuLkm$j&!HO~qjh2lEM$e?JNtRZNFv1bkTxwvj9;KRM4RguqR4-;^*Ii5&PcD>m5xV +~AG4`o3@)wi8U&OH(56UG3k2S%~Y9KKk;I7wk$bb?wu|W2x)NpkzqCg-;WWpY^i&Q)9#1BktKS +M4x(gxPrGsQi*q-$QXdFdX=c+|0~7m{OW`Iq4yT>y}-j!!Xg;*qH3!_sGG*M7#q5ob9Je3Xan6{SEIo +;qlrCC^T8jtzF@ycZdvPeDFeNNM1`?4C{iAX?}%JT`gph{ScCHN+{XQ=fBW*4|EY+pONt<+MY23+cUr=4$9q2hCxn&B-Jywnnz6eF +gH?bv@O_vY(iCI5D@&!bDezt9)VO +$;G11SKIHBvEjKTogj$C;}n~4sYNKr9cdYsBfXK9WaqQ`r7dfvZsPbcrU{3EkMLRPAq%3fNwyE%zvlS +Z=$aatf6mho(=eH$Y_@sjrNQZ3hwecXudeE_GyA;vti +rcC%f%-+NawA>Z5VBJgoW?4mxi*2m#&`k@l0k_eswHrb +gA;Ww<`!pplQfv@`Zr8Ee<(G?1~5$-67>bpW~adB0g0{E+t?3O1e({ +OLlyA)sH6!VwjGmU&gHQ7gQXxBmv@5c2&>wU#aSY1y+*O>k$jAdO;1f9uCMdU9L+57Xm-UWr~dSnXz) +^V8&-2J%B%)tIA417*dUYME}I`R+uA<8yR(5GO$Yxb)8$Wm9DreLeF7g?BRp0@zwTv_G7&Jc4(Pi+a*&7_;Bz%Zr5$Yd` +xiyJK^GBJrE#U*{%n!6u@9h`5bRdQm?C>h$+|e3744F>w$p6tD}BJ96Hj?am8%nScr +a{6fJlZ0lpbFJ}SyV)}-+vV0*;=XI(-#Em5*T6q>i0YKS#r~29@ZnvR80PBqSHqyCt5(vq5n{wLm?wW+H{ +0%Ap4#AMoymfNke^@dku*~I00p(217VBiSUSa~e><;gXtN!x`NnyR#oCOnd$_jldSw?%d23HEHQ;nQ& +E2#+upUw75McnOMPEEWFrY1VBv~ZWUh<&W%}+ZC;9Fi|a}sCp40mXF6`V-WG+g1b1lL5XzZGA7pa6Q( ++6`vi=!&j8Nf78}uq?y{ZSq>72)CsEIm@MXS}|b>LM-EDIduDmn`f@o^*R9LDx{#y0E_Czs^SDYM|#3 +Mxzf9~93LvttTrx=cjMLy+0E2jBr>13VOpSQgD{M3FMTtoo1wcUBBQ-~bkEtoDf +B4%O#_;~c@I&}_h=zPzhgi*)48p<{kb#m!QHrL*dCZb$ +R6Bm*Qdx^dkfibnP$6%%|FO1yfePZJG?;O>0X%FN&K5h#iI9Q7y`zfgvjL^oe#Rb&`G{CT +Ia#x4v^e(7Mlz=OPem27T99*UF&cp40u1@Lg&2&DgK#%~*aQ-+wTu&gWz5>1$YvingUyR?o$A|u&Rad2#VR0%!CB(y>~ne=G`kS4xqWDz#HT|H%SFt7N(?8#DQUD +>pRcgKqZDwvQ&dxLY%l3?xzRa+x*Ze{?QiR_r>*9EgU4JQT>)O5rj2CxU8oproD;&RkSE9}lejfZ1TZ +&6Tip)Qhh3n(}K=;pM8c{VJzYk2WpZ`K`l-or3L)rmlr6#|nOaC0qr@pv*S +rirJDS+tm7r5;!H4@9~3Z)eVxPYFB)fwgi4QZ&`emwzNyxk(km2u&4IKu1EaP-z>ri{A#^@GjI7Gp?P +9jMK)jDt*G!-Oz|cOfXy3H7u7pk?Xi1}LoZN=$wxFqsU%wugx0tl6?t=8EnB45vAhf?Uey%qDe=qZv| +lKoWdaPRhsNk%qDhyhd<{Wa=%bhh9nbbC^46wfag>J%5|s)L&%~HK3Ez?QQ|Lq52@ulp-W<7A)trNxD 
+Ow>DRXO%iU7-+7X`DZn`}mQ26X2wX41eoca^-VYp#?|x3-Mk7)Mer36MMkxA*K-ryFP{szsX +x$n)@X7&iE-*?A49{V!+|ecRjon<(O+EcExj?~CZ+U9?2e&F_H-xcNbtf(U$zBanRvA&DY53ZdT&9(H +-0bWe6+Z^gzueviY+_C~D@i-YOzt0;WWu3=k1@a=|a;@y+Hxge6-oy-<=Y`zaq-<&Xfmu}oV74n|BlT +!OY!(IP6-Ffj;_8s51IlJiHzqYyE{M{|R6Y0sF5Vrlf3k+rZo)P#xA`#h(r#oXT+H0QMhQar2pgr25e +&3=a_}vMR^@+4nK7>%A^=)^QFr;l>4uMj@&G5$~{oY>Z1pbZR+ua`U +Z~Wfw_JCjTd#fr2y9T$o+rza2ciCV8P*9$yS&0j-G>=4i_(UHldmPUBB3k4bKO{$W_00#V>3ueLPZE@ +Um{eDTCAz|cgcFc==-3``J(wn+pwo#;4*dDXZm{}#G(3>#Q~f3gH#n0)&N!e356=toz@-is`q8=QKuA +s1aZ;1}K|1_u4QuxWz@MGLK#fv&X^J^5pW01Sc~9$tvLP)pnAmF|eSCI)|G7joJ%xre1GEB@WHTN4TW +_B2mSeAtpByboyDySVyqZ#ij3P>_Lx?U&6WQERR0pHEAl}j+n(+wSX=&2EFrFGqd&-!xc?{Qz&f`q2r +sb199amCur}Ta9&_jN7<=~t289`lLi7ja4#=CdfqhRsElqcOyN@+b_uT( +xnzl=~IAPDYyagflOb7XqBfI@~@811TYWQBvfW6BH#G9s@zekis`ED!mWVgh`+g$E>lzb +-*Zdl3e9j`kF4^Z&H!`Y2Qpo4Njv1ysW+?wtF$ +4cWUg&K*i$7bl&yBr*O1c=ol5{EmQ%RSf-}1iWUm=nlv5r}z4a#Ezq+B?ah!bjPGq)MN`FYcsxOa!hk4?BA+ICqA(ZYnI$Jn#QK~fIP9qM*IerWO97wXzg9@PDY +aO}``P~&y0+Sw-0Z*RbOV96=tvDP4BUo6kY6cZ)q0*ghb!Ezhd=bYI?X&+djXjZ#tk}xBLA$sXAc_ra +N(b$~=LuRlEsl>4t`O& +)vI;|Q&l~#_18&AfEB`S~(yev!)$YTxr?8B3}Dh+PZ{#4kSMfSzOr7nbq;|*6GD^fxB2(VfjS&_|RkL +DUu@J%L`!d@GQcdtIRn2C9(ZWJkWr5FWi1%5$A&XxdQj7`(p3UV7VY{IJk89M}~SbB$22 ++iG0%ag~h<4@Ljm}CnC8)3xPOE{<0nV~rq$j!x_&?6$n%EHGPksr@)CDMc8v5&!115L+^- +`tLd&b`D9^rUs>p)1mqUg(3Yu^r`E@|1{y`);j|J?*U3^CUS?1#jKb5qERN?7*aR&SyNI5pif9Ci7 +7rhslugH8@Bl3fuf-wY*0TM+fS%{O|?Ng5vwRet7q!!;`f9Inlcz0+Fq9crXx +~#n*4Ae;26~DOL9xmK3jKobcnF?r}TyMDjAOGsGLxe8OzLd +4kcWAQ2aVo&i0i!JCW=ia<&1k3g?NvPI41vr&ja83k|_ET)kSSk(n8hk6`g)NFm5h8-WlU!C;i!3ug%Q9^?8K*apQr%!= +Oyr49BVZ-tayRtw*$9{U7?x5QSS{+?~e$!`-#@;wuqzEw>katBKL>{ocNhUNL5hNbd%(>l2mf#Y~bcl +#LHWP^v_gKhA;(H+|FzAe4Wf4y7S;rEz@56R!XbBowh(%a7C_w=V-jEvkvbnHD}7AD@a5|ABsqVXOZZ +*X*rgpnV&n!`J6BR`KXFl?pAmT|1&1D#VKvBj7Asr^f;cY14v0mWAnpG;7iPb(dM2>-4&e@mCp6LyKO +tL+IJDl6(;&?Koo41WUN7w*%aU||RwY6&?$+LbFlf+*7fDa{7W%bPs=C+_bjj?7fAomJx1RAAhU1 +z5E?5?L?YtqHG+9H06*SvazV#ZeQ4&JaNj%#KY2!+vTr7k%+#b#oQ9%O#C|eWh1Ko;L18o+>i!^I82a +5_-4i%Oh!b_uxeJGq>ZF|;*yCmc3gPLzzJ{%1MZ%-?Z5G)H&jCnZ31mBg^CCBRHn9uDNvQiw)d1W^CX +YDdp*fkN9@qlI8^Qe_~OqOo&P3wccaDijiGq2MEmq%5v!v5C4bJyerYH^GJMM%Nfvk+XSrLOt*b;`M= +#Np`Bon|t*-p_mvgefMm=f}-{w96F?kV1dm*X`S4z^R^;kJvv$>S-wzpOuMG*?!8|KCIxvu}-MRHXZ_ +p&hg=PT3wJQD_0y@B24;au1+K6FWiB|U4(CNN@y~r@=6PQK5FlhP_M^}lD#uA0guZOd@4w%9iRB~Vx0 +9?dpAHSE7Ou+=F1GnucGu=v|m57j6WpM;H}=b>2)Net2 +w}_gWG#`n`<)UxwFrJ^z(5JXQzu?i3Ofio;#<`HT%Q$z7$2ieQYLNd7RnP!z9srONT<*AJ`L{fl!2l3 +&b_i(Nv3;pYSNo0pT8B;R{70`O7fw#qqwX__>}k!#ptoi3W)`^n?;A<0>S2(}}36VO|#y{ +`q{Te`P{~XaXd4sk@=%&LzyDO$$CDlPWouggU5f>4F4JG0Xx=v;?#kz5WIWwWFg@rQI!Fq-4=EHo`UQ +jW_Wbch|f7HbkEHt4b`E2;1`9sQ%zn4$y5hsgE +#HP_GL-@u_SPlV8c@oqLV`>(4A?zO7b0UGh7M~-^EjrD_zGTEu}a&p3k^tK)}kcgTaVmY`#C>5M&_ +CqzFS(mKPV}%8j|aT(3!P8BpDB)brrT&y2ZPp+pIHnO@a7oSy+sn9?({=b?^8>QO*Mjz$OZiZ15sMer +yE<=}D#EHJE&R~j@#j+uz2K%#I$uhu@Lt-H$NBa~+xwIuGbEg4T(ZNrPWt+mDv4of311`NH}BG@do`j +$?YlXgV&1?d-+nDcxDZ)Y%+zo>XQzWUZ_J$Td&M>Il(4_`Im4(F^#qG{2As8E2Qc?2D{YsMd4 +NE}np+3!9FOWsHK*Ht=xcj+X6GA#ip_wlt18A-;Ft;vkahN!5u<56Pp<`{YzZF{l9@g1K_S^eh)3NN| +v}~O2{p~A!_;1(x1QUO1oi97SP;7&T1db9U1Y#sX5*P)M1hgH;af~3he|`@q=Gor=w4u8V7{lbsx#6Fc=ddn?YS(Z{|r@_S{GjM&-83AF +=QINzJJqG;y>WN$GdI_Si0qUA@= +uh292(WjQZ4oAej?@@S2^7IdpNQDs>RE7)tO&u^Am_-Z&;l!j1v0t8v3_c;C2Pb{k6nq!ER^iYISBY+ +$skH^)@ZCiniD=e0elF?WTy0Pi1m*G*pMD0KgM+QJ +HqCKP`p)FTa0rN;tB2q6J(8nVx58)~R6*zqWK`lLDb +RZ<-8vFBP$Bx3&xm(4(X;j7eZ@Z3v)pO$KAk}dc+Q~ydD9e1A~`)(zPgR!=YG0YAC{9bQ`lijvFpp^SURq%AXoTlR{TVb!l2om3N9N1Q> 
+tz@UQ3?^tg-E!#AJ#^>Xul}B{8%R*7vDB-{$iNnnvr=||$V7sts6d~n7FM^Skb@xW5-jZuJ~8~+Vk1D +&TIrMzQmB%YJ;uqF9MZ>di-UxsfXdq8A-H@qMv?WxrtVb<2FElTTv;K=6ja#da*V8-XZ4|xRPb%nAON +T&3&K2ORXmrvXS?7MFw1}z9@73Bm}yF~r}kU@B=BR}b5nCNPgmMKh1*oNcJZD~-Rq(BK$A4lrPM8V;P +FTj{W{$ZMV>;U$oKIq;X$+Dh{dO#tD$?XWI-MOut{P`DUbjaj`y~uH>WD3G7<7~Ln~SEzni(@VKwG3K +pojGxP*N%Kx`vVX#nbEcr6;@8mfl@tz~8`B|mRjp{Dx@DrqHoL|E;xGjdkL2&V$N=NSmt2v}pkV!~jg}pr#!2i=7)w6Cy+6S$E3&6dJnML4Le>hC{_{W+niEE_ +J5QASfz-hsLQoA2j%V_c)#!apf3!UqwUr+L)~kg-k7`=!b-kCjtp1RaT$9eG-KgE_uk1v|uDUX1|}ZD +d`{3y;j-W4D6;Br|Ajw-_lH3c3FKn9~vsD$YYXa6gmIlxem)pw97eR6}FyCgz>6HW#D1ooMf_(>W9dY +Jnr4qXR)e>C%O=3zzVdi$2N4Y1%hp;2~(aY5oZ=%b6Ydh#t<+byj2BabWH(3Ry=DBk2!kQV;j2HigPP +UZ%s|7rwXBuvGReOaY?t#m)}W`S;nfy-=(L;df~^!lN&;lur1WMAOSs!rovDnZf?G0f0=#c(vCPG~xy +!m2$at-QAt*ED*u8nK4)fp`%RtfJD0}pomdX6SZ8va9mF(|DX$d=;I!f0?7Jsuzf{{mVm(M35?^><9j +Cw2(Zco0tseAPVz@8WG*cofOWKATDi3+iwuGHUiUuYUV&?p(Cc9$5|TfN%lJkFWNX7M6Gm?Ir2PpCHT +NC`p3x~cM7xLZh*Je&&6(lzlsPX-SSA4Pa7E{{S;G6?EPlywH|dwkL*5Q$=JWMNb$%5n~YhH(DnIw!$ +I#lA4dMclAk+_MzWHF{?-OqblJfp8nk-pPGNw=&Q@7v&8@BuaT+q@jClCNabEhtWz9++1SE=Sei@0p< +O$gOnnGoG{Fz>(cS{JHSrzwb1x||%5ccOazX$uxi;~Q?sV#GMpcOY5 +#T=v(?eM6n`xx)2@)H*_IDtF5`J3)4^KN2f_#MURo{DOosX?qeJteJ8q4G5QK%$`H+%!>!+nZuIeNva +1{eHEJz65x+odQGhO%IE8E5xKnsS*JRbAYTX(34c1<`;^Efs2|91y$Q^uA7~K-`;J=oC-8Rbnk+EH-c +q8a?2qvSIxp$2SBh#N6)+;rlEW7q|MFat>_en@EH~D=ER>#h9I|t4?~K$onmPHliDVak`a;Ch1b!&j& +7mdV;RL_ibpbm&lb~9?^6Kiso3fs{3a~nM-+8=L)9>=$^01EVdSwMEoApVP_*SvRqvyoYKc%N73xL3P +GlO990RPwuEGsI>}q#_+klBl$C8?@dvIKD=W< +deh9hxNgn^I}h;{q(x8u16;CsQXo~*j*tkG=h2(GR~X0!en3VD>xu(WRpNzUsVq>HO1RNkLqe;JAtUJ +|RNXA$ZM;zTI^_hAs!a`+lgM2pK_=C68hy>#_cvzRi(|-^y!NSUOY@os>No)8(bc_H4=sC6fs<%eX(~ +%bXj78p)gBuvWsm8Tl^3cDBPU~~ouPXj(&HQv9{ebXHcC;{N?B6!hAS}2vI;?ZpD^1CRy0TNeU|WXZ>3y3LF!&T|BL5L#-IF{JfJQyCLnb8qA`=%W*uKG?7?G +LZb(P?>b|Egv!Q#f1gha(V`PIB?zDK1R>AZ?03%|S^d0l6uQ^uSwa};hInsu +WdL9WRl>mR+LRH35ZugGmz(-N@B^4}D83z6o`vppI~1B$CrY}TM0cs`fPgangfyXg)1~pTf>yjx!^+` +dlgNZx;%aoSm6C8f@=!XVbZZB7V(YClQI8GkTL@rkpX9BV +RZb-D~$`HYEmR3%tQw#{egCz4C+D&*@blm2QEVSjM8%Uj+>jf2c;KU39Nt=vqA)-nBtodUcX@Gz2+yO +cJvorKaj)R|R8FCO@32Yu%{hG(Be`a#A7O1(olP^0F^kK5q~uE$H5FH0(5HvFw3jQS=!ZwwF4>a6gI-kzwPPs!! 
+!TO1M&saOkJ*)IAL9^5#-T-13hf*5&A8b`1#K6wKSoX?>bgD2pYt--0-=9wnsQvK55b#<@1b!@X;84l +c#2UcCW$Z(OF}%t1 +yIzGe_Q%`=YCR)x&yglSfwQOqIIwKCv^80eHYvy(g!WHDXv_!#D?Os(|UzfdDRvn_8Z@nkg{c$dEx?X +6cQL0OV0TMa=9srnaM;8)A=UvX^?8j_$QB=a+d<8=>&nqfyFq4*d>g9irM`GdBe-Q?+YOpVzU0OD_9W +_ew_Bn`g0%z17ocl=q4AUb=!rt)R;4O)zIQoQ9?)FDxEny07HVI=a`2oO=u%bl0ZXpH8SZy0UBFEKAA +9_+~9r=fkRcZI5;Qj29i<>)>*Q;XNNz(}n^n3(=sQE;FxKLDE7XU-E|w1qL(6ClOs)H|^f{Iy{qxK!T +*C=P~2uvjUZQkoyBWC1TUJ_94n>Wx>aXsxm@uE0B96oF-_0{1G4B~Z_7!WySS_LUryOn!-jkrUA17YQrb +xSM2uyNH}Ht7^Vkhl)k#XT{MQr5EbNPQ}I2HX^I36&z=N=dq@vN!D0kd9H`h1AKrG4u4QOMsrY=H(p( +>dd=2KN)^K`v!m3v~mIu`;BVTrWW-yGNUb0ZUTcepgB5h+#vlNO1>xx*e?1KwOh)Y)U+teSQbZ$|8yG +hqFq{#6o6bpbNNJiBZSjBO{GET>be2ZB)qQ4LQ*5AO@|N@H(eAcmcJ&Jx<4JWJ+6!|Jqa!2pAW4j`6R^2Z;{A)3*-&=3hQ+Vw#spd7zHtxdU$nF +PaVqQaziZAQr--Wzf)BS{kyeo)-7!E03QpeR9SNbgz}_bytSpa_&hwPsNT_UR*LDHA$54*#q!0#-yQOPE>uY{Yc<)yV|k~OCG0K;#RKH+G4g_R5O=|;U>;56N?m4G_X?%am!eBkO)I0zo436 +f(!cT;jxBi07PS&`v>2vwD6jJ6%8<)HpnDxmdlgooGAbr`c^`;ZN`Q{#%g>CKm>uLThLsl_mB|N%8xEYuaJ+Swv)P*j_$Q0L)vEE%HkV{L$WXMuhOd=1* +-6OBhMXFrzNbGk8RV^5Jk?J_05j%wNM=H3Tt@(K%8DtS`Ve;%Xpbdy>!_$1b^^-h*(b2s&^mWY(IJl=~gIOJloYh-Wh3(gC +_BZ0n6uNNTvZ5mg4COlJWe@0H8?}`VKUuvYn^-+4JGA9hbXeD}v;=l-StuX}B^VBwM=!d8(Ht}Hv*DW +3@aul*0963e#ag|YR8|tVnP{3*3D@G0xHe6ZP5>$)DL1m*-Y&d@?iJ8O->ADU&(K*+=Y}1nze@P6unF +(H^^81gN+58`wB_@~>uG1h^Ejub;3-otqsMAI1InQsDAF+n{@OIf-szR{Y{aYV%halaemf8FC0OXNU9 +bJ1=+Y=&Ht$lBnTHIoic+doZbO@Q#(;6<+`T#T((!YK_-2q(4Jky3!vV|JEec?(Lt(k2_ml{MvU(g2K +DRFqb6MD+J06fAVj!RS+ZM4XNjBl&Rmkt>g}~7>)o7ni2jlfnLTPm +-b*#|Hy4}nna&YGvFW;sqFXNYzTwuuBmVW +mw>7J=q<~EF=UPB;3La7o=(YS-9dHc|*AU4mFQEk836xQgrxC5#Hc!y +eU88Ry0RTYj$l#8h4PXT}v|yQ +F>B{bx|GH_eL~J<&vZ(wBG1flInIe(&J~oVYwl#+%>aad=YTv`^GUyd^IA2ht?g&CYl~3|P!#fzz*Mu +!BGni4<@AFlS|7lpL4Ks1^)3bNMrLuZ5FJ!!sYGEZr+ROoRQ9UjvcHj2S5&vy^oGK9H5x0}f9gUj601 +SMPo<=5H#)(H>4YOTQ+d`Ue`yBa=jTMGY{Il-CB5w&t1He&HQ-uN5Y%H8oD0GMzdaH%C4JuRjdguDCY +`Sv_)?AT=Au+j%Pu?H=5Rr#m7k$}8(3=O_~OZf?uwhh*8%QMJ~D5N +)>vAw(ZgLlJ?-_%gNI2H=i^W_|ClB)Ku_l$2Y5Cw%xXVcNW4&Zj{c{MB#UgG6=x&ngJ^jZpYxlQni&H +G-duaesex|A1O_wRIcuO;IwaK-B@y@a=ULmzcWpiV=^(u+3=D!P{pn)aMDT!y>Zc*cuEz65Z2>UVVHi +fZK!NV$vmoaW_=pb-fuE992zyI)=cGnHS*k?~F4>&(%!2P_CVTT3OmS3-F(&Lv~cmw#nnk_t@;axxMu +xz)Gp#=UAN5yli{bX9;58HQ&cc=(kBKr_GBgPOhHo-=hwVpknF6Tj9q4}3ok8iq0R{iwe3@=#oL)s?W +po2OJTMbC}8u`Ua>m!KD> +_ZwMpxFSh&llp2BCx>=&A7BLE!d3!+}_f>#obc@rYemNhnwnhjiy +{}ml*xd^55?x%F29cMwJ~p-ACdnn5RVfv)3h%sBfmt0(XGt1GknJXiYrp!-y=o$T5{2Q-bk+M-@;h^I^V5CAR+n7HRwXWRO(QSD +I{d6Z^bbSb>q^@`c^LzsLc#&L?)`Fk%Qwu4Mg#^KUJY}2%OR|&*U}diEN|O|FF`DO4*z +Fr}5BhJRL6bz(jAvAvX*xSjVrAm42OXv$&~Rmy8cUcK9ttepScmojh{mj>ZcHBv+aTa@mrH&j(YS-oS +A67rtb9o&V_@*sdN}cOQGQYuY29#SWp~E@QD|c0F{;WR#4XSAVJF;K~Z*Soh3{=YN$DDaJs*Sgx-K(* +F}xEnsz9BT!31>$uw0-@qk9@^R$6L5zApc{+nwzKgAf!Yh!x$BUHEi(9%Fh~4Pzi7mfZgG+F43UOfbd +OF3riUkyld4$vhrZU%lJBC*Rl3NZqwILPeJ%6WPpLAaBa<7_408M5ou8oz|VAv1ScF!_$AOI?=f(Hsb ++fa}SNE-@$pij9{rUxx9Pb{9_v$7!4k^% +pQW)~D5D1>vIJ~GyrV=**^)Z&SuNfKIJZOm>Md+q@DVv_8Q<=*=4@x;@?d1} +b>#FkEyPvs`Ez7H?oKw>s@~$i`dfS&LWmTAAt3H~1%xG<8mXkrlZVy*F10@$N4?-tFs_^bkO48-EaD^=M0=Y +i^OUyU9UP2VnQCqhFx2E?;5GdLmD!cK8IpXwt?V4HFN3IJ`ZETwLF>Cw}0#RKz!45nR*wNLNB)LJ-_+^7r!`1`3B +_&Do{3eHJZff-kOqFY-mCCSG9>K`;p0nhNTNX5AJHPhVt}-Nj@)h6Ey}tV&U)1^P=KlatO9KQH00008 +031s8R(v6Y`a%N$0OJM#03iSX0B~t=FJE?LZe(wAFJow7a%5$6FJftDHE?ooVr6nJaCwzfU2oes5PbK +qSi~s +%cI-ve|7XL%<)HWMUT5OqSR&Jygiei&9NkxwlAC?I*E<;R<0|w*>dea5h0og`<3QWZLBQmmY~p+H#D#P*63sZuu93b6}Aqp6_%NtDf}yhJy2~b +%7iA%xpW757~x6i{5NBPFHJ6sI)o~nqn)7Ivn}1p!>8$z#_XV!o>;R*H}h((WCrk9W~?2PaI_0Cx4k$ 
+u3_{YQ5E>fL@7;YMmdJ;*U)VZC=tH-6I_=2VACLnrpj?qsSX%WOJ7Ed&$H!%qUkeziIOCmE}kDy3#XCA{0>aA++hWCLRVI3Uqd(uCW9pY3`Eo)jY +o^;pnf=7Ob2O7!&yQR&7)*7ikIUkq4_eI&(cAcC}lob9L&6C^4a17l%;`_IeQ_MJGB0c@j3`pIc>!YW +6T*#Y!nInBD?y3%}^P=+4l>`#n1_W;celOY?|MYb6p{BH8cZMmSkME&kGTj4Fe2+QD1KEAK%0yTobj!6>)^ +&x6m8sv2CTuwo>y681FM_$V?8IBcxo1vmE=KHETG1te;fM%YN-SMmA+X-)Aov=gPCD(bN4ZovbFa$HB +PKXk?^D3CY*Tl9*+`wQ;E&5IO5Q+< +D166P)h>@6aWAK2ml;P_Esz{)OMi>001y2001Wd003}la4%nWWo~3|axY_OVRB?;bT4CQVRB??b98cP +Vs&(BZ*DGddF@$kbJ|D}{?4!HOX})i0~j28H+EjCz{WO-4L)8fh= +U>#JH?3(poL1G@A6kd9 +;G@5yB5O3PH9PnIKvtH^>(Ik#NebfuD5c`3dL$D0bDydkL!Ovtx{3M_LfVzAP41N`YvJzxI;V7mAGaN +P2Rsf0~~g+@%PGJVFHE}&|xD1T9)>e<;VKxB%SspdG;VcCE>A`t#JVTUKV@Lc7~!3@XD2ZeKt5Z#&nS +Gt$Z$d?l;5q79Zd33)}W6zDw4^ugE;G6z}tv2+9puiidIX3p17T%03(;e2t}rAA9+63Pdr-= +7d$j`Z5)Spt%N@bmNvX@_32gw*eP8LSNJ&1{cPY!3yW{1CEP%e8PJr+g7Ml?SG;5ME03&o$Jw4#BaSs;cprw9 +bd(#K}+rrqyRt@S`3Yn@K5)q8jiSy&pg@Lz}|CsR<+CY;JQUA4f*0dUvoT-;zqt=_zC_8xfmSIu6l(e +2VzyF)d)uXTFOi~en`L-+m8eY@Kz6Lp1HnwX85W#pBKhk%@Tu%s=NDV-oZ`UBh>V1tfLWAmGUn=K*1C +^8Wu)5SqPbUb&Y&x<8%gT5v?B-c|#n)?#m0sB(k18|}_zqf=Z@!q>MAqucyXBR=?W-@K^Z8kfy(uhXped(C#s=+yA5m@7mI#mx +HZr!_YE7auxXRfe-kozduiO4U45Y477MrI^i7%A|ZbGe-5m;tKI*z_-#X$oYlp-3$P8Jvd}ZngW@H^ybHS2KDyo +ksVjeS1l7saiQc(E{Cmr(rxax)`gJ&(79kt+pPjRQ1qZv(>!o-=%?G=JdQRhGZBLHOesZfpCTeD!rqY +=ZbJ2{Da(PPIU|^bKvOs9|^@}=J%O!EukVuGQNz0{MsFqqCkae@5mr2Od(Qo{#N9R}1{(4Cw|6Ih?+k&P>pJEUhs<`%( +nqc^uOn&CHQL^zw5t289BmD~x42)(nM-tpw1av|$+$Cis~6SgGmx(b0T9FHgeWrtJBnqp9zG5ta&$a8 +~CCxrvLL=`ckCp)Q5OB-gsJnm92uE(pV!&!se~3FS@8aO%U(3_Hd%Lw7hKM-q0l;dV@1nNA>*CiWC?* +hv8|P0VViZJG4Z;V8IlMYnKZ3)h}Kd#2}6=s6q&3WeneaEc2*T%`mgVkT$~!%{VL%n`ekm%>#&e}*J) +FAb+#0zMpi9(IrA%yic=HiH<}K%l@+Ognk@{mr7u#X~v{ObdNVKp+AtBR&dygM)(H*<=uQsxk5T*yCE +C2rjP1sNUSbxTvegyB)By^-u^~o4Tk~oQHMq(_o=QP$d4vJJqMdrwqqEKV`jP6uv7TzdUF +?>b4O|%_l+E`FzM}EI$lGC2ACs2U1x#FK}Ul)MTd1<(i@9uKxA?E7w2!p3>j?Tg;shSngfX^Od)wjT4 +xIZ2cBmxD^=tB4oBa?lAF?LpT9sxPMmAyFQgP{Z%nSB-=q@PrlLp~ex`R@^W!J||L63|Oj_uju6V%~T +BoUXFb*BhREcO*GvAHnG#Z$k^WX^a9d6--hpx}u3`=Yt{t$HB#@2 +5d<_wFvrCqW~8)J-f$!$HKuIvCgMkSQQS0Sq*%*Gbv3tJ?LXe8G$tS^EkQ?7YZCv-r@>39l?D3cu_V6 +%Tu4%JMZ;w)k!Dh41V0z^i0klV}m +KRrscr%oOnOdR2*Qql7JW%(jwpbpmRkuT<#cv$86u6$5MVdhH}M^31Nv{mkMM;;iHry&rCRNb9UBPk~ +2B)E%PuN&Wd6-vxnP^!7xjO3EJ42owW^+mg6S)$OhLYKt-_8mBmWPDnj|1<}DvUxDMGIu1phm6pu2#{ +K7`)I#gIr6S8bbGb&d_Lc1Zj;c-|AA0Td2-CJSKn+G(RIZC2oTfylU1#_dR3dWrOxx%hWUJOkq$a2!+ +$kxj)THi#n+UWeY{$Db8|-jsGAY`C<gUbIOm+O`Nxm>Rt%w_!m@;8_NEH3ZEfXeq*ueb${$-cqhWg@BZ_ +bRj@^$Q)J{<=wkEb453^WqCxo^gwyNU9l(u_k=qds?*F`ck=zH>vl_1BmnJBj-=Zf}zfRv)}{Pox*VN8tq9oiMaVAxC%zszfc=krhw>A+&)@$1Sp7E +-=w(l$F!@bT@kxe4G7@Tu`k>!FQZcdh?Vx)4TSi>ePO{?ygTKDaimpRoPAWUXtNXs$eOlp_{iW2d=-T +|M>VV+d{e9L?h^^pD>@negimg{sfSe6=yodu@?RBUXX@%rHp-|I%N_1o#PG?uyuhdmEaOog;RgJ%JYu +H953$}3(EHGZs%j&Mz^L2D3A;aH0Lo=C2*il^ZWZBESZ1tm3jA7DOTBbU +6W&lYwrv4e*sWS0|XQR000O897^_9G~vD>zytsQOAi15DF6TfaA|NaUv_0~WN&gWV`yP=WMyAWpXZXdEHfQZ{kJ}{?4x$U33zt#D%LWHRMv2aF9bK2Jxa(6h+ozuVHn|F4|oua({h +i{R-IRMe3(o{$P)1o|)&F+403i{ROwfyRi$C@nGmKhHyKX4*rHSz88%PSZ*my727aDI1FS!foZsx|1i +!8zVCLCU_LQYfzKHg27L5!8hPkgFvBqN8-1o+?x_M9f~mnof0r8VlRZ6vWm0H-RePzD7X*fxRH@xplo +%SoI!+L)sfe;z`xsMg4KxQW=w4m@fe^Ls4)i&Px%JgBr<$r~8l+4HQW(Y75z=hxRu3%z_eA}K8RHwpc +5n$;p#dXd7MraU8Gn?Dv$Y~BwyK4SQqVFqFId<$#4-Y(2v9U&I)z6e3=k3gDiy}>WWd7Mnj#S(9b2`b +YNvAz@7xtksK!d+jtZ)f%`94T=40@L`BZ2MM8n?N^tM!LWD8kjg}_K6e4_3(P=?=a?q{krTHuS*&9h~ +QZNOePiLv!#H}^KMg+<4}u^TFnbZR9inFhc_0=q@+@c{vm=nIY&tSLmAh7oTg0{X&_@$&n0wS>O=06+ +TkdEZ?=T%#{4jbE{UDH7Z5aRdWER*DD{W8nyJKb#M~V?_UUJQ*(^Z1tn@(j6`qFq+Px53~M!IUcMg{W 
+;85^VxJUbO08V<`P>m>qIUj4iR!EQOSTB!njVUKVW+`7RUqGl4pu-_9?@O0)j_mAAhKaT#8M?7eh)xu +YrXiq-n#8VmLcEeURJ)> +IpB6X6VcX=tx#Lp9o#-zXGOxFt;@$1B`Yr*|^2XDM&9IObFM*ef+wun#_^2@rg#B6{RdDd3kI=)NV&5 +%<5)hU83KD3ML5C&-SLf)qO9)_W=mq{sHG{MBMNi0n%dvVaPop78w$n!OY?krFAJ6WbXK=aAhw1$Z{_ +|3CIQvvTf3N`q_*tYbqxB-l$g)}wvbsoDO9;-^H4!`0LK@UUkbj|RKS3?(v5oX)zD-0i6!kRz1yD-^1QY-O00;maO7>O};>Tb>2mk;q6#xJv0001R +X>c!Jc4cm4Z*nhVXkl_>WppoNXkl`5Wpr?IZ(?O~E^v9JSZ#CSI1>KOuTYT>3pe0|+1lDU=H-qdFheb +Y6p)&m%jSr%Wl+V)K1oi({`!4dHrN4@%j{9LTd~w?bwAzxbZbsdj{n%_-j~6M4F}!cXxd|+hvV*V>>1 +gU$_bmT1RHC)l!`~J8wY|(!!F`KWE63BrR2TP20Q+WYa=WRgcZJxHS;0rRkj)iN?HlAb8#=CI1!q~A+ +twRLw+Qz@p&S!v|gu5`rOJ`8GPy_*6ss}*=4M=bzci^MZgvtr}6MAu1Qz2!Bs2EYTY_}dv?}>ilgEl> +qHTokgQ=7VT8UHLFP#}R#wYJ3jb=R-o^@J7hK=5OBpS-T(c&Fg_hZvU2g0uR!(BN+TcGP#k3#q{B;QB +00GiSE;QF0LO0YxFcXLN0hYgKn>b}YS4@jQn(TfcWf@n&X{>R69S1Vp&{C=b_)BhCq1R>$@|V#S8wvw +g*cYLM#`Tx!B9cB152Y`Z5sVv_(3)8lQ1;1?X>JJX$58L!f6s)(KHW{>#|nGvwYInA8XNGH)wm`55$= +Qz)?xI9MF^MPp7f4FP{HO0zvKh_( +azmj}TDiuVE>M4qsD4ON=6V`?r)nqpbR$0C}b{WQ_2C))KFO%_vN>K>8%#f>P%9o-qBcx!1UFvuN6;q +3htW*Z0S#L7x%m(98XE^^ho}5qTmz^0cvX8jc^W$|QBT=iMs@bA?&6|I8n*YA}+x72{@6MXPKlb}KZ; +HkLWPcTV|EN~#l}g2rxB;7iFsH(j1wywgPUZCLcrxo=&E~VOlO8^X=eYOG`};51b|6AFpGy$dd|opm3 +L7XTwVa2}oA#b}IQn$K8;wW3I%|GnqgaWyQ%OJ4Yg= +`=zfvF+?uC)--{}g42=XW|HdeqxgxIC_HGP|GnJ8>?Z{_2|!eQ-z?4m_ +hZ!=i+JVK_oL_hS{xKq$YYDU4!n4^*6aXSpS$M*3?3whjsuT=L9UXvcF0L#(1rZIOmAW8qedyC9!cut +^uSMYV>4J)@%hwSQB8Q^n~jqyML2ecwE+y}~>RRJGz+sOJx{4$SLpbf1Nt)oOM3_c!Vjp;Xjgh^fJL! +gcgL-*}X)_(Iq=6KYBQd$(fPmW+17PYi88{dCoU(neoqB}UtwU1QQAnn54cm~M!AMTFcsB)Z)Z8vUe7 +(0qYLh=Qj%WB;_nHD! +O>@Kl8U6J6%GSu>OAY^kOos`QoP7Un)Z%P^MOw8>lqM)gBClU9v@d@q#pa^qZf+b8>zLX{1C4|V0+~| +bn66XflZ+tBi1o2@d{S^yhYT-2SuwEYLB3|Cn49e8Po{vSY)pA;;Up{teu@sL?JSL!7H_K(`;ATnyMV +m5`Zbad>xr48y?O7lx4M+!2!*r+oD8;N#&}S$XqU8W(;{A2XO1LIcEY7V<;=YmeTlxcSjYEb{q)+2% +N3Pu#VpG`8Q~Lf`FeRydkwer&sfqGWkOCwWO9kh+77nxDJ5%+O9LWdZ>X$p?Yg&4*7%Nuf+KIRj^06C +CH7EnO~Rr0l+FB^$`NED+GtnRW+*(r&{&$#AisYK^f2Zad#8bK%WS-|g)Xkx{K|6286tAuih`o}vI^v +(M3zcFUl&dm5ql27xugt~d8mJUE*0d}L9r>R0y&gA8|5L6(jHgdP)kdSO1IbM3v#>8^x-M>X&uenMdI +TA&{3STv;PB+Xu$mJot}T?`p4-lvz9wTWq;o-RQa>4F{{r-}QQgj3i4@isq|#-zfWzH7aEyI*@dQ$JG +ae5$q`i{ca-x`SmMn8bYXO5ZDC_* +X>MgMaCwziVQ=C%5dF@t7_}d+y`{ObtCMb9^>mcdF1nUhK((q?6*7qf);cj~J9N2Uf6sP8NTIv8s1n4 +Ud7gPQ^PCh)4djRkuym>m2eVe8r_&a!o2Fy9WdILvBsXmK;l2R^%dPR;b+IQ7@JD4*;eIrNerxIkP;W+1exS9>ui7TCQ=_!q +QRim^vRn{sD-+<`H$Jq>-~VG~$Nq6;DDYJ(XK6W`$My$Q++xh3Jmyuk;|~T#JlO2oYMciXPT>q4F|+P +$R`mGre{!rJ8dxDzz^N?3~uBpqS;Pc_K`J2N9N7p8TdX{47P ++_*?JhX%}BtyHUwA}9x*j71w|EMABlgArRQjeCA~>~`{MOfel +C9Qf}p`GcuE*^!A(cJs$DHrO!ypMvLcC^w@eg9^okWSX$*!6Q(uJs1Z_@Nxw~TksgtQMG_+Z(2R&Xb9 +i@!CJX)#;CB^!q8(i!(uCR7@J8qhIsT|@MdI1#v?DeO^0~F@;L`L)@;Gen8CrAZ!FVN^OaDADfgg1K5 +UPTK&z-dI=b9x#)lfqxIt#G*>fp5Lv*>wAq(7ib%fk8sOU*W$yv(-jVGAF0C5&H1Algt3mox7TR#F`e +;7$V$EY+a_WN1z&cl?+yl}yMxaeL%}Fl{fu?j9PK?` +(dG&>d3-L8v6>$`D_MM90BxgxkM3H3}PM0?h60TRVYF=<~T8=<7y3GufwDw0MXm6w7Tf}~kQ=Bd9+V7 +UUF{Ym4dOYiF_NB6mmE)XOxdqZ{$g66$JuLqg%Oc_4Xcul%3a?qHf8y(TRAD!QxFSvz6!KN6nObS +m(=GMye7F4H!d?CcFaJf@RBX{ai73yjD#n&M@GOS*vTtBttOdg{2GqLeT=-M8<0aw;((Z|sM9T{NUO@ +>AWk4!Q=zi7t3^%UuTG_@cTMqB*y>e(h*F%~{5$)8nQGM1|X<+g;P&36;YZPKuyX#*2E&r`_>8=A +02tpj?xAmsTJc{v&HeP^1=Za=TZXupreh=4S +Qanci*D`n?WmU~I+a_}Mn8bYXO5ZDC_*X>Mg?X=8LQaCwE0O>4t242JLd6@pzh2)XRA +5$GxHJ{Vie*2@^9sJ7CWI<~QumHhkJ&6?3tXii3Yl=q|dwgv8;G8<5>vz}r3xtOi0r&;y#{Pvi`vU8f +UOx4GWGK2VC!LoNGr%EvFFn}`bt?UHJ_=H8zjWuvv+!q6VxS3DqCA?E>e+alCARP$S+KMfpk;g(H8jA +B>Lk6U`Zf`YoE_wa?Uv2%r*4?00p9yxbn(YapF$AcrjK~Kg(<_Qb8A`P)h>@6aWAK2ml;P_Ev>a#sY5x002Y>001HY003}la4%nWWo~3|axY_OVRB?;bT4CYIW#$Na&KZ~ 
+axQRrl~mhq<2Dd|_g4(!7aMTYc-sQa23yo|t$)M$tO)NzkB$Yt>^*ywFNxBZ&@=2P@;h8h +TX*Mq(x{n`+6B-Ts@g$Auel+br(Fd{3Mw8}Srm2-1sfD7xSu;sTh4~?s5|ObR%3zBc$IzjcULx{=Us# +zPTQWKM3mAx>uy>i*F-%2KYMBWyjdu98J7f1h0H&F-wLP;eJg;e0?Gb;zO4xw4t8mrUQ>)*m?(OX#fV +dbQQBNsKLad{N9orXP*Ol~*_ExS+Z*0r8Kf^*a7WO;Mq}o_n&^3XfCvRw0`K{51`XP;yD=TajRLU(gI +g|TW5WAzwl#~fgmeJdzn} +$0Qu>g3WEuN(};$^gx6L2Jm}z?}#P*;qDi<4i@?pb&t0l5IbZnwS*7+(al{*Y=d+~3f-{B%;=&7=StT +k^&WN$?ePIX6#9xWq7_r=I4@NP2#BR`!}-_gVotruGkxnNNpCWLzC$dOh8O*-kz}z`62Pd+3hgTp2f# +Q^`d^XJyC05*^XI^RFq}`~G^N2bp&re8$$Z#fjCu*p7RhXy#t~7+Y{NU7|$_%SVUe$-{qKBF#o4x2B*d+X{Och%jtuV0cCGqW_dU +ym9ofCn|>>amd5F&>QD5ZiUFUQu8shzxSlUGp;>{lwuVwbL@B4tRVFveq1Uq?<>!JcvJSXz4x5sTycV +)-TrpFOvlUd^dTNK8jVZ|=W27E*1C*_b%AYx*~n?Rlvp~;Wy`V3+jRXWO^oK+AC4=rhFb0D6xTkxkZ~ +w{2~uA!l-hbv@tljzI-SR#Yp&rHiuxwV5vCAwUq2-sY^MGAyz9tefLHtmtUj1KgyuP0);@aA^DV?ugy +#K#=3hQ;>^}>&*uEadz4b5j3~YUM((%yCr4|LBwFK;0O3S{qny&UWUws6mgx-{5^KT$ec-%r?HU0)rO +9KQH00008031s8Rw38C7UKc{0Fwp)02}}S0B~t=FJE?LZe(wAFJow7a%5$6FJ*3ZZF4Sgd8JfsPunmM +{+?fP;uA<)MR}2?N}EXA6g)ywHDMB;Qj=WU2-ogx2ZqFt-^Fgji$EG$zQlg)yL;~L@$K#H1)&h27GPH +hk!;q6R@?&D2#vvtc3L&dm1?ZJQm(kJ(87k7cYKMxAQM;+6c%Vk65nAF0e- +fy!w_%vs+bpi|L#lQnPu6(l*k6;yW=V?B_-sLpjrZk_wNhL{>mf+!^L!Za!f+bNF6&bsl#%#i(u|Ms< +I?ZR{nB^=@!q^`lx#OSVG;Gg3cH*$hbQXn)N51Vs7Neh+4(j`;VeI|%CojVg^bVb;_gHu{=X7WM^i1b +z%cASTRIA%sE}dfbTRnZg2c2#<vXCcpK>8zv1^6V|O41YH$ON{F~6!l7}>K_S;&Y+zl+zkj`l73;KgisIE5OQkVny`h&`4eSTXa1 +C6%P(hMk31Tt!yPx-va?%4}jyOi2;^mYYmKko!ix~h(L#Ta?x$4__tMbvIStW|}=P<{1wAY@czfx}0O +Z9s!|>eM@*0}asUjiLR=^3kr^<_I)y=1Ul*Nj^(sAX250rml!WMJ*L*$@)#xNbS*t-efyTj!sX +vZAzkINN@7efqqnEH&??ncEGWJJM7#&lcQ7L&0W|Q=?8xSP)h>@6aWAK2ml;P_Ey*v2=;CT005>B001 +BW003}la4%nWWo~3|axY_OVRB?;bT4IdV{meBVr6nJaCxOxZExC05dO}u7$wpPB;tddPP!zQ(-C44w_ +p$uRk^CN7JCe$?8Lg}eT+`yNjCp4ZM{x + +Rr-g3VI);%4;3M4=IwhLyGHMafjpCNjwrfu1Dzl-(OZPU +rUS7gU-oaOt#*+j7JvNCkCfZ2r{El?`lA(i=#q8>)zODk&%{jev_3c0t_J4+5k@Ur9zVLs)B!*GZEQ$ +QM3%)5qb{6gvJheCUkF9e?_KL%zf`ol^fYB)(iFoPen+#e~^V;sh{o$K6MY=-hdIZ~APr?|D<-olKxN +976|2o$=J`KJ`0e7(IotO@q20;Gu6pE`r41&S%-t9M2gEtvowk|KbezpDGsM`L1ykl?FYPB$7T7#>@y~^u*|7a~)h_kb^^gErrJqJ5fn$&k@ +H#F#QflE9Cwjyq+)>OqLU<{4z3W}W2EU{PG3P{@nndZsHYFW+&|H@_w0%l8_!u5iL1RB4rDcs(rtnAa +7q*Y>M3dJK7#_l!JDrtLrk+V%d{SjddLBMHA2SHt95;tkThFVd?_%a#(w +mgHhsO}Y@v5RZ+Ct-$_TnUS64s{zlD#Vwr4bd>G4h={BoE{_tB$m;|3FNFzS0f&%H0xIljQ5hM0}Wa^D_dNPAO`^Fp;QH9-Zfl}=BeAuUU +pY|QbQ`RWcsaoR+pTnz8(4C;D?_b*ZP6OV^c;S}%{Y_hs7%-bHT{&1F*b+4R0CNtN=M%Cr}k+*xV3XVG-567ocIU +1K9Bi1XOE2rb)7TT5GMOzO8{-pI)oy_)*>Lue3r@_EI8M&*_H2%nw9{b;6%9~E;it6mXky&r}yc8=5k&Ab(w~ANRTfac7y7klV?88&#Eao*n%MP)h>@6aWAK2ml;P_Exp?*ZFY<0059L000~S +003}la4%nWWo~3|axY_OVRB?;bT4IdV{>gTaCz;TZBOb*6vyB5DNgi_Ccr{@5H$M&5tSB7|c+q5i}cf=FuSeDPMq;1>uOjKU+XKpz&F2FIsi}65xmwT?xW*m#A)46Tx%rhO^#j +j%K2>%NjL*Eh0a$Rud@iB}RzT@Vtm>{p&0&g|(;G}s9#d^Jl7P0a?G%O2L(z>8>my2gUUiPHv*q$&)b +I%bc?&&#}0NgY27xYc*Ntjc}0k+Ts&w>8Jn>cpRe+-Rfjsz16ijpBX2iGyYpV+VqSU7XgnGJ%EO?PRJ +f$)IY<3AjM*H4|XX)LH{ZjZ4$)M}55sk@5dP4@vh+{Ko{4Y#?#-t^}q%hd7Wj;V9o<-lB+QJFh=jnp+ +YL<<@M*ACkI(Y*j}Vm;manF|+7K*<+ZcMBY|0FHnY=1~W5IA?U_DYkk677mA4U7C-L!E0B?VA{U#1cQ +(Npdt43C(a`nahSW@m|IzNKrMW44{m!8188&~;d?_>8{NU<1!^(WSc?O=bTX$i%S2=BmSDEGz?P%IeM +@cL;uVd{cBegfq}E@z2i=yY!F5lC2J{>1pxu1vG*sw6sQsSS$^&SeuVPHmjEnM1j19D$IvC0^_n2w9% +dS7-_`29YYYY?i%yFD`ZsL>zhHGTD*l7?g$A0qriRxTRmmFs@hH$2k#|^`@pIi? 
+o&y?vC)5i6ndo-A|q?&%MMz#0L+AFLLF6H(dS3{r-s(on&8*NOza}2VPx&1M0b~N}CPBOS|p_qk2+#9 +f;1zgF6GhY^~31l^r88TUs+=>ym;--w;ic&X)+!Sfs6mnCfZY6RnN!?21R+74v$*nAPE0bGU>Q*7Qiq +x$_ZWXCpmE5XQw<@_+rEX{Bb{26f`+qB0P+nx@c4n;RTkIQUHFB$+NoC|#le*Q(tuA$|lUrTt#>kCH- +59wssoRL$MpCyCxs9Z5I=SgmH=W#csoR*`#!|O2xs9c6oZPt7jguRfx*6nVNZkx_!>#T#L%W?e@He~g +hM3m|c*ZT!8ILM}MxHZ-w;e$Q0hv5d~woI>kt#TT}};t(*^VW- +s{B4u`3wECi_N3b&Qhxfg!R_8o{Yw*f{{GK1_7IWR9$3_-EI)^2~WW!Yk(=i>vx@u7D6IA#LZLqpv!o +<+x_L|*Vivyv}rc`}&`y+RAnNvzd#fcZ +b@Y!+)}df?+9+C$wIi5A`9bInk=n0OH@$$m=+eZqJW5$R^1%C!t|fUTwQsLPwqx0fyf)f;U;^BQ6<=xWBmH*_+_n(6U)27 +P84#kt5TGW&>|rM5lKp;>_>~H|Qp|ijM8LN0)XUaXs^v-%st(bl6t2UR9omrJ2-|BM8_(SJSmq9&1xe +_g)U61Cqbx_dT(kQI1pn6m7zBSg@D79EXUPa2%io1PBX~8$-?dZ(ultYxz7YHi!Cz!w2p*N~8NqSW`< +)Q{ld=5W2!8N~wfh0^{qehu$5-6DuY5p;-S56{l-0KmP?~Tr6u!6lyB7@KW6t1X_~$J79rs~Eup9zdT ++TrFyM4~EmNw8K^1ifT$=>m4L$Q#SH~0sbByZU0T2kKdtsj>+d}A|~mN?w0zvQ#~(z-12?7p0E(*r!a +uWZP^p4~@fUx~w4;_#`718#c%hs0rvUrqcvFrJV+e9D9G1o)0m7d{}O9|7)%dszQ5zl!j)hBy`@qS)r +g5uvsCKK@!F%q~Bb2yX)t{MzVE5`NvOarkxT2+nLDzWxJHO9KQH00008031s8Rwv=Ag8tbpY;f8wFqa!qPUwt@rpLvBsLwj4{FO(aFYtWeV6 +Rd1D!UqDP_rIYnQb7uL1=DV2j*cH=`YrBK1wWWHuLG8D1{{%&)d5bz>NE2skXu=KGPkfPN(pA!GF^{a +$b=W_ssTT58V8sHOdiz%FU0A_|x!%@@KX^bo=llZ) +S#27aR!V!3lmkz61h7q#VjV=@hIhcg;*gGlL?ORka4IGTqd0C*q*E)6GUXzSL@x=`xI1u{(op&JGISl +vtF0{hPQ6W0bS^t#o~yrqmyQkv?_y7*Detphd?^o~MQLubfVr36K$3lcJ>T|;~P03j0niZ-G-r^xVS6 +gI&CTl&=Z?nX0@I>QJ0)R{~=L+{}dwon?os9(Y)Hrr5u7+Gm1?GAy1;J!EM-T~3M>JR$fgNy#A?+ts? +Dcy`F)S+=_;`O_;L1#ka*dm?z#@Blr9Z +Zm&!SqORI5Bc%EE;jcg0+x5Lw4hxsoS|vc2WqJ5e`&pm2SmmO&V)8&p@g5-`haiZ>4bDY~bJQK_r{G% +fWEF1qo`KIfz0lu6CApu{-5<9PbtwV>Tsne|qoVkFI-zTCEm@%$Q_3fz_+oFrP4<(2Ps+eSsg@W~RhzHpH`uWVM^Bp?8d%lLp&SPPZum^%4!Zaqg#bPc +(vhWiHGSm+vD0BaX(gJLExu>+`zGNF-I7Fs}Kh15VC7mFXvk&8w=a@n!R}owNjO7E{GKrW@ifoshFJe +>^)82C5;gZAk*}a=&*-gmZV@$l?<%I|4CXzku0?Z!Fn^?k1%j%l%qjH!Vnj#6t-30C!V)$8J&!TP=!M +F$a{==J(7kN^Sl_l^vnI>-5lnoo07!rfcxlN2pTv0* +JpaCTgqY)7?aPal?0ZuXJtJkc@0-zVcHdvVxs)dH-P^|@5hYVG4Ijn&<^^n#A(LyV5QOv$BKa2 +asezE9StccY2doq4`@H|gPJa9zST{@pP;yl;xEi~HsvqWr#EhKkwh`{V_;M5dPWP@K6#1rJ2Y(W#>ny +1HGSrARa1uy6JKSehm7M(rO^O9KQH00008031s8R%pw*%54|`0RA)p03ZMW0B~t=FJE?LZe(wAFJow7 +a%5$6FJ*OOYjS3CWpOTWd6k-Lj}=FfhQIf(NQo~-YM9EXtV@oyD*=X?)?i>8(8&7*HE=LT)AV6aH}3i|qbzb)7$t9}h==B;oSxaNOR%IqoiQc5ln;&r1ost~hl$%3& +_W9=h;p)r&Q&gAx*T;+FXQcM-xZ9QM!@HYbld6@|WG!?wGrI_UZNI{!LDPynnO1y55zG>++Fz*YAIq)FT*q{)L +h9=#b=l{4_E(a9pR +&(Cl3;H1*EFm0dRIPO@7{g7JWUEYw*39<#b2Mkd{OQ_`KkQ<-ud~xCog{bKF6k{^K0sFqqP6uu{lrFyTE!OOSh{lzc4RP&qNJ}s(Tq>Fs~{QuJ7<>BgQ_nT9 +G7wGq8|E^peZcfXu$9>vPTHhbEn^b<+?`azSo4coFsyXQ5>K~V>g6BE?(f(bMesp;_98b&r!}SeAf4r +A-hp2V9Q^%o}^747U`B(qS!zVxdA3^2TNfLg3fwFQ}?~aEL<>EDpjN!+$?|rX=kJ*2ufKcb@2SRbzt@=wL$t3idT)(6L +tB4;)w{Ypd%L?RKYX=a{NfyemVetQO=I~}`L5pi^5b^CU&gxux$|T(aej9H<%_eYPhOqh%l{>%`R?%BxO?{>KYaE4>^~p +o`&=y$)`RyKM@%No<-eXD#g~iMm%D<~z58(U=QPlFU*@O(B$21D9-Tk@k5^C6e|UJFbiO+&rH-fh-;j +?OPV3m`)6&*@T9?}Mk?+bF>uH&Xep;di@9Jq;h9$=h`8B%Z;?#r5=lQhh&Ij}735iwNOTpIKn!fgn$ +F@(7*EX+zJ1$mOvWzp7nI1l>s%*t$3f-cXzR83L{HJAkF1^(r_F=QH2hRXy?XknRxRTwj&kBLPl7MYU +%yuEWA6ecLla6QBIoO9P4cTz@~n;AX(7Zl*ItQ}G1a;>J&)&bUsi9JGahO~-GeWWI4a;UFA_{fIwrVQxnW)(7|lSWIpGEi+$=r#;7?@Hn +bBdPL%Kt->0#Y59oHM#cZV;T%h5HpmwzENm|Jrvbx29o6<;2a)H?V`JIv?EpN$7>ZeL~|3y&3Tb^hv* +c6%wBwzwQastAE0bUbCn>DQWMK%2rUIdA11ro|q0AqCO~v<3fbbs9(#uuvrpIIJ#@m#=;ShsNz)YHR8 +s5H7SP?i9%6zQrPk_yI95x-D8B0v>>%s8WSFd~F4M){#Xgt%u3kZ1dIaHC& +>dw5Eu&%vErEpn_EHIF_lsp0Rr1hunAUTdpO*He>4E2v9Xpq$}NL#E9lm6)DdY8A%{pHt+s4>%!Lc+T +(n#OBj3p|HrB+tL4akCm<W7ATL1ys-ECrCZs1&?EG#jF2xRhVC^TnU6EL!5wD>jfwiHSf}8Lln|eK)p(*fmrpTUn +8~K7kKIlRtxFBMKADMMPF! 
+h^C!7B`k1!JkRxTCSN73^@v`Z~B4i)4Hx|Fw$C{#;o^Ed#4FRCVZ%v5-^bzHwP}q*_}*x+6#e#N?Wry +JMEyzEjK9l{;$fc)&nhHWeXcR-#f>w`{(;Tp9IcWr%;TGM8KK6)kt1q=9KbZ|aPYjIno6ql2xnh%8XW +jkRe4eF}+eh@W*eUBLCsn>woR(5e97FdOEG@u+neOh8;1!vp3JZkj6mZ!5uOstgXnCfubH0_K&^*e2M +Wf8o$F96lO+F^)A!4N|Un1~IpfTTH=Qg*bes;x52mCPqXh=9x`4;(k|Dp^bqbRS%MpG?UF>Wa3@rGJD2tml5TctdcI^qw4WvL8WM4geFvx$PCVb=jhE%g)42g$-=~e0vL=SW8f);2v>m6+ +%lL?J`y3y@@^GJfg!Dfl|qcq$PpV*5&~9}@1Po;fQ^w%jsl|&7*JgeeJVA#Z&T$I46_@92b`FcAKj2$O=dQ0#SL?tsYPy&b26~Y*{gzrVn8lO(UCp +kb=#KZ+loUVeuk6rAz={)sc$K0Ndj9Kb&BmDB9K+h;%9`tAOZC#Hv~^|r=TheDxIJZ2}*{tZc1 +4pxH4IX7CSb`;e&mTS!9Huj7Sh0s1(gxYzu*9h9CfI8PhZZ@I;8NTlopdYvVq4k)Qa_WXcICpU^(t%I=03RYBmElx8lpTc$@o`PCwNalmg)b-%Y1Oe;I44{PGWn3d4(5j41&mRX!8)sJLiPNZO3bR7Lf_0kZVidylm`f>^GMwc>*l@J_aXmeS7(BY +154aimYTC4I89cN;CTR~ExIwy-4+!G<1+W_zizD_((5%GyZ +0=VhT2EPn7zT3&Uo!QAY3|O$AwwwsVj%S0~f^%*ja3x(iRKwow7QD!46H9_$2MW22>M9H!3mC_yqi(# +LV|3!(zUs)7W*s@vNb_8kPlr|Rl63kx~9#Ux0`0pfx<5LL7xjh(l5>`|5$C@fG|l%aniBzt6rTe7cwN +l1pnMO!j?#Zg5Aio&R77XyHS+Pd890XxS8eab*ht!Xp4)6+_p +EQGkXw%Utd*$jHd^PV$MTpcxv!g&Z|F0f^W#GBC!dP3De&1ij&)hZUBCvqcmUnp_-$EC2+BeN&q^#F+ +*enngiVEa-RzEuo-M6IA}SZC@b`Dp61-L4b$)EGVafyg1Czi1uN`0pxQs9p}-XOvWUDv;Y?ZpA4KX36 +4xQJ5m@;5;jb7)Etq((F1*@lU&YbRZZQ!&EJQ~7lYu#BnTPn4y%k=W +_+StO#TS2CGGI2wI#$WY!eiwDe6jCj5gzye!i^+ZqqStg6A^RqjMgG8fG~mljltnRarxPe4S5FQBSfC +_ujeIZ=fF&~K7z6^!js_`mlrnjai{|NT4cdTC?un0ZSw;yu8rc}8oAlWV3f7)xFksw{!2!I9l}NrvES +~626T>lbtfPWRv;%ZF7^ejCQj;JfulNwq1njnW&d9&;}Y|McyMkVdf}+$U@;Wx|LZs))#C95e%e?YgMx<6>|l715l#ZO&wg_EH`6N#M~F@6p2rJS8 +?yQJM(D6Y$Lhb%<38u%w;KIx_6a0^6BHl2u=(?=gahqsQBBMa_UfSVD}-VylDfGdlymz$b2T1h);6Dq +0pmIE4>R6oaFuu;ijIQMeEh9DawbPtN~RG-|%FN2l=EU{vWCRh#o5Qg9qxCwC%CD9;mD64MMl(C^+Av43dA?Nu1}y=B%W2-ICv +#M4HGg*RFYkClh*X#}#>oaERX6b8qGvGqDVAHwE@NAm7!?(ZR&{nZ&z=g=uQ01C>k;E1Ax-1rw4i>fVuPvHrSb@Hy6Hb$;YY98j)ZM)C2ND%!1LFP8*3$r#2 +vJi+%*QT{sQZ++zGXtSu^v ++tFBlVa(U$*5T*ojgC6#iGZAQm`IPJwjw0VBYpO@_&OX^|=mTi#+?p-revB?1qH6Q-D)CqbtGRh;m}v +{m$MXVlmk=UD*Ps4@X1;oQ<%NRRp&WT<=Xa&yx$xHA#-lkx#ouxL`&Huk*)wRkUbup0HS()tG1BfGQl +P)wHWgg47nBJCtC>r#8Jtl3C7z2Rg^IavylkIdU$CpO<~SU4KK2!aHzN>8S4o3}#sgw=Z4hW$L)a`ZE +kCum_i9)1%7inmf`Pj2Y+c@#8}V^w>>ct1d#XalU$(hjan1vjDcVRz0$J4v$=;O0?g@l4obE01UGRJu +j0Q4T(1QeF5Xt-at@T})f|+IJAdgSm1N-p=5m2o48g^nH{c#F$WODZzSH8GNNn4pswcI +P+&s9vL@zz^Hn^pApwkOa+yObtEiFh3;Fj~bpFuId*2u=CTrcOn_A!L!l9v+2p}F@zVdF&0Z37$HW|t +}ZmG%WAX0Ya(GXsoy#dRTE+a7J_jq#cIx!%&w%zEvziT+3tYG(k+mH?a_;_v{5H@OV7uvH4_9mG7gHb +Fe%{{S9k@ld4YAZY^A}9HH3Gyl!aY9ZwxyP6sPx}GVFeKkASJSY{;s(0T|*T*-GU&Ej;MtZeCE6q>Rj +%JizjuuFs;!iSui{P~*EN5#lDA9;*8{`2-W;=-X+hHsnG8f!T@FJVbA~^yh2aRfa?v$8#Dh7pj9Es)a) +i`s*2!U5p?NAGVp9~X-kHF+yc{rdILQfRW?x>??Gp4e8vyoZ8#;uA+0)ML=*;dMlP5{DCqQxD{=x!pe +K|#+=9AmMU0lC9`5#bA0|XQR000O897^_9oK{>vJ_7&%*#-arA^-pYaA|NaUv_0~WN&gWV`yP=WMya3Dw7X8^{`z~CkI)a +Iwtj+{IcH|h%ucg;`J?{eel(@=C>~7H0rkhT_<>%DZ8n;;*fPzm+(<1H#U^Kxj!N@QDkZwcZYYDT+c< +@ewDb~@cl^Z4?AVei$Y0<<{FJ@R#ExMq%Sy{kcxkl5U%NAQ{|Cf0H}+u9EDO&$t?NDGuc?F$T)P^nww +_vXn|klxzXQd^@GeD4Q4(SuCG6Nfaen9|HriXcuDr1=*ZvO{qKU9yX)e{q%95@L6?*c9=C$7%ee567D +7m)6)AmX6RP%XOLx-ne!8SFJ5);dtl6TQ +d9_wD19&Vm){aRy+69~2o|PJgLegU)G&G>!t9wf<;Saf=*g9C~ZMS#Ic4X|3vD6Yi)Q@TI00c)Ud{l!h1cJ21&|rzD +8cl@;38C>#WnK@xukBI=LEqs3#WemGi82Wd*fSwa!bqhv9Pm*Xg*`7)W$(m|IfWj;w9#JnW(i^K&eO9 +Lfy_ChLmX#El6btq8fv=vW`F=s5XQ6%t0cJ*J)P#L}1_Y25b&|8uPY2!QeY$bLF+xN?K!E&B&2TPxIJ +M?c4K|+637(oj7P!^ybDr0Pi`o{SXI*EY2dE4vVT=)LD=@BhcSYG>RFunW3BhqX%3TsO0W?Nxj79uPg +1{hw^U>QFoci%Z8+=<%ZMjUZ?7kcA7)~yuTR%$*WRXSNsW_N>eqtVEeaPD9}z*?8ta4v8yup0%fRuV^NwQ4z5MVqca($r`^_`~NaoS{~``x4 +hayMQ=0zCuwSF0|VEvBaOS-wVAGLC|l|;lt}<|D4?7B6=A2)=$t8wfg#{x1+u*ElNK32ikLcmVIS4T_ 
+NT5Lvr;RlM;GUiOug=9`NFXzH0mrP)h>@6aWAK2ml;P_Er^-^I_LH001$N001BW003}la4%nWWo~3|a +xY_OVRB?;bT4IfV{~_Ba%FKYaCw!T?XD%om8So569CcAe +pDK))tDlJHS+NaQpP>@o$eWpTD@h|LWU^+vDroUtaz1#}_Z2&wu~$!%r_Czj^-V)zj^p7mp7w-`u{L*S! +1I{_00BAKyIAZ~pn=?+-6t{rK>7d-dJz%|E#3y#AAiH?O~W{^P^{b=${RFJI4pKK=OWiGR!+-#&Zw(N-=00Zet7!(!?(`l{ +Z}vFJUxE>)12mSUjOzF^OM`{FVCO;?e^K@i@!ZR{&4%0Ei{`AXQ +weOxD9&WE+efQ?)`Cxy%{qpLk+c(c&-ku)5eSGch=R&->J%9P_?_WL5@Bi@X+sE&ISs(rM^4mF`_0(@ +3o_=_}*YKY|`SSMh!|VB^w|{)PH_T{tBKK=ZIcW<}PA0GA+uf_aFi+o?=@8&Ikcr{n@+lM#LA78w7>VKWv`+7d$i*Ik +=KmYr~+~#i{9;Zazp3ful<1hbTd-&qj%fB(-`ITQ6^pCg4?`|(&y?N*M^V8#$ohg0)VRz?Ue%;@9rqF +-$?mM?L&5NGD{I?f#3qGG;|M2mE$<%FXn~+?fW-xe*DARKmYvmyAR*~^zNIV|IpL#f0*BR`u%@jW2fA2 +x8MElzu%5`YtC1iALaAfADykA>!UN}{ZXral#X2EqdrQn>x)P2@zH2!FKqos-#$FQ{dtS}dbnq6{cr! +Y@AdoNq~FD_zrFoaeix3BCCrzh-}AE%;x@sEak{`EQ@`TQ +?S*48ty-WQ(>LGWzMt9-%`)}Svp+w3>n-->t1tfg*+=ic_wiSM{^;{Bo_+MEFTeQc(@ +(y7_TKzI=WwoNeE<9BPfO;ey#4#9PberhF>dkRpFaNJtIt3BUmvWeFYQ`i`R<1|f0z&RU0gHI<+HE8` +1Gp}pMCH@zxwpqpFemuul>ziQ@P&KJGa!2WBxpk>zz4?bN*V(`grtrZsVO>&+pvkUs_4)s@CR3$NZ>V +X1SJw_SHU=Pxx6C&ypcgD^s?jIgerf6M +$_^GIzybDCdn=Ncogg&PdK=1|wAsrG{}%&W?|*0vl>C0CYe#2Djnm9Di2Yc1B5y{%QQZQX5NxSnLK%3 +8?W&&)%vO=;^3+buYfwc-(kBaN;2kw`Fa!lSs^c4sp$YU +u*FgAiL3xb;)(C8;rGs^XFPAq-m_>zv{VutW~}aSG=>cNXWdlWmb{DtO=u}BGx~xb(+Dntjzr0x#pdxhFhD#9rD`HHT}7@^V-G}r8TE>ZB8z0>Uk))Yc0+YrrzD^xd- +!mYr%74oU)eeTFbN~eDJu(ShqbZ(_8yin8Z3MYs*thY(q}`bbxWV)v0iRk854%si{6S2V(t)6i&5yA- +MBTgVT(Ntt~)c=Mt?WQLp)sXO3Bnq*QrR^C +l2i~ird|g37&Fo>V^Ku>sj6WgqkRRcget7uX&P}aL(WQ+UVLZG?@qIiUCfydoIlSa$Y-|*IwlHy>u+G +Qsu5ExHz~6&)HpZwB!0lE+^8vR-mmX$vI?pT<-UrC+9w)aWcn^Lzhm~Ay10PuMgRY1L&hqxPmW@R-cB$GEVIC~s8Up=XjyM_1OF%=R0hZiM1&E +G7+SmQ&xa&z7SB=j8A53U{I=_)QR-A`JeRU!;ol)jD-0_0e%$(bVfI6cA<(6jKChI#J*qa>E|TpO_1-pZZVg5J@+~{bHvVa@iZteZnn;%a%kj2T2Fk&nZ+uxSL)F?MTP{Ia-Yh%~R3zfAV0iA|yCA1*>Q@ckQSz$Zdx~_GGW-ct6C +I_Z;E==8IUtBfLDBPPq+0m2>~I@3#t7^-@6JgB*r1)3`b=xcDW@tKh66Zabms3FbGWXHU1&>{a^&aVk +2oYL&>Na`PDb^Ruz4VZwI+euG~7Ptx^Yos0=?$@KavM<|-U15d67Ld|RuO639T=5!u$QRdQgt+(AGbY(Q4Kc)+!?=|U%W4Bv +^CJ_SU`WGfs>P(*UoR7PVNvJ&G2pqp2_z2Lf~#pWqoiwAD6I5Z9IP!THNMeFD_j9odgl#I{X&!-cz#p +%y&t9%dSLfBROQ!(Qhw!$O});8;m2)hOMW65J-I;F3btypN6qg)|wIS^g~^-m>(4~ESuZ1M?&5Nmbpm +-e*#v!Le|>I5Ym9W;{xh1jI9$#b&<>4Cq7woGt@v&b>Qq)m(m>{_ESiNpiV>8@>WuA^UHR~O(-3=&8$ +qkINWS2+A&(S`?iVVSUC?mpXe0x43_y9056KEekr0@DzM5@0W1+74!LqQOp-JT_KylXO(^@Ho0lnF4O +--_UALD(wLpCnye$C%6Sb +-tVacn~vm^n0n$}(qwgc6gIgz!9rcV^?={J@4Jy73{-RUIIwwQI`_mpKoCNnr$GERR{F0e6~O-Sg=g8 +MQdEFc4GTf(@Nu_a+}@Ruo=qM$Kk8V^U+aFmr|$0lF$yIk%E)CM#mFQi$Yiy<8bb6dADrl*h2&aY|=g+KQ0D#VfonyEW}` +|@e{5LKEANnmykoD8%z&XzadAx1HlW*0VJzY<_uOa?Rrp1kHr149@y+P*{(m0Rn+!nO!nUPv5moC8G* +_YG#3--up)>`kW|1Pyc@jMTtave3nUx8hs`@;xz50W^LFeUyIzx0tFmZ_w5oxPZE^{MlqKZyIZHGP$9 +i6}9?pyRKmi8NHC0ee1svDm`9nD-l_0uTt$lF5(F|FP(g0JYZ3EEei6BI +DrF?wY2l(!Mzo3zV>~@=~^eUs#cMYjnmI_OTap==ZV+|_kwHGx-HiQ({u1Z2Ny5ffs4{kkvCQeL(t$Y +`<_ie!UVn3im(m_4AH%$3!1vBa}Wxv2Q9^}gD)~vLxotF>jCJ3Lm7LdfQ4se*OworON_4#p1K&l3mG~ +NhJrT`LiWyMjJ*N^Cpf$FigR-XE-YbLR3q9dmk^VBtnoQi9j1bbr0UFA2=O3$i2~=k`(&;_hI0j$ATG +4MOw3tvRf|WN!<~`7j56c|)FmqrW%DeqiE&t`<^#*)l$yV2Ya$p!`BUnK;zV9&21r^3>1;C^jPi1C}^hy#niRR&PB_&YL!py&11Ar{uv7*L9Wj-q%j +p48*Lm#rOlK>*fbO5hEL00+f;0B~>=gz-QWjWO~ku_FDvj>3t{eFafEwOz`d}0b1;x-( +BEr7EPE+IYv;S3gcAQ_MZ*H)vgY_m|c0$}#Ff2SnllEf=aKB#4Jbvi{wr0x=cBDq+X6c+gEu|qA8VfK +OQTITsP@zXg%$t@xEPle@0I3-hAHsl~-Kpcg?-PbK}VKY!t*b##sSpS|lsc@P4@9^$JALPPTcq}u2!= +>{wdF;^Dt#5(={f-l!WcNDHb+9RT70F0Kx~4vV>2Z53dV6QU{;sMN$r6Or>J8C!K_W{ZGj?;wo~)WEIM84VqMg;&*FEX4m{OJMGO(3 +FcTTcV498>07S=dCmp|l9+|@o2LbvL+Z=*t2qbV3zEgq%W1IkmNvdf?%m{K~tvo7ft`e#jb#lUo5~^u 
+n&}3BMDukoZfIz+Z34*mjRo~zcqEEnrqQ2z04W#h%K)a$>BNoF5==mo5!B2g?f~nlTK@W5|Pp4iOu1C +@B(e=d2agx}kIcs6OxSjo8ShsTpj&_%&W?HCvMhcWz<(q~lROmv~reRpJ@<-Hhz}v5nA?h-YMysOSq` +VWPgZJ$bshN5?2t(-2%(-b;?gUL&Ww{O_0_|{VbyZP#C;aYO^47&i3l|*F_a3IO-D3kqSVBA}h^`6LT +o;DH)}j3eG6ikXboD*8L@?9k?2Hg#z2VCkD3D^3N^u|vk7BG5_=5kTVp>ZktsuN-HCf~Vzr{YTJ;m%Z +M1aKYTS#pp0f_2_4mu%-;rr&ADeMS*Ih0f`z;usv@?)-95jI+-hao#ToYL24SISUZjzI0nAC;wj53F6o3`fKs*M +}t>#kGU^pa4ZM|NM)nw$@OsDfwNK=uizObyvjLb8)T|RrNW^IB#cD}-o$e`46D!m4Pj63Pbx8n~2!kPX?GKjX-z9YY43XG3gQ2s{}gl +-d38nNg#}0g4mnq~i+e9pM5HfL>HcjB*Eymyrd-+!sGl0pf1L46OP@8h90)<4&9k&|OQ&1ug@z+ofjC +AtIS}v2MRxZx-_pA+g+h(=&)VR9xS6RX5!N6}Tj=q4d^5Gnb-9X(0tU&|(?p1n6==oSxkNR1Y=ze*y* +B`4XC0=l6u|))i?9%dkdAL|ch*b|kqPO}+dp78Lb?n**vB!k_L3Od)#@njKw_4xK9?>Bds_IQLL8PUq +0YU{`cb44LiQ#b7Zaqq4(k0WhFBZWT51ck_HQ$(0X9px>M;^j7sH0_C+y&=hR7&_ +8{&;h`A;$da#JBUiLv?%rp3<%bQjXzMq^iRshBfPHWn1wuz@Z1S1@N^cI2zT*jJbSq?P9;Qlx{1@@=Z +$dXv_Hg*ZG#tg15^mGM{O)R>Xb5rp#7xlDu$Y?IP5(yeN!JwowmN|B)>@c&88!2J~do-M?<+(f)7V(VOXEW50l1q~`Gyrjq>W(=Nl`OVHNq`ewXIJbCZF!h1v-X$UWkb`KaV +eY@xY#Ujb8P5S^W8MC5}fGBY9noe2!LS#wAx$_^a7683A0r-#Fu1QVecR^;6ZAvTiv*^U2ZRKmg;`3E +u42cBwf}#YVhGaSH%nFwge1FA8Ame#f*$I?QozTabx=8}*&#fIOb;`dvs-uaS0WUiG%YY#*04BqYm3-tq;gHz9?pc;*r4D@ZZO9K=7{~ +u#&FQ$MJE50{p|+h_|kHaPkhH%_&811#*v)EWD?do9H;mP;!+{pACJ1B4j{}Kel(*_LZM3$U!R0;^DaPFEAVnhuBYY#KB_vi{38LmIbkA#}8=0d9DG;%&LhS`WA%7 +UcIiqO?$YP(mv1p$JtO*tMCfnCpugSoJL!x#36(d!@!LbO<|XFd+-VyFP)$P#297rzOqO&nXjH>J}m%blehtlm}daJjc`J8nEHL;XyB=mTULrR4t}lKK0viX +Tyc*Pz_9?6GgcsFU96H|g0lI)MrOlM=>XS_Rb0HnNn0kVa%vi_ye2^rnFw~>uI%&S9M@42BVD`|x +?tBo?@m_?&S*s<2CyOgGmj|(i0IOAmhhBlmrhX?Iq9oxQrBU@i%nm*G+O=&Ku=m4~0f`aQ)&SgOz!Hr +~jzBUrd*oUYQE4PMH9|xglv;@fGSIF_=`zf6x(F=?!tf_{=`_oxs|Gupfri!nAvNPP27>yHfJ_XuD^m +*Gb^)YkBWY*kM|v}ifVjts0kjJd*JL8@Aui)Eq7NIUsBSQ~>r*luY9-p*SlV`~vX>d^=(y^ULT8ekT{ +sg7wjl#G=3muJ_=XZX=N@XajwzG^uACM{8+ify0so|acHuSNHj$?c&tIZ~41TSs=Pp^V5b|BGQV9wta +uyN@zfk^?jcGNr7xfKbMo3Ic!W`(MaKf{nU~aV}WAD{2TcPxz5^0{-gWm}jK|Ll=Xo&(PBcM%bVZn(q +L>8eQFH$Hg9V%}ZuYi6z_tmEY*$^J==m7U%tw>jx*RrS&!{>EOLMtSZ_p-+rJ8fksH_i#)iY>X8cC@k +rzRrRm{mh{nB4l@}RUIC|@X!TnJa+*L!KXwS8Bw7UUL_&bWT7PJY*?D8yXmGCMdk?@B}ibrGdh(Idn=^XJ^vE$uXqo9UCfMOp$C6jcD(gJ^_nw)!)2 +``1QEy>QAE4gha3PUo&}Nrvt;#=@=+hjB{G6760h-;7HD;3ZM{fUL1fnnWRXR$xUMv0ets8#oLM0;D} +lgJt8XhvsSNV?C8~1!Cl;k#RlJg}gc0-35)v?zyX55;>bGhZnsBp+YS3Y-zzZcNLjeNgjgWhE +54W2rh>;JVoQy1(R>f^7?FC*UFZGU8hWHMYZY=mUBIdQT0J0F1EoW3t~;#I3)aKL5vfA&;VOz>-eI+; +oOxj#dH{U#{etUx&?1E6Ob(?X~40aCZ=+*PBUaQB=Yvy#Ov^pLdoTOxcc;SxVO`l!@B{jAO_+BjB2G0 +4xN7N!dl~7U-XQmjXXlvs#HrClE~|b+8ehX=Yds5kIj4SkQz@EaY2M%Fw4KK?j-3VBvnmB+gy(k} +q>z66X;(E6jpOb8WKpI*@{Z1Q2A%x*J16yd$ND&*^@^KxwlJfVgbUBdTXDA9aS=L?(!>1TZnL1rst11 +A&9c8mVf9CRB-0%1(AhJrfkkRG1ek!E-fE$vG$UaRM8h$4iG*A-)&`3K6qqnNn5dg>u-HFS(iJ13)?? +nVG=QnUs5+(8fkUAT@! +Lf(HAU1K2>@lArnA-@&a)9}m|7FphkY^!i46f93o631W^wi|?HZVbWLN9#t$Nhs=B}Xc#MU;m#|Hc(m +cpz;xh&PfVr@9kISM<@T?Ye8z-BjnNOL+!3TtLZAkx5Xg1e>pV36BH4NFv-;g^#Ux6je8gGt3AQY?rhtQgKz8ktcp$uKlLV7MM}*1GT@;h)Izeh?BN4KZgV+ +qM1m!?P3ANJ5aB5~zP*PAVK*33G7&$9h%a*$Hjv@h}dy6INF@Rt%Q)<<(#{4lJcmYI7%dn;xasvj~h~ +E(bopaa4R0?XFCF&NYTWMGI!BI9{QAho7@Kvl!3{2KEGZIx{Ie*!bw<}{VEsf?IJq_i2sQiF_IQ)3Oi +Q?>H{^lUhL4pbtq%}3lW|D)n7ohgC`yHg21zuXzummz+*L8{G7it)nm!N-gxVV1^2^w=xY19TGepcOG&nuC94!!FjT +TYou8e5_o>NiQd^O_*coU#-QecZbTh*^`nkB3b$HnY~L!+fPxY5=wjY&9J87iX4uAmZ{hE#=Wm7~FHg +QHTf&A&p@*nWm16umhjZhh|hm|9J)(C)H7fOugo`qV6HPM{S75`vHpq*mjYHv89{u5}#Ldua@gSK|pw +Gi!eMoO}+0Z%i<7OVe^9DM7KqKrSgC!U=1Wqk+_UFORXxkYH(v0&f5%QGjHuJ0v}d +EP5?(OLLlcZktuYrX_wv>%L`$Knp?O8v|LP6|GnJO>VOpAh0wN(%BCed`O;0;#=! 
+8~H9jZE5I1}k-Ux%=E+7?QV(WeQ@PD3clLNx(0nY$g-EnrhXA!1&n;iq1Cq@{F5$g_#99CgKW=v3i4~`issnR-dI!Xhzyh)F8nS|+;FA=Uw?;wqKHDCvZNlN=Sc;wr@g3qrrC-+^D{#qV;g4FcAzjW +NsWvMFdJA!w`s)JjiW$?cFO$VCd_?l6RSd!@WuNqeE24j(r)3{5XMit-(jbSqjBPb`!A!&WUgnr`RM% +-AmUr?9uqmiKyF*cPlPv({nF=*Jsax`9?P0X?ICr^BAtxNvl1&v3-O3YMOM}2$v_Tz;!-ity~nMIi!Ia@4#(lN~K!(!_HYWNXwmYLQMvd;EBYobKG0)AYc%T1(lbR5eGB}s +pPfFlt6lz@M|gJfo2z9TC|H#r-+0F+%+=^KW#p*KXEY~6RW3g0>(=25mwfHfR#y61Bd;C`?m?+Hh;Tl +Cc~qrL%L5|Uz!;Spbt>9$C7=5NKPVHO-mwb{Wk7LTp44qK|)Kx^~L7GwQdUY+E#oLiEM(uJ#xs%a5yO*s0Q3~(kO*5-WV~I+J^wKp%wVLgI1Lt+o +yJ9{sH#>w?3q7qp-qw#L|HTiU*-ThD5{dLT{)9U7|WO4$#8^+w|CwAYUeh1(yX=;Z~H?tt~`lw0)`KE +QR=m8XA)#nv7^P{jM4%Cim_S?W=#YsHYJJ&CB#Y9)&v1;EZdF}+E~6_K2s=~lcL9|%#lXk6(csz<~{h +$`1V67HED=2MY9u`EQ>Ye#~x|Nv1PQPC>RAma%i)KA}#AuvB?7xb=d2J{D9#JcApkQW11&2(_@@#=gH +eRP_?p-W7pk7<(MgB`q-(d#@X=vGhz)Zh4>Wp&vM$LIm)$*X9{T_sNZDsOf%7`3Q6#U1zeb<7RXo2#2 +K=QtAb|Z4{G-mtGVa2L)qDL4lmMlJ5{aHYto;df3&z!_e1??szOndp_5=@NdD;IFI~HYCX-{LpUm+q_ +O!A}cvu&2`#merl49)YIv%o6d15ICbaR9du6&1+pl%SciF9YUp6m;0%_M`CE4q~8yOVcAqDJC;C9a9O +9#v+P8(h1XChO|xAm~*oc@hbp+p4xkuMs0e_=o6R&)7^pd&tP}M3Kw2%V~-o4To7s!-W{jcyLEzPtl= +4g8iUv0GKGJSctO3)2UragBgrzj2>>Bwuib}>z*Y^d0;p-J}Gt*^QBsMKPpl9>`n=+f9Tq1}kBzBlApK5k&lsUGir|L0&LIt% +({n`g$Qc(Dps&^pD7Ox$3W&lI6AH(1ym=`oqMC=F|fIiV(o=Mo+z^%Sl;jE&A1PDo23vT~m`h(3ZGLk +&$y<4RmGmQwiz?(N6sbg?E({zyv5EuO>yW9GdFfH|pNZQOeBL4xCv&Nb!)cKf+6368g^eQGh_o@zOBy +Oj|4O7o7>OT~kwt;AI0b_UHmXW}AUU%5Ajffc1)>A{S}3hVWtCoA=Rbhe+(I8auOF83+TBakvYmjjbt +V$*4dkvLywvs;MG1h0_2!HwB|fVvHd$de|8xfHd1c5Hlb_IbO4_u3jfoSF +j{7``%yfi&ZCHB)h{h9l2m9c`0jZ3joBCE*@7bxV&}o9I0cbqTJz}@)r!!39GCk0M2RxGGr&38SK<+_=Uc1aD;YlY_XR~i@= +^(gf#trmvk|!1XxX{GOvJ1si31@vH2;r=<+gV<_)FulaE!l`_a%O7X9c4z1WW7rbZND6J5S@t%Y*rOT +lblnDd8fWj)ULM4R8E>M+@oi=(NQ!jF*JO|>`G%~mSZV9;j|nDv_}7p^Vp;8bV)O1$c##0hGY-1!@j& +GtKbrvHD}K`A}hl8;1#4h5C#%F>h_yLW&|3|>G5tAG=;Ugr}-qAM|E~+Uo)H|QjY^jHZ9q{tw* +ElYnR)gSemWJh=DaCPe8mZ{bSX(UW~P@=I|3pw0{buST5$Y5=5!d5BTc$a5o4v`$mlePfbnOFs3}a*} +2_EQs=u8niy;?#<5!vXtv>Iadyc~VYibyjF?0^EKK%Y8bjm3D4GQ_I!r=UB0~+Uq=`UyoYDA5RR_13L~JMO>RVor0owr(*K3w0SmM;> +{Rrn+!XV(No7&rzH?2prC++~evAfi;yW-&{qJHfeA2sTH-1!NS2Bsz>sF?V8Mnk~s{#F>0A?9&eB65jYgMm2R3}5TS&D +qO^WvtYuIILu&-!_SYY&_3B&aT%sSLg8HMSqx;2M~aancB_9d;*-6l`-Okcc({b&Jo +2qs=Z3<*RR>QW`3sH;<^Iy0@wRYQH0--73uuKN)%762kfPA~~rWxQhO9CBRO(AFjUpRvnUs>)#x*PVq +bx{z}A2Nw9-)-PmgCuCX^d{S#q}!Cn~zIT47D4G16ZPBjr64e3j)j7AXzJ9nNvMXQ8Chh-_>;~6(E +oCm13x!KuL7qv>%f%IN)H|3a%*MVytMr%o7NQVB8=zZJX$J8hYMJw(a_xY;6be`D6^${(|(9&JgkSu) +f3WIPEyP01CO;h25$}2=H>4&=mo~whM5wiCu^M02PHcY9!jkutQZu1`&xq7I|mevmq2#rIEUPn9>~HT +H6&k&4e9W9(7GD*OzEdu^eIhmKA(NOZ?e9cFL-c`(NMyTL;B--{lrC5Y$-?EA+yfOiy0%s8 +_KAsRGk6WN@|LDXmmI-qkD}+c>u{P)#mtJJ!9YUx>S64i@egq@_!6d+qeWVctr<;$9qM^bz&Q3m8(5m +aO5CT&>OmPbn=(b#LvfSPA%ybCph}l+w%EnaR^3rBBzvgc1Ss1{AJ?UM9l=_wcSAfbY!}L!2!?@0=Vo +OIp+nOZvz;5lFKoLSCtVV^(Xu{q4P30ZMEm+mI*PsuF}G%ui6qEHM?`P#^>`+QM~Y+H1v%9>6IqyrZI +ZyZxiU>B?9mQ%Z@xtkeUOsAJqbqiD`~!;;qXL7Gt72>|BP<{0|pd}+A(sFZxDwSx4e>v)rKOL{2hHoI ++1bR5z9IFeUG%krB&9a$X)o&*F}*KXkVZaiNb9cMB-fZUwz&4WU1R%`VOmHO%M#|k1@s6T-|jUz&C_lZGQbVGrI?G*s`!;DltZkjU)n?Mc0eO;hFldN3b~@Nh-iGH=E?U3*$k~2d%?3U3WACi@$ua&TP5wvoG +MigOfh|$!-qMS=`st{%cS9(2gsLt#IeJ^TjYbuJ0m-=?Yf*ctl#or;MEv4CB~U)mQnXlCX5e2hl>_9A +^`ReHkNVqit{Jtj0n-)E}a!{HV+C)N%B98K=~7NNJQ{p}9hw#guZJMGHYd?5#PgDn^ +IiEGq$w_TZ2qhqZKfiyrXeT+^)W;e*}Y*Za5szQI2?wf<6VN7G47nvgY5Zx@=F3ibxSFv^S5lKzl{u0 +G`V_hs*9dh46bVcZHxmPzH3YHzd*)Z*~c6E+aAK7Ku#;BJ{9#$g^6n?E4PF8yxhdNwkvhAuQRp~lxRqantlwhS*W%R@bx*C{KAh{aTRCG_kzUhW)268p?0B8 +wzxFby1BV%S@nm(30?sV4#oR5_$y-)88H?Wk&|Rs^bx6oZ5QfP8^lNwfUZnq_I-|I2{zIbffF`$_YF_ 
+>c{?l`V()1C)ku$NJAtpAca&*cK>Y<(nJ^CFvN%vW2I4gP@<$rXR$4|0S!DAqe~ZhEXM7Dd9V7NhmVST9ba-&}m8K8C +~9&iXdKMI%L|5w@Y=%M{4Hcbl=^pD&UpQr)NgiVO=4}ao-7Eo#HoixxHdw@k@?!w6?2tQb4z%&|((FL +6{@Ki2j-HcJ&Pp@h}e3vBO|1L9~ucFd=Q1>~sPpoe)VUo>2&+7jQCqwrGR*Yi`g*R`FqRN%(^Z$-e5; +s_jxjxn4My>D9&`Xql}qKr-Yd +uTL4J$t~iR(Aqq-~7y5cVjK$i-orv9b`A(K~u%DX!XS0O^biaw$FIgaFn3qJ1=!ov?ePM_3_U!*h_`e +^)E4E#}lkK(vg$bUlM!z^|ndlsav;DARp=CiZebrn}G!)0BA4aVx`Pu>UwySsgApr%1;53mKS#8E?jg +q|w2H`PZ;_N#!LiIV4dLFo|XC57W+AiMd%1n*>u>RIxHj_DENu8y)aU~Z%p-3E0s$9PN)Ns+|?|V*V% +Y-oy_)1FqdiV(V8&qCZY8?6@eDp$$%f+V+Uk>1A#3No|vPZhV_^_|bf#0Y1bKe{jJYmacY-^;U$)Lgt +Thj2}kSNO}Ok5o@!;E?J`Zgfym8 +vNwd$=C34}#SoV&9$|>Yi^urDg#sgH%wkz3|W7qH$-_kNXwjE(Lme3X1b+@<;5Z7#=V(3q4LUEd$Yx1 +_@90zI_@pNC4WsS$_yD&&Hj@W{R;^GOa&M_y=LOx@GnjPQep}7W{#Opv>zX!>!qC+9O)tnPzcF0~oth +u{<7#9fnpl3ge)tOU0Z4rzNw^p1y6G*)`qtec$TDXwEAvHoGZ9C4U>U+BSYjVAn{Vj(YT@Q0-l=9R?|y_{)iV +nB-N`+!zKu$(Hqbg@TMIntJ;gpcO05Feb?r@h$o?AccVV;VhjsPuQ?Ftz(h|s$mmr}6(8PXjF&@+Uhd +JbsO>VI8XFyt-B}w@)r7n%aqBoD2{@EUD15AC1E8fUS>IkObM3R*$##XAM12 +bB0I3D7m4u^)kOc9CH)wU~n@H~@LyPhpK>+zycNE=TuFE}`qpw3)XU%t7{_PLzED(c#F^RDG7;Oz=`S +mU@@r$M}oC{p*ObH1XlX#+iEG{rpZL7Ds(I#DzcE_UC=JeA->C(SuqYJJKBVtLKN!`G{aT@z`HBa2}s#ty7~85bP#EZsNULxw|@K9+qdt2c=LzbtCw&6A5cpJ1QY-O00;maO7>PvfZ%^W0{{To1 +^@se0001RX>c!Jc4cm4Z*nhVXkl_>WppoPbz^jQaB^>AWpXZXd6iV#ZsSG_efL)o@rw;OZoEZ-Zi6i< +D|XhXZ;)gJc{0WlWe;{VW6X>aXurN^e2Mqspsk)HlIM^-q}Xm>e`q**oXlxDiAVEvM8oMK{+r&3ZMWK +#?U)u;ZlxBAVpA|lN0s?1l@fhpx0J)yZ=FLYT6&4d2Yz8?_H4Lcch09o`f!bF&6mB6 +tZlBsn!MB2~~Txl{;(rIA76}9V(?q)@;$uvfe0}13Z;EYsVxU?SswjE=r9c=$q0oX;+4Jk~+o{{~l5?nehNa_Ra4%*`dglO~?YeXBS+HqN{9vBcyPm}E +PVwF)ef2OA>Nuqi7e2-Wt4KL<*WRlfhNf4teE3|J=I0$B=B>oIUG@MK)*>k9VJjv#xG^Oz(p@^1Il1< +{(G)ic>N|uXs)F(=r&k_eQuZjF7aS6)GK*@r=kjfoff5vzn3RDH{#0z80IZJF53H&1a=6}sl8NEI93& +=&#dy*w-<9qaKC3Xkf_tSL2a+z-rOP}|9^xFVILVr;hK??X-mY^OhV{DIx#`zFBi-5iRFc{q34t~8G5 +Uo;J-g{^?fB26_q}^(j)>PEbcf!6cL|8TqF#JWMRh&INeB+34CmM$val+xh&|BxRZk5P)Qu7%ZA3SOB +%#NNDq}qgJj{V-64WvHB&uZo6u<;zjo3dXXycdjf)ptd$)7g5qcoGC=o^IY_QbKQQvHdreOvqQEZ(9EVP)h>@6aWAK2ml;P_E +s>7j3jX<0093`001EX003}la4%nWWo~3|axY_OVRB?;bT4OOGBYtUW^!e5E^v93oZD_*N0O!Q`4kBl1 +K0rV+IiTgF3bS3ORg5iWqTwU4bP21OH_$yNpzER71U2Z>-$n_H&AY%#}_ime;@J?v0}xF$p4d1e*eGn +<>Nm;`L=xXBL-8|gn&OcrM?fT8_FV}bF_EmZKooD9yZ?7NjUtIli{huCtbNl +W-zux_Fd*?5CaQXiBZvOqn-SySO^~>_}d!P7^ue!S9S--r`t6o2pPrv%C)bl*@h~K^Zi}L8roAQ)v@5 +|Hc`|G>EUB8?&`RexF!`;o#zh;`B-hcK_`AaE(xw`w;^8L-5zuw)vEuWW+@S!~1mhay`yuN++Vg6aFH +#a}uUERHB)n46QUzhvaR}X*B!2Vd?-~L)&T)iuI*Dr7G=i{>w59R9J%Rk)S<^H#~FK=GG=TE=BdztAl +>WAyQxAz~{@Xz1=P`-)8!<+`RL#ue0 +{~ArH(%{~og{KVO$$@2_9|`sOlE$hGBPo;?5O?|yh*9)0^``IkpepFaBb`Hz3hwOQ$Wn*Ezkx_SFcOz +>sNtnRMfJ-pAr^MJoRe)`ov=Z;5TKKbU!^B)=g*H4~*`}ocGeD%XOkDiwAe|Y-+c +h4SwQOdLH>yJy!V*cJD|Hl%)$|K+2W+h)`X&}tuHquUy#N2& +!#B6@{_1>l%Wn(%$8z(kyt{q4EPubdiS5Mt{$Y3Xl;8IEGDiR6i_5asTy*vBU*BX4p5^whZ(ilmU%$D +%y}K-5-rhfO>0ch@-lf&Ld|vxfOZnkh{_h9_Vw)AlI^PB7Gf%zK~%;f&l{qNhpbsoItspa#}|5sVQ*iU})hkQ_%Ha>iCwh +tfFo)6kGj*lN4A3j*Ne2_=Z59$}o^6`V_aGt!p20A`kGoJF4B`*Kd*VVrM;nVv0haW%7U;7-y(>QmA` +;Rxi%QX!B59QOouWJA$zkTN8>bg%~djCrx@ps>P^)rsp*8hQ3&9U9Si>KZ{;7@OVeet@y4z7tIakQs@ +dU|m&ci^YzKYst@t4H7b^rt7!oo^NiBJ4-S@#Qnb}eMWyx?ZYp&s08S9d#)-~fA2k&en&)+hbtYP0WvznP5M`qiOEyG;%gf-)8y=6Ym +w&gLsj;zJT^Xsu@NPAw~w@mC<^C`o;$a-a2Mmt!HmM1J3;oh^(OKxtNI3FAtS5~f{OrmE_rDslkIdj{ +>9`(F4bI7KYy0*)@9eLH-bK%}Ivvti+Wo5v5bURz#nT5@LEDej4x#uxk)-`*yWWXz%o_AhkvTH8PQ%A +#qu$YmH7{kggZKV~}Z%-}kUyK6QXc5nPMa&zT<8BLbB)hr)25gQm-Oh&UDdE +HpD7JD`?JHu0BAuIFDLiWpxzGSd5{L1SY9AiJQ2~50>Gt*mRhjETPp~mp~wz>is5LP-jp}KiBYq;hyr +L)yRpWK&Q>dp!*nLzeC(`(poUUfOLk!&#gmSuyn)r3@O{QVR(5n+uA-j 
+W-hK8Uc<0inygap&6{G_*_7-8yVvvj%6e@li?QPYc1TfGTZ=XHd=QVyBo3D0%<{!H^Sat_ikb& +D_9v@^5uZ$Db8wl!II|W7BObiT4c_@F`_FYNkIA!E{9%a!mCX@il7WPbVP~v +v{8#}nP3>{|;0%Qdi9G}0%Y_L^aHsLUDIpTyB59@e9#=n75e6VmIKFLzmm6cm~`+nf_dHcwsWmArP93 +uc!+=VPq>Dhph$6l@rCeHG8z#+Jqacvtzz|H}rbpdGB_)#nlJFF-B5nGB;v2q9Al);q(MlLMX7MrL`y +feOpPi8d16s!$^Em-7<&n&UF_%aTj4Z-PmMwovFUcF*k9WbrCJD+9fLT1H^d_F*SfHtgfTZOMtSzUJY +%;aO?@!)=l`T6IJeZ?G@BLl|MD@Y#<;|YhG68u=$MtwQquCoD5rDJJ%U_1idGlDZ0&CZt^HjEHV0iIFV2+ubO# +bMAL&g-d(FmA;KDr$OVC-vQQ%1+l766>3*@#}9r8(G-Jv%rSylgx0<;wnSOID<3GlNo{xQR_*zm*U64 +kpKiI-uUQxt7=yD5|V_Y-e!qvJT-ZQ*m`cq^fnFd3_|`VEa~%=0LPh!xgl;Pvz@8CZ;>E;$5Q!CzUomAi6Za3 +Kfh4T!0@XPoFThn$17C)A6!5X^@to@ZL!cVrq&FPmaKC}o%1iLS#+=3(E3HJAY6#ezax;G5B!h7Qs{w +Fg_vw1<A=MY=U5-#0utva72zXhFv8+(3p&Hg?&{*ZK6$Ozq1QGfg!PlnJA$HV=QIC^`tDs*9w3Knq +owNR7~z7)@U>kBnb|9a0fj%w&y%#ZR`S*0>$Hb2biBXvGef{AO~C{7+)mx2?@kty!9lmZF)5(eh7*O? +1+WLA+mA+9*`L`#b;mu8$g^-FM`BG4Gf)+8#q0nO`&O+uTdXsn@XS&1>MO8!P{2uz5}&8SD{ +01e=>4U>;aKoy3YiDA!ute7Rnc*t!4-MqM-SXLtx1)K0?zz(F$HJDZtuQHX>@SA~DFf2|dwZ*(di0YO +>`(ko2G4?IvzbMSM;$guQnTml7AY(N3bPcf+aZNm2c{c)f16f +MPCmYtys%$y{{s3|84LW*gj2L!UAsZKbADDPTnpm+A#fgjnRLiphs364Z#f>L)LMv`tS?dG7lTTxZ*w +PwH$ad@`987?T68gGM5B9Y#I%B2DKd^Id%ipq){FFIm7cm>Qu&`8#2#nAOVb-xrSsCyN(cYMR%q{ma( +M=nJRuVpRQCR0i*UQ0XdlKw3RDNcl51jz<|!+{wt~|Q9GcsMJ1%&W +3}DMH7arJ*5}RxoN-{)E2@AW|Fv!;Xz^INIQm|4L&;NOeh!WPr2D{^2k^TQtQ +gRVY%^llR(2k;)LFE6FeV68jm-cnH>j+ +u%tRO>Yi*dw>dwn1Sbsv_$y&hff`??>oygTE?5!jw*zbX9Nj7lp17CxsHwbgg9O;5p!FG~;HjIM@l3S +J&aM-f9u?zAdvMbbqOcYB7qIV)Rbf~gEjSR99P*+~u*uhM(vME`cE{=9nSr}Twc?+}yUYux%QSp3v_q$kFRCpKI`b%T6-BfTNt +4>+J8fPL9eu3=eLljhvlVUraTFG?>*1~-UB6$GtP&w<5|c*gyR%NzT%HW!i{PIw_cRVY4`uP=rZ!tEf +1&wNOo7TC&0K8`yMtos1Iwn{W6BoM4CD-gS9NRXv*xWVWUfRENkz;4`D7cf#GWSHE-yz@}?J%Fs6kCR$r4PDj=uC2t2^E7iND<;R?55g#HwWA^xc~}Epc9F}KBiDnTjsqw^J +8q0D#iAu%06uz#^ua(6Bj~tsZ!-5q_eb0Wj=L5~{8&|Rz^l5LO%tI9N}@0{X+}I{oC;@JAN+7I+l^Qx +kv|c;7HoP4r?=pA3}}OBqpNIcOfVB5ZLEZVv+ACbAUr@}3$$rwH8V%xSD(Pr;N(S%Il8PITmS+P@F1K +EDIM0X)w*bNzrjO9QBz*rr78JN$YJSs*0oKI7Xu=uv|YNzR5~1D7Yb40I&lCtnVna}0Bv~FvC2-ft|# +s;CBokdnMVwpG_R8$;u;0JZ&R2<{aX&jC|6(yv3ZWQ+2 +t7y^`~wn;7|Uc_%C(wW}w=r$^474b&wP_ssFjm_=26i}sT3_PAn@T)=@K^EU4kIO>JqBhNj$P50i<|& +vi&#z5((UjhJDgeu-bf{Xk8cP<#7{Z=UR|#iBgD*fygM~K5)G4z-6(~<6P?{}La5#NCRs;kP4znq|1l +%EcxX50+A8}dq{DviG7g#DX1ye>Seur9BnGd0<4^e}N27ng=$-s~=0!EU3j10uyrfdXb7Fnzmu#fC=B +@&MAej@~cYlW4neQFv(>;~d>bnUza3I^CI0iT9|ll|V+dswP0O)LVS1ldnSc4&T{g6|Hb`@%XyW*Zt{ +W5pBQ){Nil+>yVMn8>n*tR$S`C!!cO#(g1pocSj}$S?d#$}?Fc404E|Q6fG57~T3aO_Zs>Cqgu4> +Up0$I``3xRN;CspfNQ=L<3A{Wc_a6L44l*EGBlud{xJy;1gyJ_%rI54G}dxJI#Y)DlF{V;I=mlA|YR= +So3vJgQ_Rn^%Pi20QOaiWYN#1^=0u@b@GlHP*(r!1E+N~tDbX9Q9N03HF|54h7wWz>L(J&Oq$oDPSkp +cr4aHcCQcG&_VG0E&%jn}#7)D9;ctP8}LNap*ZPpUu(bCzl-r{Mq%gt%xJPlJ_IlCQ)yiY1# +OHM7N(R!2lh92AmVHQ1&|0-^1uq!eg~6_o-+LI=_t`@e7cC_<2b!(FWX5J_sC1x&(iQnSe4>Vol|y%O8KHQ>M!JMQ|+BNjeijOHW){hB`hXn)??N4Q1Xe(U4A%I +RBlnmLtqWgp@iqD17AqOM+SLX`49;3Wku`2RxW`tufi8%<^Z@&7}T<;9#v4Vvzmb>$mK@Cg#8!<2uM#w_wDe{izXRZoK +KbIQilPwC?2;ageo7$c?VwIG1`i2R8pso!d&R1h-0#bP?KimW3f?#kHLWKo-~uT2b=`zInb~METIE{4 +MH1&No-(5PI({)B}_0gmTIsVyuDGCs}GeU(w0-lx4Vg65aM0OrijYWZ?zoSAVG6H^DW(dHrcgdf +v1$f<|#)#kA8N=1Pm27)thV>x4z|cpw>za|7!yG6a!eD11eKRGJ%n%IRN1%$(TA*(2I5cO8JjnvV`a<*_o@T5Uc;IT^a_tfyFo$D!>A+SR6T!e+LB*_b6`cl)B)-)u`D +_`uv}XACnt4y7t`S8^Uau_{=gj3wPM9D@Foz!rr|$|TD~n_)Wxy#w|ys9f1(VA&lZmQ1RkmNtaz26rk +Zl$0Nn$C2n-q&vXF9qG!_X$2I^p%tl$;3>^!3RFEfDZK*)$)=quI50|wN%}MhPZy(GkS)Y>Ary{UZ4* +JJ7-a95eYI+a(9|TkWLE?F5HhilL>Y0K`XNM*9CuTaXXA*Lp?G#Ypz2`k7B8^hrt2Z{b+v5^GIWI0O@ +6rAtAIn3FUK>vPdVd|46sL5tg6On#|C*RS;o%aP+kM5_CCPAXg8=V@!&BXFzV<+nV!rS{?^Qm%;*U*- 
+7Yt~?aV<8N+jyGY0+g>$#MHWnLH$MQ5q99kjT-UXauNq->o!MHOO&WtY;aNP8-Ko;md9*d^@ppwYo}) +q)^`M)lw;}Z8b4L9}y_Y1Vc}pUCah~uM_RV4>gf#wQQzAiVP3yZV>rODYjbh0~{JFP$``?%hqrB-_rIBw^b$Z#T0s>`;aq%Bq;I9S@{*ZBx&+e- +AP?l|D9Lz_t$n!=?zT6@^`{r^`igq1pM^O&pjbS%0JYvg(O88&#{;NH&nH3bu|eNHr^u_|akktJwra( +9%)4MLZ#rv|~0QN?gDuq%iAx){KVAF=#`wQjl>PO_~?OVj_V!&qo!rp9Ika<~uBbLO_ZF64GQr^ION1 +$J90L4>K>FX$1pu7fd>>9GIlj6J>McVQKC(&&Dhvgu08ZdZ#szUD8AOgAzj}V;ykFJ~Rg-)oGKDowU} +stVsbcr(KO&7KlCf4M-+6>z4YZ&8QJ&v%AQq7%V|SPp-V#8V2{E@uyn7v!J2wblMYj;ChF(KP&pXeF8 +ELk^-u2!|WX;U|@KwZ77ycWrZkeih=a9Rl{h7yvEQi*D5NZrVxm`speso-+oQ{LUQr2E@}U-_(Y3Cby +bKVw8y|+Y@joi8e;6p*)&EX6;N&uoT70-H>C(s_gV2_t@S^38;obr{56`gmrc!`dNySl!bGEaxRG5IY +7_-gp}HxH@)DD$K3lC~wHNiN?(PuqScYt$pHfa6dAlaa!Bm^EW$anwhype_5gF8@z0_mxEZniUxWLTWxJgw2@|5pn) +Vb#IxE>@He^5W>HNR3ZWu3C)G%TVajEMV^iRjLpjtHrjf;@$v8|-8~RQdJ+&&;ad2pztWB;);bpf(Ws +5shdbV>JJS|JJa>KS;L$^hLdz@Lru0_&!Li}jpZBgnOCchLH$j2M~FMYF=$*Qc@Ag$J0pgAXmQfk!0s +23DpkHHea3w=i2+6wQQph#qWMKzg +vZ)vufvFTM&36V#BSXETJ%|L~!wF+Ug?SZ%>)v;UOS_b|>6v{HtG}M$O0FlkG+3nPIj!+ZKd0CXW)X8 +xOdxKmHO&6)VS#(59u~3gEHVQNa{&~?%uWVRYW&$9bW4v#NbQnwY&_7AFu#g}lBdxHntXaik^XRH-Fu- +4wVx6jD#0+!rjcAs`X+HVr`cMTobWpLO}ruYV0<9&I%kMZC7it;%ZVZG|BFWPk29yN`WAe<7`tNXoY(9{DooxY +J+{?29I)VsFZr^no?>NDB~$iPU2j%&@@Iv5{$+YCE#GfL}4nSVQ{?15JqseNz6`Q&kGF|e@d6TWQ +p(ssVRW%ro#X +Wlz5OqcWmmwT|og$BXA*575XjS>pzIuuArm}qwTc=(R{nl0!|r!-{`b?n*7N>> +Uj3ayzwSPaUBx|u@Q?QW7oOaUuw@uN1=k-D4y?HZaC(8sjOi^(!#HnT{J1`AA3$L++(+Qh3vvzPo4Mr +~%Y#gEX0z*bFQ=(BB9H7;t^evRy;BgJM__zqmT(Wu>Q-SgB-o2>+_p$XDyA(b-2BCVunsZXr}xvaS8#l9BkTw83c)pkxN>IpFI1S_g`cR^#OE67$n$z_EaA60JORPxyr>*aJMjn!ih3d?GWp4?k +Jh=^usH?>`QRD3EWq^i1XGQ7z;EjUcDHm0aZ*>MCT+wGS|PlrS!NVVN#g!InU*->hP&8)|>7#P1IfK! +IocUsB@NqJFTs*_0z;Nlf^z_H6E)U;bJqZP&^%ykKvB7-GIy>1tR!H2ph +m#DO70w8SIAv#h`pg+-xeH5BY4_mD?A(NA?6zm?QQ(>dO9Y*LSqxIp)5C5l@(2%|nsPNo8EcS`KTGec +@-g6PUB`x+Mo(>X5~oKYJdR;^fJT7y1_Gf{brz_tvxW}wvLxx@7E6Ca#Te{qKUy_I1(>azlQh$AJQh2}Jbh)i0hVjS2)(Kt*=9v{o4GyXrEWt!N4Ht#k +vkJz!Xf*%dEmvq$l-YrRIpi2t5Oj)Mj+I{;>qt>1~)V*AYkVlo>}Z9VFM6?+To$SN!mRjK#=0JQ~?)S +wY%2RwQ7nfqs=ss5}w*IiMG$9%NjP&$v1jt6Kg>E=rW2*~FeyesVtSY0PjNzR+TvbuCF_95^+!z) +zS3pnkD+dlwSkJFHAPiYK-xO+~wyW(#q=QN$D&eQ6f;?hE^tF1v?KVzSgiJID0LlsgGB0Fdb?ukW( +UV+-=FwP35HGvwBfqV^S|u3N9`9;7Kv`^TAprYczYT>*;Ab +V{$DI`R|}FNIySsSQKMCIQ*)U1fUJgJeMAApp|a(anRXfxsgbw`e|(FALG9r@bu2D{%Pjea83npJ=+nzqQZ0YIX!@+GH;Bc5|`sS)?YXl(tuhFKH5MYr=YBD6Ja*U+y?8{?Y>^Ifsi)$E9m?~BDkpV84LjwbnT?QK&)NGxIot1+L +AyiKAPcadMbCsaDZ57ejt3x(tg?T=FaX?1fQPf=p-Y6A-HZ*S74u6Ttg$fxWv^E;?H}Pb-fTPplyO9U^uxT9ok1YnYFb7k-G|-8e4YEQU7M(DU7#wz8+a+B&2NKC8FPmd2d +nsVz@s9)6R@As_Jsn5hyb?%6NPfmSQJA;1m@zjlJHM-C`@6aWAK2ml;P_EskB%N;%g007?x001KZ0 +03}la4%nWWo~3|axY_OVRB?;bT4OOGBYtUaB^>AWpXZXd6iV(Z`w!@e&??kg%^+r4XIALUexN011TyP +gh5rF&|;5a)%7mgT_!ziKgC>|u~fcoQU{Dt0#Z8n;;*fLG +6+(<1H#U^Kxj!N@gDkZwcZYYDT+c<@ewDb~@_x!}l?AVei$Y0<A8PQ}4rvKS6OZyi1W%G!LAmX6RP%XOLx-nAz9Lx9V(><)@;$utXeCX0X&u&YsVxU?Sjp1&q@tLA?dLY8XC~=)x9H@@Q2(_Y#l6g ++wGmQ9T_`hEVYCW^<$d50Bob^niQsCPsr{?3C^A7B=w$l3+?d&LNxk}HKH|B<+!L+2Mma%ucO81>2g6 +)@`TAC4BuAWdmFol``!XucT5%W* +WP*>XOcrh_g~%6yVIhrkM|X)B%>W6oG&qe$Qv+10-_LuK@4-!C9%LG +MTwq>b;;vz6E#Y~N4Q1Ko@n=p+L6=C;?nx$eEc=@BhcS +l)VQklg>`5otCWg*7F0v#qc%3lWwL0}OxBL;v=D@Am!&PlQ8JI~<845I=_AIFEfRMYffiPtbVpNrPu{ +@EjrKI%IR~ch;<-^)Y@@DKGoA=NR6U{cP_&W1K7hUDYa`tR~a@!MM?AWJ)-9v>$-2%Y3*PI2c%uf>tY +uyR%xg9IK*D*Pkdcnh*Z)xC)o3)$YE;wa+dfj;*gy*2fFYwq6za3jCwkD-ksP4k{RySzUCS?yW->fn{SUubom$MZN15_O(MTE#|PW&etkBv2pI_*v%$Z^A{88(pYK7dkAmT!mO +VIB*okX05#M7HBFXAjUD$I3I=txaBOkkICkZVv|Z9sWZsLp +v;ogN>*28q_?12s$%bxUiFy46PK&pHyu&$G11<7o+TI;Q)ES-T4ICv_7i^E_UvzFL7RRb=XBQ!KL7iV 
+S91B>xcU|6Ny=^Uli^O`xQyJFIRyKs{K^x%2a~qnAX0WwzXCp(;83N@@sQ{mr*l`8%W0pBd3_R_J(WQ +;+k;EkbGW#KZ-;a%Mh-WPSY>7hH*_I!?-4sAzYKlFs{jD2-hSsjB7F(!ZnEuC +3C0HkyuJh1f_Lf9g`a1{=a@x?Dln=KL1VJnTCuEIqrET65iKpbjP-)9S11TI_1!t|`A<$zfz8$8o!5s +Kj1CBEgDY~=-BcIDG~xJJanqyJe^$6OP`G3>Pud9Guxn{34o@=U|!N}decVv996_fVcfHN*8(h-WzW; +er%ixT421we4dvaicT%#v)w(!KdN%2CULhyiJ(?I3c;RC?q3W)D6}o+6^~ugS1KCTg0@&N@SpxYht9G +JXnk1?(4+_Q|$!){)aNHME17W!=9M7m1Ek>2MR9Ja!f1v0D_rTBEw88lOd*Us6T;Sp+m83zSxQ75dYu +sH*{KIAJ7b!KvD~@7v +`2tZe7NCyp!c9{5bn(ch4C*vtMc*O>Sf)ARRE!}Xy{u*wAYu<}(V_(VDWl+?tG*pY8D*F1VGix5L_YnU;TbS*DKj3Kdx8AHvFb|f;y7!qrkF=R5t7 +!ny~44DiuhD3%LLncFvA(3ImkjW5ZNMx8XWU{}`Z1W-+_VzLv^7axL_VzLv^7axL_VzLv^7axL_VzN_ +;eFMv6Moe$?d_u;5Bz=R+jd>0l195h(x?_BjmjV?R|6p=jW&=3f9okknx5-huSlL*kKA}$k-Th>N ++Nq(t{^Kz_=MlY?C2B1C&|iB`&*G*vZPafp1fFw@JS-W_#~6%@aZroUg4!>_4G2c7@sn^11v-M)MA}N +tN33Aqgre%R73yZzP7r&e`5J*nqr;fZlgy#LJ^6tIjkr~FZMHt@>?lqp9X<0hCFSX1;c+;QqeT`gStf +V^r!>;l4{Y6se`=K1P$wuW%^L@MshMd=x0sS6>Y~ic9$0Fs7mL+Td1QeovojnzW7k_lwDQn9(b#Cb*2 +0AJ$ZwZaz+0=QE4{6sp@H~N@sxd4=^uEj^mV2GDzva&%jv43A9%~@x}tCUJaN#=A1a=5pelm{Z< +T?r3|{Sds@`&jsu%~}a)z!LJ3mkLPd`(Y*@3svOjl;LpRHF}Q@&xUih0njNE)WDn3-=c@EBpiL^?bo( +aufp@6aWAK2ml;P_Er|0ik;;E005E! +001EX003}la4%nWWo~3|axY_OVRB?;bT4vcb9HQVWNBk`E^v8mP{B$AF%Z4)S3K~dAia1{7Cg13ir^O +MMMNYfligr86OycD|K6l)w^W?dFz?O0_eKZqkw$M^3)6cJ3A@+r`eXU9UOqiOZ?4h!WY`H?RYspK+{ +VEdyTA7|hm+Rrq^1AKcC=a@7}AOuvx_*`RHm0Rel>+FMNYEHH|qHnZ!v}U8JFy|QJF@6aWAK2ml;P_Euc&qVQ1&0000P001Tc003}la4%nWWo~3|axY_OVRB?; +bT4yaV`yP=b7gdJa&KZ~axQRrwOIRa<2Dlh-G9ZPK>?@rx=z|0u-@CnIgXMTb?m^8>*j)-K+CjEgciM +&l&t&L-$&|g$?5ih6_AI>nc;9A-^}Rbr2e5x=VmaX;h^t~rVd>W$Nk^w2<=JZgk~E~V=dOgXOjAA$w> +rsr~VXDGMfA1h3f!2-iGObhqUA&_d=z~18KXFN*MSe6d=3eFI=h}*Q8by)`$lEh=+m4b{tFFZR87&g+ +loOUiD6C`vSpquXMcbY0g5vq{ZG!tRBU*k+s|7s7*-C{wbYzI%g13OWvlQlr%B40Zn+o^$TCdH|Z-s) +M63ge$C*tixr|frl09v$h8(*dPmSOq)^fQKHMnZQcO2{{4lA^zRK=vEtD7tCY55Lncf@gt~BQqs8#q1 +%|FmyMdUG`G+&A!J|C6}iTTT4lm`D+EyZeYmLh+N`{gj?X&c>L}Ckfr6voF{&4rsFrGr`JxRDrr=0Ct6XZ +8tlJKqA4V;62AP^UQMflNzQxx!3BwLUGTKZ!!yB*JG)Ehm}AHB(>H<~?s04-dBT{t+FB(^&#AQ-pO%n +$ca90Kl~N&glcz01LHFnciCUk_#@XF8?p@q~JG-zYf^tb4_QK(2k0Yy=#1J97{onGTRQ}rD7c99!Kag0Fufi6+YJu26cvid%DY<`J$O^CB +qKh*SG5=Y@1yitJixos)OQm#+E>#d3(z_mbJLhL-op)!ycFu_AQ>+|&$QfOIBlw&&8Y_*Y3HLjJYr$( +Kx2}W-O@@e%7LC}JFK5wCBG|?@z?f_qP(tR#yzK;+c3Lf9xkN@Ei&Gxn+r@;C`wH65{62jfCO6IR4wl +1s~IyY3V(#UOh%XVFBgGb@R0{Hb*;&Zp=v?58FIHoox^_X;bDggSuaM!|WmsV1-&{8~ezH?dN3E@f}! 
+V*E37D`FG$>eUs7CelVJbLlizRl8Ga`|cR?tbXF)4_in`bhuyEnd8IozZxHbL&o=p*y>sIMdtl@CpQH +o%7#_5N7T33;L6Z7Y$3dAfXh` +YdrJ&m@9vyT1g_==>-aLZAP%}_68M;rmKZ{8%#E6SzjoPnb^W-6#9n&2@`Vx|*l$OzYE<+U+ +1IER{Y!Jkxg|Y18(*bFyLSzRR0uvUsnGgj(6ncS-&Uwds(EZVwW43>fimFW8p +20h8I6~ukPilHe$q?&#sV5Z2UY@mLv`I}y_oBu*Y!J^?lGk+1Eb1!7+3FLdq-GKzE#A?249?io@`&Fu +T@tj62wavOR-4UJBD)2q^P0x5s*Ni69Yz1twTD8zzhTMfZ-MCUen)e`HHVxVS$tqr~x7{LMjjzUCblu +PBr-X#_0eJZBv-qCwE8;n@<;(Oy{yH!bVRcKZR?!IPib{&tl)aKwQr!_amhHF-&6G4x1rF4ITb$-wg= ++kDE8UQ->trN4XEr?Qv%nZG+pRPa>4}BlD(z>S<;LQGt-P7>7(ln_1=H7Snu1Q{TmvRev)ZaQVeR?o% +DEnloU7B4uoL?H`Exg(Azm@ZOdswp$3wT@o4Mo3l{3Mh0FBVK+0K~J3zHEy;COz~X_>*{K$?B3r^jN= +bb|E=ShI(KUt5^eO!34dLjCb*HW?4=#G$NiP0V6;@uV_ceA0zy1EGl_KWy7^S@{r-p>D;s!P+{3=B^u +VrZ6+9n3#YP7Ola!7MNWzA5wyzMW}=+>f{wsco$*P4mbvsA|XOAcH2K|jFViw>}B&*ApU2xbI0Ub;zD +?axK0U^NY6Vpx?9uN#u=ylWM(2ZC&z3v~C{t?+-r)Wj%jH-9*~k?*!{3<^^Q|zryi&ItLxW +u{%wbh#xwWM$#bsS24rp6K}=_Dq8!?FYE3uBvxl4a(F^7HKloBh}C8_keE`HXQ +u!3?HhZs`mb+a@nIKF_hJRPbo1Swk$I{PHSFQqMN3%k3VmtGuWrCLLmM;Yz2~!gCYMKp0@12k_UN8*r +s5?Jq7ShMtCpM=qV0co+Rem6@O}&w^^y-?`np#(*v`awOVn9CHybg5PF(Eq4KmlRk(-)_5J(9*w>B!0 +Z>Z=1QY-O00;maO7>PFa+w;D1pojk5C8xs0001RX>c!Jc4cm4Z*nhVXkl_>WppodVqa&L8TaB^>A +WpXZXd9_z-U)?TBDI`KWyN_bWIkPD0a*{8z*=qe&t@xYD463#K$T6mi +2tBNc^UZC-2<+(Z}wwg>*fW4Yprm_ss&(shbA8@eGhliAbVJn22@m`Ww=#VPG85f_-qLIppZ7h!`qlwfRyGW +0}k|e5sX16>g|8+6+O6HDG>q`-&xqnH8uF_uy_s3WzptS@cIUf;A@ +*H^7Su3JPCA1|jqat04)4xh`5ih3XS2A6#l$;_2NTF@CE+{@XPnI&uD22#^PZeL6nS0q=i7jT +ye455{rUXmSMm*j!yZ$h^ySx1!+yhQbq{!s_`X+z{uw-Zj`L +gr_{TnF4=i`6gtjMj8}yux=e!nTNXVXKnpF5#+u3px^d^IngW1&j#kZ&Y`VgSLXSYhVKGJ<*5!}wp9S +rv-zQ3n>OZoP#lJc3A3hMLFy$if5%1gW3ee>Kt3a^S>O6UQ7-x*9FQSxN$`oXuG>tNh#yGJQAsd~pat +*(IY!Fb|5yMC`Erl&=%mn+v7$vozrjp0SX7e#z=5ynhw@;hCBuLo+!ZGmNF9gX;mfocvh#Y +zi1#6p4088Q!K$+y}9G7??`zO28D4yOC7bx7Rx!7b{rhCpKznL6FkU8xvCr33xVQxZSd&lBa7{dN!XG +X3VxQ6641Z)J8v`-bFvlk3$|mIMuJDHPe66K=*Vr(|t}3iL=mLMp_t5fBnBO$9(4;Jm^R4 ++IG=30*x?|3t{?~wvRrSk@dSUhK!K*3&@!K#H+jp5B$8`=WeN|Uw%P5tX&&(Y(*L7-<;eImB5- +`OoEOC7h5yy`9W4F>@$#%Kk*ddWiKS{Ze{%d&o;C`9+8S)_{{T=+0|XQR000O897^_9`;4eQRs{e6rw +;%CApigXaA|NaUv_0~WN&gWV`yP=WMyAWpXZXd9_w;Z`(Ey{_bCKNKnAW94~G$U~SN1j +$aaYeuE^V2#P?UCCcV1kp@X6Re$}Cdbcg7%diOnVv+YeclX@!sLRXd2X4oA(*?|?!|}o%!|iN2{1eXD +UbZg5*-=XAJBH5)p$(I7sRUR8V +L7)6%z50@L7&<4%_`o6%tP>ud6X*(M8V#ysa>Vi!~|mJfxtu{e +X8yaP=@=)_Y;*0SKw{0UwF$IyC5XAi8k?LHTMSC!Jrceu^O^K_8KLqGWCJ+ly-;O;}0anM4z#ZU_&8R +G>F44GN3JdnL78&wF8621AG}+)?nd0+@LKgjk{RCjEP12kRdTr6B`pMK +;jBOhxOA7|D8d%PBxSkCxSgozq(jNB!Y)T7>jhIa$ml6j^83|O!T6j-$E>bjqnKzka`jdA-ixZls +2aG0b;M;_WRdY{dd=WfVGV)=N=d@M*nb#T((+)6cOlT&oC{lnmIaajsfO7J#R;3*(#rLhkq;_Vox1#p<|*k)&*?c@;3V?0l}Rlb#)bzvpNrU2#Ef9*Z4zt^KNlYxHMM}b5e +lYydoj~rOpfU*PgrcwOU?CluB1KP`jB6XA_PlY{URum*IqS-L|4 +((1EK@un?SP`{q$G&b)S~SK4)|EkxvEx){>O2Km|t0Ukl?rE`gq<_C)K@of1{U8kZRLyxpRMoApm8W& +3ivW1_dU!YzL;E?+dtSW+aQhLg(?Q#;)${{9N`IA(~lAock9ecT*4j&KB};x<|~_d*?2!pt0HSuo~uFh>*Tp{DI3>P{7md +5R!RnAjqRf%UjiINeL}odYNi&O`LagTvn&Th`J#^j0=UD7IUynNK(IwtZq^o{KjM@Gn37JG|GZl$ade +acOdI)J3-un59r_h8N6=erPsfr^}t@bV~i|Rqrj9jyoTpAU62P_3c$kP8}PG&Pdtpd$L?FM())454_t +BUhb#Pjl^zHkT+7Qq!pkh$$Y)+IQ+Kx +(DAE{dNLT=gF-Q*5Kuy6oM9V=x*!Xtf+sJ!KZP!QdG3%%lQDbz9^s#AZg;E^aLI(Kw{Zf=*e +6nW+m`QEO_(mmkrwEhNAO9KQH00008031s8R!S>wIo1yV0024w04M+e0B~t=FJE?LZe(wAFJow7a%5$ +6FLiEdc4cyNVQge&bY)|7Z*nehdDR>JciPDDcmEYz{H_PDpaY!b9MZei#^BJy*w5gXrb!+w(gLc5M75 +F$cfJ1Yo!QmLN?Kva<@H_lau<7dW_EYJXLfUXs{hDk?|LvI!$G$<8u!TM@V5ICDShd=4|_iu#-d#9{$61J5TWfEp(;g@J1aHh^8RC+2t?<`Zv00u5<^AF+U&b +i-h1y$)U5q=D_Z9DemTo-cj@!sO2L<+$lH8Ze8@cYO_B@31Re7xgBZS;g|B>9;&eFHnq(NYZ0SP>Ew+mLDDzveNMuSXAL$T% 
+E@Tr-#Xu>4&(cWe`ohqlRF&IsjXgJXP^va*jG8Alr-eGkYB_P-&_hIw2Y#SbH-ZM5RmB8qi5E2+`|_Y +dOUAYu(Ry(12Y%a1V)Eeq0^3C*TkX(DfNiY&>J6s<9J&2dX1|5s6=o14mn+&YVA`~=avpY(Lj^{4Lc>yJ +5=o?}$i0cJ)vh{7y9Z=7Lbb(4L-#S=@Y1GNf764KA(E$;R0ljZ8fOOySJiksZJsu$F%?02#+pSi!(K> +6k2zeO8NZChvqpJg*&*@2}QaQQttk7Y=3Kw|h+HgMHd*XPmVtkVi~}cadOh3At;UC$(nc7j+{l-%4;dOV(ZnMsVh)dV9l2fb;aVOx~Bhxefw-d@c?*4) +RZ%$sh|u11stG7x#ZM~ZwZSo<6l9r3m-NU>^4X@8Xbal)yb_eN$S**o8YS?q!D@Y +rSFBCjqlBYQxRGhX2vYI#+f@YlSR=&X8@TSf>(+F2JR*d#sN5kIvG-T8PX-|+Yb1KIzcShu$YZ;xtU# +{$4%WD;9YFDl?5G&u?*Onn(#Muvw%0$yvNZj=-lKQcHCTc$3Pi*p59nXFAN +r~S7+@k=) +?~UKy4zB>TX|$VS^6lXI?ey~fq&L0lO?ur4gqa_}WDV0>Z)}HE=amm1Kc6=nA3yi|pDJ?c-nd%|UH#Z +RJNx&4eyK&U?*^m8$iGg=55~7sM7z1T1K?kh(^hQmHO|jpzHYQC9a4E`yOy`*jaK_*yHeLcwR2ESE!4 +{#R7(r>DhKsS3-x^t>P!oDo`ZU+g?gQXdZC4Co#nv3kHCHv|A$nYa?lOPj#fdi;hpcki#6Ne8)h0?NM +^Q6!}FKlow{_*cmou^;m|pActioS8j3Cv=s>(#(*S~+j52MKzwIs`UvylOWQ1QaVm&o3yomLYx&YVR`1mg5^x)5=xlZ#Wpv+RIzF&w6tGNYJ+Cj?1 +N9k!(Au0!UY=Kx}#I*=m^*cxg4NJgH^I+-mPQwVv}$c0X&3+*IP`$InbeFCCLBu68PA4U933@Jg*F3v +k5bNEnL_QR(vp7ewrCGusoOG+)$Sr82_T+hy?9v|kLMl(0`g+*G|qrHITT~n=~QUZw3b_TMfr4Vy +E10!52MpVfYc2a(zT7=B0`50AecB(0H5h6Ir4I+zY_+tVIS-j6?u~(b +T>V$hDYw}!YM_eW%NkR;9WU*-3P$2(1zQHyG6hGm<4iOX(yY$%%y11qL@^|PP{jqH*%vHbW8D +Rd{%Ur7fO&jHTZcNk)N^M%SQ%7eas`SxKdzycg(y(ML?NysO0+slHAg4Wc$p_-j3mfaU>t-MQ9^zYT$ +aPwTiJbj#wI585laU!(9nHEC8LRQ37f#BIQDyj6%l2`qE4GgFt^pyUa%WM=uE3 +o5g?|8*KWeMHhb6s2?_)NgD$q$-ZjJp`jLXy#Q+=MW#x#i8@=llTHJRJ*;kk)OH1C4`=!ow}n<0AH^2 +KzDg_+mAy!Kg?$H=EyT)|X{>c1i|OU5#!I4& ++0K5KTT;M%9cn?`!1{vtbhcF~6Vpe6-$N&OUz&OwGLay5DlgmqT*(XS}ah@z$0L?l4!n}~W5 +(Pvme%=@lt}Asta3(;s^66g!rUZ?OhSaZ_5$2TeBF7Bypu=9B6b3JQH7%R^ebVodW)uGiPMx*u#Ib|G +VGZV5Hg&a3JB?5Px8F|@=k{_R)}Gi}kt8bl_}a?H*GlC;#>^`pKlgjseE9ge*~I@UdJc#q#2o038!zf +4bVD9OlH!>GxMa`AdkHuYsh+)PwQFCbLZ@VMdv)89=}yukiu?@^H`p1I>5nXs4KkLVP8P&}d0sF7-oP +rg)qM5hRYRPkfxG=(C@IQ9%0SAa-_y$?QYxX7&&s0RdU5vuBMaQ^??M);!^pXc{Z4|&9z^UO$X@rLI0 +i%k$FL9nImdvVk3$@TCa+X7Y>egk@S?2zAzk)UtPG<Ssk;T0|Rq0#DmQZ>b8p94&b#3ixgd5=o +EF0&-M*}xHal#|KJY{<5yp@4@}h{>p$_&9yyc5HEm_YrFQty()NYFw_KOC+b>jTurVgVD8K_wqzU?Uq +QXK&^lKFuIx!CM5zpBFEFN9+#Wga5}`1Yl$p@Knhi@eGONiobYIuBpnaOW=Y8-`|>|F56up~%tSPVjyIE1QZdYLvZB1TC;_-?8_cLjS}Ceb +(T3qftcw@K>J`P&X@$M +a8UwI2<-O!Z_XHTfxO=`&^b9l~{lcZyhE${6pCJK@V!#kwhbBdxBe=5+5bqYm~;zm(Jj@an_?I+dg2^ +UrnT!C850rjrtkNFF}(d}poz;ZB^g1wT-GA$Fzr)i(|uL^KeqUKL7Y@#v@D>H3le43@V*9xd~I@Eeja6RTV>c^b%K!)-)3y +RONzDfj?enD|4`PUTqt?B2f4zOu7OW<@=zSLEt7M11J_{CROg-P9(OPpwVh)~gg)N?|=tum$+#-71WQ +1aeY0YSe|)i3y?|*M#hJerrCBsK$d(TSqlsrHp +6+O{Po4#_ZE)T#NVj>zp^pcK}aJt1u{~BuSbKujMUISmm*$dm0HGxi*ibqzPvdK#_U0*z+poxfjSdAb +BuJSq(`BiMS?H5sE~MFd}_7xStQz3^M +qxN~IQS<^kOJvdalIwj5heD^HD|EtnPK*;gVfA#3Q#S~T)F>0(^^XvhHeN2#Pnk?|gQ-&-sgBvVgpi2 +s18S|fi>Uyoh&E+0x7gZhU3o@RX^!-z|_6{HSO&D!g5jn6a~D3)VYiju>~C0@NxkB(o_#?cw76!zF|S +u~3%DK7(5f_2S5aVYU7@rly2sDzBYM=9s)UU2*>-*EB`&-(Chc*(7p`W;m=<&gH*I~s(;t1JA9s)8SL +cu7ux-;u2QBDQ4&dN|`UXos;bgK{|YQpRa&i~j;pO9KQH00008031s8RszNx2vGAWpXZXdF@&MZ{j!>|Gj_3yH=-#9ZCu9R&$-}==ud_+3n +D((A%5qjUp2quxes9c1qdP{qOgl69OSgq22Ckb-J_?V88eA`|f9FZ?E`)^VX+M7p^)Nt!}Rc=U3MkKS +BxZ-VXl3g$hh429Z%=MORO71mFjzRRI5Fk8C6bvt<3T&>+PiWZ@j}s~>U>TKQ(iLjD2fDguuM5b~kWF +&+nDfCa+?6(avc4TV3ap(q&g5X_i^;o(Gs^6igL-8;DATI1X&9`KM!xQPZ*c*uMuJRWEcOv9AM^q3D} +Ft>bd5~P<10&Rux%m@{H04{Kz)_&xn#t~@NoyBSiVhvOX6=q2O5Y&_yt0?pwBtz*Iob^HzEHxd12rTS +aO;N2e0-~ZbAtekrM4J1NtRVu@!cU$4_t$rQIO~3apU!S?&$|6DACT6J70&WU9wUj#R0;$}twI){e(P3JH2p;W=h>WeH(gMI+vL!e9zW}(2?F)Hs}L|OW`2A#mG* +Wfz`iCFL;(FHwZZwnux+fu2p2Ioo}BK>@Zyp5*gH1?gth68Zd!;!KFTHVVnQ)q9;57h+Bd@4{Ybeakc 
+s;W_ty8xx!iRbIyn1z~~X{ZKhoP_QI2YcMy*ujL52NC8(LO?T!+cOx9J3Bj`WLm>rzwI0!He;?z1_RR +$Ey`@nDUYl>d?7i7zLkNZ55F +8Xp(536p9e#M6#xeZVwC)9OwAFwNRe&o}<0UQn;Fte4*YW3k5fVV`3~!ZNKbc?#~dLN +{1Q=4?~}P&O9t5nikPi9tYf~-10e>B&DYUUkrI*$OTiXogRg@1^nFUx|r*?Jr^_Ey=?Vc7yZt47fzws +NLFNM%O_L;VaT`!Ldp!n;!jiVQC%XE52sHV(eiimVLRSSm4mf&rG{tbN)>FFbatb->i^T{yJ7dLinQ>7poV{;+g>T^~OOrTp6Pc +?iRd9ASbLxrlx}UPdKSwMEszLJqHA{eTz@!+uk@2150l~JJ)chGDxtZv&xdqZ;vkb`>p*HTAN#E6&z- +ojFx>F`X7YnxaxmzylR?KB9ha3wSdNqxRcwj>~EV%yQvG9LaZ^{fy@i(l%=vOmOgCN=1+<;B^G8V8gT +808{?6K0?rLrvN3)@txOCowgZlW-NU7X&h`Q1$ras!0i1VQ$D4ua-4LOd@8%`YML$Y?B*#%>8}f?F$X +1U4FWirII|OfOk5irA>k>KFmpygh)UdZU^HMsxjY;ARF +mA2TWLKqTx!dI*((UVp`JeOrZ|HWp+3mJAzufJsHR!F~UMciDdh1ibUY^~Jpi%-4dpesQ%x& +)*X?>Wk=dz;=yz__WsubW32DqoXypV)dPrFCJJDBzI>awlf?WPmSb(efz^{wDC!q`I9H3A@xi#<>mX7 +5`Pe7N;)=Xs!t9Mcu=2-2QkItHLI&|bU?ojVkrt0Lw&MwM4D09{Au;PLJuCS?HoGzBg)|9W-ANUQ)IR +NTF|Ehji<}^@YmCL0_!z?#i8~x7uK4~l2b-MjQ9o(4xiDW`Md^)>l!r6G;m&{flM_hj8*42AFFhhd3~ +%BcP3U>g4H=*3s&d&*80?%|tytT*j+i9fX +HYpFkBjtq!3wWE$rmrbN$}S@PHu3V^CpgyTRP5pk>m2Hn;o}th1cGqyu@(@wa@2#n3OpxP%fS8o +$sIj?(3_!oT=-V6xRz^DY4NCfq#zk_umw3`iSCd-i3fG#b5!Q8ucR_!METvnn&weAnA+)%0RbtM-KzEO7cFEUS7p7iIBi!!8eB9zR<2uu +e-moZ^mkCXp+;!NIY_grrRnIo4wHI?uygN&1<&WaPgMy3(=_Gtw(>Qq-f=DJ#&QA%amNQZbuv6>26D# +!>#7B5H)Npd)n&Ge*sWS0|XQR000O897^_9|3CYAWpXZXd97A$Z{kJ}{?4x$C0`&VZpi7Xy?yv8xZSW +b0uBqxz@w`0q%roO5=jW#%c(5O*4$P(_+wpC9n7O0h;hos?&N&1d3a(^p#z_hz5mR6aGVwP{Q(_6Zh6 +rs|XBRr8x7r24+gilHwbYu+=5}6f@fkCcvMmOO5>`Nk>FagRj!5Xs@@m00L+Pefdxy~l4WoS$WVh8uqgCK +@VbALCS@y0WMNcFB&DE4qTi6%9TXymh;WcJW~zcmAv6#k|12ctXCh`vVN5xXu^q!&Q<g_ +lc%+<4>}HX#QWjzIj75|y3Pi!y*wm&`YGR1^H4&HygfG>-1Io~EYQIvc&;r-iU{|e1?1PZdCED=EX>L +to4@no0;xyzg(>f_Z(`gKhm#`bu9{)fqN@Fp4V4sPGk +bP=VQZy%RFvik6OfI)=6q&NZ}(Fkj?Pw*QrVtlFu4i+c{q2+_N>$CL@80Kv{tOTEsl67@J@j1*dcD97cM;a0q)_-`rIiUF- +;&08Q!5V>?)cjJ-GY|j^MOPG>^*hGIe{_KCaieFryYAM2hz(TJE+8AEbsTM3}9@Qf$Mk?Ur}}HI~~`t +qeclxyw3D^0fPyNa_C#$B&@8*fXftBIG2a4H3L34G&3mE;*d#I=Q?#dk-ije4Uf^k3i0v<`YWeGP;jP +RMXJJS3CQMc~_O$9G>FO67c(7YLLyTh|J~NgX~@A*$HKmP>2KZIQ>gkzHZcO<>4J@r{1?V+p;~+_1dK +x^>@|2XKXoc5YFwRj1~UmzI>?kH1HAVL$xRF(iw--;D4y@2YB3%i(}e>wrgCgD3_9QUG8B@dAA2&@8S +A5yP@A&+@J1Y(CHkO*z;<&IJN_O6il7RFNf`vNhKS>6Bbh*QA4D%`%kVHjFv$dJb5<0%*Ovs*uzb4m^ +K@Kz+wNTB&i^}DVy_@z0mTk-QFH`-&W{X_@&LaPQIvDy>%%sE{+je$u;+Zb%p@6 +aWAK2ml;P_Ew(pokO_*008s=0015U003}la4%nWWo~3|axY_OVRB?;bT4*ga&u{KZZ2?nO^>lo#4r#< +dw<2q)m>vjL30ukLQv3v1c)|rl1;KY+Z%b;2lw|xr$eHgnOD5=-n-9*6@Gi)K8s47cAX}PBgnJ_G|j%04ClJU*9aDkGw)kKmamJyff%0XMd#^D%e(%{@7Hd~|~@DvLkQ!E +z@yD^r~|HX$nM^0goymx-{c3pS%=sMiP--X+7(>0+gKj@;=Y!w7UrO&00002000000000Z0001RX>c!Jc4cm4Z*nhVXkl_>WppoNY-ulFUukY>bYEXCaCrj&P)h>@6aWAK2 +ml;P_ExC1HmB +-Mm7%iTbXac@dsf+*8;SGN*e*CR010Awib9@fDw&0j#R*Hg)~qICvqo>NYuG5WWBLb^vhRXHA*B&6ah +Z3A>VQn6v66BcuacHh+*i<>-C}p_OdSY-YzfRy?=)vZVS1tOIhF<0^Y{&2lW9U0d8|CPn0j>w^8(|gj +)Abpihm_JCqj-kgjuCqEE7JS_h@k68{( +z*qFZk*ILS5Z)o<6be#cEVVcWmzw3Ye{Qdg=k?CI^92d!G}(OW)-5u@LQihLeb^S2S&+S*krKdZhdgV +-Y>kY<`u@uIs+*%?QY|R_Ln;bcxEoP|(-lm36cl0Bjs;E5VHLs(1Moq5La24IAM+_6RZ#doW+(d6W +p+2Wku(qIHG8B<`%i@=^+OfUeT2#~I-c5gr4b4fKJ`S&MuqHotq7Xsrf{%+-tD^ +WZ9!^pN5%z)L3vbkDKQr2R2Ps(m~4kA6bIQJkW#)~3^&DBMG@W(HwKIq&Sewfk1d=R&BDMayX@fEpXT +#kLJTVD>8VWo9L8Mk~#k>ozxWow8WV2O=yoXlIrjXFSUMcFElWr#X2u*`n?{Chkcfs!^ZkOGrQ^2Pqm*;*l$oeuA$nSXpG2 +uM5M@|&N^heLrkoAk|a2Y$I}6N>Oyfs6T5-XT_&&gHwR)Twt{$bH{{!cahw$Rv;~vMS(SHu@;LlO9 +UH6+%xg=75s`MbKXa<;O7N#Z9hBWA3OQAm&^t;M?{{^CdRYdZyn0hiF{-cHsFPtosVpbc~NA&(Z_?CA +BNp*pl2&old_L=@P3Qsf239HqrImwB_9N}AsJvQN&?KVjyRT75+>?=1W4gchp-UvEw0R%S+n_3t40l* 
+vhPM4JIM5ovKR*Sx-Cm#x)Jv+-Yu71d!BfBjnf5lT_aUYh6mA$!(ih@hNx#69PXzDGi_Z{3P-n4+<;>rJ=zI%h~XNE&jLGz`1$tca` +^$=ULu?FaG&s8749c%#p{RA_f#KpgB0m?&OixlRNQpXtqC{Hm>3B^Vb8JRD9H;lb!_GtnB|f*|EfnR? +jK@>v}+1s%v9kxL(hck+%0l~yujd7`7QdfmiioZAB(dWi=YzTUod4(m+KNQ!hTx@DPBsmjOI?3v5;gx +gyzvdP)h>@6aWAK2ml;P_ErD@0006200000001Ze003}la4%nWWo~3|axY_OVRB?;bT4gXbYWy+bYU- +FUukY>bYEXCaCrj&P)h>@6aWAK2ml;P_Et9qQrImK008(j001cf003}la4%nWWo~3|axY_OVRB?;bT4 +gXbYWy+bYU-SVQy!2VP|D?E^v9xTia6;+17vOuh<_x9H=rju+)OQ$=hFK +}?mD!E?-6j9`&2;T34c%+az;r`KM4t>0ezx7NO-w6wIO3 +Cm27kr~AJ1w}?2!*?i52bPc`;UMz&AVCpPSO{Vc$s)t@EZ>0w@QB(4k-jTwfp0i`q({beAx3P$a$N>dr20lxokKhh%#cN0L%I~EX!b>BOv!gTmmOpNv=SrA~GxR3|CUJ2eqE6KUIxtgF$ +q*({`TWQQ5br8V^;TEiWl432=-gp%W?Ns!I_3f!^~VWwPc24($qOBas2q##Sn^8&|VkjEHz9jH^*AI2 +TgF&?9|z+PI*opzt*S^sE%BavFm-IMoOeavMzt+k0 +DL3DSKRRm)sBcVOdUxtf>7(bS?9WephK{hw&|0ZQbt59P*`Y~hskAS78!#}o)3jVPI^#}?~A+87U7*G +P!83<)LB*2JKTvHu6@Pess>$BG7THVcdvOY;>MA;XiJmFY>4!(`xAj=^8OB*FB7_lMDbn=0ohJ_Ija0W`>{F;iW0$MS$S0kuJRnbs%$Q*vC+zXk;VpIra$Hi +WIzFl%;#K@%w(&{K$OM4Jx@(_P7uNC3{C~nm41W%wcS%JDJ1KGy-d_gt<;a0`UI4 +qUf4p2-xt88Ia^7kWyu*WzgmG(J=M-E|>k3G4OIZa6-w6+LOo5)*fqU0egRPx +T02M{A{4KI+M=Ja;TvuEl|h?HKmML>M{9i>@m!~*n`ethmF$-1oP0|E`{U}Bio4-lyZWl +d>3G-1ZC`iB8r%&$u%H^!}o8G8Q#u +)5k-V5fmScnejsSnc%4S1NL)W^TB<8cRqlE(jN ++$uBJ4xoVcHOka(DQlz5zYl6abUmUy0ck$9PSm3W=qGa;kJ|;sg&8Pm%+P_sL%9@0rhjSWP@mjwROM- +?OyI<<5VlM=-~TiUh@Bk??T|d4-T4?<9o0Fg}on@m~6D4~~s?6BDTG5m}Cd*!6_kx^7KbH>_#vrgh8e +wPvi_){r%4&07oB9qX>OXx+1xtYz!I^}w369$JsA$JP_;srABoX}z*uTW_otYt?#dty%A^_pvGKgY_{ +sV@0iXYs30veYU<>KgX`bdSiXD{@7w{AT}5qiVep`Vpn71v5DAZ?Arf-Dn1lo*$U|T5?cgqE3s=nsti +6{0o9+Oo0Wp$P`D08+-5W~GM9wpIOfV`#j_+?v}a*?pf4FV`G3jpuP>}cL +CsG8+X9mMjY^7fd@4>MOC(_>Kk{jrKc|iZ&8(wW$pU|&q&-4ZDl`*2N8FR+6@ +km=Y-e{kVFIulYu1{*C`n)z_3~STItg&P~)aH%X+Fj#wafps{Oc$@cqB(`(nS4a*86MVUMIcEDcwrJ` +c8Q4-Y?-J0SlpMQgEImwDz|{^v8HN|v>|O)8!YbrQ9+d0AkO#u7#71tCtgax({FDI{8RjXsD02rY4?6 +xU(^P)8SPGS=QAsm>f?+OktGaOO92uh#+}80u-|*(fVthW&%r#;_j`M5u9|PnHS?YM-uz&GG*vTduA3 +X?C-bxU#r%2m%4Y9o|7PFjz~}o<{ +k5{xoF-qm&|4J0sMVvJ~AJhPt2$0GjqrsF-OfYbKIOXx8WXhX>$Us`B-hvLub8U+-g3yscnxU=%yt+@ +#X)WINM2Jd3euA=SjH-@fIxZMTT7JN8@*)M^RVBd#W#!0_FI7?eB^tv +yoTaO`;FH|ZE_J=><;atNLA2XRM;y_gPAS?SW=JG5UM>jsJCa7fNa_$bS76Lo0`FMQkCzb@(qDIieie +1=31{zEBLv)8r5}Gt8^x>dB1&PzeNX +9a7(k0^yO_}5RP`!Q~(x!{dj3uD63)?MAj?rZa9KtW<43tC +Rt+@(5=tKm7>95b{%va;B@tn?CMjxcJK8W=>{WgtK{Y_8K#5dNA7j*J(3pJn|WTCF`MHC6*kCh70X5_AIpiW76;@t +IT;+#txlrf4{clwnS+BaZ5aUalr{>``ghZFin(@A4619=MY-W1@|usV4FfWNrdsek13Phs8|%Amdj%v +-3b%ytQR;T`VWE#96xjqc6foo_t$r?wsktyyct-h3Rjw;#t`8<69!Ey!!u)9lt`Y +}(m;jNP)-*eu%8+0WHc(hk_+s6zq(UC*Xf!rtXF1Nk8Q4VaV5GcGYqdR=M +*~>)h8KK<0h|nftk2^}K^6?)r|GF3s9vP}a@_z0+`+C}${BLtILtZzjv%*&c$ +{hcB96#>t2rP2|xlJ~?4K}W2GfL_Ju~<%N;e;-x*xr3gz-z=M?}Yvb9%yL~og*#nmAu&mNubB_0RNROS@^@bdI)mda$)jmyp^r-&sJP)OpMoc40CNUO!I7v%N-?qWO +-#yzM7i+*~aGywnn7sLa|UtNqB~Wdw<5WFz +w5niEmYwUfr@Ry^%f>-?AuO*s>`7(dOC{f3VdX=c-M4ce&BvG#s^*T|fhV)LTTIA?j_S&JuNwsPjZ! 
+AnF~W-X-cHQST9TiKxp&y-(B!M14rqM?`&0)F(uJO4MgWeNNODM14urS44eH)Hg(3A?hko-x76=sPBm +So~R#)`jM#XL{*6zCF;*4dWA%LNwkkd`$=?wL*F2O&`@fe`(rL3aqd?=ONUihbQt#7vO!vm7h;(6k!i7<%Cy|5HY|G7QF7c# +lE)cLsJr(oS=-Jt{eOGMPXCiW1rh_C+K$O9QJ!&{B#k*p)Cl3F4Ft~ditR~hic0qgm=FzNB%*#?0;G +5`e#oqUA`TuqbDHR7*t&j7u@`1srfYBuK(pt+jr-S(?DJ5w~aTx)5>ofQvn~}0S_)dxLootP)h>@6aW +AK2ml;P_Ewc%k4VM<000UB001BW003}la4%nWWo~3|axY_VY;SU5ZDB88UukY>bYEXCaCuFQF;Bxl42 +Adm6_yyfgj3rfHU=7s03ntjF`lf{X-zNZ%D&L%-*XkM#Psw&J%7G|Mh=tmc9-Ez66PHyctOpPCRolFw +TD&v1odqbaWsO+9VMd*j0oOeoApP#hMpsN*Q*TRIdNu;T(&$ML^GU81snQHy<{g*6bB5MPNja2mP5~{ +aOAX+qlHeANu-MvlkwH}EL#4ru-jMA<|&rajB29)wpog&US3Zg3;L9=GjcA3A5vpUA;1I7=Kjv#&z$= +OP)h>@6aWAK2ml;P_Eu8at%iyN007zv000~S003}la4%nWWo~3|axY_VY;SU5ZDB8AZgXiaaCx;>!II +iI5WV{=R5b@fEz8*8B$Y{V0OQP%FpvVKCaLU}OJg@!8zXxqv4p?hmh8b8;#B4k9{}~!d);omwz(pHmL +CM$B#L~+fJ`TTFk1#>%~I%EwryLhgh{f^;#~@40 +~`cZAjxpHoRBD?f<_7iiLw}jDkHgITrs}ENSX-~LVU|OO=Z_w0^E>9sr=^p8hCflzOfu)M!T8VT1@&ni4D@lMRUdm(%ZmwXONme_s7JIdQEA!&uKr{r`(ir_*LddeX~(YC7u +MA%-8L2Wl#S=q)M*V4MWD63d5F!bX~^vbwyMHMDCu90v=op_)7pO6sb~zJ?-={?-Ld!L_>VHeFA}JSc +yF387GWOMe)d~Vz}CaPK-BF#v?dU>zv4$0_YY_;=uyDP+%7Vs;t-6+XdJ`*#PJ>ZrBO1 +mP%-MUa?@^V`=KTN9osv#X1R3MI94%MR_cjr#hJraM5BZ9wU|j9$lZ+ZTJ~#h%WYBaK?ST|dB`keRqV +!G}-JLId}tunS{wCG-2h?U2LI=?6nM!>E_ +v&GN237wb`{YNIKZcyeL`(svB72*$OeM8Gw`?IH+fb)aW}FTr#=f2;xhGRH%3CgmC6es~+qR>M +=^l>z)O!`T_*wE-UQCaa(hyeZ3F2CF*o)-uQyStWo^c0Fio%0zm=!Aw(}_&m>qg1BRbN)JGe)+gDzm` +5Dgn2NZV@#*CDZe@BE2A_@mTc-lBS4G$WZUvZL9T~j}D0*cO1p22r9)eN}0j~neUTF-0R~4XpRWV*yf +a%pS`J0*-Ml|~N3tsS=yx=u?!E5q@*W?AS$qQbS7rZ7fcuijLey>>@o21LDaUi2ahph$hKu +_c#ahXwXNcPPzKAFL)-cvP)h>@6aWAK2ml;P_EuDTtYomjhFf#S2BS1Q +o?U=BWOJEq^Z0SC*>^JKdd*YzmghPVTXu4E{A$9^uHLX$<9WghC +0G(iB2{9lgV7OT$ER8tIA8M4h~ki+^`8xRm|+u8C-uOb3ntZGah|1S66zQ +z(f0Uwz#mmPjRY6zA4p3${!LDe%4`@%Sb2{r;j6c6Q|c8L>F_J$egYStA=@XQi8zEGD$}G%d|b8pG2gGMr|+{&@aAJj59@|EKo +(XW?aGjQWknRVm#8|7Cz!k=McS&=OWb%T>i3UE15H%Bm~8^&f|wd3(&!6Fb<-cklj7?ca|a%k6eo-(~`*|C#<Os_>ba2Y}q=1HZ3)=k2&l$w_E +zJN6w0y?c-IB>ju6dDJw97f|D7jvPDJY`lYXbx+oT{wYxQ-QtU`J;lL7oVQ+su4Psi74htIf;pspNfn +66Q4eB*Dj3BA+Ia+`Rl(0hu4mRJ}Yt`QxjTKmGX+FQ3-i@lp +zv>aj%~Kgs;l7^h||zQo!A*6Tvi)NAoYC-DQ@@W(i^CPOO1eB-*otc_t7KF8d(+Ye$hc#}FW1i*nA?=&v7o(8p?`WM299BPnW=J)pMu{Fp}Xn2okKfj&~x5 +*&a60ohV)BMV-|;-!=seUT9pVS-|^=xvwO}mq=t#gUZFS$a9%Df$V7C@I=+a3ApJLYYZFM*#8wW9?0L +0i!%`=QiVJ3A&V@d1?UBBP4 +8-xdkYZg3~}YYgqJ8Yq*2thlR%SB31rW1Wlty&XB`S=P3=!VvCK#wY_0Tg`jD$dyK{na^();x!DEKG@10w@y-qcRm1BY(jft<$t~&z8D6# +eEjd(_#gK}kT`ig7}88aao4#_*p>Bwo#Xwoh^%Kc&WEdc?>P6t?LY7MJ&bY39rXI|kbQu}=M;det|^+ ++-epG>NQ3Q#(BC~P`qgH#V3b8axNmBy4tlVEksemG~?2*46~5D{$)uy;cV2o09{dKEc9@NUDA6Ih|Br +DfPSL))Dg4)J-Y9?by>#N=_p=G#Xwon2e~;RI3`+*F=kmBF}GcjjbY64mP?`PzR~2M +g0&c0QA!IZYPisyZG$t>Ew)}c88yM}QYsyB%YO^{iT$cdy5r+|u4yraz_TIj*W%A2q&?M?Dz0N5qd;)mJ+s7jFN?NomIA#Rm +bEsY#V_JVAUv(8&NhSKgvc0`q|KV91PtNkqMqeO<#s^UpGN9N$7bASu(I~2hGd6WSDqUf8_U2Wu3%7Y +z+)$At_LU02ffb3#tYfQ&@@Ye8pB+M@C2krbjYqCbY&Z$fLqJCNjPxy0d&rq +ExO)eK#i|cT9adFB5i!mWg!+2#4SZ?7FiWlpEcvn4Cgi}B|CT@S$f50~#|7rg*RpuODi;Ig7H@gAM`+o0#T=Y5r4-X4eByuoW(MdfFb~mqP7RXLqyIIP>2rv`PxKc3Sw-#;Pgr7@%s8>9c0(c>T1$TkinGC +g1e?!C*5OvEXPlbln65_ap*>SDhTR4kw-sh}ZVJF0=#QIgjo(=LS`1kN6nlc^8V>kEY(aqdl{v{T@%Z +bSmI%dwUL!%WWI%`K(=ZfLxvwn~i3DMpYjY`^H8^uIO9lQGF_f_D&lU6iO9&X@iIR8?N!=u-(!*=gyN +AY&anbdEmu~9Me1pH$;jDRs=Co8Bc@Gl*H)6t~E|WL<&p5)S*^djb|9a3p2!HE{jYB!=`xUK3L + +AP#2+h;LVl_tSnx}e^dm66c9=0*PbA*QJRsz#=@OCxWVZAQ0Lo^ex#W(^pd(!?ud02f4Xuqp$vlqW-!SSe%;=7%B#%#~orWfTq>%tTSew`0q{J;Bt +GiVDnY)67`Nf6JwK(n*?n`ehn&8=ToYx|!n*pdH~2+8d1N? 
+w4EZ%YA38Y@GP=r$Xrf2kMJay9#1M)7i=G=xfY(?*BtyW4kM=_)7+-z;74iJ>GvwMXCaBq|a|k7LjQ<-VeT3FE)`|H)J~2!IY_f#;b|F)2VI7%WSjRP +1g|L*(55ck8rG4UA(kWI22C|=4(9y|`T7!S?swK>lH;$dY%vK+qUMC4nbLRiop04)ozfem91QY-O00; +maO7>RFgOr9w1ONd23;+Nk0001RX>c!Jc4cm4Z*nhVZ)|UJVQpbAX>MtBX<=+>b7d}Yd6ie&ZsRr(eb +-k^*AKA+TWym93j{FGZJMGfitIzSn}@a#XlrD1OOXOet)jp0kh)Q_oMZz8k;LJd3(uTk7I3SITJyuv! +XHX9dt?%RXW9y^;p&GUuVeV-@mILs+zM721i|+x4X(i;!k_oIcMp$uka2-=#PdRF3(Vp(x82)bNqV<} +@47FhHc^yml|#&=;Z{{#USC1G`hh-uQcvzv6h$d!kW{?DgcFRGOeDsVgCtWrv1qJWu5KcrpMy|4CO}a +pWRj}VuKO3%+S>$+u!RRDarBwYy3ZkGrCCv6+M@>V9q`yB#2y>Z!8hef(r!oaPUgQcVaE0uer0G5L8=)l&7Fd4LdsBxW_1%F)bf@=iSj$`~ho#8}ed(p>G=Z_r{b6i +}yd>fZBb*y)}?~!tZdea`dJY7I@xv9s|f_K%7+^_-`ZPlG`OKt@2A-U+=YJ{#Vp?>(HrPs5V9W>`1Zw +q)Vg(x(r;%?mIA<(r>-|y9ex{T9kiRLv1zV)Rdo@D4VJ;FFd#p!EMW^Un$*++oMATWsj(V>4W!wvHDh +~zt(0meMbErH!m~Y)-Lv2sS<;|J4%0yeY)ox4JqNlF2LBHZu+OU>o93eBg>bgyv9{uLvSE&&uS8|OX# +J5gZ@7oaAR|_>Ggog?hOIIq&qDKgU>fp1vuCLr?Tb9)iy_pzCDIs4cQU)>ippa)>DAC;Aa7v+Kt%hBl +=Prn~+#erE)S#=7lyq^g=L8gD8jZw(!nRv3EgdqAj)T%xA&Q8BYis`pNHvkN2qD(i35{&^;i+mbn)HwJkL1IN>LXg!M`GWczC=AQx^V0z@;;QZk#a#3=B_kgIPvW9>Wt=MG~4sOu^E8+1rw ++S+|%P>6rEyu~J+e@Q}@xGJ!mC*kYbiQj6L9an&Mg=CXrDT_3ZQ2|V(n2ozS9v`ox=B=N0f>0RnT +I5UU$9^MM1jfG%jod`T-j4}hwQoi&>zpC1s(-@}|8;c2zIgRiN??s^jSR!|`_FeSOxhlCq-rW*MU@lr +XaK!Odm$&N!!-0Wp!4gj-3^vMhywC+V9~QGYgn5ukP8N&hE$BXiGo6%@Eqam5Ik3#_1#FQ$Sx%>O=jQ +X>-E`^9=64IN(4OO!&)L}-PsxbHQmsd0Gc_m7^m!kav +}(gI~jLHWI6B@;2zs}IfIS&=}u*|@5h$qET@xIix2nio@|m$Ce`8Zx8gq^2gq?CB#xtT90f|Ka-gw1l`oBOWBdS +fi(6)!O~o-!qKxVcRpZY3;&}FwTiP+fTA`3#w^Cs`%6<@ZVH$#d&&w=&E_JJZe_}4Y4NQ^m_cO&pIU$ +)$MaijTDq3E3)^ED$2Un3U^a^b+U{rv^C@K~Kx8hJ$(?r)t-CYmb;wPz!(=2OakJlX&6h`qdSRs^Qg$Np}Rva}1H2#NJDd9+u{jzZ5g^m|(Ny?g3+94!h%o +6sveb1m@g%{bNny|L~IE-PJP&gTpyw#7j%JxX +BOAjtX-qHm!shS+ls@vkQ*PgDl#1fl%WvD?Z%h{k5(BtwCpCP8>m5sJDIL4ZuB69VHUd +^hxpgIy3)(=I7LWNVYd%FW6(NbA%tDL=g@E}L{SG&E1i4s{zNDnK3y)%WoI +0H*aW<;+VJGY>MGl?|6Rj~gghuD`O9RAW7ZTWz@!XzC02dkAtV&&)2=BMp6H-#!8wb52mvHD++QHcCr +Cb(R>^qHED)7(Ig`r}HcU_HCrlcB;3jAk+ck=Q+AzfmgY%nAT+qmE|qq0^Zw*N_))Y87Fle%+C&2-OVYSm;PLn0hMi4d-g!e!!heGK#fUp8B +6^2fxn2Pc%KG@Q(gdRC>!o0ic_zBt)9J4S@Sag%wZmg}6tfG9#&zz<|P{DEcvTpIS%WJ+^cd*-0U$2# +;uI4#V!j@mQ2wzt%l$Q}Ev;(xQD?FIk<-Wvb_9{>OVaA|NaUv_0~WN&gWV{dG4a$#*@FL!BfbY*gFE^vA6SzB)!M-+bNSDXe +ZhJ;YC(=>`iBpVZl)`kdh6F;q%-N9_L?4X%hV=Ak^y=N}F%q$l}5?6gFFYN4``{kQ6P=+?0FIjXu74R +EPd@=PC_~LtOJ?@EB538OQZ@wMR_pf7 +X;1H;sP&+84to7st*=fmI=#^;u76SMo6hBB|FhhGlgYc$^uO*fbSRb>56946e{gkr3C;(NzHc<33}G| +_LGDFOJ&pa~>n)>c5^9$Q-HVT-4V-V0bF8c35oYfUqjWLoU-!-p^IKP~5o0m0oEZ@|Tltdb-t(d)5}s +G#B%WBAm&>|`6n+{D)TNSIzaXMb6Ha3?3>YEFSvr{z)=eg~q6vnh^ZxayT3?(B!J=_02zNzttTi9^c* +W6B-K^M~P)1O@ekm7Q=%mXzom_08<1(c=p<3mXW#}eASx6WN3S^eXzK{(kBj5P~QIZvX4Hg8(6nkO;V +oI{qDrt2~+>qk7FIFM~S@|5W&m0~IDI`=t5YzD(wG%&>0!7KJW=TVxf=M@VV}=gQeTMeMOT-1NT-#6% +^p3Cvi-aHvjF<2M1NZZp9hI_e#5#kjd^9&Cyk3Z8tV&Eas)HY^oZ9d~M9LB?d+^dq5xjur8+=o-k^v9 +4n(9i2TQ@bh^W&7P)vGBNwF%F$x!rA3B^AB@{yn^|ZlKC)B$HCfprCHz^#Fx8R%2QwGop-+V`DN+P&8 +H-YxQWdX8V4PA*ll0ffYsF+Sx`Mk|jfA43dw^mKrF+)(1b9=gi7b^}@0=()_YwB;RU7ew<#l8+db)oS +iasiv6~T#5AkWq0^{ng|gRGZ+raEDyAqWCFs$1#^){gdmHCk0##Gj2cKlziQa6ivnPw(hm1;uohkvGAn84rHV7V*aPvgVRn3@L2iM72X^enE5U3=04yGG%@dT6xPb +Y!ngKvqE{44cE_s%uRQXE=+t8oQ!ch6eB{P2&&;Y2q_|tCQ8Jb0UX&xgyx5bfO-$276?0?j>SOZlffT +#lBV0u`5U`77@#6YYHu9ICopJE6rN-sR20P4KuaNI +5o##lw3IzLtlx12jGLk0Cz8%UNy;9hO=#hS1zOPV_@(IwFC_Xp<{?5sw0A1n6IdHkbw=AQ4EBFk8Ocw +vh|WyY=(qbz?ri`P3+&%xu^PrmX3w0%GZ(G-lBgCr;%eg2z +n6d?1Zvc{jhBzvu|7E!$GQEC+MYc;n^R#!;6k0|2_Yc~9m+jV0mL|HR@cBjjuP^eP%q{G0H805*$(qA +<$nvypAf1XIDPdfXnreLZGq{>;N%c%4uG-mvPQ@1e`x!`(B@o-L?rUdvwF6`WbH<(Xf;)-R8kO?_~V# 
+(cQ`;Z8W7IWgUTc{La=7E4EggghTmM70AWO4k@7u%9(fo9QCa_mQ~{heIsZ#Xe)9Hr|Du^#LD%KFh5= +Xjdadt^t}3DpcvExUDGd*Xxzjs6yX<&>bO)pB(@SsE8C-RH`0#1aZ+C{nZtr5nY?5%Om01ry-&^Io4N +U@6aWAK2 +ml;P_E!DIPZ3Q3002t>001Tc003}la4%nWWo~3|axY_VY;SU5ZDB8TWpi|MFJE72ZfSI1UoLQYQ&w=! +FQ_caOwTA$@XF6iEXhdBQ}9SED#=N$R4_6yG}cpa3U*O2)^*NFEGQiMV_qh0ssIP3jhEo0001RX>c!Jc4cm4Z*nhVZ)|UJVQpbAbY* +jNb1z|Tb7^06Wpi{caCxm(U2mH(6n*DcT&WL9qzpmBI<-zv1&$f+0>c=B@z9^ld +{~hP+ZNeuX~KbJ%vvRM18Gb+$Be`N#$5a!BFWt#5?eXQwo%N;0Wv^~*Didfi2%)b7GWL4>5jy`^8>ao +j^^}H9$6OSOPcbKib3F7wqWNc@&-Xr +lWw2P|#p@O1PXe~4X|#lA3`i{mXd-4vNc=Ls2m1UIFSxDM7<1T>C;}QsnIviB09gSMPiS{wjTmeb{6= +WX1a6n0?G`VA@S>7z&K85zkfZY>^yHxa|peeXG&R^_M~Vd{?xxZ4cBnfm^uAe=W7p8O!Ve +=HciX2u|R@C&@xjKAyO_>+0RpnPnm44?Yt5Esq($J_C|?CF!qR%iaaAY4j<=KG-x4e+&mm57&47w~V6 +E3e9zzf$WN=wAYCS@+)HQ0i9`JgZ+pPzRokSgW~Nyp;HZ +Pp{UXuI?uo!il;oH6O~Pl6D{BE??|+SA1B(Ymsk>Ny?RL%;W*J=y_EYA%0zqh5-R*jqP= +?Q6h%+8S1*~uA0^tWPxOAPTB5!GnrKzOGSRMMpm#;2UB^K9%1pbCfdi^T?K%cls7kf#7SA5cpJ1QY-O00;maO7>P3YWLnZ2><}LDF6U20001RX>c!Jc4cm4Z*nhVZ)|UJVQpbAbY*jN +b1z|Tb7^#McWG`jGGBCMb963ndDU83bK5u)e%G%+*$B|!8Px@8d*%wd5 +%nK)>P!3(_?Pg7gVetX=Ki`-2Pvonzk7 +pKgCY`{>!wTv({=V{dsJbVXybiJfHr)_A7vZKjUed_JoTn&YrClp4{i@ +FINk|?rJbg|BPWI7Utew;N3G|Tq{{_P;B5JJevsB4ilQ5)KGXNDSiplkH7-PTk(m%X* +EW}FiE-K|pvy3`cf%QN&ab0RG1hSviXZ#Xq1d=5rwTYD?Uk!&epgrAD17Nat(lx<6=q|0umViUDr=IM +XR25MVA4QYxaTyCMFGeSy4RvGMF3^)lq0Ua=fI9e<*dgq;F=$D}tp^(g&Sh$V>k|lj+I@B_8Rd2Pem| +Oj7Mz(pP1MV8JUMm%9OsJ`4MW*_1IAr8{wd$gLLRs*#TYnX-r&98_u|+WGUg +BH1?iwxFM&OTC-~P=udNsU=a={#rF^>aO5e3H00G}vM96J)}jN`z^`ltXRl5>4JMO|g3-1vIxtnz{sf +pln3=xT>wtsEOG11OQIb^z7lI>VcHdYTwm$oAWx=pjk?&9)N($?@9>vhpe2GKT;K5^Z;@;;b$1h!SmE +&IVG~W2s`L~0at!pnb_nRx;bOj|2xnC-NQ1=4|#`^)vTyY*gaL_@_51^=aXDwYV9OPFE*}X#jo~|>@+ +M$07Z+28R5UMh{rOci>dLarJ18j3|i2HTp#lhbPQ@Z#!WKpGPe>YU8s*J1F@|PNlxu%!uxcb-wN3W5S +iAH6h8R>%TkwFejm1SWD8Yse5Nw60hnk0=UlcVDwGjaCVLGYJOH-xf2nvBPjqoc|3v8e0Ap@T*v2aN^ +8k%Nwpzfo~-I7L?5uWgAdMJc$ym8`j13=`6Yzca!~fQ>Uhk+ss-bHUIsq_zQ)fX%F4PpZEPYEb +JiqgvZ543{cI9-h@MjlLayiMV5c0K8^z>S%Hr+ufQKa4@DQoCgPv*n*dSooEM2YFt_U{sX;geFw_k#E$Ml#5i7TcoX_iU_pKf9?e-Uk{B;Ql8ly&5kaJ(B}FmiYtcRk*ytM0z_nvc{hePG==?%k-P?Gg^w=VXoet5l*So|nq{S@ +XNv?!W9-r)fE#@6aWAK2ml;P_EuwfBWdOZ00 +31Q001oj003}la4%nWWo~3|axY_VY;SU5ZDB8TWpi|MFKKRRbZKF1X>(;?bY*jNE^v9>S>JElHV}T-U +vYCDEDx&ICfyza7+98OSc>%rnsj+63W1jCm@7?cB$dSf{f?AmQL<<|U4soHiXsv3j&~pLJ|3APIG4p* +vD>AA*HRF(Bm#aT%5b`d +d9&3aQV5#I&KZ7OcP_)@g#v!`xTf5Aq^0~xzWVw&*=g;+Xp0~eS5nRj3@01t-n+75bFz6Sb4~`K2-iS&h;GeBDmBTg9lsUub05Mc0(^^$>%V +ma9^1mPxe|ve-huh%9{20Kx@~kq%qS>AW|Zy9WOgcJxMm|c8q6a4z=Ue0_sr+V<6Ls7$clJ8kt&K+k`d19wYOwF?n&`~QZSd<;{-Ta&E^EY&rZQ&eta6=&{Vr;4AwsF>;fk8`st>!S59Z7m#b%|UA!hMd=nP5bTHaVI<1UTZkUm&v-dpGAbzq6>Fwd@UPL3bXAqQr|#a +|`iF{dKltd-_Slo?UCIzzV&Hf_gy)I+MDUVOIy#v5P8pA$I1ouIwvL3`f<{By?)_N?o}|7iYx+WmO>w +fG%>Qc!ITON+q&8+tBP&vF~I;_ElbNi~AK(7sHUvsv^R{;QRr>SgGM)Y~qJBh +GRJn#Ztdnd3DNd~&{4XV&NK}po&yIKo#S2C61|JM}32!|j4hbK~FrCN;nZ|Q*SQW|AgL)X);1Z6RyugN+*k-A +q;rHO|o_TI15PBckt?D+d^I#HTnP!>2WMvslr!dYk`?s7d8&`#8e?jm522e`_1QY-O00;maO7>RR7i +?(j0RR9>2LJ#m0001RX>c!Jc4cm4Z*nhVZ)|UJVQpbAbY*jNb1!LgVRUqPUvy=2bS`jttyRHl<1i4t> +njF&s12#vZEl4e+O*4dS=vGjr6-{#i6A~%gUZE7$`*1R`wW;Ejyn5(MM{NLKbA0?Sx +GYP+$wgMYC|M7F0!PVj#re|}(YJ(v75~aZj7)1DcJHNSG+(5wvW`viO(iTi(NTF2;wdB^KvCtjlZarr +nTQV+5Ym{Y78gA7qm(%m!a$z;H@=>#@LOp>?XOXK%Z_$Bj%Z2H!t7f@ox}1Q{SYw;Kk7fR1!bDCw= +;yWWO^| +`^Fmkw%aFltlr@)k`~(c!&%WD4bkJW10ekbV3^5q=&+R0B^k?7JhE2Dv_&+32fnj*3~uJ%-d1TN-=*0 +Ee=yUIo~ZARCh_gQOF{@QfKtgyq!eZ|IHlD8z$iU!)8;p>72W_)O9KQH00008031s8RzS!y@qhsU05= +2x03!eZ0B~t=FJE?LZe(wAFJo_PZ*pO6VJ~!Lb98erb#!TLb1rasol;v*gD@0+=T|iN!jegxdowXn+0 
+15iGs4Sa4C&x7nv|BbMdjbyauL|(H1>gTzRT(PnsbIco|l5`QiUEk!z#rLEwE4&N_2TKnY!rCpP}iwM +{ywmQ4#@@Fh&vpJubcZ%AcczP;jk8@C-$qDfm>BY!S(FE>w$TnBg6WT19xv`brSCTdu99mZfZexwI@R +qF73!0-OcG#%cAVD+}opLxPCJN|lZTnlz|60#qRw3Pb_JE-TLd)94;ksXN{5#W6lE8P32lpT@`agQ9C +qxCX4D`d_2pP)UHsd!%t5zePY!aRlL3Pgn)%-4#j|^FnFAem}P^1|$g4k-DhJPHVWTXCH4o9p245aP| +f`wD$%$9m@y_!}JgN!ln$vA2Nnb*$9T7!>pbBRNg^{W0?nSe$AbiQHn+8v?-+v?41bCUhU_Nx43K)a{V3|i1`9W0LUz0{W8XtY+mhCpZ%NqkdIg#}RP)h>@6aWAK2ml;P_ExU-2 +ayf~004as001fg003}la4%nWWo~3|axY_VY;SU5ZDB8TWpi|MFL!BfbY*gFUvy=2bS`jth(Muo`U6#_GZVI3Jd=6CJgta{1`4Qb|IeeX7_mNpuKPZXjaA +~OXrzoyf?Q6^TJyLy!DSV~S>I&;%n(%2LA8Qdo54XqByH?mK@lEywR4olYBw|xq-6GtLuFz`4))J}XK +crWr}n}bYvSF9STaCk@p4!Ux~cp@t0G9W7os?en<@V^APq)8#7zP=zwhSBZ2lv74tb|~$3zj +u1(d}L0kycpej!;$ZUz;0t~o5#L^sWm_9*p~}~U|cVCy_{~TVRstOx7m_BD>W+08m_2wDRNiH;>GFH* +l_wZUom}NHZ1UAdzdN=YsBFRyu;M`fe_bDtTz^`zCgVan|qtmzW0e(YGZ-7&Qd<%%W@^&gr&6UJ`+oN +s%@R7`Hi=*RYz+!*E2OccFna1bRAc(8PS7$T5fYl*GmTP_j4?RHDqj_+se|tI7&;mOVaA|NaUv_0~WN&gWWNCABY- +wUIUtei%X>?y-E^v8;QNeD)FbuutD=cxKoz{wV(lk}#GR8C{1QTVuR-tuEwl=9;v%%lbEoI_1Ut+&JK +R+iU2yOz%TyAU`!-0$7u|w2IjbP@Q6Pa?);LUu9CU;Q~O`-@sjMXPd^rvY;p)E~@w#6LS0q+ssPP}&( +5?An#CWIGvu(@UQvC +dD{LD*ym1p#T6K0001RX>c!Jc4cm4Z*nhWX>)XJX<{#9Z*6d4bS`jt?S1Qd8#j{Ze?0}38ZVm;)wb-J +T(raNII=v^j$?T(Imxb~VOs2_*io~a>5F8JH{Z9fv9EWY4?f3V_V)Mo#c{D(m+8%-7QyjIJb8Th^Z@?(OnjT>(OR5GKc%Y>n!QYCNnRzfs0& +dq67hOnFN$287xVf)DiiUdX!1C!Q}_)&B&CR8yu2>c>!vQ?+fA7y%OtN6)_IbMm#4=kug*`x`duxeJQ +l}iuP)x4e)aa^?9Dm;2<`UfWw8{~>Ab0%GMP?Ax?B|{G{3Hjtf`YJf8X1azZaGIR3_?EwXXK|>T-RwC +*V*0SY><3yIHcT#VP%AQkF$|B*ea0mC?;IIudyS=-(wJb%XX|B3>1F0%Oe)W(bcenCmpn=f&mYEAdD1 +w7);i;~Z#_-<+N)B-D2vfxb-Z^(v`gsOzXo_@^;Fu9J5)J%ndXo&qD27BF{`P+z23a)Aw_O3dLi^)lx +^onAL-R;M}86WYixmD%LD$mi+J>!_@djTQ0{zOU$eJ8V;C^ponPzK`)!leeFLD$<-jRI4nlgHO8Zz)ct$TV~~I=KT1tP12C`{EQ!3!hU`6%}XibdL;FWJvj`VuE%GFZD-gG;DUm_g1)f&LG7K+$aUdKcoE9n!v}wqNr*^#`ncwFle6+@! +!j%70OS1^(Sl$-(WIV!;v^R_)^#nhGQ`_+k^nj0DY4c*HPBuWjq3LY0-tbBfj>^+ZU|?uzxq_StjAES +gt@j2E)Gw;lt6N@SooewsFHg?9(@5R!dJVHFaZ8|}q5TVd*ZYQ?5wqAZ8u%n|@}O=VY7JX6#vB(mZdK! +Z7%MRmfo^|`z~dS{k_9CjcC-$q%(9}RPWdVQ~To2GGIAp%p@mvPRTj&WK>@22-4aNB4d~)^*k)fn@7lpN9BBmUH4_r1MX=I +X-uL@b#}4v(%{fSO3 +qP<02Z)#L#Bp#d7OY@2>*)7Q0)r&R^6(Ej0PUwr=fNJMu<8Ux4j6s95ySoTSI2SJ{=UIUu*rb7BJi;g +9&7f~+2fa!4s+K@R)5r%g{JQLt?+$C#-3Cl9N&eCd;#15EMk)^Y>h?WHmbAJ93r8uJqk%GtyEJ|??;U ++jTAOzRo3^@SGB&v!WpeJIsC;;)$=2Mh{<>^RpMdI9Qm!x=s9Az;5G8l=;M6Vz9n>ljN@_?_+HE5TmI1G)}7ZABiO?dwG ++1r=T#jCRm5!KLeMTBOzCjzaIRL}|;5TiPqr>F^Yalc4s3j-jg0<<8^7Eu|^P+y^cRRRM*C5kM%PBKw +I%aVl$`=Cr8piuMU&?JwNcbqy1yDP@uEs`96M;9>Cf`?+8)>V?t$0##24;ns)b&s=Ki@2z>q^cZnT5_ +%5Q6Gfp9_4W`y!>l;^@*|QlFYy|1QCbjd!N>eVE8USH~_B~ET86C_pv*ntt@;RIz4_B8y#5pz?YDK)@-01ONk{9Y~(Ku->W%b8MEvz-3O$Im*(%0f@Lj^+$`TpWNV{C)F%kC4nOG$RvH$@%hhCw +C+>%B~dIi>vPR==_`*#65fR3{{H^qhl~ym#wv>-;xC6kJjCz?A5MmzTB=M>`;holGR4l^xvxzR!lJvW7o|!w{Y903nU|r+*)!Jql0%{Oat@$?>!ElTMBT8XHj +$a&2T7dfVR!P-C{H-nQ)DCS^Kbwei*=1g#K3~r`5T~(u80o^6u)797XnFA3?fNM!Eb|deDdaEdiMI_^z7BMmk5Y{cyscvZ%^NxJRh_oD7{F> +DXT!OOlUeN48agIwLIwA3ku@(Bm=#ApOBqt@^~=n1n<_) +%{c`?*tc4Nvw_wSGbG>+B>vESOYE-t1_j||8)W_YnYh8+53IA9>OgCm)XB2R>J*UTMO~4pkkObj)Lx* +J2AF~uR8OY?yW|fa-rh&$P33Ntz{7Aljnf%44VzUAqz+`uZmxrJjEY_yiF^oQ_OY1}V=`2^k@-|61`u +}w9n6;C7%ag-nG{v{VtV!$@gD*{eGi|;l<%8h9@nPppVK>_TSU?V&Ik7&lOGa^*Uv7#)}E+zu~6S3EG +MwSG%GkomD+8+A-D((oeJ821z=+oMMMHshN=6((`aE~ZrAWvencog;yfc@FOATdUnqV=r0iDg-B$2b! 
+vft=;7-?FS{6Cki2>p@7!hVOFxDU{NdGd05F%$!0jL8C8~K?;rGY1oXy9pY3ml`FV+S&l)WCVEF(UG; +WROCbf_1@6DACM}fS<*2$`Oe&o(%Oz1otD;569p{YEgnoi_p}H@4zoF?kfY7YJ!6wGs$kC8V85C9}1N%AhO>IzzqNRIRXZGW$!!L~RR1tzTkUxZ?XEvF#u$*@!s7D-`bK4}_IhH5 +ObqPWH2x>?CbK*BOY!Lu>~5ymldWI9`Sl6!+#AviKAW?5!g6DN{T*tSeESeSQ-lr{tC4EL}g+BLXBp< +MW=nw9CQmB@6iCk$;lFj>Zq9+sof*Iq_AR*YM;|}7FUJ=n7b +_RHwI_DrIA*y6GvQWhO1qf(s)G%nC2lr|I^vS^IZFcA`>gX259lj>8qGyS*G(wliQv3i2^hcktI}?R@ +nC2CRb&tnw4+Cq)2uXGF{RJ)31lboMLWNw$eTX1<3{(812Y5jJ!C>SenmsyJt+1Xo6_A;2DEMje4t!U +P8Hpt@bdqDONY%~az(nrTEE5sNq2RybWU2-2v!&&%No%&lJltSKmJ0MiGH^k`GKR+JAu&Q@4i0ME79s +UUJC21)l)#!7i%2fCPIN%cy4-{r;zChLNFZg*X)Tzc`f)_g15#UZ8$SA?g*4;H4&Yr`PrLq^+g^M^@V +n!*fV@efT&5tEv#4eXsG9jaeJ67Uz?@lwId33$RfwN3G +$GqikO~1?a*#TtwTaP&x=DhuZUusBhqS$GDTi6%!--ab_zR|@16oZEKXG-cE*P)DcwNfluVEC&fo$2e +HHZ=rmPnEEqNx1Q5&8BlauiPh$g%3~w__b;Eq9Xq+FJcd4Vjx!qouO<_Qk)as~3cIC5zQip1^I3ZaCN +dB>apsLr5w5{^=gqr2Q>j@qF18Re-=z_zI{kzf!H(BC4XgE(7Cth^3}eOv;wi>2TEVt}3V#3+N8@0$u +D*-qj9*@|Qg$VrYL2>_!HeJ)S+*s_o*{V>|t$q1qXwqzA$D`$PDo)9alXVn8fF^>{vPW?2Etz*Xs1Jq +9dS(sF`h3Jeg685zKswiHNHV?Y?)z5^C*M(Ct(hp*SK*F#dcZ5gRo9=N6B*|H9uh~ZDs +T{QIf`xnB)>S>H+ZAhJhFT`lO+foXc(ZW@v!(;Ve&#nS$Yj3$+$l%}%Tpil8o#>B>Rharv5&TB|HY>7 +>H_9Lsx}NE>RRvEoRgHit5Mz20eWR2Uo{x2ZJMmC)9Vgo@^-VtF{R{Pozp2?gtGV_$*`Ac(ZTT<0I6^ +0Z>hpkdbpjqrKtYbq%;2WNXe_%SOTmjJdrQp;HIT(%r1uhd-D4zp&xw8z-ZF_YPUvVzg_y(KVd-KpTy +TAOC|*#C3glX{8SNqXOS4MGX&l_W&C_={!gj2VWCnqVFVn=87~qdsWA}gl^S>yrfWQ&s3JK3j6So-%b +hYzrnnn2yD#YOU?D@(0$;HUch)5{DrnEJa#38TFsbnpVG}%a77(mwa?#abv^@XLmyq;CY(!|Fo1L)%o +QFY8}?rOWKb~Dh*1!`g5>KnE)twbi^6ZK;J36 +vDV@QzQzPM~6?YbkMCz5e8GtS{w|lWD#|ohykgm0WunXG3`A+fnHw0ll@$XFu7nr3jg2`2TiNu;PcP_ +@J0JgJtXD$g9+LhU&A@D0UbDy0K*;>jlo@et_aq$)sevfOkbR(Zq8nR#)OyKWu!Oi(O5O|02H_4WRKG +PlxC?GMU%COLb)l)UC396l$Tf)O*ujlnngDxcQ6EMzgEpN&j09C|={20{XcO` +&;GHvNZbx|7W^i%2IqfexP0y@j+UAk@%aV8pE5LMa{9?AWUEUjuT{aFiy4*m<)(9 +kg>Qg;|+EU3yOieVZyc1+XC9&@gI5H4iGbR)o*C)lpx86GXmb$bnubpehn_mQyzDi^C{b@2Gfr%#dMb +LdZ5ZUn;+~6ZPK`B>TRSB}1)V2j0 +V7J52R**Ic>Q5OCQpqf +HU_vk!oK8dARC?X!K44bn8;CI_O2C5`x^kL;wl1C3LrrjExpMLeVLC0!X;l}iz|ljU8RDN@Eoz2N6fS +3W4$w|(T$i1I&wz0~9NK~RJk8@N%TM6wu3mih#dE1`Dgjm#o%xeg)?X&HG+7i|`ha%IB1WxyXkcF#e( +JMDtmjZ5Rt>C{XXnSKr{sHM>7^{kppiYmuv}O%Kq1gA+dwR*GSHsTH{}m?PShDhe#-~)c={d@!oOvO5 +C>}c0n*E~qH1R!Il4Rj>AaOhxn0=Z?6C(MzW;^tr0@Ts{ACz&xmy5_#u`#L_sQD8&;{7R*%DEk50lpU +2~l7`#|H)hy{TW>w^tIa1w$lm7gNVylEARmV!L>fYlloFS1wR+y$pRxZF)EfJq1QC(^+L +DV=(Oo%Di^!UL_$+Ms8!`pviCZ;=WxqTo2x{J$eS-t&9!uw1^yu_rDC3nCO^cqnqw0BL3|nN9nv(qTU +)fVi_l&{y{6vzVfbL#S}T(6>4mFc%FWywX7r+LoSHy64E=xk^vK?|bM2{(zf?HX)qmQ7yN|Kb(`UO)8%R*xs +E3#1CV~NM>S=AT%?g@qKj{!{Zngz)7j_Tw4C~{uA4_bEk^HC_4fn+hti>tvJ1lVnl>Slt@!(7GkI{1V +Pa>&jp%cjC!6_A5bN0y>yBmS}P3koFU6F9~1n)=)o{q)urPwt&HwO~6N9!QMRlo1Gi?oB6ZsXu?WQKK +CcoC)fH__@N5)d6&-|)^ZKwIfGXjwSwh~<&SWd>z3Yk&}UXq;Dc*mi3F#JRv863f-)_CcMKQ4V!748z +b(-VcYi_CSA#0zy6;a+^>d7_ia7ZJT>yu%zsw7I_OC#*m%yT!lapZ%KLLsTwB0M6dqlEw)*e)3V8PnT +Ms#?@lYK(c{C*zWp@yMdYZNtjrqK$ld(|XW +2b^Rsgo?`|ZZf?4WrFOZt#pe5w3)6N4ZFCvkm6#t1q`*=2!nH<8-Z|>jZmT->E^_#J(V10{cx}BiQ4XU<0829um5rD?v5}PFhs79b}U~*=CAXaOEy~PV%1jb +xg4#5Qiv@3X*Cih0V0yA=Sn@~oZtO_n=Xuw*RoYmpXQ;5x@nzK;gHLjZLES*tiXJ%wwbfqX@%LB)P&$ +`kvA{e52h$rbd3qe5Jx>}xIg-KuoOO8=0S6LfKsC2X6h(5qNsVnG)NlMkSwNem2(hi;a^t=2{|bZyEhv`OhGA7HMf;d+gFnp_J>KD%gH&fgC4 +IT2+s6x{4PP|xvLi-wTrHd7K=EcTN**l@37e$4Mw5P_&r=AfzrnhtXO^8GJYZM%IDW%tnv;Mnp6`V?u#%tJjbM!(b*<0Rij@1<0`g +{i$CQfVg@i&SLRezU(5an$w;K``Wr6wO1jq^jzz|lxfuuK}9l+;~|s9LDA6<9i4z~poRLpyG|t#Sw(^ +p5O8lhp*(CSjI^F8Ml8BEqt1No5BznDuNUo(Ry_{ZFoUJ8~eCud)g9@cm;DEs~k(R2%AFg7=5NjC7pd 
+kyf6+?{JerKNC+HUk}OOQzB@Ekj%ABP0O%kOx{XkKx2v7)Oiyl6@lAoR(_Ix2z|SOS_V0f&Cu@os*5E;~zP4t#im(#D+A_ZlGqjQ$-3gMUDYEZ0fnHAWqOH32P96{S)h|jhV +5L0ab}mG4dbU_luU&+0-7$R0=6<{8nD-%k;+z)c%TmMZQr7gyPWB_J4uyarI2-ROe3LATyTsgzjY!Qe +K@8H;;1w(!@y0?9eI&Io>G}q6p&WK@+QhQfq(ae~n%|vTuL@*UwSijUuuB|MY`lt866qndcxaKtq+1G ++2?|-M1K0oe@-+jO)|($09^(@=-!ZzrcQR6;m5llM2y97@Q%%a6V0VguIOTg{g523N5@7mWj<>O-QAdIn-!e`TsmL$iEOe%F7&K~s5FV6qieroI&^^XPF>wz&mZug +Psdbnic5L5Uaj%o;MW`yaASA~){Ra$RZUv6vP|jjskFsXE|kP^1 +~+tyqyfr^{uay{J*U=KFa=o%TQdV@ZwzD<++F<@(_~TEW+w1d>t1Jabp27L1K((6rZOz@WNCg|a +V{$^n(K@TsPI}3%4|7+>1-ek-)Sm%n~#Wt8cv56cv2w;KPbr(`?4!}tBCU){9IW5NZ*Q6O#nZj3uh}l +-hc%|yrUasHwAETu^gEoPe45rT2x`g!7deysS+{Z=nWmzE)zV>bou(t$+u(i>g4;2vG~i$zg>l!(^Ks +!l6Xs+N=mAfFeZH{#Cdv?M|i6^BIt0}@qD|S=0jn?I=rY;m+G~527y8>mhe;TTzaQ6;4mH?xwF<1Rpg +Y{XJnXT#pm+{+F(oGo8vaYqp^69^i~gUQ~k#Mdn6(S7eLWd +>W)osA5v +Oq=Vw7H~wE{3G5u#0aHxc=%R2v@(I@kyz$`pG@Ij7F9}8zz6j{zC65YTeRSS)he*O%qXeO^3e$dO}2H +z)*a$e$$9xiab7p{a=rzl*p3k^uzr2zGX6QG{gLx{iVwI&sbb_m=M`JY2BU(N2b$MX8dCTALn`_#DUz +aG1~FG{hiJ1TOK@X4bx$?7vD(j|8^+?q$?uy~Frsy<4MLw{-ek-l^W+`##A2KZta|z~$94a1Bm9+xLH +F4`!O9Gh&n0sx+gijUCq=SZe({*4lFUO}=72F7-?X6039(gog{<*LA!>K|q$~Y$0D63Q<<77~b6zF!r +db+oav&5o=u*2(?54U)- +A-g>4&TR)x_>tyLhwbL$9j$d|SBF%aLF@kH)|gcD1Z&E=&KeL}XPJ;I!7XUp(YQ@}#Zo(gWeW^l>@hR +1;TQJ8^fi*emzSq6&XkKaXB&b(A0UJ2?HATlx_5iK^TlOhUMuG0acsj$xj3gZU<# +baxLEymwr|oE`z>G-ag0Fc@R^*W7(%VwuBFN^9Vv~addr{HGPLE=vXS9M;j76~}EPx{RCi7`y+_Q3}; +_4YjsWo#TwS(C^)MdNfD({I@3e!>Dq=5T|=7-kE9fhzF3vR7(8Er?V+(dExoKWKYJQvwv=8Sl{g#~io +w!;F$ery<9lD%*2ey-58`#o{Cyi6t!Nw)Eb8}y=Ov5cF3EQ)9Mi>7^}swMUDgL|(t>s*$+;}pAP#$8aT>+~A)1_M2N>?-SpMaKLAGeU_~jioo +dWD86Ip$uJ@AYJIn)e7l3M=0V`Uz))#%i67k{u-`sT{oqSn&(oE94SP+lwuid0Jp4XVVry-_&$05>r+W|;7-Fz5A72x416zieY?@~cTY94M+tZWRj +&0!yamLrpF*Cy!_z;SQEJa&ChZtlzrL@HAoQt$Z+T$w#{?+QuW%#xm*oV;y}_Sr?6bDam>S`C_JmCkN +6tl70WNAC1QTwb=G@s;gMI|dnSG2O^V)}WMby)4D^JLJ^N+Ryh`84Z^eHD{(eQl?w=N|nQyu?W~t9gW +-0UX|3>Z^t4?OcW9gD2x%+f-h)Qb&oyaoTs|+*A$QP@h+dX7!J>#x0Rmfm(Z3~B^LB`_d9u`hAXh>O~ +e*7G?Fq_Onz>_F~;3`5%6|_dH*+IvcEgD08p})>Pt^C1DV&1Rn5Z0x9o@}JNx_woua6R-+*)giuz(Xx`bxvv&2YuGbh~asmz8uW}(^cx +ZTFFU$K;^OQipq);-b%%`n2dwf&4n2Q-<(t>jyH7%zxU^D6h3AO4eJxOV8YG4xU{uEec1FSYZNj@Qtb +n%L%qBx>5?RckzJIQ6*o6n?#cH;>f61dD&d!oHWNEd4OP6QcmWE#Gr7Yp#MBkCpu{Z_Uk@t{Os#DFVFN +@wxa%b&4W=UAWR#uw?R&UHxI8INw>V*89iPk7$wUsM +BpAHOU){0D{qV%M0#Wg0uyi_bOgURFOsbm|-gwQY#L`k?t!|LSYp`HAE=-}9(?#n2ZI#@_R&Td6hE%T +*D-$)k257v+C0ldAw=W-2osuM)y3>a40*~a2j#(bt^MGPeWU<;Ek7hO4%Xa#0d%Q%m;xp7Oj-hqtQuE +MPf;Sipo|#AXOa&Wj#A!!z|7j#@ON+)wGoJh<`hGTo%`-0B8I&FN;9Ip?K4j2biXHFP=Iu%4O +H9+!rD969O_IL~)s)o;Or?eyP?_PuoX(A-tth-iS8ZG-Zr%}P^k +tA-z{wW${Pf3^1C_o~G|d*!C_yUD@fCZ1bu|2ufSE1Uj@^I(SsO7Mf)8O2?q^!&eNjS{5aZjt_f!Z@e +l&Wne+>xPS)-*9-rI<0liDJYR=npKjEZmXrM_rY(UH&x)r=y&Fk$M4SGJnsdDw^sjf|H%mcH}-&^pY;#_L+I~w`s(?~_x +-&tjb1-oIm4a)`E}jm{IugbZDWtYTgpOS;D}+&G-PqqmZ8R2zm)B~rEYMNTgLQac5kO0i*424F0=h8T-4 +QN*v5Le=@6lAGZyu=;J0L`R}uvyJUQHHish$f3hbC}v{K?L9$?z5;w?B>o}(_JcetT~r>1Zh2fx&1G) +wSJC3F!4q0*flrQVz}e?!s8W#hYx0A%^O+#lE4OTFR@L4X>^v}j$-iY#;m-`odME7W&##87A-iwc0)f +=X3;_fnQ>5t$}O<}|TW$KP1?U%}da>HOQA(uGDE@j*EN_Ig|ye`$=w8~?jp;vwx|Ze18RcuNLu`;C3GKmJUAhuS88U!{qi-%%oiZ!o~^2+t9|pEjQIF%q+vc6}Rp9QHvOlom)6=Zr|? 
+NhnBY>@xj<-^N9Gzl!3OJ2ir^+^|w)Zn}7JGMGz6f5f4R`^jd|y!DG+z-i3fl2yuHP>$xM*Kb3s*L}) +Usbah?eZa8nD)Mf_nw$2E-jg;>0a&%R|_DW0rH++D}cDWT2$g`9r$WF3?@NaLSr;`;vT^zeR{yS)0>$ +~*a)D8&k6nG90fg=ohMV1?s;GecvPW&t_L=Q&&nk;S&R7|x<-c}rlleQf4=b>NKwQiO +~(+y*3(}Y3zYMKm$b)OB+y}H>2(BjWMXt}5<4_`;0{G{I~DGAU`10K&?0^+%OO9=yzqN(a)xhd`i9in +nGiBaAl@1ejeZV&L5#|&$9=MlYAyyLvi7*~vhF+LmyZtc5VK>GcTGmosz6cd#vKC@w<2Ctlf!l93?`{ +I~hFpj=A@7}lD_V$@beyge6X$dZ|@=bcEx9YeN2BoU{!7e$t9bNUL2xo!+g{gRFB+jDeU@E<6PS=G~% +l^V}{Hvs3@xWsff>%_0dF`m0k|Nq{V&B$jme%X`{&D$w8C5vIb`&cF$c%O*#H)O^=sf;Te*sWS +0|XQR000O897^_9$~So0vM>Mu@yP%H9{>OVaA|NaUv_0~WN&gWWNCABY-wUIWMOn+VqtS-E^vA6eQS5 +y$g$ve{R+fBJ|Go}o_BJztL#;5&0L@B*V>-U+@ttV5ZRP40s#&HN@BA2zi&PI{QxP?WH#AdoMbEl-PP +UI)%EJ?>c`p9;ZZhk7G-^PoNfB$(I4^6!Q+F+*|TQ7ZOg0cJ{vt7XJ35w`4>m~~5ZTKY&(2=IKb!XVeOAwAKtzA^YYjnXmxF_4+pMzLY`N(-t(eWQOw;z+rY@nwVpf&CXp5>lIFKJMi%xv=mHLHc=AHRf_u{^< +%1ia;y6CR0pH*{p1xQdo8(nkVbmjfJ=&#j_ZmYljtz7e8va?gw$LD2-ZJpiE#To}R$y(7hn|3ZbS#sX +20H`YezHg!N7jNXpThVqemKbD&&#v#PM`f}SYa$%-}GgrTV5A!CuXhqc~iDDvy*IIz#ta0b= +#~(+ixcsm#K@Dm;sH<h$&VXi1>472s7BJ$U}&{Rb-d9tw@}7q8!cIDPr@>|Gw +Ej{M!(vp4Ua=k(YA{Qm6yhqLFoKR58{{hROKJv)03<@`=V&G-L)`VOj%x$zgTzkM@1`|$(zaY|h*0fW +%9gLzeST{e4$6f$Zqe-iV4d`xX2E5LtGvyPYqQ&F}m)@#6Ch8XLJ!x(@WF4r#Wn`~jtK<9v4h$ZlrvM +&4CY}AQrDVwv);PV1Xltoo`0vgFmB!+q}rpN_>@0DFvH|+}OW1BU#$d*l;3Fu$uPyI9s9NV4Z2mm2}| +1bA7jv?~;O4PvhVD-bdZ-M)X13jF1C5m=vEYGk&)?Z5;P5@%xBNEFVkyZ9xR`PH&7-lbabb87Sj>CS( +zYKJ}SS+aH6PmF^_fHp#tZ=3jd##6KdV36frC3qPV-i<>9cV5-wsJ{j8ToYs{JA!xI(BGZzdqMBKElB +8M_7)~1=xnM7MrRcQI#73Eb#o}cnYK}>IJ$TuhP<&Oq6Kjwxz+GVt;B;oq(j|H(S>#_ppiqWJHtM +(Qa!ynW(v12(o(?aM3DrD#4OBh>nDc4_B3N8qozP6C2oAV*?<|~*Bn5Y4k~Jh8sUHjyiLnw_#eAC)I# +8l?Pc1U)H|xG>;GG_%7`p=w$bhDf$biNU)4%Cp3`@J(6EK2V+8u4-9Wqjb=eto+?7=YlAyUE)`4uSg)z3Z#6n6E)8o!FjOR`OF!&dAGhkP!4$-7b9V**0!1{II6=Y_Aq9nsO_E|z1E>ahl6>*+OZ&SZ)dWbPG$@=e> +cXV5o74VCGMMxA3$rMYWhYR6f+X|HR_9767a1DJ-fffFIGDB&V>!JkEX)SuFsez$u`2^BY_vcOt(P&} +jja+W@U-bv`R9R84`CUy6AFd;nIQ1&a#5EbGlZuA*M7x@;t-S5yA*TwIn#{q+0GP2F!`W?T5xcVk2_C +>LN@*^suYs;qBdm0y>jV1P;jE4{&z*|Mwz@)VjFQQwwrQ=`T`wXh8xKSBO!O#yNoUi(QQw=C-gDXV(C +Q+_pr$}=oKBH`gw0(8lGQKmwa3+5OgMK~X@9j-`?jPg8yi+s7wpx^YzwCrXGsVG6Q*tZcBl7I=aphoR +f56o?g6uG)@Q7}=R5WL@ADn1U1*}ZP32H&C%K_+{R4?vICfEFTr7i9@`X$rbUcUQtx*(&%r4htW!c)J +o9#0=?*^=mMo_BRs~&qJ-v#FH{YKPKVeTq?s@j|nFs9B3LT_-G(X^eNZC^rxn*nE)nXQz1>*58G-UEj +cX>*GBzME&tV +;s@S-LJ}}qO;iCVK+Pz^2u3*yMK-=4y8ir!K;`2Be)3x(y^-32hx9nY1i9ui?V&?*OpyPoL-jKMp0p; +^3Q*v?Hf7Pnh`>Y=?RBy4vc-nVT|zN6)5kdED0qpUyP~aesX%KAGm|XAJEV_IR_UT;)JFo0P_rl5$iS +VTi9f%o#I2}KzHpUXHq&>jgYV}B>m0-nkiPRBS%%ES&q9 +F6kQZBV;!70pUL3}}Y5J&|Lp}>05t-|OeP)|jY!`50#8hqClxMz*^E0F@QKKr(z0Fx7o!-ew1HafEfWm=Ne` +!)>SdL=#0cB6rN#W*6CqI@Y+JRB9In*H4alKg`Jk4EiE5APaZw|L~0KCTkx0KQ>ZtqOJFxsm3 +vC?Pk>IEkjmOFN)Sik3_8Ha_ +P5gA^YFaW=d>)5(k4rRCr=2)9n4az+IbUSKtB553RXHgL@G;F|dT?G~!?rV;uxJlJW^*j&%b?cP()D1 +CgZAx!P2HxgJ6mJX{mUybyJ#$4pqJk3J?frhH_whod{zF0(CR-70|fRXuyfNk2EaLgfk=!7NTPAbZ2N +qz45)uu^^QEas2n?Dv^%#u&MUE|0x^1#jEtwp`H0K}Oq%Fc20(=!@$TvP_QMjFni@@kNwt!$NL@l94^ +53+)W-%=Q5;dYD@vp_RQ-C2n<(p~#LtK(_;X$DGS| +q5wyS+#eg%p|SUb@uNwpmf#~P23we%H|T^XnCmzshLD&}$R#;9x-X?;Mi^m0?rWtnQLsGUkL%N9n}iV +m0!>uBUaubXVuw1S0|Es1EH$YsU-8n}d5D6jf8Z8voa#>EBthd`XcD{c?{=mEhG6ZvG2q616=VpOwH{ +K-+Ckn1YSy{hI;*uKnv_^MFbQa56oop2}O6r}qeY(uHX^#Od^+QRWl7mB^K;@#3+@{1B@|9JkH>$n*( +Vk2$R%G$qOT|8`C${ns{mfuoN^zjwa}828sTOBkMb(N)VKg +@h1U=e!V?=d-Ua3UjyIROYa<1q-^?t2Ikk~W_MT)EC +d}0#Fz@5=~1V!foJb;P37xR{-n!BPr7lA9s$=@a_vW{gZ>34lcJ0wtE|At2tV9ul$SM-Kq=R3V@=zWb +^`y0ruTF^1sycTm(NW@4bWqyw6${$tE0fh0{nS}((prD+?Kelf_p9k^z<*k%I*q#;~bgKq7hx4_a40K 
+;#M#;sZRECQ#zYYrvcAwDC!K$NOM0|74rzwcim>Mo9vM){pgteXQ9O(xAqlhkaA(F;Dml$+7cqyxU3Q +vGwT`8${ItqXb(6R$2D3fyjM$2p~(#*t1LG*EcetCQ6FPZ+7UkN1P^*cq#*~x>uW?f8SsWaxSNhDh#m +*tce78>ppH=1*sGM%r#7stc-GmN>1#kzJ2* +{pX_)UpOP3DBAh8-hISy6u&ad8jz(XK7~cL>IZoTND?xIlEX=<*0HfYoRPMQlgL;^pVR3f;pS<$ +6p#_M=c-V+ai{gml++6f0#T$_oLp_J^F6>pig>vt*gNbt)5vW!d@B{uh!Hv&7Y`f`6BJ0~Lb3w5T6X8 +*M&Z38jkA(U-4$C>{8pQ;XR$;P2703jeiBjS9TBno6XP7D9};^2>JM~bGv`2V)mi%Ea#Mn((wD&E7Xr +i8L_Oc?Kt6k1?*&B@qUV-;+}O@d-P}>+-KAh+Eg0Yt0?aIcBlMjT7@T?TUREr7g9i!_AA7w+4Ni1+Xr +cTJNN$=+kT2EI4Hb5ADPLUZ09P|8F*lDDaLAPmUb##Mj11*Czk +K29C4B9$QP*NUfaIjdkWr9*TXv1*w4?e;ueRFr4FxJ!HFI*bd73$WTa;I#3;YE +1N!<5s(aBHz{xJrfmK^SwEk%K68ahL_Zhg6fZUcC12ljx<(cS>jp*`7>Cg=7H7F*R6i`k~Fx*+Vw>U_ +rm#`NjmRP>l8IVoHL$dD@)DMu<1D7z5ts{QQycQ0vTDCTiDq1e%LdR|N~2EumtbWjn +C&RfL^N-hLjj#ps@~&^EAU?Q~;*K?2Ns>u8M7E_FPaY)RtR~IswetH87Aw$eHqa+jf~I^NB;TPJC>g*$F^(Mx>t}NYBPq?;DG{NHT`(C(FHPD5m!ndgrFRZnq +U=)tJ?gIUiK6_E0);SiXJi@CP+Hw}MR||hM7VMvCxdAXT09OkOf-dFSd{4UL><<3?1_isEYHBn`KlS~VWfu@hMv^fI@r=E(*SXP?@FQ#a +iN>q(#3hFdw^2q-Z9r?%nLO*gR+x1ZXAUDXbsW;?mFOPi&X3Nb0RxSHLdc?l>REjJY?X;n8SQ&vL_^c +?ucDvnhMwNV0O^D}bUQE6aYA)hlkt?I*s><894Q}M*_tVUNb9vQMK*Hm`I?i1`!#j!9=X9xb$YQJNBe +GN((J9PVNg$L75-G!p9W$y<79Y6Koe%e(0M6)~7rH){Y$FJj{0qVJ*m@y8#f>^@pG3&GZ8 +O%b#hb(M1IdO=kAyxCM7N_e;>M{4Ivt*dTQpob>Xp{LkNL}j(xHp)A#_AThPC3eEPs1v1}k +OV4xa#2uAdBr*9NrrR-L&jc)!GBPm?GVwX0?HRd3&Ivs{+(glLBa&OobB)+brODUL0K=Gcy&S+a)Qk%o*4CqW5#HNA}=+hcQ%0|=+t2l6HW$AKww0_Es_cvaHs+eT4D&vAPQr +S%o}BFI4|uHDGy-sfcYpA+^IT2Td|U0JoljS6uERfQZ6W!OF_mRaWK5^DZ@USt;SvMrHMSKi8}ny2KP +hVO{et^Z<92w+2_RbmzL6wb;>BD{cn9Vn^yTHo`EWLPg7J2k36I5IRb0xD@2Sqp@;S{Q=UV)#UghKps +)_z#Q08(Khg2`7h!;kH7>PN&Q27OvctItFiG42O+^|3SXUw0KUh4_0dq{1Hb+CAjvmZ`kZq9Ep!Osef +{e|dYU0ygP1jkKJZVbN{_-n~-re)jt8p$Hhh7T^FC{7Z*ujT;z){zQRKC*O!0fU+^FN_VqXF+iLN(jB6eS_x0yrVB{~ylPPuvbZKpe@uK +5T>Gfsy>g77!5lX2RLkgZc2Cm{W}lw +*ZtcW}judoWz)Wsk@IhYq=Ov=DC{GFTN>1PU;yhwxTzHVOksHWUH3Y$|lw6CLI!xo>y7Hn*n}8DxgLf +m);yPw<-Gx2{|mQy@dvaH5gAs&%MdX)wyYr`DPLGETsJ(w%5c-|M*EUf}m5;)~L6Z+Y`|G0uK8MVEiE +6tl~3z7jR&KpJt0@w7!^UXLyT^#DiEx4h`)Wf=*-e$qYZ$U@-0PGmbqyNPQyNmlQvIh+{6BA-L#;)e- +|Y2=`GOtS}sVChZE22}hPZ1D4v(E?l@5?*i|mpqM#S#d5<)j++VS37;Y%t1-On6(KqL|BC#aenf>00g +C`82LDYGRsXxavI^WAZoWq?WWGykI})p!S#!4I;oW9k|!n=Gw&QBr{jodXYA8y7MqGs?|yW@=yTltI;cWPFY$)=R5_Sn%r5@f +q5r%BT2CA;*YZr0uSjGK(t3R;2*AqsIt$dUwo+~vhQjc2#4}pPn@OR)d!hc<(Idg&II-Vm;tGPJbxnf +Nt|TKfqwCcaq|$lxqVEXSZ?<|to%s_lsy&sVd}AEm06GmSC89mYgwqwOBSFY4}(en*!+ +Z3_dlSK86qUaM$y`SHi+eyuH(Gl>51O%NEdUh2X)kk{on<7S-KWEHwI*wAzL +G2Bp(fV#$mhT+&4uWdLZiteyE0sEv(m3TjqQH7r+-kj41=Z1URV8glD#@Oq#$U#esJ2drIN~Cb4N)bY +>{(0S!!`4m+*TRIy-24mhcBQKhVy0R*IAUImj4*l(3!4o*1kHRYZ3I`b&Jc6G!f@}SRbdpZSE-OaCns +Xtjdl|dYG-(Ds^8;b&L8+j4I$S0Bk(Tz8uF>i!kLp2o!oZweroO`qeu7HeBq2!d=>*~Lk +HjP9iXx(DXI~5p+>hn(P<S(9a-YGpexy3?F1xst`c~uVADJVYj1dekm@R!bN6^*?A2hQsM +ktzJ;s;%yzJEJU3c2R+oqgR@*@>hZ$C~>b`biu6h%ta|f6NdDIbA?Z(kEC^B(nUUqOp@>n`Rqq6%P46 +eMh3{jXv;0&KfjA=%kETZG%dYT6z+Ko6}q$g=*jChAI^?>Q{wdP3yy)gMk%Y6ElTliEpdzOgKTUn^wK +6ytr#ED6?CK49Wo|}d(v5l1X=wy=|RnauqmVLOq6R%wY|dzB^|jJt+Uf;Hiv_8sSOs>0PO;jw|HQKWw +&;zvNQjN3Z`@?g<5o6ydXS5Yk+Hd?{wD1vNF1o8Jf5piJ^$~R?NGloYd`h)^6&-oWvw#Iv;ZgRBiH^ZQc$u6X3AXYpMO +e)9kc1CM0<&u$l*r_M|XB5N@;JWZ^h^QCql``v7XA2lJJyog(&1!<##ni8ixHMN_L?1=rmoQDZ05q`- +o`OfxbQHslZ1;PKrQCIdtSC6nV1N$A?#aQh%FG?igEF4jWpSjPEbkx@R!T?9luXOlA!-6M%i6ux>(ti +bVADb~jK2aIfZlwFx#hZ!A|PnxDCn!CXmdVp#+^fFn|$;r3g&M_Sh{M#fG>OIaD4Ox%tP0uDLE_Pms+ +DUQro3>Rs~1hU +N}aMu6v>ZJJ_-qXV8c27_`oIoC+5pDzCjG=O_d2Pm2dz|XjA^Dxf@x`ma(C&Zl@}pFe2u~t44C69uem +hB;l!PJHS)S46xU#KNRb0Y(&9mobq{2Y$0My=t5SOYVU08Si-+6UmpO 
+_|dK*-6OwI&t-=dH4offtunN)V0iPgH_j+M<|V3nN=N}Fb9C~$R}VcN2kU%8svZkIBgwmMnbXmD}_5} +WKYN1WpLawPd22c2id91$uLTVOOf*hY&wmx1gW+=w8En)OdYI5~M+OjppOOhUl&PlP<4sMFAt)!Dnzv3K;?qkxsSTBQuPnlJ +oqdL?_yz;kuKdIkj+`T0C)#0Jm`O)2!U7Npp2J>SJQC8lE)Mc$KR7z1BGc%pXZ6U>OV;_V +H6XM^4@NwF>^C(fnsAuys3Wm}NX!{NR$;Ulx}SyVQZgwMY2$-n7|Z+dxv*~K8w0nSH#1&sMN;C_#CG3 +__m+h5{yG>2g2X+)1a4QWP0<$d>jq21of|6M#E_F +Z^IHK7AGJd?aR^4*{)qX_@AAJTpTc!h?D=sg*f01u~+0Lc|+7{vvhG>R=5aYdPZ7qWw{hkmQ=E2 +6$vPhCsG!lLgq;@mqW%PI@1~c2nJ`HHNps~{y6O&x1an;%VA +L*Zd|A<8>ar;?T%E97=?RL!cRc_?Y1@%tn84Mj{YleeN6vY^{?HtaPD~j8(5Z|2h|>YTA`jMJsI +=viFl>gBahUU~%Gn*=A){=Yyg63j(Tk_~#l?}#FfZh^+k0h$C$y3Q-E`@ltB?J+G#FswGEltif_iqE# +*Ca=X+V0%A;kMl4x-NXXJ2Ol1@2g{rz4aO`_%;UXd(jPZrHy~#B@s~Oi +rHmKK{&bRk=AXW&WLK-+ZQ_)>%$)c+!k*k^PpH6^ye>2SSYcYl*jO=tVkK8dIEU7{ +fdJ?*6nRECl$hN@3@#@T^!x3(%bKi-zxZ7%JbLwD_(y4I#W>ixZ(#$D+}9`3Wj%FF`!gJk4QD-XD!sa +Yu<Ai=OYf$=2Vuq>g60ikSK}E>Odv^hWL>mFMN&8IM0bO%GpOnWyG7%C-TignuaBQT1&alSz3s1?d +fK#CPeDf(Eqc?Rq8^L~fvU%x405VYwi6Kb1M%j4RtK&kx$XBkZngWQl{ty*%4J#~>cAC&Q@1TY@W7Qe +gLp4$q7JB+ODk-L@Ow>)wu*ZxOn)XWc)BgXa*Yr;YU1MJ?SFiCgtvphaFmurn;l9oisp=@Yz;J-bd4? +_7zcw86A1vOFnuNP4cuXc1t({9cMB=FZ1~_>tnjq{}{k-SHa3K^Tj9(nM!fLo={mg7bL|?TEvEXOZ(~ibYG213=z +37RUv80!X9LKpXer%uX2!aDxrrsw&yG|DeQ?fMSb#o&T@;!X!jy@Oyy_W3utnMwvtgI1aD_oJI;UZ2D +yuja``#7z45DY7zJN)8Ip^9N!zD!m_1lbG)r6H#Z(=Wn>CK-G7-Ban~O2&MBJ9l?e^l$KDpeIbJtf&3 +4y<)aQ1Ih;Xnu#bSE#b4MI605qptNC62b9ea9MTWLZ%X(U^gRM_HBTnyBZWF~-U#8sHw6Agh^-wQW;W +q^iMeP&0Lq*J@SC@xgWF(nFMCJkGv-@#CvA(9Y{crEZk%RCc1jm20(sgRD!Z8wkK)LFf(q!}sV;^*XNu%0Og +K5IW8^^PvRH&LO%RwYvpWvVtoxlA6{PkkYq;PH7)u=q3BeSTf5nX;FK>e^ +>-+z-U%>Lq?8xE}v2p)Nb2$9}sObOqqN3vw`9Z*lCW<4PoSmsw>?1GngD#V72LF3dWVCEbInW2AnDH6 +1nD>un_K50X5zUEM(O>JlUSwMS(TDepb-Lr|o?{5lR*SD=2uAhUk09P{T1wBEB|u!DWsNack@KQ-L{* +HU*1MIrZzLNSPp`$jPL$^k$i$aNBcO2Oxi5My0e@2YgI{;q=}1g#JwIeAMl5vuK+QuUDsA^+LhA41KE +%g1hB56dX{ad-k-IFFvE|E`dD1PeV0)-z +Yp`{+{M6a^D12osSMcrZiFZG-=s=z=hH*9E{z0q_x7#s#h-<{Nq$Xl91zjH^t9xH`bnPJN7{vsl9zet +SVT(jzK%C+2q{uf~La;J#m;HOU$0r1wa+e?LHxk9Gr|EB}144$d43--{;^ +jiZG5z9Tn253aPZu9>j8 +4&P%$8t1S>xQBAZQ74Tx~_U_{19%YpoFU$xqN6~dnOP3wopzF8f-dtR?2m*85LFx1RkC-w3wC16aknv +y_h^;0-F?_kCD`AUTI!w4I&uEva5}aeb-a2{zoP9u?YD(loo`J15^%obiA`a0cos|92hh#J@4AFtJLr +gX9aQcCl5;*&AN9-}*X6uozqJ4|8SaD2#o@tk6y)$N-g*~1jB%j+I{}{>e?a==~;m7t2 +#0om|f$K-k!~G=NN>&rOQUm9rNUpcTdzu#a-S*H+<)z^rQY(kSR$zOT=yt`qWxiJMy$*E8_$BViwwMgx)`ss@aC=CbTQ9^vRzmg2zK{ +&bQIc(hGfZqYm^!tMG^+E!9$FO+^-*9BPIWoTzYY^QR`*$&a3x6^QH1!n&uZLIj8huDQEqOZe|4)`2V +os3M%d8a3|6vcHnK@WyCk#`L-#rGE(!e^?vfD58h5JZyBT(pJj!5{PhfZyhX3b2qZp04w!QNwmorxY-R1%?Ak&k)|#f97!b58h0x20ChkWzRI&h +MpyQGO4>mq^^&M>E;VN4GFC`eNRR+N4=U0<<*L3qi~ZL+kM>L4_jGkLR6GCt_+xl|i9S2Y6)ZaFo7tj +KbOrxI^Aa%&rf5xjH796^y96Oq#ueG~Hy_m8YX(6UfYdRxZbi%M_wN-N=B!nIsg5y^&|KE3zID@p)RqWrT}dfOVNW_y|LaH?bpx9;!!>ZcPz2xqra+kK}*2h! 