From e46cd609a2c7499eae20323580b0c6e6a863c3fc Mon Sep 17 00:00:00 2001 From: babysor00 Date: Sat, 7 Aug 2021 11:56:00 +0800 Subject: [PATCH] Init to support Chinese Dataset. --- .gitattributes | 1 + .gitignore | 20 + LICENSE.txt | 24 + README-CN.md | 52 ++ README.md | 53 ++ demo_cli.py | 225 +++++++ demo_toolbox.py | 43 ++ encoder/__init__.py | 0 encoder/audio.py | 117 ++++ encoder/config.py | 45 ++ encoder/data_objects/__init__.py | 2 + encoder/data_objects/random_cycler.py | 37 ++ encoder/data_objects/speaker.py | 40 ++ encoder/data_objects/speaker_batch.py | 12 + .../speaker_verification_dataset.py | 56 ++ encoder/data_objects/utterance.py | 26 + encoder/inference.py | 178 +++++ encoder/model.py | 135 ++++ encoder/params_data.py | 29 + encoder/params_model.py | 11 + encoder/preprocess.py | 175 +++++ encoder/train.py | 123 ++++ encoder/visualizations.py | 178 +++++ encoder_preprocess.py | 70 ++ encoder_train.py | 47 ++ requirements.txt | 17 + samples/1320_00000.mp3 | Bin 0 -> 15453 bytes samples/3575_00000.mp3 | Bin 0 -> 15453 bytes samples/6829_00000.mp3 | Bin 0 -> 15561 bytes samples/8230_00000.mp3 | Bin 0 -> 16101 bytes samples/README.md | 22 + samples/VCTK.txt | 94 +++ samples/p240_00000.mp3 | Bin 0 -> 20205 bytes samples/p260_00000.mp3 | Bin 0 -> 20493 bytes synthesizer/LICENSE.txt | 24 + synthesizer/__init__.py | 1 + synthesizer/audio.py | 206 ++++++ synthesizer/hparams.py | 92 +++ synthesizer/inference.py | 171 +++++ synthesizer/models/tacotron.py | 519 +++++++++++++++ synthesizer/preprocess.py | 358 ++++++++++ synthesizer/synthesize.py | 97 +++ synthesizer/synthesizer_dataset.py | 92 +++ synthesizer/train.py | 268 ++++++++ synthesizer/utils/__init__.py | 45 ++ synthesizer/utils/_cmudict.py | 62 ++ synthesizer/utils/cleaners.py | 88 +++ synthesizer/utils/numbers.py | 68 ++ synthesizer/utils/plot.py | 76 +++ synthesizer/utils/symbols.py | 16 + synthesizer/utils/text.py | 74 +++ synthesizer_preprocess_audio.py | 60 ++ synthesizer_preprocess_embeds.py | 25 + synthesizer_train.py | 35 + toolbox/__init__.py | 359 ++++++++++ toolbox/ui.py | 611 ++++++++++++++++++ toolbox/utterance.py | 5 + utils/__init__.py | 0 utils/argutils.py | 40 ++ utils/logmmse.py | 247 +++++++ utils/modelutils.py | 17 + utils/profiler.py | 45 ++ vocoder/LICENSE.txt | 22 + vocoder/audio.py | 108 ++++ vocoder/display.py | 120 ++++ vocoder/distribution.py | 132 ++++ vocoder/gen_wavernn.py | 31 + vocoder/hparams.py | 44 ++ vocoder/inference.py | 64 ++ vocoder/models/deepmind_version.py | 170 +++++ vocoder/models/fatchord_version.py | 434 +++++++++++++ vocoder/train.py | 127 ++++ vocoder/vocoder_dataset.py | 84 +++ vocoder_preprocess.py | 59 ++ vocoder_train.py | 56 ++ 75 files changed, 6984 insertions(+) create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 LICENSE.txt create mode 100644 README-CN.md create mode 100644 README.md create mode 100644 demo_cli.py create mode 100644 demo_toolbox.py create mode 100644 encoder/__init__.py create mode 100644 encoder/audio.py create mode 100644 encoder/config.py create mode 100644 encoder/data_objects/__init__.py create mode 100644 encoder/data_objects/random_cycler.py create mode 100644 encoder/data_objects/speaker.py create mode 100644 encoder/data_objects/speaker_batch.py create mode 100644 encoder/data_objects/speaker_verification_dataset.py create mode 100644 encoder/data_objects/utterance.py create mode 100644 encoder/inference.py create mode 100644 encoder/model.py create mode 100644 encoder/params_data.py create mode 100644 
encoder/params_model.py create mode 100644 encoder/preprocess.py create mode 100644 encoder/train.py create mode 100644 encoder/visualizations.py create mode 100644 encoder_preprocess.py create mode 100644 encoder_train.py create mode 100644 requirements.txt create mode 100644 samples/1320_00000.mp3 create mode 100644 samples/3575_00000.mp3 create mode 100644 samples/6829_00000.mp3 create mode 100644 samples/8230_00000.mp3 create mode 100644 samples/README.md create mode 100644 samples/VCTK.txt create mode 100644 samples/p240_00000.mp3 create mode 100644 samples/p260_00000.mp3 create mode 100644 synthesizer/LICENSE.txt create mode 100644 synthesizer/__init__.py create mode 100644 synthesizer/audio.py create mode 100644 synthesizer/hparams.py create mode 100644 synthesizer/inference.py create mode 100644 synthesizer/models/tacotron.py create mode 100644 synthesizer/preprocess.py create mode 100644 synthesizer/synthesize.py create mode 100644 synthesizer/synthesizer_dataset.py create mode 100644 synthesizer/train.py create mode 100644 synthesizer/utils/__init__.py create mode 100644 synthesizer/utils/_cmudict.py create mode 100644 synthesizer/utils/cleaners.py create mode 100644 synthesizer/utils/numbers.py create mode 100644 synthesizer/utils/plot.py create mode 100644 synthesizer/utils/symbols.py create mode 100644 synthesizer/utils/text.py create mode 100644 synthesizer_preprocess_audio.py create mode 100644 synthesizer_preprocess_embeds.py create mode 100644 synthesizer_train.py create mode 100644 toolbox/__init__.py create mode 100644 toolbox/ui.py create mode 100644 toolbox/utterance.py create mode 100644 utils/__init__.py create mode 100644 utils/argutils.py create mode 100644 utils/logmmse.py create mode 100644 utils/modelutils.py create mode 100644 utils/profiler.py create mode 100644 vocoder/LICENSE.txt create mode 100644 vocoder/audio.py create mode 100644 vocoder/display.py create mode 100644 vocoder/distribution.py create mode 100644 vocoder/gen_wavernn.py create mode 100644 vocoder/hparams.py create mode 100644 vocoder/inference.py create mode 100644 vocoder/models/deepmind_version.py create mode 100644 vocoder/models/fatchord_version.py create mode 100644 vocoder/train.py create mode 100644 vocoder/vocoder_dataset.py create mode 100644 vocoder_preprocess.py create mode 100644 vocoder_train.py diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..5be91f9 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.ipynb linguist-vendored diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9401d2e --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +*.pyc +*.aux +*.log +*.out +*.synctex.gz +*.suo +*__pycache__ +*.idea +*.ipynb_checkpoints +*.pickle +*.npy +*.blg +*.bbl +*.bcf +*.toc +*.wav +*.sh +encoder/saved_models/* +synthesizer/saved_models/* +vocoder/saved_models/* diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..5ed721b --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,24 @@ +MIT License + +Modified & original work Copyright (c) 2019 Corentin Jemine (https://github.com/CorentinJ) +Original work Copyright (c) 2018 Rayhane Mama (https://github.com/Rayhane-mamah) +Original work Copyright (c) 2019 fatchord (https://github.com/fatchord) +Original work Copyright (c) 2015 braindead (https://github.com/braindead) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without 
limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README-CN.md b/README-CN.md
new file mode 100644
index 0000000..f837eaa
--- /dev/null
+++ b/README-CN.md
@@ -0,0 +1,52 @@
+## 实时语音克隆 - 中文/普通话
+![WechatIMG2968](https://user-images.githubusercontent.com/7423248/128490653-f55fefa8-f944-4617-96b8-5cc94f14f8f6.png)
+
+[![MIT License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://choosealicense.com/licenses/mit/)
+> 该库是从仅支持英语的 [Real-Time-Voice-Cloning](https://github.com/CorentinJ/Real-Time-Voice-Cloning) 分叉出来的。
+
+### [English](README.md) | 中文
+
+## 特性
+🌍 **中文** 支持普通话,并已使用 aidatatang_200zh 数据集测试
+
+🤩 **PyTorch** 适用于 PyTorch,已在 1.9.0 版本(2021 年 8 月最新版)中测试,GPU 为 Tesla T4 和 GTX 2060
+
+🌍 **Windows + Linux** 在修复若干小问题后,已在 Windows 和 Linux 操作系统中测试通过
+
+🤩 **Easy & Awesome** 仅重新训练合成器(synthesizer)即可获得良好效果,复用预训练的编码器(encoder)和声码器(vocoder)
+
+## 快速开始
+
+### 1. 安装要求
+> 按照原始存储库的说明,检查您的环境是否已全部就绪。
+需要 **Python 3.7 或更高版本** 才能运行工具箱。
+
+* 安装 [PyTorch](https://pytorch.org/get-started/locally/)。
+* 安装 [ffmpeg](https://ffmpeg.org/download.html#get-packages)。
+* 运行 `pip install -r requirements.txt` 来安装剩余的必要包。
+
+
+### 2. 使用 aidatatang_200zh 训练合成器
+* 下载 aidatatang_200zh 数据集并解压:确保您可以访问 *train* 文件夹中的所有 .wav 文件
+* 预处理音频并生成梅尔频谱图:
+`python synthesizer_preprocess_audio.py <datasets_root>`
+
+* 预处理嵌入(embeddings):
+`python synthesizer_preprocess_embeds.py <datasets_root>/SV2TTS/synthesizer`
+
+* 训练合成器:
+`python synthesizer_train.py mandarin <datasets_root>/SV2TTS/synthesizer`
+
+* 当您在训练输出文件夹 *synthesizer/saved_models/* 中看到注意力(attention)对齐线出现、且损失(loss)降到满足需要时,再进行下一步。
+> 仅供参考:我的注意力对齐线在 18k 步之后出现,损失在 50k 步之后降到 0.4 以下。
+
+
+### 3. 启动工具箱
+然后您可以尝试使用工具箱:
+`python demo_toolbox.py -d <datasets_root>`
+
+## TODO
+- 添加演示视频
+- 添加对更多数据集的支持
+- 上传预训练模型
+- 🙏 欢迎补充
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..cbdabfe
--- /dev/null
+++ b/README.md
@@ -0,0 +1,53 @@
+![WechatIMG2968](https://user-images.githubusercontent.com/7423248/128490653-f55fefa8-f944-4617-96b8-5cc94f14f8f6.png)
+
+[![MIT License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://choosealicense.com/licenses/mit/)
+> This repository is forked from [Real-Time-Voice-Cloning](https://github.com/CorentinJ/Real-Time-Voice-Cloning), which only supports English.
+
+> English | [中文](README-CN.md)
+
+## Features
+🌍 **Chinese**: supports Mandarin, tested with the aidatatang_200zh dataset
+
+🤩 **PyTorch**: works with PyTorch, tested on version 1.9.0 (the latest as of August 2021) with Tesla T4 and GTX 2060 GPUs
+
+🌍 **Windows + Linux**: tested on both Windows and Linux after fixing minor issues
+
+🤩 **Easy & Awesome**: good results with only a newly trained synthesizer, reusing the pretrained encoder/vocoder
+
+## Quick Start
+
+### 1. Install Requirements
+> Follow the original repo to check that your whole environment is ready; a minimal check is sketched below.
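A quick sanity check along these lines (a sketch, not part of this repository) confirms that PyTorch and ffmpeg are reachable before you continue:

```python
# Hypothetical pre-flight check: verify that PyTorch imports, report CUDA
# visibility, and confirm ffmpeg is on the PATH (needed for mp3 support).
import shutil
import torch

print("PyTorch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("ffmpeg on PATH:", shutil.which("ffmpeg") is not None)
```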
+**Python 3.7 or higher ** is needed to run the toolbox. + +* Install [PyTorch](https://pytorch.org/get-started/locally/). +* Install [ffmpeg](https://ffmpeg.org/download.html#get-packages). +* Run `pip install -r requirements.txt` to install the remaining necessary packages. + + +### 2. Train synthesizer with aidatatang_200zh +* Download aidatatang_200zh dataset and unzip: make sure you can access all .wav in *train* folder +* Preprocess with the audios and the mel spectrograms: +`python synthesizer_preprocess_audio.py ` + +* Preprocess the embeddings: +`python synthesizer_preprocess_embeds.py /SV2TTS/synthesizer` + +* Train the synthesizer: +`python synthesizer_train.py mandarin /SV2TTS/synthesizer` + +* Go to next step when you see attention line show and loss meet your need in training folder *synthesizer/saved_models/*. +> FYI, my attention came after 18k steps and loss became lower than 0.4 after 50k steps. + +### 3. Launch the Toolbox +You can then try the toolbox: + +`python demo_toolbox.py -d ` +or +`python demo_toolbox.py` + +## TODO +- Add demo video +- Add support for more dataset +- Upload pretrained model +- 🙏 Welcome to add more diff --git a/demo_cli.py b/demo_cli.py new file mode 100644 index 0000000..c7309e8 --- /dev/null +++ b/demo_cli.py @@ -0,0 +1,225 @@ +from encoder.params_model import model_embedding_size as speaker_embedding_size +from utils.argutils import print_args +from utils.modelutils import check_model_paths +from synthesizer.inference import Synthesizer +from encoder import inference as encoder +from vocoder import inference as vocoder +from pathlib import Path +import numpy as np +import soundfile as sf +import librosa +import argparse +import torch +import sys +import os +from audioread.exceptions import NoBackendError + +if __name__ == '__main__': + ## Info & args + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("-e", "--enc_model_fpath", type=Path, + default="encoder/saved_models/pretrained.pt", + help="Path to a saved encoder") + parser.add_argument("-s", "--syn_model_fpath", type=Path, + default="synthesizer/saved_models/pretrained/pretrained.pt", + help="Path to a saved synthesizer") + parser.add_argument("-v", "--voc_model_fpath", type=Path, + default="vocoder/saved_models/pretrained/pretrained.pt", + help="Path to a saved vocoder") + parser.add_argument("--cpu", action="store_true", help=\ + "If True, processing is done on CPU, even when a GPU is available.") + parser.add_argument("--no_sound", action="store_true", help=\ + "If True, audio won't be played.") + parser.add_argument("--seed", type=int, default=None, help=\ + "Optional random number seed value to make toolbox deterministic.") + parser.add_argument("--no_mp3_support", action="store_true", help=\ + "If True, disallows loading mp3 files to prevent audioread errors when ffmpeg is not installed.") + args = parser.parse_args() + print_args(args, parser) + if not args.no_sound: + import sounddevice as sd + + if args.cpu: + # Hide GPUs from Pytorch to force CPU processing + os.environ["CUDA_VISIBLE_DEVICES"] = "" + + if not args.no_mp3_support: + try: + librosa.load("samples/1320_00000.mp3") + except NoBackendError: + print("Librosa will be unable to open mp3 files if additional software is not installed.\n" + "Please install ffmpeg or add the '--no_mp3_support' option to proceed without support for mp3 files.") + exit(-1) + + print("Running a test of your configuration...\n") + + if torch.cuda.is_available(): + device_id = 
torch.cuda.current_device() + gpu_properties = torch.cuda.get_device_properties(device_id) + ## Print some environment information (for debugging purposes) + print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with " + "%.1fGb total memory.\n" % + (torch.cuda.device_count(), + device_id, + gpu_properties.name, + gpu_properties.major, + gpu_properties.minor, + gpu_properties.total_memory / 1e9)) + else: + print("Using CPU for inference.\n") + + ## Remind the user to download pretrained models if needed + check_model_paths(encoder_path=args.enc_model_fpath, + synthesizer_path=args.syn_model_fpath, + vocoder_path=args.voc_model_fpath) + + ## Load the models one by one. + print("Preparing the encoder, the synthesizer and the vocoder...") + encoder.load_model(args.enc_model_fpath) + synthesizer = Synthesizer(args.syn_model_fpath) + vocoder.load_model(args.voc_model_fpath) + + + ## Run a test + print("Testing your configuration with small inputs.") + # Forward an audio waveform of zeroes that lasts 1 second. Notice how we can get the encoder's + # sampling rate, which may differ. + # If you're unfamiliar with digital audio, know that it is encoded as an array of floats + # (or sometimes integers, but mostly floats in this projects) ranging from -1 to 1. + # The sampling rate is the number of values (samples) recorded per second, it is set to + # 16000 for the encoder. Creating an array of length will always correspond + # to an audio of 1 second. + print("\tTesting the encoder...") + encoder.embed_utterance(np.zeros(encoder.sampling_rate)) + + # Create a dummy embedding. You would normally use the embedding that encoder.embed_utterance + # returns, but here we're going to make one ourselves just for the sake of showing that it's + # possible. + embed = np.random.rand(speaker_embedding_size) + # Embeddings are L2-normalized (this isn't important here, but if you want to make your own + # embeddings it will be). + embed /= np.linalg.norm(embed) + # The synthesizer can handle multiple inputs with batching. Let's create another embedding to + # illustrate that + embeds = [embed, np.zeros(speaker_embedding_size)] + texts = ["test 1", "test 2"] + print("\tTesting the synthesizer... (loading the model will output a lot of text)") + mels = synthesizer.synthesize_spectrograms(texts, embeds) + + # The vocoder synthesizes one waveform at a time, but it's more efficient for long ones. We + # can concatenate the mel spectrograms to a single one. + mel = np.concatenate(mels, axis=1) + # The vocoder can take a callback function to display the generation. More on that later. For + # now we'll simply hide it like this: + no_action = lambda *args: None + print("\tTesting the vocoder...") + # For the sake of making this test short, we'll pass a short target length. The target length + # is the length of the wav segments that are processed in parallel. E.g. for audio sampled + # at 16000 Hertz, a target length of 8000 means that the target audio will be cut in chunks of + # 0.5 seconds which will all be generated together. The parameters here are absurdly short, and + # that has a detrimental effect on the quality of the audio. The default parameters are + # recommended in general. + vocoder.infer_waveform(mel, target=200, overlap=50, progress_callback=no_action) + + print("All test passed! You can now synthesize speech.\n\n") + + + ## Interactive speech generation + print("This is a GUI-less example of interface to SV2TTS. 
The purpose of this script is to " + "show how you can interface this project easily with your own. See the source code for " + "an explanation of what is happening.\n") + + print("Interactive generation loop") + num_generated = 0 + while True: + try: + # Get the reference audio filepath + message = "Reference voice: enter an audio filepath of a voice to be cloned (mp3, " \ + "wav, m4a, flac, ...):\n" + in_fpath = Path(input(message).replace("\"", "").replace("\'", "")) + + if in_fpath.suffix.lower() == ".mp3" and args.no_mp3_support: + print("Can't Use mp3 files please try again:") + continue + ## Computing the embedding + # First, we load the wav using the function that the speaker encoder provides. This is + # important: there is preprocessing that must be applied. + + # The following two methods are equivalent: + # - Directly load from the filepath: + preprocessed_wav = encoder.preprocess_wav(in_fpath) + # - If the wav is already loaded: + original_wav, sampling_rate = librosa.load(str(in_fpath)) + preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate) + print("Loaded file succesfully") + + # Then we derive the embedding. There are many functions and parameters that the + # speaker encoder interfaces. These are mostly for in-depth research. You will typically + # only use this function (with its default parameters): + embed = encoder.embed_utterance(preprocessed_wav) + print("Created the embedding") + + + ## Generating the spectrogram + text = input("Write a sentence (+-20 words) to be synthesized:\n") + + # If seed is specified, reset torch seed and force synthesizer reload + if args.seed is not None: + torch.manual_seed(args.seed) + synthesizer = Synthesizer(args.syn_model_fpath) + + # The synthesizer works in batch, so you need to put your data in a list or numpy array + texts = [text] + embeds = [embed] + # If you know what the attention layer alignments are, you can retrieve them here by + # passing return_alignments=True + specs = synthesizer.synthesize_spectrograms(texts, embeds) + spec = specs[0] + print("Created the mel spectrogram") + + + ## Generating the waveform + print("Synthesizing the waveform:") + + # If seed is specified, reset torch seed and reload vocoder + if args.seed is not None: + torch.manual_seed(args.seed) + vocoder.load_model(args.voc_model_fpath) + + # Synthesizing the waveform is fairly straightforward. Remember that the longer the + # spectrogram, the more time-efficient the vocoder. + generated_wav = vocoder.infer_waveform(spec) + + + ## Post-generation + # There's a bug with sounddevice that makes the audio cut one second earlier, so we + # pad it. + generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant") + + # Trim excess silences to compensate for gaps in spectrograms (issue #53) + generated_wav = encoder.preprocess_wav(generated_wav) + + # Play the audio (non-blocking) + if not args.no_sound: + try: + sd.stop() + sd.play(generated_wav, synthesizer.sample_rate) + except sd.PortAudioError as e: + print("\nCaught exception: %s" % repr(e)) + print("Continuing without audio playback. 
Suppress this message with the \"--no_sound\" flag.\n") + except: + raise + + # Save it on the disk + filename = "demo_output_%02d.wav" % num_generated + print(generated_wav.dtype) + sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate) + num_generated += 1 + print("\nSaved output as %s\n\n" % filename) + + + except Exception as e: + print("Caught exception: %s" % repr(e)) + print("Restarting\n") diff --git a/demo_toolbox.py b/demo_toolbox.py new file mode 100644 index 0000000..d938031 --- /dev/null +++ b/demo_toolbox.py @@ -0,0 +1,43 @@ +from pathlib import Path +from toolbox import Toolbox +from utils.argutils import print_args +from utils.modelutils import check_model_paths +import argparse +import os + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="Runs the toolbox", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument("-d", "--datasets_root", type=Path, help= \ + "Path to the directory containing your datasets. See toolbox/__init__.py for a list of " + "supported datasets.", default=None) + parser.add_argument("-e", "--enc_models_dir", type=Path, default="encoder/saved_models", + help="Directory containing saved encoder models") + parser.add_argument("-s", "--syn_models_dir", type=Path, default="synthesizer/saved_models", + help="Directory containing saved synthesizer models") + parser.add_argument("-v", "--voc_models_dir", type=Path, default="vocoder/saved_models", + help="Directory containing saved vocoder models") + parser.add_argument("--cpu", action="store_true", help=\ + "If True, processing is done on CPU, even when a GPU is available.") + parser.add_argument("--seed", type=int, default=None, help=\ + "Optional random number seed value to make toolbox deterministic.") + parser.add_argument("--no_mp3_support", action="store_true", help=\ + "If True, no mp3 files are allowed.") + args = parser.parse_args() + print_args(args, parser) + + if args.cpu: + # Hide GPUs from Pytorch to force CPU processing + os.environ["CUDA_VISIBLE_DEVICES"] = "" + del args.cpu + + ## Remind the user to download pretrained models if needed + check_model_paths(encoder_path=args.enc_models_dir, synthesizer_path=args.syn_models_dir, + vocoder_path=args.voc_models_dir) + + # Launch the toolbox + Toolbox(**vars(args)) diff --git a/encoder/__init__.py b/encoder/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/encoder/audio.py b/encoder/audio.py new file mode 100644 index 0000000..799aa83 --- /dev/null +++ b/encoder/audio.py @@ -0,0 +1,117 @@ +from scipy.ndimage.morphology import binary_dilation +from encoder.params_data import * +from pathlib import Path +from typing import Optional, Union +from warnings import warn +import numpy as np +import librosa +import struct + +try: + import webrtcvad +except: + warn("Unable to import 'webrtcvad'. This package enables noise removal and is recommended.") + webrtcvad=None + +int16_max = (2 ** 15) - 1 + + +def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray], + source_sr: Optional[int] = None, + normalize: Optional[bool] = True, + trim_silence: Optional[bool] = True): + """ + Applies the preprocessing operations used in training the Speaker Encoder to a waveform + either on disk or in memory. The waveform will be resampled to match the data hyperparameters. + + :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not + just .wav), either the waveform as a numpy array of floats. 
+ :param source_sr: if passing an audio waveform, the sampling rate of the waveform before + preprocessing. After preprocessing, the waveform's sampling rate will match the data + hyperparameters. If passing a filepath, the sampling rate will be automatically detected and + this argument will be ignored. + """ + # Load the wav from disk if needed + if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): + wav, source_sr = librosa.load(str(fpath_or_wav), sr=None) + else: + wav = fpath_or_wav + + # Resample the wav if needed + if source_sr is not None and source_sr != sampling_rate: + wav = librosa.resample(wav, source_sr, sampling_rate) + + # Apply the preprocessing: normalize volume and shorten long silences + if normalize: + wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True) + if webrtcvad and trim_silence: + wav = trim_long_silences(wav) + + return wav + + +def wav_to_mel_spectrogram(wav): + """ + Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform. + Note: this not a log-mel spectrogram. + """ + frames = librosa.feature.melspectrogram( + wav, + sampling_rate, + n_fft=int(sampling_rate * mel_window_length / 1000), + hop_length=int(sampling_rate * mel_window_step / 1000), + n_mels=mel_n_channels + ) + return frames.astype(np.float32).T + + +def trim_long_silences(wav): + """ + Ensures that segments without voice in the waveform remain no longer than a + threshold determined by the VAD parameters in params.py. + + :param wav: the raw waveform as a numpy array of floats + :return: the same waveform with silences trimmed away (length <= original wav length) + """ + # Compute the voice detection window size + samples_per_window = (vad_window_length * sampling_rate) // 1000 + + # Trim the end of the audio to have a multiple of the window size + wav = wav[:len(wav) - (len(wav) % samples_per_window)] + + # Convert the float waveform to 16-bit mono PCM + pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) + + # Perform voice activation detection + voice_flags = [] + vad = webrtcvad.Vad(mode=3) + for window_start in range(0, len(wav), samples_per_window): + window_end = window_start + samples_per_window + voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], + sample_rate=sampling_rate)) + voice_flags = np.array(voice_flags) + + # Smooth the voice detection with a moving average + def moving_average(array, width): + array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) + ret = np.cumsum(array_padded, dtype=float) + ret[width:] = ret[width:] - ret[:-width] + return ret[width - 1:] / width + + audio_mask = moving_average(voice_flags, vad_moving_average_width) + audio_mask = np.round(audio_mask).astype(np.bool) + + # Dilate the voiced regions + audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) + audio_mask = np.repeat(audio_mask, samples_per_window) + + return wav[audio_mask == True] + + +def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False): + if increase_only and decrease_only: + raise ValueError("Both increase only and decrease only are set") + dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2)) + if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only): + return wav + return wav * (10 ** (dBFS_change / 20)) diff --git a/encoder/config.py b/encoder/config.py new file mode 100644 index 0000000..1c21312 --- /dev/null +++ b/encoder/config.py @@ -0,0 
+1,45 @@ +librispeech_datasets = { + "train": { + "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"], + "other": ["LibriSpeech/train-other-500"] + }, + "test": { + "clean": ["LibriSpeech/test-clean"], + "other": ["LibriSpeech/test-other"] + }, + "dev": { + "clean": ["LibriSpeech/dev-clean"], + "other": ["LibriSpeech/dev-other"] + }, +} +libritts_datasets = { + "train": { + "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"], + "other": ["LibriTTS/train-other-500"] + }, + "test": { + "clean": ["LibriTTS/test-clean"], + "other": ["LibriTTS/test-other"] + }, + "dev": { + "clean": ["LibriTTS/dev-clean"], + "other": ["LibriTTS/dev-other"] + }, +} +voxceleb_datasets = { + "voxceleb1" : { + "train": ["VoxCeleb1/wav"], + "test": ["VoxCeleb1/test_wav"] + }, + "voxceleb2" : { + "train": ["VoxCeleb2/dev/aac"], + "test": ["VoxCeleb2/test_wav"] + } +} + +other_datasets = [ + "LJSpeech-1.1", + "VCTK-Corpus/wav48", +] + +anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"] diff --git a/encoder/data_objects/__init__.py b/encoder/data_objects/__init__.py new file mode 100644 index 0000000..ef04ade --- /dev/null +++ b/encoder/data_objects/__init__.py @@ -0,0 +1,2 @@ +from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset +from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader diff --git a/encoder/data_objects/random_cycler.py b/encoder/data_objects/random_cycler.py new file mode 100644 index 0000000..c405db6 --- /dev/null +++ b/encoder/data_objects/random_cycler.py @@ -0,0 +1,37 @@ +import random + +class RandomCycler: + """ + Creates an internal copy of a sequence and allows access to its items in a constrained random + order. For a source sequence of n items and one or several consecutive queries of a total + of m items, the following guarantees hold (one implies the other): + - Each item will be returned between m // n and ((m - 1) // n) + 1 times. + - Between two appearances of the same item, there may be at most 2 * (n - 1) other items. 
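+    As a concrete illustration (hypothetical numbers): with a source of 3 items and a single
+    query of sample(5), every item is returned either once or twice, and at most 4 other items
+    separate two appearances of the same item.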
+ """ + + def __init__(self, source): + if len(source) == 0: + raise Exception("Can't create RandomCycler from an empty collection") + self.all_items = list(source) + self.next_items = [] + + def sample(self, count: int): + shuffle = lambda l: random.sample(l, len(l)) + + out = [] + while count > 0: + if count >= len(self.all_items): + out.extend(shuffle(list(self.all_items))) + count -= len(self.all_items) + continue + n = min(count, len(self.next_items)) + out.extend(self.next_items[:n]) + count -= n + self.next_items = self.next_items[n:] + if len(self.next_items) == 0: + self.next_items = shuffle(list(self.all_items)) + return out + + def __next__(self): + return self.sample(1)[0] + diff --git a/encoder/data_objects/speaker.py b/encoder/data_objects/speaker.py new file mode 100644 index 0000000..494e882 --- /dev/null +++ b/encoder/data_objects/speaker.py @@ -0,0 +1,40 @@ +from encoder.data_objects.random_cycler import RandomCycler +from encoder.data_objects.utterance import Utterance +from pathlib import Path + +# Contains the set of utterances of a single speaker +class Speaker: + def __init__(self, root: Path): + self.root = root + self.name = root.name + self.utterances = None + self.utterance_cycler = None + + def _load_utterances(self): + with self.root.joinpath("_sources.txt").open("r") as sources_file: + sources = [l.split(",") for l in sources_file] + sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources} + self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()] + self.utterance_cycler = RandomCycler(self.utterances) + + def random_partial(self, count, n_frames): + """ + Samples a batch of unique partial utterances from the disk in a way that all + utterances come up at least once every two cycles and in a random order every time. + + :param count: The number of partial utterances to sample from the set of utterances from + that speaker. Utterances are guaranteed not to be repeated if is not larger than + the number of utterances available. + :param n_frames: The number of frames in the partial utterance. + :return: A list of tuples (utterance, frames, range) where utterance is an Utterance, + frames are the frames of the partial utterances and range is the range of the partial + utterance with regard to the complete utterance. + """ + if self.utterances is None: + self._load_utterances() + + utterances = self.utterance_cycler.sample(count) + + a = [(u,) + u.random_partial(n_frames) for u in utterances] + + return a diff --git a/encoder/data_objects/speaker_batch.py b/encoder/data_objects/speaker_batch.py new file mode 100644 index 0000000..56651db --- /dev/null +++ b/encoder/data_objects/speaker_batch.py @@ -0,0 +1,12 @@ +import numpy as np +from typing import List +from encoder.data_objects.speaker import Speaker + +class SpeakerBatch: + def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int): + self.speakers = speakers + self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers} + + # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. 
for 3 speakers with + # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40) + self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]]) diff --git a/encoder/data_objects/speaker_verification_dataset.py b/encoder/data_objects/speaker_verification_dataset.py new file mode 100644 index 0000000..77a6e05 --- /dev/null +++ b/encoder/data_objects/speaker_verification_dataset.py @@ -0,0 +1,56 @@ +from encoder.data_objects.random_cycler import RandomCycler +from encoder.data_objects.speaker_batch import SpeakerBatch +from encoder.data_objects.speaker import Speaker +from encoder.params_data import partials_n_frames +from torch.utils.data import Dataset, DataLoader +from pathlib import Path + +# TODO: improve with a pool of speakers for data efficiency + +class SpeakerVerificationDataset(Dataset): + def __init__(self, datasets_root: Path): + self.root = datasets_root + speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()] + if len(speaker_dirs) == 0: + raise Exception("No speakers found. Make sure you are pointing to the directory " + "containing all preprocessed speaker directories.") + self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs] + self.speaker_cycler = RandomCycler(self.speakers) + + def __len__(self): + return int(1e10) + + def __getitem__(self, index): + return next(self.speaker_cycler) + + def get_logs(self): + log_string = "" + for log_fpath in self.root.glob("*.txt"): + with log_fpath.open("r") as log_file: + log_string += "".join(log_file.readlines()) + return log_string + + +class SpeakerVerificationDataLoader(DataLoader): + def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, + batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, + worker_init_fn=None): + self.utterances_per_speaker = utterances_per_speaker + + super().__init__( + dataset=dataset, + batch_size=speakers_per_batch, + shuffle=False, + sampler=sampler, + batch_sampler=batch_sampler, + num_workers=num_workers, + collate_fn=self.collate, + pin_memory=pin_memory, + drop_last=False, + timeout=timeout, + worker_init_fn=worker_init_fn + ) + + def collate(self, speakers): + return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames) + \ No newline at end of file diff --git a/encoder/data_objects/utterance.py b/encoder/data_objects/utterance.py new file mode 100644 index 0000000..0768c34 --- /dev/null +++ b/encoder/data_objects/utterance.py @@ -0,0 +1,26 @@ +import numpy as np + + +class Utterance: + def __init__(self, frames_fpath, wave_fpath): + self.frames_fpath = frames_fpath + self.wave_fpath = wave_fpath + + def get_frames(self): + return np.load(self.frames_fpath) + + def random_partial(self, n_frames): + """ + Crops the frames into a partial utterance of n_frames + + :param n_frames: The number of frames of the partial utterance + :return: the partial utterance frames and a tuple indicating the start and end of the + partial utterance in the complete utterance. 
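+        For example (illustrative values): for frames of shape (250, 40) and n_frames=160, a
+        start index is drawn from [0, 90) and frames[start:start + 160] is returned together
+        with the tuple (start, start + 160).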
+ """ + frames = self.get_frames() + if frames.shape[0] == n_frames: + start = 0 + else: + start = np.random.randint(0, frames.shape[0] - n_frames) + end = start + n_frames + return frames[start:end], (start, end) \ No newline at end of file diff --git a/encoder/inference.py b/encoder/inference.py new file mode 100644 index 0000000..4ca417b --- /dev/null +++ b/encoder/inference.py @@ -0,0 +1,178 @@ +from encoder.params_data import * +from encoder.model import SpeakerEncoder +from encoder.audio import preprocess_wav # We want to expose this function from here +from matplotlib import cm +from encoder import audio +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np +import torch + +_model = None # type: SpeakerEncoder +_device = None # type: torch.device + + +def load_model(weights_fpath: Path, device=None): + """ + Loads the model in memory. If this function is not explicitely called, it will be run on the + first call to embed_frames() with the default weights file. + + :param weights_fpath: the path to saved model weights. + :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The + model will be loaded and will run on this device. Outputs will however always be on the cpu. + If None, will default to your GPU if it"s available, otherwise your CPU. + """ + # TODO: I think the slow loading of the encoder might have something to do with the device it + # was saved on. Worth investigating. + global _model, _device + if device is None: + _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + elif isinstance(device, str): + _device = torch.device(device) + _model = SpeakerEncoder(_device, torch.device("cpu")) + checkpoint = torch.load(weights_fpath, _device) + _model.load_state_dict(checkpoint["model_state"]) + _model.eval() + print("Loaded encoder \"%s\" trained to step %d" % (weights_fpath.name, checkpoint["step"])) + + +def is_loaded(): + return _model is not None + + +def embed_frames_batch(frames_batch): + """ + Computes embeddings for a batch of mel spectrogram. + + :param frames_batch: a batch mel of spectrogram as a numpy array of float32 of shape + (batch_size, n_frames, n_channels) + :return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size) + """ + if _model is None: + raise Exception("Model was not loaded. Call load_model() before inference.") + + frames = torch.from_numpy(frames_batch).to(_device) + embed = _model.forward(frames).detach().cpu().numpy() + return embed + + +def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames, + min_pad_coverage=0.75, overlap=0.5): + """ + Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain + partial utterances of each. Both the waveform and the mel + spectrogram slices are returned, so as to make each partial utterance waveform correspond to + its spectrogram. This function assumes that the mel spectrogram parameters used are those + defined in params_data.py. + + The returned ranges may be indexing further than the length of the waveform. It is + recommended that you pad the waveform with zeros up to wave_slices[-1].stop. + + :param n_samples: the number of samples in the waveform + :param partial_utterance_n_frames: the number of mel spectrogram frames in each partial + utterance + :param min_pad_coverage: when reaching the last partial utterance, it may or may not have + enough frames. 
If at least of are present, + then the last partial utterance will be considered, as if we padded the audio. Otherwise, + it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial + utterance, this parameter is ignored so that the function always returns at least 1 slice. + :param overlap: by how much the partial utterance should overlap. If set to 0, the partial + utterances are entirely disjoint. + :return: the waveform slices and mel spectrogram slices as lists of array slices. Index + respectively the waveform and the mel spectrogram with these slices to obtain the partial + utterances. + """ + assert 0 <= overlap < 1 + assert 0 < min_pad_coverage <= 1 + + samples_per_frame = int((sampling_rate * mel_window_step / 1000)) + n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) + frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1) + + # Compute the slices + wav_slices, mel_slices = [], [] + steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1) + for i in range(0, steps, frame_step): + mel_range = np.array([i, i + partial_utterance_n_frames]) + wav_range = mel_range * samples_per_frame + mel_slices.append(slice(*mel_range)) + wav_slices.append(slice(*wav_range)) + + # Evaluate whether extra padding is warranted or not + last_wav_range = wav_slices[-1] + coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) + if coverage < min_pad_coverage and len(mel_slices) > 1: + mel_slices = mel_slices[:-1] + wav_slices = wav_slices[:-1] + + return wav_slices, mel_slices + + +def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs): + """ + Computes an embedding for a single utterance. + + # TODO: handle multiple wavs to benefit from batching on GPU + :param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32 + :param using_partials: if True, then the utterance is split in partial utterances of + frames and the utterance embedding is computed from their + normalized average. If False, the utterance is instead computed from feeding the entire + spectogram to the network. + :param return_partials: if True, the partial embeddings will also be returned along with the + wav slices that correspond to the partial embeddings. + :param kwargs: additional arguments to compute_partial_splits() + :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If + is True, the partial utterances as a numpy array of float32 of shape + (n_partials, model_embedding_size) and the wav partials as a list of slices will also be + returned. If is simultaneously set to False, both these values will be None + instead. 
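+    Typical use (illustrative only): wav = preprocess_wav(fpath); embed = embed_utterance(wav),
+    which is how demo_cli.py drives this module.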
+ """ + # Process the entire utterance if not using partials + if not using_partials: + frames = audio.wav_to_mel_spectrogram(wav) + embed = embed_frames_batch(frames[None, ...])[0] + if return_partials: + return embed, None, None + return embed + + # Compute where to split the utterance into partials and pad if necessary + wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs) + max_wave_length = wave_slices[-1].stop + if max_wave_length >= len(wav): + wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") + + # Split the utterance into partials + frames = audio.wav_to_mel_spectrogram(wav) + frames_batch = np.array([frames[s] for s in mel_slices]) + partial_embeds = embed_frames_batch(frames_batch) + + # Compute the utterance embedding from the partial embeddings + raw_embed = np.mean(partial_embeds, axis=0) + embed = raw_embed / np.linalg.norm(raw_embed, 2) + + if return_partials: + return embed, partial_embeds, wave_slices + return embed + + +def embed_speaker(wavs, **kwargs): + raise NotImplemented() + + +def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)): + if ax is None: + ax = plt.gca() + + if shape is None: + height = int(np.sqrt(len(embed))) + shape = (height, -1) + embed = embed.reshape(shape) + + cmap = cm.get_cmap() + mappable = ax.imshow(embed, cmap=cmap) + cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04) + sm = cm.ScalarMappable(cmap=cmap) + sm.set_clim(*color_range) + + ax.set_xticks([]), ax.set_yticks([]) + ax.set_title(title) diff --git a/encoder/model.py b/encoder/model.py new file mode 100644 index 0000000..e050d32 --- /dev/null +++ b/encoder/model.py @@ -0,0 +1,135 @@ +from encoder.params_model import * +from encoder.params_data import * +from scipy.interpolate import interp1d +from sklearn.metrics import roc_curve +from torch.nn.utils import clip_grad_norm_ +from scipy.optimize import brentq +from torch import nn +import numpy as np +import torch + + +class SpeakerEncoder(nn.Module): + def __init__(self, device, loss_device): + super().__init__() + self.loss_device = loss_device + + # Network defition + self.lstm = nn.LSTM(input_size=mel_n_channels, + hidden_size=model_hidden_size, + num_layers=model_num_layers, + batch_first=True).to(device) + self.linear = nn.Linear(in_features=model_hidden_size, + out_features=model_embedding_size).to(device) + self.relu = torch.nn.ReLU().to(device) + + # Cosine similarity scaling (with fixed initial parameter values) + self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device) + self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device) + + # Loss + self.loss_fn = nn.CrossEntropyLoss().to(loss_device) + + def do_gradient_ops(self): + # Gradient scale + self.similarity_weight.grad *= 0.01 + self.similarity_bias.grad *= 0.01 + + # Gradient clipping + clip_grad_norm_(self.parameters(), 3, norm_type=2) + + def forward(self, utterances, hidden_init=None): + """ + Computes the embeddings of a batch of utterance spectrograms. + + :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape + (batch_size, n_frames, n_channels) + :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers, + batch_size, hidden_size). Will default to a tensor of zeros if None. + :return: the embeddings as a tensor of shape (batch_size, embedding_size) + """ + # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state + # and the final cell state. 
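+        # With batch_first=True, `out` has shape (batch_size, n_frames, hidden_size), while
+        # `hidden` and `cell` have shape (num_layers, batch_size, hidden_size); only hidden[-1],
+        # the final state of the top layer, is projected into the embedding below.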
+ out, (hidden, cell) = self.lstm(utterances, hidden_init) + + # We take only the hidden state of the last layer + embeds_raw = self.relu(self.linear(hidden[-1])) + + # L2-normalize it + embeds = embeds_raw / (torch.norm(embeds_raw, dim=1, keepdim=True) + 1e-5) + + return embeds + + def similarity_matrix(self, embeds): + """ + Computes the similarity matrix according the section 2.1 of GE2E. + + :param embeds: the embeddings as a tensor of shape (speakers_per_batch, + utterances_per_speaker, embedding_size) + :return: the similarity matrix as a tensor of shape (speakers_per_batch, + utterances_per_speaker, speakers_per_batch) + """ + speakers_per_batch, utterances_per_speaker = embeds.shape[:2] + + # Inclusive centroids (1 per speaker). Cloning is needed for reverse differentiation + centroids_incl = torch.mean(embeds, dim=1, keepdim=True) + centroids_incl = centroids_incl.clone() / (torch.norm(centroids_incl, dim=2, keepdim=True) + 1e-5) + + # Exclusive centroids (1 per utterance) + centroids_excl = (torch.sum(embeds, dim=1, keepdim=True) - embeds) + centroids_excl /= (utterances_per_speaker - 1) + centroids_excl = centroids_excl.clone() / (torch.norm(centroids_excl, dim=2, keepdim=True) + 1e-5) + + # Similarity matrix. The cosine similarity of already 2-normed vectors is simply the dot + # product of these vectors (which is just an element-wise multiplication reduced by a sum). + # We vectorize the computation for efficiency. + sim_matrix = torch.zeros(speakers_per_batch, utterances_per_speaker, + speakers_per_batch).to(self.loss_device) + mask_matrix = 1 - np.eye(speakers_per_batch, dtype=np.int) + for j in range(speakers_per_batch): + mask = np.where(mask_matrix[j])[0] + sim_matrix[mask, :, j] = (embeds[mask] * centroids_incl[j]).sum(dim=2) + sim_matrix[j, :, j] = (embeds[j] * centroids_excl[j]).sum(dim=1) + + ## Even more vectorized version (slower maybe because of transpose) + # sim_matrix2 = torch.zeros(speakers_per_batch, speakers_per_batch, utterances_per_speaker + # ).to(self.loss_device) + # eye = np.eye(speakers_per_batch, dtype=np.int) + # mask = np.where(1 - eye) + # sim_matrix2[mask] = (embeds[mask[0]] * centroids_incl[mask[1]]).sum(dim=2) + # mask = np.where(eye) + # sim_matrix2[mask] = (embeds * centroids_excl).sum(dim=2) + # sim_matrix2 = sim_matrix2.transpose(1, 2) + + sim_matrix = sim_matrix * self.similarity_weight + self.similarity_bias + return sim_matrix + + def loss(self, embeds): + """ + Computes the softmax loss according the section 2.1 of GE2E. + + :param embeds: the embeddings as a tensor of shape (speakers_per_batch, + utterances_per_speaker, embedding_size) + :return: the loss and the EER for this batch of embeddings. + """ + speakers_per_batch, utterances_per_speaker = embeds.shape[:2] + + # Loss + sim_matrix = self.similarity_matrix(embeds) + sim_matrix = sim_matrix.reshape((speakers_per_batch * utterances_per_speaker, + speakers_per_batch)) + ground_truth = np.repeat(np.arange(speakers_per_batch), utterances_per_speaker) + target = torch.from_numpy(ground_truth).long().to(self.loss_device) + loss = self.loss_fn(sim_matrix, target) + + # EER (not backpropagated) + with torch.no_grad(): + inv_argmax = lambda i: np.eye(1, speakers_per_batch, i, dtype=np.int)[0] + labels = np.array([inv_argmax(i) for i in ground_truth]) + preds = sim_matrix.detach().cpu().numpy() + + # Snippet from https://yangcha.github.io/EER-ROC/ + fpr, tpr, thresholds = roc_curve(labels.flatten(), preds.flatten()) + eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.) 
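+            # The value above is the equal error rate: brentq finds the false positive rate x at
+            # which it equals the false negative rate 1 - interp1d(fpr, tpr)(x).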
+ + return loss, eer diff --git a/encoder/params_data.py b/encoder/params_data.py new file mode 100644 index 0000000..bdb1716 --- /dev/null +++ b/encoder/params_data.py @@ -0,0 +1,29 @@ + +## Mel-filterbank +mel_window_length = 25 # In milliseconds +mel_window_step = 10 # In milliseconds +mel_n_channels = 40 + + +## Audio +sampling_rate = 16000 +# Number of spectrogram frames in a partial utterance +partials_n_frames = 160 # 1600 ms +# Number of spectrogram frames at inference +inference_n_frames = 80 # 800 ms + + +## Voice Activation Detection +# Window size of the VAD. Must be either 10, 20 or 30 milliseconds. +# This sets the granularity of the VAD. Should not need to be changed. +vad_window_length = 30 # In milliseconds +# Number of frames to average together when performing the moving average smoothing. +# The larger this value, the larger the VAD variations must be to not get smoothed out. +vad_moving_average_width = 8 +# Maximum number of consecutive silent frames a segment can have. +vad_max_silence_length = 6 + + +## Audio volume normalization +audio_norm_target_dBFS = -30 + diff --git a/encoder/params_model.py b/encoder/params_model.py new file mode 100644 index 0000000..3e35647 --- /dev/null +++ b/encoder/params_model.py @@ -0,0 +1,11 @@ + +## Model parameters +model_hidden_size = 256 +model_embedding_size = 256 +model_num_layers = 3 + + +## Training parameters +learning_rate_init = 1e-4 +speakers_per_batch = 64 +utterances_per_speaker = 10 diff --git a/encoder/preprocess.py b/encoder/preprocess.py new file mode 100644 index 0000000..551a8b2 --- /dev/null +++ b/encoder/preprocess.py @@ -0,0 +1,175 @@ +from multiprocess.pool import ThreadPool +from encoder.params_data import * +from encoder.config import librispeech_datasets, anglophone_nationalites +from datetime import datetime +from encoder import audio +from pathlib import Path +from tqdm import tqdm +import numpy as np + + +class DatasetLog: + """ + Registers metadata about the dataset in a text file. 
+ """ + def __init__(self, root, name): + self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w") + self.sample_data = dict() + + start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) + self.write_line("Creating dataset %s on %s" % (name, start_time)) + self.write_line("-----") + self._log_params() + + def _log_params(self): + from encoder import params_data + self.write_line("Parameter values:") + for param_name in (p for p in dir(params_data) if not p.startswith("__")): + value = getattr(params_data, param_name) + self.write_line("\t%s: %s" % (param_name, value)) + self.write_line("-----") + + def write_line(self, line): + self.text_file.write("%s\n" % line) + + def add_sample(self, **kwargs): + for param_name, value in kwargs.items(): + if not param_name in self.sample_data: + self.sample_data[param_name] = [] + self.sample_data[param_name].append(value) + + def finalize(self): + self.write_line("Statistics:") + for param_name, values in self.sample_data.items(): + self.write_line("\t%s:" % param_name) + self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values))) + self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values))) + self.write_line("-----") + end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) + self.write_line("Finished on %s" % end_time) + self.text_file.close() + + +def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog): + dataset_root = datasets_root.joinpath(dataset_name) + if not dataset_root.exists(): + print("Couldn\'t find %s, skipping this dataset." % dataset_root) + return None, None + return dataset_root, DatasetLog(out_dir, dataset_name) + + +def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, extension, + skip_existing, logger): + print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs))) + + # Function to preprocess utterances for one speaker + def preprocess_speaker(speaker_dir: Path): + # Give a name to the speaker that includes its dataset + speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) + + # Create an output directory with that name, as well as a txt file containing a + # reference to each source file. + speaker_out_dir = out_dir.joinpath(speaker_name) + speaker_out_dir.mkdir(exist_ok=True) + sources_fpath = speaker_out_dir.joinpath("_sources.txt") + + # There's a possibility that the preprocessing was interrupted earlier, check if + # there already is a sources file. 
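+        # Filenames already listed in _sources.txt are collected so that, when skip_existing is
+        # set, utterances that were already converted to .npy are not preprocessed a second time.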
+ if sources_fpath.exists(): + try: + with sources_fpath.open("r") as sources_file: + existing_fnames = {line.split(",")[0] for line in sources_file} + except: + existing_fnames = {} + else: + existing_fnames = {} + + # Gather all audio files for that speaker recursively + sources_file = sources_fpath.open("a" if skip_existing else "w") + for in_fpath in speaker_dir.glob("**/*.%s" % extension): + # Check if the target output file already exists + out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) + out_fname = out_fname.replace(".%s" % extension, ".npy") + if skip_existing and out_fname in existing_fnames: + continue + + # Load and preprocess the waveform + wav = audio.preprocess_wav(in_fpath) + if len(wav) == 0: + continue + + # Create the mel spectrogram, discard those that are too short + frames = audio.wav_to_mel_spectrogram(wav) + if len(frames) < partials_n_frames: + continue + + out_fpath = speaker_out_dir.joinpath(out_fname) + np.save(out_fpath, frames) + logger.add_sample(duration=len(wav) / sampling_rate) + sources_file.write("%s,%s\n" % (out_fname, in_fpath)) + + sources_file.close() + + # Process the utterances for each speaker + with ThreadPool(8) as pool: + list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs), + unit="speakers")) + logger.finalize() + print("Done preprocessing %s.\n" % dataset_name) + + +def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False): + for dataset_name in librispeech_datasets["train"]["other"]: + # Initialize the preprocessing + dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) + if not dataset_root: + return + + # Preprocess all speakers + speaker_dirs = list(dataset_root.glob("*")) + _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "flac", + skip_existing, logger) + + +def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False): + # Initialize the preprocessing + dataset_name = "VoxCeleb1" + dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) + if not dataset_root: + return + + # Get the contents of the meta file + with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile: + metadata = [line.split("\t") for line in metafile][1:] + + # Select the ID and the nationality, filter out non-anglophone speakers + nationalities = {line[0]: line[3] for line in metadata} + keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if + nationality.lower() in anglophone_nationalites] + print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." % + (len(keep_speaker_ids), len(nationalities))) + + # Get the speaker directories for anglophone speakers only + speaker_dirs = dataset_root.joinpath("wav").glob("*") + speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if + speaker_dir.name in keep_speaker_ids] + print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." 
% + (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs))) + + # Preprocess all speakers + _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "wav", + skip_existing, logger) + + +def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False): + # Initialize the preprocessing + dataset_name = "VoxCeleb2" + dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) + if not dataset_root: + return + + # Get the speaker directories + # Preprocess all speakers + speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*")) + _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "m4a", + skip_existing, logger) diff --git a/encoder/train.py b/encoder/train.py new file mode 100644 index 0000000..619952e --- /dev/null +++ b/encoder/train.py @@ -0,0 +1,123 @@ +from encoder.visualizations import Visualizations +from encoder.data_objects import SpeakerVerificationDataLoader, SpeakerVerificationDataset +from encoder.params_model import * +from encoder.model import SpeakerEncoder +from utils.profiler import Profiler +from pathlib import Path +import torch + +def sync(device: torch.device): + # For correct profiling (cuda operations are async) + if device.type == "cuda": + torch.cuda.synchronize(device) + + +def train(run_id: str, clean_data_root: Path, models_dir: Path, umap_every: int, save_every: int, + backup_every: int, vis_every: int, force_restart: bool, visdom_server: str, + no_visdom: bool): + # Create a dataset and a dataloader + dataset = SpeakerVerificationDataset(clean_data_root) + loader = SpeakerVerificationDataLoader( + dataset, + speakers_per_batch, + utterances_per_speaker, + num_workers=8, + ) + + # Setup the device on which to run the forward pass and the loss. These can be different, + # because the forward pass is faster on the GPU whereas the loss is often (depending on your + # hyperparameters) faster on the CPU. + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + # FIXME: currently, the gradient is None if loss_device is cuda + loss_device = torch.device("cpu") + + # Create the model and the optimizer + model = SpeakerEncoder(device, loss_device) + optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init) + init_step = 1 + + # Configure file path for the model + state_fpath = models_dir.joinpath(run_id + ".pt") + backup_dir = models_dir.joinpath(run_id + "_backups") + + # Load any existing model + if not force_restart: + if state_fpath.exists(): + print("Found existing model \"%s\", loading it and resuming training." % run_id) + checkpoint = torch.load(state_fpath) + init_step = checkpoint["step"] + model.load_state_dict(checkpoint["model_state"]) + optimizer.load_state_dict(checkpoint["optimizer_state"]) + optimizer.param_groups[0]["lr"] = learning_rate_init + else: + print("No model \"%s\" found, starting training from scratch." 
% run_id) + else: + print("Starting the training from scratch.") + model.train() + + # Initialize the visualization environment + vis = Visualizations(run_id, vis_every, server=visdom_server, disabled=no_visdom) + vis.log_dataset(dataset) + vis.log_params() + device_name = str(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU") + vis.log_implementation({"Device": device_name}) + + # Training loop + profiler = Profiler(summarize_every=10, disabled=False) + for step, speaker_batch in enumerate(loader, init_step): + profiler.tick("Blocking, waiting for batch (threaded)") + + # Forward pass + inputs = torch.from_numpy(speaker_batch.data).to(device) + sync(device) + profiler.tick("Data to %s" % device) + embeds = model(inputs) + sync(device) + profiler.tick("Forward pass") + embeds_loss = embeds.view((speakers_per_batch, utterances_per_speaker, -1)).to(loss_device) + loss, eer = model.loss(embeds_loss) + sync(loss_device) + profiler.tick("Loss") + + # Backward pass + model.zero_grad() + loss.backward() + profiler.tick("Backward pass") + model.do_gradient_ops() + optimizer.step() + profiler.tick("Parameter update") + + # Update visualizations + # learning_rate = optimizer.param_groups[0]["lr"] + vis.update(loss.item(), eer, step) + + # Draw projections and save them to the backup folder + if umap_every != 0 and step % umap_every == 0: + print("Drawing and saving projections (step %d)" % step) + backup_dir.mkdir(exist_ok=True) + projection_fpath = backup_dir.joinpath("%s_umap_%06d.png" % (run_id, step)) + embeds = embeds.detach().cpu().numpy() + vis.draw_projections(embeds, utterances_per_speaker, step, projection_fpath) + vis.save() + + # Overwrite the latest version of the model + if save_every != 0 and step % save_every == 0: + print("Saving the model (step %d)" % step) + torch.save({ + "step": step + 1, + "model_state": model.state_dict(), + "optimizer_state": optimizer.state_dict(), + }, state_fpath) + + # Make a backup + if backup_every != 0 and step % backup_every == 0: + print("Making a backup (step %d)" % step) + backup_dir.mkdir(exist_ok=True) + backup_fpath = backup_dir.joinpath("%s_bak_%06d.pt" % (run_id, step)) + torch.save({ + "step": step + 1, + "model_state": model.state_dict(), + "optimizer_state": optimizer.state_dict(), + }, backup_fpath) + + profiler.tick("Extras (visualizations, saving)") diff --git a/encoder/visualizations.py b/encoder/visualizations.py new file mode 100644 index 0000000..980c74f --- /dev/null +++ b/encoder/visualizations.py @@ -0,0 +1,178 @@ +from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset +from datetime import datetime +from time import perf_counter as timer +import matplotlib.pyplot as plt +import numpy as np +# import webbrowser +import visdom +import umap + +colormap = np.array([ + [76, 255, 0], + [0, 127, 70], + [255, 0, 0], + [255, 217, 38], + [0, 135, 255], + [165, 0, 165], + [255, 167, 255], + [0, 255, 255], + [255, 96, 38], + [142, 76, 0], + [33, 0, 127], + [0, 0, 0], + [183, 183, 183], +], dtype=np.float) / 255 + + +class Visualizations: + def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False): + # Tracking data + self.last_update_timestamp = timer() + self.update_every = update_every + self.step_times = [] + self.losses = [] + self.eers = [] + print("Updating the visualizations every %d steps." 
% update_every) + + # If visdom is disabled TODO: use a better paradigm for that + self.disabled = disabled + if self.disabled: + return + + # Set the environment name + now = str(datetime.now().strftime("%d-%m %Hh%M")) + if env_name is None: + self.env_name = now + else: + self.env_name = "%s (%s)" % (env_name, now) + + # Connect to visdom and open the corresponding window in the browser + try: + self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True) + except ConnectionError: + raise Exception("No visdom server detected. Run the command \"visdom\" in your CLI to " + "start it.") + # webbrowser.open("http://localhost:8097/env/" + self.env_name) + + # Create the windows + self.loss_win = None + self.eer_win = None + # self.lr_win = None + self.implementation_win = None + self.projection_win = None + self.implementation_string = "" + + def log_params(self): + if self.disabled: + return + from encoder import params_data + from encoder import params_model + param_string = "Model parameters:
" + for param_name in (p for p in dir(params_model) if not p.startswith("__")): + value = getattr(params_model, param_name) + param_string += "\t%s: %s
" % (param_name, value) + param_string += "Data parameters:
" + for param_name in (p for p in dir(params_data) if not p.startswith("__")): + value = getattr(params_data, param_name) + param_string += "\t%s: %s
" % (param_name, value) + self.vis.text(param_string, opts={"title": "Parameters"}) + + def log_dataset(self, dataset: SpeakerVerificationDataset): + if self.disabled: + return + dataset_string = "" + dataset_string += "Speakers: %s\n" % len(dataset.speakers) + dataset_string += "\n" + dataset.get_logs() + dataset_string = dataset_string.replace("\n", "
") + self.vis.text(dataset_string, opts={"title": "Dataset"}) + + def log_implementation(self, params): + if self.disabled: + return + implementation_string = "" + for param, value in params.items(): + implementation_string += "%s: %s\n" % (param, value) + implementation_string = implementation_string.replace("\n", "
") + self.implementation_string = implementation_string + self.implementation_win = self.vis.text( + implementation_string, + opts={"title": "Training implementation"} + ) + + def update(self, loss, eer, step): + # Update the tracking data + now = timer() + self.step_times.append(1000 * (now - self.last_update_timestamp)) + self.last_update_timestamp = now + self.losses.append(loss) + self.eers.append(eer) + print(".", end="") + + # Update the plots every steps + if step % self.update_every != 0: + return + time_string = "Step time: mean: %5dms std: %5dms" % \ + (int(np.mean(self.step_times)), int(np.std(self.step_times))) + print("\nStep %6d Loss: %.4f EER: %.4f %s" % + (step, np.mean(self.losses), np.mean(self.eers), time_string)) + if not self.disabled: + self.loss_win = self.vis.line( + [np.mean(self.losses)], + [step], + win=self.loss_win, + update="append" if self.loss_win else None, + opts=dict( + legend=["Avg. loss"], + xlabel="Step", + ylabel="Loss", + title="Loss", + ) + ) + self.eer_win = self.vis.line( + [np.mean(self.eers)], + [step], + win=self.eer_win, + update="append" if self.eer_win else None, + opts=dict( + legend=["Avg. EER"], + xlabel="Step", + ylabel="EER", + title="Equal error rate" + ) + ) + if self.implementation_win is not None: + self.vis.text( + self.implementation_string + ("%s" % time_string), + win=self.implementation_win, + opts={"title": "Training implementation"}, + ) + + # Reset the tracking + self.losses.clear() + self.eers.clear() + self.step_times.clear() + + def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None, + max_speakers=10): + max_speakers = min(max_speakers, len(colormap)) + embeds = embeds[:max_speakers * utterances_per_speaker] + + n_speakers = len(embeds) // utterances_per_speaker + ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker) + colors = [colormap[i] for i in ground_truth] + + reducer = umap.UMAP() + projected = reducer.fit_transform(embeds) + plt.scatter(projected[:, 0], projected[:, 1], c=colors) + plt.gca().set_aspect("equal", "datalim") + plt.title("UMAP projection (step %d)" % step) + if not self.disabled: + self.projection_win = self.vis.matplot(plt, win=self.projection_win) + if out_fpath is not None: + plt.savefig(out_fpath) + plt.clf() + + def save(self): + if not self.disabled: + self.vis.save([self.env_name]) + \ No newline at end of file diff --git a/encoder_preprocess.py b/encoder_preprocess.py new file mode 100644 index 0000000..1150201 --- /dev/null +++ b/encoder_preprocess.py @@ -0,0 +1,70 @@ +from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1, preprocess_voxceleb2 +from utils.argutils import print_args +from pathlib import Path +import argparse + +if __name__ == "__main__": + class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): + pass + + parser = argparse.ArgumentParser( + description="Preprocesses audio files from datasets, encodes them as mel spectrograms and " + "writes them to the disk. This will allow you to train the encoder. The " + "datasets required are at least one of VoxCeleb1, VoxCeleb2 and LibriSpeech. " + "Ideally, you should have all three. 
You should extract them as they are " + "after having downloaded them and put them in a same directory, e.g.:\n" + "-[datasets_root]\n" + " -LibriSpeech\n" + " -train-other-500\n" + " -VoxCeleb1\n" + " -wav\n" + " -vox1_meta.csv\n" + " -VoxCeleb2\n" + " -dev", + formatter_class=MyFormatter + ) + parser.add_argument("datasets_root", type=Path, help=\ + "Path to the directory containing your LibriSpeech/TTS and VoxCeleb datasets.") + parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ + "Path to the output directory that will contain the mel spectrograms. If left out, " + "defaults to /SV2TTS/encoder/") + parser.add_argument("-d", "--datasets", type=str, + default="librispeech_other,voxceleb1,voxceleb2", help=\ + "Comma-separated list of the name of the datasets you want to preprocess. Only the train " + "set of these datasets will be used. Possible names: librispeech_other, voxceleb1, " + "voxceleb2.") + parser.add_argument("-s", "--skip_existing", action="store_true", help=\ + "Whether to skip existing output files with the same name. Useful if this script was " + "interrupted.") + parser.add_argument("--no_trim", action="store_true", help=\ + "Preprocess audio without trimming silences (not recommended).") + args = parser.parse_args() + + # Verify webrtcvad is available + if not args.no_trim: + try: + import webrtcvad + except: + raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " + "noise removal and is recommended. Please install and try again. If installation fails, " + "use --no_trim to disable this error message.") + del args.no_trim + + # Process the arguments + args.datasets = args.datasets.split(",") + if not hasattr(args, "out_dir"): + args.out_dir = args.datasets_root.joinpath("SV2TTS", "encoder") + assert args.datasets_root.exists() + args.out_dir.mkdir(exist_ok=True, parents=True) + + # Preprocess the datasets + print_args(args, parser) + preprocess_func = { + "librispeech_other": preprocess_librispeech, + "voxceleb1": preprocess_voxceleb1, + "voxceleb2": preprocess_voxceleb2, + } + args = vars(args) + for dataset in args.pop("datasets"): + print("Preprocessing %s" % dataset) + preprocess_func[dataset](**args) diff --git a/encoder_train.py b/encoder_train.py new file mode 100644 index 0000000..b8740a8 --- /dev/null +++ b/encoder_train.py @@ -0,0 +1,47 @@ +from utils.argutils import print_args +from encoder.train import train +from pathlib import Path +import argparse + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Trains the speaker encoder. You must have run encoder_preprocess.py first.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument("run_id", type=str, help= \ + "Name for this model instance. If a model state from the same run ID was previously " + "saved, the training will restart from there. Pass -f to overwrite saved states and " + "restart from scratch.") + parser.add_argument("clean_data_root", type=Path, help= \ + "Path to the output directory of encoder_preprocess.py. 
If you left the default " + "output directory when preprocessing, it should be /SV2TTS/encoder/.") + parser.add_argument("-m", "--models_dir", type=Path, default="encoder/saved_models/", help=\ + "Path to the output directory that will contain the saved model weights, as well as " + "backups of those weights and plots generated during training.") + parser.add_argument("-v", "--vis_every", type=int, default=10, help= \ + "Number of steps between updates of the loss and the plots.") + parser.add_argument("-u", "--umap_every", type=int, default=100, help= \ + "Number of steps between updates of the umap projection. Set to 0 to never update the " + "projections.") + parser.add_argument("-s", "--save_every", type=int, default=500, help= \ + "Number of steps between updates of the model on the disk. Set to 0 to never save the " + "model.") + parser.add_argument("-b", "--backup_every", type=int, default=7500, help= \ + "Number of steps between backups of the model. Set to 0 to never make backups of the " + "model.") + parser.add_argument("-f", "--force_restart", action="store_true", help= \ + "Do not load any saved model.") + parser.add_argument("--visdom_server", type=str, default="http://localhost") + parser.add_argument("--no_visdom", action="store_true", help= \ + "Disable visdom.") + args = parser.parse_args() + + # Process the arguments + args.models_dir.mkdir(exist_ok=True) + + # Run the training + print_args(args, parser) + train(**vars(args)) + \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..2c15ccb --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +umap-learn +visdom +librosa>=0.8.0 +matplotlib>=3.3.0 +numpy==1.19.3; platform_system == "Windows" +numpy==1.19.4; platform_system != "Windows" +scipy>=1.0.0 +tqdm +sounddevice +SoundFile +Unidecode +inflect +PyQt5 +multiprocess +numba +webrtcvad; platform_system != "Windows" +pypinyin \ No newline at end of file diff --git a/samples/1320_00000.mp3 b/samples/1320_00000.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..f0791b04239a75e1eed175403d78fa503772bccf GIT binary patch literal 15453 zcmZwOWmFqaxG?YpCrFV10Sd+4-HN*vcXuz+qAl)LT#HL_cPQ?~-K~YTxD<--ZT|P% z_tU!{ve`|}`Ry}1XLjeA83k!>BoKU~*3#6J`L}-o0wF4!d)n{{u<`J+adL3{_v`$~?}ejfvaLnET%;uBKRGPCpY3rou?t7{vY+B!OW`UZzbzfI20 zFRiR?ZhzlDI6OYPxcYnh_zcJYU7Y2=ixcF8AN=1&gD~^IUqv|df}aik@ASVta3wGZ z!gF(XqLqb#IMhHOoNpqT9CaX25E2zd5SRTn@Q2+4`0M56h8FdRhJtcV0Sa~nA-#Fp z+}zv%HlJQ%zCsqkRMm0&t;y*`PP8~bnOl?aAmfohaFK=DH6~|g zu~PL;KieHqCk(d?3bmWmOwH%C*BmhUeC?UH>9JH^GEO`pL)HO&Q;ew{XwRVKy$FbX zcj|BpLc|mu=V>0=adB07GOVVtDpROZKa(2^d*z9&f1j zZPzYNW#J^sgnL73+`GrBsw`-j9P4wY(lfBqJ)Cw%y<0OVyWykwI;luI7oD@683ani z!XC7TTbM+|HLlM-;W^$ubVaW}RHa?wF>F#V=Bc~VtTK&9%TnAd;03Sw{F)#;eBaBH zJ{-5@!8d{}RJ-3hB9=ot(Tx)>8C6B)$=H23^%i(AHSyBF(IiU`69W_p%Os2vLR(k@ zQ$xVBtpAlEzI?wPbN~}Af^%^){5pcj9-`7i^PbmnbK3TEiv9R2^}iU{ygp%qp{-GC z`hC(ULlt0BZ>`wbwzMA@WYaD2e2S>$0C261Tn;)fMWV{m(>i^r;&EPKqcB4uFf1J! 
zYy2`gPpuhly@E!L>p|yawHcG2mz;eV`N0x&(PoCUt5a-!)F@Q@dOye{x zB`h*k_f9Ye7j7{_e>)fvyuks%gj#+ASAZdBMH1y_{TKO{m_cf6PGcS6_>h7shY)Vo zAp;JikA+7v2uLrTHI_u`#2qPi+(HJnGB~0Fe_S4w2TFk}2uN{$SAM%edWp|j$0=L# z$1Iq_l+x0IAE6j-@liFnd`!*G0D&a~&9TU!NdAW0Y~WByyqM2#b;2_3GusK&)hqz2d+Y)}ZFo5R3)2gT6 zpR9JngBPb7s8=dT69mIg|4iN|vOE8g|K+Nar5NxW5CHTPPyWsP3@7ls$$A4@* zBQAnMKTe5@^?6dg{$I<%ch%D;#zHn0$U7x8PL6yZszuFh)2U0mT`krOG+!CCS*lqn zs;Xlk3?)M#PV&ZM(vK;jbKSm-kP>X}e7y%YLzRa=UG<}v>N5L1F2H^5*T@J`aWP=q zHvxw2&pc7`dJRn;Zeb8oA#_0p#*ZUjRdDD;Tq7J?rydq^+}NJQ3ue8Q4tb^0@z*$j zsY7%YjX>%T3ztUXp#F<&bhLm_Q@Pn5?TO5UVTx|rW#GZC<30EHt-Vj$?-!|gDuNKm z6h?VuBDox)<3H5j4g|q1Txi|ps;A>%LkR^9*PR7r*4l52yheZ}ASFBJhe1?&twH#Q zwZI5SSDoU~X{iFclZGBedMXwn?$K!DIRt~A5q@he-I1TV%RD~ z+*wEbg75|!5N$piWZi+=vW`^n<{UTw{p;p{RiIQO0itAJ3HeUnOJLOXC2}#Fl2(W} zQl_d-=S3W6lH=xJq(soC^08yM#Q-f^TFv#tEWOj;{IsS5=HMbiN9@ZltayQ{8s6Oz zSRJVE?lJBhS_Z9sO06aOV1jkjc8)m#P-hpY|vV^IcDdOmfeR2Jzh(f{d8>|#v z*M+D*ti*O>?j>9!#(fy8O?#;x4Yzop_VUSc%|=UW2oAipX`^LZ%*bS`(+!2 z=QW5MpOu^p-Q^bW8ctYu0y@Jns#}PtOYQNQOSCbOWMSMdF$a4G>yL+SEytz?4i`)Q zq$*YP=yT1^cugEIShC`uJZJiRR8xan640o_*>3N_+!Q1*$x(kmuAW_8N@I6V8n!kl z*)W`d|7K{Np?d4=nO~4S};>x~f_eZ#iz5jDH$> zJ=?!nKa|?!G6e$od*s&E+u4nMQUeHA;g$*kLxs1=0H%7w_fKo=_4jK+s&X&{r%sX# zqON*|)MhP*W5!=2DdVFQL*?B2r_Y58Dz}=j_$Orp#Ww$f=wI1MN#Wev5obRG03VKXI z4WF}DyF{aS?F9h0)X7tEJGmsVk8r*tIcSKMBDpJoSxrEw$6RDtW832Hb zx_1aZA5rUW#4@qISD~Xy^oO7D@<#E>UnTL8jRHGiSV=x3cp!5UHiXK%ujl8fC(rA5 zwSUW8w;dwH{;m#g{rcDn%)4=1d}-a?1|ouy@W1(eiAq=v=$yH(}A;KS+-AE%VqzCkDS|`#eJ# zDI){l50>z~+DlP4B4XM2Iw=tG$5YdQr72%X;P77lfA0WS=Z}&dTTL1+PU@mP@vg&TPCzez9wGvAbLj{Emh@3E;2yFwohYj*5?=7dH`(y zsSRQJ@lZx|gN&lxp4!<1JjuxY%yJc!0Rx{A_o5{|{07)r5(VKFHq1O}Mf;66sNtxE zL1V^NACXI_%`y8F)97!LpFu;vE_2K!IbAuFqdh%5qXN24Lr2OX(OM3S2RRP;g|5+r`DGC`3H-uT2$~HisUOmzuAuh6mm1pNlgV2Ns#&Pe!;R_n!w@W zprt7Rr|X9~5v|F;_muP4v#*|0Pou7D|NdpfJ@9zsp-eiv>%E(`h$hLX*Cnr*-O%=x*Rfd`#!}E00RzvY4h9mXbLm`wHxSAO2sA9 zX&8tQHAO~G5)KEM9@{m z!{-6a;xcSuX=*JEeq=Jxfr3c^`p zpo9;%g6^h3ldCAKSv%;Z&FEsKP<$}T5a83x)XVax4s33HC5MAd^wTVv`zk5{UWQaq zN|z;y0ZixN$9Q11@%|#GLUSXz1j9nyeq*c}b4(v|KCRHcU7=Lmt{yfztHvmWI-C~m zx2Q&ylB^?wfd9b;=QC_!sX=i_G2YHef*KZ~5%~NTqr6ADn5WhUg@U>j7T^e3JQ{r-;L!+APRXr2aG7{4yO$GXiWEmM|1*`OHPtKKQlNCeXT`mU2o?&g!?}`d(^v(?Eg-4rO{$1XUq4R6 zEitIu%~1RR63@3^w#6le6EGbtlNvotB>)hQ77AqVCw42u&KJXwq1SVN@HAf((;koG zDQnSK-Te5);kjF?vpr=*qaJTaseyPEq7f>iG#I8I!}|V9g!*8d4A2QI8Z_CI3{`2) zz%6Ac=V^kr9s|g8dI~7Siz29S3IvU0a{pLB8d20dfcW4ah=xF12GU9S-K-4la1b%u zzuCMgk3gf{*S|t=u|5Xanf`tVj||EfpBSn2a@eqU3Sv2^(r5FZ^j$Rge43tD`<@l( zzl2)`r2UY<#VmQylic!DPSY3@%}m)=8|IQXn1M|YA}-E=RnCHqFJ8RLnvZVLDq&Hb z26~y}>p1jF-)gWjJTEl8!E(nTCTtMp(I^BdxW8yrN&NAtHt4(69*e2R{y+)2C>jBc zmLwtEQhaskGHjY54nnYF2->50ohA@8#beZGiLCtlY3pxOXriB+=WAYpqOGI*=Ny0g z!h#k~X(g8!iK{OMvzNIh6!^4DJLS>LKRz&@IcoA`ef>7?%Q=KjqiJ}Y+&1*9&Vex> ze}UGDM;dN15=cDn=}rqE2>jZe&RGP2-_uT4o-b*Bg*aVY(SH-o^?Kdjh)IIil$@@= znh}lqej5NzB?aHtCuufWq!?#@dW@96(^la!DWEW-`ASJ+YKLqvHXPTgPUX3xYKkoU|h<)vhHx0;XqC!Un<)elnkMoEI&dw;x4;NMvO6aez zupp!3!iZX0k>HjnfttbBZ9Nz$1`iULRn$-M{$>4{O}_n+kU^c+G4H;EL`XuDjmQw1 zG@)BGXF#ZUs|RS;x!qsN3De@2?423M@HtCav>=P0h z5@Dt?D#enZ^aLB+lB1_4%m8QFfwI^6HEe!S48YzD)_LMbMAbeIM@`~;y##w9chP{t zqE>`aFuelqCnsl=Hx27|Erf&YkevvE$~p4P{eMhA*h#DmIp zLvQ%9#~80KU6NERZQ&LaYIeC8PC5s5-)h3iM{SW*e;!r1)vp5j=(2pu|l-HYmGnC)wbZHMx37e!10G_9qIzY(1f#lez_q|e;mBfSGkFgoc zQmTWmE)g<|{Ly@i)#cz8CDi_Ojn8NBn1{=~?qOfbyNnKwnT_Hwk~78|q!;ZVnCh>oTq72S~Pmiq78i#y*aWo{e)?W2?V%^LM< zEFx0Rk${ID;2l#{H`7LwWz7atQkrL`J^fIXKKbC6ETQm*8EmDk8Av*qJbz3vN*i;c zf;PbBcDv%KC-X6{fFyw%6K)AX+f8O}22fo1`EMi3XPFnU5M|V8kpnD&L)$?Mp?rcN=br+9i@9yW$iMcI0@pBbh?8C<~a#DAnyFevPg= 
zCGE~{{A;yrEHiKb=^*F+>OHt6`|o{z$;1~Kq8?@aS=|CpY{S(~ou>TR@>Ygr3|T32 z8-1j&SGLiN_-IoY50lil3arL!n;iJ1C1SZyvlJ%^mF3+#nu44|&Th@=hlf;_ zl`Zbln-M%T7r=6`8t;-Ijo zy>9UkL%&Yz zMMWr`zZZm&=8J|ZOMm&H+XY;N z+|7-!LnmzX*@MfqEpR_AG6<1wbkpF!oSrppv_^GmRtCt-h=+%bvnct8ePV&ucJ2Ov z^o+%a-6FwxYD=!Z#EO7`g{k=aLw@BwMce1YXF?>DiV)bG8i>}nUx2t(qoGqPV|~1E z>8(MW`AXr3WHRk0p=HeXRcM5<%jj45O#k&^S)r?vtESW5pgP!08%>{t>_YKk7-93L z=j%tYix>mFrS;w&Ge<%X_O&mCGznh`qK2emx}Aspz!t9U=lss*DUQ$6j<2LHJR{@# z`xrP62p%`4BOGX8`^YbHWDa$=*}09&%;RGZ;AJQP4I01a^O5bgUKGhZ%xkcQaD-(j zB+1PHn}8jh_;n$ZBBv0z8jE0}+IsD-;4V zQiIWm2Zf$~Cu=;~@SE>7w;CIJ?QY+AX6M)Q01HW`gIfl~3I}s0zREeuxm&B;)9*aa z=;anEkZk&g&uzr(w!6p+%;rB`#ugci%zKiMOVo7FJax=0qQyySt|e7HTN%;nxpf>h zJFLue<1I3mwe4H+yRTO_(ie^Kh-$); z)kp}sYMg?gI4wga$cO1l)stuU2349~A8gl|LtMwXmBBoI^r<9sp?5P>dUaBRWDqjY~d2jav$CX+Q;n*Gw?x2s>$A zc&?qg(c8B6x>X9BF(X)KeMsp@7ZeZiJR=*0F&JUx)y*G`X)LP-UnuLHO9c1#p$VeX zmr^_@Uk_V}$c#jiQ@EakZgB_@+F9yL_j{FRR6q-Qn&eT{IkZu&aLWuYQFO7*M|nP~ zp5^ZDzO#@@d{8~Sd%|*Ih%==lRUfmdsw!S%6LbIV4X2)3IE*}`64q}1o&0Fw+t(Zj zveMGq*|t%n0mcF-SVXW1buNRbtYly?vNON1(*qXfr#~?{olJGP$_KaJ5pa4g_9!zT zNGwPU=tTW8kN4xO5+l>5Fw<( z%PVmu<;BlIaS&1*{6gB`NqfeMTY6(7mtIAu;f_k>l}S?GR_^0XrmvG*0gKZM^sldEJNS<464*AFv?Al6TYMb5;)59jLAYP`7k)~A|1SqC0i8K* zubq$wDg501<0JNv`C@C@{ytHC@>RrQg+YOXLN5^=ts%*Ksxu{$7AEU2&0nRfo37|S zOQwF{rvZ`O#Wjg}asLzx$<3 zz$m(yrid~p>cgz8qPUiSX(Y;jDh|eLjKU!ssM453D8aOfiui=C9_S7Os~zKM)%-Hd zzCz-N{H85GF-3Ixb7?+DLU32EJ(!)9mzai3SbzPKUkB{OR^^Tq%2 zHaY~1OtYSr!+5@kVKC5P6`twOtFIc z-}sjqA-LnK{{YgA933bp+d)E&UDq;J_`9_$bI_bXe6TnD(!%(# zurhK&_a30k!9>p%*!A;o=DAGOLwq;~^|E{;O))x{eHJKcLC&YHA(TpU?wuVb*`g=E zddi)jgj*i?>U1kx_!D@pdW~aJ2-G_2yTRaJ=LoqsoF%GLX8}*spPsh^;$GAh`gnnE zj4lyKY{iyR1Cc?*A|TSs;FIA45+#)o6mJA?)lm@+oF&dSgOiV}vNHc_@nH4%cd0cA zJI0T{t5e}tC{(>+#sp)6VBliI=|bynJa6e7El8Zi^QRUH4hd=i0OYRTnydwA2X&}| zW;Tp^6RfoY8wBMUNcyNm3X=-BJ`CQS=6~I?ic?1-EU>7omN~3wMt%|S{4w6n+%{f+ zuI^a)ov>XB+)9M1t4@;p!gA;!!M!t)=g|tbKH6W^?3m8J0DvS~dCkB49N<@0X5dAE zMEOURo0&AGxc67xxU^i&=lcDb7KT`B!Q)XRvpq&t(MQoJtI^P4%s-A+{#$93EUt=l zAsD;2YkWJ|c5o{f`s=>x=NoYZqUfQ-pU^ROF{W z>HaS%QQF=z>xIIta@u*kVbfT7kgEaTZ7?)mazzjOrc`MUBpsHuf306BFr!9+Z=H2~ z#O2d%c6{UqRIY#rDq&UfJ7xADJO#ZfK4~ok7J=q`FzvEC28iPr2Yp?fe9V{;T`9Dp zq1mPUtBCK{q;oO2<%<7Hm-*E+4nkweo=2Vu)-Yvk$n)Br^%u+_TZ)^gkfA`Nt=WP( zOw=st@lPQ{$Fk@O5l4 znN%IPvr|hNDr9Y5XTz-ks1Np(j^zYSPrxtx4uVu#M!Wko%Yf$+RxJNiW=8#Imxj+N z##NF|ANL|fM(;aJvIxrfDWF=ecJx2aqav{p(`8!>V)wPftbb19pzkIG$3{=KlR8Pb z?(KBH#Ul}xq~LUZLtkD7r%eV;nrm1Xt}jL z<_n8@zlhA962*@&s4$B(2+``l>pr{UuRr4w&2s7AqGwG1(Q@s#ODewSh+H{3=={b z2|`k1i#1tDPbXyDrX-Zx-pvB6I7{TjD1$W(2@aYKDYSNQtvROmS0xR=2C{^ogW^*g z7;`jPnp{lYUKh^vt8%c!XW9LCzw!vgm&%^!WYzp$#yC_vn(oRVfXHVS0`Qsp^ge^cPl926OQ2ONof zd4UcNMHvxaezB5p*PBITbL-Rq69_y?6S`CMCQqqA9MCR+e?;;(@!jq zlkJGK?4S$D@|MRNtIlb@HVc9dcGXfD!L324!Z78T8{h~i1O9O02Efv}`q=tYN>_iMajR!(O?WAevbgm4MESos-mj$zVNqktLQn*F5 z=>(>mXTLYBXtDduoYMH^8rFv14&K!L*Z-QuN4MGR_LgCLA{jg))Tx3Fy0NM^Y`zxN z9pY4!ehb_Y$49b;7S-!jl>2@>TrP12cq=)mcRt-G_JYs_cDm4c_=o=dq0l*M>tTC^ zU7Mo6cVpzgYOuvtPsA-IWYU8(M3NJ9b^QSz*LSFd>MFVA#1BK+BiUgJHY+u>VA;gd zDLJ-P`4jhQj{xpJf~s;T#1>Vyz~8nqc0lH*Y$`FVledCxtOs%D4wei07V|zM-O{?_ zDs^Sd@e{VnhHml)XY*N(e{&WRHNBnvRep(f{g)%UfZZExiTWYs1~cpI_H-rJIy>(D zEO#6)oTZhR=O4O^%M^u3L#0g7Q(Ed~l zV@vRxM}UNoKaU3Kw&IWn_&C)kB5MOZuVy zTCsl^Mrl!*IEd$`I{Rze|2dM_Z$7M_dRd+93vkx2AF=#yJ-pg??< zIIWd)Yw5ZOki?G+>S9zJ)F6uUw7U__9?|sRgptnMdZp{tnx%Qn<%Z7o*Aq83 zOQ}cv&PDCczV8Q1MI`^EvcJB@C@0&tV50a!t-`aA?lv{?1s>N=V)Nk4+&S4Q$G_Rq zroS-VOR8uownsbLK)50VI|Ru!3GXjL=>sD)Ze8>}dj{M(>%J?ABdR&N@8p{eA%oT^ z#S58{k%<+p7bu^B-508P6J=%vD3?6Wchg=0?b9v^Wb}!ML_9=rYllojcFx3m6l0}zH#^v}3 
z$>c8lMGeR}s4sM#s#_fu@eW#@Ix1V0nPOqE#9)=!ZWgkBFqjLb0Zql7p6hha3k85F zTk?biK9U@pIPIw~rrX+qdL8;+G9K#_L1;)A@^+Ka0DA;sOSYD z{`pT!+oVqlk{UARGcM!|dl%`5Ii|1~ZmDX$mx$*rWb|v&K^WkCus1#2a;NnW>f-ax zef*YS$F$Esa-YyznMn}$Mc}$@s_DV}Hda*^^M#Rf2F?Yz-`!b|rbUTZRUFr3%8_U7 z@1N>oZ53T(t!k60dv@k8I#j%2#TBKVs$Z82?H{NDfTZ``{Ww3laJOWcdEk~X*_G&_ ziFoEKCDtyM=aVN%gqsjmRHsG#~w zmquK&c&nD_eoPt`4h|KQz^q_;9)P7I$TesGAuH13`g-zn2sQ#Y1Z)+O_xvwA3Ebf| zv;KDJ)KXAG%3rERd>Q|yIWXvl&(e4Z+e<-vPUCn2+{Hr_UKBcBR%`9>x}_>-xWx`t zn9A(7kVOiZz8NRF(hIn@X(~2Sz{6u5p?@Yr$z^zV(~9=vOPYSqcm*D>k$wOU1kB`0 zCa&-kvrw+FPT}Bz5+ZAY60QHPR<*5*4+B*nG^6pwZ~ydno?rAX*#<$x%^-0*XclgX zz&JHZO~%QalBf}at@K(yyDq%l)#Tufh#KTf);CkLlTh0~=;HVi0Gv1m6e5rmmO~H} znm{2Ixy=$m$z>qDzgj$C<+=pB^C~T!kJ&a){zBfmpiyr#Dz@r8%}8a|7De`AV~2IP zB}Dc6G&5nIyqz~JzDIVgK#(^vlWZr+7c1hWlQ_fTC4XdohH_Z6B(kC>;ki41uM-ay zTZ*0GlH!|eX41b(t#i|KhJMJI6aKki7~ALYO%vPB*Y$6dWV%-O0|Eiy8B@PJs3m_yi(Ca3x)Hj`c(CvN>W|GmSTsuE zX%8n}sQ&qa86l$A>_1PmcT_b!QZ(%5fM zV&emSoaf*cgh)|i5S$?nmNrL-Rb|TlVEiYGl_JUxi5!F`!mx0~nIFxmGh3ORi`k;~ zUGXW5@CX~R?$*E(=y=|JB?^w=f8CE(fRojRh!>hD9%xC`lx1DrOS!I3oQ}WnN$mux!IfK7R1zKYn;e zYPg(1j-@vzu`(Z#Dr=1}GbX+e$(Qm~6-Ju_LN`G0nH7js-+WZOACTxc6o)xo4!)>4 zryb;8V6~003V~a=(DLCqY~R5-?)M=C90($gM1vpH$#8NFo3LAJMRo#bJhZ+fW@tL=sfcK>hnU zeV@dYO>ss2rvLh1WYDt9E}a2PCy5H?KZV0X9!aN+?8h-L^_1|yj+46Y{1j6IOKl
|NP_E*PtZ|nSuYI7NUWm4o<~XCw7pOi*w4787iC)r2Af?u7N3ZZ_8lwK zeT!a`-F5mI=)#_tx#wj{71f$NQAz0l zt{Dw_pVKnwqsm@O_8LFj;)Bkb6t|3p&Qw>Kq`kJTMhtV+YBT#|a1g*kC-I$fk=5Ch z7r3ujG$_)nRBXVt`SVRWW!NwzMqr=OP%s$(Tmh#o_*a^?(^Xd{*&cl+EfP}W&>&t^ z;0#p2h>Xy~?HIylz1UqV0kz_hfBU%l&(*52kxlEVH8}5eOqxM zL|tNTA@lsS7GI;}^e#T4w>V;Dmc1>`yLe7=;JeooA!$$HiBv;PKpx32m92haSzSM& zeNj3);nK7Eq4u#m;00?fb9F4-QX?>*8vXW09$ZlW*4e%Slpkxe$iTv5QMV34>uOE$ zR#a6x&x|pUEe>~N!#7k#gP^-Lg4E9Q>tvCVzhL3;`7mTCk}D{_QXnE(S4;Vo(!Mn7 zE?=d^F9`EAXH{fLS(d28&QOZ~FSo2uSBc%%lc5KusbMWTZnV;S&0fTGP}i9zKJ@Z8 z$MgG|*ETV)bZ{Mg%j8;q{{D3M0 zd|NW7RcX<;@`=jwZH37rKrDm2?AHj7i|l3e1BvM;afHR=O}58xDsqkshOAP0KV+K? zGIbp63E1XNYGMeffsCJf&Q=}*LCi1Ta0Z z$wd+3IVhtr4ku3??>A>+qJ)rEhSuMe4;$~e6o(-Ng0w5i?KID1F5fEVXRdpWx?5Iy zKc~A!D9l@7l0$gX2JT8S0{7jalra4U8{%bDSGYxuA9#kMx1_ zMx4qiKtW;a9=V{5g9!FU#)M3hiU(Uo6TN8T$T}i~nY3`j&UqRUXvqFv46t*c>3N!1 zSs8gwGMF4166^$(iSKH0xYM?iuBhvjDLWLET&%z?9;iM_Q}4LRXd)B(Wx{v9Kl3ao(Ki6Nr5ttQ%6V#@5r`GyHShW@0BZtHEF!)2|EhKse45(^9AmISo!zKXmrsqW2Do#LU{q9BP7 z9+e9L&2*_ON5Br2w9S|FsIH(w2^{p_fY;ZeQ!TlNj`_~Khb1jli?`tt=3agmXBHqd zDOkv}uSY54W6AOp?w>ccFOGs@W`JG0zZ-w|D6N-g%HWm?6wjcThcmpZ?(E(n+vcXF z-8tTPaA6-E%2G^e22Di^8kbS}eWWt{*DaNH5M6Sbs)5AyCyuF?j7CTl2dO;#g(*@`=x z=jQiSV|c!lR@oPX0wGbxMmr$uz(&9_8abxR4vlAu9i9-ZHY83X@(K zxNnqq1?3?aGPhW|#^5HZWL;oCs(kv^GVz0xjaf{Z7%}i0+9Euz_s~AJxqssh8s!*rx~G+Ugnimk$QA;Q+R0m98g3TkD)?;tj1DYlnzw{dIy-= z_?rJvWkD6+5+cP6rA)ZS=k3U`s_*kGGW+@>`!ia0T8FNi*~z=!4Y(Ebubt1(nv;#h zfi)EVkqo={kGgBoZnxDcDu-rA%UsBZ~Mb_6(2fGhUEM z8)#rcxj7K5AB6Ub#T0kCx1kW69JS2c>`+5jrTV=?58F0)k@sw%$Y4T1aR%tmxn^4RT zD>PY@8ocexw-M%2ZI&_-iT+W`UOSDw2%;e)JGRrcy{{kLZU7sK2-F8hVRQ=HdARkJ zaLl1OegMUU+a=l`^?-hQ3iN~hGJ4h-g&87!W9XjQ?=1B;#L2M!;;1t#eW=uxR#zSq z=D{PMq}zO~@%wyUAQ;7e1R@|OPfaJa&CJuAAB8O5-iTq@cdi6un{OsAbs-|~DEV(b zb}xxr$7Hupc~f6v#{WT_ZY7_nFhjDTd~ z6S>G#-qtqB0>54qSRbGLf<`WSyl3lehbhzt4+JS*y7lrWl~1Iovu&!>`nB#$(KQhj z1&^zlo|~^q%X_Qq-rs$H>2^2!Qh`>!jmN(E`HnAWa@+uRUc71SThp^@FHDh!nz-Tq zGS_^6KWQ|H+$27Sjut<Ecd^Pc=7nqK?{j;^Ab=gjs!QR>GpU7NN{eSw zlE2UKqwSYFTlJAIep*)n&aO*1MbFS}@O0s*Fh!p-r?t1-{RfuOnYbj~FT(xr2~p63 znC0rr1q9LImNK+Gt^Qx#Q1h#Qr>)XF1F}hRXiR||zoh0Br5Fx>nWBTQrT<~5=)Gd4 z^&+0)ID)ozgXvSIwwW0weZ{aCO9u zG9@%3gz(_f0Jrp@7beXuzOrm%ExkhTus>1m1gic~nxs;0_gy`(^u0e3TYTs|)YnMH zsN#Fzqq4mE{*h((5>J!>re`6SO=m8Hmg}@8-^%ySKE+O4{Rm4ZrG}toiHQ%yu-|G( zw+=3*7Q)rb(z1qIme6{KzHT$wJt4DuI-9Knfg?VJbgg+k3&htJikQ?nDY<^rv&DJB z<*22qP>TE=-s}m1F5xJ2@0rsGnu#-B;Yy_yEyvHU8mT`WDAzsLooj)>SYq@kyjUDz z1`{$UQ;F-Bf<#hcxb+sA=+JEB0r5+t{O^k-?I_u$Kn@?Flq2c`@ zp49uTsjn%oK@bmwQw!X#*g1|^|mp)q{V2$O|diBOB&s9bTmH~H(v#`Tl-Yf+K110`uYMK0hnuqc97 zmtMt(%>dfOc)jtHb)!m+Q?j4WgzRp&glRJK4@h8L1)1u_?k9dBGL^V4yZ}gQg2x z@Aa00Ec@8I#UYIn>gM`dj#N&Ji~q(Qs_`N_^a%dV)BD?fA5-0wxaUbRH8S~rdvSP- z+jsE#jnSl1OmDt7Z|JrAFlvYxaGQgQv@}tgwj0GnG-xI;ZGGZ`TKBDl&6uK`6j^GR zg4CLCH8fkBa`UuwUl*a+drq3s*@|941s+!)RU@4;Vx~CA_F#LnC#lC>wc^dH*6w51 zW9~Q7hJ|lJd@kRyVJ>2u%HsrdomF{@C!qi^RSi7%CDn zrP+sLE{%svUBk;u?Wd8m9)L?9#1V{cX8zxLy;N!sHZ5?bJgAKn0fdL(6#2~}KmGY9 zlN~bgN(;#oI8sGAw~IQ0f}hu+3Q>>wJIms6C!0IRo{jtqsW&HGtf0?Gfhe~gBVQeE z6sKm{5jV(o1zyRUL$}|vRhCvjg4?hHNx!3He22$nLyu1vl=pA`A%gJ^hRP(iRAwwe5hV7rLYFs2!5K|w13gcD$3ZiMYxDCMgD zE}kGj`&>4VrPX<&i6HMom)U>RYW0aDwq?02oeddoF}%{K2n1*9fr4PKwflZhV(5hB z2kX?&>*B{{ZG0A0_Nl*9^Fl3o``tM})3+t^gsgtvy5%VI88}!AL}%(Eqe(L9oC(=z z^;(|v5PSMjun|>hwXnomj{V(i8Q9lGzvkeLa~o{sg)MRnVS?Ri==X@>R@6%dR zD{HtBr>T3Zu+xq9N4Dx$H%|~qFrGl}yUX360_Nihxd*uQf@rKi68vwxl3Bik;K0S8 zijC0OyQ73XUYzk&FrGP|k+a?mB$pY1OHbEai}RYT>$k%Y|9;<_qfHrWQoI literal 0 HcmV?d00001 diff --git a/samples/3575_00000.mp3 b/samples/3575_00000.mp3 new file mode 100644 index 
0000000000000000000000000000000000000000..545d784f88a0edefee354bba7caff2fadf617261 GIT binary patch literal 15453 zcmb{3WmptZqX+P%8y485yQI58y1S*jyHg}Zx}>|iO93ef=>`b_QRz~;=eoYnd%xXp zcRsK?z_Y*kpP4!5%sFQjWq1%lu#HwnTU++;D-H;Rpkm=`%O}Xr%g4^e$@%ZE|M>x2 z==cBcuA-~0+uxnP{|`z4fkZh$$fy`txcCqv5^_pvdIn}zb}k+PK~XUYX;}p&6%9?@ zR|dwWmR2?nPOk3WK7K*Lq2ZD5ViFRQ(=xJi3yVrBs%q;STiQN$b@vSnjf_rA%`Ggi ztZi)l+}S-mKKpZVbNl!V!~Y(f)!%~?;)3n`ccX)!{m)xb&W&j*?f?Gz?<;U2I0S;= znVO?XfKk1!p&B&u%vX@}JH)f5TOOXLoeBXZOfd$YsdZ-`mLWWL-~V~YFM{1CZJCf(k$9C4;O!E{y1rXVUJhuDBr zq6OR5&RZ~OPJFi!a4+W`jgFow%@mIcUD805!o?wZKwkkgTE8tqQ?I(Ig^@PeEg1KZ zVHPG4{O7lc{zI#&Ld`>iY7Cddr0l;?MRchr6Hw8qsiN-=jed6+YOI`02T?+zLlrYc z*q^3*+wBaw@RDlVDU4h;>IL>0x6`cH6f8sTe3;X~jT7$y>tT<>YJz+892s-u5<>8p zwJglSAefVK)(Mmo#3Con(%n2fa?vfw&UW^&R_=ZcHO#_Yu@f0*sJ0!*R#SV=KBc2T zb%-7tn&nL*HGzc%UsR_=pW2Lz>aU@B@|ubqL`KM;3Gggk*L2N9LqSwVJRtKFQ@s1z zPk$!)-MVk(v`eee|%!1q*%>B>G6S9DJBu*U1l$|Mm+RF@V|wwL4-oiS(yxYWi?E zTdK!l@vjUCIy>Sg)FnVRaYyz_y95Rpf7lfh31hdleVKB`826x1liVn$bbZ}MTPIrw zy0s`>MkvZSb2;_nhhm}?`(Mi1!gsi%V)jtON)F`beKXO%&sa;QeEh2%o~AIjVCB*1_SUU zwM0M3NT>Rj3(90zC%eiC5<(Gt&n<-#I=m9v zp=#hdIy@wXSp-m_oPpkj>DS2m+|ZL5;}yp6n9AQGkC=Nx?f%(`snO4V4`FgQ0_mH* z_L9NIG=` z1TL;>O1`R=&KjRW+PzAf#evRM<|-Mt^X7Wb7r;3)_|V%Ctx891I1DokE`mo4F(7-W z1_8Se^eA42Sr!Jf&iQ z2gjuo+ElsqRJb0iQ<#MT`BRE+8iah9SIhU#Y3*bO`R?-^S5ei@KaCs=JQ(9j)76Seuh0BRt5eZPcr~PM|0{^7Sipfib6vN}? z?EsNB1WPzBR2Ix)Du~m+^95;c_+hFT={-##1D~{-EhMgMpe7stq%^`UB$-~{f@Z^U zJ&vkJd^G+scT7kO96gV^J;MUK$|d1bw;t9WiP8SV;OIk zQAeTYQ;#)&BCn+F76PaPFpCBvEp~3qm>_~HG`R4&j8k@of-ZNV%~h!ftkF(Z;)$HGZ#T_jCY9Ho^3<}a#Lp-o?_dJ->P z=`06L*$x0x!)7hGI6L=v-NhUJKPdxy~K}cM1S2ccl!D>wz;(^Ba(V~n)z(No-7tK4LW?e zt_jG132(_vhBbdKZV^1)@)#PW#k+vHxZJo|b@@uu?c#e#qi;4cUu*UbH-KN5D5%QA z8rV+JPxV@sUZ#)-g6W*bvV0ut%eZ-VCxvIPN6QJS3}7Y1mtePsYJXYC*qlO+tt zj|>%_Y$1}+Ec~TT9rDB+8j9@=F@{;6es+l45 zalNAke!%_o24$ukk%sqY0H3_QIVA^4veSoHd7StVG^Xy?R;G>s#&fLrQ}Zi=_nQUn zI;K*fP$a}qI1VLe-$jo?=M_I=YDR3BWrP2RxypEM^VcMGHPo%hmtMTL-Et*k3jj;i z8}V^fXCLjYU}TwZ#s8gBH3ro2Z^PNxk8wTd06 z33fa&uV7k*@DqwiKJYc3I$&xHhO6^i5I^?O#Yd(0>!Q1mn5}L>S&rgoY4fLT|CE% zoy-B^qWjUaC6qTbC{po3@#9%oA3Evg2r+3JVOApK4@rCP;Iy$`3Ywj*DYXs`6q%?y z{JpWBI+q<)-ZLhd^Q2l)wI`VR8XwcA_ia#nO}q?GLffzIZNd;rViUYeUZITn^~Fn4?_2W}Dy{@({9NZVpRHh4Hqn?~eHym%dwBRV&BhrL<1^@sE*{wb|s7II3nDKUVVwGvld6Q zl*un2llX{tL*uWH@v$>q0s#cZ<}*e~8yryxg_a^7!K^eoQn5o*|3b>Y)l8a@ThK_R zC97cE_@zl87n{vPLaSe6qroGo{MWC;M_GrCg!$SG#*+!=jh`O@+VWf*KDx9?`w%bg z&xgL}nLAFe3u1G42p!18okwT-6gu6Qw2B?x$-JK*i%Ec4mXxhhbG`nnVy@i+jR=PA zr6mq3Wwey=wZ!6xnrL=X0o5;5GRo69(W%B%Z7fhB!<$_Bf4ek!X)}kO0Yl0xL5%Qw zLz6u8=~I)$_X>C=w8&I0~Dzq9?eZpVRKlHjs|$e2lPISeo}Haal|_!n^JSEb)~xg;S$QH}{g>OFdR zD;O<1W^*#Vb&7zBZL_@N#Z}J2N3me>EJ9#~fzFQbSa9&hyz*avo{Ma|pjT%T7csKW z*)ydxLW-otvKylPpzh&aFjSpL`O%jE#M%D%kry@n&B{73jAIrpdv-5xV7iTx;}Yt& zxFeB(w1EH@micMbMq_pt1&$4>hd(qq#2Y6n@?)?^Rk^kqbT4C12Fpc8aE>={nSq}5 z;%ko$8lT&%UyF?jF=ng@U7w;9(T~;VhdocR;4_{@Z&f-CCG@uN*>ecb2~~cK$rp`m z^&L!n`6MsoIi9Y7n=ZE-VCnju@6#4>f8HbS)-N?#x24A#3!e6~xIb!!FpCmeV|XeS zI4pMqjB@$tPKZ2f;c`{$ZEIi-$y}2Z3SXb$lH&geK?HKI&b%@1#dn@4&bF><*ffMv zLVjuj-l@r!pin}R%&>!}&B7mKC--k-YpyJzU8jxHp_u7zPWsc#3`{1Qt+rWKyI4=2NcMd0BW%b7HUVO248k!a$xi#T|N8R|&K`JB@t)dED!(xN) z^|7Lq18S;6LGgUeBv#dS>WhuKTj3eCIxuo}TKUS<2=HB#Dw*?ZUCH~#K2%*nn8goe zJg?FTLUw;P7Ts7Pn;Qh(-FPbraYajd$qpfHyT1+<(*uv)qAaKPE+F`r&CuifGGt0E|eRrU0`f zNEd|bKn7Y*xD2uOZ2pWds6=YZ;bnZO2*Bt_CO*)7+g@%`mfk+tBO0*+@IqsbIHF@6ERLOkZaASvfNcquq3T5`I}R|n;u*0tV# zfI$Py(tv!NU#BpX(-;{xWKa<5bE|KZ$Tw`xll&&?k~gl>U7%n-4AM~)Rwcs)p=r$J z|A?glH+`sm=zhg1uZSkL&lT6Qj=#uLz#0~6CXca zS8si2{W9`=HqOw`Rr{2-`5S8Xtixaj{Bf*-7MBvWu620m%@6Vv2&w83E0?G`Gpl&F 
zyc=ZY|0LT9v%Dd56RF(IMg3Dc?oKCpXyIj5n|K#vV8O=B8-#R@| zHI!VoZbFK!W#7N2@s`2-0Q_+^FkpfRseGRihptua>p7TRPG%xgaEDj$`x`^UaW2|z&i{3k-c#nkv6L8y15HZ&n$pnj{??ReGZ#o-pl3P^o>BD8pF*JAs_*K9B)|A` z`s*j^CF5FosVRvwtyN#q>7Vl-5NWk768NsHJcnNN>u(u(Fj{JTMhx}b++E`aVR}xm zpe8v6`Cg#DdR`#7Ak~BA%79X%H@u&>Z&6YS6i*YG}YjBH@9{Vt{KO*&PdxC~WOJ5#dUEa(n)u=_A;c zyqP|8_03*Zk$|6t>Vsp-OPEzer;VM*ynsPW+!jRSGM5nUA`>|j*M58T=Q%yd@RiDG zp_O3N$|)aP#h4SJ$dZLsh+c;B&|-j%F~|M5}=spV1wsHqSC|6VraY)7_PCvfkITe7z&<+&eNVW-jV9 zF0tKjwc98D@|on$k(5j!-{8B>G0b{Nd_*%ZWs<=j@bVHo-}C3+DZPa%R{AABjW&sg))Z(=T;NR%~>9#_(eM zv}9*FWV|6&)CC+fEel#R#Wc?5N-GkW#S8J^UpD2G$K0)_)+A2O{AFLnEQ7QnF{>k^ zAN!_6HndjJ1S7o~*-HTAM1Y5m8~HYjjcX@^3_%_uH{e(r!>e(gn;$7%`(;xq(q@61 z*Sn=UrL_FL6GV$rzpBa|A5nh^PXSd<6=q37iKU9vNRc5M6bfF$w><6|#9ks>&hF=` z-7bz2{4`}C<^AobPl?LE7Ps*sG9+XmQwVaUu?jZ@Dw0M_e1#gl>t;E$sxq>gZZU%A zH4VjxC34g|?J?|K1Co?oO6*|3loIgb<%LfBR;uSL=qK>XO*v1NeB`Gp(KH)jzJ1rHQ{S;+UVI z`64j3x_l<^rqM1Yq)`c9d28YNB7f68beN_XX6cfy*_QVX4&|XLKX1u4e385FME!vJ zg-eA7L2%g=ZLEFk$(>)YCI@g2`azLct*?=%rVi`uT^e<%K*_i;iyab2(V*im^9{7?^!T15qwv*z z-`{p}>(`7^$YJD_CwE8uVJHeJl$od`@rs$1)+~ks>@rztx!;9Y)TdQv&Il*1jV_P+ zfo%9&X^W5Oss;DrrV}wwdu^Xv!u#!l^&Y9M0!-IZKA0s8NwF>A9*bOLSif(w>n{F- z%cpnN^_cWSdu#o9{Rk_n2IZ{~Stzj%NX9~s82nS%QhxHB2l7oyfS8ei)a7UU*>fg{S z516i1B0!cvH0VW9qRWQIoP2Qss%FOojj^EiZ|I6~qrXie`^)2X)ZpfpUE0`jm-&yo z#FjMc8-&%3ie2nMh&_6}cwK%Q94n%(OK-m5{h5rrU{fL)gjt$|7IN*WgPgxmCPE54 zJMWW{0pM;}db<}mbPjd5(8nvg>be@qYruFci^_9X8?+>r)R9s+7ND`N6(PMl_hv@Q zv@$)_+?6WU@fFCcXQDEo>otnvT=VBgMc1|EZM74n1O&*M~fz36bhZh27PT_ID@U(CP zYWA(pUL~ALO$&~(D7eCNGyF^|C`sCf7i}**uow@R>6w>+dp{=!vp6Utgr_nBad;3l zQoyl=JfYDs=;_uyzzIML(m%3xLXo1_$s!2Vgj=*Pd+<>TD!Dt^?90K4{w6Dj0RKx> zIAE&WxZPm(g01@cdHjYeLrhGknC4eBSQ<$Xg@wF(4Eg0%MO#)_0L%hIwhN}81BMn2 zf+H^7FxzD)byC<7TlJ#}1$TB7eGBM5j8t?)>vk8B(8a0Rml7*aMmUh0N0*U$V)?Gt#b2lAYGR1ug(3Wci}F%3dj znIJ@@>4~EXGu~kzt<#JoJG-yW^I)-&GFUbHa8eR%@QzD>b{%36idH@m6mEC<>+{sY z)2O~iTs0Lcik^|pN`GK`2589Vw{bQFRP)kNY@SSz?Z_UAtAPBn8iRu+|v7) zn5^Oc##z-roV@QS+6uU4*jls>ExHeD8VbJ&xUH?MuHY_yC<~tvRDVaqxo2hv9E$GY zUWQ!J@xsT3w}(@)caA?bp@`^X5+Vzi7whcFuDD0OwK*!{l#t4VN7f0(MTc2*5QCb| zUi=}}7qdk89dFSYmWvDZJmV9u%=_gz=J2bHb{*o)Kle%1s-qpxpq5Zdm)k$7KOKVL zFeQww5L^8iJVRxz$UToE>RxWHD-dX;B#XM?lsiS@s9eqv>4bdrl)7oyFF5@-F3JV* z&`TAaiyAuE>+bw<)Z^8%?i~DRunm{OY0fD$tN31qrtHfcS9h>Ijo zovtNNG~F1?4nfLpHLysLEh>4G3ws-aRcH(Ujk>^N91< z@i?#wMIawFI~ks_q;*i0QUQC~lP zFunSTjKuD6{XXhu7*@fk4en_X2T?n?@~y0b(Ed_ZE64;63ZAd)I2-uX=gK zJ_P_dyS~zlso%ym%9$0QoV9(AGLAZDcf8EbI=LMyc#8dJy>#%6je!eO>A%vq0yLwu zv_6#UcENH{5aRRinkuM@T={;S*))4j0f5JWy@^!LKw#?T7WYMKk*zcM%W`zkv#5-W z6w~^t7Y$FEGzpF3tRNN*7`4Qq84*Y2+=@9t-a2DtppigL_nE?Icu$-A_O+m)%&OJL z#0oi;$ifGhMMc6Rd}>T=;Kc?kH2!V3fX91gIl-@FQeDY1h+XXkZ71YA7B-$a!MbLF z7H>yVggLjLurepn_gFAl47*>B*kTljeoTDDKKq2`nZa1U$1B$1QO-j5CaAelPjuA}Oxcucl|68b3JTWEk zPAdEf^c7gI^V#6U8ya4w%38@pDj`C^q(>W2js17|NDr3gT-<}vZi%qR#d9us=g8}dPww8cQh zOIS`48gIZxud70C9cC(C zxwh|rEQqpTS0tPusNFCX4#(gu*gqn3@5HX85dmvFKO3w}? 
z%43#KX6PB_k51WBD;4T#rG?iNa;OVca8>76i5a@=UjD7qJ~6&89W)CI(64f23+w|Gic6Wkq33h|Ver&H$E+0MgmfBIZBzn>!)ArB8+!^+Vt{1T{iWkm3ev{K1e^ zzE{Jk<>vNq3a^dmc>vm6cf%7lIkKR|8oS@~p_7V^u#-HHzVM~+F*O};u-xaXcO@ej zoroSQjS&t6M?D&ejjdyqS18429QmRopE%TA5O*MEyj%P`Xh_0q9g}(X>pOu}DjG9k z6fokl(HV+m^&??iX|j=mvS&!`Z#Y;kN(gaF|G3d?l`NJY?mofS>+vnCxBjQ+4j*vF zkQhei8flC19BZHLwmAx}EfX1~sD7GLIP*OND=xa`68)^jU2uyi?K5afv)_Ino zh~9Xn;kn(9CSi-8xHmDh-b4G54-Nd)Zpl*hPzYRDr2S(cakQAtWz=b$5+i5YB%@5@ zYeM-|V{=mhMpiHQKAyHh!zA1+l!h(yY z49y5O&A?KK83$2rJy@4&86m-e$o`-vuRS;E(CyJtgS>xyrVI_2mQp%(O>$DrNNfS& zN$+FA{k|3leZ*FPUmQYECacRVl-otad{}z}v*Z{^8Bz+f^+D?%I9ncpg!B`HnQgs4 zPd0+z1ZxGn)b_lsu_|f6o@-gceA(ugnJOt7qQ4Q45B!<<+oT?1VEchvhHdu|kN9r@%n_+39$OPvHW-rpge$$CM*V8Q0Nl@h@gojfwF zNZTM}tj@k&02m3Ee^M#~leexu##8^BlR|@%luVn=FoJHDIkWI}o%UD1TE3Fo+VG4` zI`H9%t(}9U6X~(Abi2*{n?)@95Mko1hr7(*?ne+aFqAY zuzhFNo(_V5KY!`MDTuD&%=wiW)^5pwKE(_xLvl#5+5`nYGFA1X?l`<*xM8)M^5oU{ zL9cUwr;VJ0c0~h+eGj^z+-}ptFzUxgr7FM2&T2hh|DJbKk#)1<>3g)EinMdg4@$$9 zS@Linb3jC>NTSG^FHr7YOCebJE|V5SUp|Ca|67;B1X<-@GY*t1p-oWzR8sq%4#>@M zR=A}Cg{vlx*UMrSPsa@WR^&JXml&P_i-mw`*#)1lh}j>(6m+<#cJ>^nAz@A(2qDvE z3j2?HGAU$lcnmEopY9UjHhIer_LS{2G&`=@V7_!%E?!6s|A^^Gta}YZfmf1SqeAH3 z;ze5}5vs`3>SwpVC;iA6-R>EU`L)Ja^VMoUt>rxJO#iawxBB?FW`q(b9xtISN78jN zE6J)yi)C5gJPI63H~Xf&%9qu~9Fh^?MXGXe<IW9ofAwWKiLer|{DA0pUhCslxpUw5 zx5snG?3t~!1sq-#qi#kXB8QWdjE0j=ze|=XA;CoeQ2wpOQicTPG)K!Ja^XIsQc%#| z{1gH4@|A0-e69;tV6!5s^ieU4*i~-N3b4*Jijlgq_3NB)Y zXDJe;-}=}oJrjk{{B5?}ADxjbD7-FuylT;dboLd=iA7dB+*zm;%nHIU=D#)`L>`B3 zd5j?fchR+HR(sE*gv8rYEmvtd^jLkqn->xvv9^FS@AX|kU>l^X;TC2^?!|xyPn)f- zUUgXh?);lG5__zzR&}poUEX^P0t5RQ^)04gvRE!Rw8JyI2 zN@xC6o^5*n=~EOQ0rb^15Tme&Y+u1n#c2f3iDKHbHzlUl^(EmhM_x(K?1<})oDEtz z+YjGx#W+EGbCe9aG(mm2u3NW=K8& zbK#ckiYot}4P41`x6Z;^)$b6w{QB+e3lgf{u3VuLNp_00Ea79^rSseOg1Y~n&D)8D zDsuD|#z&2|HDsk*@^30l*m4%;=w3FESa0A8kFf7`?%#c_p+jJIaTpykOv7j3fp1I9eD>zW|efyhg!iKcseV1 zmUqcnx8G*0XxzH_)U`w>XhHvA);}trZ_`wZN~HO6?*@tK1r~dF;$#?l;N0jWKpev zt7KAGuD6g?+Z-KExqiH6WY0+VivlgQckzO$X9_Bejy8PzTH>(;jJ@NN9=31rxhOAk zntoq3O)-3?7|ln=kVLHI2N&SyARtPAw`W3-Xqx4v_^wL+*0Y(>UcNTuA-+X=#T`?A$(#DLdKsxrVDt&TRot%vekABoyBfQ2n;Q zNpm*M@Tkkw2cpY{j1AW04XP5^8_oybe5P;u4X* z@T<-85U{hta^*saOZ-8Ba^9SrJMU5PG;v55l2y})IxFNb(rrf^wk zk5_e(Oa*ZaW3yJUtas>vCu1u^kmxxXus=3_mw^*vytk4im1S)k{2MQE{H zYNb`)eBAxFZTf4KL$h)+_$gK(+6i5@xMIXOHK9D@Wu>06Ewv$Cqh0jqT|GfsC0eCO zbj&R46~3YHsgC(6+%xuzI!i6nj_B+>w2o{F{&mDE@DGfg@vH$9_{FQB_*VOhqoIx4 z_pAgNMur~Q_0s>Y`Fd#de2TYPyl74Rb*W}VEdCxmmFEDq+$XW`QopG)8+k2}4^Dtv zbIVX@7#E0L-WVjZ8PX|-pq!_djV)u|Ng6Z6&PA!hldM&bpG}h_=tq^i1>9VJ?{B*#gJaowlPpZ+~3=etal+W1eUlmRt{ z8LB7-{ai7%EMtqU*Mt(+LL@aDi94JW5Zh8g?+#mRuo^;2G7)$IKV!ALwexv`MiZ3} z=v-!M=8UWO)$yC!ly4hWhIw>cn?1d6hc8_#$j}dxTG#Uqg&PM?W%ac*h zcdf47B?}=zbS^45dlJu4F@jl+qzCi6Vu2KI)U{X}yOj*?r@WFL;{ zLOt*?BvWF)2~LIUe-U@oMETkL!t9ao!yhJl>tE=O0-{Swbagjd*EV*&K&=;D$PJ|D zyIC}&>C9uv<}IUiN4aU=YR29Y%rc;HY?x;soL$ZU(w}|_A(wn+bl0S*2sfW?nJ!k` zc>LodrYHGv+^0Cv`+mUYwN1A2ST=yks-Uss{Dt@&E^(9Png85_L*z9{lTc_KEhgzk zEN25{41=|>R|zn=Gng!h2=UU4rh{4RMB93sVnN`;e(Z*i_x%Fm1zFIo1r%xZ4iaah ztvcZl6lRRT$j=(M+BOAu6SDc;z=s61qxJj%7MLEj8O=G5J$g z;%67Z`2gT$Uq1F+JVJ;Yjfs<2GIQT7ihQCJ0TSw1B<638w8uPW_;fBCN<=Dt^nDcj z4dq)N@ea6J^<1tr-?@mYL`**!2&VE`yD>%a#^<%&a$gUaPI&k?-b+v3h1dCJV)FJ3hxF*7xpK+OZ0CNv zG6YYd%*fR;w%-~@QVHioP*P1HnsWk+ePnl~B|}G7O>%6FU?zX(G3e);vCsw1k9G{X zYCtbTUw5afo=TQj`n!jHj)YD{k0O(9kHV1Oy~(g#SOhhW_ojj44L!SyuMvH)l3skm zh;e+D@%U5t+2ZQ#wst+it@QOfH_h_LjS_Pz{|xPD(pe;~`38K5ln>XdmGKFxn71c3 zO5V`wDZz9=0#C&<~W(TZ&iBpS6vgL_+;p#^m7l&1o7JM6j~z`kudxr~$&Wl`%7 zT>#gTb_pJ|5d;>aP!hqs)yqw0?im5;f1mEZ?*%YY2sKnO4^FX8L08K) 
z7-{L-Ly%W2k@a;uBJ{vf;B&*#yRfR{RC>{Al~kM}J-nOAbn!vGc7Ku{B9a5Vm4Qm3 z+qhITVful7yKuE3cr@f4sm3EZM;#hgFqd{)C{jllFw%&a69_9qY$C~#X;c6Aj1n_z zn?4+&l%f81@0bsK;W!P)_X#5Am0$16^po z@QH?I_-=-rG((>`x;jUSKRXBQ-Sh@hkB)3%I^|N|bJ7DAu5zrPLYyC?fr?aIlVt2! zrOx_V^oPKE=A@>g*RhzIhbUVQ$LGk5tGV_=<63h4t{}3c4EGavN*tI4hLY$lo05*R zLjs(sEQ(4!4I&4yT@WLarWq`3mo>F6(KDz2Ya5!zc+vk`BTWJ+aa`?{ zkaZk3dQAEHDn3=kzFpT~-O{@ij8b1{!}&}zr>3fdDBVi*fd`m-YPg+r+;o$ilriQS$Y7+<-=Uen;i<(h z<3Y&9jA&T2;jw(gPw#3TjM`S}y;>fo3IKRg1jd*2=`+X+UrXzt{<{v* zA(|zmyyx6X>gLc*T@2r}G~t$U1YR$C#%9zj{Q z-4>Wd3x#XgH2pj@-n9B)gs}UNhTj{e7$m5Dg}#xJbtylGss7w2z28i&*N{#mb0X=x zdcZA8Ra|%q94HK$bc{)u(Jaihk%5@I#)cyF;m~u<#a30b0&CYPsv9w5xxSjMQ3~|B zGid$mW3xliYI=$0251$iPg_;FhP;05Nz|7N_-`a zS%cO&(@0YZjxso*N-?tMVe}~Z$$en$qYHbHFI~dzJEr?xZ6&r~U979`kOg;**IG^b z_fp%V|JGFsLT)x+vGG8KI+6q!>K&dEtaZ(1qtn&;&gQC ztt*Ql;MMO~rc?KulNc!5-D$vMe$D^<)0AktH71iEp@ge>>5#y0<(JE)H%)|&K&z|u z_`*XaQ;@+wOB!<5knBA#lg5ZjPCI;)FeKUVW5#3Y&DwDS+eril36f#rr8~kioHZ04U34_C(uSB ze!k>YzL)8F(1x8aHHfBTB;@bUxRGp5dPXEr0)4>rZKM(}>vy!}s?ZCCm#0VgSl$l`Nr`+ngbXFepO`s5 zfn?1*9T7W9LPR|NjoTPOW@}P9IJLg}=zT*VBrq^H2(*?&5*ZU!{Ly5Pvjp+wl`X$& z^xL~e2vhp=u-6>Uh=@)`5?Rg22_aUTY~S8Jkc!RA{yTQGsvTZWLWw)M);gkdwt#&5 zGrgU)O;{aOtgNZRSz8U3%N8mmme9eC?A`I%Fz9nTqoLP|713hN2-&Z(nsu=Pi%mTn zh@H1`GwXWt4)A?P?C7DDnWZgO3orA&FT@exsE_O1(miWSIcNDZu!L2yQxnDxHz4iE z22U&%p-e)=0Ozz#2PTthN^!65Y`5=!YE2ZyXyJVD{@10)fHIU0%$_2UoB-%^6 z{}~Qp{JjV9%yy5YNn%DOM#_j3e(|r*mI^gUv}X>8iqG@W+r1D79q2igV`T~M8$ERo z{1NiE$IF7HL{aiujR1do?{&YK6luiKS~>Wwn$E~=b!%$f^;p16QjXsEFN=H8PV7u}yw&d&cubpv z&yi*H({&}7jV2~=fHT3zTWg^!EBeJmk&a-yVffiFrgnbUR|MP~-Cu-XO_}muh_{wp z<77qg!X}p`*)D9>ryb>;qEA9YiN1=Z+p~uw%&H*b&T-}qh{_ZGReRZ`<#&KDsX~s~ zem#ZlzQ~#E?L<^-SG1&BR|~~OjtfiHH^!AfgWxkCywrKGMoOtKn5~an=hy^XjE7)Z zRM+t7kJ~8Lj9?x$$I#Fqfj<5oReVg=V1{Qdw1Qb>#CSQ@tU<__$RR6ua8Oo-KPO7k m*olCnilnM65A2J$HUBRg%qqaIw&VmUAou^jeB%H8A^!uQ#IE=N literal 0 HcmV?d00001 diff --git a/samples/6829_00000.mp3 b/samples/6829_00000.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..34f0382f198cf408c4d6f19676523b780e295e8c GIT binary patch literal 15561 zcmcKB1y>tgv@qb{?!k*&km3|;ad(H}4ut}3aVP}$;_gt~rG)~;-5rX%7ie*5n2)@7 z-R~dVS&JkCBv1C4nKN_t*`p}KhYW%*3_9A{vd_(x5GTI?Cl5FGe}Da- z2jJ46>;JDRI@`KDuRQ-hC>8{Ioee@o$HE~X0+Uft(J(MFv$1pW@VyceeIqF?qoAa! zuC1eIXl!O-Wo!T5>7%Q=myaJbI5aFOCN3c; z?SyB&Ge?Xn)KKTQc%*>rC?Phx z5Dna7quO@nK!}q7fnR_4jLRR*+wtaYMMKsAk<18BEu`%4S+H-i-f&b^ikXwM?L}0z zt+4OUh{`2P2iFq1We2W;AIbc6qJ!<~8tsE40fb=zxH0c)fu|G%+Pft0^0%UA7y4|r zcQN^|;1&V#_PTv#31B@`T-~56;eMcuzat2j4A|)**92DI;bK{}W zq~~T@_rWbPO2kzA>q!X&obyjm5}J0URh@{NKllbu4SQ?rT7=s=K00kPI`|1B!l^TH z!9u14!ZTu`Eo#T&_gQL%KTZIJ$R?%Sc>9){p76fkk#B<&G4??qv60S2JlhIA@{C!d z;SEI+hkh}*g+)5TYmcA~0bvevA!1K%RTK(}1WNMwLW1cO&PF)-z0!@o)lBf4Xa>4a z$ha}If@99T2MyBq6d%}F+cF}J50{8Zs|DPS3h{Urql<&iqO&Hb+XDm^rI4;;M=CnZ zalse3;n^l3a0?UMb5@5Cfe+GX`^L?WI=Tgx%6$0*A%A=I$T5#wcw(Tw6_g;$j>Uiv zIslqtv_yD<*m0=_fNNC(&FqRn(Og&jiag*fRsW4|+@%|I7dQ^zOM?nn=?CObW_QL? z)SHTxk_v+*;1&t_vgs5;Tm%U50}euhdM5pEVb**Tz)!9!4~CYaA;`l?|BXPKSEcZh zeDp9WEa)nim&FJ7Y|(llx@}}a6yvMZjLf#v(aPzeQ&zRy?+;`67}Nkz3ji>IP~Nv` zdI+x6VtN0>nBf)<>4n;j$)r8v@x1GaZE5+t%1}iFa9^u3! 
zg%Q7)rRNqdOXP+b$~*{%{hq!}!_==UV!vlFrezzxJON;d`WbOjk?zO=7kGED=I!z9-6$g&U=N7s7r z+ME8K>Cf~SW(rb$%uQJ0m&;2jp1*;SVTu{VqtP=>lw zDfYfRgF2&VRf=!JEm~s9ifz;0zJ`hf&BJSwA9f%OBx5`U$~PKqj+ltcQ`|&XqAYvt z50$1%P*%!0v|zOd^(|9hHc#LdhIb?n!B4t#Zoo!d_phuBJEch!TyAm43dcE%qYsV| zUMX2S)+ChLGCs!rZ{J`8|9F~t@(!iVkhrO?`_o{~cPh_3h|VB{%qPvBF=}S5Vqba!o0iTHYsi%-QuKl9C6vqELuUWVu zUPn>NpA0?p?=6bC=P3(3u2*34o4%3Ekv~^18^z+Wd79%* zUP#VLHdrVKWu>-*8tn1^dT~{&FGv$k7f{Z0+)GPAn!YZz56y()yrVke9GJ6!^Jo)OB_t*-zG{E&FE8w26F|B>hWO_OuMV&x|IDGu$0?ul~_$U6L z!0QXe;uk@Ka%CS`W<6~)6}AJXrqQRSPev)D!=wv-+2RU!Wc@;7CdUhsMU+9OS)6M? zt(%TSexvYr3YM^S4&Xg4oo^ER*S{L3lJjV{nM zp1bH+ImSHt7V!i);H{FtRF7R=%dK<2q3Du-{9hIi`HT@{w1 zGKcHyBWYW@8pSVxBMFuWP`LF-(b7kKR}X{f>k>@lf`i;-HKmQK10)0=lhRR~gI%lk zmJNpqDT36jiKEK`miQukT1VsaKN6bJoPOSua7!Az#5wf%3m>!y83E-d)HzT`#N=!brrtBL7Y+1QYkc#(k?&i7 zJ93A!`Ep)N5z<-P=mY)BL3eK?=fq>o3v$4q&hJ3-Khy{qZ9csn zxOUSabcm0Zv{;!XaVb}1%Gd+@Crd$HrerXGGhPP4Ed%oVr?TYHkBek{o#_^Psrqg+54mq_8|k8)h>APytSjpG@PudFZ{U_XxzDQ+sJaFSfDzydOfYB4 zQ2X#7maf0|!k&rY=2_`vT{(BQM}i1_Mf_BcPHn6&hx!h~Fe@UJu&;Yr1V`Fpnu}?I zRFzt}nm0pY)syTcif~1~KggU}B(f%~lO=np{m({{uOwx7$2e}fX8{)FZkCLks*l>AT~rM;37t2=O3PxmS8`A5gRVtA_bSHE{OS{pf?x% za)&gxCSz+f1xtzQTB8JwHKIh#zD3Tf@{x`o8pGB|Wy|?q3RsDgAn@x1b)FO`M>$$46IQ8gL!ASffn=%BdK>tZS z;2RTN{XH}cK7G+B29v#r!C#rSAwiQpDD(GhifvRBxy z*a#wJ%6b0Dyh6lD-(dQaUz{(j=EGDA17(^C&ZVQFHU;kRLayv@KWP=jmmN+=0sZ6g2 zHDX8xHTQQz9RkiV@Q|WnD zWcnyN%=wWqjb>nnv5gD{PDiWvt2p+W7POHIKeXIuysbd>ABiJY^dCCU;FdBtgRRvR zX@KmlRw0C>pDqe{mD`+{ZkILz-rC3R!A0v2IRpL8Y{kQbY_Hz@3?PL%oG9TO=dA`XK&zC>=5ZY1mL)Ok zjF+CG+z|5uH+THoVlIJ${_f5AQ05oL=eX--!xG$j zPfRcoa3j(8^KLgM|3cxD4urLft%2jyT`~a%ik6dA@8$y+v2~DEtQ>K;Ouq_}tSM&M zpZCPb0W~;mgHaeg9Kci5@8b9$jdWk-j-XKYrZ^Mpu{A54u3#(>{$h!bP-spn!~G9W zxa9#hzX-UZz&1ugVjKyamDo2{S#8I-QeP)HIt=9q2y z7CqR)v=4EcM#@fAsBuTI$R`u{cc#DQ!NDiO0G(NFz1(-7Y$MglzIJ^@x=T;a)iPp( zp$4~tNmg(pK-_ZU!NG0W_+!wLDl_3Bz6a5c^A3)CMWJ_QjN}T;_kSB&nF)*|i7Hiw zEM2jG)XP*b{dJL6WFH745RyU#;*S5@Xw*Dh$pdgl$8;gNSq0zq>&zXoLpoD-;k~wu@0+cl)u$I#U0eP#xsIt zMtArw-f+v0se5_bMBNKXU&8&7d(S^@ObXS}CAD1kt=VL1E?eXI;YzCiPq91_yMkHZ zy!i@)i+wazF3MUq{O(ij+>;mzt$Kp(zzK*la%vX~{22M^-H$uVLYe+}-VDO?Kkbkf zpQ}Vk@IvDHQ0qbvr?wlBdnu1{W4jLXyS5Ww;maZMHeNOp{%P%nvFh(rfVTIIT4%}v zk6aeM(+UF8BizCyEfm={o74bFsUt;(b&6*lf(24L28fjTpO!voy(<|(>b>1VF?XVl zQ=3bkRTY@qi51>ccZrMTpS6~;{U?*X7HKvG6m%r{jU+!LJ>-6wSI!7n@9sVnQ#TDX zwAYtUbh2(*>udfW%OM!`sk^UL+>_dk5jzvwTo7TIjZWgwRz$Xn7&f_6^f_OI)rWS$ z=c4~dVzaWY}hw5kX!&BU)=(USbJmVX&%-r74e2LK;VD zgDyzI&hJmRRtzUIQ8WK%*Wegt|7ww@4UY?(%#wZ#Aw2+uPo0BI=;iUp&T2$*Ugbz}nr-^+gIcA|VyO|# z$yVf*sVm!cVfk`J+EB{z=@pCdaBS9+LssT^`x;m;(WB5?v0UVRMh^pD{fDG?M6}*V z^Q59T{-NkLP=5@h(h@0I1rTUm<+BITiGCToZBpxLkYGDk38AQ z$mwh?-emqG8exfKL9SOBXde zbIlT~AZ|T%Z*yw;1P!@Iurrl{zwWEY5NXOM8l*t*PVn~eWx1Ngn5UdR@bBjQQIy77 zQdkNKTvyoh6EZvGCDC`i!2Zu7MhH8n8pa2OkAebz#Nad&@)+(~G{<^@mhY)=& zMaLIydLTX|`%p3_1uRNpp59{zA~ISslw#d>7f0xAnV&~EVW_^x6>0l(*5}LQq*1TF zatC@rf&0^51Vi~Pw<5-zcT)k}A^~rO?Hfyvd}wm!#NF=cpWjUwb-P9A{<$fH>@g7D zO?9>fIuZ~{KPx5cINJTlWLHlz$AE_cp(p+jZ?x8;LURj@@E2gF*kGpqLjFPLYLjpX z)}10^BmSk3ef9BY_iJj+PMAT+3f!UtC!}8JS@oF;{TatNt(6v_dUB+v*g19y1;db& zt5=R5VR>5w30UPGTuJPP{&9~Iqtd{6+2&lpam$W$(}IKxsT%3%C9O(gmVEyULyAmC z_E-+`30UJFSIl$I*VMZ1MY81jMdonpCDAbMnX#|NG2Iud!ZJjq<1^2at=VF#-08{p zqfpqA^LbQj^TD^1?UZZX&mTd>%R%_}L_#TL=MZR?UAeXIjEu-KX6ecxj8N^3&o4H< zpri&d0aA^wkeAxOsnXCImrj_he3IM$+g`7Uwi;$W`y&4~8-0W+-wKxHKIdFU(L902 zPjvHy4EUDskx7vJXv+l&bgiHfk(7Z|^YMwruXiYc7;j zqNlyDT4#{tL;e$g{H^?^PcwbKk${Eb)oF# zL&JUbPB@VY=>1d@*f1MZvGT~b*`07PR_gw6v5fOmSAWOHA}Nugv%FLghRI_)VWWfy zMC@2%4G!dfbIG>L)_qZr?qAwL1&mju2oR_KcYGOuBlm}s!D>Q`sR3$BlSIK^PLyN_ 
zc36|js^|lD#X{*nh(u85x1v$b5qBn_d(KCpef49bl>>t7piUzx(a!q34&yA2Rdf5w z9I)8Fbo)h^Z?Gp7^xnaVk0Oy9{0VegCi^Q;{wF*xYhpT&8&ii2+_^tNbBJHLZu7$t z5xJi9XdQT=k8f=&fuJsQf-zW7e9LVwIujE#nXJ#HN*fOYOk+~h^snmCi_;#9FbW?R|MbRb(GT-GBTJ;nVO6mI1ag9bWY`C@mm zg;KnH^A7wG6vCAwh|FXjh&=qW(KL$ z`CKtVln^dsEy#^j7fI3;TO4WSWLU2PQ(7s4#MdpKmhQ%CCcFO2cDnZ5J9%}Axg}$b zLp~^#?sjQmdULz>+yR`#MQr!RA$G-!pv!2`@?)U~Xt40p1YewUNmDfP!>u%C#G7v= zJ?bCPB_sm-7{R85*wy|?_a1geMv5PJ40R~mX{o?044QUbyj&B1TMxk$FJU6DkQ63* zwt@@FFs+yG@OGD1dz@xp;eJ)c@K&eKzr7MIkjdew;5=|+tKPMq|J=Un%Et<~Ole%% zdd_-AaZn;c=$~AgOM!p$RmGBrTR}{uks9>-M0)ryV z-7TW|dyX@POZzhmtc3)a4V6&W0-r-)o+S>wggLu@Bv9vBN!G86z&P|gUJt_ChFjvK zk+}Ta%Y<>phL&5l8GmX2gW@s&;_j`R(wn&EHp81OTAOu%$rj)6Wn92o~ zP}Of*Jbhf;{&L9vLl5WQnW=b)VH!rw;_0d`hb(Jh&gSxlM+970AJWkxE_*wX(j z@JEpuQwy1Vec?l9{6+;GCavIIOs{D!jr*Dfm4Jq z*D)GAH^-lRupX|g!32XPk;@lWkKr*aR+IEI|A5C0OFUZHv+>beZ1T?8Rfeuel+B zuCIp9cI`;VWP$e7=vtardL@p87M{3X$vBpJjfUeYDKJBkF*D+-heZoVr@7#_N2=6S^IAbB!YSqdaLLe`R@~EXZUv!J4&=E zbsAC_ny-cvQ(`r#cdyg zF0ZHI7B(>*7pmDX86ry8uL2}08AOGz5)z6edzxGQIh!C*H)9|HZx6aB)Xi`2irMyk zt$6`!y^(**c8`^#tD7H}E7G?(0$kLNHW$L`5)$3yTW^Pao>pf4Er-hBo(LIxUkq&+ z4ubi2OSnZsW^mJzEk`C){VqY8mj`o;+$O~nCdHuFxJHYJ!@2Z5457={c3D~yYnViY z*Vqoz(a?Emq8QE0&}8!i8#;QIW!DySTI(Mbd4}@GPM}O>(>q6(Thri!3}XB4VcMuk zvyDtvzS4gd5#ilM2bhEeTsIp{^bS>yW&V40zA>0LmV(aYA6nOgYIVa)G))mCi)yp| z>!n+$fkC<^&4LDM5ACBk0J9t?&=LBCRF@e**Y)!EEpRx};Bhg7r%?w?u@+wFG_1rU@VTsW(Ue|*#50xIwX#RdZ!WcbF5)(Frmt@| zUy1{FjRA;TnsKmYwgAMDe}%QKo~A`RwTAx+ibIH_%qo$2>mVu783w3GG-Yo3ec`!- zknV4)wAeEJQr2*b2MjuE(dpJ2V=RruM5i+Fk>To`^QqGnhg@}@Un3}t0PQ%kov<>n z>%i^3i^QO5n<(~cdQw)aa-#f<7|u3>Lmq*R`v7L1?YkB) znekKf@KC>NPDs~xP)NKqMW4`M8oXO|+0XA1;)jOkV~Itm7ed(^z2FP%QP){(fI|wr7x}x=z9OwW(kIU|iuOR_2=^{(;~1 z1T8^?V6%|k`Z(QCf(>^sGb!?m-3hOY$i3$R;B4@_F0*d&@gs?Z0AZm#%%_6cfkmjr zEICw~P+}V#-fD;TVhF418@4ovY(W&=Q&3uj4kBHR;V1zI14Z?x>y*SM-2>9C`J~>HkR~5P0 zVHJi-)l8zz?mfM{_$NBy4Y>85$Xjhh#HSxUQ(ka-n9>yjf#e$f9C)Rt)m-eYa;3>+ zc}da!JTnG(cS>cy{w$X)Ke#E?b!`W{`Quz)FP%d}G>`qnAFpivtKd5XTF=FO} z4F+2|e|?C5mw}whM*L1Qq*KdZ7g?7NnBfF-2B_ux1@9`zfVMAxd^0PcH9eYR<1p65 z?e6#eiR1lskb$|3#c0T>zss@$4{n7M6MTxywFpaRO%rBGpv_TkZ-@L?=!-_s3*Eyt znTuK^M%(ztrC>61g4?;mzapok{MqE!D%OEl(MMs-(3g>}wzv|?p_h&#s6m!O;WJK2 zCVU4|36U1RGvu#uv-0?JXk*RWFobsDRsy)2;_5%Q@B@(w%XXUXm4dOo- zb0$Ua?MSPfulQFJv}=TRF^F6Zyk&&pC5X=2oF_@?>-gYm1+o-5sok zA%%~jFS5C!Dw%|0%*!aH6|rRxoaDQwoN1{=c71q@D|@~fws9Cw+u&4ET1wod*B5So z2)K_*Q_pJ!fP-%-mIlVwB-qGO<21y4`f$tY1#3+Q`!5NQ*p4LlC-Pa+A#_6DpgJ9d zoY~Ld`a#!jSh|h_CtA}g*{BGpyYmktZs?zrxK!mpUk%*SB;lbRFjFA|ZPPl`ol<>xS9xA!QTif+ z2m?7jr)e^s^~UxvLL#eG+1cY0p;J~SBup_YYUihv*QM5o)t}x-^ZO1_3@ObiqWWV! 
zA>?tv?_WkENN1R^CbJ^*9#%KFlD{a@tvV~S0F2K0br=^bXw>pXBn4LJqZ$pTC#{os!Z8jQ241b zy@;iC=yRlG==*|lR}aNI75;xel&b8o6Wj`N0B|unr-g>y=|nejGFempv=mDb$N~ z9CfiHvxwka`qyATMs$1$1__k0g=N~A+7o?beBEKVMFid)X(D+=2A=;CF@+GmUAyK% zuc5U&@{wwN8P8HPNf)4yN?OhIujb{DBS9SWZR{xy#mId=o9vh(@#8cDkCO2cqo$7$ zyw@_r2FxTPFaBOVy$9aff&AFRozd)J8os&Nrfc-a5Bn%d2>(=qkJ=BwVge{;R zUeNwr=L3MUJODT}#Kc5M3&0zC={NR-@g{TRK4UR!!BR*tzNDyLnVkW%q`L9L>X&&F zvsmQ;UC6QZ72_(HthSZt^oR}VOY0m z?Ysd1s1qQZ3^AF&+ii|7s7km}g1S5z^BUIN0-l zY1GXV$zUjQs>Sm%b4sDxn~`my1T$dDh-rQ*23W>N!y-h|I}&vgWx>!VGeLgI&a$6z z&pW$R5O$^yc8-xIvtr?e%skRG*wA*AV|tJxG-4`U&f<;WX#O#Ldl6F<_jEiC zTWP-a?$X+SNYqy~@|&O7%X6`UMV|T8vilvUanB<=r0%^$b1iAnW*4t{dp7{qegWGV z;k45B-{MX6VGv+PP(Bzr3 z+@ol-_%m;#U)dUON`T0;3G>T{eON@q@}7>IQ!4I0A&ol~_{!1iysL&=IHUVFV#he+1#CQ|#fc}dNQ(7|_{r(l%7>JYcqE&U#Rysj^TdY#=Gt&bEhx$m zG9*BROj_x!1oYfY0h{k6gK#ml0+LO67w={hocAoxe`ZEUy$K&5fZp24IV!gXZeuqaXLWLTIk>a3Fu?d!&kbDy(!fOzlD*HU6t}z$LL1 z>q2-wryP)dpq8+CEmdY8Agd*aR`i8BxY2}IrwH;ERV&jciTBb|qHD>a=*(g4+mc{8 z0GJ^EepqPpTdr1MMb*MCDbPzTX0)kj$}e7rqpS_R6tHt_=-qA?PZox1#d$l zaz`klS)2xN$lM{M-C|6WJcy-M^s8I{v+Pq{z2EyaR_W2ezoQcOrL52M zrWLN#Nkjm&!$*Y+mP4a|Hek?81~V-yV!@A7F5iHO13*IOKZhE{+Jz-+CWI}r_5X4X z*hD+bPbNe7h|6W|%l+T>(~m!Xt#W5eN#lA+v7%Kfu~zM4?+d4?OTQjE%pb!;)F~ z1dof7n7re`M3D^fJoGk1j~Ly_lHnuKI1iDEy~Or@Ht&AeCB&qK>vcpdk>tS#H}e-l zczyz-8}*pi5bh0Vu}_6#u)9C7^ND}^<50E~)yF_{Z5I1Qv05`eqpA%=%%6kAitmN9 zTu$fU78AKh$$VR!1frLaO1V&jZe0XF9k-sa!5x)bl5Jhs()bBf|H#bxfo<_=?H2e8 z+}<*yy8uu1lh2i1x(2SvAL+{!DpPdr$Os|dS;&xxeCBF#%{|e) zbn$Zkn}?+#xwt4oh_Oe&Kft3%Gz^X>MKn%gjU}}B{HwL{kN2SP&WC@gLp)^^W;!Wg0!le4ifEL!*76(Dx~Lh9l7`q9~L;v2vGesn1wdxJhGzhNbZeWx)z@> zW8VPxcI!MV;{Tn$q`{GF6QiqDQU-1rfdiR)%+$FNH-0MaY5Th7=Cfd8u+?wVZM(8OO)o2dguFQtysjMd0H;?~Ui^W0Az5HM{$&EPfBt(K1P54&3SWuU#$<7lGe9%|7tBUJ(;8Pv}2yul1>13Y~ZzZ<-tbZ>YJOv%ApqNI}hJ6;g$pw17dY&x;Y5*hv1?{qkF5{$?VzZLGY`nVG__z8ZObCP^)e%VxrPJLvuJ{@+SxM^Z}R z!~)>-Zur>f)-rHYq-o{9^wf&ecesT~x;^voJl6*5VFd@x|EQvS9~3IpC`6v7cB20H zM!fi@ru;0WW5Kg6vE%UfrxwDRpaaXQMbT-p<6RG-ohNzP42_T{-X^)?RA2T+y0JlB zQp>FdH4Z#9hYdjZDavV>si|*S4}lGCVG#40Zkr515N$T^`YAaAlV$1sUTR`zTOcJL zEAT+r{xXX|f~%@qp^Y4YMMC8YFIV)|+x6NjhOAx;2wnkC8^<>G5f!+6o!bz5Vi5|_ zSIT?2a<-n~Ug0hu+V}mA{}ARUj!qf(!7U;p1R5f;UetZ`ic2tpy(~9fh%Aaq%b6cm z(@GV?%zf9_9a~8{i!hai*L&%k5dra6$Zh&tADUmZkKwaFcReL6ld-zXW^8|Yv7V2l zy7(EE71JfDP}BOA)L%-mQss@CZx0{P7W?5AGcixavVSk`lo1VhXspeLioIy}TeOD_ zv)Nmxc}IqejxRR=@a8x^hFF0X|3xiu%=J^o_xYMzxX}UIOOdGD0s-eGX6j4YYNn+* zNrX1E(ENzH`YbXL{H!DdG9r;*lC`Eg9@+{2^&8yajMksDQ6qoe8(L1Xe?H`Fpa1ve z>6^1vd%qk;m_%3p5)?R-2LRP%R)K|AzZ;+zuTW**|Hkj3D6yZ2VOG?v7^Kt8{P8kB ztha68;~V5WNPVd6U%S`b7hd%Xe}m1&$LO^%1KIiDalHoTuRrL8k<~OablRvE&&Ri2 zcJN%+>11Tz0uQ8WDa)gdA4N-+r;Ro$40&|SBCVA|Z8z5^Gx8!l+nSCzQ(tFV&jdQM zQK7xzURR^Hs_Kjsf)W>tANjBrZ>^nDMjiK&oe1aj|5N+u+aNtW-#^gx2rOot};v2m7o0waXahfxm zEkB{)bZ(fJO-s)9kTD3?-hfY5+`_0_p(+Vd5vu9P{HCs4qu6;P- z;nc)$d&;}FX-`B+(v*m7+8H0S7I;gn&vd@Q{N> zH0WM1_JghQ(IkV;N!~5wWa?&2FQut3aeWT9maBA_UrYmlAyqEH=Ux#wQpJALmnu1` zfI)gv96Qxj({`T}=KqkBZ1g3l6I<>PTZ8-InD5V$Q8kTi5Iim>Vsej}b)Qkjww98b z2qH!iwRD7`PaGOv6f=1Nt!{1;<&Qq4tFAhpe6`nI-8d=W)v!cVx^y{z~T zvVqNb7~!Ur%ql)Xg+Y)p*--FxOE{gRKK-?OYIS^1khMHUQX5Ps1#YPj{pjg1G-K zOyrjtnLIs52xm}Ll#pDVv_uSDfi?ce7ns~5FBsk+R;C1|L>?dYM}K+|=Xb@=zuf^{ zpFQl2zYAz^;!U)V|Bf8(HT_Ta_PVX&`>0d1-tYOpRqbEUlhV>Tm5PO0S<>N_9hgq; z!N6-4Z**&}k1L~&+@o|sTEwX4AUDMfZ1biO+LE(x!dSH;jQUyuoBzvTg^}s60U_VY zQ`krw+K_M_AT_Tw^PQ?)Ep;SlA(>6Y|(coou<5WzOh^W>66_FGn&dH>QC*3i*)qM1m_IS$;*F^?43z%3|| zgBh-=fjrH+ZTPCgQflXoU1dw`@8^l72EQekQdF{s7qfZ5)aE*G)0%VZ5<@(MsE(kE zWi!8@fs$Awhs~zyv-Vtl11H)SbfXO^l=C*kIi;crlAAP(ChgHjgTlQme$V{e0W!E1 
z4|aU@WK4##Q~E&UhwZ;ca}4I_Q75Ein;9I}{dD@hNnv@g(ba$-oil13cAT^w2DaU` zM@qspI4JLuSe&tUE8oHvlM!#R9H8jsYW9BnHB2rb8Kec+xY08SW!pEp{hodsv(;4z zw{pOW9sAE&Ke_nspb8|Hr{xujO}?_X3(-t+OTWFkt4c8hXtuSC74()c$~KNhWS3XO zt6Pis-Y}qdzmyiMl=`cfEPxxT@GZ{wxpwZg0>x{jknnZI&kfQL{$}VO#{>|TbCbbL z!hdz<6+{_`%Z3yK4rGacOpSWRNu+F~jKa}%wQ4B5%S9)Cr`VDRV6;N^SA&y@jb*Np zWFpEMXTvQEwYz_jk>9he;m$uOy1mPt^C{S>>@&y52y#~-%>04;^<-Y6>r5$%rOk2o z!MD5azi~<{xSMm`7-@_bMMlUKnIojE)WGDdm)lnHM5evGrDA~C(a26G<9EglPfDe% z;Cx$09Kg3{Eelloeob{`o=}wuOP>Ffvuo^U)~nR}N6J|1lOtXR>O{5rsyx@3DJhC- z(2<9Shz%ON4F|yFggwTH8qSbg^rX5n_P2a2t_LY?WL2x3qS<$!KM~#(%LcJbv3$Dy zerzbY86Ap(W^(U3NT$M*^n3;m#YBkb;)3y3IsRV4#UK0U_oV^*&+374v#=^|W}C@z zpLHA$3@6XCJqvElfg344TnY6vO{tMSj|y?jZNhgBKUz&DQ|xH`sxyi7iXqZ6uFi7V zZ{i+phwZTP6;OvW>gW3%z+9;3O7^|cJe0rDl{2*SmbuyjrWq5rzyibU;>>$ksdXcocqXms&5c@N_I6*%EP zh5S5c%ax7k>+?@3$BrXQqcXgM7^Tr?l$r#OV=WZ?sW9ZM!Z0RX(|?f>(iP`w3Kr7A4aoTYUG(|*-!;HKBnj6IE5-{nxdA>RdNJ<-8@s($d{GON_`{lffLwpj**vG6 zhY0Icd(uT1Jf=7oP8?+B-PaOaKXzykwY2_Zf0Z!r#86bj{yAg!2d~AJLUtq%TWE+d z6JZ2K@0M$|80z&a;e-F?^$N+A3_pyIN+57H-=7}&=OiR(EY&xCk;59rrtioNTg%hb zCVqJEb61=kxriK_*E>5qCDrmrcJ*_*8&yNt%vO+OLos~F^o^0$)I^jqdXFcQ`5BSU zDDV{W^19wA+g)rYEsUjtHU{2?S(Fdke#uxcO*mq1z+2Q{$zXDr zjh!(h=HK^G(Zi6xn5ky}jz}kOfflL|t`50xD)%lpBFn(jeO&S5qB}v#?*row(cU;4 zlEhUB%lpG}dHi@ei1ELc?NDpFyC`9JTpvk3QD^F54ycA;w-T0`8Mg8!MreYf@MT1J z`udP#t*T>l)(-Oouda$}eqT2~cmOlst~X#@Za0k)>11ow2qN>$AE-NL;`j)o2PL`= zU6~U?@zU5n`wZ~Omw!Jo{L{OJtG27Mq6N1+h-Q`(isd1o@rD$AU0X>Xgcrk*h#%+~ zN$TOjQwR_VEraT>8r^BD7G_CPN`KNtUsk23?};S7uKb`IIzHj4*E&fv&pWzYH~4}e zN%6>JGECInuW|jp*l~$}{anQ9(e%D{^3&OWwIZR6?1<0%p*g6;yQXxJue5PxzEa70 zfG5U^l2t-NX&DN?V)}2%$gHSfR7jwQ|Hwo@kP{dyDk@4eQPf}(Q~Wqc5@l?dLHTv? zSBrQ2p2w(xHI1ER6mL>c98@G#W#MsY6D4RFBlU12^1*NLeSY`<(E_)up6@4Cc)kx8 L^nYah@5lZ>R5m}H literal 0 HcmV?d00001 diff --git a/samples/8230_00000.mp3 b/samples/8230_00000.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..b7c5620095db916a11b06864b3bcfc3df3611f21 GIT binary patch literal 16101 zcmb8$Wn2{9`!Mh&g{5Ja?(PODX^`#)DQN_0X_oE|>FyGcM!Gu$B$SenRw+SX9@gLI z|GdBF1+}81_XJBx2Y-(oy z^YY5tx6Pg1gTvFa-xoKx_m6P=>CL@O@|NZ&t5x5W< z0)b(K_W8JBAQ024B10Q21S!_**%r*=XLEqxtR7jH7qkR6$t61{O2?j3N?_-b0?||y(nLW zCPJ*Af811VFHfSIY1e9fC6y07vRGe;zH4^5x6!4%XAg2)bbeXOjSIqw;u$Y4-j|s2 z`E;}fw{#ggwai%(`#=>rC2bSR+T2;nx0{nW2LgXC{LarWyy~u7&fc{J-OdgIfLZ>} z{TKTZf0vJxUD5Vgn|81Wa8P3@6&ha(TGsR~ps_iF+^~@XvRaV|^>nlUC>>gg+q@Fi zf}EEuTfi*}a+2S6ZL>`xGXj8s8GLoO_oZCxi`D1jW*g3+gi}+Ec(xf@%LH;{Halx z_f?UHxl>5s76wrY*Z^~Kg#3V%hq24nf2Q9C`4_5A+N$@Ft%R|tM;cJZX_z+NdaJS+XeuTl=G|TnW z615^d2>aM|kJB>if9=Jh)Ymh(Fg7z0p)kkUuW-rS_N0YHRMkqZETM+3cs zr4X19h#D7@q2h{;S>T@qML?Yq52Os!iotcl0dL(P_;ldQ7{CIWz;GWu zIXc1Ybc@0X^3U4JA8?Bp(s6xcI;b}BV`GFra~kEGfV0R$tVbQH&Y5(BS05gtJ(U1s zyNLbGpzP}lTwmR8oF23u3zJmjEQwVc7k0A69JyA>tNcauCuql}m%`NGX z8hc)LFoF04S^Tq1xJ6G;6)~RglT`U`sQU5Cbs2`+w7)huyZho5Owtq=s|gdLN6v5Z z>T8?$?XCM_m;oN$FAidXUPSCbsa%o5Lt-h7FjjAr4trxkKI5QcledI3aWofl+J<1t zMu~c^4e4=USiYA_Vh(QcLrJe21ifEU^i8O~Evw!e726LoRHdXV6r@u+=!hgrCPr)+ zLD&LqgIQe%OVz~bK=TU_{(eYH?dFx|@#R@IEQtBf?FPtan_-Y*DIxZV#j=q_)v{>v6N$Cb6e7KWrQ>6kGiU$ z2L)R<-j7~_WfRM8+e-C9b1_%}f=|tJcp@DpMMU4OsjvX6PnKK`X0WZ46yZiqbC=RQ zIXNap05A7VeKTqp0VA4L4#2G-$l=;lkFV@Z*eD_;d((`LMM{F=RROuB1}&9lK`n9X z-Cab25EHd(U;LRZaJ6WOT8%tvJ^RDbcqSDq1*{`C9?vdU)JH;7eF8M*$%TwvsOdmB zHH4sq1j&fZgof|M{N5W-&;5m4F_4DG=|GDJ=Lj^6lsaXcT9M*Cj^+xTb%M{1dt3{@ zH|IfPukBmZ1d-?>(y<48qlDRI!_KsGup z2dWiXWSx7|u$oZZx#2fjIa#h?xRpWhXQC7`AkY4V(DSHMHpmuY6t^X~&?^8I>T^ zgoYR_xU%T=%~1U7R0z)F-&V4tI&?j_RR+PWZ_@LLfb=Bb<5MfP%tt}9(zrruX~gN8 zU&d$3w`X22Z8|^+F$Z+`2fC;x}HSGOh|^P$|mGG_L;nSOIB{8oM+rM}EU 
zH$)!7eP2uT7xB8mV=B;36W`}{mH5DpSgu8{;yx{YPIG&I@u5`cc=*E2?pTuVca0X= z?abV2hLWSr!^rK-Il3qRb2;I_Q$~XW)!sq4)d%50t)^3y1bqVb-qc&ia~kTFl>tC6 zQ1=$-0($YOXmJn+cG<83b-1NUCSr1j3Cx2Z%VZQmOl^qZ*SK+hM%)*^Yf5qh>wAdp z^Fs1fjBOI+bp$Q4hlMF4*^(J-{vOGT>{s}3Yld-qf5I%Y4>3y4{6vBG>nzYsvM-oN zCDt`M{KLW=S!|DS_1P%N(Bbh@e*kd4Pba3hAw&`-=+9bYAGj1lqZMM-FbeH55mvPH zk9e2&Qs{Q@tQm_a*Z;e%hP`+w3x%WQ!Q)_h}L z-q7yyc(Xx0I_l_NZyd2`7LF9oim-cxGxBF{YB0sO7w!VUrGJ@f79vXM4Tv=`Ex_+> zBVHxXpXoBo^E)SlB{rT-uURvWOS2!thMG8ja4VBshVom!<7`Y~KuM59*QMIBmh@@( zjEiVI*ULSkpdUC=nM@mBxJeVak&%440|D1U?SqRV8=|n ztk!xhNd}0? zBz>tF18aJl(qBw_uAJ!`CfxiskZ5XS*x{<&q}a8OjgwH2pIjOc+>Q>HTrf#lhR5YZa%a??(XahHM}6DL6d|_0 zhiU+%RjshpBOm+a9kO6iE?|tWMSS z=XK}QcW^mFP^6jLL+!)$GRYXmb&hoJRGoCMfTPBd#%JrXe?L|{Z}SvcL>Xfz2eTp~ z$}mQ&5?wAKB@=5<+q@Q{DUjP2WzEz-!C&}*AR zOJUVfR<_m$EaqhDZ#Bb$?mj}1s9SU}yfAm$UsIce9iy2tp@qG$Iqpevf}i1J!{b+e z?tgG2cN#UC9!5GX{QT81eBy)rTq_lHI+xo-3y5?4#08Hv9DaEt}xCni<qTxfyRxTjrMvG7Gpx-VJqHv;Klx>WtObb?J%b z2s+*^Mm<6TRNIYP8BAG(nD~RLD_!i>5{I*ngs$cv$e-l~*+L|F21FFX+K+3PIZil?0k;Sszr6B# z$B9AgMRHvU>#Z^ZH?*RS;V!(Td_<{f&t>h0U)7#SOtg%j{1PB;-Gu2ru z%D=W3%4~Kebt{rRH=4&fF;rFvK-aB1iNRXPHI_E-4WcV`h8vX?#r-y}x_%z>8_we(Hwwx-wc*e6@g~<&EMon~@aGTr01!0)U4fS{u_DJe5yuwz2H@+0hjJ5c*oFxSMc{FP zp}hD5x+BaW=6+skKi2_z$-0dSQ&7u^zQ{0+wij~}atBE(pCbC&T zjQn=>HtNT~@m1rnN(5q2dHIK-8c{*U?4wCS!g^1C7Ggy?YmD@ruS}6F`(7Bq>{LQ< z3j$erY^3y+SAlTbC5LCeKyIMm!UWGTLWGJaCXMd`n)E{xq$@G!>HR{Rp(3=X6{U`j zy{uksf)i>X^=?{i?1Z!Imu-K$!6f}ch_VOk<~*I*O8hNS!v8W*xCIOZ5A6gC-e>l{PP)ZUCdxP`q`>c zE-u_+hcy0f74!+~D@Vgh;%@hyKkNK)OOk|HZYx4&+THfKye^}h04qVHw28U*X)LYl zuzbZyIf+_sfbToChCeh;z}ghAJZ`0`5mo1tua}eJ@#WkclYI7% zqMd+RodRw8YEv`ykAbHJ_d`6hO)AH^=D*65`RCdmWgf-d z)}1@Y=~AdZRlzMVl=yMMc!U^nOn*9yE|MWTcBM=X(Q+q(B(x}p2_?^(mVIf>DhZxaYDt6^u7Dw3TK+t_ivbS&51OA-Xni! zUHK3e|D0#WdL&zGs{w8alepsNj*Xad&m=LS$qM+>zsU7M0BMm;4;FL0V|QS}LZ)*$ z=$Z_%Q4D)aR8;%=hUWL{IVp+zoV|*uoX0^~hS+b=v;;Ia{AJQL@m%g5fzQ3c8X4g{ z4Hy{}kW4=61#JWy{qHD~aEq4+kF;`ZW+O}!+$C;-SR>moAV$c#uW9R+BPAJ9Ye_Ld zLe5Wf-jd<+2c$+X6ZDwnoBj+`-VS1ZZ}IjMWwG}{6{8HLe<`T76807U_!wpNk-hS@ zNNJWMRb;B+uJm6WrA^(@rhnrNekdOAuqnm@$t*{Nu2N}3)_e>h%)Q3gKo}RN={RVO zrG~b)o8itDfLdvh-jJ`u_)d{?RquAAy(?%WOr(W1*%|4FVOGACvk|DYC{vmWRR31QkW$H4fKEj2W}}5{6%dp^#u#Zjs-d;2ItLyT{VOEtc?vt>_zn( zTjqg%iAOWwsk1r(fX~2#|9Oxcw`+RbabsuMjGiXP3Zo?oX4Ig)Dv3A+4zXEhn#J?l zDgrh`WG$}_zooXfkL$)xBj{%vt8mMZKwG4FfH_s{<~`fm^ownSKQH)TZJHO#<6Z*# z>Ab^J*@s7UUdGl8-{N!EuWxOQ`b9grUS6tueNhM6p56ms7Ia&Es=big7qw%Vrs;L+ z*=lGtC*m=sjNFHb6gx#`+VT0jiwim2a)9u0?weSIeME_m&odZA#!c|(PZygH0_aWp z6KIiCDOs&}+d!E`%vMt|R#avsW{-CTz+wyjpT^A-Ez=%1VOr^yqpM>+-VQN$tgS?x zed@4zU0t&;OZNJ!#Uxp}ia46Xj_o#GaLb#3d1KAw^+?3)V#l7kYM&2lWDoTys^+Yo zS{{wsvJ!_G*cHIrF9sODMI9&rm?SdlQGSTd;f1CTbF8VRpX{W&>^?`PB(ec2xQGKn z8%le%(14pJ%T8gm9KKvw3HkAJerKDP|6iUhhCtJ;@*Zgp-}&)Gz>#;%V!N2hA)Pjy z(^aL8x2CNA)T#4SZVR1K<%+dh!8!#AGBclMQjsAkuXWw8G0pv4vzXWT{vU-Q{pF?hBxLs+`6n%cPF7m?9cgBS@RD`v-T5bz*^rwT(S zRVYnCswO~F*~}YR-b%!B-KZLljQz1&COtuZJ&}x zM2Vr@1$L=*7$%%*k-wJqVVhW&wjp_I1j77E`0IcV+zN(*MY{8Sh9+KRZ+dA9C<2!= z6c`_X4#-7I#uZT*D~*YQJiSi@>oM@q#vv`q0i&PnGG+jo&KW)5)p^_`yRy6DdEZB{ zm>NIU@`f_%#YZzt%;r4wmoK=nLtti?qdxold1JX>o#9q2q*{7B-+O3c(^*4zMo-+l zOFtU81tPmXZ`Nx5Ug?KHcj^ge&^E`{eCdT#ItbV{_OXCMc>}WDQ zLSlha8rIgOu|?!5;s#iW!*#gY_AnwcWE69GO=H0HRq+*-=k72(u1p9%%K;c z9!W{-;i|L$R~8EI^kH9euE^~(-w6(Jb}7UJb3*nCER^gK78@(+9ZEJf1v@b+X z+8>wy3LA1fzEywzHt72NzfEFW=OIPq6>suO;Auzr)eOn z$J+(^`~WL8TRj;W#Rvc+=EJV70)Y@esTmp0`7RO-GwL6)79W#Uwr{Vl72g4qdU`1@ zt$Ts#$2w2-Jx!^UP2kI}KfOTML?vaT6*<<~4`V^>JTYF^bFt~?quzIE@jn4fTK9O589 z-s6Wnacpp><>34@8T=SEEq3dM-Hb>@_Q}9f4O6b=Z)MVwJ^r6*bTmn;@j{sRc)M%$4L 
zCpI7*REKC68X@b`$*FI^g-j@h=cimJ67uSRo&vMNDMLPUT9P4-rFM}N+%SBOtE2-E zGb!9UAao^+Pw|NZIW}BV7d@p2`uizS(1{XE)q^_lelVV@sqIw%*>oLPJD)LTGdz9s z{@ppfPW8sbceYnnJ{%o4*W|_oPIM3qLXqjcoT;^ztZ#YWQOi#bZu(d z1#L6`#`7nn{V5A(>fDGug9Fm3Wf>k(9zVCac}IX>)`+Js0H7RJYSU5s#UDZ*&B~}+ zuAWy@zFBe(l}@x0KISYnU=In|kEBfx+2fa_RopW@tIJp$Wq)=wI#EGz@r%Ea|J(Ll z!%})L&<>Ali;>sz&Meacak=gEOH}Wp1h8$ePso2NwFw+V!lcWj9Vk7<_u3G`s-2j( zLb?8mf_%d?V%D*u=nuDY? z0(f}15pgDlTjeARv}Y1il8D;4lCow6n{DB@GkHXWlB1=su9R#}mTye|JM#;-6$ib@3lcf&^{PNT>a%0d)K zi;gMoS2a%)H+OoK}dF8g!7~RyJu#^`3x{ zB&}$W6Y{|*&uFlo#*uIfif{TY&{7`%4?hI88jm(-VA2xvQzvgF)>5=lc3Xm;_55m# z11YOR^XJu9X8z+&S3Jgdh61ilvnUK*935Tj_s)R$bEgX+rQWRd{;MKHk<=xL#yspZ zakE|0uG!^}I?UJJNi_fZjVgj<(;;zgIaVs@P)r$hA$?eKf9I54@SJE^>7sWWO{O!- z*9<-ZmEwS}Q|6-xQGMiIQgm*^pIqa&^%+n{OApKAzr7*vzju!l*7#X{UL2FBM;;mD z$W_~7_gO1X-;+-3N1|M@*>iYY9gxEj7>@xr0;N^(_V%mO&g-i8tyTVCeNs@viaxuU zY)gJ#_3A!aJNGO10A+GLZTh$XeGY zg;)YO-2M)5lqcqGT)ZZ`D{$j<$3=z5mB0WVh(bv81`)e`5%HT~1+Sx6^<}cDMOR|x z$#=M!`5Z)x3~_aw3-GixK+Tk?#dyC55^8zujg-T;0bKDGQzrL0Rb%|hc) z@84F0&;~2$yMBFp1s&Ofe3VM$6-LK*aLWK9*0=;ZvW zYpxi@()xx)yj7dI6rwBn8rwvwP|F15^2BL9i@U;FVa0l{M#TsHj$Nm2C_xW2T zAD=mHK{`}UdZy>5`W|=VdrBi?Q4^ctDr2|6-QM2qmkA4U*Eq`3^)HNcP6q?nx-zZ1 zDT?$kp>)N$@Xs3>l6y45R`)h(hKiD%XSvsD@VML|N3`Y;UwMdoHEslRL5g#H>ZfFU zZqnjWly=F~YsN zJG-c4{6s}RsimcFGP?u5o%gPwW-Os_FJwTS^Tb4qfW&WDK=n%qH zOBA0Ge<-&$xHR~HEBxZ9SohmUKOGw_QrR3mwZ^pCf4SutD2u!c$R`C9ol7TBo81>G zUA*JoTvL=CDUY7*_@L&2?9WRDO(I6|#ux~w95qP_qi!sPhB;EB?_1jZp5E-JU|~03 z0p0f;6+2B#l}2Ex=7!MnlpCS2F10vf6B4Tzd;f4TS^oe(hiQ<0*7#Ol6>}F>5I%zX zQ{!}i3GsAMGGbv2b%mf*M(GU(OkLV(Z+q&p;8_*E=S>JFXo1DGBrH1i2l~&`6fiNe zfU4&+F6YVPI#uc zN!fRaU^QReWNpkpkob+;+Nd?%q(AgXD%R66`d zgqodRjFub;`6WWyPemIp{XhZw&UADNo|UA^3tuWyAq{jXXNC8*@VL4l)vR`6PwTLV zM(#2=@BMo;%q3N13*WW-Fyh zc4D|6dt~g(4y47r)Yf9A&fmrrK&YlXGqo5Z=YUWQo06qX%C*r-*5Sk3M)@5S_Z=-7dpxCT)g#Z6C2_~o)9t-L zXr^sU(vC1AtURv8#LsG;Q1B0z*pbZb^rIu9KwWoBi;RVVkG;7{t;FJ=&3`q4b__?P zmAcv9Aa`;VO*X%^v?=1BbQ?bzB1Mn=c>Lb-$Kv=r4LBTfz#THmZW8xA)c&1ejVyTP zM^x_sMMM+yQvB2Yp;V&q3H($eZG%S_3Dd;o`4!hlU3y>u;|1MB+0wo3xnT3ZxgHnw zBxUYoq5&x7w6`E&@;nsTsN&vFRA@S0prnol>nX3UWciFH&D3RSG|qel4~gsP+-L$4 zew$wCmY4+nVueV(wI)NHKL77{X$bCPn9mi5SMT5NHoh5};1AO`$aqDfhur#VJwoBTN=lsb6x@Q4 zkf4r|PcGEjjUlN-g)z`*44W%s?#Rh2G0Th_9Z4Rb4`yM@CG8k_vLp=-2-Yl~*v42Xt<LgH)9|V| z>s8iRuk#v?cv4=%jovYg`#KQs_5QBUrWa5LF4KCS&g(1xdgCwK%^J4l@0c8x0H}ec@IJh4U+DnTh?!zuaDkJbfmurtB$(LpywJ&N)GP3{fBX;YW z`L${0y)}(-IqXv4h3v50g&t7kxn$k7+6g?Y;+s9t(e0xuEyo^%xHO$pz!?TDOrQY0 z8fha&g>$$?2&qP0(w!AT3YO59A4j6(_NPf;z!wsWsbocvHI$N8D<;S;dHIf04i8IL zQ)+5N;lwhj?w`d_%zlZOAG3b?`^7jNGEg=JpaY@w+@w6g552GK!U`>g+%jgvYW?Es) z+c2VAv4;Y)7fSpqNwd<3!K5aABzYlK^?MYu+P>WwSSwWFi>5!-UY@MJ(aP)NFiJ*y z@fJsa{V!DTV>;a8glOsw`-jd6)HY#5P(XGoB>Ll2zk-0qRhTUoUN2YbPCR9495PXP zXldXPBDky%=X9c4?5%NkD4FORMcyy6lGj)`k}8U{vkmA32%Pa~c|Z8Sup$nWhbdlJ zYkacl@qCSdKqdZfO-dB98dIt_AYaSh_FG~gNcP}ECT_{rq8!5u)ZQz(u(}5*CiNQz z%s2B0qXFA;>UgSO3gz`VCOKWhk`-z-xO#)=7&Jg_-aP%FrLPxJbRTGCL7L_;2&S~Y z#5ZuqrU&c(;}%3*;otX5o}kgK5B2Leg_q3NE=L2G6|OIz^vW1chNI?V;g zped)j<~2_mIV5uAa*#XV=qDijUbi4&!HZK$G06c2>$sANwske4*~l*5%njV`q)eXVxe)Y*|4Ot z(JA_#cWDAtbyUpv-1x|-kg#ZDn`K!K1C}MPx^{6)g^7vJJG}k+u|a=hG4r1CT)>hM zE+GR5qd?ahu^|68N%=%NLji7CLF|5aTTxOXi)a-B3Ul*RC+7HPv%m8t=Wb#L&a`B3 zs<>CzZ8>7@cVryop4n+oI_n)zX0Odk7XZ8x+zT^oEpCN72<=UD4g(?D@%gvvLRe3oL<}1mYXiRy_yv6^d zd-=9@{e6*NZ?SQCae)2_@9gb91X<8S!{4v>4v(S_GzP)~H!le0z_8zd3Q^4zt)PQH zO>~qavHGzvgVE&Kc|X=HT20#$!L0yD$I*DcAF7+|=r`th<%xN$yKWrYLDF6j=0jLP zV7Ey8sBfyKiB#j78|&`j9{Fmgt|zNLJ@pnL@(jPbcXeZ?$Aly`(`_D6tz#x_X-B1} zA$pI4iOIsQO~zNo%sOL)`HKdUS8w1}G-Td4Q~dK#+OSjOB-U>mrzw*4Zfn+8(gdhE 
zf|^zh?=&SIfC*M4v}R3|)CD_fvp02VEKAy@1*}2;Y68x&D}@iAggTqUMMOE@LWMQA0_|g=j$LvoJs5p@?&BODCo+p(OJG}-zx|L z7WLl(?9De>+HVZ+BxnORkRZH5(q5Y8ARG%Gif*x!@^{FXeC-{o&ylOk<@fiK_nPIb zCdRyCDR$q1nV`h#m9tl*r)p<>A0JGM?IH?%l;Lr8Jk|F$Qu-lVzuI3v7}S>+{R;UN z@uwa8H&}3pyP#O@WAd+SgX)>e*KtK}UN-cYdDrVT*Z#qBHY$1b97Bn!HA8P;XJlMj;s)Q0>_^X5nXJJk@EE-d(0SygeW6*ic;@#7HtsijsN!+Au5Oh9A^#LNOZD&i<;=LCc4U_5jg;?= z&Az$(d`iG3Uo&|Z{Ow@mBJyr?x$W__BP)(DRo+IB%=yDXspR#S1Ax37-V=FiV0CwD*yoW%6r28;z-T7O&$Ib0EwK7~6C-ymI_%Rx8Ry(qvlo+cWO>zdBeU!tsGm z;)CGk&i*Me++X^se41B4&dV=OUhm|1PzFnpYF`6S+m)&j`z32INa(EDhUJsY&nW^I z;zVm@yv412=`4-8*Gly5^c}qMC*MrT5e2F>bQvW$x^rFw&rrC?Ekd$2HL>Awi4p|U zwr}`Elp8Lrz8?Lc_TjRxP&B3YG1};5oXe&iBKFMBo7cNN`c_1^$|6c#(HY>goEDlD z)BUXXy#;D&GXsH)!$t2-Y_s&09;8F4W#^u1SkHSGChvaJ@WRk=Lk)Wv$SckgYeBLJ00^+fiJSMSGP3UKxFhN9YHvmsh{u*bLim@r z(T9@CXF>*1?T#~6Lkp=m6q0#-u&T0@-|NBt`X;h7w>;i1rQ6YAI%0JA4jS`u#g%R` z=pT9#FHlt(|f^l?g7izh)Nwi3#N)6w`seLTFvLL+nLg##AHz zS1)P-W$}vdeYz)0jS$KdodiYZ6vvj48dJd6A4k-TEWG5zu{;=;eObgD9L(-B{&or@ zW!o+K*RIcm*_E6FvOCs@AAsZM44C6?{Ugf1cb;zQ!6%Unq?ev!8geNliXGq< z$a$KlFg`8A{$gUCnOG_^cJibq^&fyqI~ku_I0_$ntC1XiD&BWTS8+Nytf84t)Vqeb z$3idIDBu<+q~CWSV4N8=|5&cYCDXn|j!RBoauX2Q8|BIzZzE$;&%j!&f`X%rmgcQQ zhSDaIM+|LkRqP2{KBINjtt!p`UZ|wUI1^GQv-?3=i?(UM12Zy?lHJ;KH?Nc_?k1?m zS?zBJng#6N_X}cvUCurrKg5lQJPLSFWeRP-1s;YsIgybOP_!wlM66A5HOIc8p+edD zEa(G@n7>EGWl2%KfY$jY+BFHNlg9~^e$mSvrymxUrRxhF8nx3seXI@R_a?^Vwo^*( z1+FoQ5Sp6!;MPlg3x`6GkNoAN6l*{BgWySa6xc1C4KdhIEts~ABc5HjDlkF1hQcTz zOv~1?Ordt%g9Dpn4mjz}@d+hQ-27{3IDe(O>g(>Cq{ZroRc20p_^lK+^cI+yNw4{= z*&(U&U8+c1P7ZFFKoAL^?u7{xq0Ok%VYW=hwS(=3RPC!)S7gyTT2xN0W?6EVRM#yK zYUV^f-A313T*ZyI?a6>3aVgTJ!ov1W!xYuHW|=w_Vx+62&CgVgl0|C+OLmT;Rqh!` zR(@lSi?*nc!G-s4jl&VrXjEyc7*UTWBLtQaOCJlaAc&>ZF-MlECMejqJx|qRi3&C` zsImQQ#}E1p;X#SS#K$rrUcUJ9&RFfZX;y8eJOrw5WH0HT&MdmeNS0X)vuSVX9s`S-hgorKPO&OV2YZ50i8J*$ECo%;6!H-V`B$1# zrS*vo-v8ml?s3Y&9li7rSxu7wVtbxbc}@Ae4z3omsG*pSG zWi(OiQ^Wii)2J2`JrbEQNt>`5v+iOMMx+l`S(Hjb$BV*#FT@^I;4hK1k zTbIUy2KH&yMTb(Sk)b~C`b;^jI>%57ZnffS=$S*jN9ZTq;!B{p_9K|CF(J#i(i$`I zU0#>rRid{xMvOe&5R%eV@UX^e?~h8FCw<0rs9+foA=}%>VQ%m}S}r^FX(X zg6U>i2j}H}rfGl4B z&!}&oqSPyr9>G|qz35)ZCO}45Sn(*`^2^Wz9@hqrfw(!uA_CbACFcCQOwd<54K0ng zKQ%NP8iC61X236hRyHIHIoTDSxsV<+6as3Z;wkyb?Z>DWQ%IPrqIl+@##k-cc$PIa zd}L!lW%J6BES5)RF@thT8B8JKO@uWC&%!1M;tcy$ofpYh?-GeyKLY z&^l#br7(Ea+PpA0G-P0)|K7krH(67yOhrmnjt_q8w(fu2;MOlf$!u9{KU9?ec. +``` diff --git a/samples/VCTK.txt b/samples/VCTK.txt new file mode 100644 index 0000000..b51455a --- /dev/null +++ b/samples/VCTK.txt @@ -0,0 +1,94 @@ +--------------------------------------------------------------------- + CSTR VCTK Corpus + English Multi-speaker Corpus for CSTR Voice Cloning Toolkit + + (Version 0.92) + RELEASE September 2019 + The Centre for Speech Technology Research + University of Edinburgh + Copyright (c) 2019 + + Junichi Yamagishi + jyamagis@inf.ed.ac.uk +--------------------------------------------------------------------- + +Overview + +This CSTR VCTK Corpus includes speech data uttered by 110 English +speakers with various accents. Each speaker reads out about 400 +sentences, which were selected from a newspaper, the rainbow passage +and an elicitation paragraph used for the speech accent archive. + +The newspaper texts were taken from Herald Glasgow, with permission +from Herald & Times Group. Each speaker has a different set of the +newspaper texts selected based a greedy algorithm that increases the +contextual and phonetic coverage. The details of the text selection +algorithms are described in the following paper: + +C. Veaux, J. Yamagishi and S. 
King, +"The voice bank corpus: Design, collection and data analysis of +a large regional accent speech database," +https://doi.org/10.1109/ICSDA.2013.6709856 + +The rainbow passage and elicitation paragraph are the same for all +speakers. The rainbow passage can be found at International Dialects +of English Archive: +(http://web.ku.edu/~idea/readings/rainbow.htm). The elicitation +paragraph is identical to the one used for the speech accent archive +(http://accent.gmu.edu). The details of the the speech accent archive +can be found at +http://www.ualberta.ca/~aacl2009/PDFs/WeinbergerKunath2009AACL.pdf + +All speech data was recorded using an identical recording setup: an +omni-directional microphone (DPA 4035) and a small diaphragm condenser +microphone with very wide bandwidth (Sennheiser MKH 800), 96kHz +sampling frequency at 24 bits and in a hemi-anechoic chamber of +the University of Edinburgh. (However, two speakers, p280 and p315 +had technical issues of the audio recordings using MKH 800). +All recordings were converted into 16 bits, were downsampled to +48 kHz, and were manually end-pointed. + +This corpus was originally aimed for HMM-based text-to-speech synthesis +systems, especially for speaker-adaptive HMM-based speech synthesis +that uses average voice models trained on multiple speakers and speaker +adaptation technologies. This corpus is also suitable for DNN-based +multi-speaker text-to-speech synthesis systems and waveform modeling. + +COPYING + +This corpus is licensed under the Creative Commons License: Attribution 4.0 International +http://creativecommons.org/licenses/by/4.0/legalcode + +VCTK VARIANTS +There are several variants of the VCTK corpus: +Speech enhancement +- Noisy speech database for training speech enhancement algorithms and TTS models where we added various types of noises to VCTK artificially: http://dx.doi.org/10.7488/ds/2117 +- Reverberant speech database for training speech dereverberation algorithms and TTS models where we added various types of reverberantion to VCTK artificially http://dx.doi.org/10.7488/ds/1425 +- Noisy reverberant speech database for training speech enhancement algorithms and TTS models http://dx.doi.org/10.7488/ds/2139 +- Device Recorded VCTK where speech signals of the VCTK corpus were played back and re-recorded in office environments using relatively inexpensive consumer devices http://dx.doi.org/10.7488/ds/2316 +- The Microsoft Scalable Noisy Speech Dataset (MS-SNSD) https://github.com/microsoft/MS-SNSD + +ASV and anti-spoofing +- Spoofing and Anti-Spoofing (SAS) corpus, which is a collection of synthetic speech signals produced by nine techniques, two of which are speech synthesis, and seven are voice conversion. All of them were built using the VCTK corpus. http://dx.doi.org/10.7488/ds/252 +- Automatic Speaker Verification Spoofing and Countermeasures Challenge (ASVspoof 2015) Database. This database consists of synthetic speech signals produced by ten techniques and this has been used in the first Automatic Speaker Verification Spoofing and Countermeasures Challenge (ASVspoof 2015) http://dx.doi.org/10.7488/ds/298 +- ASVspoof 2019: The 3rd Automatic Speaker Verification Spoofing and Countermeasures Challenge database. 
This database has been used in the 3rd Automatic Speaker Verification Spoofing and Countermeasures Challenge (ASVspoof 2019) https://doi.org/10.7488/ds/2555 + + +ACKNOWLEDGEMENTS + +The CSTR VCTK Corpus was constructed by: + + Christophe Veaux (University of Edinburgh) + Junichi Yamagishi (University of Edinburgh) + Kirsten MacDonald + +The research leading to these results was partly funded from EPSRC +grants EP/I031022/1 (NST) and EP/J002526/1 (CAF), from the RSE-NSFC +grant (61111130120), and from the JST CREST (uDialogue). + +Please cite this corpus as follows: +Christophe Veaux, Junichi Yamagishi, Kirsten MacDonald, +"CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit", +The Centre for Speech Technology Research (CSTR), +University of Edinburgh + diff --git a/samples/p240_00000.mp3 b/samples/p240_00000.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..4787405c20679691dd8ae77c5f5d50520e55eebd GIT binary patch literal 20205 zcmZ76Wmptn*f8*632AnhM!KXM|A0tIcXxxdG>C+BqjXAlONY{30+P~#gwib_Fb~Uf zJ@1EiKCnBxT)(}qd(ND=&&Wygq5z14PE$ie>fsd+0FV{UJgxWzIQaNExVg9T_*FQ|s4`?!JNH(ecTt z+4;rgwT-Qv-TlLpv&*Z0cZeT<2+ramI6-d2$^RYnkdOcSR=Mk5{~rE-AOC-^;F)9} z02dH(C-PPV2onP!ev-;SSFS2lERe@?xWx2I038AZP~!#vyb{4hyd!ziee56fzaL#Y zJMZ|*2t0mivmFlZEXHhs((esL;0kZP`h!=1*b9V@2@hQCFp(yKkjT+$Y4AkWIegR7 z($b#NQrDuhvU;AVo^Ll5<1jF0sr|XqpV#TXX3l>}eDn9|y;(&nfj_PS_lb!lW|+_I ze$xXX8>%xnN1@E@9DDDBBD-(z5k5Yl+=kb1x(y&Y20QA{J;WFQu^_sFfoA_4^#vhU z3aO&EUZh_>Cj9ff*O2Vc5a&Jod||S`bxvx>X9C9YkT6XSe!Y((H^<1qo6U+ePEwB| z6Z&c`K!(=#EeGL43DSESkkSMIR$kdWYG9H8-jEw7fh>OfJKF2#RDnd0K%ujvdELl| z3A;3(a<%PpJ2M{X)=|*fVhj)bCHeQ{kw)T~beCQ7uh61R>RO<3>n@&~_8jfe zCld5-Kw2;bw$VItze~;u<4KJYn+{%12%2p)+iNuRsFUu_4F+t{B#HL1}zxo%RF{=0pjbPy?f6`*0arpW64}b;!n12HC!%Jg}KFazT zmxO{MCw93%;3(EC8ChK)5zESL`J!&gY>}h-t{A*dbztsp#o!eQ#+WrIV_e;j!m`a> z#ca@c9W#wa=qePVLI-Uj3}oevFEUI5f@?k*B7AIu1(7M^UiH%wN|w((Z6X6mgOVlh z6Dw%815O+w1=wtf-x@M$-h{oFr%J4nr|fk*9oT+yGkJ~|z3c${PiUBNZE6E^wytTE zByEz7#pR{ESk$Q&orOE_ZR5`&0KylAh~-Au_I3ha%@rOeByz0%>NbiY3#FgI#}t(}m9LAGPtA4KE== zK$e}e~0|z~AFmvu` z!Og;31ygB3!~EASqvjoKcee?0f4Q^oHO0ZQt>AwZm`D|W>w2DI2}e$*y?_3jJ^u>f z!(rdGyHtCz<6g$Mb&Eaq-)izc%k)%fbf|H|WLa1=NAr5~Y1vLx#L@Ghwh&8KeBEwa zDouhNi!sg5$xNQ5EcUGno{r~VZQECYcHw~v^}#JTlC&*j_$Z?}8wj5Qb|TRQOi#wC zm$tBQ(%e{gMGA#e&xCYFt6X$0e&2c(m&@sLIQO~_3lG4gCVABd-m1?MP$b%IF9RA} zKCy*Vx^j|LxAeFEm0U$q92BQ%+;J2Vk%G56%<>4I33f6~WNhD8@fIALNzMVw@p-@c z`76h2ndV_}bTpI1WpfN|3uYAW$|5}hFvL)a-&PHxN-jFJwNw_fa*kfIzJ^&W%oRA> zKk4w(lTGEj{#2%#B`Xb%eNO5^_`Gl)v?jr!;T$I*MNi)?BG2&}~r?Tw!+b)T! z?-~oqFa5zAKZO4hws1LT>?64u65efHS1`VEoy89Y#IWpeGLekbilwP#<5vDj@b)l- zPV5#^WTmg@W1wmu#Zl9B^vsOlugB6&)6KTgU){BT{;$@Q#sfja!n#8}q!^2p+w|dWk$NI0>&;dEPeP6;Bsi0$q6|J3(uE|Xr!K$S%hyw>O=O;T|oCXg-U>{oKTHfLT4%! 
zF2%W0ct%JYM-(?wg?&|DXAjkXq_q~x-zPR5m}@W0t8=f18c;E;yVmcjH;-r|FYBbBA?psXAjTCDW#w$1fFs|ncg5If~Ar7 zIJ*sYWkzpHqnt`3XX{w-_`S-MlHsfHV4x05f|^5k>30)6@)r@lKFw}V+C^F%FuHxD z|GF=_{}+6h^@1b3_|rGF!sG<-oNsMXimE98GbmolgIxVd zxL_=9#AH%WD!fy%gE3!2>6(>PvTl-UD_(v__zduP?yTJ$58h~r(XNViE-H@|FD80{iw_sJ`c#uc7tyW>#<9+XsbF;N>y$426?3E4QmN98-V)v2pYu|9=Tnh~fOFc%ZHgIbiEO}31edC`a-5bulx zE!E=7%Cj;yQF9Di5y0A*H`QnwAz-{LZHTTw_%yJVpvpOA63lm%>*|BO9+YrHyEeVQ z;Fb!9_5|2gKEJx-OD~eIXd~;LF@TZ+34k&(Ry6>NVg$!cs{Xit$`(zpB2I-kcu~h%iJKo-@a-eyIka)f#bYiOx_LbDS$agcl?!Bii$ouI_=ZSgB_+*vXf2k z1v7VF86AN={&%jRepR`)Wy1{V-{Y{_mmNd;t+<(YyHqs02%nHRvZ)OzRu>@lVZn^t zQK~4e4okz{_B<}t-43u~}t|J?0W#E1K1 z%$Q2CZ&Gach9D(wJNt!}>8XGOAfOHSAbbpX>0I~PC<##VG$=?y?^pN-H;t(A>-T{L zoZqOAd3j&i7<+jw$H7#!#!=VB44du;-m?d1M}CU3~V!3Bk@@*s3& z2L^goAXRt5H3Yx*fE(e%;ATV}#=U*2EVj@yF%w2kyRJrCmo}V4vU82j-G0#u`GME) z7|+AM#%x45+>^O1slS&ewUofK{#SDOUh9NJh+ zvth^xp9~(K-I&sYk}}`S@loXGljxT>jlQ$L`*1rw&!~dlE?ZDQu=dN2{6u1!>M*%L zFvU_)K$z&M)(r~=I!j*Ba;?WJv>k$j>{dld!YGpWHF2dc$-u?-Be2sz3*ple_`qv4 zyZYWdG?;s(mEjkOw;|73G4kJ%miD7cUbell5=NQ668d*K37;8!lxsCMOFits$QF28RKt8`NZ;6 zltv9DFlL9V#H31m7l?e#$-dSyDk@of#EIh}dO|La1hljfQRTkZ&Gj-3q6(Kr&$+u# z{|QM%5`UFtV4(dvo^zrjBeU|)O%uW=h0&2M8mgk&Zs&+J{rkG|Vuk(t;%^=IX7#+4 zf0=uAlOe9Pl~PVlvpjT^=+@7J(4dlJQg2hWn*KP%C?ffZ?4rJS!QxV@Tmgl_g!jHa zlRdcNWCf2x@{5@E__`&Ya`9!H^)aC=isiKVfVgar1q29J% zgQY&570vi>F3dLV0IUkD%%wB6~M+R+DIPzdz$YwcyMtt0pP}08-|&sGW%75f478JIZJhaoI^F zPARU@jrVBd+A%A#WT?6=;!W#KW7^Trl=6<^#T8QnNY+cmoP(2!Fgl|I2V-aY@PUfT~jERCPY z!StvDcxP_{22w?0dC1JrW4nA0Sj`Svd*4mG4T3T=pC59zY`6Mjbd=}J(_oQMx){GE zgIR8X!@PMzI)`xR8H1yoX)dHtC+5+Ib<;L>_@FUX;+k4u|3WU(5|Yl>>5iNwmxT%a z6Ml}bBiu9dV;;Qwyi(2+_BI8i_g0$xPWbz;a>7dxv`A~3Z5v@FD*SK``4t?@p}0(x z#-3r6LhR5YLEet=iQ&52Hikpe?y4PMp`RLB(~jdJJaMr_@F-;?fM*)TL;c7)7a&P`_t`OY zbGN3C6{!LzXwVco!wf)rq1Z^1S7{!QBGqIm$=^c}mbefk20RStV}6*PW*knL#4-V5 zKXcs$4%xn%hog||#8QUPIgm4@<$c_tbiT@2l%NP5&pq-5Z+Nm6W_~yM8L#cfOBd+( zoU!_wbV^^j=wZaiMqoPpG}j8-pGy`UqM}LW`t6R$4>mlHwhQTpDbNz&#-;0B_2*4B zxcqsnu5|AF_UT-O6YtpbmU3n$;Xd}4tUP3|PitfjMZBjS8!Ry}fPpkDI9gs2viRBt zLjYiHMWe(-+PW1>b`{N)&&yvMiSP;F7}`myy=cAyo7#R)y(FTRm?OqpZ`eJvli=+3 zH9&5KUj4HgD&u@bz3MO*?v#mN{z_*pZ9F|H1kU;ti|N&T>EE@6QAXf2$gSy zh-lgkhsJL0eDT|z)02PC3;}1pCFkv0b83rBs365LT{8X044Q znzXGVmm|2M0WLjdGf6Af*l!OqiaEOPN7$dX(;`shDy`c3FXmQH^3%q2_E) zYbA#RThU!#iLX&OW|o$d7FkGl~C&#DS8=*@E|Brub= zq=2?A^u|F4N`$WkyWxcxTg*5L={tle&S8fYaOwJ8`ccEaY}4T;%h7)&W$IwYs#Zmi!#XQ#7Joxm%P9Dio{g!EY34h6~J?~fS@N|Yy;KpUVp`> zOXp*vu=3iQ1)^53_L~Bb!+!Rn@*RA(d!emjZdb-}@N^(NiY2c2H^R4t!}Pypd-a{N z?Z1RTMkR^%X(inPyOUwSs&PL!pJwW7F|%TG@$%c(4K-U%HV5%(JAfJ>Ct_;t`)}wp zn#9Q@QcSGWalq-zxB4oX_umvx2bI46?qVN%L;N`Zaf7`x9udJGfg8|#(DeV*7-^ql z-u;BzAb(WNlw#8+@X=dynWjMb+UtTsXn1%E zh@NWpd0-^{X|qL_ChxFx^2p)%h=@M`&TJlMq7MZEf1p|T|3njaKwAjkhXiz#`>^+B z3P|D;*|Y@1{gu!pnqkM8XohYwUSqg#!FvvfLpsACS2UrtL(s#3kUSbDB{?C4CX>hy zg0guDW&mk&DZ-DUrS<&Ko{dz240dy<9Om+5g}tn$eIiaE$vYPgc@E&Mg8vklsolNo2!1lbVs?yg}lxC$(eVbrAL$!Nu3bB zCQZWgF8?$yAU*J(NR=!>Ae%q&%TUjE(yM0|RPX+QdL7MKcLun`HxvIl`2FuM{(|Qv zMQi4aYNg(U`4RS&@Z>Z=otagOSwMz)^~BKkqx6g+lJC`)cJ{zy|4dC3X+C8#p$o{u z3etvYoRF^maflJ~eKpIK*7u&^zwck`QL)Ie*!Zg+-Ejdf+^1#}d{3{Mt2E@kV9o;o z@}D(ZEPE_PRjmwdfU*qw)-l#_*SplpRH3XAHAMU*M6^UV#@-`)k{sXb4p~n1zd1GF zC)m0CWcB%hx*T-eid8>;m2#YG#f-e}+KMJ*b5Uz=rkC6aF3ISV7f~GC{SpwE#@GJ7 zwkT`Npd5hr^|`w$-;CsD`y|H0BIKdIN^nEj@9Bd%TByB&eOf2f7d@&zm=w}(vM8v) z@)Lt;e@N)f?S&<&PLlqNHl54@cMe`3xj{5k)ig^ zK=M0Zj^T!b>3zIAgb#y9DbJ>Oqtwyi$iMt`UBh&Eg~e8lWPhA?X3qqnJ)AFp^HQJU zM_h3@o+&K|1 zNwZl7?|O99~d1*b4B`-%>mGELlVBz zPm0*oer`l-Gcod5AJd1u!#&g;MXTqKm+eyJ+vn%dZq#kB*wUE|{n&^rrui}yOx>Ot 
zS7W~+Gnqu!c}bNmgoYcbz_~Yy7R!lhgjB}f{HCc3z^3@prHnU;QcAf138Nxs3TxI( ze_S6~!`~UVzVAV)k#*8b z0S)V^k`!4cI0r5?k1H&V?YywrLesQrqjGt; zZXG!JCu-=(Z>yJX@eG2r4pHDyM-{R}=NIGFK{o#!-HPzv!_}HXGv6s1=hY5|$JtS` z660a(W<4n$&;qwa=Ln!dDAZ5_G%+H6b{wW}(&Ow{`kjAhaL&Lbxb}PEHtyUe|KK-B zH9)PHPzi`E+huC%9YB@@i2;uVD{&Bh_{09AF8@RS{0u2CFis2`3C;hW7?;Cz7O+SE zkHmrtVh!F?=MG`91t*V6okvlD;MM@2*ko|(w;>t<8(#m1zIpSy)dfv1k(fFZHGt`;*2{@cktxxq(Yo~opS1lzRl z^691q0BT{C_@mpy2Ks(k>i;OX zLBwxBi&@@blBNqdR0w;2Qth2vTlaJN)%fY+ZeUth_B=~#+P6-~@rg?VSOuNbas$R@^l75t8$3k(xP-182Tk66i>HvJ@vr7mMLDqk;6@qE9FjFI zVagqEo#n3dwPIzE5@Rr5SAoXxN>jr2EU8z>;*V$qVe{qLUh`~QN7d!ZtGI$cNuDx? zRdJ?JFI}mp&JgOy{uv9d#<4`?gX^H|*{c~YS(E}$(JlWbR$md*LtCka#<&K=qquIw zR~M(@dqv9^%$7Qp&dija-A3v^#q-X&t=6VxBQ&+R(HeV3FZYZcfQvs5%&T!pxs)I{ z8AilU08h^z?o=UxG`(5x;2a&kzl!cTFNu5YQzYSocJ7<|Ok6H(Hu-0=;BaiI8#;C91)l zh}x^B!B#qxhm+f0w0~*c@0EX@T)#|NWCAeRn??~nIzisyy-5@lVjeS7NsaU}kXJU% z(#}l-yw;e%&Z(Zalpy36sx|Dv9iu;bD^#>{lTY#esCIYQ8$bJ-J*EPY;NDeUVl;A} zQbZ`5e6@z+O;=!O-5fhS3bpQW{)SEf&%QAVg+6R9_QnAc=t(JfW%XIt=(5&7tzG@E znZEth7aKzS!C6ms^J*D}u8k2Kx*Zg;(!R+qfCH7B2ZDtJ!nLh;Q>ph~S8Gw|Fgu^Wd83-=OwCOo zmP2Vu^Rn73pf*U~{BR^JL6;2od-B`9>U)FDB2c@n&x!OPJ%9d1?`DXm184l?!=AZknKNQ7I=lk^N+B`VFSKn{LEx-N< zr$3Xsi>80OIYr|o>TDW%rXJ|(4A~Prmdl4*4Rl9(;(If_61zy59ce+tPx(;aUcC2# z_u0SZ6w7awhr}8w63DpiHJ&cvB(RgQ9fWGUl!(b%D^kW;ySpC}#w5Xie_pN{ApWIB zD%%+;;pT}XTsMRnm4-@MNwur)9wqP-!ZQP01BH@OIed%q!3B4*_wkd!c4UUd-gT4|?VmLSD}9 zf2bXXrZIypj6M}S?tclxbE}IVY6yxSuT|xJmCQM~e4CD*dW)BCf&cP>Ap~VR_nx zz#Oi#Up9kt4D8<%5kYb1w#7e~Pm=ZQ+F33ZzcBlgw9RSIF4tFtZwmWaovS%L1CebQ*UxTrqzg^={3V4ID5BqDr!m}x?Xu@81WWEUzS{ZT&e=pvepQ#k^c z5Bfd)=hZQ_87)SS9fz-Se$Hqcx9ACu-e=oMOsY(j;~J^T_mJ+Xs!!jzUl4H(r@t6a z3=Qr{+6&}QBDCi~`Ogv;;d{Zovg3g1G3NqmMfBb)08h=`Q8vMj+wKLi6Y(bti*s}p zg5v)uiLp|U`H7AN!%?L#>P3_&v&Nz_01SF%F_bXxSI8Q?jG24P^8L1~rQJzFTuTzc z-W&X73&6kfP^v1CRHHMlMVHk$bXnNUPa!jd$%>o zD|h2T<7O!!p8u&9PexjHB~)PK2WU#T5Pidx%f@F^?aSY7Te>eITdGWrR_K1L&$!dF zJa2j5JC*rSYx}lR%ARjz`ZcDC1WQ^(8hg6FExA zrN_9u>>H48+n2DWRV~ZR`ENq#_<$C(AR-<^>mDfJf)-zjKJHJ6kYT#!YIlcaIK_;8 z)6%^#QJ?kGGF2tKlPBZ^@5`jkV@$c$Ys$lW(MZs{)pG7o>|k2d_?os%7tcRr z-PG@{wM=!L_<`^L$-g=BS-x9Y0ke4DioAHNA2N|Y#qH2Gou=BLp`-*{FM0?m{J z*REtEq%+UKzc(&Yho6s1+9Yy}8hbvfq5f9w`AeF|2Z<(Ev1G!#!-vE5YDi& z?vW4x0Gf%Zj^9bx`t$&L4*YA;Tam9RA-9s=N56AAx9)eYcS2y;fd3#~E&vcM*}(k4 zL(48<7GcEo#Un(1vG zRT56o(6R!Qm>5zOe6;ZiBMJKUy!Wy!5PRA#HN2UJ?eDNRF2n3iO8^%@*!JrGdA{ znN3B>rdL}jZzMvWEMsPBipUQ?$#qnRZZCFHNKgID^NmKXf`o!}i^2saXohxV#M2e> z;R~bF7uHsn)BUn7^-l?UDf$96zm?450ZH%NPtnnmAY~V1WJnnSZeh=m8vf;mp-4i;(7Kd1knWc(Xc4{uH8M-YaC#g->MQfh zP{d^oyr$%EZP`h9-O~&n4FNxiPGr%K-zK3`XFXhqp;mkrmU><{IEP9~=fnAMoo`?C zr??Z0zC-Eg%;1kb4mpul%;$fyd3`sYZz6mwf~brkq}Tx9F5`J{AdA=UJ=^c5Ot?MQ zc(AlVwqj;=4<6w{o#)S`7|QNM?v(y9r(IK6K>0iCAp;l(Hf;ZwmO^4rGV zlC@pa>x#4%JfAsTqjHv~Zba{m!~Fj%^OKV6taxVtf_qB76Pl_HN=1dmDPOLZIsj?o zGOBs!INa~kp3C^CGKM4`LwyV*0?gYsZEv32Dt-P|_PG9HfnzXk3wMVkh}yP`@+5T! 
zA{olC&rn2IO1w^i2or+r6WS2Flw_6<4OJgoej+{nibqc~a_ZesoLP4h%Bx{xS{b_AK-(Vb~I8U))(*WKzF0*5yju`q+8h@Kc`6V zv+v8%fxstbShy%3djz<0$@fPKV8BueHw*ojH=CNx&PB=XA2h`*HGQW%_auZzr?f44 z`iIf=C|n!%2=(wn_b&Dkgzp>r*-!H+x(8E;>Tu#_t7a!Z<>~*nwD&8f7nwaMWZC) z`hI6b4{A7aR*9yqo;R&JdpbhJGA2^>stedrX@0Mq-@$hM340)9M}-g zrO^-+Vwog2`Qy7_VPaH-AwL!j->k47S<&!=>fm42#uXj!x$`6fq)*OiYN~!#60ElF zErqLbq^X5nuf-&_{P!Fdm(1Uxj-iTK#05Fh1n8(|%9>seAmS&1s|DTYdP`fSC`lI8 zHfpF2D#&nki@&SjrqH#2aW=;xtr=|e!Qh4!FzXl~ zh7Xy5OVzPS=Ue)=rT4mNCfY4&ih(K=D0sT1?V4_-A+MQST37NP z2x>l89n|!6EzU{T>Ue##%+hM*t9Q^ag@Ja}<)#&pcYU`o4>aG)1`j{Ifr$qIfXVTR z3Y!fX2_ZO9>m~MESK@L1nu8*mrEoM>4CtN!dsMwjme6T`7D|BkD)k$+>bOkn`A@+6 zq^u*SPqZ~}uw)+Q?Al3|tCt)7$&6(+%9{QUx7F!^aF;7yn2A6hO0P6p`cH^*xGEQ#FQKK>l1fmT_s{JxZ0 zAU)TJ7V2~W6F!$sB+SsDI8)tS7d`xn2Y)$VMMt$h5YnzO0w9%@#JpOqN8|?|Zqsx6 z{9O!WZ{>BNc{rH{(@k8RLJ!IDe@64e3B(jFrXi7ToH8jJdUPdGL3zAupoC1GgL7Rw zQW;geZWoK)0-cmp5b&&KLQmMC$g>r@AwZri%gkI07nY6i$zea6ZnS&HT(hK!eqlFp z5pOJpd_-&c)*y$YR7CCE((2xy7ryp87|lZr^&uJ8BZKe6?VJRPh%~0eXsj25<9<0( za$%ySCpE~A__^TRj4SuvlEMZv4D=q)1CcIrP#AuKU|3U6#*cRD{UXyogmr{$Wn=_p+*lg^-~1 zYkmNRZe)6DxF21V5DL!PBaihHJ!tP!xUm2#>l0!j2E{HuLhq;s`Q0V? zMctO^XU1Oq@JdsU3ZWb7$$L%nsCXPo;re{W6hA2=@b<5|OD9|wQg4y_HSAg`>nr5x z%+aRsZ_y1`-3%_veq6uD@!b~A#EVQNM*>k7NW-P=4TM)Qv)b2QclNdHQeLG#eU7o5__*jD$%>3?*{)9COrMC2DXoj^nVXsqbB zSg0csapRmsuT%g2J(O~>1@Ap2Pshn#lYNd3q2yglG@4|q#6S{(FHc8(V*d9I;fs*} zqZ~KsOU$btTOB!u5 z{6z3d&*shEEm&(`(!&IoGHiQ{jSf0t#v5R+We#)s~(@fr65vUY)p9s*fH{d zo*zziEA^{2&PVpLk;o*T$V}DKNq#AC!JF@FsN8y*#GuslOsrkTld8;zsD6pD%M#LI zi5ZW+&U!Iz&O!}X0cgD>^?Hx{%b2iX_0o>+u*$G*_bJtyh6X$3gXr2PEN}SanMnN# zm)FJ_I@usGR;u_~S{&qHc`i*~J+BV|XmMPQd8fw^(*0SP7Gq=xj+J=7-g`b^+?q4iZUjN0= z2msE$Soert^2gt14e+4M(9-;y1lF0En6CfHmr)8+w=hMs3iI<=fP%p5(tm4`%`q0~H`UMj=_r7KLf(@9Sd(2p&Sx*( ze*t(3i4Jm~)T|E8<=NTd;#dA;j!$xvPael7WUQZ0W?ZsSr{g1koIgB+b!E3_d#UFK z8(b2mu$1 zGYB~MAFQ$HkBP;X3)SX#R}mzSO3v7(or&hO{&*bUd0{7H0l;wBn4LMqjO)ck<({~C zwLkcm;0$_h)0h&yGH*=E?>+<^Kp3^>#_aA{?PiCkD?_xo#p)d_=HT)*65ke6_VSa@Vbp& z1WBNfSQ@m;U4w=>?ax5{l2RQCo-(~zc<=dzhR4$jA-`=}W(TYR)c}>_!eoiRXiR22oHmDPr#_`DsJ9Z@%`ksL!QvGMLl4a~oxE=iDNQzig8BsiP0oFBrZY!@ zG{V;+Y_)0udM9hjo;fX|B4>VH=|-deZ$W+DdVM3+>1O~9gd#dMsGP>W-F~yM*EX{k zGTR;?CT(0O8MTSs@kxjCykPiApN?mMehi5SA=DtwSBe7xBl$RR2jsuKk zje7ndd}*SH>PurEcu&P0QJWSgI-dZA|9POQK3>-!#yKrIUYN`dd5E zHx@cPG-m^wvybhEX!o=d2pGGiiH9xc|BNYTZqB&WqpP;T ziRR17I9WCIX8P<@CSp32e5iK8D0!&KPbRur^avf!U)pzrsmlq3dEpS3~Xvhm~+K(F9v+@{XX&X zt7YE$(7FVOk34FY9*#vvxYbb5~IovIW-oe?#R!&CTq5 zU0lQX{yr-|TAotJ&X83v8xQHZJ&!V> zSkG6@y7+InrkIu9@9w7fDA5L28Gp%&1gEG=nL+}f8bU&uS9!fIZ5t!Q?`%^M`LTqH zb2Mp-^<@fAdu`hyeP_S1U-@&fC0A-%BK@sFRjoEAK(ndE$??S(&ODTX($!dy!=8!b z#`T-Uhr3}i)F1A%?B0Fa<-|^FA!&H)`ZaI=)Av)CZMuXN5IxrC9pN<3>ojGhLsmPN zyl2%uG!CW7{@|XZOt4a)jffqPD1nP84_h?QT zizo5}^whEuA#x3$Tm5hSkLFwV|7$EY zIYr@)#PD1TH36kEahYti>{L1(mM(bVhwi&?MXoO{2Ag!)F}E4qU+kG^jo*718Ngh7 zB`-7weI5M4)kZSoi%nhC<06FbKm*5pZ}jkdj-_%}OkaM=sp-*35H0VhoLUMd5hr-@ zB>6w6GpAgM%oRPhV4(t6C7{jnoDRaIaH4QoxkG2DyMV$@Kr!i_npQ+ZDqSQfzc}FF zcu~*q+wKS=d?Bh=EQ&~3T)@-qoK}_C99CY2CVR42!@i@@#BK9W6K+2uTWh`!G1uiB zSr~wVj%V%d2NNBBzhruL+x^)!d_s4uukQ)@JKN2x;pk*$PX;KSy_`ak66)GQdaMsa z323G)SwBI6vOn`TZicJZpERx2#%NQt`-pxY)u#`~{rgApB7ULMb@kpY3tc(s@5GM4 zI=N|3$Q}FQ^<9koZGlYz5s|x4pcaX$lr~QGUXi^ulk)hW4`IGPB7QhAt^N>Fj2OVp zJX-?#!COVFU%g!Y&fKK;?E25$${3NrhR)H2wj?pJA2anCRM$`d{T;zc738}?I|%lt zy*yN}3Jd4qsu0%XzeoB;P;FqguBBAMQ2S%k^|3yQPM{aHYcjwDq0oJpo&S2eXrq+V z+&pJ&96`2g*ZNWUwc1p=Bv5&_W(zH`lY+VsjIA-?oM8x3GfiQudeY)X`W`3I6T0kT zJKK<1zL4?Y4A#JJ3tA3C3D-8S_#?rnXlr%!r1*_=^Wm-tQ({{8lHO?p1y!%%q z87r0~l67#Zq#(WurRhJ!D^taF+(TVvfEuLgd4ZZ{gBu(3iV3*;;T~k|S^bzP2+ajN 
zp;JviQH&q9x*qa_4Qr|ExbF^g{z(TbeLX9_UlgOpS3IT$*-tAp?3=UNr)_fF_>{*L zw8QGhjD?PVo;Rm%`qS^1z4AnP9!W&G%%{}3C+US*+sQ9+pg{+CRd%}ZDg-*0IlhN^S@3j|nmXtE4rEuA*1WC))Y7Wn>qoVWDv z_p9*Uml#O&+SKsrz9-la`8kg9+&*033Fp#Mp_$!uJ%NwALymh(im0J9P@Hroz&!64*!5*9aWg_y6dCjOn z{z|h3lY2}g@BZjt?C6CNe_@>_J=bUegzihi5(z!^I$7aSVd?VbSE@1_4Ox>E3a(3* z(@@wN%7+=E-Vq5Mf^ye`GAk1`bW3G3Aq*MP*HaPhgCUG~)6KWD{?oDw>E!4Ivk&)=3F~v z^enPyd-=U}naojA^>0Nd??TnO9B;W8u4aqy?bq()P&sG8x_+>d%E}*wbHBW(dNH6< z@&Mth!p)S&`MjmQegL*&G$H4|bd9$NJipgD9*}x|o9Qr{Zv6%Nrg^e|) z|2{0Tq?Sw3xy_?=B*maUqxkZgP`tKBOpo)hD6AL=5$6&T#%j^n4GBF&_= zcapl^<+e0_Bm(5@K?r_8vS`8OqHJJT@+OTN&nO@Fa>aHEssEYqWLpC>jsIQYg|Kqc z?Rq?xWuCl50tz)mZsSPIml<6nwcE*h*3JYP`TNF<2H~5-7H$^R&B8RxzvqM8kVZeY zE=x&tO#4-V)J1M_B9Z`^j3S+c?Ea~FD^U4{rtFfZUGnSh4NG{oy()fv0J(+z?`TRq z%Cok1Qe#=V+Y=(Y8sE2F&Yh!R{y&8O1`daOJM>1GtTu$Xd!upM6Tl>~SYs=ub$c6UIIVT=K=oq@Zf&ODo58?p5bp%vi-G!sp^x;Z{Y?HUWNW~0)m zV&^b61b~e+vs3Uls9&SDwnfC1Dgac$<+mXq=FDSoyW!^34Yi^sUSx9kG-N z|C`hSQM-rdN|*hlTI4HjUcgip;YH>#421!5KcTfVox{3!-Mz&X*Hq-^y6$M@O!Z`x zkL%MAQs(j=#wUP>kWiPRgyWXmMzWD864`E;g{)y%%ps9VGTW;oxJ$=(s7bo_=0flR2QqP`Bb(*YkCoS#>A`_a2z@eEHt% zIZytTb6oE&B|YFmB>O9@hz9EbI{;V?a5TqF{g|Q>UNLnnCP!bA)m2**crilHH}4=W zhf)*Ii{jwRA9$Qs)=A}q+!vlCfa4F$T);M96+rSs5hHrwBW1}->?{V&i3=W&M2#X( zPa6nGPZY;6ybqvsdMgv&QC>;|$bDcpfCKdJ7fqkPOX%64RYn2#s9MXjJ0^Sn*h45B zs2KHz8g`PgtX*{VrpWJp?)*=O1F-5*3d$&l87(z?vYIE{g6la8cgSZ@#PBA}n?1UVp)hOw(zi_E4L$Fy|_$`!H(f4(cSmXpF>g zRkRZ=Vwu&`G^L4eg-0dC$Il*hUEi1c^UsZ^@==Z{`0sVzX<%fv#ONSy@ABZM^;a)5W_fg36R%xQKJ z~eo#xJ#nsJ^H6yNzgQb>D!{k$tr6 zC-l2R9Rf)AGLz|Oor=f#9rAF2^ zF8s*B=S9|tc2Te=JF*?}o)?Xo+*$no_=PKyeKVK!-^kuIep638IBA6eGqwY(Ze_hSbz5bLJDJ0zm z(6#$J4!>mNt@ry~EL;4cGPeC;0K$O9pJR(;Qj+n`Q_@$u-1!Qv=JRly$AP-sDVFnZ z=NY?klY#1RX^1q{`owYgur5YKUzm`0{*Qa;<79wo#vq43KWtkQ!6aOhfN{T&Ni@8b zebfF%Uez5D7AFTsrVZ+igF?M(m!mApiE<#dDjDEGFNN@O-vfFY>wqq%x_e-&U!jxKyC@~ z;evW#G#$SS84bL4-SgMUihU?2Dde~SY-iE;2j=l zuF_f+<4SyZZpq;hK@?(IbxR_cuJ?LBi^0;OnDiR$_U5-g4W>#@20ca3p^{D7p+SEH z-tOb@hX{TCFbGU%@1ZdveX26%Jmrx1wX@ypkcsrK<|5UmT9`!dhJ)%P$!muG9!|Ti zp8R7nx=YdGcf*@Ej9kVQz8;nuW2He(c2u)i;jqa}+%dL;?IfVEV9m*igU^SsLXB$A zr~#fyS>azYrekM!0~Ok%=3wRb+pMf^#McoF2Sd!PXpE#TxNY#nm7DhLq#{{bVptla zWPZsCm7HD7=(8S~q%^5kKL-f$DcSHO%G;Q9NhGZ7&8Lv?YSa%cU(3kok665=ps3m< zc{sAZhuiAe88c{ZOiH4CkWkpz5S38XbR_jww3pIRetBGC42||v05@6{7;*O&09Z8| zzR{EXUP`<9+asFjI|dI)d#i59$%4aQl6}5RV*-gT4;B0;hdX`{-Ft6Im{Bv%n^u5N=EY|LIGgn%f{jxv-`yPKBuVE#-Jk9t^TpXFQ9_l zUB~(YY$)sJ_w-dogp?w^Glt7>fv$e1c5|Y1*jGf)oX?L`CY<7hCF$-M&oOun3CXoh zenum$298PJ{4a;58e@2YHq0V3S)kEl|3(K3r1B$6%zUnD-Fzl5F|jBCU;{%?_VhO` zc(vRnia^30njH={%3%~IJBwJnUa=P3kHTlZ+N8c zq*#X4N|!m>i+^Q}m#|oy5tFU1)1N`z;0HiX{ZGM(%x-!#*LUs&3GbPY+QmJd&0WwAOI7YZ%b z_&1zjzM`H}32tbxv8rUO9|zwBfzzNmN2LR!9fY538;$sNb!Iz$iH)lIa8tKDo|57L z85?GZFRA-SPJaq^nJ)4114&&Op6$MZn>B_v+1fYsRVT}?f>aWW#17jXMQ@vHTrV+N zIvA|+D{ikoJ_w*Rl^NCdCF*phVcmUdCDZ#>vJvq_fDr_yB4c=;7EMdn&qP=_2+XML zh#e1GX<^nkcos@!nSm~@-$ske`&~SeYKV$ac&af|P%+?WbWXLC8?5GmR`uA!4@Q_u zbm!r(Rh7w;(VGbZcY#5QsTYKe@;;3knYVVM-xD;?i=lu zM}ALqPwfO9i;FaVhW8g)rDp9buW_2GcE-QJ1=pQvykjrEcYlk7LkfCSqPbB*Ewqfg zZA~@1ES)m^*;0K3FnRWN0zul151yrNjKfLf{wnPkknrY5`9#yq?@ZAW?L z@9m_V?-XB#K5bEycyE+?jkQB8BhKE0)CY?H6;%>{@I4*wZEboQqsa-FW5=WY_u=q& ShP8gu0*;9Q{`c|!`2G(rbel>5 literal 0 HcmV?d00001 diff --git a/samples/p260_00000.mp3 b/samples/p260_00000.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ff5f5032e9c2aabb6a825a7c0a9eb1c654cceb27 GIT binary patch literal 20493 zcmZ77Wn2_pxB&27xJ4GZUBm|KX5azPJzk5I2 z{lGHBf1W*a<~cLZIU_H{g#d!RsI@dTrJvtlf`PjH$vvIJq|NHHKUx3T7 z{{P#QceHYP-gy3d&>{$=x(z}?L3{BMii1Z;LQY9T$H2_W&c(wgAS@~&C8MCEs;;G@ 
zXYkh4+{(u8os+A(x37O-a9BiiY+`b1MpkZqVM%#aP5sBlmbT9BzR!arW0TXfi_2?Y zHn(>64vtUHFD`HXJUqg7{v4e7b8!3|u#NwH(ZGNB?_1UFGAuaa|NZg*dj~G1hCq;h zCT9_!5Cl|tc?P(Vr7;vNad<2Ok(Z&{e<)DkKtc4#zlN|7Ff#qD(cnPA>7nO&Rr}UY zYDh0bf13nRsIg}SB9N$IWs1_BKQ0_T5n!3V8>jDwkLnov^&RG8KpX5V^WPAFL^bED zZoFrfo2`jCb2=?EdUJoiq#=TaR7{78FGnkM(BZM*p(TRy**Z{~4xm01q`0Cz49nwm zo{Ys~08p=pAy=#-tSkEza1z%T_}y!pVU--_6)B13BndJjhEZ|2h@$1p*IH z=a$+)iyt<>IzXsLUyi%v4@hhpe8?fBw~uJZ7AS+kw|22EXXlSG)g66;08kFXt#>F< z!eB+Y`OnYR`0{pE433p7VPQE65Z}4ey8#2`OS08NY}_Y=p@!eIEtXTsAHFM*kRqV* z)CZnfjhD(GM^tf_V{Tv~i^CV?=9&I3I&d@f2m}D2l?ep4j0-|s0`pS;=Xc>P7HMMA84!gcejv3V{0{atGzbON{9XYf(Qt#Mz=(&B$#EXOxoDhHQYZd#1s)<557DMR zvJWc&6^+F+Uo4@aCdiXbUhokSlm4+)KIpUz8g>f0@uDPEPM&Qv1B+9MX|`SlD(`hU!@ z3Wt}r%YE0j`i`U@ycqS~Vv!;gp;#QRs9XKik4E;*i_j!hRv=aq=2Ju7V8a=5D$!-L zA@P%zdfEwY0yY%3pHmqfLPfTf-m^*uVGv?3ky|_RFB^QSiE(~cjSU?$`LW%o>H!Mk zB}yDN4-j@DC>>Fg6pJhz{cU0&eQGFS^YS+S5PAX2^R!rM6@jOEP4`d_l;L&UDh}l;ltK6dX$7(*yG-PYu^nxYY7kN=YDx zAINrD6|YEK>ePo&Rxx6`e}55ivo!V`q0ReMz>p>6zvcgB*j6FJn30DcnSeKI{F>3Q z{c2JdkX@Y!t%3P6kT)7Xq#k7cWaer9v-0U*JlF5P<$#EwWa~{jUX2aT}Gy10emk9 zocrlWuu-WetXgJX`;0UO}Sws zNrspYM9d?P_qC$d)HEs~zN4T2{@DFTO%6S}49vHMC_o*&H2J`As-T5bxgv?w2*;qe zdgh?(k-KPj_rg0A`7FBN;wDXzwO}g z^!@1|__LslJdYi}*sdnDxQx8oLGMm+Fmy?s=9F*Qnx~>OdTWi3qDmD0i6uLvW?D9Tqoa3Jqqum|Mj2vIq{4|E3#Gp z@je*}36iG*SyrB9Yi_}ffg$Tc`hKW<(&fVVKoLac&bFad_8v z_`564SmtUwEPlTB^M+emr2sCs{xZM|^dcPn#Y>u+0_4KOdT`asp+S6+5+eD~?MPxPOq9HmA?$J6*qN=bSO@ zJMIf^)AyCr#A6vo3HmH1#rKum`~;hOQ{|IB`4_v#@h!b-YB)5npkvB+t(cZIQXt}vv)Xq!-n2DgpLu% zPYk&*+Wj#gBeix!5%!z{c&A*Q4}ts{Y6cW58eC+7Lh*hEE(aVNzp8+s8<{@SkH8C^ z?Ke@DzTW9_a8uKiXG+PLxN>-J{Ph=}q;d-oUP=hh8a}R@D(z_WzctW zGwQt*migXE_7vs1p*uS|pcCn^-pWI`Gi++jhldICxuKAoV;P@eq%rG{&%^(3FEd0= zV*q$GD;4tiemr*z!HdIjV+&bMM2S3e{_Cn!I5>5UWi($=roMwb=yq{6L6Ly+uJ$jv$43Ujd=W^n$YzD_P)y1BJct?iODL6FDg}D61VTs0x$DYP zF6zhFLv_`GZ+J)&EHE#gnxNL{Y!&(FHGpvdHzCNtJ)>lfl4CgDnlD$ye327a*!TO4 z-g>6Q3NZ*P2<9t78Zo~aF%RJ(Pe)6F4q5lE^i*-=>@*dXxF{PP8t*G6^qgL!<( z6lz(Ea)+;(kZ5J0>poBKg?cq7bg-NJ3x*HIf!GKQPSK zg%WDCoOw^Bv>~9SGHR`-+~xAI$ROrqI_9jptOj1m!$EF6N{#VW`KN2Day%Vs*N)_V zi@v^iKQ%l!ZGbdE*5Dq`g@z*a!pI*2Qka4pNL{2tjb!CaZ%O8a`KHip4V$|+dZ1zC zzH#A9RYC`GkpYgf&AGMYD$<~i05q<*U7Fkk-u*k|=^rb-LMHKFqHxI^-Iv+s%Pwe< zdpWrYxxHY-eaP@GQ=G&s*ov-Jh>92X9Vr{u#)J8`6y{7~=y_tG01{dcf7^D=3vNK| z(tk9DfIM+|ZDNw;K6>Bq!&qMG_+Hbv7zng(k)S9C?J=e+AKl+TgBxm`aiU9y+{=eK zrd=RieYMFtr6L5~1F1grbBi!vfZ~f)fpNA8xDYeR{+`Vw+vTGk(BmrXM+qF=9n;;v=+X;$gcWnE$A8%t(o-Mf6!)>iv=G^bfx zQ-$vm5WPSoxq$w4rF_q=8 z(wS_yxZm@L+UN7)G+xSo*v)Ya(OC$kU(Tx-{Sza~vNcE7%?q^3Ax)^*g6b7KH4$Mx z8ZH`Jzws;soJM|fbb7Smx8m43kq^Z(pVk^mFt`gqZ+)b5p?sG+|3m{82`|wF zjL(|y?ILk78Ap@EcFnAlo7l5~{VPHF<`Sye{he?oLG&v4!6S+-n2&-Jbe{zBp3?H{ zkRwSL5c&|$M@5=4kZHnk*dxoxEYd{W*6G)Ag1GQIWH_P_C1T*S2U0{R@@{P7(MIPv z?-zOaI{~e5wa($j{lSB|toUjW7FdTujXWuNF8p8nU=z63a1Uab%q&%Cl2)@e-8D)g zAXKS?5xJsVOda6M)iIRVUXsHHE$9UaxjHg@uKf;~4Koi>b*C#?WUBZHF)8g%2ecIe zPuB8rL1T?*S{33W!dM_siWUdVe+fB~Xk(aHM}^0qFZqGKL&|-o9`nU*Gf0awNru#E zOA>jaE+SE5=0N_+{`_zS8(B=v3Fio+!ZuE)_9+rAGMfq~r(f&HTm&iN4^|kfJt2L( z9v%h-+As9!c9>5DrDSVBnIJe#Fkn=Pwe_z3L7azyMz;wc)^9l=3TPa?M^Qk`T|>;_y>^R^bWrAo`bQZXd;hFbo!Uf*xNHQ> zr@@*FS?@Jppo?K_j{Yt<=r`7ZH7fBnUBAJqAku{svpb$>z`au%7h-W!6CJ! zMdc)n0~=m009Y$L#t<%*?YMv_ARw|KTEbmRar)6EN!o6A`bxik|1#ak}H)x5+ zLeU~UAY2N^>GZu<%MuOvJXp@owOhM`1Ka(OfB91wl62pLHy{-S{7vb*F)W4)uK}A8 z=kz4KxXLrcw}~9*HjrYN7FhZd9HU7nHpswkNqoX5@rw4e%xV@`pYhV1dAb&gSKZ82 z_e2r}v=8V+lNj2N!^7vWKMf#{7JXcln4^ z#=HD&1LFGT`}h#CI975 zh`P>2oA)s0eJwitXSOmx5~7OnC{ccVLjN_NkSA{PgYkK+^Ju^EHt+J2d{lYPKM|%gAD9Sy#W(tDy!76n{(l0@b#3Lda4`cOqqR`ZK zt(suT@vh2H9^*Pe!mA! 
zO2UZDUP%MkKXV$60xPdnd`)hrM6Z=Nhl5)8}0{CQKA# zVyT1mQTl^NBQ9gj?W(?uCy$`3&@HVM_Lyf&E$Jz0)Z(_-wHz+teRuaOl*eqsa(QJ1 z^Bu@O5f$gnPeL3&NVw(wJrqtk6pAW`;`hUusRymZI1ia$T3vXJ<0z48AuM<3elyJe;?*c5snM=8V#b2 zjSs%xGLYIj3wtg;9SiXhnIO;yNvbn2xi>IF;N?kJD3O$zir~QNNpwxy-i8b$Mz(?l z2ER;Dc=)hUEF15gZ458g@Qk*rFa$hh(!yW1eRwG3E^omBk1}7 z;7`l}$09=%c8F|PTeGPcZQh)fToj7X4|WD88P3WKWU{%^i+bO zo#E?Qa@09RYcQW1d!DDiZeVUtKlp~zUS{W&bJQ1>zlINRqR%Jlm)P%My;i~HAPPjk z$*)(~M0#1qY6^gwJXa=GI-CZ31>*$%11a|>vIfqlAG8zuWaT-s$_A3=^-{%v#5EPn z=f#ybn)(`m>AK7!7zQ zXD$%3{5Ga%TkQLuO^d0CtV5)4Q5?4bs*i$(L(u+j^$9m0yN-p(b)BG!NzrLVo1@*6~iMU~;~oLd^4KL1>KV z?x|+@q9M<_3aWEqhP0Q)s4|}e*PN*f(Ip$R?pF1#j=dp`iq%kbHL$s zCCw!mR`*Mo^SwXC>#di@Wb^QDVe0LB+WJe??8!?dErSpD!(OLJMDwZdi9!6@<9T!% zum@plIMRBy!F+XSFj<$#Ad>S=@b;8CIFGSyoyGH4h$6uR$gV(vzCBKvN+QQ zasWtUIPyr8$U{K^{aBv=s>PieH5u07glw5(cD~{(+8Tvq%zc%fEdZ#%rYTD^xlIOi2(Zvl7)apXiq7Wl$GR&@&^gl18D_n^ zk@j`9#{6z1RgX7xHi)Y)3mw;m`Akq#m9-~7gp~Vq2`mCcqRXdj3@f$LDEYuj0O*O* zW)^E62n#WOih)~~+ao{;F8QNqHSKD11e(@5!{3V1Ra z4jt#&=d1s75A(SoM+$2?f^!`y!E$Vr{Wl0vD3Fcf1?pb*nx2caSn5w@$eBVF2Y91vUuXbD&cM?udL%olSmR;Ly48$%@l8?CiD)FA8A38kCT2?wj{8($7dWs}#XetO1Dx^NNaW zk2fzRi%q)U=roGTD~QkO7?}%=IBh1OXl6g)#%Qbpe=DJr01YjoN~2a_gM>DmI-@0% zde9!!xQ_q?lHlZ6t1%LR`T9`#wlNW3gcPgw(xkY@fWkKGx=S{Ln^XzQu)dwaIV}n@ zd*4Or%hpNyOAID`=8CD~ZABw6WxS@^NMwigDy&7wtt+F%W$q@h(FQ!nkuGh`~XmLZB4^ zOOGgNgiH0WBw^T){1y*PM$DWnW#bz+>!JfEBd=Z&AE0(W1m5Z%$Y8*V0WjZ@Qj*GI zKBou_NF(P1fHId9lO6Hh@12LTrUB~ltD0{3;>IME#IonoiF?|ncBMo`;MR(pEe90C z2It->QSr>4)4xB^+vz9tc2J0BH$=x@GRY}QFETdUjT`0*QA&iQp-+2*Cvy@!`8VdS z4H*kZBL&BjaFfmdZV~+=r2FRNb$%YVrBym1<^#iAsC~CsbmJ$3k~U$5>yTKm|&8a^4; zQBwL~n|^pY?7YAEu=ro%&gX29$;lYPiK$v}V__==c*>ZpiO6cJpF~j;fjdgL%%Pf= zq&7>;DcP2^^pkY2N5$vklYEA`MG+$%Uf&a29?^R!{< z0R0&eiTX=6GP~MNhL;0!9qc2`(&&IjSdo|QkpPRC?hiUa4vDS647n_@~DM%^6 z2|X8itgf~;Dwml~%rRnw4Dl?DAJcu(qiQ56&=Y=ZA|mlSBDcQh0nKFp_269-)~C zC{kOCqL0(K55ZY^pB`1Q;4m4hKZi#SBGOQX1@@9ZZoQ&Yjp)}o+IzWzQOhl0q^I-A zW=Nr>w2bj+Y5!X+fFoWi42y4}1R5tLi)_-O$s5cwy zS~6>YRl)pe;e6vrPLiiH6Fb)d1R@oROU{jL%C4R$aH7jRX2=n+;_7L{prI^Gen(xJ zeH##yolpfI60XmMx68~ehJC}A@AmwTGa8w%@_&g2+wy@@0i$rc5^*2WnJQXmnpH+J>=+{DcIem zE1bwo2g2ePgYavlJDA2{Tm!(5Wj|gm@e-}#AWW(nxncPIP7hwR2zuuX;;ju9ir>yq zGc9hZz{rO4!aGlru8cTOII&+HrH?&1D4~pe#4Qs%05IRaxR7&HZzs1`Vxj``RUrH# zHAdTfEwdYsts*kiy*9lRDEX7lB|yU(HkrMpTwXplpO$x}Pca}N zGBfM#BLQUqYd@!fkNb9{RA9JZ3NXEWoZFioiI>+xt`+aiVZI?GS%1YyKEjd5k%K-X zcxW7l(`YJOLco8odu=|LG`P2U&y4iBo*1Y!A`vcx==LYP?;1d9ar}1YzUQ z)J;D}#+fx3nH2WQlKh%}1vG+A6^Hiu*x40KvF8A0ph${B?jCWB z)!;GgPPo@PrjN%2Mf<>fH>mhZjgj2YoaK`SAmWb`=sYS_fA&dr+!JtG`Z%d~xr%XR zX~;*@Sqeg=7G>dWU<%>nUgHkWjk{qwkmqGkz{93vj6p*}*i{T$NqeW;vaF-G^_SBe z3Bvv_Kfi}`>ce^VrT74>tWF4g#vt+BTMhPDDh~;;zuNv=|CTPtpTVhqh6VekIM+APQk%?2R5k>b< z?KHc{-VK!vMuBfA3f_KLYHJ(4;`)|TRvs^g5-_1qF$>FI+V#EAg;+-6&*9KysU+3CsaNEKtpAQCRF+6#? 
zT=6)@1TZ@so>DpZ&uF+#e+eud82Xd~Hg=PashMCyny@D7=|jJ*hsdlzFWR5ylQ~-z z_4+{Hg+d_kiydzkxC(~(0hFdADaJY8pyQX%MQQi}-(u9XkPu5y*rL0RGbg+HcWHR; z0Rgyv(rYJL)-g$J{qf5(YLaxZFq9AD zNrSjdPNo^b<4^>!FWd;#h~cJfz~Amw5h)D*mrNJ8#yxHe>^KBqIto*+&x0AgU82Ba z4=zYl1*c)@g_IYh+@J~OzktfeOc$y$fPCwaxehOAmm7%>R}Tq??ItG1SZF;xxtlgB zj|UO+>rT(MY%8)cbjcWAqtO)E^lx)AJ*V8^o;W~3p2>E&-iw8kA}DG|tpcAZkRVO- zM=y5Sf9+39)&N@{B3$@-ZT@@z2b(zCP#3wL+*gP0I!w$gUsoFA)areidMig4)CLgZ zX(akIHh58z7vbcq;sNeT9MZ32$if zGZq-AJl%ZKho~fgq?k#FYtRYUPx*MQ(v#IXX6pWUNYm^IK+|Om8rMpK5V}ffo3Jqv zq}_@}y$rGp#`rX=u8Lhr#k-o28=|7JqcZ(d-NADR|LSiTP_>*@5}yUoG_66`2w!w`3^QvvPC{OA@w8br-MwvVb$e(U=U z$4n~nv`u=sDIhW)M(gb|Xj9Eqr`!sxeQ>d|nm#<|f26v%CZ%yx>OCg48E|++s9@bt zZ3rqElNb|RK{6^>n*ZFO+`V0I_R%7d_xSO(k5&{p3I&aswAC9sH~&seJ^OR(g4+V0 zbB?6VY2c$W1Z6n%66VuF=6dB&^$9=-VOai{+~l@(!Glzw;C_F#PpC178^hSIZao9F z_WOR^`K8OZf4hV*`m)9Skz+N zL(H7-3U9TznGhT>;Rg^+{zx{62!1r`xo2S)N&C0{1PTS+e-2k-2YK3mOza2k?2i4) zPwMX}0UtztQ?cR&fDOU%QJcv)k;jn9@KDKk>e6#VL%sU<UX6?Mfn`iZDq_u8$(aG2+#`EwzQ`JQBc`pU`5dqq` zAoR0y_#$!V&;Qy7`@(EnJ=z;Q_t_P9OjCjw1#MJfU-sT*M^J?w?{<7pJVj_^H`Pr< zN2)ZRx3+oFz2`j7PWOys+wP4zyMJ0;v$YEsS^LUQrIWvtraZ;A17cUHH`|QGZ;1cZ zmoTt8p6*2_8Q@55VlYy~L)Y>2c|S?}KiZkT&eJMJQgcWM-*`Oa2%GkE6m=PzvF2d@ zcKaZDtprr7OB7qbVzLo0(#8aLr(3bU*pDS5r9CmhYC~>Dp}8q?|2Ka`0y#?93GhSE z$wl2gp-*sOBdfG?s#xZz7ianSc4Bh!*q|v}P!xw%#vTA-Xag@@X8ktavxDH`tDpMc z6SFv@<(Jgle`(dvOl+ut*HdUiJ7w#sbtX_p70A$mwGTZcIA*=q4}niPkWFK$S-1&! zYy8oGx=D&mU+pF!@g}Bz`fpr)d#a|6xS~AX$_27z%ZCLy=TxZJ3CnR4-{k8t>}u_r z&JV2eob)}S8QG{oL(YcOtzl0=zKt-S8xoDSVB{0QAb~@1hzzlQOI~Z%^0P+)=YhKh z)wv&PW&^5=+smbGu$T#0NKHQIW|j}Di}5)3jT}B-C(XuH*O&wz2PT-z_N!gb{izTo z40*57y0t?~qeFOMz8J(bq}82$!ard}^Y2O*YI+8#l*Z$iHP78oBdCPn1%~Sl%tJWJ zWlb+E%4zo7Ryf(RbjAYbRA>1WS_bEiz`IHFJWr~abOpA}+E}#eRlajBz*;58Z@s?l z1m-J4b`qM{<%YT~@k}&5rM}zQa~dRtV5wLB&R@SyoT+7pNOz5%70Ry0F!bIPb-xWs z+YPhAFcZWZ0y?ki;VG5|_e6nXOWo%aTiC;BYRXDRGI@J>i!^kJoEhfpK?(ifj5v{* zCb;+wzm6h`hwv{f0Zdk%6+L8ji#M$lVQ?YpHY`jVw_o0V$G7>efeI9vjBwbOi{3t6 zo>vNFyk>G7+i+~z3{gk70y$?75G2~Dp2MT_de!~Ae=&m+!mSvY%yEaF$_|kbeQC>1 z9H@GYu z@7PC>kdUZo8il@4xy&-~o3OG`^%Iobo>0C03k}H>mwIJaR$!7`c17fA+kLyDZV??f z^y1pVh0ff6P^BFL^F5$qPYs{|3=-FqtQ|YN)+(W-D^kdsH5bWNHX&-dG!5I^=M?!N z1`5JL*d_+&5LkNdBMQo0O1SF@zbFgh%;Wv5ouaI5hiET_jpSDp(|P>PemgJIyE0WwF23ceM+j>bbhqNkYn#Fh;l}#ICFIwh+U|TR4`y z2rR93`2Ab$UwQ2~?c}n{;neWg@rk^BwjM?vmXz8U$U z3GoHZ|9(mY9^rGSRhK_xrLAcZA>)MM6-Q(FK+${EXmvD`vfFqv*WRitE0x{n?Jj7H zexGr4ufb~Qe6jA8^YBuCK*N=aIZwQ4?je3mzg)z|0OqISUMTDrct^@88KerRN4`a$ z-;JF6N+w6OQ?<-v$OTZ!&DtAZ$EYRbo-UJ3L}DIFXy2~3FJ8^0fZ@ZurpFugE!FL>eErFVgzVz(IA1U!%qudA-YBMp2|)_xxOD8x<+ue--v!LnS_+d~ zs<@an10R35%<>c2!s2%zHJg|Y7{u(W@mp)8R>0Qb#(ln$#JdF-w#aPoh7jdCyf|4Y zM{<32!aZ+v?-mlvPoKTYeuc`8h(DpweM9>gsG261PLkaErG4s>VA%47sR@o&8Sh>E zKHC<|mnSqESq}Id)1{M+aTjN;(N^amCBRe&+_!Ysl`|?xzCer)&?zSn)0S%q(+<{5 z_sLD+_Bh3WtC^l>58|uCOf_p|ty_o?qOE77$g2?H5sxFWCoqWs#dINJDO&1jZXrWn#3}-R4B6?PAY^@&BSe%L=pg2_k|zJ)(E^)Pr{|~{m8%bR{#ppZ|ziNhtE!z@Y+ER zV7)$DF8wWvDt*(StDK)}y`wj*%otElhuxra{&oCiLqjRvtM z(9f%{&WQA~CpM&%!W}Rl6W3OMDR9;sbo2(~%#$U@Dmk%G`ZHRh(C2(bJrkCBFAUK9hLXP>pp7&w+UJ7nkTDbw6 zHYR2a=A&cZ*KCrROq}$FMtTroY=OApGWWO{wHszKa6;T@a2L|QylT1cDx<*A?J4|Q z<1&Pc8IGF>Y+OqQK4hcfyVX8N{#dE~$TVrR4!T7C;Ak4?W)#zO&^O?9bZs**`X(EHwH(-Z zUbX$T%%H^53PQ({sEvTIr{YvaE_Ms^QK1qE%VZ`)XJK`0Gvt&(i)(L9?gdr#vA@-Y z(upXv^t`DiRHc56Q?nxjtj_EaFg4D{0%PrFAA#93mdg7c{pX_WaTMyyBclC3{a6^g zWX8kcYSZ2gR%Wi#FdrLg2iKtEEd#jp{d#cnc(-Ri!rrI`q93^pw;~7FFQYU#(D$ii z+F2A3f4c8=G-kPi>fZZYr*8HEEDC4hIgZn9s|IXef~pXvrso-1Hi=(nxhtAzRt-`V z|GWPpfvUBgJ+BYcSA(m5?BVrO3M@h^`WCi2!+sqjE`o zNXs;^a*v6BRQuh+0#Z2MM~CJyjLe?H%4AZE*?1H)LIfqVmhqG%=P}tgu=r`Q{&Y2I 
zaYP7RXiQ2B)zIlAU!0&G{Qa3Me!IpnQZzE^_qJ2C{XyHtGZJ{)Ixq+S$W|=b*a;`) z=n#Kco*UoP5oQXn8jt7GjKoN#jq8@Sx%|O4#(HyF_Qike-ynZ(?w>w~B?OC&9Ck$Y zBkhOOAqGC#@dJ-IIxh_BJGMb6LSb+vQgsX3W62{{>z&$F8p`KRR#x`F9|i$I+*WZE zs_#1X70dRIxeRYU&&|cYP)K(%Wr##2gT*iUJU>$OB%g#i!tg8eSUCC13tU2un<>hn zZjiaHQfqUYR%++)RmmEC{$^STb-`)AX7c#YWF<5zA@wq3bV5csjLEw7*y4K5t-1A7 z8a%Pmb1mC8%HaJX*2x8!F9)T#7XvM#VWs0$92W@@wCtPP=-kNy zlOJjiY^BxTlk4P@lU$j%1#TGuX3MT^CSim_Ae4wDSzkC4mO6wurqaK|Bw+RzhAuCE zHGJm#x4u*ZeU3^?TOSJqZxI}VMtmN@Kt$aQ7a7}tfdq~Nsd+eip}9im^C9 zK&tp&ApSKb^!*Dxb?Wfxzbbpq+f)eaT^7#zSo#ch$x_UDKy|9>*~shX)9zpW(1Y?j zTpT#32;|jUXm&0C3U~^Bf7JJAB~!msj)FxC&k@`f&n>ke5iB`F7mAR^!H^`N*&*#% zL28$;$2rQHj`%~8OS+n!j~JAMi7JVKgcueA7vu74ty=uz-}+uL9KD)u-dPjae3e0- zv{#5(9XAt^5E<&D0<8v3_g=}@l+H>7=_)U%TzILz2eYA&^c;zSE6V^6*cy>4dYIQ$v{#mNCwd@KYz z^YrhYPkUhD+_U^t=zubOXiz^vsQGbD@&e8dv4p0&yretSq3T`QqWj-S1^zGW^BwBW z`HLhNuVtoGkso|DTVL0{gdJZ75~^=gPdEDlZn+k_z!gB7>>2i)A`{rQ?6=A+|s zlw^UQ@2?MtiID7MWiq8mTJ80(AjtgHBCnv7RaLIKe_jQXH9%3)bzWUg;={vXNn+}v?J z2#pP`j+(W!zW}eWR?epC>!;qVOnf23gf4EX$63yHf5Vuc5pj<93Yk&4q6PPX-?#Z7 z{IiLCvJpPAtWT>S5Q+k)fUkBnu+I7K^&CG6b~EM}_%j3eQS|4R72fK+`j<@=t^#?Z z`06%MLD5KP4P6{#+~T1_ra_7LtWoiJT5r|V4V^pY?)AbrFh@BU1?FFRNl49nmmYIx zNPbzMYZZBaav~!Y(_s4V{s;rIJ#ldF6M;FQpv*1cwPTLSKf?IYgP|nc5|g)$FtkMc zv)GYYLq%h`)p-w%D%oL~Cx?|vK0H}6*$bp4_|brmkW2L5Q>O1(eB z^78KfPgYp_<3pnZM&7Ctz(vmY6)}}hB1=!OOxFUIH z!VMY~yYIj6O9?067;wCo*9Lt+LTk#K^g8hlG>&)!)NSDv$~MH01tSB5-0~Oy?!U+h z7wIg(SxF#(UX~zl415l7@o+O@&65L9_}(Qbbnt8hLEf0g0D#?}_dL)z_m@bGYyrTX zR%k4t3#b3*n&-Lz@HC!2O?w$GIgTAJwVW(f>N2*MK#LmoU;mtsc|N{+aAFi7godj5 zJ2!M}7Hy&ldcx5X4~ASX_15kwjz{%c()OFJS2XVz``Ua%yB%mSn@s6}L|~1c9AdaF zC9Qj5@-ES>&-hf&*$h|CDkD0|K^^5g%twXl-+U(>!~)|F37Fi1%)X$$=yjNqC)K1@ z3zMM`0BCz`5>iM3<%eW4>v3Ri_1+;?zfv&}cy}gKSuSCbqftOFUH`?&)OUy}2l0aCaP?X=rOFhR7M5K}Hh#z$5rTZTj&)uSpSBGITIbCaWD^Mi>^E9jZn$Rm9G0FY8} z%X{^$%Yxp*rcC$2)JC!;p^hq$M>_b*MRca;UAV5K;oa?B>RH}Zb5<(Mr-OnO4vc)l zax7|i#`8R0Qqu(f=ux}OQkq~X6@lF@sQ_s&^8q=xlceBw2q^Tag*67ABotg*gu5lo zN&RqPfLI&Cc4_R&_3d?C3_)MFpm%UTKeZA%>A2AGG~OsbhFTIvHvR{G&e2k@WtH=IYoj- zq|SEyehXp8+<*!5p$>__=RURYZ+=Y(QY~`E<2}>{s4_)Xb~kbrCkn{$Mt)1|y#2VIeEXr(jVcM!glTaJJPoImSnw$W`r#1(pbiclBpnk1 zp~y1khaialV1UIh2Ni$5zcU|EponHp#;R)a|>{56F z%-6u0J3cD#jz}0eB5ykn6&EsEeD~Hqq==J1>Z7-QE={h_t>i#ba6$xZv;of7mme`f z>a`!$ASC6V5Ny3RF|u9!HWKJ)`k^mLb-$u@Ebgkv z{+|C)XQM7g_v=(f_kQzk3wYYA*0#{o2~tLCePpCbdYRnEzR0>tYE>*o$z#Imj`=0J zDVA?VOrN(~`i=J1w@$qXMJ(bGnC}I3QRp`M9F|u0@eAoYMGpAH$2X}Q&bv5F^xbHI zfq$}s*4|kkgry4=2}XZ#>DWfX>5M~sA`%NHkHT6B765G z?}D&1ih#nCRNEab0h_u^eSOf45jcO_#GP4aKT^vDi(iODAFfx=SLQ3Ug^{Jclo=`V z`Qg88be0uMsz9{Y6qJV4+Z9$$aU(e0j5&$cslR^MT5DdGiK}CACo!=0zXpMcpOe>y zI%D7fQAO;AvTdmBKotV+kCeC5V*m0FDIrxzjnU@_C8O|KP7H=Y1~uG`6QT^uu~A*M zB38xn{viVOW{&DLgp)^&$iI&@50b!6<8qu`j}gboBWuD_-(Laoa5b%}*##*DDw@ib za>`G$#A^JV6j>CMu=vrT7gSQH&-0H7IfO?^3^lJ1cJZp40MQeFK7HDw7AE zl>-U258>)Jk$qJ9B<1@JJ;Rt|EZcyKsPh2WIaJ(O7AZ{Yw)A1Cs>kE;shSO?G@ztl z%M>n%k_!a3jV~A9O6v_HrA4_<_kaf>f>6Tj0-b~;d06{Eaa_#0gxEC^uocV)R>a^Y ztUoQ%3@V{=zFFoeh(V+*8t1<$*3i-H*|@ncH<76xOpvKGZMc*|df^!|H4hB_P<@Q* z-?7)SMFEk(d6|5d?#EKnLq?V`pTzt3d>=McUZ5w>WEcf8G>q$JC@R~3>gO@LqFEiw z3NK@JZR>JxT?oP0e)Hpr?-Dx$qTu7j##Nx!>e-R+O385Ai<;1eKR61eMHU^kh|wJd z7_Lctpm}-|!4w>H)PK)skw7kZxxtVdD>0>JMa6D(xl3(<3IEgj6HtSwPka&_)S`HL~?nS-V?*1*8jfP+Kkbj>F ze2a#|hP4kpN7el=+8Za&4~F`rl;3tyO6jZ&BlN46hwdKa0sB z_k0~i)8)FwK#s%6ouo~&=Ac?rL1TVy#Zx7iFAIHM-%Dl^$si3{26f4&{cOPOotngG z7Kn+6)eC>^0Owg{*^R?lvCtY9lh8WM1sq{m04)u5E+7*znoczna;(ZnfxLbhZcJxi zrTTZM5%B<^0kU=UN&s8Jd^ISe-FkuCPz6R*Xv}WMBo3Y#89sjZy#XtKB!Y~cw6#yX zd$NN#m;VE5gL|1a)w$<=HClp&f4Lj=^PvH5*#|L59|NzR7Ax#f?6IQth_i%{WWrvk 
zWNOhN!+d=xlyf-4G)BcOKk|^ory%GHdkcZ(aRd$$N4Cc0V;?hj1YbD(YxcY`#s)rWD(tf(o?Yz)MyLmNO{4P+2f#qJ4g%1We8ev<{ zpBVTl&0LN7ym;p6dZI>uby;slexc2eP>q8I43`@({vbyi=$@QtLprAQ5~5or#eN~c_;C!+>GS~~ zR{eub0M8&ogpXIqjSG_QcVeylNkjQS_IXN(g zTE!pfg&Pm~r&h)gdxlF6RRZm^wa*>kS|QnwRz|yc98!@#m4AAhSNuDJ`|SuG{uox7 zVkRxhxXs)1v>THKLO&=BoXmI~?j2goDP{Te+;;|xe;3i)9S~Ju0m{dRl=lF)tMY@J z&SbH-xxhm6j`vlz7{F}!xUaJ#R|5qENMG~dDcsVO?iJLAs`9yA|xT*6bvM{(~|N_E83g@TEc%n9fQMoUW(o!Hg% zLg(WV-E}$QxCFfD1%#qx4t51j-}cx$XO!L~WF%M(Iwb9%<+%0fO5p_sNq6ZrhctxP zfqp9_;x;pRF&cYM>2tpt#`1gw&+D?WdpZ|?XL?~^ouu#y^Ae&?SsB$Y-I4d-E-UkM zd-a7G%<13C(SSm)0|1pOIXi4E=i_|yNc#IibNZ?4pi{;!kb3Uixc~Y;y`0*KvHT|T zFt6uNEI)-ix^9-=Rc_C^O&760!Z%wi|0cSVYW3;Fwq zBP32Lcr{oET?TF|NaQ-keJEjNM%3=TtGeYBz@a@})5)tC4($_v5KjU~YZ)y5Nw_4E ziZV3Sr|ds3A^-q=P&rTZIO(Tg=LWF5PucRby~0e@3xuCJ**Ln!zxMl#CrNsz@72VX zXl^uLOYI}E0Cl!QdT+>qJiNl(^`Kui^tiRP+@3UcaLGY_K@)BmJ&8MO0R{lenHqS` zrP<-yEyi;TO#u+-NNOXsQ75>u8w7&XA8F?`uJR1<*Ohf8T9H&Xe5vMt`Jqt$Oz{c{ zub<~$*7M5*c@{jqTrg;-a*uX?i?{d_t9~OioU0j^iwB4IGG6=m*=3QFJgH-pZ~Z11 z4g$MJvRVZBRgj)j-zO(GwYk-|&0@P#3^8-&+aeKqnmH$X2-_fbn|Dw$1TF~|PRTNt z$h7G4genCZH-#PO!-677PG529%HV2^F~m%!V;T6QTBHSgnwYZSVpl}_B~dm^=KVRb zmPV#TT}myf+t-VV%~z?Nrsz41nT@-D>XD7&z0ni1vcs1h^_TtDgL@j|3PSKQ>kspY z5mx5Q_J7P8MU^`z7_BVz5Gu;B-Vm!OJ~&hm^g;Ot@4AsLR*b_qC(FATT%@%&;Z zr{=5C&S`5}Z`ix>)e$d~zJRQc&AtZw-e8WyFTOXu(KT6#kg{m8I<}8G8??e0OPQwq z*>vQ@T28xom+F_-IVJv(JLvz+6mz~B4NoKb8r7Zq&82^v1y_=lK;*Y~$=4cdu zhmyQ64GH3)z-m+W*Hy%|Z3@FkN42~l&3D4N^l)P_hCX2+*Ac{_TOpw2D^<}CnIrY2JX+gOmw>omT z!*KslMM00)t0`9fDsX#+c?PyCvcZmJq;&>%>neVCq8NQA-b~YR=s2rD&JzvAT>kS_ zMEdw}+(vgUBR5(1)y-u!zxrb+Ynp!WC0of8J11*uA5PnRft;_RiVj`|7_E;G(z`6a zHY}-eq%|5F>E|9C-w|rp`ltuSI}sO&!|Kg*f&Z@1ghSP#Uzj!`N|Ec;rikns{auEZ zj5ql$Dhb*Thq8gg^s9ajd;aCTCOOO;?l$wz1sa?k9SFZp9L%p7+$estNE=I#>X0%E z`^$bMs4{kPd8d@>76Jv7_tZbslHZQBdc-6V7$j|kdm09;H|@;N&nf412h#azZFhso zV|Fp*hWlsuN5E$T9jM(83PpootA`%s`)pw|svl5RT#i~^mn|MVlI&HL6}cYy!hW|d z!S_vp*$efn9jT@R*LL@#C@sw+#77tQV}iTaea9>`*)4c^qET*!Av(+p%wjE#)|_r$)QbcAr3-v(y{Mrs+De;hxq%odbZyVC|C%mU1rNj&CYF z0x2MbJP@k9|44W)sVqpzc2(X-=vaom-Bg}kj~@vp)9+&&fI zGqRd+O$3HX5cmW$uRTzwdcH4rFL@uJ0nzM2e=m!LZtn$ea5g-l{~D~@qzi={T5iir3a3$NOAB|Z!{dK-(82mONxT#OG!gF-v(5oO6Q%qQC^tja-q{2q4K`9g(=CfA@KyGR$3U=v zJgqtMzP*g)l%(#c{9G?t;hS+na;5egBq3ng>xbRXEiiM%6K{%prvN@I$~DWriWFt_ z2Pb4v{SMWAmR}>Doo*a@Ue2@F8wgl6Xg?CdpScv+~PjPTjLws&Rcd{pzJ6SLDs*0P!NLUR2&u zLWqgHpO%Nqwwv`t=8yLT;78>hGr|gCnbyRgO8w5_TM4QO&oKS%MfCu~n3oqKQ^uxB zO7T(Sy|n`J3?LS#3*=9gEC;ttNAOH647ApU6@N~;`^-o>CO<|j`IT1wALU&6G_9gd zgCL8LKE74--0sIMe-y?1@ohg17T+2khPw6AO%BAH)?H>d|2@=@(|O>MccjO&jniNi z*em_Zdv!~U7I)lw+qGl&$}CL*2m(rR2`nU%HlhTuQ%s-Jivqaj0U+Td1`hj0HAf0H^O>QvmJ(WIoIseb${{Yl5Xnp_y literal 0 HcmV?d00001 diff --git a/synthesizer/LICENSE.txt b/synthesizer/LICENSE.txt new file mode 100644 index 0000000..3337d45 --- /dev/null +++ b/synthesizer/LICENSE.txt @@ -0,0 +1,24 @@ +MIT License + +Original work Copyright (c) 2018 Rayhane Mama (https://github.com/Rayhane-mamah) +Original work Copyright (c) 2019 fatchord (https://github.com/fatchord) +Modified work Copyright (c) 2019 Corentin Jemine (https://github.com/CorentinJ) +Modified work Copyright (c) 2020 blue-fish (https://github.com/blue-fish) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/synthesizer/__init__.py b/synthesizer/__init__.py new file mode 100644 index 0000000..4287ca8 --- /dev/null +++ b/synthesizer/__init__.py @@ -0,0 +1 @@ +# \ No newline at end of file diff --git a/synthesizer/audio.py b/synthesizer/audio.py new file mode 100644 index 0000000..83dc96c --- /dev/null +++ b/synthesizer/audio.py @@ -0,0 +1,206 @@ +import librosa +import librosa.filters +import numpy as np +from scipy import signal +from scipy.io import wavfile +import soundfile as sf + + +def load_wav(path, sr): + return librosa.core.load(path, sr=sr)[0] + +def save_wav(wav, path, sr): + wav *= 32767 / max(0.01, np.max(np.abs(wav))) + #proposed by @dsmiller + wavfile.write(path, sr, wav.astype(np.int16)) + +def save_wavenet_wav(wav, path, sr): + sf.write(path, wav.astype(np.float32), sr) + +def preemphasis(wav, k, preemphasize=True): + if preemphasize: + return signal.lfilter([1, -k], [1], wav) + return wav + +def inv_preemphasis(wav, k, inv_preemphasize=True): + if inv_preemphasize: + return signal.lfilter([1], [1, -k], wav) + return wav + +#From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py +def start_and_end_indices(quantized, silence_threshold=2): + for start in range(quantized.size): + if abs(quantized[start] - 127) > silence_threshold: + break + for end in range(quantized.size - 1, 1, -1): + if abs(quantized[end] - 127) > silence_threshold: + break + + assert abs(quantized[start] - 127) > silence_threshold + assert abs(quantized[end] - 127) > silence_threshold + + return start, end + +def get_hop_size(hparams): + hop_size = hparams.hop_size + if hop_size is None: + assert hparams.frame_shift_ms is not None + hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate) + return hop_size + +def linearspectrogram(wav, hparams): + D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams) + S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db + + if hparams.signal_normalization: + return _normalize(S, hparams) + return S + +def melspectrogram(wav, hparams): + D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams) + S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db + + if hparams.signal_normalization: + return _normalize(S, hparams) + return S + +def inv_linear_spectrogram(linear_spectrogram, hparams): + """Converts linear spectrogram to waveform using librosa""" + if hparams.signal_normalization: + D = _denormalize(linear_spectrogram, hparams) + else: + D = linear_spectrogram + + S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear + + if hparams.use_lws: + processor = _lws_processor(hparams) + D = processor.run_lws(S.astype(np.float64).T ** hparams.power) + y = processor.istft(D).astype(np.float32) + return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize) + else: + return inv_preemphasis(_griffin_lim(S ** hparams.power, 
hparams), hparams.preemphasis, hparams.preemphasize) + +def inv_mel_spectrogram(mel_spectrogram, hparams): + """Converts mel spectrogram to waveform using librosa""" + if hparams.signal_normalization: + D = _denormalize(mel_spectrogram, hparams) + else: + D = mel_spectrogram + + S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear + + if hparams.use_lws: + processor = _lws_processor(hparams) + D = processor.run_lws(S.astype(np.float64).T ** hparams.power) + y = processor.istft(D).astype(np.float32) + return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize) + else: + return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize) + +def _lws_processor(hparams): + import lws + return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech") + +def _griffin_lim(S, hparams): + """librosa implementation of Griffin-Lim + Based on https://github.com/librosa/librosa/issues/434 + """ + angles = np.exp(2j * np.pi * np.random.rand(*S.shape)) + S_complex = np.abs(S).astype(np.complex) + y = _istft(S_complex * angles, hparams) + for i in range(hparams.griffin_lim_iters): + angles = np.exp(1j * np.angle(_stft(y, hparams))) + y = _istft(S_complex * angles, hparams) + return y + +def _stft(y, hparams): + if hparams.use_lws: + return _lws_processor(hparams).stft(y).T + else: + return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size) + +def _istft(y, hparams): + return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size) + +########################################################## +#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!) 
+def num_frames(length, fsize, fshift): + """Compute number of time frames of spectrogram + """ + pad = (fsize - fshift) + if length % fshift == 0: + M = (length + pad * 2 - fsize) // fshift + 1 + else: + M = (length + pad * 2 - fsize) // fshift + 2 + return M + + +def pad_lr(x, fsize, fshift): + """Compute left and right padding + """ + M = num_frames(len(x), fsize, fshift) + pad = (fsize - fshift) + T = len(x) + 2 * pad + r = (M - 1) * fshift + fsize - T + return pad, pad + r +########################################################## +#Librosa correct padding +def librosa_pad_lr(x, fsize, fshift): + return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0] + +# Conversions +_mel_basis = None +_inv_mel_basis = None + +def _linear_to_mel(spectogram, hparams): + global _mel_basis + if _mel_basis is None: + _mel_basis = _build_mel_basis(hparams) + return np.dot(_mel_basis, spectogram) + +def _mel_to_linear(mel_spectrogram, hparams): + global _inv_mel_basis + if _inv_mel_basis is None: + _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams)) + return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram)) + +def _build_mel_basis(hparams): + assert hparams.fmax <= hparams.sample_rate // 2 + return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels, + fmin=hparams.fmin, fmax=hparams.fmax) + +def _amp_to_db(x, hparams): + min_level = np.exp(hparams.min_level_db / 20 * np.log(10)) + return 20 * np.log10(np.maximum(min_level, x)) + +def _db_to_amp(x): + return np.power(10.0, (x) * 0.05) + +def _normalize(S, hparams): + if hparams.allow_clipping_in_normalization: + if hparams.symmetric_mels: + return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value, + -hparams.max_abs_value, hparams.max_abs_value) + else: + return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value) + + assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0 + if hparams.symmetric_mels: + return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value + else: + return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)) + +def _denormalize(D, hparams): + if hparams.allow_clipping_in_normalization: + if hparams.symmetric_mels: + return (((np.clip(D, -hparams.max_abs_value, + hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + + hparams.min_level_db) + else: + return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db) + + if hparams.symmetric_mels: + return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db) + else: + return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db) diff --git a/synthesizer/hparams.py b/synthesizer/hparams.py new file mode 100644 index 0000000..544aeb5 --- /dev/null +++ b/synthesizer/hparams.py @@ -0,0 +1,92 @@ +import ast +import pprint + +class HParams(object): + def __init__(self, **kwargs): self.__dict__.update(kwargs) + def __setitem__(self, key, value): setattr(self, key, value) + def __getitem__(self, key): return getattr(self, key) + def __repr__(self): return pprint.pformat(self.__dict__) + + def parse(self, string): + # Overrides hparams from a comma-separated string of name=value pairs + if len(string) > 0: + overrides = [s.split("=") for s in 
string.split(",")] + keys, values = zip(*overrides) + keys = list(map(str.strip, keys)) + values = list(map(str.strip, values)) + for k in keys: + self.__dict__[k] = ast.literal_eval(values[keys.index(k)]) + return self + +hparams = HParams( + ### Signal Processing (used in both synthesizer and vocoder) + sample_rate = 16000, + n_fft = 800, + num_mels = 80, + hop_size = 200, # Tacotron uses 12.5 ms frame shift (set to sample_rate * 0.0125) + win_size = 800, # Tacotron uses 50 ms frame length (set to sample_rate * 0.050) + fmin = 55, + min_level_db = -100, + ref_level_db = 20, + max_abs_value = 4., # Gradient explodes if too big, premature convergence if too small. + preemphasis = 0.97, # Filter coefficient to use if preemphasize is True + preemphasize = True, + + ### Tacotron Text-to-Speech (TTS) + tts_embed_dims = 512, # Embedding dimension for the graphemes/phoneme inputs + tts_encoder_dims = 256, + tts_decoder_dims = 128, + tts_postnet_dims = 512, + tts_encoder_K = 5, + tts_lstm_dims = 1024, + tts_postnet_K = 5, + tts_num_highways = 4, + tts_dropout = 0.5, + tts_cleaner_names = ["basic_cleaners"], + tts_stop_threshold = -3.4, # Value below which audio generation ends. + # For example, for a range of [-4, 4], this + # will terminate the sequence at the first + # frame that has all values < -3.4 + + ### Tacotron Training + tts_schedule = [(2, 1e-3, 20_000, 12), # Progressive training schedule + (2, 5e-4, 40_000, 12), # (r, lr, step, batch_size) + (2, 2e-4, 80_000, 12), # + (2, 1e-4, 160_000, 12), # r = reduction factor (# of mel frames + (2, 3e-5, 320_000, 12), # synthesized for each decoder iteration) + (2, 1e-5, 640_000, 12)], # lr = learning rate + + tts_clip_grad_norm = 1.0, # clips the gradient norm to prevent explosion - set to None if not needed + tts_eval_interval = 500, # Number of steps between model evaluation (sample generation) + # Set to -1 to generate after completing epoch, or 0 to disable + + tts_eval_num_samples = 1, # Makes this number of samples + + ### Data Preprocessing + max_mel_frames = 900, + rescale = True, + rescaling_max = 0.9, + synthesis_batch_size = 16, # For vocoder preprocessing and inference. 
+ + ### Mel Visualization and Griffin-Lim + signal_normalization = True, + power = 1.5, + griffin_lim_iters = 60, + + ### Audio processing options + fmax = 7600, # Should not exceed (sample_rate // 2) + allow_clipping_in_normalization = True, # Used when signal_normalization = True + clip_mels_length = True, # If true, discards samples exceeding max_mel_frames + use_lws = False, # "Fast spectrogram phase recovery using local weighted sums" + symmetric_mels = True, # Sets mel range to [-max_abs_value, max_abs_value] if True, + # and [0, max_abs_value] if False + trim_silence = True, # Use with sample_rate of 16000 for best results + + ### SV2TTS + speaker_embedding_size = 256, # Dimension for the speaker embedding + silence_min_duration_split = 0.4, # Duration in seconds of a silence for an utterance to be split + utterance_min_duration = 1.6, # Duration in seconds below which utterances are discarded + ) + +def hparams_debug_string(): + return str(hparams) \ No newline at end of file diff --git a/synthesizer/inference.py b/synthesizer/inference.py new file mode 100644 index 0000000..af7bf08 --- /dev/null +++ b/synthesizer/inference.py @@ -0,0 +1,171 @@ +import torch +from synthesizer import audio +from synthesizer.hparams import hparams +from synthesizer.models.tacotron import Tacotron +from synthesizer.utils.symbols import symbols +from synthesizer.utils.text import text_to_sequence +from vocoder.display import simple_table +from pathlib import Path +from typing import Union, List +import numpy as np +import librosa + + +class Synthesizer: + sample_rate = hparams.sample_rate + hparams = hparams + + def __init__(self, model_fpath: Path, verbose=True): + """ + The model isn't instantiated and loaded in memory until needed or until load() is called. + + :param model_fpath: path to the trained model file + :param verbose: if False, prints less information when using the model + """ + self.model_fpath = model_fpath + self.verbose = verbose + + # Check for GPU + if torch.cuda.is_available(): + self.device = torch.device("cuda") + else: + self.device = torch.device("cpu") + if self.verbose: + print("Synthesizer using device:", self.device) + + # Tacotron model will be instantiated later on first use. + self._model = None + + def is_loaded(self): + """ + Whether the model is loaded in memory. + """ + return self._model is not None + + def load(self): + """ + Instantiates and loads the model given the weights file that was passed in the constructor. + """ + self._model = Tacotron(embed_dims=hparams.tts_embed_dims, + num_chars=len(symbols), + encoder_dims=hparams.tts_encoder_dims, + decoder_dims=hparams.tts_decoder_dims, + n_mels=hparams.num_mels, + fft_bins=hparams.num_mels, + postnet_dims=hparams.tts_postnet_dims, + encoder_K=hparams.tts_encoder_K, + lstm_dims=hparams.tts_lstm_dims, + postnet_K=hparams.tts_postnet_K, + num_highways=hparams.tts_num_highways, + dropout=hparams.tts_dropout, + stop_threshold=hparams.tts_stop_threshold, + speaker_embedding_size=hparams.speaker_embedding_size).to(self.device) + + self._model.load(self.model_fpath) + self._model.eval() + + if self.verbose: + print("Loaded synthesizer \"%s\" trained to step %d" % (self.model_fpath.name, self._model.state_dict()["step"])) + + def synthesize_spectrograms(self, texts: List[str], + embeddings: Union[np.ndarray, List[np.ndarray]], + return_alignments=False): + """ + Synthesizes mel spectrograms from texts and speaker embeddings. 
+ + :param texts: a list of N text prompts to be synthesized + :param embeddings: a numpy array or list of speaker embeddings of shape (N, 256) + :param return_alignments: if True, a matrix representing the alignments between the + characters + and each decoder output step will be returned for each spectrogram + :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the + sequence length of spectrogram i, and possibly the alignments. + """ + # Load the model on the first request. + if not self.is_loaded(): + self.load() + + # Print some info about the model when it is loaded + tts_k = self._model.get_step() // 1000 + + simple_table([("Tacotron", str(tts_k) + "k"), + ("r", self._model.r)]) + + # Preprocess text inputs + inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts] + if not isinstance(embeddings, list): + embeddings = [embeddings] + + # Batch inputs + batched_inputs = [inputs[i:i+hparams.synthesis_batch_size] + for i in range(0, len(inputs), hparams.synthesis_batch_size)] + batched_embeds = [embeddings[i:i+hparams.synthesis_batch_size] + for i in range(0, len(embeddings), hparams.synthesis_batch_size)] + + specs = [] + for i, batch in enumerate(batched_inputs, 1): + if self.verbose: + print(f"\n| Generating {i}/{len(batched_inputs)}") + + # Pad texts so they are all the same length + text_lens = [len(text) for text in batch] + max_text_len = max(text_lens) + chars = [pad1d(text, max_text_len) for text in batch] + chars = np.stack(chars) + + # Stack speaker embeddings into 2D array for batch processing + speaker_embeds = np.stack(batched_embeds[i-1]) + + # Convert to tensor + chars = torch.tensor(chars).long().to(self.device) + speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device) + + # Inference + _, mels, alignments = self._model.generate(chars, speaker_embeddings) + mels = mels.detach().cpu().numpy() + for m in mels: + # Trim silence from end of each spectrogram + while np.max(m[:, -1]) < hparams.tts_stop_threshold: + m = m[:, :-1] + specs.append(m) + + if self.verbose: + print("\n\nDone.\n") + return (specs, alignments) if return_alignments else specs + + @staticmethod + def load_preprocess_wav(fpath): + """ + Loads and preprocesses an audio file under the same conditions the audio files were used to + train the synthesizer. + """ + wav = librosa.load(str(fpath), hparams.sample_rate)[0] + if hparams.rescale: + wav = wav / np.abs(wav).max() * hparams.rescaling_max + return wav + + @staticmethod + def make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]): + """ + Creates a mel spectrogram from an audio file in the same manner as the mel spectrograms that + were fed to the synthesizer when training. + """ + if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): + wav = Synthesizer.load_preprocess_wav(fpath_or_wav) + else: + wav = fpath_or_wav + + mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) + return mel_spectrogram + + @staticmethod + def griffin_lim(mel): + """ + Inverts a mel spectrogram using Griffin-Lim. The mel spectrogram is expected to have been built + with the same parameters present in hparams.py. 
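+        Griffin-Lim estimates the phase iteratively (hparams.griffin_lim_iters passes) and is
+        mainly useful as a quick, lower-quality alternative to the neural vocoder.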
+ """ + return audio.inv_mel_spectrogram(mel, hparams) + + +def pad1d(x, max_len, pad_value=0): + return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) diff --git a/synthesizer/models/tacotron.py b/synthesizer/models/tacotron.py new file mode 100644 index 0000000..769f7f9 --- /dev/null +++ b/synthesizer/models/tacotron.py @@ -0,0 +1,519 @@ +import os +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from pathlib import Path +from typing import Union + + +class HighwayNetwork(nn.Module): + def __init__(self, size): + super().__init__() + self.W1 = nn.Linear(size, size) + self.W2 = nn.Linear(size, size) + self.W1.bias.data.fill_(0.) + + def forward(self, x): + x1 = self.W1(x) + x2 = self.W2(x) + g = torch.sigmoid(x2) + y = g * F.relu(x1) + (1. - g) * x + return y + + +class Encoder(nn.Module): + def __init__(self, embed_dims, num_chars, encoder_dims, K, num_highways, dropout): + super().__init__() + prenet_dims = (encoder_dims, encoder_dims) + cbhg_channels = encoder_dims + self.embedding = nn.Embedding(num_chars, embed_dims) + self.pre_net = PreNet(embed_dims, fc1_dims=prenet_dims[0], fc2_dims=prenet_dims[1], + dropout=dropout) + self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels, + proj_channels=[cbhg_channels, cbhg_channels], + num_highways=num_highways) + + def forward(self, x, speaker_embedding=None): + x = self.embedding(x) + x = self.pre_net(x) + x.transpose_(1, 2) + x = self.cbhg(x) + if speaker_embedding is not None: + x = self.add_speaker_embedding(x, speaker_embedding) + return x + + def add_speaker_embedding(self, x, speaker_embedding): + # SV2TTS + # The input x is the encoder output and is a 3D tensor with size (batch_size, num_chars, tts_embed_dims) + # When training, speaker_embedding is also a 2D tensor with size (batch_size, speaker_embedding_size) + # (for inference, speaker_embedding is a 1D tensor with size (speaker_embedding_size)) + # This concats the speaker embedding for each char in the encoder output + + # Save the dimensions as human-readable names + batch_size = x.size()[0] + num_chars = x.size()[1] + + if speaker_embedding.dim() == 1: + idx = 0 + else: + idx = 1 + + # Start by making a copy of each speaker embedding to match the input text length + # The output of this has size (batch_size, num_chars * tts_embed_dims) + speaker_embedding_size = speaker_embedding.size()[idx] + e = speaker_embedding.repeat_interleave(num_chars, dim=idx) + + # Reshape it and transpose + e = e.reshape(batch_size, speaker_embedding_size, num_chars) + e = e.transpose(1, 2) + + # Concatenate the tiled speaker embedding with the encoder output + x = torch.cat((x, e), 2) + return x + + +class BatchNormConv(nn.Module): + def __init__(self, in_channels, out_channels, kernel, relu=True): + super().__init__() + self.conv = nn.Conv1d(in_channels, out_channels, kernel, stride=1, padding=kernel // 2, bias=False) + self.bnorm = nn.BatchNorm1d(out_channels) + self.relu = relu + + def forward(self, x): + x = self.conv(x) + x = F.relu(x) if self.relu is True else x + return self.bnorm(x) + + +class CBHG(nn.Module): + def __init__(self, K, in_channels, channels, proj_channels, num_highways): + super().__init__() + + # List of all rnns to call `flatten_parameters()` on + self._to_flatten = [] + + self.bank_kernels = [i for i in range(1, K + 1)] + self.conv1d_bank = nn.ModuleList() + for k in self.bank_kernels: + conv = BatchNormConv(in_channels, channels, k) + self.conv1d_bank.append(conv) + + self.maxpool = 
nn.MaxPool1d(kernel_size=2, stride=1, padding=1) + + self.conv_project1 = BatchNormConv(len(self.bank_kernels) * channels, proj_channels[0], 3) + self.conv_project2 = BatchNormConv(proj_channels[0], proj_channels[1], 3, relu=False) + + # Fix the highway input if necessary + if proj_channels[-1] != channels: + self.highway_mismatch = True + self.pre_highway = nn.Linear(proj_channels[-1], channels, bias=False) + else: + self.highway_mismatch = False + + self.highways = nn.ModuleList() + for i in range(num_highways): + hn = HighwayNetwork(channels) + self.highways.append(hn) + + self.rnn = nn.GRU(channels, channels // 2, batch_first=True, bidirectional=True) + self._to_flatten.append(self.rnn) + + # Avoid fragmentation of RNN parameters and associated warning + self._flatten_parameters() + + def forward(self, x): + # Although we `_flatten_parameters()` on init, when using DataParallel + # the model gets replicated, making it no longer guaranteed that the + # weights are contiguous in GPU memory. Hence, we must call it again + self._flatten_parameters() + + # Save these for later + residual = x + seq_len = x.size(-1) + conv_bank = [] + + # Convolution Bank + for conv in self.conv1d_bank: + c = conv(x) # Convolution + conv_bank.append(c[:, :, :seq_len]) + + # Stack along the channel axis + conv_bank = torch.cat(conv_bank, dim=1) + + # dump the last padding to fit residual + x = self.maxpool(conv_bank)[:, :, :seq_len] + + # Conv1d projections + x = self.conv_project1(x) + x = self.conv_project2(x) + + # Residual Connect + x = x + residual + + # Through the highways + x = x.transpose(1, 2) + if self.highway_mismatch is True: + x = self.pre_highway(x) + for h in self.highways: x = h(x) + + # And then the RNN + x, _ = self.rnn(x) + return x + + def _flatten_parameters(self): + """Calls `flatten_parameters` on all the rnns used by the WaveRNN. 
Used + to improve efficiency and avoid PyTorch yelling at us.""" + [m.flatten_parameters() for m in self._to_flatten] + +class PreNet(nn.Module): + def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5): + super().__init__() + self.fc1 = nn.Linear(in_dims, fc1_dims) + self.fc2 = nn.Linear(fc1_dims, fc2_dims) + self.p = dropout + + def forward(self, x): + x = self.fc1(x) + x = F.relu(x) + x = F.dropout(x, self.p, training=True) + x = self.fc2(x) + x = F.relu(x) + x = F.dropout(x, self.p, training=True) + return x + + +class Attention(nn.Module): + def __init__(self, attn_dims): + super().__init__() + self.W = nn.Linear(attn_dims, attn_dims, bias=False) + self.v = nn.Linear(attn_dims, 1, bias=False) + + def forward(self, encoder_seq_proj, query, t): + + # print(encoder_seq_proj.shape) + # Transform the query vector + query_proj = self.W(query).unsqueeze(1) + + # Compute the scores + u = self.v(torch.tanh(encoder_seq_proj + query_proj)) + scores = F.softmax(u, dim=1) + + return scores.transpose(1, 2) + + +class LSA(nn.Module): + def __init__(self, attn_dim, kernel_size=31, filters=32): + super().__init__() + self.conv = nn.Conv1d(1, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=True) + self.L = nn.Linear(filters, attn_dim, bias=False) + self.W = nn.Linear(attn_dim, attn_dim, bias=True) # Include the attention bias in this term + self.v = nn.Linear(attn_dim, 1, bias=False) + self.cumulative = None + self.attention = None + + def init_attention(self, encoder_seq_proj): + device = next(self.parameters()).device # use same device as parameters + b, t, c = encoder_seq_proj.size() + self.cumulative = torch.zeros(b, t, device=device) + self.attention = torch.zeros(b, t, device=device) + + def forward(self, encoder_seq_proj, query, t, chars): + + if t == 0: self.init_attention(encoder_seq_proj) + + processed_query = self.W(query).unsqueeze(1) + + location = self.cumulative.unsqueeze(1) + processed_loc = self.L(self.conv(location).transpose(1, 2)) + + u = self.v(torch.tanh(processed_query + encoder_seq_proj + processed_loc)) + u = u.squeeze(-1) + + # Mask zero padding chars + u = u * (chars != 0).float() + + # Smooth Attention + # scores = torch.sigmoid(u) / torch.sigmoid(u).sum(dim=1, keepdim=True) + scores = F.softmax(u, dim=1) + self.attention = scores + self.cumulative = self.cumulative + self.attention + + return scores.unsqueeze(-1).transpose(1, 2) + + +class Decoder(nn.Module): + # Class variable because its value doesn't change between classes + # yet ought to be scoped by class because its a property of a Decoder + max_r = 20 + def __init__(self, n_mels, encoder_dims, decoder_dims, lstm_dims, + dropout, speaker_embedding_size): + super().__init__() + self.register_buffer("r", torch.tensor(1, dtype=torch.int)) + self.n_mels = n_mels + prenet_dims = (decoder_dims * 2, decoder_dims * 2) + self.prenet = PreNet(n_mels, fc1_dims=prenet_dims[0], fc2_dims=prenet_dims[1], + dropout=dropout) + self.attn_net = LSA(decoder_dims) + self.attn_rnn = nn.GRUCell(encoder_dims + prenet_dims[1] + speaker_embedding_size, decoder_dims) + self.rnn_input = nn.Linear(encoder_dims + decoder_dims + speaker_embedding_size, lstm_dims) + self.res_rnn1 = nn.LSTMCell(lstm_dims, lstm_dims) + self.res_rnn2 = nn.LSTMCell(lstm_dims, lstm_dims) + self.mel_proj = nn.Linear(lstm_dims, n_mels * self.max_r, bias=False) + self.stop_proj = nn.Linear(encoder_dims + speaker_embedding_size + lstm_dims, 1) + + def zoneout(self, prev, current, p=0.1): + device = next(self.parameters()).device # Use 
same device as parameters + mask = torch.zeros(prev.size(), device=device).bernoulli_(p) + return prev * mask + current * (1 - mask) + + def forward(self, encoder_seq, encoder_seq_proj, prenet_in, + hidden_states, cell_states, context_vec, t, chars): + + # Need this for reshaping mels + batch_size = encoder_seq.size(0) + + # Unpack the hidden and cell states + attn_hidden, rnn1_hidden, rnn2_hidden = hidden_states + rnn1_cell, rnn2_cell = cell_states + + # PreNet for the Attention RNN + prenet_out = self.prenet(prenet_in) + + # Compute the Attention RNN hidden state + attn_rnn_in = torch.cat([context_vec, prenet_out], dim=-1) + attn_hidden = self.attn_rnn(attn_rnn_in.squeeze(1), attn_hidden) + + # Compute the attention scores + scores = self.attn_net(encoder_seq_proj, attn_hidden, t, chars) + + # Dot product to create the context vector + context_vec = scores @ encoder_seq + context_vec = context_vec.squeeze(1) + + # Concat Attention RNN output w. Context Vector & project + x = torch.cat([context_vec, attn_hidden], dim=1) + x = self.rnn_input(x) + + # Compute first Residual RNN + rnn1_hidden_next, rnn1_cell = self.res_rnn1(x, (rnn1_hidden, rnn1_cell)) + if self.training: + rnn1_hidden = self.zoneout(rnn1_hidden, rnn1_hidden_next) + else: + rnn1_hidden = rnn1_hidden_next + x = x + rnn1_hidden + + # Compute second Residual RNN + rnn2_hidden_next, rnn2_cell = self.res_rnn2(x, (rnn2_hidden, rnn2_cell)) + if self.training: + rnn2_hidden = self.zoneout(rnn2_hidden, rnn2_hidden_next) + else: + rnn2_hidden = rnn2_hidden_next + x = x + rnn2_hidden + + # Project Mels + mels = self.mel_proj(x) + mels = mels.view(batch_size, self.n_mels, self.max_r)[:, :, :self.r] + hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden) + cell_states = (rnn1_cell, rnn2_cell) + + # Stop token prediction + s = torch.cat((x, context_vec), dim=1) + s = self.stop_proj(s) + stop_tokens = torch.sigmoid(s) + + return mels, scores, hidden_states, cell_states, context_vec, stop_tokens + + +class Tacotron(nn.Module): + def __init__(self, embed_dims, num_chars, encoder_dims, decoder_dims, n_mels, + fft_bins, postnet_dims, encoder_K, lstm_dims, postnet_K, num_highways, + dropout, stop_threshold, speaker_embedding_size): + super().__init__() + self.n_mels = n_mels + self.lstm_dims = lstm_dims + self.encoder_dims = encoder_dims + self.decoder_dims = decoder_dims + self.speaker_embedding_size = speaker_embedding_size + self.encoder = Encoder(embed_dims, num_chars, encoder_dims, + encoder_K, num_highways, dropout) + self.encoder_proj = nn.Linear(encoder_dims + speaker_embedding_size, decoder_dims, bias=False) + self.decoder = Decoder(n_mels, encoder_dims, decoder_dims, lstm_dims, + dropout, speaker_embedding_size) + self.postnet = CBHG(postnet_K, n_mels, postnet_dims, + [postnet_dims, fft_bins], num_highways) + self.post_proj = nn.Linear(postnet_dims, fft_bins, bias=False) + + self.init_model() + self.num_params() + + self.register_buffer("step", torch.zeros(1, dtype=torch.long)) + self.register_buffer("stop_threshold", torch.tensor(stop_threshold, dtype=torch.float32)) + + @property + def r(self): + return self.decoder.r.item() + + @r.setter + def r(self, value): + self.decoder.r = self.decoder.r.new_tensor(value, requires_grad=False) + + def forward(self, x, m, speaker_embedding): + device = next(self.parameters()).device # use same device as parameters + + self.step += 1 + batch_size, _, steps = m.size() + + # Initialise all hidden states and pack into tuple + attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device) 
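+        # attn_hidden is (batch_size, decoder_dims); the residual LSTM states below are
+        # (batch_size, lstm_dims) each.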
+ rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) + rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) + hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden) + + # Initialise all lstm cell states and pack into tuple + rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device) + rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device) + cell_states = (rnn1_cell, rnn2_cell) + + # Frame for start of decoder loop + go_frame = torch.zeros(batch_size, self.n_mels, device=device) + + # Need an initial context vector + context_vec = torch.zeros(batch_size, self.encoder_dims + self.speaker_embedding_size, device=device) + + # SV2TTS: Run the encoder with the speaker embedding + # The projection avoids unnecessary matmuls in the decoder loop + encoder_seq = self.encoder(x, speaker_embedding) + encoder_seq_proj = self.encoder_proj(encoder_seq) + + # Need a couple of lists for outputs + mel_outputs, attn_scores, stop_outputs = [], [], [] + + # Run the decoder loop + for t in range(0, steps, self.r): + prenet_in = m[:, :, t - 1] if t > 0 else go_frame + mel_frames, scores, hidden_states, cell_states, context_vec, stop_tokens = \ + self.decoder(encoder_seq, encoder_seq_proj, prenet_in, + hidden_states, cell_states, context_vec, t, x) + mel_outputs.append(mel_frames) + attn_scores.append(scores) + stop_outputs.extend([stop_tokens] * self.r) + + # Concat the mel outputs into sequence + mel_outputs = torch.cat(mel_outputs, dim=2) + + # Post-Process for Linear Spectrograms + postnet_out = self.postnet(mel_outputs) + linear = self.post_proj(postnet_out) + linear = linear.transpose(1, 2) + + # For easy visualisation + attn_scores = torch.cat(attn_scores, 1) + # attn_scores = attn_scores.cpu().data.numpy() + stop_outputs = torch.cat(stop_outputs, 1) + + return mel_outputs, linear, attn_scores, stop_outputs + + def generate(self, x, speaker_embedding=None, steps=2000): + self.eval() + device = next(self.parameters()).device # use same device as parameters + + batch_size, _ = x.size() + + # Need to initialise all hidden states and pack into tuple for tidyness + attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device) + rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) + rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) + hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden) + + # Need to initialise all lstm cell states and pack into tuple for tidyness + rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device) + rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device) + cell_states = (rnn1_cell, rnn2_cell) + + # Need a Frame for start of decoder loop + go_frame = torch.zeros(batch_size, self.n_mels, device=device) + + # Need an initial context vector + context_vec = torch.zeros(batch_size, self.encoder_dims + self.speaker_embedding_size, device=device) + + # SV2TTS: Run the encoder with the speaker embedding + # The projection avoids unnecessary matmuls in the decoder loop + encoder_seq = self.encoder(x, speaker_embedding) + encoder_seq_proj = self.encoder_proj(encoder_seq) + + # Need a couple of lists for outputs + mel_outputs, attn_scores, stop_outputs = [], [], [] + + # Run the decoder loop + for t in range(0, steps, self.r): + prenet_in = mel_outputs[-1][:, :, -1] if t > 0 else go_frame + mel_frames, scores, hidden_states, cell_states, context_vec, stop_tokens = \ + self.decoder(encoder_seq, encoder_seq_proj, prenet_in, + hidden_states, cell_states, context_vec, t, x) + 
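+            # Each decoder call emits r mel frames at once: mel_frames is (batch_size, n_mels, r)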
mel_outputs.append(mel_frames) + attn_scores.append(scores) + stop_outputs.extend([stop_tokens] * self.r) + # Stop the loop when all stop tokens in batch exceed threshold + if (stop_tokens > 0.5).all() and t > 10: break + + # Concat the mel outputs into sequence + mel_outputs = torch.cat(mel_outputs, dim=2) + + # Post-Process for Linear Spectrograms + postnet_out = self.postnet(mel_outputs) + linear = self.post_proj(postnet_out) + + + linear = linear.transpose(1, 2) + + # For easy visualisation + attn_scores = torch.cat(attn_scores, 1) + stop_outputs = torch.cat(stop_outputs, 1) + + self.train() + + return mel_outputs, linear, attn_scores + + def init_model(self): + for p in self.parameters(): + if p.dim() > 1: nn.init.xavier_uniform_(p) + + def get_step(self): + return self.step.data.item() + + def reset_step(self): + # assignment to parameters or buffers is overloaded, updates internal dict entry + self.step = self.step.data.new_tensor(1) + + def log(self, path, msg): + with open(path, "a") as f: + print(msg, file=f) + + def load(self, path, optimizer=None): + # Use device of model params as location for loaded state + device = next(self.parameters()).device + checkpoint = torch.load(str(path), map_location=device) + self.load_state_dict(checkpoint["model_state"]) + + if "optimizer_state" in checkpoint and optimizer is not None: + optimizer.load_state_dict(checkpoint["optimizer_state"]) + + def save(self, path, optimizer=None): + if optimizer is not None: + torch.save({ + "model_state": self.state_dict(), + "optimizer_state": optimizer.state_dict(), + }, str(path)) + else: + torch.save({ + "model_state": self.state_dict(), + }, str(path)) + + + def num_params(self, print_out=True): + parameters = filter(lambda p: p.requires_grad, self.parameters()) + parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 + if print_out: + print("Trainable Parameters: %.3fM" % parameters) + return parameters diff --git a/synthesizer/preprocess.py b/synthesizer/preprocess.py new file mode 100644 index 0000000..29ac298 --- /dev/null +++ b/synthesizer/preprocess.py @@ -0,0 +1,358 @@ +from multiprocessing.pool import Pool +from synthesizer import audio +from functools import partial +from itertools import chain +from encoder import inference as encoder +from pathlib import Path +from utils import logmmse +from tqdm import tqdm +import numpy as np +import librosa +import platform +from pypinyin import Style +from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin +from pypinyin.converter import DefaultConverter +from pypinyin.core import Pinyin + +class PinyinConverter(NeutralToneWith5Mixin, DefaultConverter): + pass + +pinyin = Pinyin(PinyinConverter()).pinyin + + +def preprocess_dataset(datasets_root: Path, out_dir: Path, n_processes: int, + skip_existing: bool, hparams, no_alignments: bool, + datasets_name: str, subfolders: str): + # Gather the input directories + dataset_root = datasets_root.joinpath(datasets_name) + input_dirs = [dataset_root.joinpath(subfolder.strip()) for subfolder in subfolders.split(",")] + print("\n ".join(map(str, ["Using data from:"] + input_dirs))) + assert all(input_dir.exists() for input_dir in input_dirs) + + # Create the output directories for each output file type + out_dir.joinpath("mels").mkdir(exist_ok=True) + out_dir.joinpath("audio").mkdir(exist_ok=True) + + # Create a metadata file + metadata_fpath = out_dir.joinpath("train.txt") + metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8") + + # Preprocess the dataset + 
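+    # Each top-level folder of an input directory is treated as one speaker and handed to a
+    # worker process; every utterance a worker returns becomes one "|"-separated line of train.txt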
speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs)) + func = partial(preprocess_speaker, out_dir=out_dir, skip_existing=skip_existing, + hparams=hparams, no_alignments=no_alignments) + job = Pool(n_processes).imap(func, speaker_dirs) + for speaker_metadata in tqdm(job, datasets_name, len(speaker_dirs), unit="speakers"): + for metadatum in speaker_metadata: + metadata_file.write("|".join(str(x) for x in metadatum) + "\n") + metadata_file.close() + + # Verify the contents of the metadata file + with metadata_fpath.open("r", encoding="utf-8") as metadata_file: + metadata = [line.split("|") for line in metadata_file] + mel_frames = sum([int(m[4]) for m in metadata]) + timesteps = sum([int(m[3]) for m in metadata]) + sample_rate = hparams.sample_rate + hours = (timesteps / sample_rate) / 3600 + print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." % + (len(metadata), mel_frames, timesteps, hours)) + print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata)) + print("Max mel frames length: %d" % max(int(m[4]) for m in metadata)) + print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata)) + + +def preprocess_speaker(speaker_dir, out_dir: Path, skip_existing: bool, hparams, no_alignments: bool): + metadata = [] + for book_dir in speaker_dir.glob("*"): + if no_alignments: + # Gather the utterance audios and texts + # LibriTTS uses .wav but we will include extensions for compatibility with other datasets + extensions = ["*.wav", "*.flac", "*.mp3"] + for extension in extensions: + wav_fpaths = book_dir.glob(extension) + + for wav_fpath in wav_fpaths: + # Load the audio waveform + wav, _ = librosa.load(str(wav_fpath), hparams.sample_rate) + if hparams.rescale: + wav = wav / np.abs(wav).max() * hparams.rescaling_max + + # Get the corresponding text + # Check for .txt (for compatibility with other datasets) + text_fpath = wav_fpath.with_suffix(".txt") + if not text_fpath.exists(): + # Check for .normalized.txt (LibriTTS) + text_fpath = wav_fpath.with_suffix(".normalized.txt") + assert text_fpath.exists() + with text_fpath.open("r") as text_file: + text = "".join([line for line in text_file]) + text = text.replace("\"", "") + text = text.strip() + + # Process the utterance + metadata.append(process_utterance(wav, text, out_dir, str(wav_fpath.with_suffix("").name), + skip_existing, hparams)) + else: + # Process alignment file (LibriSpeech support) + # Gather the utterance audios and texts + try: + alignments_fpath = next(book_dir.glob("*.alignment.txt")) + with alignments_fpath.open("r") as alignments_file: + alignments = [line.rstrip().split(" ") for line in alignments_file] + except StopIteration: + # A few alignment files will be missing + continue + + # Iterate over each entry in the alignments file + for wav_fname, words, end_times in alignments: + wav_fpath = book_dir.joinpath(wav_fname + ".flac") + assert wav_fpath.exists() + words = words.replace("\"", "").split(",") + end_times = list(map(float, end_times.replace("\"", "").split(","))) + + # Process each sub-utterance + wavs, texts = split_on_silences(wav_fpath, words, end_times, hparams) + for i, (wav, text) in enumerate(zip(wavs, texts)): + sub_basename = "%s_%02d" % (wav_fname, i) + metadata.append(process_utterance(wav, text, out_dir, sub_basename, + skip_existing, hparams)) + + return [m for m in metadata if m is not None] + + +def split_on_silences(wav_fpath, words, end_times, hparams): + # Load the audio waveform + wav, _ = 
librosa.load(str(wav_fpath), hparams.sample_rate) + if hparams.rescale: + wav = wav / np.abs(wav).max() * hparams.rescaling_max + + words = np.array(words) + start_times = np.array([0.0] + end_times[:-1]) + end_times = np.array(end_times) + assert len(words) == len(end_times) == len(start_times) + assert words[0] == "" and words[-1] == "" + + # Find pauses that are too long + mask = (words == "") & (end_times - start_times >= hparams.silence_min_duration_split) + mask[0] = mask[-1] = True + breaks = np.where(mask)[0] + + # Profile the noise from the silences and perform noise reduction on the waveform + silence_times = [[start_times[i], end_times[i]] for i in breaks] + silence_times = (np.array(silence_times) * hparams.sample_rate).astype(np.int) + noisy_wav = np.concatenate([wav[stime[0]:stime[1]] for stime in silence_times]) + if len(noisy_wav) > hparams.sample_rate * 0.02: + profile = logmmse.profile_noise(noisy_wav, hparams.sample_rate) + wav = logmmse.denoise(wav, profile, eta=0) + + # Re-attach segments that are too short + segments = list(zip(breaks[:-1], breaks[1:])) + segment_durations = [start_times[end] - end_times[start] for start, end in segments] + i = 0 + while i < len(segments) and len(segments) > 1: + if segment_durations[i] < hparams.utterance_min_duration: + # See if the segment can be re-attached with the right or the left segment + left_duration = float("inf") if i == 0 else segment_durations[i - 1] + right_duration = float("inf") if i == len(segments) - 1 else segment_durations[i + 1] + joined_duration = segment_durations[i] + min(left_duration, right_duration) + + # Do not re-attach if it causes the joined utterance to be too long + if joined_duration > hparams.hop_size * hparams.max_mel_frames / hparams.sample_rate: + i += 1 + continue + + # Re-attach the segment with the neighbour of shortest duration + j = i - 1 if left_duration <= right_duration else i + segments[j] = (segments[j][0], segments[j + 1][1]) + segment_durations[j] = joined_duration + del segments[j + 1], segment_durations[j + 1] + else: + i += 1 + + # Split the utterance + segment_times = [[end_times[start], start_times[end]] for start, end in segments] + segment_times = (np.array(segment_times) * hparams.sample_rate).astype(np.int) + wavs = [wav[segment_time[0]:segment_time[1]] for segment_time in segment_times] + texts = [" ".join(words[start + 1:end]).replace(" ", " ") for start, end in segments] + + # # DEBUG: play the audio segments (run with -n=1) + # import sounddevice as sd + # if len(wavs) > 1: + # print("This sentence was split in %d segments:" % len(wavs)) + # else: + # print("There are no silences long enough for this sentence to be split:") + # for wav, text in zip(wavs, texts): + # # Pad the waveform with 1 second of silence because sounddevice tends to cut them early + # # when playing them. You shouldn't need to do that in your parsers. + # wav = np.concatenate((wav, [0] * 16000)) + # print("\t%s" % text) + # sd.play(wav, 16000, blocking=True) + # print("") + + return wavs, texts + + +def process_utterance(wav: np.ndarray, text: str, out_dir: Path, basename: str, + skip_existing: bool, hparams): + ## FOR REFERENCE: + # For you not to lose your head if you ever wish to change things here or implement your own + # synthesizer. 
+ # - Both the audios and the mel spectrograms are saved as numpy arrays + # - There is no processing done to the audios that will be saved to disk beyond volume + # normalization (in split_on_silences) + # - However, pre-emphasis is applied to the audios before computing the mel spectrogram. This + # is why we re-apply it on the audio on the side of the vocoder. + # - Librosa pads the waveform before computing the mel spectrogram. Here, the waveform is saved + # without extra padding. This means that you won't have an exact relation between the length + # of the wav and of the mel spectrogram. See the vocoder data loader. + + + # Skip existing utterances if needed + mel_fpath = out_dir.joinpath("mels", "mel-%s.npy" % basename) + wav_fpath = out_dir.joinpath("audio", "audio-%s.npy" % basename) + if skip_existing and mel_fpath.exists() and wav_fpath.exists(): + return None + + # Trim silence + if hparams.trim_silence: + wav = encoder.preprocess_wav(wav, normalize=False, trim_silence=True) + + # Skip utterances that are too short + if len(wav) < hparams.utterance_min_duration * hparams.sample_rate: + return None + + # Compute the mel spectrogram + mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) + mel_frames = mel_spectrogram.shape[1] + + # Skip utterances that are too long + if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length: + return None + + # Write the spectrogram, embed and audio to disk + np.save(mel_fpath, mel_spectrogram.T, allow_pickle=False) + np.save(wav_fpath, wav, allow_pickle=False) + + # Return a tuple describing this training example + return wav_fpath.name, mel_fpath.name, "embed-%s.npy" % basename, len(wav), mel_frames, text + + +def embed_utterance(fpaths, encoder_model_fpath): + if not encoder.is_loaded(): + encoder.load_model(encoder_model_fpath) + + # Compute the speaker embedding of the utterance + wav_fpath, embed_fpath = fpaths + wav = np.load(wav_fpath) + wav = encoder.preprocess_wav(wav) + embed = encoder.embed_utterance(wav) + np.save(embed_fpath, embed, allow_pickle=False) + + +def create_embeddings(synthesizer_root: Path, encoder_model_fpath: Path, n_processes: int): + wav_dir = synthesizer_root.joinpath("audio") + metadata_fpath = synthesizer_root.joinpath("train.txt") + assert wav_dir.exists() and metadata_fpath.exists() + embed_dir = synthesizer_root.joinpath("embeds") + embed_dir.mkdir(exist_ok=True) + + # Gather the input wave filepath and the target output embed filepath + with metadata_fpath.open("r", encoding="utf-8") as metadata_file: + metadata = [line.split("|") for line in metadata_file] + fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata] + + # TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here. 
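+    # (Each job loads a preprocessed wav from disk, runs it through the speaker encoder and
+    # writes the resulting embedding back to disk, hence the disk-bound behaviour noted above.)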
+ # Embed the utterances in separate threads + func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath) + job = Pool(n_processes).imap(func, fpaths) + list(tqdm(job, "Embedding", len(fpaths), unit="utterances")) + +# aidatatang_200zh +def preprocess_aidatatang_200zh(datasets_root: Path, out_dir: Path, n_processes: int, + skip_existing: bool, hparams, no_alignments: bool, datasets_name=None, subfolders=None): + # Gather the input directories + dataset_root = datasets_root.joinpath("aidatatang_200zh") + + dict_info = {} + transcript_dirs = dataset_root.joinpath("transcript/aidatatang_200_zh_transcript.txt") + with open(transcript_dirs,"rb") as fp: + dict_transcript = [v.decode() for v in fp] + + for v in dict_transcript: + if not v: + continue + v = v.strip().replace("\n","").split(" ") + dict_info[v[0]] = " ".join(v[1:]) + + input_dirs = [dataset_root.joinpath("corpus/train")] + print("\n ".join(map(str, ["Using data from:"] + input_dirs))) + assert all(input_dir.exists() for input_dir in input_dirs) + + # Create the output directories for each output file type + out_dir.joinpath("mels").mkdir(exist_ok=True) + out_dir.joinpath("audio").mkdir(exist_ok=True) + + # Create a metadata file + metadata_fpath = out_dir.joinpath("train.txt") + metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8") + + # Preprocess the dataset + speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs)) + func = partial(preprocess_speaker_aidatatang_200zh, out_dir=out_dir, skip_existing=skip_existing, + hparams=hparams, dict_info=dict_info, no_alignments=no_alignments) + job = Pool(n_processes).imap(func, speaker_dirs) + for speaker_metadata in tqdm(job, "aidatatang_200zh", len(speaker_dirs), unit="speakers"): + for metadatum in speaker_metadata: + metadata_file.write("|".join(str(x) for x in metadatum) + "\n") + metadata_file.close() + + # Verify the contents of the metadata file + with metadata_fpath.open("r", encoding="utf-8") as metadata_file: + metadata = [line.split("|") for line in metadata_file] + mel_frames = sum([int(m[4]) for m in metadata]) + timesteps = sum([int(m[3]) for m in metadata]) + sample_rate = hparams.sample_rate + hours = (timesteps / sample_rate) / 3600 + print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." 
% + (len(metadata), mel_frames, timesteps, hours)) + print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata)) + print("Max mel frames length: %d" % max(int(m[4]) for m in metadata)) + print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata)) + +def preprocess_speaker_aidatatang_200zh(speaker_dir, out_dir: Path, skip_existing: bool, hparams, dict_info, no_alignments: bool): + metadata = [] + if platform.system() == "Windows": + split = "\\" + else: + split = "/" + # for book_dir in speaker_dir.glob("*"): + # Gather the utterance audios and texts + + for wav_fpath in speaker_dir.glob("*.wav"): + # D:\dataset\data_aishell\wav\train\S0002\BAC009S0002W0122.wav + + # Process each sub-utterance + + name = str(wav_fpath).split(split)[-1] + key = name.split(".")[0] + words = dict_info.get(key) + if not words: + continue + sub_basename = "%s_%02d" % (name, 0) + wav, text = split_on_silences_aidatatang_200zh(wav_fpath, words, hparams) + metadata.append(process_utterance(wav, text, out_dir, sub_basename, + skip_existing, hparams)) + + return [m for m in metadata if m is not None] + +def split_on_silences_aidatatang_200zh(wav_fpath, words, hparams): + # Load the audio waveform + wav, _ = librosa.load(wav_fpath, hparams.sample_rate) + wav = librosa.effects.trim(wav, top_db= 40, frame_length=2048, hop_length=512)[0] + if hparams.rescale: + wav = wav / np.abs(wav).max() * hparams.rescaling_max + resp = pinyin(words, style=Style.TONE3) + res = [v[0] for v in resp if v[0].strip()] + res = " ".join(res) + + return wav, res \ No newline at end of file diff --git a/synthesizer/synthesize.py b/synthesizer/synthesize.py new file mode 100644 index 0000000..ff05d0e --- /dev/null +++ b/synthesizer/synthesize.py @@ -0,0 +1,97 @@ +import torch +from torch.utils.data import DataLoader +from synthesizer.hparams import hparams_debug_string +from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer +from synthesizer.models.tacotron import Tacotron +from synthesizer.utils.text import text_to_sequence +from synthesizer.utils.symbols import symbols +import numpy as np +from pathlib import Path +from tqdm import tqdm + + +def run_synthesis(in_dir, out_dir, model_dir, hparams): + # This generates ground truth-aligned mels for vocoder training + synth_dir = Path(out_dir).joinpath("mels_gta") + synth_dir.mkdir(exist_ok=True) + print(hparams_debug_string(hparams)) + + # Check for GPU + if torch.cuda.is_available(): + device = torch.device("cuda") + if hparams.synthesis_batch_size % torch.cuda.device_count() != 0: + raise ValueError("`hparams.synthesis_batch_size` must be evenly divisible by n_gpus!") + else: + device = torch.device("cpu") + print("Synthesizer using device:", device) + + # Instantiate Tacotron model + model = Tacotron(embed_dims=hparams.tts_embed_dims, + num_chars=len(symbols), + encoder_dims=hparams.tts_encoder_dims, + decoder_dims=hparams.tts_decoder_dims, + n_mels=hparams.num_mels, + fft_bins=hparams.num_mels, + postnet_dims=hparams.tts_postnet_dims, + encoder_K=hparams.tts_encoder_K, + lstm_dims=hparams.tts_lstm_dims, + postnet_K=hparams.tts_postnet_K, + num_highways=hparams.tts_num_highways, + dropout=0., # Use zero dropout for gta mels + stop_threshold=hparams.tts_stop_threshold, + speaker_embedding_size=hparams.speaker_embedding_size).to(device) + + # Load the weights + model_dir = Path(model_dir) + model_fpath = model_dir.joinpath(model_dir.stem).with_suffix(".pt") + print("\nLoading weights at %s" % model_fpath) + model.load(model_fpath) + 
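+    # Only the model weights are needed for ground truth-aligned (GTA) synthesis; any optimizer
+    # state stored in the checkpoint is ignored here.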
print("Tacotron weights loaded from step %d" % model.step) + + # Synthesize using same reduction factor as the model is currently trained + r = np.int32(model.r) + + # Set model to eval mode (disable gradient and zoneout) + model.eval() + + # Initialize the dataset + in_dir = Path(in_dir) + metadata_fpath = in_dir.joinpath("train.txt") + mel_dir = in_dir.joinpath("mels") + embed_dir = in_dir.joinpath("embeds") + + dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams) + data_loader = DataLoader(dataset, + collate_fn=lambda batch: collate_synthesizer(batch, r), + batch_size=hparams.synthesis_batch_size, + num_workers=2, + shuffle=False, + pin_memory=True) + + # Generate GTA mels + meta_out_fpath = Path(out_dir).joinpath("synthesized.txt") + with open(meta_out_fpath, "w") as file: + for i, (texts, mels, embeds, idx) in tqdm(enumerate(data_loader), total=len(data_loader)): + texts = texts.to(device) + mels = mels.to(device) + embeds = embeds.to(device) + + # Parallelize model onto GPUS using workaround due to python bug + if device.type == "cuda" and torch.cuda.device_count() > 1: + _, mels_out, _ = data_parallel_workaround(model, texts, mels, embeds) + else: + _, mels_out, _ = model(texts, mels, embeds) + + for j, k in enumerate(idx): + # Note: outputs mel-spectrogram files and target ones have same names, just different folders + mel_filename = Path(synth_dir).joinpath(dataset.metadata[k][1]) + mel_out = mels_out[j].detach().cpu().numpy().T + + # Use the length of the ground truth mel to remove padding from the generated mels + mel_out = mel_out[:int(dataset.metadata[k][4])] + + # Write the spectrogram to disk + np.save(mel_filename, mel_out, allow_pickle=False) + + # Write metadata into the synthesized file + file.write("|".join(dataset.metadata[k])) diff --git a/synthesizer/synthesizer_dataset.py b/synthesizer/synthesizer_dataset.py new file mode 100644 index 0000000..975cb60 --- /dev/null +++ b/synthesizer/synthesizer_dataset.py @@ -0,0 +1,92 @@ +import torch +from torch.utils.data import Dataset +import numpy as np +from pathlib import Path +from synthesizer.utils.text import text_to_sequence + + +class SynthesizerDataset(Dataset): + def __init__(self, metadata_fpath: Path, mel_dir: Path, embed_dir: Path, hparams): + print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, embed_dir)) + + with metadata_fpath.open("r", encoding="utf-8") as metadata_file: + metadata = [line.split("|") for line in metadata_file] + + mel_fnames = [x[1] for x in metadata if int(x[4])] + mel_fpaths = [mel_dir.joinpath(fname) for fname in mel_fnames] + embed_fnames = [x[2] for x in metadata if int(x[4])] + embed_fpaths = [embed_dir.joinpath(fname) for fname in embed_fnames] + self.samples_fpaths = list(zip(mel_fpaths, embed_fpaths)) + self.samples_texts = [x[5].strip() for x in metadata if int(x[4])] + self.metadata = metadata + self.hparams = hparams + + print("Found %d samples" % len(self.samples_fpaths)) + + def __getitem__(self, index): + # Sometimes index may be a list of 2 (not sure why this happens) + # If that is the case, return a single item corresponding to first element in index + if index is list: + index = index[0] + + mel_path, embed_path = self.samples_fpaths[index] + mel = np.load(mel_path).T.astype(np.float32) + + # Load the embed + embed = np.load(embed_path) + + # Get the text and clean it + text = text_to_sequence(self.samples_texts[index], self.hparams.tts_cleaner_names) + + # Convert the list returned by text_to_sequence to a numpy array + text = 
np.asarray(text).astype(np.int32) + + return text, mel.astype(np.float32), embed.astype(np.float32), index + + def __len__(self): + return len(self.samples_fpaths) + + +def collate_synthesizer(batch): + # Text + x_lens = [len(x[0]) for x in batch] + max_x_len = max(x_lens) + + chars = [pad1d(x[0], max_x_len) for x in batch] + chars = np.stack(chars) + + # Mel spectrogram + spec_lens = [x[1].shape[-1] for x in batch] + max_spec_len = max(spec_lens) + 1 + if max_spec_len % 2 != 0: # FIXIT: Hardcoded due to incompatibility with Windows (no lambda) + max_spec_len += 2 - max_spec_len % 2 + + # WaveRNN mel spectrograms are normalized to [0, 1] so zero padding adds silence + # By default, SV2TTS uses symmetric mels, where -1*max_abs_value is silence. + # if hparams.symmetric_mels: + # mel_pad_value = -1 * hparams.max_abs_value + # else: + # mel_pad_value = 0 + mel_pad_value = -4 # FIXIT: Hardcoded due to incompatibility with Windows (no lambda) + mel = [pad2d(x[1], max_spec_len, pad_value=mel_pad_value) for x in batch] + mel = np.stack(mel) + + # Speaker embedding (SV2TTS) + embeds = [x[2] for x in batch] + + # Index (for vocoder preprocessing) + indices = [x[3] for x in batch] + + + # Convert all to tensor + chars = torch.tensor(chars).long() + mel = torch.tensor(mel) + embeds = torch.tensor(embeds) + + return chars, mel, embeds, indices + +def pad1d(x, max_len, pad_value=0): + return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) + +def pad2d(x, max_len, pad_value=0): + return np.pad(x, ((0, 0), (0, max_len - x.shape[-1])), mode="constant", constant_values=pad_value) diff --git a/synthesizer/train.py b/synthesizer/train.py new file mode 100644 index 0000000..fc385ef --- /dev/null +++ b/synthesizer/train.py @@ -0,0 +1,268 @@ +import torch +import torch.nn.functional as F +from torch import optim +from torch.utils.data import DataLoader +from synthesizer import audio +from synthesizer.models.tacotron import Tacotron +from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer +from synthesizer.utils import ValueWindow, data_parallel_workaround +from synthesizer.utils.plot import plot_spectrogram +from synthesizer.utils.symbols import symbols +from synthesizer.utils.text import sequence_to_text +from vocoder.display import * +from datetime import datetime +import numpy as np +from pathlib import Path +import sys +import time + + +def np_now(x: torch.Tensor): return x.detach().cpu().numpy() + +def time_string(): + return datetime.now().strftime("%Y-%m-%d %H:%M") + +def train(run_id: str, syn_dir: str, models_dir: str, save_every: int, + backup_every: int, force_restart:bool, hparams): + + syn_dir = Path(syn_dir) + models_dir = Path(models_dir) + models_dir.mkdir(exist_ok=True) + + model_dir = models_dir.joinpath(run_id) + plot_dir = model_dir.joinpath("plots") + wav_dir = model_dir.joinpath("wavs") + mel_output_dir = model_dir.joinpath("mel-spectrograms") + meta_folder = model_dir.joinpath("metas") + model_dir.mkdir(exist_ok=True) + plot_dir.mkdir(exist_ok=True) + wav_dir.mkdir(exist_ok=True) + mel_output_dir.mkdir(exist_ok=True) + meta_folder.mkdir(exist_ok=True) + + weights_fpath = model_dir.joinpath(run_id).with_suffix(".pt") + metadata_fpath = syn_dir.joinpath("train.txt") + + print("Checkpoint path: {}".format(weights_fpath)) + print("Loading training data from: {}".format(metadata_fpath)) + print("Using model: Tacotron") + + # Book keeping + step = 0 + time_window = ValueWindow(100) + loss_window = ValueWindow(100) + + + # From 
WaveRNN/train_tacotron.py + if torch.cuda.is_available(): + device = torch.device("cuda") + + for session in hparams.tts_schedule: + _, _, _, batch_size = session + if batch_size % torch.cuda.device_count() != 0: + raise ValueError("`batch_size` must be evenly divisible by n_gpus!") + else: + device = torch.device("cpu") + print("Using device:", device) + + # Instantiate Tacotron Model + print("\nInitialising Tacotron Model...\n") + model = Tacotron(embed_dims=hparams.tts_embed_dims, + num_chars=len(symbols), + encoder_dims=hparams.tts_encoder_dims, + decoder_dims=hparams.tts_decoder_dims, + n_mels=hparams.num_mels, + fft_bins=hparams.num_mels, + postnet_dims=hparams.tts_postnet_dims, + encoder_K=hparams.tts_encoder_K, + lstm_dims=hparams.tts_lstm_dims, + postnet_K=hparams.tts_postnet_K, + num_highways=hparams.tts_num_highways, + dropout=hparams.tts_dropout, + stop_threshold=hparams.tts_stop_threshold, + speaker_embedding_size=hparams.speaker_embedding_size).to(device) + + # Initialize the optimizer + optimizer = optim.Adam(model.parameters()) + + # Load the weights + if force_restart or not weights_fpath.exists(): + print("\nStarting the training of Tacotron from scratch\n") + model.save(weights_fpath) + + # Embeddings metadata + char_embedding_fpath = meta_folder.joinpath("CharacterEmbeddings.tsv") + with open(char_embedding_fpath, "w", encoding="utf-8") as f: + for symbol in symbols: + if symbol == " ": + symbol = "\\s" # For visual purposes, swap space with \s + + f.write("{}\n".format(symbol)) + + else: + print("\nLoading weights at %s" % weights_fpath) + model.load(weights_fpath, optimizer) + print("Tacotron weights loaded from step %d" % model.step) + + # Initialize the dataset + metadata_fpath = syn_dir.joinpath("train.txt") + mel_dir = syn_dir.joinpath("mels") + embed_dir = syn_dir.joinpath("embeds") + dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams) + test_loader = DataLoader(dataset, + batch_size=1, + shuffle=True, + pin_memory=True) + + for i, session in enumerate(hparams.tts_schedule): + current_step = model.get_step() + + r, lr, max_step, batch_size = session + + training_steps = max_step - current_step + + # Do we need to change to the next session? + if current_step >= max_step: + # Are there no further sessions than the current one? + if i == len(hparams.tts_schedule) - 1: + # We have completed training. 
Save the model and exit + model.save(weights_fpath, optimizer) + break + else: + # There is a following session, go to it + continue + + model.r = r + + # Begin the training + simple_table([(f"Steps with r={r}", str(training_steps // 1000) + "k Steps"), + ("Batch Size", batch_size), + ("Learning Rate", lr), + ("Outputs/Step (r)", model.r)]) + + for p in optimizer.param_groups: + p["lr"] = lr + + data_loader = DataLoader(dataset, + collate_fn=collate_synthesizer, + batch_size=batch_size, #change if you got graphic card OOM + num_workers=2, + shuffle=True, + pin_memory=True) + + total_iters = len(dataset) + steps_per_epoch = np.ceil(total_iters / batch_size).astype(np.int32) + epochs = np.ceil(training_steps / steps_per_epoch).astype(np.int32) + + for epoch in range(1, epochs+1): + for i, (texts, mels, embeds, idx) in enumerate(data_loader, 1): + start_time = time.time() + + # Generate stop tokens for training + stop = torch.ones(mels.shape[0], mels.shape[2]) + for j, k in enumerate(idx): + stop[j, :int(dataset.metadata[k][4])-1] = 0 + + texts = texts.to(device) + mels = mels.to(device) + embeds = embeds.to(device) + stop = stop.to(device) + + # Forward pass + # Parallelize model onto GPUS using workaround due to python bug + if device.type == "cuda" and torch.cuda.device_count() > 1: + m1_hat, m2_hat, attention, stop_pred = data_parallel_workaround(model, texts, + mels, embeds) + else: + m1_hat, m2_hat, attention, stop_pred = model(texts, mels, embeds) + + # Backward pass + m1_loss = F.mse_loss(m1_hat, mels) + F.l1_loss(m1_hat, mels) + m2_loss = F.mse_loss(m2_hat, mels) + stop_loss = F.binary_cross_entropy(stop_pred, stop) + + loss = m1_loss + m2_loss + stop_loss + + optimizer.zero_grad() + loss.backward() + + if hparams.tts_clip_grad_norm is not None: + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.tts_clip_grad_norm) + if np.isnan(grad_norm.cpu()): + print("grad_norm was NaN!") + + optimizer.step() + + time_window.append(time.time() - start_time) + loss_window.append(loss.item()) + + step = model.get_step() + k = step // 1000 + + msg = f"| Epoch: {epoch}/{epochs} ({i}/{steps_per_epoch}) | Loss: {loss_window.average:#.4} | {1./time_window.average:#.2} steps/s | Step: {k}k | " + stream(msg) + + # Backup or save model as appropriate + if backup_every != 0 and step % backup_every == 0 : + backup_fpath = Path("{}/{}_{}k.pt".format(str(weights_fpath.parent), run_id, k)) + model.save(backup_fpath, optimizer) + + if save_every != 0 and step % save_every == 0 : + # Must save latest optimizer state to ensure that resuming training + # doesn't produce artifacts + model.save(weights_fpath, optimizer) + + # Evaluate model to generate samples + epoch_eval = hparams.tts_eval_interval == -1 and i == steps_per_epoch # If epoch is done + step_eval = hparams.tts_eval_interval > 0 and step % hparams.tts_eval_interval == 0 # Every N steps + if epoch_eval or step_eval: + for sample_idx in range(hparams.tts_eval_num_samples): + # At most, generate samples equal to number in the batch + if sample_idx + 1 <= len(texts): + # Remove padding from mels using frame length in metadata + mel_length = int(dataset.metadata[idx[sample_idx]][4]) + mel_prediction = np_now(m2_hat[sample_idx]).T[:mel_length] + target_spectrogram = np_now(mels[sample_idx]).T[:mel_length] + attention_len = mel_length // model.r + + eval_model(attention=np_now(attention[sample_idx][:, :attention_len]), + mel_prediction=mel_prediction, + target_spectrogram=target_spectrogram, + input_seq=np_now(texts[sample_idx]), + 
step=step, + plot_dir=plot_dir, + mel_output_dir=mel_output_dir, + wav_dir=wav_dir, + sample_num=sample_idx + 1, + loss=loss, + hparams=hparams) + + # Break out of loop to update training schedule + if step >= max_step: + break + + # Add line break after every epoch + print("") + +def eval_model(attention, mel_prediction, target_spectrogram, input_seq, step, + plot_dir, mel_output_dir, wav_dir, sample_num, loss, hparams): + # Save some results for evaluation + attention_path = str(plot_dir.joinpath("attention_step_{}_sample_{}".format(step, sample_num))) + save_attention(attention, attention_path) + + # save predicted mel spectrogram to disk (debug) + mel_output_fpath = mel_output_dir.joinpath("mel-prediction-step-{}_sample_{}.npy".format(step, sample_num)) + np.save(str(mel_output_fpath), mel_prediction, allow_pickle=False) + + # save griffin lim inverted wav for debug (mel -> wav) + wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams) + wav_fpath = wav_dir.joinpath("step-{}-wave-from-mel_sample_{}.wav".format(step, sample_num)) + audio.save_wav(wav, str(wav_fpath), sr=hparams.sample_rate) + + # save real and predicted mel-spectrogram plot to disk (control purposes) + spec_fpath = plot_dir.joinpath("step-{}-mel-spectrogram_sample_{}.png".format(step, sample_num)) + title_str = "{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss) + plot_spectrogram(mel_prediction, str(spec_fpath), title=title_str, + target_spectrogram=target_spectrogram, + max_len=target_spectrogram.size // hparams.num_mels) + print("Input at step {}: {}".format(step, sequence_to_text(input_seq))) diff --git a/synthesizer/utils/__init__.py b/synthesizer/utils/__init__.py new file mode 100644 index 0000000..5ae3e48 --- /dev/null +++ b/synthesizer/utils/__init__.py @@ -0,0 +1,45 @@ +import torch + + +_output_ref = None +_replicas_ref = None + +def data_parallel_workaround(model, *input): + global _output_ref + global _replicas_ref + device_ids = list(range(torch.cuda.device_count())) + output_device = device_ids[0] + replicas = torch.nn.parallel.replicate(model, device_ids) + # input.shape = (num_args, batch, ...) + inputs = torch.nn.parallel.scatter(input, device_ids) + # inputs.shape = (num_gpus, num_args, batch/num_gpus, ...) 
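+    # scatter may return fewer chunks than there are GPUs when the batch is small, so the
+    # unused replicas are dropped before parallel_apply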
+ replicas = replicas[:len(inputs)] + outputs = torch.nn.parallel.parallel_apply(replicas, inputs) + y_hat = torch.nn.parallel.gather(outputs, output_device) + _output_ref = outputs + _replicas_ref = replicas + return y_hat + + +class ValueWindow(): + def __init__(self, window_size=100): + self._window_size = window_size + self._values = [] + + def append(self, x): + self._values = self._values[-(self._window_size - 1):] + [x] + + @property + def sum(self): + return sum(self._values) + + @property + def count(self): + return len(self._values) + + @property + def average(self): + return self.sum / max(1, self.count) + + def reset(self): + self._values = [] diff --git a/synthesizer/utils/_cmudict.py b/synthesizer/utils/_cmudict.py new file mode 100644 index 0000000..2cef1f8 --- /dev/null +++ b/synthesizer/utils/_cmudict.py @@ -0,0 +1,62 @@ +import re + +valid_symbols = [ + "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2", + "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2", + "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY", + "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1", + "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0", + "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW", + "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH" +] + +_valid_symbol_set = set(valid_symbols) + + +class CMUDict: + """Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict""" + def __init__(self, file_or_path, keep_ambiguous=True): + if isinstance(file_or_path, str): + with open(file_or_path, encoding="latin-1") as f: + entries = _parse_cmudict(f) + else: + entries = _parse_cmudict(file_or_path) + if not keep_ambiguous: + entries = {word: pron for word, pron in entries.items() if len(pron) == 1} + self._entries = entries + + + def __len__(self): + return len(self._entries) + + + def lookup(self, word): + """Returns list of ARPAbet pronunciations of the given word.""" + return self._entries.get(word.upper()) + + + +_alt_re = re.compile(r"\([0-9]+\)") + + +def _parse_cmudict(file): + cmudict = {} + for line in file: + if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"): + parts = line.split(" ") + word = re.sub(_alt_re, "", parts[0]) + pronunciation = _get_pronunciation(parts[1]) + if pronunciation: + if word in cmudict: + cmudict[word].append(pronunciation) + else: + cmudict[word] = [pronunciation] + return cmudict + + +def _get_pronunciation(s): + parts = s.strip().split(" ") + for part in parts: + if part not in _valid_symbol_set: + return None + return " ".join(parts) diff --git a/synthesizer/utils/cleaners.py b/synthesizer/utils/cleaners.py new file mode 100644 index 0000000..eab63f0 --- /dev/null +++ b/synthesizer/utils/cleaners.py @@ -0,0 +1,88 @@ +""" +Cleaners are transformations that run over the input text at both training and eval time. + +Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" +hyperparameter. Some cleaners are English-specific. You"ll typically want to use: + 1. "english_cleaners" for English text + 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using + the Unidecode library (https://pypi.python.org/pypi/Unidecode) + 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update + the symbols in symbols.py to match your data). 
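+
+In this repository, Chinese transcripts are converted to tone-numbered pinyin during preprocessing
+(see synthesizer/preprocess.py), so the default hparams use "basic_cleaners". For example,
+basic_cleaners("NI3  HAO3") returns "ni3 hao3" (lowercasing plus whitespace collapsing).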
+""" + +import re +from unidecode import unidecode +from .numbers import normalize_numbers + +# Regular expression matching whitespace: +_whitespace_re = re.compile(r"\s+") + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ + ("mrs", "misess"), + ("mr", "mister"), + ("dr", "doctor"), + ("st", "saint"), + ("co", "company"), + ("jr", "junior"), + ("maj", "major"), + ("gen", "general"), + ("drs", "doctors"), + ("rev", "reverend"), + ("lt", "lieutenant"), + ("hon", "honorable"), + ("sgt", "sergeant"), + ("capt", "captain"), + ("esq", "esquire"), + ("ltd", "limited"), + ("col", "colonel"), + ("ft", "fort"), +]] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def expand_numbers(text): + return normalize_numbers(text) + + +def lowercase(text): + """lowercase input tokens.""" + return text.lower() + + +def collapse_whitespace(text): + return re.sub(_whitespace_re, " ", text) + + +def convert_to_ascii(text): + return unidecode(text) + + +def basic_cleaners(text): + """Basic pipeline that lowercases and collapses whitespace without transliteration.""" + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def transliteration_cleaners(text): + """Pipeline for non-English text that transliterates to ASCII.""" + text = convert_to_ascii(text) + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def english_cleaners(text): + """Pipeline for English text, including number and abbreviation expansion.""" + text = convert_to_ascii(text) + text = lowercase(text) + text = expand_numbers(text) + text = expand_abbreviations(text) + text = collapse_whitespace(text) + return text diff --git a/synthesizer/utils/numbers.py b/synthesizer/utils/numbers.py new file mode 100644 index 0000000..75020a0 --- /dev/null +++ b/synthesizer/utils/numbers.py @@ -0,0 +1,68 @@ +import re +import inflect + +_inflect = inflect.engine() +_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])") +_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)") +_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)") +_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)") +_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)") +_number_re = re.compile(r"[0-9]+") + + +def _remove_commas(m): + return m.group(1).replace(",", "") + + +def _expand_decimal_point(m): + return m.group(1).replace(".", " point ") + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split(".") + if len(parts) > 2: + return match + " dollars" # Unexpected format + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = "dollar" if dollars == 1 else "dollars" + cent_unit = "cent" if cents == 1 else "cents" + return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit) + elif dollars: + dollar_unit = "dollar" if dollars == 1 else "dollars" + return "%s %s" % (dollars, dollar_unit) + elif cents: + cent_unit = "cent" if cents == 1 else "cents" + return "%s %s" % (cents, cent_unit) + else: + return "zero dollars" + + +def _expand_ordinal(m): + return _inflect.number_to_words(m.group(0)) + + +def _expand_number(m): + num = int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return "two thousand" + elif num > 2000 and num < 2010: + return "two thousand " + _inflect.number_to_words(num % 100) + elif num % 100 == 0: + return 
_inflect.number_to_words(num // 100) + " hundred" + else: + return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ") + else: + return _inflect.number_to_words(num, andword="") + + +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r"\1 pounds", text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + return text diff --git a/synthesizer/utils/plot.py b/synthesizer/utils/plot.py new file mode 100644 index 0000000..f47d271 --- /dev/null +++ b/synthesizer/utils/plot.py @@ -0,0 +1,76 @@ +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np + + +def split_title_line(title_text, max_words=5): + """ + A function that splits any string based on specific character + (returning it with the string), with maximum number of words on it + """ + seq = title_text.split() + return "\n".join([" ".join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)]) + +def plot_alignment(alignment, path, title=None, split_title=False, max_len=None): + if max_len is not None: + alignment = alignment[:, :max_len] + + fig = plt.figure(figsize=(8, 6)) + ax = fig.add_subplot(111) + + im = ax.imshow( + alignment, + aspect="auto", + origin="lower", + interpolation="none") + fig.colorbar(im, ax=ax) + xlabel = "Decoder timestep" + + if split_title: + title = split_title_line(title) + + plt.xlabel(xlabel) + plt.title(title) + plt.ylabel("Encoder timestep") + plt.tight_layout() + plt.savefig(path, format="png") + plt.close() + + +def plot_spectrogram(pred_spectrogram, path, title=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False): + if max_len is not None: + target_spectrogram = target_spectrogram[:max_len] + pred_spectrogram = pred_spectrogram[:max_len] + + if split_title: + title = split_title_line(title) + + fig = plt.figure(figsize=(10, 8)) + # Set common labels + fig.text(0.5, 0.18, title, horizontalalignment="center", fontsize=16) + + #target spectrogram subplot + if target_spectrogram is not None: + ax1 = fig.add_subplot(311) + ax2 = fig.add_subplot(312) + + if auto_aspect: + im = ax1.imshow(np.rot90(target_spectrogram), aspect="auto", interpolation="none") + else: + im = ax1.imshow(np.rot90(target_spectrogram), interpolation="none") + ax1.set_title("Target Mel-Spectrogram") + fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1) + ax2.set_title("Predicted Mel-Spectrogram") + else: + ax2 = fig.add_subplot(211) + + if auto_aspect: + im = ax2.imshow(np.rot90(pred_spectrogram), aspect="auto", interpolation="none") + else: + im = ax2.imshow(np.rot90(pred_spectrogram), interpolation="none") + fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2) + + plt.tight_layout() + plt.savefig(path, format="png") + plt.close() diff --git a/synthesizer/utils/symbols.py b/synthesizer/utils/symbols.py new file mode 100644 index 0000000..2dbec9a --- /dev/null +++ b/synthesizer/utils/symbols.py @@ -0,0 +1,16 @@ +""" +Defines the set of symbols used in text input to the model. + +The default is a set of ASCII characters that works well for English or text that has been run +through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. +""" +# from . 
import cmudict + +_pad = "_" +_eos = "~" +_characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12340!\'(),-.:;? ' +# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): +#_arpabet = ["@' + s for s in cmudict.valid_symbols] + +# Export all symbols: +symbols = [_pad, _eos] + list(_characters) #+ _arpabet diff --git a/synthesizer/utils/text.py b/synthesizer/utils/text.py new file mode 100644 index 0000000..2937217 --- /dev/null +++ b/synthesizer/utils/text.py @@ -0,0 +1,74 @@ +from .symbols import symbols +from . import cleaners +import re + +# Mappings from symbol to numeric ID and vice versa: +_symbol_to_id = {s: i for i, s in enumerate(symbols)} +_id_to_symbol = {i: s for i, s in enumerate(symbols)} + +# Regular expression matching text enclosed in curly braces: +_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)") + + +def text_to_sequence(text, cleaner_names): + """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. + + The text can optionally have ARPAbet sequences enclosed in curly braces embedded + in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." + + Args: + text: string to convert to a sequence + cleaner_names: names of the cleaner functions to run the text through + + Returns: + List of integers corresponding to the symbols in the text + """ + sequence = [] + + # Check for curly braces and treat their contents as ARPAbet: + while len(text): + m = _curly_re.match(text) + if not m: + sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) + break + sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) + sequence += _arpabet_to_sequence(m.group(2)) + text = m.group(3) + + # Append EOS token + sequence.append(_symbol_to_id["~"]) + return sequence + + +def sequence_to_text(sequence): + """Converts a sequence of IDs back to a string""" + result = "" + for symbol_id in sequence: + if symbol_id in _id_to_symbol: + s = _id_to_symbol[symbol_id] + # Enclose ARPAbet back in curly braces: + if len(s) > 1 and s[0] == "@": + s = "{%s}" % s[1:] + result += s + return result.replace("}{", " ") + + +def _clean_text(text, cleaner_names): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception("Unknown cleaner: %s" % name) + text = cleaner(text) + return text + + +def _symbols_to_sequence(symbols): + return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)] + + +def _arpabet_to_sequence(text): + return _symbols_to_sequence(["@" + s for s in text.split()]) + + +def _should_keep_symbol(s): + return s in _symbol_to_id and s not in ("_", "~") diff --git a/synthesizer_preprocess_audio.py b/synthesizer_preprocess_audio.py new file mode 100644 index 0000000..436609e --- /dev/null +++ b/synthesizer_preprocess_audio.py @@ -0,0 +1,60 @@ +from synthesizer.preprocess import preprocess_aidatatang_200zh +from synthesizer.hparams import hparams +from utils.argutils import print_args +from pathlib import Path +import argparse + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Preprocesses audio files from datasets, encodes them as mel spectrograms " + "and writes them to the disk. 
Audio files are also saved, to be used by the " + "vocoder for training.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("datasets_root", type=Path, help=\ + "Path to the directory containing your LibriSpeech/TTS datasets.") + parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ + "Path to the output directory that will contain the mel spectrograms, the audios and the " + "embeds. Defaults to /SV2TTS/synthesizer/") + parser.add_argument("-n", "--n_processes", type=int, default=None, help=\ + "Number of processes in parallel.") + parser.add_argument("-s", "--skip_existing", action="store_true", help=\ + "Whether to overwrite existing files with the same name. Useful if the preprocessing was " + "interrupted.") + parser.add_argument("--hparams", type=str, default="", help=\ + "Hyperparameter overrides as a comma-separated list of name-value pairs") + parser.add_argument("--no_trim", action="store_true", help=\ + "Preprocess audio without trimming silences (not recommended).") + parser.add_argument("--no_alignments", action="store_true", help=\ + "Use this option when dataset does not include alignments\ + (these are used to split long audio files into sub-utterances.)") + parser.add_argument("--datasets_name", type=str, default="LibriSpeech", help=\ + "Name of the dataset directory to process.") + parser.add_argument("--subfolders", type=str, default="train-clean-100, train-clean-360", help=\ + "Comma-separated list of subfolders to process inside your dataset directory") + args = parser.parse_args() + + # Process the arguments + if not hasattr(args, "out_dir"): + args.out_dir = args.datasets_root.joinpath("SV2TTS", "synthesizer") + + # Create directories + assert args.datasets_root.exists() + args.out_dir.mkdir(exist_ok=True, parents=True) + + # Verify webrtcvad is available + if not args.no_trim: + try: + import webrtcvad + except: + raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " + "noise removal and is recommended. Please install and try again. If installation fails, " + "use --no_trim to disable this error message.") + del args.no_trim + + # Preprocess the dataset + print_args(args, parser) + args.hparams = hparams.parse(args.hparams) + # preprocess_dataset(**vars(args)) + preprocess_aidatatang_200zh(**vars(args)) diff --git a/synthesizer_preprocess_embeds.py b/synthesizer_preprocess_embeds.py new file mode 100644 index 0000000..94f864d --- /dev/null +++ b/synthesizer_preprocess_embeds.py @@ -0,0 +1,25 @@ +from synthesizer.preprocess import create_embeddings +from utils.argutils import print_args +from pathlib import Path +import argparse + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Creates embeddings for the synthesizer from the LibriSpeech utterances.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("synthesizer_root", type=Path, help=\ + "Path to the synthesizer training data that contains the audios and the train.txt file. " + "If you let everything as default, it should be /SV2TTS/synthesizer/.") + parser.add_argument("-e", "--encoder_model_fpath", type=Path, + default="encoder/saved_models/pretrained.pt", help=\ + "Path your trained encoder model.") + parser.add_argument("-n", "--n_processes", type=int, default=4, help= \ + "Number of parallel processes. An encoder is created for each, so you may need to lower " + "this value on GPUs with low memory. 
Set it to 1 if CUDA is unhappy.") + args = parser.parse_args() + + # Preprocess the dataset + print_args(args, parser) + create_embeddings(**vars(args)) diff --git a/synthesizer_train.py b/synthesizer_train.py new file mode 100644 index 0000000..2743d59 --- /dev/null +++ b/synthesizer_train.py @@ -0,0 +1,35 @@ +from synthesizer.hparams import hparams +from synthesizer.train import train +from utils.argutils import print_args +import argparse + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("run_id", type=str, help= \ + "Name for this model instance. If a model state from the same run ID was previously " + "saved, the training will restart from there. Pass -f to overwrite saved states and " + "restart from scratch.") + parser.add_argument("syn_dir", type=str, default=argparse.SUPPRESS, help= \ + "Path to the synthesizer directory that contains the ground truth mel spectrograms, " + "the wavs and the embeds.") + parser.add_argument("-m", "--models_dir", type=str, default="synthesizer/saved_models/", help=\ + "Path to the output directory that will contain the saved model weights and the logs.") + parser.add_argument("-s", "--save_every", type=int, default=1000, help= \ + "Number of steps between updates of the model on the disk. Set to 0 to never save the " + "model.") + parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \ + "Number of steps between backups of the model. Set to 0 to never make backups of the " + "model.") + parser.add_argument("-f", "--force_restart", action="store_true", help= \ + "Do not load any saved model and restart from scratch.") + parser.add_argument("--hparams", default="", + help="Hyperparameter overrides as a comma-separated list of name=value " + "pairs") + args = parser.parse_args() + print_args(args, parser) + + args.hparams = hparams.parse(args.hparams) + + # Run the training + train(**vars(args)) diff --git a/toolbox/__init__.py b/toolbox/__init__.py new file mode 100644 index 0000000..74c6b3c --- /dev/null +++ b/toolbox/__init__.py @@ -0,0 +1,359 @@ +from toolbox.ui import UI +from encoder import inference as encoder +from synthesizer.inference import Synthesizer +from vocoder import inference as vocoder +from pathlib import Path +from time import perf_counter as timer +from toolbox.utterance import Utterance +import numpy as np +import traceback +import sys +import torch +import librosa +from audioread.exceptions import NoBackendError + +# Use this directory structure for your datasets, or modify it to fit your needs +recognized_datasets = [ + "LibriSpeech/dev-clean", + "LibriSpeech/dev-other", + "LibriSpeech/test-clean", + "LibriSpeech/test-other", + "LibriSpeech/train-clean-100", + "LibriSpeech/train-clean-360", + "LibriSpeech/train-other-500", + "LibriTTS/dev-clean", + "LibriTTS/dev-other", + "LibriTTS/test-clean", + "LibriTTS/test-other", + "LibriTTS/train-clean-100", + "LibriTTS/train-clean-360", + "LibriTTS/train-other-500", + "LJSpeech-1.1", + "VoxCeleb1/wav", + "VoxCeleb1/test_wav", + "VoxCeleb2/dev/aac", + "VoxCeleb2/test/aac", + "VCTK-Corpus/wav48", + "aidatatang_200zh/corpus/dev", + "aidatatang_200zh/corpus/test", +] + +#Maximum of generated wavs to keep on memory +MAX_WAVES = 15 + +class Toolbox: + def __init__(self, datasets_root, enc_models_dir, syn_models_dir, voc_models_dir, seed, no_mp3_support): + if not no_mp3_support: + try: + librosa.load("samples/6829_00000.mp3") + except NoBackendError: + print("Librosa will be unable to open mp3 files if additional software is not 
installed.\n" + "Please install ffmpeg or add the '--no_mp3_support' option to proceed without support for mp3 files.") + exit(-1) + self.no_mp3_support = no_mp3_support + sys.excepthook = self.excepthook + self.datasets_root = datasets_root + self.utterances = set() + self.current_generated = (None, None, None, None) # speaker_name, spec, breaks, wav + + self.synthesizer = None # type: Synthesizer + self.current_wav = None + self.waves_list = [] + self.waves_count = 0 + self.waves_namelist = [] + + # Check for webrtcvad (enables removal of silences in vocoder output) + try: + import webrtcvad + self.trim_silences = True + except: + self.trim_silences = False + + # Initialize the events and the interface + self.ui = UI() + self.reset_ui(enc_models_dir, syn_models_dir, voc_models_dir, seed) + self.setup_events() + self.ui.start() + + def excepthook(self, exc_type, exc_value, exc_tb): + traceback.print_exception(exc_type, exc_value, exc_tb) + self.ui.log("Exception: %s" % exc_value) + + def setup_events(self): + # Dataset, speaker and utterance selection + self.ui.browser_load_button.clicked.connect(lambda: self.load_from_browser()) + random_func = lambda level: lambda: self.ui.populate_browser(self.datasets_root, + recognized_datasets, + level) + self.ui.random_dataset_button.clicked.connect(random_func(0)) + self.ui.random_speaker_button.clicked.connect(random_func(1)) + self.ui.random_utterance_button.clicked.connect(random_func(2)) + self.ui.dataset_box.currentIndexChanged.connect(random_func(1)) + self.ui.speaker_box.currentIndexChanged.connect(random_func(2)) + + # Model selection + self.ui.encoder_box.currentIndexChanged.connect(self.init_encoder) + def func(): + self.synthesizer = None + self.ui.synthesizer_box.currentIndexChanged.connect(func) + self.ui.vocoder_box.currentIndexChanged.connect(self.init_vocoder) + + # Utterance selection + func = lambda: self.load_from_browser(self.ui.browse_file()) + self.ui.browser_browse_button.clicked.connect(func) + func = lambda: self.ui.draw_utterance(self.ui.selected_utterance, "current") + self.ui.utterance_history.currentIndexChanged.connect(func) + func = lambda: self.ui.play(self.ui.selected_utterance.wav, Synthesizer.sample_rate) + self.ui.play_button.clicked.connect(func) + self.ui.stop_button.clicked.connect(self.ui.stop) + self.ui.record_button.clicked.connect(self.record) + + #Audio + self.ui.setup_audio_devices(Synthesizer.sample_rate) + + #Wav playback & save + func = lambda: self.replay_last_wav() + self.ui.replay_wav_button.clicked.connect(func) + func = lambda: self.export_current_wave() + self.ui.export_wav_button.clicked.connect(func) + self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav) + + # Generation + func = lambda: self.synthesize() or self.vocode() + self.ui.generate_button.clicked.connect(func) + self.ui.synthesize_button.clicked.connect(self.synthesize) + self.ui.vocode_button.clicked.connect(self.vocode) + self.ui.random_seed_checkbox.clicked.connect(self.update_seed_textbox) + + # UMAP legend + self.ui.clear_button.clicked.connect(self.clear_utterances) + + def set_current_wav(self, index): + self.current_wav = self.waves_list[index] + + def export_current_wave(self): + self.ui.save_audio_file(self.current_wav, Synthesizer.sample_rate) + + def replay_last_wav(self): + self.ui.play(self.current_wav, Synthesizer.sample_rate) + + def reset_ui(self, encoder_models_dir, synthesizer_models_dir, vocoder_models_dir, seed): + self.ui.populate_browser(self.datasets_root, recognized_datasets, 0, True) + 
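+        # Refresh the model dropdowns and the seed / trim-silence options for the given directories.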
self.ui.populate_models(encoder_models_dir, synthesizer_models_dir, vocoder_models_dir) + self.ui.populate_gen_options(seed, self.trim_silences) + + def load_from_browser(self, fpath=None): + if fpath is None: + fpath = Path(self.datasets_root, + self.ui.current_dataset_name, + self.ui.current_speaker_name, + self.ui.current_utterance_name) + name = str(fpath.relative_to(self.datasets_root)) + speaker_name = self.ui.current_dataset_name + '_' + self.ui.current_speaker_name + + # Select the next utterance + if self.ui.auto_next_checkbox.isChecked(): + self.ui.browser_select_next() + elif fpath == "": + return + else: + name = fpath.name + speaker_name = fpath.parent.name + + if fpath.suffix.lower() == ".mp3" and self.no_mp3_support: + self.ui.log("Error: No mp3 file argument was passed but an mp3 file was used") + return + + # Get the wav from the disk. We take the wav with the vocoder/synthesizer format for + # playback, so as to have a fair comparison with the generated audio + wav = Synthesizer.load_preprocess_wav(fpath) + self.ui.log("Loaded %s" % name) + + self.add_real_utterance(wav, name, speaker_name) + + def record(self): + wav = self.ui.record_one(encoder.sampling_rate, 5) + if wav is None: + return + self.ui.play(wav, encoder.sampling_rate) + + speaker_name = "user01" + name = speaker_name + "_rec_%05d" % np.random.randint(100000) + self.add_real_utterance(wav, name, speaker_name) + + def add_real_utterance(self, wav, name, speaker_name): + # Compute the mel spectrogram + spec = Synthesizer.make_spectrogram(wav) + self.ui.draw_spec(spec, "current") + + # Compute the embedding + if not encoder.is_loaded(): + self.init_encoder() + encoder_wav = encoder.preprocess_wav(wav) + embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True) + + # Add the utterance + utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, False) + self.utterances.add(utterance) + self.ui.register_utterance(utterance) + + # Plot it + self.ui.draw_embed(embed, name, "current") + self.ui.draw_umap_projections(self.utterances) + + def clear_utterances(self): + self.utterances.clear() + self.ui.draw_umap_projections(self.utterances) + + def synthesize(self): + self.ui.log("Generating the mel spectrogram...") + self.ui.set_loading(1) + + # Update the synthesizer random seed + if self.ui.random_seed_checkbox.isChecked(): + seed = int(self.ui.seed_textbox.text()) + self.ui.populate_gen_options(seed, self.trim_silences) + else: + seed = None + + if seed is not None: + torch.manual_seed(seed) + + # Synthesize the spectrogram + if self.synthesizer is None or seed is not None: + self.init_synthesizer() + + texts = self.ui.text_prompt.toPlainText().split("\n") + embed = self.ui.selected_utterance.embed + embeds = [embed] * len(texts) + specs = self.synthesizer.synthesize_spectrograms(texts, embeds) + breaks = [spec.shape[1] for spec in specs] + spec = np.concatenate(specs, axis=1) + + self.ui.draw_spec(spec, "generated") + self.current_generated = (self.ui.selected_utterance.speaker_name, spec, breaks, None) + self.ui.set_loading(0) + + def vocode(self): + speaker_name, spec, breaks, _ = self.current_generated + assert spec is not None + + # Initialize the vocoder model and make it determinstic, if user provides a seed + if self.ui.random_seed_checkbox.isChecked(): + seed = int(self.ui.seed_textbox.text()) + self.ui.populate_gen_options(seed, self.trim_silences) + else: + seed = None + + if seed is not None: + torch.manual_seed(seed) + + # Synthesize the waveform + if not 
vocoder.is_loaded() or seed is not None: + self.init_vocoder() + + def vocoder_progress(i, seq_len, b_size, gen_rate): + real_time_factor = (gen_rate / Synthesizer.sample_rate) * 1000 + line = "Waveform generation: %d/%d (batch size: %d, rate: %.1fkHz - %.2fx real time)" \ + % (i * b_size, seq_len * b_size, b_size, gen_rate, real_time_factor) + self.ui.log(line, "overwrite") + self.ui.set_loading(i, seq_len) + if self.ui.current_vocoder_fpath is not None: + self.ui.log("") + wav = vocoder.infer_waveform(spec, progress_callback=vocoder_progress) + else: + self.ui.log("Waveform generation with Griffin-Lim... ") + wav = Synthesizer.griffin_lim(spec) + self.ui.set_loading(0) + self.ui.log(" Done!", "append") + + # Add breaks + b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size) + b_starts = np.concatenate(([0], b_ends[:-1])) + wavs = [wav[start:end] for start, end, in zip(b_starts, b_ends)] + breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks) + wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)]) + + # Trim excessive silences + if self.ui.trim_silences_checkbox.isChecked(): + wav = encoder.preprocess_wav(wav) + + # Play it + wav = wav / np.abs(wav).max() * 0.97 + self.ui.play(wav, Synthesizer.sample_rate) + + # Name it (history displayed in combobox) + # TODO better naming for the combobox items? + wav_name = str(self.waves_count + 1) + + #Update waves combobox + self.waves_count += 1 + if self.waves_count > MAX_WAVES: + self.waves_list.pop() + self.waves_namelist.pop() + self.waves_list.insert(0, wav) + self.waves_namelist.insert(0, wav_name) + + self.ui.waves_cb.disconnect() + self.ui.waves_cb_model.setStringList(self.waves_namelist) + self.ui.waves_cb.setCurrentIndex(0) + self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav) + + # Update current wav + self.set_current_wav(0) + + #Enable replay and save buttons: + self.ui.replay_wav_button.setDisabled(False) + self.ui.export_wav_button.setDisabled(False) + + # Compute the embedding + # TODO: this is problematic with different sampling rates, gotta fix it + if not encoder.is_loaded(): + self.init_encoder() + encoder_wav = encoder.preprocess_wav(wav) + embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True) + + # Add the utterance + name = speaker_name + "_gen_%05d" % np.random.randint(100000) + utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, True) + self.utterances.add(utterance) + + # Plot it + self.ui.draw_embed(embed, name, "generated") + self.ui.draw_umap_projections(self.utterances) + + def init_encoder(self): + model_fpath = self.ui.current_encoder_fpath + + self.ui.log("Loading the encoder %s... " % model_fpath) + self.ui.set_loading(1) + start = timer() + encoder.load_model(model_fpath) + self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append") + self.ui.set_loading(0) + + def init_synthesizer(self): + model_fpath = self.ui.current_synthesizer_fpath + + self.ui.log("Loading the synthesizer %s... " % model_fpath) + self.ui.set_loading(1) + start = timer() + self.synthesizer = Synthesizer(model_fpath) + self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append") + self.ui.set_loading(0) + + def init_vocoder(self): + model_fpath = self.ui.current_vocoder_fpath + # Case of Griffin-lim + if model_fpath is None: + return + + self.ui.log("Loading the vocoder %s... " % model_fpath) + self.ui.set_loading(1) + start = timer() + vocoder.load_model(model_fpath) + self.ui.log("Done (%dms)." 
% int(1000 * (timer() - start)), "append") + self.ui.set_loading(0) + + def update_seed_textbox(self): + self.ui.update_seed_textbox() diff --git a/toolbox/ui.py b/toolbox/ui.py new file mode 100644 index 0000000..d56b574 --- /dev/null +++ b/toolbox/ui.py @@ -0,0 +1,611 @@ +import matplotlib.pyplot as plt +from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas +from matplotlib.figure import Figure +from PyQt5.QtCore import Qt, QStringListModel +from PyQt5.QtWidgets import * +from encoder.inference import plot_embedding_as_heatmap +from toolbox.utterance import Utterance +from pathlib import Path +from typing import List, Set +import sounddevice as sd +import soundfile as sf +import numpy as np +# from sklearn.manifold import TSNE # You can try with TSNE if you like, I prefer UMAP +from time import sleep +import umap +import sys +from warnings import filterwarnings, warn +filterwarnings("ignore") + + +colormap = np.array([ + [0, 127, 70], + [255, 0, 0], + [255, 217, 38], + [0, 135, 255], + [165, 0, 165], + [255, 167, 255], + [97, 142, 151], + [0, 255, 255], + [255, 96, 38], + [142, 76, 0], + [33, 0, 127], + [0, 0, 0], + [183, 183, 183], + [76, 255, 0], +], dtype=np.float) / 255 + +default_text = \ + "Welcome to the toolbox! To begin, load an utterance from your datasets or record one " \ + "yourself.\nOnce its embedding has been created, you can synthesize any text written here.\n" \ + "The synthesizer expects to generate " \ + "outputs that are somewhere between 5 and 12 seconds.\nTo mark breaks, write a new line. " \ + "Each line will be treated separately.\nThen, they are joined together to make the final " \ + "spectrogram. Use the vocoder to generate audio.\nThe vocoder generates almost in constant " \ + "time, so it will be more time efficient for longer inputs like this one.\nOn the left you " \ + "have the embedding projections. Load or record more utterances to see them.\nIf you have " \ + "at least 2 or 3 utterances from a same speaker, a cluster should form.\nSynthesized " \ + "utterances are of the same color as the speaker whose voice was used, but they're " \ + "represented with a cross." 
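+
+# Note: UI only builds the Qt widgets and exposes drawing/logging helpers; most of the control
+# flow lives in Toolbox (toolbox/__init__.py). A rough sketch of how it is driven, following the
+# call order shown in Toolbox.__init__ above:
+#
+#     ui = UI()     # builds the dialog, resets the interface and shows it
+#     ...           # Toolbox wires the button signals and populates the browser/model boxes
+#     ui.start()    # blocks in the Qt event loop (app.exec_())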
+ + +class UI(QDialog): + min_umap_points = 4 + max_log_lines = 5 + max_saved_utterances = 20 + + def draw_utterance(self, utterance: Utterance, which): + self.draw_spec(utterance.spec, which) + self.draw_embed(utterance.embed, utterance.name, which) + + def draw_embed(self, embed, name, which): + embed_ax, _ = self.current_ax if which == "current" else self.gen_ax + embed_ax.figure.suptitle("" if embed is None else name) + + ## Embedding + # Clear the plot + if len(embed_ax.images) > 0: + embed_ax.images[0].colorbar.remove() + embed_ax.clear() + + # Draw the embed + if embed is not None: + plot_embedding_as_heatmap(embed, embed_ax) + embed_ax.set_title("embedding") + embed_ax.set_aspect("equal", "datalim") + embed_ax.set_xticks([]) + embed_ax.set_yticks([]) + embed_ax.figure.canvas.draw() + + def draw_spec(self, spec, which): + _, spec_ax = self.current_ax if which == "current" else self.gen_ax + + ## Spectrogram + # Draw the spectrogram + spec_ax.clear() + if spec is not None: + im = spec_ax.imshow(spec, aspect="auto", interpolation="none") + # spec_ax.figure.colorbar(mappable=im, shrink=0.65, orientation="horizontal", + # spec_ax=spec_ax) + spec_ax.set_title("mel spectrogram") + + spec_ax.set_xticks([]) + spec_ax.set_yticks([]) + spec_ax.figure.canvas.draw() + if which != "current": + self.vocode_button.setDisabled(spec is None) + + def draw_umap_projections(self, utterances: Set[Utterance]): + self.umap_ax.clear() + + speakers = np.unique([u.speaker_name for u in utterances]) + colors = {speaker_name: colormap[i] for i, speaker_name in enumerate(speakers)} + embeds = [u.embed for u in utterances] + + # Display a message if there aren't enough points + if len(utterances) < self.min_umap_points: + self.umap_ax.text(.5, .5, "Add %d more points to\ngenerate the projections" % + (self.min_umap_points - len(utterances)), + horizontalalignment='center', fontsize=15) + self.umap_ax.set_title("") + + # Compute the projections + else: + if not self.umap_hot: + self.log( + "Drawing UMAP projections for the first time, this will take a few seconds.") + self.umap_hot = True + + reducer = umap.UMAP(int(np.ceil(np.sqrt(len(embeds)))), metric="cosine") + # reducer = TSNE() + projections = reducer.fit_transform(embeds) + + speakers_done = set() + for projection, utterance in zip(projections, utterances): + color = colors[utterance.speaker_name] + mark = "x" if "_gen_" in utterance.name else "o" + label = None if utterance.speaker_name in speakers_done else utterance.speaker_name + speakers_done.add(utterance.speaker_name) + self.umap_ax.scatter(projection[0], projection[1], c=[color], marker=mark, + label=label) + # self.umap_ax.set_title("UMAP projections") + self.umap_ax.legend(prop={'size': 10}) + + # Draw the plot + self.umap_ax.set_aspect("equal", "datalim") + self.umap_ax.set_xticks([]) + self.umap_ax.set_yticks([]) + self.umap_ax.figure.canvas.draw() + + def save_audio_file(self, wav, sample_rate): + dialog = QFileDialog() + dialog.setDefaultSuffix(".wav") + fpath, _ = dialog.getSaveFileName( + parent=self, + caption="Select a path to save the audio file", + filter="Audio Files (*.flac *.wav)" + ) + if fpath: + #Default format is wav + if Path(fpath).suffix == "": + fpath += ".wav" + sf.write(fpath, wav, sample_rate) + + def setup_audio_devices(self, sample_rate): + input_devices = [] + output_devices = [] + for device in sd.query_devices(): + # Check if valid input + try: + sd.check_input_settings(device=device["name"], samplerate=sample_rate) + input_devices.append(device["name"]) + except: + 
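+                # check_input_settings raised: this device cannot record at the requested
+                # sample rate, so it is silently skipped as an input candidate.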
pass + + # Check if valid output + try: + sd.check_output_settings(device=device["name"], samplerate=sample_rate) + output_devices.append(device["name"]) + except Exception as e: + # Log a warning only if the device is not an input + if not device["name"] in input_devices: + warn("Unsupported output device %s for the sample rate: %d \nError: %s" % (device["name"], sample_rate, str(e))) + + if len(input_devices) == 0: + self.log("No audio input device detected. Recording may not work.") + self.audio_in_device = None + else: + self.audio_in_device = input_devices[0] + + if len(output_devices) == 0: + self.log("No supported output audio devices were found! Audio output may not work.") + self.audio_out_devices_cb.addItems(["None"]) + self.audio_out_devices_cb.setDisabled(True) + else: + self.audio_out_devices_cb.clear() + self.audio_out_devices_cb.addItems(output_devices) + self.audio_out_devices_cb.currentTextChanged.connect(self.set_audio_device) + + self.set_audio_device() + + def set_audio_device(self): + + output_device = self.audio_out_devices_cb.currentText() + if output_device == "None": + output_device = None + + # If None, sounddevice queries portaudio + sd.default.device = (self.audio_in_device, output_device) + + def play(self, wav, sample_rate): + try: + sd.stop() + sd.play(wav, sample_rate) + except Exception as e: + print(e) + self.log("Error in audio playback. Try selecting a different audio output device.") + self.log("Your device must be connected before you start the toolbox.") + + def stop(self): + sd.stop() + + def record_one(self, sample_rate, duration): + self.record_button.setText("Recording...") + self.record_button.setDisabled(True) + + self.log("Recording %d seconds of audio" % duration) + sd.stop() + try: + wav = sd.rec(duration * sample_rate, sample_rate, 1) + except Exception as e: + print(e) + self.log("Could not record anything. Is your recording device enabled?") + self.log("Your device must be connected before you start the toolbox.") + return None + + for i in np.arange(0, duration, 0.1): + self.set_loading(i, duration) + sleep(0.1) + self.set_loading(duration, duration) + sd.wait() + + self.log("Done recording.") + self.record_button.setText("Record") + self.record_button.setDisabled(False) + + return wav.squeeze() + + @property + def current_dataset_name(self): + return self.dataset_box.currentText() + + @property + def current_speaker_name(self): + return self.speaker_box.currentText() + + @property + def current_utterance_name(self): + return self.utterance_box.currentText() + + def browse_file(self): + fpath = QFileDialog().getOpenFileName( + parent=self, + caption="Select an audio file", + filter="Audio Files (*.mp3 *.flac *.wav *.m4a)" + ) + return Path(fpath[0]) if fpath[0] != "" else "" + + @staticmethod + def repopulate_box(box, items, random=False): + """ + Resets a box and adds a list of items. 
Pass a list of (item, data) pairs instead to join + data to the items + """ + box.blockSignals(True) + box.clear() + for item in items: + item = list(item) if isinstance(item, tuple) else [item] + box.addItem(str(item[0]), *item[1:]) + if len(items) > 0: + box.setCurrentIndex(np.random.randint(len(items)) if random else 0) + box.setDisabled(len(items) == 0) + box.blockSignals(False) + + def populate_browser(self, datasets_root: Path, recognized_datasets: List, level: int, + random=True): + # Select a random dataset + if level <= 0: + if datasets_root is not None: + datasets = [datasets_root.joinpath(d) for d in recognized_datasets] + datasets = [d.relative_to(datasets_root) for d in datasets if d.exists()] + self.browser_load_button.setDisabled(len(datasets) == 0) + if datasets_root is None or len(datasets) == 0: + msg = "Warning: you d" + ("id not pass a root directory for datasets as argument" \ + if datasets_root is None else "o not have any of the recognized datasets" \ + " in %s" % datasets_root) + self.log(msg) + msg += ".\nThe recognized datasets are:\n\t%s\nFeel free to add your own. You " \ + "can still use the toolbox by recording samples yourself." % \ + ("\n\t".join(recognized_datasets)) + print(msg, file=sys.stderr) + + self.random_utterance_button.setDisabled(True) + self.random_speaker_button.setDisabled(True) + self.random_dataset_button.setDisabled(True) + self.utterance_box.setDisabled(True) + self.speaker_box.setDisabled(True) + self.dataset_box.setDisabled(True) + self.browser_load_button.setDisabled(True) + self.auto_next_checkbox.setDisabled(True) + return + self.repopulate_box(self.dataset_box, datasets, random) + + # Select a random speaker + if level <= 1: + speakers_root = datasets_root.joinpath(self.current_dataset_name) + speaker_names = [d.stem for d in speakers_root.glob("*") if d.is_dir()] + self.repopulate_box(self.speaker_box, speaker_names, random) + + # Select a random utterance + if level <= 2: + utterances_root = datasets_root.joinpath( + self.current_dataset_name, + self.current_speaker_name + ) + utterances = [] + for extension in ['mp3', 'flac', 'wav', 'm4a']: + utterances.extend(Path(utterances_root).glob("**/*.%s" % extension)) + utterances = [fpath.relative_to(utterances_root) for fpath in utterances] + self.repopulate_box(self.utterance_box, utterances, random) + + def browser_select_next(self): + index = (self.utterance_box.currentIndex() + 1) % len(self.utterance_box) + self.utterance_box.setCurrentIndex(index) + + @property + def current_encoder_fpath(self): + return self.encoder_box.itemData(self.encoder_box.currentIndex()) + + @property + def current_synthesizer_fpath(self): + return self.synthesizer_box.itemData(self.synthesizer_box.currentIndex()) + + @property + def current_vocoder_fpath(self): + return self.vocoder_box.itemData(self.vocoder_box.currentIndex()) + + def populate_models(self, encoder_models_dir: Path, synthesizer_models_dir: Path, + vocoder_models_dir: Path): + # Encoder + encoder_fpaths = list(encoder_models_dir.glob("*.pt")) + if len(encoder_fpaths) == 0: + raise Exception("No encoder models found in %s" % encoder_models_dir) + self.repopulate_box(self.encoder_box, [(f.stem, f) for f in encoder_fpaths]) + + # Synthesizer + synthesizer_fpaths = list(synthesizer_models_dir.glob("**/*.pt")) + if len(synthesizer_fpaths) == 0: + raise Exception("No synthesizer models found in %s" % synthesizer_models_dir) + self.repopulate_box(self.synthesizer_box, [(f.stem, f) for f in synthesizer_fpaths]) + + # Vocoder + vocoder_fpaths = 
list(vocoder_models_dir.glob("**/*.pt")) + vocoder_items = [(f.stem, f) for f in vocoder_fpaths] + [("Griffin-Lim", None)] + self.repopulate_box(self.vocoder_box, vocoder_items) + + @property + def selected_utterance(self): + return self.utterance_history.itemData(self.utterance_history.currentIndex()) + + def register_utterance(self, utterance: Utterance): + self.utterance_history.blockSignals(True) + self.utterance_history.insertItem(0, utterance.name, utterance) + self.utterance_history.setCurrentIndex(0) + self.utterance_history.blockSignals(False) + + if len(self.utterance_history) > self.max_saved_utterances: + self.utterance_history.removeItem(self.max_saved_utterances) + + self.play_button.setDisabled(False) + self.generate_button.setDisabled(False) + self.synthesize_button.setDisabled(False) + + def log(self, line, mode="newline"): + if mode == "newline": + self.logs.append(line) + if len(self.logs) > self.max_log_lines: + del self.logs[0] + elif mode == "append": + self.logs[-1] += line + elif mode == "overwrite": + self.logs[-1] = line + log_text = '\n'.join(self.logs) + + self.log_window.setText(log_text) + self.app.processEvents() + + def set_loading(self, value, maximum=1): + self.loading_bar.setValue(value * 100) + self.loading_bar.setMaximum(maximum * 100) + self.loading_bar.setTextVisible(value != 0) + self.app.processEvents() + + def populate_gen_options(self, seed, trim_silences): + if seed is not None: + self.random_seed_checkbox.setChecked(True) + self.seed_textbox.setText(str(seed)) + self.seed_textbox.setEnabled(True) + else: + self.random_seed_checkbox.setChecked(False) + self.seed_textbox.setText(str(0)) + self.seed_textbox.setEnabled(False) + + if not trim_silences: + self.trim_silences_checkbox.setChecked(False) + self.trim_silences_checkbox.setDisabled(True) + + def update_seed_textbox(self): + if self.random_seed_checkbox.isChecked(): + self.seed_textbox.setEnabled(True) + else: + self.seed_textbox.setEnabled(False) + + def reset_interface(self): + self.draw_embed(None, None, "current") + self.draw_embed(None, None, "generated") + self.draw_spec(None, "current") + self.draw_spec(None, "generated") + self.draw_umap_projections(set()) + self.set_loading(0) + self.play_button.setDisabled(True) + self.generate_button.setDisabled(True) + self.synthesize_button.setDisabled(True) + self.vocode_button.setDisabled(True) + self.replay_wav_button.setDisabled(True) + self.export_wav_button.setDisabled(True) + [self.log("") for _ in range(self.max_log_lines)] + + def __init__(self): + ## Initialize the application + self.app = QApplication(sys.argv) + super().__init__(None) + self.setWindowTitle("SV2TTS toolbox") + + + ## Main layouts + # Root + root_layout = QGridLayout() + self.setLayout(root_layout) + + # Browser + browser_layout = QGridLayout() + root_layout.addLayout(browser_layout, 0, 0, 1, 2) + + # Generation + gen_layout = QVBoxLayout() + root_layout.addLayout(gen_layout, 0, 2, 1, 2) + + # Projections + self.projections_layout = QVBoxLayout() + root_layout.addLayout(self.projections_layout, 1, 0, 1, 1) + + # Visualizations + vis_layout = QVBoxLayout() + root_layout.addLayout(vis_layout, 1, 1, 1, 3) + + + ## Projections + # UMap + fig, self.umap_ax = plt.subplots(figsize=(3, 3), facecolor="#F0F0F0") + fig.subplots_adjust(left=0.02, bottom=0.02, right=0.98, top=0.98) + self.projections_layout.addWidget(FigureCanvas(fig)) + self.umap_hot = False + self.clear_button = QPushButton("Clear") + self.projections_layout.addWidget(self.clear_button) + + + ## Browser + # 
Dataset, speaker and utterance selection + i = 0 + self.dataset_box = QComboBox() + browser_layout.addWidget(QLabel("Dataset"), i, 0) + browser_layout.addWidget(self.dataset_box, i + 1, 0) + self.speaker_box = QComboBox() + browser_layout.addWidget(QLabel("Speaker"), i, 1) + browser_layout.addWidget(self.speaker_box, i + 1, 1) + self.utterance_box = QComboBox() + browser_layout.addWidget(QLabel("Utterance"), i, 2) + browser_layout.addWidget(self.utterance_box, i + 1, 2) + self.browser_load_button = QPushButton("Load") + browser_layout.addWidget(self.browser_load_button, i + 1, 3) + i += 2 + + # Random buttons + self.random_dataset_button = QPushButton("Random") + browser_layout.addWidget(self.random_dataset_button, i, 0) + self.random_speaker_button = QPushButton("Random") + browser_layout.addWidget(self.random_speaker_button, i, 1) + self.random_utterance_button = QPushButton("Random") + browser_layout.addWidget(self.random_utterance_button, i, 2) + self.auto_next_checkbox = QCheckBox("Auto select next") + self.auto_next_checkbox.setChecked(True) + browser_layout.addWidget(self.auto_next_checkbox, i, 3) + i += 1 + + # Utterance box + browser_layout.addWidget(QLabel("Use embedding from:"), i, 0) + self.utterance_history = QComboBox() + browser_layout.addWidget(self.utterance_history, i, 1, 1, 3) + i += 1 + + # Random & next utterance buttons + self.browser_browse_button = QPushButton("Browse") + browser_layout.addWidget(self.browser_browse_button, i, 0) + self.record_button = QPushButton("Record") + browser_layout.addWidget(self.record_button, i, 1) + self.play_button = QPushButton("Play") + browser_layout.addWidget(self.play_button, i, 2) + self.stop_button = QPushButton("Stop") + browser_layout.addWidget(self.stop_button, i, 3) + i += 1 + + + # Model and audio output selection + self.encoder_box = QComboBox() + browser_layout.addWidget(QLabel("Encoder"), i, 0) + browser_layout.addWidget(self.encoder_box, i + 1, 0) + self.synthesizer_box = QComboBox() + browser_layout.addWidget(QLabel("Synthesizer"), i, 1) + browser_layout.addWidget(self.synthesizer_box, i + 1, 1) + self.vocoder_box = QComboBox() + browser_layout.addWidget(QLabel("Vocoder"), i, 2) + browser_layout.addWidget(self.vocoder_box, i + 1, 2) + + self.audio_out_devices_cb=QComboBox() + browser_layout.addWidget(QLabel("Audio Output"), i, 3) + browser_layout.addWidget(self.audio_out_devices_cb, i + 1, 3) + i += 2 + + #Replay & Save Audio + browser_layout.addWidget(QLabel("Toolbox Output:"), i, 0) + self.waves_cb = QComboBox() + self.waves_cb_model = QStringListModel() + self.waves_cb.setModel(self.waves_cb_model) + self.waves_cb.setToolTip("Select one of the last generated waves in this section for replaying or exporting") + browser_layout.addWidget(self.waves_cb, i, 1) + self.replay_wav_button = QPushButton("Replay") + self.replay_wav_button.setToolTip("Replay last generated vocoder") + browser_layout.addWidget(self.replay_wav_button, i, 2) + self.export_wav_button = QPushButton("Export") + self.export_wav_button.setToolTip("Save last generated vocoder audio in filesystem as a wav file") + browser_layout.addWidget(self.export_wav_button, i, 3) + i += 1 + + + ## Embed & spectrograms + vis_layout.addStretch() + + gridspec_kw = {"width_ratios": [1, 4]} + fig, self.current_ax = plt.subplots(1, 2, figsize=(10, 2.25), facecolor="#F0F0F0", + gridspec_kw=gridspec_kw) + fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) + vis_layout.addWidget(FigureCanvas(fig)) + + fig, self.gen_ax = plt.subplots(1, 2, figsize=(10, 2.25), 
facecolor="#F0F0F0", + gridspec_kw=gridspec_kw) + fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) + vis_layout.addWidget(FigureCanvas(fig)) + + for ax in self.current_ax.tolist() + self.gen_ax.tolist(): + ax.set_facecolor("#F0F0F0") + for side in ["top", "right", "bottom", "left"]: + ax.spines[side].set_visible(False) + + + ## Generation + self.text_prompt = QPlainTextEdit(default_text) + gen_layout.addWidget(self.text_prompt, stretch=1) + + self.generate_button = QPushButton("Synthesize and vocode") + gen_layout.addWidget(self.generate_button) + + layout = QHBoxLayout() + self.synthesize_button = QPushButton("Synthesize only") + layout.addWidget(self.synthesize_button) + self.vocode_button = QPushButton("Vocode only") + layout.addWidget(self.vocode_button) + gen_layout.addLayout(layout) + + layout_seed = QGridLayout() + self.random_seed_checkbox = QCheckBox("Random seed:") + self.random_seed_checkbox.setToolTip("When checked, makes the synthesizer and vocoder deterministic.") + layout_seed.addWidget(self.random_seed_checkbox, 0, 0) + self.seed_textbox = QLineEdit() + self.seed_textbox.setMaximumWidth(80) + layout_seed.addWidget(self.seed_textbox, 0, 1) + self.trim_silences_checkbox = QCheckBox("Enhance vocoder output") + self.trim_silences_checkbox.setToolTip("When checked, trims excess silence in vocoder output." + " This feature requires `webrtcvad` to be installed.") + layout_seed.addWidget(self.trim_silences_checkbox, 0, 2, 1, 2) + gen_layout.addLayout(layout_seed) + + self.loading_bar = QProgressBar() + gen_layout.addWidget(self.loading_bar) + + self.log_window = QLabel() + self.log_window.setAlignment(Qt.AlignBottom | Qt.AlignLeft) + gen_layout.addWidget(self.log_window) + self.logs = [] + gen_layout.addStretch() + + + ## Set the size of the window and of the elements + max_size = QDesktopWidget().availableGeometry(self).size() * 0.8 + self.resize(max_size) + + ## Finalize the display + self.reset_interface() + self.show() + + def start(self): + self.app.exec_() diff --git a/toolbox/utterance.py b/toolbox/utterance.py new file mode 100644 index 0000000..844c8a2 --- /dev/null +++ b/toolbox/utterance.py @@ -0,0 +1,5 @@ +from collections import namedtuple + +Utterance = namedtuple("Utterance", "name speaker_name wav spec embed partial_embeds synth") +Utterance.__eq__ = lambda x, y: x.name == y.name +Utterance.__hash__ = lambda x: hash(x.name) diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/argutils.py b/utils/argutils.py new file mode 100644 index 0000000..db41683 --- /dev/null +++ b/utils/argutils.py @@ -0,0 +1,40 @@ +from pathlib import Path +import numpy as np +import argparse + +_type_priorities = [ # In decreasing order + Path, + str, + int, + float, + bool, +] + +def _priority(o): + p = next((i for i, t in enumerate(_type_priorities) if type(o) is t), None) + if p is not None: + return p + p = next((i for i, t in enumerate(_type_priorities) if isinstance(o, t)), None) + if p is not None: + return p + return len(_type_priorities) + +def print_args(args: argparse.Namespace, parser=None): + args = vars(args) + if parser is None: + priorities = list(map(_priority, args.values())) + else: + all_params = [a.dest for g in parser._action_groups for a in g._group_actions ] + priority = lambda p: all_params.index(p) if p in all_params else len(all_params) + priorities = list(map(priority, args.keys())) + + pad = max(map(len, args.keys())) + 3 + indices = np.lexsort((list(args.keys()), priorities)) + items = 
list(args.items()) + + print("Arguments:") + for i in indices: + param, value = items[i] + print(" {0}:{1}{2}".format(param, ' ' * (pad - len(param)), value)) + print("") + \ No newline at end of file diff --git a/utils/logmmse.py b/utils/logmmse.py new file mode 100644 index 0000000..58cc450 --- /dev/null +++ b/utils/logmmse.py @@ -0,0 +1,247 @@ +# The MIT License (MIT) +# +# Copyright (c) 2015 braindead +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# +# This code was extracted from the logmmse package (https://pypi.org/project/logmmse/) and I +# simply modified the interface to meet my needs. + + +import numpy as np +import math +from scipy.special import expn +from collections import namedtuple + +NoiseProfile = namedtuple("NoiseProfile", "sampling_rate window_size len1 len2 win n_fft noise_mu2") + + +def profile_noise(noise, sampling_rate, window_size=0): + """ + Creates a profile of the noise in a given waveform. + + :param noise: a waveform containing noise ONLY, as a numpy array of floats or ints. + :param sampling_rate: the sampling rate of the audio + :param window_size: the size of the window the logmmse algorithm operates on. A default value + will be picked if left as 0. + :return: a NoiseProfile object + """ + noise, dtype = to_float(noise) + noise += np.finfo(np.float64).eps + + if window_size == 0: + window_size = int(math.floor(0.02 * sampling_rate)) + + if window_size % 2 == 1: + window_size = window_size + 1 + + perc = 50 + len1 = int(math.floor(window_size * perc / 100)) + len2 = int(window_size - len1) + + win = np.hanning(window_size) + win = win * len2 / np.sum(win) + n_fft = 2 * window_size + + noise_mean = np.zeros(n_fft) + n_frames = len(noise) // window_size + for j in range(0, window_size * n_frames, window_size): + noise_mean += np.absolute(np.fft.fft(win * noise[j:j + window_size], n_fft, axis=0)) + noise_mu2 = (noise_mean / n_frames) ** 2 + + return NoiseProfile(sampling_rate, window_size, len1, len2, win, n_fft, noise_mu2) + + +def denoise(wav, noise_profile: NoiseProfile, eta=0.15): + """ + Cleans the noise from a speech waveform given a noise profile. The waveform must have the + same sampling rate as the one used to create the noise profile. + + :param wav: a speech waveform as a numpy array of floats or ints. + :param noise_profile: a NoiseProfile object that was created from a similar (or a segment of + the same) waveform. + :param eta: voice threshold for noise update. 
While the voice activation detection value is + below this threshold, the noise profile will be continuously updated throughout the audio. + Set to 0 to disable updating the noise profile. + :return: the clean wav as a numpy array of floats or ints of the same length. + """ + wav, dtype = to_float(wav) + wav += np.finfo(np.float64).eps + p = noise_profile + + nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2)) + x_final = np.zeros(nframes * p.len2) + + aa = 0.98 + mu = 0.98 + ksi_min = 10 ** (-25 / 10) + + x_old = np.zeros(p.len1) + xk_prev = np.zeros(p.len1) + noise_mu2 = p.noise_mu2 + for k in range(0, nframes * p.len2, p.len2): + insign = p.win * wav[k:k + p.window_size] + + spec = np.fft.fft(insign, p.n_fft, axis=0) + sig = np.absolute(spec) + sig2 = sig ** 2 + + gammak = np.minimum(sig2 / noise_mu2, 40) + + if xk_prev.all() == 0: + ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0) + else: + ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0) + ksi = np.maximum(ksi_min, ksi) + + log_sigma_k = gammak * ksi/(1 + ksi) - np.log(1 + ksi) + vad_decision = np.sum(log_sigma_k) / p.window_size + if vad_decision < eta: + noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2 + + a = ksi / (1 + ksi) + vk = a * gammak + ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8)) + hw = a * np.exp(ei_vk) + sig = sig * hw + xk_prev = sig ** 2 + xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0) + xi_w = np.real(xi_w) + + x_final[k:k + p.len2] = x_old + xi_w[0:p.len1] + x_old = xi_w[p.len1:p.window_size] + + output = from_float(x_final, dtype) + output = np.pad(output, (0, len(wav) - len(output)), mode="constant") + return output + + +## Alternative VAD algorithm to webrctvad. It has the advantage of not requiring to install that +## darn package and it also works for any sampling rate. Maybe I'll eventually use it instead of +## webrctvad +# def vad(wav, sampling_rate, eta=0.15, window_size=0): +# """ +# TODO: fix doc +# Creates a profile of the noise in a given waveform. +# +# :param wav: a waveform containing noise ONLY, as a numpy array of floats or ints. +# :param sampling_rate: the sampling rate of the audio +# :param window_size: the size of the window the logmmse algorithm operates on. A default value +# will be picked if left as 0. +# :param eta: voice threshold for noise update. While the voice activation detection value is +# below this threshold, the noise profile will be continuously updated throughout the audio. +# Set to 0 to disable updating the noise profile. 
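+#     :return: (per the commented code below) a boolean array the length of wav, True for the
+#         frames whose voice-activation value reached eta, padded with False at the end.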
+# """ +# wav, dtype = to_float(wav) +# wav += np.finfo(np.float64).eps +# +# if window_size == 0: +# window_size = int(math.floor(0.02 * sampling_rate)) +# +# if window_size % 2 == 1: +# window_size = window_size + 1 +# +# perc = 50 +# len1 = int(math.floor(window_size * perc / 100)) +# len2 = int(window_size - len1) +# +# win = np.hanning(window_size) +# win = win * len2 / np.sum(win) +# n_fft = 2 * window_size +# +# wav_mean = np.zeros(n_fft) +# n_frames = len(wav) // window_size +# for j in range(0, window_size * n_frames, window_size): +# wav_mean += np.absolute(np.fft.fft(win * wav[j:j + window_size], n_fft, axis=0)) +# noise_mu2 = (wav_mean / n_frames) ** 2 +# +# wav, dtype = to_float(wav) +# wav += np.finfo(np.float64).eps +# +# nframes = int(math.floor(len(wav) / len2) - math.floor(window_size / len2)) +# vad = np.zeros(nframes * len2, dtype=np.bool) +# +# aa = 0.98 +# mu = 0.98 +# ksi_min = 10 ** (-25 / 10) +# +# xk_prev = np.zeros(len1) +# noise_mu2 = noise_mu2 +# for k in range(0, nframes * len2, len2): +# insign = win * wav[k:k + window_size] +# +# spec = np.fft.fft(insign, n_fft, axis=0) +# sig = np.absolute(spec) +# sig2 = sig ** 2 +# +# gammak = np.minimum(sig2 / noise_mu2, 40) +# +# if xk_prev.all() == 0: +# ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0) +# else: +# ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0) +# ksi = np.maximum(ksi_min, ksi) +# +# log_sigma_k = gammak * ksi / (1 + ksi) - np.log(1 + ksi) +# vad_decision = np.sum(log_sigma_k) / window_size +# if vad_decision < eta: +# noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2 +# print(vad_decision) +# +# a = ksi / (1 + ksi) +# vk = a * gammak +# ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8)) +# hw = a * np.exp(ei_vk) +# sig = sig * hw +# xk_prev = sig ** 2 +# +# vad[k:k + len2] = vad_decision >= eta +# +# vad = np.pad(vad, (0, len(wav) - len(vad)), mode="constant") +# return vad + + +def to_float(_input): + if _input.dtype == np.float64: + return _input, _input.dtype + elif _input.dtype == np.float32: + return _input.astype(np.float64), _input.dtype + elif _input.dtype == np.uint8: + return (_input - 128) / 128., _input.dtype + elif _input.dtype == np.int16: + return _input / 32768., _input.dtype + elif _input.dtype == np.int32: + return _input / 2147483648., _input.dtype + raise ValueError('Unsupported wave file format') + + +def from_float(_input, dtype): + if dtype == np.float64: + return _input, np.float64 + elif dtype == np.float32: + return _input.astype(np.float32) + elif dtype == np.uint8: + return ((_input * 128) + 128).astype(np.uint8) + elif dtype == np.int16: + return (_input * 32768).astype(np.int16) + elif dtype == np.int32: + print(_input) + return (_input * 2147483648).astype(np.int32) + raise ValueError('Unsupported wave file format') diff --git a/utils/modelutils.py b/utils/modelutils.py new file mode 100644 index 0000000..6acaa98 --- /dev/null +++ b/utils/modelutils.py @@ -0,0 +1,17 @@ +from pathlib import Path + +def check_model_paths(encoder_path: Path, synthesizer_path: Path, vocoder_path: Path): + # This function tests the model paths and makes sure at least one is valid. + if encoder_path.is_file() or encoder_path.is_dir(): + return + if synthesizer_path.is_file() or synthesizer_path.is_dir(): + return + if vocoder_path.is_file() or vocoder_path.is_dir(): + return + + # If none of the paths exist, remind the user to download models if needed + print("********************************************************************************") + print("Error: Model files not found. 
Follow these instructions to get and install the models:") + print("https://github.com/CorentinJ/Real-Time-Voice-Cloning/wiki/Pretrained-models") + print("********************************************************************************\n") + quit(-1) diff --git a/utils/profiler.py b/utils/profiler.py new file mode 100644 index 0000000..17175b9 --- /dev/null +++ b/utils/profiler.py @@ -0,0 +1,45 @@ +from time import perf_counter as timer +from collections import OrderedDict +import numpy as np + + +class Profiler: + def __init__(self, summarize_every=5, disabled=False): + self.last_tick = timer() + self.logs = OrderedDict() + self.summarize_every = summarize_every + self.disabled = disabled + + def tick(self, name): + if self.disabled: + return + + # Log the time needed to execute that function + if not name in self.logs: + self.logs[name] = [] + if len(self.logs[name]) >= self.summarize_every: + self.summarize() + self.purge_logs() + self.logs[name].append(timer() - self.last_tick) + + self.reset_timer() + + def purge_logs(self): + for name in self.logs: + self.logs[name].clear() + + def reset_timer(self): + self.last_tick = timer() + + def summarize(self): + n = max(map(len, self.logs.values())) + assert n == self.summarize_every + print("\nAverage execution time over %d steps:" % n) + + name_msgs = ["%s (%d/%d):" % (name, len(deltas), n) for name, deltas in self.logs.items()] + pad = max(map(len, name_msgs)) + for name_msg, deltas in zip(name_msgs, self.logs.values()): + print(" %s mean: %4.0fms std: %4.0fms" % + (name_msg.ljust(pad), np.mean(deltas) * 1000, np.std(deltas) * 1000)) + print("", flush=True) + \ No newline at end of file diff --git a/vocoder/LICENSE.txt b/vocoder/LICENSE.txt new file mode 100644 index 0000000..8d67161 --- /dev/null +++ b/vocoder/LICENSE.txt @@ -0,0 +1,22 @@ +MIT License + +Original work Copyright (c) 2019 fatchord (https://github.com/fatchord) +Modified work Copyright (c) 2019 Corentin Jemine (https://github.com/CorentinJ) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vocoder/audio.py b/vocoder/audio.py new file mode 100644 index 0000000..1163962 --- /dev/null +++ b/vocoder/audio.py @@ -0,0 +1,108 @@ +import math +import numpy as np +import librosa +import vocoder.hparams as hp +from scipy.signal import lfilter +import soundfile as sf + + +def label_2_float(x, bits) : + return 2 * x / (2**bits - 1.) - 1. + + +def float_2_label(x, bits) : + assert abs(x).max() <= 1.0 + x = (x + 1.) 
* (2**bits - 1) / 2 + return x.clip(0, 2**bits - 1) + + +def load_wav(path) : + return librosa.load(str(path), sr=hp.sample_rate)[0] + + +def save_wav(x, path) : + sf.write(path, x.astype(np.float32), hp.sample_rate) + + +def split_signal(x) : + unsigned = x + 2**15 + coarse = unsigned // 256 + fine = unsigned % 256 + return coarse, fine + + +def combine_signal(coarse, fine) : + return coarse * 256 + fine - 2**15 + + +def encode_16bits(x) : + return np.clip(x * 2**15, -2**15, 2**15 - 1).astype(np.int16) + + +mel_basis = None + + +def linear_to_mel(spectrogram): + global mel_basis + if mel_basis is None: + mel_basis = build_mel_basis() + return np.dot(mel_basis, spectrogram) + + +def build_mel_basis(): + return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin) + + +def normalize(S): + return np.clip((S - hp.min_level_db) / -hp.min_level_db, 0, 1) + + +def denormalize(S): + return (np.clip(S, 0, 1) * -hp.min_level_db) + hp.min_level_db + + +def amp_to_db(x): + return 20 * np.log10(np.maximum(1e-5, x)) + + +def db_to_amp(x): + return np.power(10.0, x * 0.05) + + +def spectrogram(y): + D = stft(y) + S = amp_to_db(np.abs(D)) - hp.ref_level_db + return normalize(S) + + +def melspectrogram(y): + D = stft(y) + S = amp_to_db(linear_to_mel(np.abs(D))) + return normalize(S) + + +def stft(y): + return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length) + + +def pre_emphasis(x): + return lfilter([1, -hp.preemphasis], [1], x) + + +def de_emphasis(x): + return lfilter([1], [1, -hp.preemphasis], x) + + +def encode_mu_law(x, mu) : + mu = mu - 1 + fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu) + return np.floor((fx + 1) / 2 * mu + 0.5) + + +def decode_mu_law(y, mu, from_labels=True) : + if from_labels: + y = label_2_float(y, math.log2(mu)) + mu = mu - 1 + x = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1) + return x + diff --git a/vocoder/display.py b/vocoder/display.py new file mode 100644 index 0000000..9568807 --- /dev/null +++ b/vocoder/display.py @@ -0,0 +1,120 @@ +import matplotlib.pyplot as plt +import time +import numpy as np +import sys + + +def progbar(i, n, size=16): + done = (i * size) // n + bar = '' + for i in range(size): + bar += '█' if i <= done else '░' + return bar + + +def stream(message) : + try: + sys.stdout.write("\r{%s}" % message) + except: + #Remove non-ASCII characters from message + message = ''.join(i for i in message if ord(i)<128) + sys.stdout.write("\r{%s}" % message) + + +def simple_table(item_tuples) : + + border_pattern = '+---------------------------------------' + whitespace = ' ' + + headings, cells, = [], [] + + for item in item_tuples : + + heading, cell = str(item[0]), str(item[1]) + + pad_head = True if len(heading) < len(cell) else False + + pad = abs(len(heading) - len(cell)) + pad = whitespace[:pad] + + pad_left = pad[:len(pad)//2] + pad_right = pad[len(pad)//2:] + + if pad_head : + heading = pad_left + heading + pad_right + else : + cell = pad_left + cell + pad_right + + headings += [heading] + cells += [cell] + + border, head, body = '', '', '' + + for i in range(len(item_tuples)) : + + temp_head = f'| {headings[i]} ' + temp_body = f'| {cells[i]} ' + + border += border_pattern[:len(temp_head)] + head += temp_head + body += temp_body + + if i == len(item_tuples) - 1 : + head += '|' + body += '|' + border += '+' + + print(border) + print(head) + print(border) + print(body) + print(border) + print(' ') + + +def time_since(started) : + elapsed = time.time() - started + m = int(elapsed // 
60) + s = int(elapsed % 60) + if m >= 60 : + h = int(m // 60) + m = m % 60 + return f'{h}h {m}m {s}s' + else : + return f'{m}m {s}s' + + +def save_attention(attn, path) : + fig = plt.figure(figsize=(12, 6)) + plt.imshow(attn.T, interpolation='nearest', aspect='auto') + fig.savefig(f'{path}.png', bbox_inches='tight') + plt.close(fig) + + +def save_spectrogram(M, path, length=None) : + M = np.flip(M, axis=0) + if length : M = M[:, :length] + fig = plt.figure(figsize=(12, 6)) + plt.imshow(M, interpolation='nearest', aspect='auto') + fig.savefig(f'{path}.png', bbox_inches='tight') + plt.close(fig) + + +def plot(array) : + fig = plt.figure(figsize=(30, 5)) + ax = fig.add_subplot(111) + ax.xaxis.label.set_color('grey') + ax.yaxis.label.set_color('grey') + ax.xaxis.label.set_fontsize(23) + ax.yaxis.label.set_fontsize(23) + ax.tick_params(axis='x', colors='grey', labelsize=23) + ax.tick_params(axis='y', colors='grey', labelsize=23) + plt.plot(array) + + +def plot_spec(M) : + M = np.flip(M, axis=0) + plt.figure(figsize=(18,4)) + plt.imshow(M, interpolation='nearest', aspect='auto') + plt.show() + diff --git a/vocoder/distribution.py b/vocoder/distribution.py new file mode 100644 index 0000000..d3119a5 --- /dev/null +++ b/vocoder/distribution.py @@ -0,0 +1,132 @@ +import numpy as np +import torch +import torch.nn.functional as F + + +def log_sum_exp(x): + """ numerically stable log_sum_exp implementation that prevents overflow """ + # TF ordering + axis = len(x.size()) - 1 + m, _ = torch.max(x, dim=axis) + m2, _ = torch.max(x, dim=axis, keepdim=True) + return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis)) + + +# It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py +def discretized_mix_logistic_loss(y_hat, y, num_classes=65536, + log_scale_min=None, reduce=True): + if log_scale_min is None: + log_scale_min = float(np.log(1e-14)) + y_hat = y_hat.permute(0,2,1) + assert y_hat.dim() == 3 + assert y_hat.size(1) % 3 == 0 + nr_mix = y_hat.size(1) // 3 + + # (B x T x C) + y_hat = y_hat.transpose(1, 2) + + # unpack parameters. (B, T, num_mixtures) x 3 + logit_probs = y_hat[:, :, :nr_mix] + means = y_hat[:, :, nr_mix:2 * nr_mix] + log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min) + + # B x T x 1 -> B x T x num_mixtures + y = y.expand_as(means) + + centered_y = y - means + inv_stdv = torch.exp(-log_scales) + plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1)) + cdf_plus = torch.sigmoid(plus_in) + min_in = inv_stdv * (centered_y - 1. / (num_classes - 1)) + cdf_min = torch.sigmoid(min_in) + + # log probability for edge case of 0 (before scaling) + # equivalent: torch.log(F.sigmoid(plus_in)) + log_cdf_plus = plus_in - F.softplus(plus_in) + + # log probability for edge case of 255 (before scaling) + # equivalent: (1 - F.sigmoid(min_in)).log() + log_one_minus_cdf_min = -F.softplus(min_in) + + # probability for all other cases + cdf_delta = cdf_plus - cdf_min + + mid_in = inv_stdv * centered_y + # log probability in the center of the bin, to be used in extreme cases + # (not actually used in our code) + log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in) + + # tf equivalent + """ + log_probs = tf.where(x < -0.999, log_cdf_plus, + tf.where(x > 0.999, log_one_minus_cdf_min, + tf.where(cdf_delta > 1e-5, + tf.log(tf.maximum(cdf_delta, 1e-12)), + log_pdf_mid - np.log(127.5)))) + """ + # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value + # for num_classes=65536 case? 1e-7? not sure.. 
+ inner_inner_cond = (cdf_delta > 1e-5).float() + + inner_inner_out = inner_inner_cond * \ + torch.log(torch.clamp(cdf_delta, min=1e-12)) + \ + (1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2)) + inner_cond = (y > 0.999).float() + inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out + cond = (y < -0.999).float() + log_probs = cond * log_cdf_plus + (1. - cond) * inner_out + + log_probs = log_probs + F.log_softmax(logit_probs, -1) + + if reduce: + return -torch.mean(log_sum_exp(log_probs)) + else: + return -log_sum_exp(log_probs).unsqueeze(-1) + + +def sample_from_discretized_mix_logistic(y, log_scale_min=None): + """ + Sample from discretized mixture of logistic distributions + Args: + y (Tensor): B x C x T + log_scale_min (float): Log scale minimum value + Returns: + Tensor: sample in range of [-1, 1]. + """ + if log_scale_min is None: + log_scale_min = float(np.log(1e-14)) + assert y.size(1) % 3 == 0 + nr_mix = y.size(1) // 3 + + # B x T x C + y = y.transpose(1, 2) + logit_probs = y[:, :, :nr_mix] + + # sample mixture indicator from softmax + temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5) + temp = logit_probs.data - torch.log(- torch.log(temp)) + _, argmax = temp.max(dim=-1) + + # (B, T) -> (B, T, nr_mix) + one_hot = to_one_hot(argmax, nr_mix) + # select logistic parameters + means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1) + log_scales = torch.clamp(torch.sum( + y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min) + # sample from logistic & clip to interval + # we don't actually round to the nearest 8bit value when sampling + u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5) + x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u)) + + x = torch.clamp(torch.clamp(x, min=-1.), max=1.) 
+ + return x + + +def to_one_hot(tensor, n, fill_with=1.): + # we perform one hot encore with respect to the last axis + one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_() + if tensor.is_cuda: + one_hot = one_hot.cuda() + one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with) + return one_hot diff --git a/vocoder/gen_wavernn.py b/vocoder/gen_wavernn.py new file mode 100644 index 0000000..2036737 --- /dev/null +++ b/vocoder/gen_wavernn.py @@ -0,0 +1,31 @@ +from vocoder.models.fatchord_version import WaveRNN +from vocoder.audio import * + + +def gen_testset(model: WaveRNN, test_set, samples, batched, target, overlap, save_path): + k = model.get_step() // 1000 + + for i, (m, x) in enumerate(test_set, 1): + if i > samples: + break + + print('\n| Generating: %i/%i' % (i, samples)) + + x = x[0].numpy() + + bits = 16 if hp.voc_mode == 'MOL' else hp.bits + + if hp.mu_law and hp.voc_mode != 'MOL' : + x = decode_mu_law(x, 2**bits, from_labels=True) + else : + x = label_2_float(x, bits) + + save_wav(x, save_path.joinpath("%dk_steps_%d_target.wav" % (k, i))) + + batch_str = "gen_batched_target%d_overlap%d" % (target, overlap) if batched else \ + "gen_not_batched" + save_str = save_path.joinpath("%dk_steps_%d_%s.wav" % (k, i, batch_str)) + + wav = model.generate(m, batched, target, overlap, hp.mu_law) + save_wav(wav, save_str) + diff --git a/vocoder/hparams.py b/vocoder/hparams.py new file mode 100644 index 0000000..c1de9f7 --- /dev/null +++ b/vocoder/hparams.py @@ -0,0 +1,44 @@ +from synthesizer.hparams import hparams as _syn_hp + + +# Audio settings------------------------------------------------------------------------ +# Match the values of the synthesizer +sample_rate = _syn_hp.sample_rate +n_fft = _syn_hp.n_fft +num_mels = _syn_hp.num_mels +hop_length = _syn_hp.hop_size +win_length = _syn_hp.win_size +fmin = _syn_hp.fmin +min_level_db = _syn_hp.min_level_db +ref_level_db = _syn_hp.ref_level_db +mel_max_abs_value = _syn_hp.max_abs_value +preemphasis = _syn_hp.preemphasis +apply_preemphasis = _syn_hp.preemphasize + +bits = 9 # bit depth of signal +mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode + # below + + +# WAVERNN / VOCODER -------------------------------------------------------------------------------- +voc_mode = 'RAW' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from +# mixture of logistics) +voc_upsample_factors = (5, 5, 8) # NB - this needs to correctly factorise hop_length +voc_rnn_dims = 512 +voc_fc_dims = 512 +voc_compute_dims = 128 +voc_res_out_dims = 128 +voc_res_blocks = 10 + +# Training +voc_batch_size = 100 +voc_lr = 1e-4 +voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint +voc_pad = 2 # this will pad the input so that the resnet can 'see' wider + # than input length +voc_seq_len = hop_length * 5 # must be a multiple of hop_length + +# Generating / Synthesizing +voc_gen_batched = True # very fast (realtime+) single utterance batched generation +voc_target = 8000 # target number of samples to be generated in each batch entry +voc_overlap = 400 # number of samples for crossfading between batches diff --git a/vocoder/inference.py b/vocoder/inference.py new file mode 100644 index 0000000..7e54684 --- /dev/null +++ b/vocoder/inference.py @@ -0,0 +1,64 @@ +from vocoder.models.fatchord_version import WaveRNN +from vocoder import hparams as hp +import torch + + +_model = None # type: WaveRNN + +def load_model(weights_fpath, verbose=True): + global _model, _device + + if verbose: + print("Building 
Wave-RNN") + _model = WaveRNN( + rnn_dims=hp.voc_rnn_dims, + fc_dims=hp.voc_fc_dims, + bits=hp.bits, + pad=hp.voc_pad, + upsample_factors=hp.voc_upsample_factors, + feat_dims=hp.num_mels, + compute_dims=hp.voc_compute_dims, + res_out_dims=hp.voc_res_out_dims, + res_blocks=hp.voc_res_blocks, + hop_length=hp.hop_length, + sample_rate=hp.sample_rate, + mode=hp.voc_mode + ) + + if torch.cuda.is_available(): + _model = _model.cuda() + _device = torch.device('cuda') + else: + _device = torch.device('cpu') + + if verbose: + print("Loading model weights at %s" % weights_fpath) + checkpoint = torch.load(weights_fpath, _device) + _model.load_state_dict(checkpoint['model_state']) + _model.eval() + + +def is_loaded(): + return _model is not None + + +def infer_waveform(mel, normalize=True, batched=True, target=8000, overlap=800, + progress_callback=None): + """ + Infers the waveform of a mel spectrogram output by the synthesizer (the format must match + that of the synthesizer!) + + :param normalize: + :param batched: + :param target: + :param overlap: + :return: + """ + if _model is None: + raise Exception("Please load Wave-RNN in memory before using it") + + if normalize: + mel = mel / hp.mel_max_abs_value + mel = torch.from_numpy(mel[None, ...]) + wav = _model.generate(mel, batched, target, overlap, hp.mu_law, progress_callback) + return wav diff --git a/vocoder/models/deepmind_version.py b/vocoder/models/deepmind_version.py new file mode 100644 index 0000000..1d973d9 --- /dev/null +++ b/vocoder/models/deepmind_version.py @@ -0,0 +1,170 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from utils.display import * +from utils.dsp import * + + +class WaveRNN(nn.Module) : + def __init__(self, hidden_size=896, quantisation=256) : + super(WaveRNN, self).__init__() + + self.hidden_size = hidden_size + self.split_size = hidden_size // 2 + + # The main matmul + self.R = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) + + # Output fc layers + self.O1 = nn.Linear(self.split_size, self.split_size) + self.O2 = nn.Linear(self.split_size, quantisation) + self.O3 = nn.Linear(self.split_size, self.split_size) + self.O4 = nn.Linear(self.split_size, quantisation) + + # Input fc layers + self.I_coarse = nn.Linear(2, 3 * self.split_size, bias=False) + self.I_fine = nn.Linear(3, 3 * self.split_size, bias=False) + + # biases for the gates + self.bias_u = nn.Parameter(torch.zeros(self.hidden_size)) + self.bias_r = nn.Parameter(torch.zeros(self.hidden_size)) + self.bias_e = nn.Parameter(torch.zeros(self.hidden_size)) + + # display num params + self.num_params() + + + def forward(self, prev_y, prev_hidden, current_coarse) : + + # Main matmul - the projection is split 3 ways + R_hidden = self.R(prev_hidden) + R_u, R_r, R_e, = torch.split(R_hidden, self.hidden_size, dim=1) + + # Project the prev input + coarse_input_proj = self.I_coarse(prev_y) + I_coarse_u, I_coarse_r, I_coarse_e = \ + torch.split(coarse_input_proj, self.split_size, dim=1) + + # Project the prev input and current coarse sample + fine_input = torch.cat([prev_y, current_coarse], dim=1) + fine_input_proj = self.I_fine(fine_input) + I_fine_u, I_fine_r, I_fine_e = \ + torch.split(fine_input_proj, self.split_size, dim=1) + + # concatenate for the gates + I_u = torch.cat([I_coarse_u, I_fine_u], dim=1) + I_r = torch.cat([I_coarse_r, I_fine_r], dim=1) + I_e = torch.cat([I_coarse_e, I_fine_e], dim=1) + + # Compute all gates for coarse and fine + u = F.sigmoid(R_u + I_u + self.bias_u) + r = F.sigmoid(R_r + I_r + self.bias_r) + e = 
F.tanh(r * R_e + I_e + self.bias_e) + hidden = u * prev_hidden + (1. - u) * e + + # Split the hidden state + hidden_coarse, hidden_fine = torch.split(hidden, self.split_size, dim=1) + + # Compute outputs + out_coarse = self.O2(F.relu(self.O1(hidden_coarse))) + out_fine = self.O4(F.relu(self.O3(hidden_fine))) + + return out_coarse, out_fine, hidden + + + def generate(self, seq_len): + with torch.no_grad(): + # First split up the biases for the gates + b_coarse_u, b_fine_u = torch.split(self.bias_u, self.split_size) + b_coarse_r, b_fine_r = torch.split(self.bias_r, self.split_size) + b_coarse_e, b_fine_e = torch.split(self.bias_e, self.split_size) + + # Lists for the two output seqs + c_outputs, f_outputs = [], [] + + # Some initial inputs + out_coarse = torch.LongTensor([0]).cuda() + out_fine = torch.LongTensor([0]).cuda() + + # We'll meed a hidden state + hidden = self.init_hidden() + + # Need a clock for display + start = time.time() + + # Loop for generation + for i in range(seq_len) : + + # Split into two hidden states + hidden_coarse, hidden_fine = \ + torch.split(hidden, self.split_size, dim=1) + + # Scale and concat previous predictions + out_coarse = out_coarse.unsqueeze(0).float() / 127.5 - 1. + out_fine = out_fine.unsqueeze(0).float() / 127.5 - 1. + prev_outputs = torch.cat([out_coarse, out_fine], dim=1) + + # Project input + coarse_input_proj = self.I_coarse(prev_outputs) + I_coarse_u, I_coarse_r, I_coarse_e = \ + torch.split(coarse_input_proj, self.split_size, dim=1) + + # Project hidden state and split 6 ways + R_hidden = self.R(hidden) + R_coarse_u , R_fine_u, \ + R_coarse_r, R_fine_r, \ + R_coarse_e, R_fine_e = torch.split(R_hidden, self.split_size, dim=1) + + # Compute the coarse gates + u = F.sigmoid(R_coarse_u + I_coarse_u + b_coarse_u) + r = F.sigmoid(R_coarse_r + I_coarse_r + b_coarse_r) + e = F.tanh(r * R_coarse_e + I_coarse_e + b_coarse_e) + hidden_coarse = u * hidden_coarse + (1. - u) * e + + # Compute the coarse output + out_coarse = self.O2(F.relu(self.O1(hidden_coarse))) + posterior = F.softmax(out_coarse, dim=1) + distrib = torch.distributions.Categorical(posterior) + out_coarse = distrib.sample() + c_outputs.append(out_coarse) + + # Project the [prev outputs and predicted coarse sample] + coarse_pred = out_coarse.float() / 127.5 - 1. + fine_input = torch.cat([prev_outputs, coarse_pred.unsqueeze(0)], dim=1) + fine_input_proj = self.I_fine(fine_input) + I_fine_u, I_fine_r, I_fine_e = \ + torch.split(fine_input_proj, self.split_size, dim=1) + + # Compute the fine gates + u = F.sigmoid(R_fine_u + I_fine_u + b_fine_u) + r = F.sigmoid(R_fine_r + I_fine_r + b_fine_r) + e = F.tanh(r * R_fine_e + I_fine_e + b_fine_e) + hidden_fine = u * hidden_fine + (1. 
- u) * e + + # Compute the fine output + out_fine = self.O4(F.relu(self.O3(hidden_fine))) + posterior = F.softmax(out_fine, dim=1) + distrib = torch.distributions.Categorical(posterior) + out_fine = distrib.sample() + f_outputs.append(out_fine) + + # Put the hidden state back together + hidden = torch.cat([hidden_coarse, hidden_fine], dim=1) + + # Display progress + speed = (i + 1) / (time.time() - start) + stream('Gen: %i/%i -- Speed: %i', (i + 1, seq_len, speed)) + + coarse = torch.stack(c_outputs).squeeze(1).cpu().data.numpy() + fine = torch.stack(f_outputs).squeeze(1).cpu().data.numpy() + output = combine_signal(coarse, fine) + + return output, coarse, fine + + def init_hidden(self, batch_size=1) : + return torch.zeros(batch_size, self.hidden_size).cuda() + + def num_params(self) : + parameters = filter(lambda p: p.requires_grad, self.parameters()) + parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 + print('Trainable Parameters: %.3f million' % parameters) \ No newline at end of file diff --git a/vocoder/models/fatchord_version.py b/vocoder/models/fatchord_version.py new file mode 100644 index 0000000..70ef1e3 --- /dev/null +++ b/vocoder/models/fatchord_version.py @@ -0,0 +1,434 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from vocoder.distribution import sample_from_discretized_mix_logistic +from vocoder.display import * +from vocoder.audio import * + + +class ResBlock(nn.Module): + def __init__(self, dims): + super().__init__() + self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) + self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) + self.batch_norm1 = nn.BatchNorm1d(dims) + self.batch_norm2 = nn.BatchNorm1d(dims) + + def forward(self, x): + residual = x + x = self.conv1(x) + x = self.batch_norm1(x) + x = F.relu(x) + x = self.conv2(x) + x = self.batch_norm2(x) + return x + residual + + +class MelResNet(nn.Module): + def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad): + super().__init__() + k_size = pad * 2 + 1 + self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False) + self.batch_norm = nn.BatchNorm1d(compute_dims) + self.layers = nn.ModuleList() + for i in range(res_blocks): + self.layers.append(ResBlock(compute_dims)) + self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1) + + def forward(self, x): + x = self.conv_in(x) + x = self.batch_norm(x) + x = F.relu(x) + for f in self.layers: x = f(x) + x = self.conv_out(x) + return x + + +class Stretch2d(nn.Module): + def __init__(self, x_scale, y_scale): + super().__init__() + self.x_scale = x_scale + self.y_scale = y_scale + + def forward(self, x): + b, c, h, w = x.size() + x = x.unsqueeze(-1).unsqueeze(3) + x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale) + return x.view(b, c, h * self.y_scale, w * self.x_scale) + + +class UpsampleNetwork(nn.Module): + def __init__(self, feat_dims, upsample_scales, compute_dims, + res_blocks, res_out_dims, pad): + super().__init__() + total_scale = np.cumproduct(upsample_scales)[-1] + self.indent = pad * total_scale + self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad) + self.resnet_stretch = Stretch2d(total_scale, 1) + self.up_layers = nn.ModuleList() + for scale in upsample_scales: + k_size = (1, scale * 2 + 1) + padding = (0, scale) + stretch = Stretch2d(scale, 1) + conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False) + conv.weight.data.fill_(1. 
/ k_size[1]) + self.up_layers.append(stretch) + self.up_layers.append(conv) + + def forward(self, m): + aux = self.resnet(m).unsqueeze(1) + aux = self.resnet_stretch(aux) + aux = aux.squeeze(1) + m = m.unsqueeze(1) + for f in self.up_layers: m = f(m) + m = m.squeeze(1)[:, :, self.indent:-self.indent] + return m.transpose(1, 2), aux.transpose(1, 2) + + +class WaveRNN(nn.Module): + def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors, + feat_dims, compute_dims, res_out_dims, res_blocks, + hop_length, sample_rate, mode='RAW'): + super().__init__() + self.mode = mode + self.pad = pad + if self.mode == 'RAW' : + self.n_classes = 2 ** bits + elif self.mode == 'MOL' : + self.n_classes = 30 + else : + RuntimeError("Unknown model mode value - ", self.mode) + + self.rnn_dims = rnn_dims + self.aux_dims = res_out_dims // 4 + self.hop_length = hop_length + self.sample_rate = sample_rate + + self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad) + self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims) + self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True) + self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True) + self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims) + self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims) + self.fc3 = nn.Linear(fc_dims, self.n_classes) + + self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False) + self.num_params() + + def forward(self, x, mels): + self.step += 1 + bsize = x.size(0) + if torch.cuda.is_available(): + h1 = torch.zeros(1, bsize, self.rnn_dims).cuda() + h2 = torch.zeros(1, bsize, self.rnn_dims).cuda() + else: + h1 = torch.zeros(1, bsize, self.rnn_dims).cpu() + h2 = torch.zeros(1, bsize, self.rnn_dims).cpu() + mels, aux = self.upsample(mels) + + aux_idx = [self.aux_dims * i for i in range(5)] + a1 = aux[:, :, aux_idx[0]:aux_idx[1]] + a2 = aux[:, :, aux_idx[1]:aux_idx[2]] + a3 = aux[:, :, aux_idx[2]:aux_idx[3]] + a4 = aux[:, :, aux_idx[3]:aux_idx[4]] + + x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2) + x = self.I(x) + res = x + x, _ = self.rnn1(x, h1) + + x = x + res + res = x + x = torch.cat([x, a2], dim=2) + x, _ = self.rnn2(x, h2) + + x = x + res + x = torch.cat([x, a3], dim=2) + x = F.relu(self.fc1(x)) + + x = torch.cat([x, a4], dim=2) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + def generate(self, mels, batched, target, overlap, mu_law, progress_callback=None): + mu_law = mu_law if self.mode == 'RAW' else False + progress_callback = progress_callback or self.gen_display + + self.eval() + output = [] + start = time.time() + rnn1 = self.get_gru_cell(self.rnn1) + rnn2 = self.get_gru_cell(self.rnn2) + + with torch.no_grad(): + if torch.cuda.is_available(): + mels = mels.cuda() + else: + mels = mels.cpu() + wave_len = (mels.size(-1) - 1) * self.hop_length + mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both') + mels, aux = self.upsample(mels.transpose(1, 2)) + + if batched: + mels = self.fold_with_overlap(mels, target, overlap) + aux = self.fold_with_overlap(aux, target, overlap) + + b_size, seq_len, _ = mels.size() + + if torch.cuda.is_available(): + h1 = torch.zeros(b_size, self.rnn_dims).cuda() + h2 = torch.zeros(b_size, self.rnn_dims).cuda() + x = torch.zeros(b_size, 1).cuda() + else: + h1 = torch.zeros(b_size, self.rnn_dims).cpu() + h2 = torch.zeros(b_size, self.rnn_dims).cpu() + x = torch.zeros(b_size, 1).cpu() + + d = self.aux_dims + aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)] + + for i in range(seq_len): 
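+                # One output sample per timestep: condition on the upsampled mel frame and
+                # the four aux-feature splits, update both GRU cells, then sample the next
+                # value from the predicted distribution (MOL or softmax over raw bits).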
+ + m_t = mels[:, i, :] + + a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split) + + x = torch.cat([x, m_t, a1_t], dim=1) + x = self.I(x) + h1 = rnn1(x, h1) + + x = x + h1 + inp = torch.cat([x, a2_t], dim=1) + h2 = rnn2(inp, h2) + + x = x + h2 + x = torch.cat([x, a3_t], dim=1) + x = F.relu(self.fc1(x)) + + x = torch.cat([x, a4_t], dim=1) + x = F.relu(self.fc2(x)) + + logits = self.fc3(x) + + if self.mode == 'MOL': + sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2)) + output.append(sample.view(-1)) + if torch.cuda.is_available(): + # x = torch.FloatTensor([[sample]]).cuda() + x = sample.transpose(0, 1).cuda() + else: + x = sample.transpose(0, 1) + + elif self.mode == 'RAW' : + posterior = F.softmax(logits, dim=1) + distrib = torch.distributions.Categorical(posterior) + + sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1. + output.append(sample) + x = sample.unsqueeze(-1) + else: + raise RuntimeError("Unknown model mode value - ", self.mode) + + if i % 100 == 0: + gen_rate = (i + 1) / (time.time() - start) * b_size / 1000 + progress_callback(i, seq_len, b_size, gen_rate) + + output = torch.stack(output).transpose(0, 1) + output = output.cpu().numpy() + output = output.astype(np.float64) + + if batched: + output = self.xfade_and_unfold(output, target, overlap) + else: + output = output[0] + + if mu_law: + output = decode_mu_law(output, self.n_classes, False) + if hp.apply_preemphasis: + output = de_emphasis(output) + + # Fade-out at the end to avoid signal cutting out suddenly + fade_out = np.linspace(1, 0, 20 * self.hop_length) + output = output[:wave_len] + output[-20 * self.hop_length:] *= fade_out + + self.train() + + return output + + + def gen_display(self, i, seq_len, b_size, gen_rate): + pbar = progbar(i, seq_len) + msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | ' + stream(msg) + + def get_gru_cell(self, gru): + gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size) + gru_cell.weight_hh.data = gru.weight_hh_l0.data + gru_cell.weight_ih.data = gru.weight_ih_l0.data + gru_cell.bias_hh.data = gru.bias_hh_l0.data + gru_cell.bias_ih.data = gru.bias_ih_l0.data + return gru_cell + + def pad_tensor(self, x, pad, side='both'): + # NB - this is just a quick method i need right now + # i.e., it won't generalise to other shapes/dims + b, t, c = x.size() + total = t + 2 * pad if side == 'both' else t + pad + if torch.cuda.is_available(): + padded = torch.zeros(b, total, c).cuda() + else: + padded = torch.zeros(b, total, c).cpu() + if side == 'before' or side == 'both': + padded[:, pad:pad + t, :] = x + elif side == 'after': + padded[:, :t, :] = x + return padded + + def fold_with_overlap(self, x, target, overlap): + + ''' Fold the tensor with overlap for quick batched inference. + Overlap will be used for crossfading in xfade_and_unfold() + + Args: + x (tensor) : Upsampled conditioning features. + shape=(1, timesteps, features) + target (int) : Target timesteps for each index of batch + overlap (int) : Timesteps for both xfade and rnn warmup + + Return: + (tensor) : shape=(num_folds, target + 2 * overlap, features) + + Details: + x = [[h1, h2, ... 
hn]] + + Where each h is a vector of conditioning features + + Eg: target=2, overlap=1 with x.size(1)=10 + + folded = [[h1, h2, h3, h4], + [h4, h5, h6, h7], + [h7, h8, h9, h10]] + ''' + + _, total_len, features = x.size() + + # Calculate variables needed + num_folds = (total_len - overlap) // (target + overlap) + extended_len = num_folds * (overlap + target) + overlap + remaining = total_len - extended_len + + # Pad if some time steps poking out + if remaining != 0: + num_folds += 1 + padding = target + 2 * overlap - remaining + x = self.pad_tensor(x, padding, side='after') + + if torch.cuda.is_available(): + folded = torch.zeros(num_folds, target + 2 * overlap, features).cuda() + else: + folded = torch.zeros(num_folds, target + 2 * overlap, features).cpu() + + # Get the values for the folded tensor + for i in range(num_folds): + start = i * (target + overlap) + end = start + target + 2 * overlap + folded[i] = x[:, start:end, :] + + return folded + + def xfade_and_unfold(self, y, target, overlap): + + ''' Applies a crossfade and unfolds into a 1d array. + + Args: + y (ndarry) : Batched sequences of audio samples + shape=(num_folds, target + 2 * overlap) + dtype=np.float64 + overlap (int) : Timesteps for both xfade and rnn warmup + + Return: + (ndarry) : audio samples in a 1d array + shape=(total_len) + dtype=np.float64 + + Details: + y = [[seq1], + [seq2], + [seq3]] + + Apply a gain envelope at both ends of the sequences + + y = [[seq1_in, seq1_target, seq1_out], + [seq2_in, seq2_target, seq2_out], + [seq3_in, seq3_target, seq3_out]] + + Stagger and add up the groups of samples: + + [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...] + + ''' + + num_folds, length = y.shape + target = length - 2 * overlap + total_len = num_folds * (target + overlap) + overlap + + # Need some silence for the rnn warmup + silence_len = overlap // 2 + fade_len = overlap - silence_len + silence = np.zeros((silence_len), dtype=np.float64) + + # Equal power crossfade + t = np.linspace(-1, 1, fade_len, dtype=np.float64) + fade_in = np.sqrt(0.5 * (1 + t)) + fade_out = np.sqrt(0.5 * (1 - t)) + + # Concat the silence to the fades + fade_in = np.concatenate([silence, fade_in]) + fade_out = np.concatenate([fade_out, silence]) + + # Apply the gain to the overlap samples + y[:, :overlap] *= fade_in + y[:, -overlap:] *= fade_out + + unfolded = np.zeros((total_len), dtype=np.float64) + + # Loop to add up all the samples + for i in range(num_folds): + start = i * (target + overlap) + end = start + target + 2 * overlap + unfolded[start:end] += y[i] + + return unfolded + + def get_step(self) : + return self.step.data.item() + + def checkpoint(self, model_dir, optimizer) : + k_steps = self.get_step() // 1000 + self.save(model_dir.joinpath("checkpoint_%dk_steps.pt" % k_steps), optimizer) + + def log(self, path, msg) : + with open(path, 'a') as f: + print(msg, file=f) + + def load(self, path, optimizer) : + checkpoint = torch.load(path) + if "optimizer_state" in checkpoint: + self.load_state_dict(checkpoint["model_state"]) + optimizer.load_state_dict(checkpoint["optimizer_state"]) + else: + # Backwards compatibility + self.load_state_dict(checkpoint) + + def save(self, path, optimizer) : + torch.save({ + "model_state": self.state_dict(), + "optimizer_state": optimizer.state_dict(), + }, path) + + def num_params(self, print_out=True): + parameters = filter(lambda p: p.requires_grad, self.parameters()) + parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 + if print_out : + print('Trainable Parameters: 
%.3fM' % parameters) diff --git a/vocoder/train.py b/vocoder/train.py new file mode 100644 index 0000000..4912469 --- /dev/null +++ b/vocoder/train.py @@ -0,0 +1,127 @@ +from vocoder.models.fatchord_version import WaveRNN +from vocoder.vocoder_dataset import VocoderDataset, collate_vocoder +from vocoder.distribution import discretized_mix_logistic_loss +from vocoder.display import stream, simple_table +from vocoder.gen_wavernn import gen_testset +from torch.utils.data import DataLoader +from pathlib import Path +from torch import optim +import torch.nn.functional as F +import vocoder.hparams as hp +import numpy as np +import time +import torch + + +def train(run_id: str, syn_dir: Path, voc_dir: Path, models_dir: Path, ground_truth: bool, + save_every: int, backup_every: int, force_restart: bool): + # Check to make sure the hop length is correctly factorised + assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length + + # Instantiate the model + print("Initializing the model...") + model = WaveRNN( + rnn_dims=hp.voc_rnn_dims, + fc_dims=hp.voc_fc_dims, + bits=hp.bits, + pad=hp.voc_pad, + upsample_factors=hp.voc_upsample_factors, + feat_dims=hp.num_mels, + compute_dims=hp.voc_compute_dims, + res_out_dims=hp.voc_res_out_dims, + res_blocks=hp.voc_res_blocks, + hop_length=hp.hop_length, + sample_rate=hp.sample_rate, + mode=hp.voc_mode + ) + + if torch.cuda.is_available(): + model = model.cuda() + device = torch.device('cuda') + else: + device = torch.device('cpu') + + # Initialize the optimizer + optimizer = optim.Adam(model.parameters()) + for p in optimizer.param_groups: + p["lr"] = hp.voc_lr + loss_func = F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss + + # Load the weights + model_dir = models_dir.joinpath(run_id) + model_dir.mkdir(exist_ok=True) + weights_fpath = model_dir.joinpath(run_id + ".pt") + if force_restart or not weights_fpath.exists(): + print("\nStarting the training of WaveRNN from scratch\n") + model.save(weights_fpath, optimizer) + else: + print("\nLoading weights at %s" % weights_fpath) + model.load(weights_fpath, optimizer) + print("WaveRNN weights loaded from step %d" % model.step) + + # Initialize the dataset + metadata_fpath = syn_dir.joinpath("train.txt") if ground_truth else \ + voc_dir.joinpath("synthesized.txt") + mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath("mels_gta") + wav_dir = syn_dir.joinpath("audio") + dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir) + test_loader = DataLoader(dataset, + batch_size=1, + shuffle=True, + pin_memory=True) + + # Begin the training + simple_table([('Batch size', hp.voc_batch_size), + ('LR', hp.voc_lr), + ('Sequence Len', hp.voc_seq_len)]) + + for epoch in range(1, 350): + data_loader = DataLoader(dataset, + collate_fn=collate_vocoder, + batch_size=hp.voc_batch_size, + num_workers=2, + shuffle=True, + pin_memory=True) + start = time.time() + running_loss = 0. 
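+        # Batches come from collate_vocoder: x = float-encoded input samples,
+        # y = next-step targets, m = mel conditioning frames.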
+ + for i, (x, y, m) in enumerate(data_loader, 1): + if torch.cuda.is_available(): + x, m, y = x.cuda(), m.cuda(), y.cuda() + + # Forward pass + y_hat = model(x, m) + if model.mode == 'RAW': + y_hat = y_hat.transpose(1, 2).unsqueeze(-1) + elif model.mode == 'MOL': + y = y.float() + y = y.unsqueeze(-1) + + # Backward pass + loss = loss_func(y_hat, y) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + running_loss += loss.item() + speed = i / (time.time() - start) + avg_loss = running_loss / i + + step = model.get_step() + k = step // 1000 + + if backup_every != 0 and step % backup_every == 0 : + model.checkpoint(model_dir, optimizer) + + if save_every != 0 and step % save_every == 0 : + model.save(weights_fpath, optimizer) + + msg = f"| Epoch: {epoch} ({i}/{len(data_loader)}) | " \ + f"Loss: {avg_loss:.4f} | {speed:.1f} " \ + f"steps/s | Step: {k}k | " + stream(msg) + + + gen_testset(model, test_loader, hp.voc_gen_at_checkpoint, hp.voc_gen_batched, + hp.voc_target, hp.voc_overlap, model_dir) + print("") diff --git a/vocoder/vocoder_dataset.py b/vocoder/vocoder_dataset.py new file mode 100644 index 0000000..9eae1b5 --- /dev/null +++ b/vocoder/vocoder_dataset.py @@ -0,0 +1,84 @@ +from torch.utils.data import Dataset +from pathlib import Path +from vocoder import audio +import vocoder.hparams as hp +import numpy as np +import torch + + +class VocoderDataset(Dataset): + def __init__(self, metadata_fpath: Path, mel_dir: Path, wav_dir: Path): + print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, wav_dir)) + + with metadata_fpath.open("r") as metadata_file: + metadata = [line.split("|") for line in metadata_file] + + gta_fnames = [x[1] for x in metadata if int(x[4])] + gta_fpaths = [mel_dir.joinpath(fname) for fname in gta_fnames] + wav_fnames = [x[0] for x in metadata if int(x[4])] + wav_fpaths = [wav_dir.joinpath(fname) for fname in wav_fnames] + self.samples_fpaths = list(zip(gta_fpaths, wav_fpaths)) + + print("Found %d samples" % len(self.samples_fpaths)) + + def __getitem__(self, index): + mel_path, wav_path = self.samples_fpaths[index] + + # Load the mel spectrogram and adjust its range to [-1, 1] + mel = np.load(mel_path).T.astype(np.float32) / hp.mel_max_abs_value + + # Load the wav + wav = np.load(wav_path) + if hp.apply_preemphasis: + wav = audio.pre_emphasis(wav) + wav = np.clip(wav, -1, 1) + + # Fix for missing padding # TODO: settle on whether this is any useful + r_pad = (len(wav) // hp.hop_length + 1) * hp.hop_length - len(wav) + wav = np.pad(wav, (0, r_pad), mode='constant') + assert len(wav) >= mel.shape[1] * hp.hop_length + wav = wav[:mel.shape[1] * hp.hop_length] + assert len(wav) % hp.hop_length == 0 + + # Quantize the wav + if hp.voc_mode == 'RAW': + if hp.mu_law: + quant = audio.encode_mu_law(wav, mu=2 ** hp.bits) + else: + quant = audio.float_2_label(wav, bits=hp.bits) + elif hp.voc_mode == 'MOL': + quant = audio.float_2_label(wav, bits=16) + + return mel.astype(np.float32), quant.astype(np.int64) + + def __len__(self): + return len(self.samples_fpaths) + + +def collate_vocoder(batch): + mel_win = hp.voc_seq_len // hp.hop_length + 2 * hp.voc_pad + max_offsets = [x[0].shape[-1] -2 - (mel_win + 2 * hp.voc_pad) for x in batch] + mel_offsets = [np.random.randint(0, offset) for offset in max_offsets] + sig_offsets = [(offset + hp.voc_pad) * hp.hop_length for offset in mel_offsets] + + mels = [x[0][:, mel_offsets[i]:mel_offsets[i] + mel_win] for i, x in enumerate(batch)] + + labels = [x[1][sig_offsets[i]:sig_offsets[i] + hp.voc_seq_len + 1] for 
i, x in enumerate(batch)] + + mels = np.stack(mels).astype(np.float32) + labels = np.stack(labels).astype(np.int64) + + mels = torch.tensor(mels) + labels = torch.tensor(labels).long() + + x = labels[:, :hp.voc_seq_len] + y = labels[:, 1:] + + bits = 16 if hp.voc_mode == 'MOL' else hp.bits + + x = audio.label_2_float(x.float(), bits) + + if hp.voc_mode == 'MOL' : + y = audio.label_2_float(y.float(), bits) + + return x, y, mels \ No newline at end of file diff --git a/vocoder_preprocess.py b/vocoder_preprocess.py new file mode 100644 index 0000000..0828d72 --- /dev/null +++ b/vocoder_preprocess.py @@ -0,0 +1,59 @@ +from synthesizer.synthesize import run_synthesis +from synthesizer.hparams import hparams +from utils.argutils import print_args +import argparse +import os + + +if __name__ == "__main__": + class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): + pass + + parser = argparse.ArgumentParser( + description="Creates ground-truth aligned (GTA) spectrograms from the vocoder.", + formatter_class=MyFormatter + ) + parser.add_argument("datasets_root", type=str, help=\ + "Path to the directory containing your SV2TTS directory. If you specify both --in_dir and " + "--out_dir, this argument won't be used.") + parser.add_argument("--model_dir", type=str, + default="synthesizer/saved_models/pretrained/", help=\ + "Path to the pretrained model directory.") + parser.add_argument("-i", "--in_dir", type=str, default=argparse.SUPPRESS, help= \ + "Path to the synthesizer directory that contains the mel spectrograms, the wavs and the " + "embeds. Defaults to /SV2TTS/synthesizer/.") + parser.add_argument("-o", "--out_dir", type=str, default=argparse.SUPPRESS, help= \ + "Path to the output vocoder directory that will contain the ground truth aligned mel " + "spectrograms. Defaults to /SV2TTS/vocoder/.") + parser.add_argument("--hparams", default="", + help="Hyperparameter overrides as a comma-separated list of name=value " + "pairs") + parser.add_argument("--no_trim", action="store_true", help=\ + "Preprocess audio without trimming silences (not recommended).") + parser.add_argument("--cpu", action="store_true", help=\ + "If True, processing is done on CPU, even when a GPU is available.") + args = parser.parse_args() + print_args(args, parser) + modified_hp = hparams.parse(args.hparams) + + if not hasattr(args, "in_dir"): + args.in_dir = os.path.join(args.datasets_root, "SV2TTS", "synthesizer") + if not hasattr(args, "out_dir"): + args.out_dir = os.path.join(args.datasets_root, "SV2TTS", "vocoder") + + if args.cpu: + # Hide GPUs from Pytorch to force CPU processing + os.environ["CUDA_VISIBLE_DEVICES"] = "" + + # Verify webrtcvad is available + if not args.no_trim: + try: + import webrtcvad + except: + raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " + "noise removal and is recommended. Please install and try again. 
If installation fails, " + "use --no_trim to disable this error message.") + del args.no_trim + + run_synthesis(args.in_dir, args.out_dir, args.model_dir, modified_hp) + diff --git a/vocoder_train.py b/vocoder_train.py new file mode 100644 index 0000000..d712ffa --- /dev/null +++ b/vocoder_train.py @@ -0,0 +1,56 @@ +from utils.argutils import print_args +from vocoder.train import train +from pathlib import Path +import argparse + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Trains the vocoder from the synthesizer audios and the GTA synthesized mels, " + "or ground truth mels.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument("run_id", type=str, help= \ + "Name for this model instance. If a model state from the same run ID was previously " + "saved, the training will restart from there. Pass -f to overwrite saved states and " + "restart from scratch.") + parser.add_argument("datasets_root", type=str, help= \ + "Path to the directory containing your SV2TTS directory. Specifying --syn_dir or --voc_dir " + "will take priority over this argument.") + parser.add_argument("--syn_dir", type=str, default=argparse.SUPPRESS, help= \ + "Path to the synthesizer directory that contains the ground truth mel spectrograms, " + "the wavs and the embeds. Defaults to /SV2TTS/synthesizer/.") + parser.add_argument("--voc_dir", type=str, default=argparse.SUPPRESS, help= \ + "Path to the vocoder directory that contains the GTA synthesized mel spectrograms. " + "Defaults to /SV2TTS/vocoder/. Unused if --ground_truth is passed.") + parser.add_argument("-m", "--models_dir", type=str, default="vocoder/saved_models/", help=\ + "Path to the directory that will contain the saved model weights, as well as backups " + "of those weights and wavs generated during training.") + parser.add_argument("-g", "--ground_truth", action="store_true", help= \ + "Train on ground truth spectrograms (/SV2TTS/synthesizer/mels).") + parser.add_argument("-s", "--save_every", type=int, default=1000, help= \ + "Number of steps between updates of the model on the disk. Set to 0 to never save the " + "model.") + parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \ + "Number of steps between backups of the model. Set to 0 to never make backups of the " + "model.") + parser.add_argument("-f", "--force_restart", action="store_true", help= \ + "Do not load any saved model and restart from scratch.") + args = parser.parse_args() + + # Process the arguments + if not hasattr(args, "syn_dir"): + args.syn_dir = Path(args.datasets_root, "SV2TTS", "synthesizer") + args.syn_dir = Path(args.syn_dir) + if not hasattr(args, "voc_dir"): + args.voc_dir = Path(args.datasets_root, "SV2TTS", "vocoder") + args.voc_dir = Path(args.voc_dir) + del args.datasets_root + args.models_dir = Path(args.models_dir) + args.models_dir.mkdir(exist_ok=True) + + # Run the training + print_args(args, parser) + train(**vars(args)) + \ No newline at end of file
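For reference, the intended workflow with the new entry points appears to be: run python vocoder_preprocess.py <datasets_root> to write ground-truth aligned (GTA) mel spectrograms under <datasets_root>/SV2TTS/vocoder using the pretrained synthesizer, then python vocoder_train.py <run_id> <datasets_root> to train WaveRNN on them (or pass -g to train directly on the ground-truth mels under SV2TTS/synthesizer/mels). Note that vocoder/train.py asserts np.cumprod(voc_upsample_factors)[-1] == hop_length, so the default factors (5, 5, 8) in vocoder/hparams.py assume a synthesizer hop_size of 200 samples.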
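Since voc_mode='RAW' relies on mu-law companding of the 9-bit signal (see vocoder/hparams.py and vocoder/audio.py), here is a minimal round-trip sketch of the helpers added above; it only assumes the repository root is on the Python path and the listed requirements are installed:

import numpy as np
from vocoder.audio import encode_mu_law, decode_mu_law

bits = 9                    # matches bits in vocoder/hparams.py (voc_mode='RAW')
mu = 2 ** bits              # 512 quantisation levels

wav = np.linspace(-1.0, 1.0, 5)            # toy waveform in [-1, 1]
labels = encode_mu_law(wav, mu=mu)         # labels in [0, 2**bits - 1]
recovered = decode_mu_law(labels, mu, from_labels=True)

print(labels)
print(np.abs(recovered - wav).max())       # small quantisation error (< 1e-2 here)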
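And a minimal inference sketch using vocoder/inference.py, assuming a checkpoint saved by vocoder/train.py and a mel spectrogram .npy stored in the synthesizer's (frames, num_mels) layout; the file names below are placeholders, not paths shipped with this patch:

import numpy as np
from vocoder import inference as vocoder
from vocoder.audio import save_wav

# Placeholder paths -- substitute a real WaveRNN checkpoint and mel spectrogram.
weights_fpath = "vocoder/saved_models/my_run/my_run.pt"
mel = np.load("example_mel.npy").T.astype(np.float32)   # -> (num_mels, frames)

vocoder.load_model(weights_fpath)
wav = vocoder.infer_waveform(mel)        # batched generation with default target/overlap
save_wav(wav, "vocoder_out.wav")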