Allow training the encoder

This commit is contained in:
babysor00 2021-10-01 00:01:33 +08:00
parent cb82fcfe58
commit 790d11a58b
5 changed files with 16 additions and 14 deletions

4
.gitignore vendored
View File

@ -17,5 +17,7 @@
*.sh *.sh
synthesizer/saved_models/* synthesizer/saved_models/*
vocoder/saved_models/* vocoder/saved_models/*
encoder/saved_models/*
cp_hifigan/* cp_hifigan/*
!vocoder/saved_models/pretrained/* !vocoder/saved_models/pretrained/*
!encoder/saved_models/pretrained.pt

View File

@ -117,6 +117,15 @@ def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir,
logger.finalize() logger.finalize()
print("Done preprocessing %s.\n" % dataset_name) print("Done preprocessing %s.\n" % dataset_name)
def preprocess_aidatatang_200zh(datasets_root: Path, out_dir: Path, skip_existing=False):
    """Preprocess the aidatatang_200zh corpus for speaker-encoder training.

    Every subdirectory of <dataset_root>/corpus/train is treated as one speaker;
    the audio files (wav) of each speaker are turned into mel spectrograms and
    written to out_dir by the shared speaker-dir pipeline.
    """
    name = "aidatatang_200zh"
    root, logger = _init_preprocess_dataset(name, datasets_root, out_dir)
    if root:
        # One entry per speaker directory under corpus/train
        speakers = list(root.joinpath("corpus", "train").glob("*"))
        _preprocess_speaker_dirs(speakers, name, datasets_root, out_dir, "wav",
                                 skip_existing, logger)
def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False): def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False):
for dataset_name in librispeech_datasets["train"]["other"]: for dataset_name in librispeech_datasets["train"]["other"]:

Binary file not shown.

View File

@ -1,4 +1,4 @@
from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1, preprocess_voxceleb2 from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1, preprocess_voxceleb2, preprocess_aidatatang_200zh
from utils.argutils import print_args from utils.argutils import print_args
from pathlib import Path from pathlib import Path
import argparse import argparse
@ -10,17 +10,7 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Preprocesses audio files from datasets, encodes them as mel spectrograms and " description="Preprocesses audio files from datasets, encodes them as mel spectrograms and "
"writes them to the disk. This will allow you to train the encoder. The " "writes them to the disk. This will allow you to train the encoder. The "
"datasets required are at least one of VoxCeleb1, VoxCeleb2 and LibriSpeech. " "datasets required are at least one of LibriSpeech, VoxCeleb1, VoxCeleb2, aidatatang_200zh. ",
"Ideally, you should have all three. You should extract them as they are "
"after having downloaded them and put them in a same directory, e.g.:\n"
"-[datasets_root]\n"
" -LibriSpeech\n"
" -train-other-500\n"
" -VoxCeleb1\n"
" -wav\n"
" -vox1_meta.csv\n"
" -VoxCeleb2\n"
" -dev",
formatter_class=MyFormatter formatter_class=MyFormatter
) )
parser.add_argument("datasets_root", type=Path, help=\ parser.add_argument("datasets_root", type=Path, help=\
@ -29,7 +19,7 @@ if __name__ == "__main__":
"Path to the output directory that will contain the mel spectrograms. If left out, " "Path to the output directory that will contain the mel spectrograms. If left out, "
"defaults to <datasets_root>/SV2TTS/encoder/") "defaults to <datasets_root>/SV2TTS/encoder/")
parser.add_argument("-d", "--datasets", type=str, parser.add_argument("-d", "--datasets", type=str,
default="librispeech_other,voxceleb1,voxceleb2", help=\ default="librispeech_other,voxceleb1,aidatatang_200zh", help=\
"Comma-separated list of the name of the datasets you want to preprocess. Only the train " "Comma-separated list of the name of the datasets you want to preprocess. Only the train "
"set of these datasets will be used. Possible names: librispeech_other, voxceleb1, " "set of these datasets will be used. Possible names: librispeech_other, voxceleb1, "
"voxceleb2.") "voxceleb2.")
@ -63,6 +53,7 @@ if __name__ == "__main__":
"librispeech_other": preprocess_librispeech, "librispeech_other": preprocess_librispeech,
"voxceleb1": preprocess_voxceleb1, "voxceleb1": preprocess_voxceleb1,
"voxceleb2": preprocess_voxceleb2, "voxceleb2": preprocess_voxceleb2,
"aidatatang_200zh": preprocess_aidatatang_200zh,
} }
args = vars(args) args = vars(args)
for dataset in args.pop("datasets"): for dataset in args.pop("datasets"):