from utils.argutils import print_args
from models.vocoder.wavernn.train import train
from models.vocoder.hifigan.train import train as train_hifigan
from models.vocoder.fregan.train import train as train_fregan
from utils.util import AttrDict
from pathlib import Path
import argparse
import json
import torch
import torch.multiprocessing as mp
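
# Example invocation (the script filename below is illustrative; substitute the
# actual name of this file in your checkout):
#
#   python vocoder_train.py my_run <datasets_root> hifigan
#
# Model weights, periodic backups and wavs generated during training are written
# under --models_dir.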


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Trains the vocoder from the synthesizer audios and the GTA synthesized mels, "
        "or ground truth mels.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument("run_id", type=str, help= \
        "Name for this model instance. If a model state from the same run ID was previously "
        "saved, the training will restart from there. Pass -f to overwrite saved states and "
        "restart from scratch.")
    parser.add_argument("datasets_root", type=str, help= \
        "Path to the directory containing your SV2TTS directory. Specifying --syn_dir or --voc_dir "
        "will take priority over this argument.")
    parser.add_argument("vocoder_type", type=str, nargs="?", default="wavernn", help= \
        "Vocoder type to train. One of <wavernn>, <hifigan> or <fregan>. "
        "Defaults to wavernn.")
    parser.add_argument("--syn_dir", type=str, default=argparse.SUPPRESS, help= \
        "Path to the synthesizer directory that contains the ground truth mel spectrograms, "
        "the wavs and the embeds. Defaults to <datasets_root>/SV2TTS/synthesizer/.")
    parser.add_argument("--voc_dir", type=str, default=argparse.SUPPRESS, help= \
        "Path to the vocoder directory that contains the GTA synthesized mel spectrograms. "
        "Defaults to <datasets_root>/SV2TTS/vocoder/. Unused if --ground_truth is passed.")
    parser.add_argument("-m", "--models_dir", type=str, default="vocoder/saved_models/", help= \
        "Path to the directory that will contain the saved model weights, as well as backups "
        "of those weights and wavs generated during training.")
    parser.add_argument("-g", "--ground_truth", action="store_true", help= \
        "Train on ground truth spectrograms (<datasets_root>/SV2TTS/synthesizer/mels).")
    parser.add_argument("-s", "--save_every", type=int, default=1000, help= \
        "Number of steps between updates of the model on the disk. Set to 0 to never save the "
        "model.")
    parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \
        "Number of steps between backups of the model. Set to 0 to never make backups of the "
        "model.")
    parser.add_argument("-f", "--force_restart", action="store_true", help= \
        "Do not load any saved model and restart from scratch.")
    parser.add_argument("--config", type=str, default="vocoder/hifigan/config_16k_.json", help= \
        "Path to the hifigan training config JSON. Only used when vocoder_type is hifigan.")

    args = parser.parse_args()
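
    # --syn_dir and --voc_dir default to argparse.SUPPRESS, so the attributes only
    # exist on `args` when the flags were passed; otherwise fall back to the standard
    # <datasets_root>/SV2TTS/ layout before dropping datasets_root itself.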
    if not hasattr(args, "syn_dir"):
        args.syn_dir = Path(args.datasets_root, "SV2TTS", "synthesizer")
    args.syn_dir = Path(args.syn_dir)
    if not hasattr(args, "voc_dir"):
        args.voc_dir = Path(args.datasets_root, "SV2TTS", "vocoder")
    args.voc_dir = Path(args.voc_dir)
    del args.datasets_root
    args.models_dir = Path(args.models_dir)
    args.models_dir.mkdir(exist_ok=True)

    print_args(args, parser)

    # Process the arguments
    if args.vocoder_type == "wavernn":
        # Run the WaveRNN training. vocoder_type and config are stripped here because
        # the remaining args are forwarded to train() as keyword arguments.
        delattr(args, 'vocoder_type')
        delattr(args, 'config')
        train(**vars(args))
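
    # hifigan and fregan share the same entry point signature (rank, args, hparams).
    # The config JSON is expected to provide at least num_gpus and batch_size, the
    # only fields read here; an illustrative shape (other fields are whatever the
    # trainer itself requires):
    #   { "num_gpus": 1, "batch_size": 16, ... }
    # With num_gpus > 1 the trainer is launched once per visible GPU via
    # torch.multiprocessing.spawn, each process getting an equal share of the
    # configured batch size; otherwise it runs in-process as rank 0.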
    elif args.vocoder_type == "hifigan":
        with open(args.config) as f:
            json_config = json.load(f)
        h = AttrDict(json_config)
        if h.num_gpus > 1:
            h.num_gpus = torch.cuda.device_count()
            h.batch_size = int(h.batch_size / h.num_gpus)
            print('Batch size per GPU:', h.batch_size)
            mp.spawn(train_hifigan, nprocs=h.num_gpus, args=(args, h,))
        else:
            train_hifigan(0, args, h)
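
    # Note: fregan ignores --config and always reads its hyperparameters from the
    # hard-coded vocoder/fregan/config.json path.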
    elif args.vocoder_type == "fregan":
        with Path('vocoder/fregan/config.json').open() as f:
            json_config = json.load(f)
        h = AttrDict(json_config)
        if h.num_gpus > 1:
            h.num_gpus = torch.cuda.device_count()
            h.batch_size = int(h.batch_size / h.num_gpus)
            print('Batch size per GPU:', h.batch_size)
            mp.spawn(train_fregan, nprocs=h.num_gpus, args=(args, h,))
        else:
            train_fregan(0, args, h)