forked from svc-develop-team/so-vits-svc
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrain_diff.py
80 lines (66 loc) · 2.97 KB
/
train_diff.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import argparse, os
import torch
from loguru import logger
from torch.optim import lr_scheduler
from diffusion.data_loaders import get_data_loaders
from diffusion.logger import utils
from diffusion.solver import train
from diffusion.unit2mel import Unit2Mel
from diffusion.vocoder import Vocoder
import configs
def parse_args(args=None, namespace=None):
    """Build the CLI parser and return the parsed arguments.

    Args:
        args: optional argv-style list; defaults to sys.argv[1:].
        namespace: optional Namespace to populate in place.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--speaker", type=str, default="", help="speaker name")
    parser.add_argument("--path", type=str, default=configs.data_dir, help="")
    return parser.parse_args(args=args, namespace=namespace)
if __name__ == '__main__':
    # Parse command-line options; a speaker name is mandatory.
    cmd = parse_args()
    if cmd.speaker == "":
        raise Exception("type speaker")
    # Per-speaker diffusion config: <path>/<speaker>/configs/diffusion.yaml
    config_path = os.path.join(cmd.path, cmd.speaker, "configs", "diffusion.yaml")
    # load config
    args = utils.load_config(config_path)
    logger.info(' > config:'+ config_path)
    logger.info(' > exp:'+ args.env.expdir)
    # Create the experiment directory and seed it with the pretrained
    # checkpoint when no model_0.pt exists there yet.
    if not os.path.exists(args.env.expdir):
        os.makedirs(args.env.expdir)
    if not os.path.exists(os.path.join(args.env.expdir, "model_0.pt")):
        import shutil
        shutil.copyfile("pretrain/model_0.pt", os.path.join(args.env.expdir, "model_0.pt"))
    # load vocoder (its mel dimension feeds the model below)
    vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=args.device)
    # load model
    model = Unit2Mel(
        args.data.encoder_out_channels,
        args.model.n_spk,
        args.model.use_pitch_aug,
        vocoder.dimension,
        args.model.n_layers,
        args.model.n_chans,
        args.model.n_hidden,
        args.model.timesteps,
        args.model.k_step_max
    )
    logger.info(f' > Now model timesteps is {model.timesteps}, and k_step_max is {model.k_step_max}')
    # Load parameters; utils.load_model restores any saved checkpoint and
    # returns the step count so training can resume where it left off.
    optimizer = torch.optim.AdamW(model.parameters())
    initial_global_step, model, optimizer = utils.load_model(args.env.expdir, model, optimizer, device=args.device)
    # Re-apply the LR as if StepLR had already decayed it for
    # initial_global_step steps (gamma per decay_step).
    # NOTE(review): the -2 offset matches last_epoch below — confirm intent.
    for param_group in optimizer.param_groups:
        param_group['initial_lr'] = args.train.lr
        param_group['lr'] = args.train.lr * (args.train.gamma ** max(((initial_global_step-2)//args.train.decay_step),0) )
        param_group['weight_decay'] = args.train.weight_decay
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.train.decay_step, gamma=args.train.gamma,last_epoch=initial_global_step-2)
    # device: move the model and all restored optimizer state tensors
    # (e.g. AdamW moment buffers) to the configured device.
    if args.device == 'cuda':
        torch.cuda.set_device(args.env.gpu_id)
    model.to(args.device)
    for state in optimizer.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.to(args.device)
    # datas: build train/validation loaders from the config
    loader_train, loader_valid = get_data_loaders(args, whole_audio=False)
    # run the training loop
    train(args, initial_global_step, model, optimizer, scheduler, vocoder, loader_train, loader_valid)