| import math |
| import logging |
| from typing import Optional |
|
|
| logger = logging.getLogger(__name__) |
|
|
| import numpy as np |
| import torch |
| from torch import nn |
| from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d |
| from torch.nn import functional as F |
| from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm |
|
|
| from infer.lib.infer_pack import attentions, commons, modules |
| from infer.lib.infer_pack.commons import get_padding, init_weights |
|
|
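# True when an Intel XPU device is available; used in DiscriminatorP below to
# route bfloat16 inputs through a float16 reflection-pad workaround.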
| has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available()) |
|
|
|
|
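# Encodes frame-level content features (256- or 768-dim in the synthesizers below,
# presumably produced by an upstream speech representation model) plus optional
# coarse pitch into the prior statistics (m, logs) consumed by the flow.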
| class TextEncoder(nn.Module): |
| def __init__( |
| self, |
| in_channels, |
| out_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| f0=True, |
| ): |
| super(TextEncoder, self).__init__() |
| self.out_channels = out_channels |
| self.hidden_channels = hidden_channels |
| self.filter_channels = filter_channels |
| self.n_heads = n_heads |
| self.n_layers = n_layers |
| self.kernel_size = kernel_size |
| self.p_dropout = float(p_dropout) |
| self.emb_phone = nn.Linear(in_channels, hidden_channels) |
| self.lrelu = nn.LeakyReLU(0.1, inplace=True) |
        if f0:
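            # coarse pitch is expected as integer bins in [0, 255] (see the 256-entry embedding)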
| self.emb_pitch = nn.Embedding(256, hidden_channels) |
| self.encoder = attentions.Encoder( |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| float(p_dropout), |
| ) |
| self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) |
|
|
| def forward( |
| self, |
| phone: torch.Tensor, |
| pitch: torch.Tensor, |
| lengths: torch.Tensor, |
| skip_head: Optional[torch.Tensor] = None, |
| ): |
| if pitch is None: |
| x = self.emb_phone(phone) |
| else: |
| x = self.emb_phone(phone) + self.emb_pitch(pitch) |
| x = x * math.sqrt(self.hidden_channels) |
| x = self.lrelu(x) |
| x = torch.transpose(x, 1, -1) |
| x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( |
| x.dtype |
| ) |
| x = self.encoder(x * x_mask, x_mask) |
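        # optionally drop the first `skip_head` frames from the encoder output and its mask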
| if skip_head is not None: |
| assert isinstance(skip_head, torch.Tensor) |
| head = int(skip_head.item()) |
| x = x[:, :, head:] |
| x_mask = x_mask[:, :, head:] |
| stats = self.proj(x) * x_mask |
| m, logs = torch.split(stats, self.out_channels, dim=1) |
| return m, logs, x_mask |
|
|
|
|
| class ResidualCouplingBlock(nn.Module): |
| def __init__( |
| self, |
| channels, |
| hidden_channels, |
| kernel_size, |
| dilation_rate, |
| n_layers, |
| n_flows=4, |
| gin_channels=0, |
| ): |
| super(ResidualCouplingBlock, self).__init__() |
| self.channels = channels |
| self.hidden_channels = hidden_channels |
| self.kernel_size = kernel_size |
| self.dilation_rate = dilation_rate |
| self.n_layers = n_layers |
| self.n_flows = n_flows |
| self.gin_channels = gin_channels |
|
|
| self.flows = nn.ModuleList() |
| for i in range(n_flows): |
| self.flows.append( |
| modules.ResidualCouplingLayer( |
| channels, |
| hidden_channels, |
| kernel_size, |
| dilation_rate, |
| n_layers, |
| gin_channels=gin_channels, |
| mean_only=True, |
| ) |
| ) |
| self.flows.append(modules.Flip()) |
|
|
| def forward( |
| self, |
| x: torch.Tensor, |
| x_mask: torch.Tensor, |
| g: Optional[torch.Tensor] = None, |
| reverse: bool = False, |
| ): |
| if not reverse: |
| for flow in self.flows: |
| x, _ = flow(x, x_mask, g=g, reverse=reverse) |
| else: |
| for flow in self.flows[::-1]: |
| x, _ = flow.forward(x, x_mask, g=g, reverse=reverse) |
| return x |
|
|
| def remove_weight_norm(self): |
| for i in range(self.n_flows): |
| self.flows[i * 2].remove_weight_norm() |
|
|
| def __prepare_scriptable__(self): |
| for i in range(self.n_flows): |
| for hook in self.flows[i * 2]._forward_pre_hooks.values(): |
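                # weight_norm registers a WeightNorm forward-pre-hook; that class is not
                # part of the public API, so it is matched by module path and class name
                # rather than with isinstance()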
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.flows[i * 2]) |
|
|
| return self |
|
|
|
|
| class PosteriorEncoder(nn.Module): |
| def __init__( |
| self, |
| in_channels, |
| out_channels, |
| hidden_channels, |
| kernel_size, |
| dilation_rate, |
| n_layers, |
| gin_channels=0, |
| ): |
| super(PosteriorEncoder, self).__init__() |
| self.in_channels = in_channels |
| self.out_channels = out_channels |
| self.hidden_channels = hidden_channels |
| self.kernel_size = kernel_size |
| self.dilation_rate = dilation_rate |
| self.n_layers = n_layers |
| self.gin_channels = gin_channels |
|
|
| self.pre = nn.Conv1d(in_channels, hidden_channels, 1) |
| self.enc = modules.WN( |
| hidden_channels, |
| kernel_size, |
| dilation_rate, |
| n_layers, |
| gin_channels=gin_channels, |
| ) |
| self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) |
|
|
| def forward( |
| self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None |
| ): |
| x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( |
| x.dtype |
| ) |
| x = self.pre(x) * x_mask |
| x = self.enc(x, x_mask, g=g) |
| stats = self.proj(x) * x_mask |
| m, logs = torch.split(stats, self.out_channels, dim=1) |
| z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask |
| return z, m, logs, x_mask |
|
|
| def remove_weight_norm(self): |
| self.enc.remove_weight_norm() |
|
|
| def __prepare_scriptable__(self): |
| for hook in self.enc._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.enc) |
| return self |
|
|
|
|
| class Generator(torch.nn.Module): |
| def __init__( |
| self, |
| initial_channel, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| gin_channels=0, |
| ): |
| super(Generator, self).__init__() |
| self.num_kernels = len(resblock_kernel_sizes) |
| self.num_upsamples = len(upsample_rates) |
| self.conv_pre = Conv1d( |
| initial_channel, upsample_initial_channel, 7, 1, padding=3 |
| ) |
| resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 |
|
|
| self.ups = nn.ModuleList() |
| for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): |
| self.ups.append( |
| weight_norm( |
| ConvTranspose1d( |
| upsample_initial_channel // (2**i), |
| upsample_initial_channel // (2 ** (i + 1)), |
| k, |
| u, |
| padding=(k - u) // 2, |
| ) |
| ) |
| ) |
|
|
| self.resblocks = nn.ModuleList() |
| for i in range(len(self.ups)): |
| ch = upsample_initial_channel // (2 ** (i + 1)) |
| for j, (k, d) in enumerate( |
| zip(resblock_kernel_sizes, resblock_dilation_sizes) |
| ): |
| self.resblocks.append(resblock(ch, k, d)) |
|
|
| self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) |
| self.ups.apply(init_weights) |
|
|
| if gin_channels != 0: |
| self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) |
|
|
| def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None): |
| x = self.conv_pre(x) |
| if g is not None: |
| x = x + self.cond(g) |
|
|
| for i in range(self.num_upsamples): |
| x = F.leaky_relu(x, modules.LRELU_SLOPE) |
| x = self.ups[i](x) |
| xs = None |
| for j in range(self.num_kernels): |
| if xs is None: |
| xs = self.resblocks[i * self.num_kernels + j](x) |
| else: |
| xs += self.resblocks[i * self.num_kernels + j](x) |
| x = xs / self.num_kernels |
| x = F.leaky_relu(x) |
| x = self.conv_post(x) |
| x = torch.tanh(x) |
|
|
| return x |
|
|
| def __prepare_scriptable__(self): |
| for l in self.ups: |
| for hook in l._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(l) |
|
|
| for l in self.resblocks: |
| for hook in l._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(l) |
| return self |
|
|
| def remove_weight_norm(self): |
| for l in self.ups: |
| remove_weight_norm(l) |
| for l in self.resblocks: |
| l.remove_weight_norm() |
|
|
|
|
| class SineGen(torch.nn.Module): |
| """Definition of sine generator |
| SineGen(samp_rate, harmonic_num = 0, |
| sine_amp = 0.1, noise_std = 0.003, |
| voiced_threshold = 0, |
| flag_for_pulse=False) |
| samp_rate: sampling rate in Hz |
| harmonic_num: number of harmonic overtones (default 0) |
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
| Note: when flag_for_pulse is True, the first time step of a voiced |
| segment is always sin(torch.pi) or cos(0) |
| """ |
|
|
| def __init__( |
| self, |
| samp_rate, |
| harmonic_num=0, |
| sine_amp=0.1, |
| noise_std=0.003, |
| voiced_threshold=0, |
| flag_for_pulse=False, |
| ): |
| super(SineGen, self).__init__() |
| self.sine_amp = sine_amp |
| self.noise_std = noise_std |
| self.harmonic_num = harmonic_num |
| self.dim = self.harmonic_num + 1 |
| self.sampling_rate = samp_rate |
| self.voiced_threshold = voiced_threshold |
|
|
| def _f02uv(self, f0): |
| |
| uv = torch.ones_like(f0) |
| uv = uv * (f0 > self.voiced_threshold) |
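        # "privateuseone" is the device type used by the DirectML backend; force
        # float32 there as a backend workaround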
| if uv.device.type == "privateuseone": |
| uv = uv.float() |
| return uv |
|
|
| def forward(self, f0: torch.Tensor, upp: int): |
| """sine_tensor, uv = forward(f0) |
| input F0: tensor(batchsize=1, length, dim=1) |
| f0 for unvoiced steps should be 0 |
| output sine_tensor: tensor(batchsize=1, length, dim) |
| output uv: tensor(batchsize=1, length, 1) |
| """ |
| with torch.no_grad(): |
| f0 = f0[:, None].transpose(1, 2) |
| f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) |
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in range(self.harmonic_num):
                # the (idx + 2)-th harmonic of the fundamental
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
            # normalized frequency in cycles per sample; the integer part is dropped
            # since adding 2*pi*n to the phase does not change the sine
            rad_values = (f0_buf / self.sampling_rate) % 1
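            # random initial phase per harmonic; the fundamental (index 0) keeps a
            # zero phase offset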
| rand_ini = torch.rand( |
| f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device |
| ) |
| rand_ini[:, 0] = 0 |
| rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini |
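            # integrate frequency to phase: the frame-rate phase is upsampled to the
            # sample rate, wrap-around points are detected, and one full cycle is
            # subtracted at each wrap (cumsum_shift) so the accumulated phase stays
            # bounded without changing the resulting sine values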
            # phase in cycles at frame boundaries (each frame spans `upp` samples)
            tmp_over_one = torch.cumsum(rad_values, 1) * upp
| tmp_over_one = F.interpolate( |
| tmp_over_one.transpose(2, 1), |
| scale_factor=float(upp), |
| mode="linear", |
| align_corners=True, |
| ).transpose(2, 1) |
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=float(upp), mode="nearest"
            ).transpose(2, 1)
| tmp_over_one %= 1 |
| tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 |
| cumsum_shift = torch.zeros_like(rad_values) |
| cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 |
| sine_waves = torch.sin( |
| torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * torch.pi |
| ) |
| sine_waves = sine_waves * self.sine_amp |
| uv = self._f02uv(f0) |
| uv = F.interpolate( |
| uv.transpose(2, 1), scale_factor=float(upp), mode="nearest" |
| ).transpose(2, 1) |
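            # voiced samples get low-level noise (noise_std); unvoiced samples are
            # replaced by noise with amplitude sine_amp / 3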
| noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 |
| noise = noise_amp * torch.randn_like(sine_waves) |
| sine_waves = sine_waves * uv + noise |
| return sine_waves, uv, noise |
|
|
|
|
| class SourceModuleHnNSF(torch.nn.Module): |
| """SourceModule for hn-nsf |
| SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, |
| add_noise_std=0.003, voiced_threshod=0) |
| sampling_rate: sampling_rate in Hz |
| harmonic_num: number of harmonic above F0 (default: 0) |
| sine_amp: amplitude of sine source signal (default: 0.1) |
| add_noise_std: std of additive Gaussian noise (default: 0.003) |
| note that amplitude of noise in unvoiced is decided |
| by sine_amp |
    voiced_threshold: threshold to set U/V given F0 (default: 0)
| Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) |
| F0_sampled (batchsize, length, 1) |
| Sine_source (batchsize, length, 1) |
    noise_source (batchsize, length, 1)
| uv (batchsize, length, 1) |
| """ |
|
|
| def __init__( |
| self, |
| sampling_rate, |
| harmonic_num=0, |
| sine_amp=0.1, |
| add_noise_std=0.003, |
| voiced_threshod=0, |
| is_half=True, |
| ): |
| super(SourceModuleHnNSF, self).__init__() |
|
|
| self.sine_amp = sine_amp |
| self.noise_std = add_noise_std |
| self.is_half = is_half |
        # sine-based harmonic source generator
| self.l_sin_gen = SineGen( |
| sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod |
| ) |
|
|
        # merges the harmonic channels into a single excitation signal
| self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) |
| self.l_tanh = torch.nn.Tanh() |
| |
|
|
| def forward(self, x: torch.Tensor, upp: int = 1): |
        # build the harmonic excitation from F0
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        # match the dtype of the merging linear layer (e.g. fp16 when is_half is set)
        sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
| sine_merge = self.l_tanh(self.l_linear(sine_wavs)) |
| return sine_merge, None, None |
|
|
|
|
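# HiFi-GAN-style generator with an NSF (neural source-filter) excitation branch:
# a sine/harmonic source derived from F0 is injected at every upsampling stage
# through the noise_convs, which helps keep the synthesized pitch consistent.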
| class GeneratorNSF(torch.nn.Module): |
| def __init__( |
| self, |
| initial_channel, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| gin_channels, |
| sr, |
| is_half=False, |
| ): |
| super(GeneratorNSF, self).__init__() |
| self.num_kernels = len(resblock_kernel_sizes) |
| self.num_upsamples = len(upsample_rates) |
|
|
| self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates)) |
| self.m_source = SourceModuleHnNSF( |
| sampling_rate=sr, harmonic_num=0, is_half=is_half |
| ) |
| self.noise_convs = nn.ModuleList() |
| self.conv_pre = Conv1d( |
| initial_channel, upsample_initial_channel, 7, 1, padding=3 |
| ) |
| resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 |
|
|
| self.ups = nn.ModuleList() |
| for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): |
| c_cur = upsample_initial_channel // (2 ** (i + 1)) |
| self.ups.append( |
| weight_norm( |
| ConvTranspose1d( |
| upsample_initial_channel // (2**i), |
| upsample_initial_channel // (2 ** (i + 1)), |
| k, |
| u, |
| padding=(k - u) // 2, |
| ) |
| ) |
| ) |
| if i + 1 < len(upsample_rates): |
| stride_f0 = math.prod(upsample_rates[i + 1 :]) |
| self.noise_convs.append( |
| Conv1d( |
| 1, |
| c_cur, |
| kernel_size=stride_f0 * 2, |
| stride=stride_f0, |
| padding=stride_f0 // 2, |
| ) |
| ) |
| else: |
| self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) |
|
|
| self.resblocks = nn.ModuleList() |
| for i in range(len(self.ups)): |
| ch = upsample_initial_channel // (2 ** (i + 1)) |
| for j, (k, d) in enumerate( |
| zip(resblock_kernel_sizes, resblock_dilation_sizes) |
| ): |
| self.resblocks.append(resblock(ch, k, d)) |
|
|
| self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) |
| self.ups.apply(init_weights) |
|
|
| if gin_channels != 0: |
| self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) |
|
|
| self.upp = math.prod(upsample_rates) |
|
|
| self.lrelu_slope = modules.LRELU_SLOPE |
|
|
| def forward(self, x, f0, g: Optional[torch.Tensor] = None): |
| har_source, noi_source, uv = self.m_source(f0, self.upp) |
| har_source = har_source.transpose(1, 2) |
| x = self.conv_pre(x) |
| if g is not None: |
| x = x + self.cond(g) |
| |
| |
| for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)): |
| if i < self.num_upsamples: |
| x = F.leaky_relu(x, self.lrelu_slope) |
| x = ups(x) |
| x_source = noise_convs(har_source) |
| x = x + x_source |
| xs: Optional[torch.Tensor] = None |
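                # apply only the resblocks belonging to this upsample stage; iterating
                # the whole ModuleList (instead of indexing it) appears to be a
                # TorchScript-friendly formulation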
| l = [i * self.num_kernels + j for j in range(self.num_kernels)] |
| for j, resblock in enumerate(self.resblocks): |
| if j in l: |
| if xs is None: |
| xs = resblock(x) |
| else: |
| xs += resblock(x) |
| |
| |
| assert isinstance(xs, torch.Tensor) |
| x = xs / self.num_kernels |
| x = F.leaky_relu(x) |
| x = self.conv_post(x) |
| x = torch.tanh(x) |
| return x |
|
|
| def remove_weight_norm(self): |
| for l in self.ups: |
| remove_weight_norm(l) |
| for l in self.resblocks: |
| l.remove_weight_norm() |
|
|
| def __prepare_scriptable__(self): |
| for l in self.ups: |
| for hook in l._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(l) |
| for l in self.resblocks: |
            for hook in l._forward_pre_hooks.values():
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(l) |
| return self |
|
|
|
|
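# config sample-rate labels mapped to Hz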
| sr2sr = { |
| "32k": 32000, |
| "40k": 40000, |
| "48k": 48000, |
| } |
|
|
|
|
| class SynthesizerTrnMs256NSFsid(nn.Module): |
| def __init__( |
| self, |
| spec_channels, |
| segment_size, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| spk_embed_dim, |
| gin_channels, |
| sr, |
| **kwargs |
| ): |
| super(SynthesizerTrnMs256NSFsid, self).__init__() |
| if isinstance(sr, str): |
| sr = sr2sr[sr] |
| self.spec_channels = spec_channels |
| self.inter_channels = inter_channels |
| self.hidden_channels = hidden_channels |
| self.filter_channels = filter_channels |
| self.n_heads = n_heads |
| self.n_layers = n_layers |
| self.kernel_size = kernel_size |
| self.p_dropout = float(p_dropout) |
| self.resblock = resblock |
| self.resblock_kernel_sizes = resblock_kernel_sizes |
| self.resblock_dilation_sizes = resblock_dilation_sizes |
| self.upsample_rates = upsample_rates |
| self.upsample_initial_channel = upsample_initial_channel |
| self.upsample_kernel_sizes = upsample_kernel_sizes |
| self.segment_size = segment_size |
| self.gin_channels = gin_channels |
| |
| self.spk_embed_dim = spk_embed_dim |
| self.enc_p = TextEncoder( |
| 256, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| float(p_dropout), |
| ) |
| self.dec = GeneratorNSF( |
| inter_channels, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| gin_channels=gin_channels, |
| sr=sr, |
| is_half=kwargs["is_half"], |
| ) |
| self.enc_q = PosteriorEncoder( |
| spec_channels, |
| inter_channels, |
| hidden_channels, |
| 5, |
| 1, |
| 16, |
| gin_channels=gin_channels, |
| ) |
| self.flow = ResidualCouplingBlock( |
| inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels |
| ) |
| self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) |
        logger.debug(
            "gin_channels: %s, self.spk_embed_dim: %s", gin_channels, self.spk_embed_dim
        )
|
|
| def remove_weight_norm(self): |
| self.dec.remove_weight_norm() |
| self.flow.remove_weight_norm() |
| if hasattr(self, "enc_q"): |
| self.enc_q.remove_weight_norm() |
|
|
| def __prepare_scriptable__(self): |
| for hook in self.dec._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.dec) |
| for hook in self.flow._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.flow) |
| if hasattr(self, "enc_q"): |
| for hook in self.enc_q._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.enc_q) |
| return self |
|
|
| @torch.jit.ignore |
| def forward( |
| self, |
| phone: torch.Tensor, |
| phone_lengths: torch.Tensor, |
| pitch: torch.Tensor, |
| pitchf: torch.Tensor, |
| y: torch.Tensor, |
| y_lengths: torch.Tensor, |
| ds: Optional[torch.Tensor] = None, |
| ): |
        # ds: speaker id(s) used for the global conditioning g of shape [b, gin_channels, 1]
        g = self.emb_g(ds).unsqueeze(-1)
| m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) |
| z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) |
| z_p = self.flow(z, y_mask, g=g) |
| z_slice, ids_slice = commons.rand_slice_segments( |
| z, y_lengths, self.segment_size |
| ) |
        # slice pitchf to the same random segment that was taken from z
        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
| o = self.dec(z_slice, pitchf, g=g) |
| return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) |
|
|
| @torch.jit.export |
| def infer( |
| self, |
| phone: torch.Tensor, |
| phone_lengths: torch.Tensor, |
| pitch: torch.Tensor, |
| nsff0: torch.Tensor, |
| sid: torch.Tensor, |
| skip_head: Optional[torch.Tensor] = None, |
| return_length: Optional[torch.Tensor] = None, |
| ): |
| g = self.emb_g(sid).unsqueeze(-1) |
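        # when skip_head / return_length are given, only a window of the output is
        # synthesized: the text encoder skips `skip_head - 24` frames (keeping up to
        # 24 frames of left context for the flow), and the decoder input is cropped
        # to `return_length` frames starting at frame `skip_head`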
| if skip_head is not None and return_length is not None: |
| assert isinstance(skip_head, torch.Tensor) |
| assert isinstance(return_length, torch.Tensor) |
| head = int(skip_head.item()) |
| length = int(return_length.item()) |
| flow_head = torch.clamp(skip_head - 24, min=0) |
| dec_head = head - int(flow_head.item()) |
| m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths, flow_head) |
| z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask |
| z = self.flow(z_p, x_mask, g=g, reverse=True) |
| z = z[:, :, dec_head : dec_head + length] |
| x_mask = x_mask[:, :, dec_head : dec_head + length] |
| nsff0 = nsff0[:, head : head + length] |
| else: |
| m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) |
| z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask |
| z = self.flow(z_p, x_mask, g=g, reverse=True) |
| o = self.dec(z * x_mask, nsff0, g=g) |
| return o, x_mask, (z, z_p, m_p, logs_p) |
|
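# Rough usage sketch (illustrative only; argument names and shapes below are
# assumptions, not taken from a specific training or inference pipeline):
#
#   net_g = SynthesizerTrnMs256NSFsid(*config_args, sr="40k", is_half=False)
#   # phone: [b, t, 256] content features, pitch: [b, t] coarse pitch bins,
#   # nsff0: [b, t] F0 in Hz, sid: [b] speaker ids
#   audio, x_mask, _ = net_g.infer(phone, phone_lengths, pitch, nsff0, sid)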
|
|
|
| class SynthesizerTrnMs768NSFsid(SynthesizerTrnMs256NSFsid): |
| def __init__( |
| self, |
| spec_channels, |
| segment_size, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| spk_embed_dim, |
| gin_channels, |
| sr, |
| **kwargs |
| ): |
| super(SynthesizerTrnMs768NSFsid, self).__init__( |
| spec_channels, |
| segment_size, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| spk_embed_dim, |
| gin_channels, |
| sr, |
| **kwargs |
| ) |
| del self.enc_p |
| self.enc_p = TextEncoder( |
| 768, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| float(p_dropout), |
| ) |
|
|
|
|
| class SynthesizerTrnMs256NSFsid_nono(nn.Module): |
| def __init__( |
| self, |
| spec_channels, |
| segment_size, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| spk_embed_dim, |
| gin_channels, |
| sr=None, |
| **kwargs |
| ): |
| super(SynthesizerTrnMs256NSFsid_nono, self).__init__() |
| self.spec_channels = spec_channels |
| self.inter_channels = inter_channels |
| self.hidden_channels = hidden_channels |
| self.filter_channels = filter_channels |
| self.n_heads = n_heads |
| self.n_layers = n_layers |
| self.kernel_size = kernel_size |
| self.p_dropout = float(p_dropout) |
| self.resblock = resblock |
| self.resblock_kernel_sizes = resblock_kernel_sizes |
| self.resblock_dilation_sizes = resblock_dilation_sizes |
| self.upsample_rates = upsample_rates |
| self.upsample_initial_channel = upsample_initial_channel |
| self.upsample_kernel_sizes = upsample_kernel_sizes |
| self.segment_size = segment_size |
| self.gin_channels = gin_channels |
| |
| self.spk_embed_dim = spk_embed_dim |
| self.enc_p = TextEncoder( |
| 256, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| float(p_dropout), |
| f0=False, |
| ) |
| self.dec = Generator( |
| inter_channels, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| gin_channels=gin_channels, |
| ) |
| self.enc_q = PosteriorEncoder( |
| spec_channels, |
| inter_channels, |
| hidden_channels, |
| 5, |
| 1, |
| 16, |
| gin_channels=gin_channels, |
| ) |
| self.flow = ResidualCouplingBlock( |
| inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels |
| ) |
| self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) |
        logger.debug(
            "gin_channels: %s, self.spk_embed_dim: %s", gin_channels, self.spk_embed_dim
        )
|
|
| def remove_weight_norm(self): |
| self.dec.remove_weight_norm() |
| self.flow.remove_weight_norm() |
| if hasattr(self, "enc_q"): |
| self.enc_q.remove_weight_norm() |
|
|
| def __prepare_scriptable__(self): |
| for hook in self.dec._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.dec) |
| for hook in self.flow._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.flow) |
| if hasattr(self, "enc_q"): |
| for hook in self.enc_q._forward_pre_hooks.values(): |
| if ( |
| hook.__module__ == "torch.nn.utils.weight_norm" |
| and hook.__class__.__name__ == "WeightNorm" |
| ): |
| torch.nn.utils.remove_weight_norm(self.enc_q) |
| return self |
|
|
| @torch.jit.ignore |
| def forward(self, phone, phone_lengths, y, y_lengths, ds): |
| g = self.emb_g(ds).unsqueeze(-1) |
| m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) |
| z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) |
| z_p = self.flow(z, y_mask, g=g) |
| z_slice, ids_slice = commons.rand_slice_segments( |
| z, y_lengths, self.segment_size |
| ) |
| o = self.dec(z_slice, g=g) |
| return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) |
|
|
| @torch.jit.export |
| def infer( |
| self, |
| phone: torch.Tensor, |
| phone_lengths: torch.Tensor, |
| sid: torch.Tensor, |
| skip_head: Optional[torch.Tensor] = None, |
| return_length: Optional[torch.Tensor] = None, |
| ): |
| g = self.emb_g(sid).unsqueeze(-1) |
| if skip_head is not None and return_length is not None: |
| assert isinstance(skip_head, torch.Tensor) |
| assert isinstance(return_length, torch.Tensor) |
| head = int(skip_head.item()) |
| length = int(return_length.item()) |
| flow_head = torch.clamp(skip_head - 24, min=0) |
| dec_head = head - int(flow_head.item()) |
| m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths, flow_head) |
| z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask |
| z = self.flow(z_p, x_mask, g=g, reverse=True) |
| z = z[:, :, dec_head : dec_head + length] |
| x_mask = x_mask[:, :, dec_head : dec_head + length] |
| else: |
| m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) |
| z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask |
| z = self.flow(z_p, x_mask, g=g, reverse=True) |
| o = self.dec(z * x_mask, g=g) |
| return o, x_mask, (z, z_p, m_p, logs_p) |
|
|
|
|
| class SynthesizerTrnMs768NSFsid_nono(SynthesizerTrnMs256NSFsid_nono): |
| def __init__( |
| self, |
| spec_channels, |
| segment_size, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| spk_embed_dim, |
| gin_channels, |
| sr=None, |
| **kwargs |
| ): |
| super(SynthesizerTrnMs768NSFsid_nono, self).__init__( |
| spec_channels, |
| segment_size, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| p_dropout, |
| resblock, |
| resblock_kernel_sizes, |
| resblock_dilation_sizes, |
| upsample_rates, |
| upsample_initial_channel, |
| upsample_kernel_sizes, |
| spk_embed_dim, |
| gin_channels, |
| sr, |
| **kwargs |
| ) |
| del self.enc_p |
| self.enc_p = TextEncoder( |
| 768, |
| inter_channels, |
| hidden_channels, |
| filter_channels, |
| n_heads, |
| n_layers, |
| kernel_size, |
| float(p_dropout), |
| f0=False, |
| ) |
|
|
|
|
| class MultiPeriodDiscriminator(torch.nn.Module): |
| def __init__(self, use_spectral_norm=False): |
| super(MultiPeriodDiscriminator, self).__init__() |
| periods = [2, 3, 5, 7, 11, 17] |
| |
|
|
| discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] |
| discs = discs + [ |
| DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods |
| ] |
| self.discriminators = nn.ModuleList(discs) |
|
|
| def forward(self, y, y_hat): |
| y_d_rs = [] |
| y_d_gs = [] |
| fmap_rs = [] |
| fmap_gs = [] |
| for i, d in enumerate(self.discriminators): |
| y_d_r, fmap_r = d(y) |
| y_d_g, fmap_g = d(y_hat) |
| |
| |
| y_d_rs.append(y_d_r) |
| y_d_gs.append(y_d_g) |
| fmap_rs.append(fmap_r) |
| fmap_gs.append(fmap_g) |
|
|
| return y_d_rs, y_d_gs, fmap_rs, fmap_gs |
|
|
|
|
| class MultiPeriodDiscriminatorV2(torch.nn.Module): |
| def __init__(self, use_spectral_norm=False): |
| super(MultiPeriodDiscriminatorV2, self).__init__() |
| |
| periods = [2, 3, 5, 7, 11, 17, 23, 37] |
|
|
| discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] |
| discs = discs + [ |
| DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods |
| ] |
| self.discriminators = nn.ModuleList(discs) |
|
|
| def forward(self, y, y_hat): |
| y_d_rs = [] |
| y_d_gs = [] |
| fmap_rs = [] |
| fmap_gs = [] |
| for i, d in enumerate(self.discriminators): |
| y_d_r, fmap_r = d(y) |
| y_d_g, fmap_g = d(y_hat) |
| |
| |
| y_d_rs.append(y_d_r) |
| y_d_gs.append(y_d_g) |
| fmap_rs.append(fmap_r) |
| fmap_gs.append(fmap_g) |
|
|
| return y_d_rs, y_d_gs, fmap_rs, fmap_gs |
|
|
|
|
| class DiscriminatorS(torch.nn.Module): |
| def __init__(self, use_spectral_norm=False): |
| super(DiscriminatorS, self).__init__() |
        norm_f = spectral_norm if use_spectral_norm else weight_norm
| self.convs = nn.ModuleList( |
| [ |
| norm_f(Conv1d(1, 16, 15, 1, padding=7)), |
| norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), |
| norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), |
| norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), |
| norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), |
| norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), |
| ] |
| ) |
| self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) |
|
|
| def forward(self, x): |
| fmap = [] |
|
|
| for l in self.convs: |
| x = l(x) |
| x = F.leaky_relu(x, modules.LRELU_SLOPE) |
| fmap.append(x) |
| x = self.conv_post(x) |
| fmap.append(x) |
| x = torch.flatten(x, 1, -1) |
|
|
| return x, fmap |
|
|
|
|
| class DiscriminatorP(torch.nn.Module): |
| def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): |
| super(DiscriminatorP, self).__init__() |
| self.period = period |
| self.use_spectral_norm = use_spectral_norm |
        norm_f = spectral_norm if use_spectral_norm else weight_norm
| self.convs = nn.ModuleList( |
| [ |
| norm_f( |
| Conv2d( |
| 1, |
| 32, |
| (kernel_size, 1), |
| (stride, 1), |
| padding=(get_padding(kernel_size, 1), 0), |
| ) |
| ), |
| norm_f( |
| Conv2d( |
| 32, |
| 128, |
| (kernel_size, 1), |
| (stride, 1), |
| padding=(get_padding(kernel_size, 1), 0), |
| ) |
| ), |
| norm_f( |
| Conv2d( |
| 128, |
| 512, |
| (kernel_size, 1), |
| (stride, 1), |
| padding=(get_padding(kernel_size, 1), 0), |
| ) |
| ), |
| norm_f( |
| Conv2d( |
| 512, |
| 1024, |
| (kernel_size, 1), |
| (stride, 1), |
| padding=(get_padding(kernel_size, 1), 0), |
| ) |
| ), |
| norm_f( |
| Conv2d( |
| 1024, |
| 1024, |
| (kernel_size, 1), |
| 1, |
| padding=(get_padding(kernel_size, 1), 0), |
| ) |
| ), |
| ] |
| ) |
| self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) |
|
|
| def forward(self, x): |
| fmap = [] |
|
|
| |
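        # reshape the 1d waveform into 2d [t // period, period] frames for the 2d convs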
| b, c, t = x.shape |
| if t % self.period != 0: |
| n_pad = self.period - (t % self.period) |
| if has_xpu and x.dtype == torch.bfloat16: |
| x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to( |
| dtype=torch.bfloat16 |
| ) |
| else: |
| x = F.pad(x, (0, n_pad), "reflect") |
| t = t + n_pad |
| x = x.view(b, c, t // self.period, self.period) |
|
|
| for l in self.convs: |
| x = l(x) |
| x = F.leaky_relu(x, modules.LRELU_SLOPE) |
| fmap.append(x) |
| x = self.conv_post(x) |
| fmap.append(x) |
| x = torch.flatten(x, 1, -1) |
|
|
| return x, fmap |
|
|