@@ -8,7 +8,8 @@
 from torch.nn.utils import remove_weight_norm

 from .nsf import SourceModuleHnNSF
-from .bigv import init_weights, AMPBlock
+from .bigv import init_weights, SnakeBeta, AMPBlock
+from .alias import Activation1d


 class SpeakerAdapter(nn.Module):
@@ -72,6 +73,7 @@ def __init__(self, hp):
             # spk
             self.adapter.append(SpeakerAdapter(
                 256, hp.gen.upsample_initial_channel // (2 ** (i + 1))))
+            # print(f'ups: {i} {k}, {u}, {(k - u) // 2}')
             # base
             self.ups.append(nn.ModuleList([
                 weight_norm(ConvTranspose1d(hp.gen.upsample_initial_channel // (2 ** i),
@@ -106,6 +108,8 @@ def __init__(self, hp):
                 self.resblocks.append(AMPBlock(hp, ch, k, d))

         # post conv
+        activation_post = SnakeBeta(ch, alpha_logscale=True)
+        self.activation_post = Activation1d(activation=activation_post)
         self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))

         # weight initialization
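For reference, SnakeBeta is the periodic activation used in the BigVGAN family: each channel learns a frequency parameter α and a magnitude parameter β, and the output is x + sin²(αx)/β. The sketch below is a minimal stand-alone version, assuming the usual log-scale parameterisation behind `alpha_logscale=True`; the actual `SnakeBeta` in `.bigv` may differ in details such as the epsilon handling.

```python
import torch
import torch.nn as nn

class SnakeBetaSketch(nn.Module):
    """Minimal Snake-Beta activation: x + (1/(beta + eps)) * sin^2(alpha * x).

    alpha controls the frequency of the periodic term, beta its magnitude.
    Both are learned per channel; with alpha_logscale=True the parameters
    are stored in log space and exponentiated in the forward pass.
    """

    def __init__(self, channels, alpha_logscale=True, eps=1e-9):
        super().__init__()
        self.alpha_logscale = alpha_logscale
        # log-scale init -> zeros (exp(0)=1); linear init -> ones
        init = torch.zeros(channels) if alpha_logscale else torch.ones(channels)
        self.alpha = nn.Parameter(init.clone())
        self.beta = nn.Parameter(init.clone())
        self.eps = eps

    def forward(self, x):  # x: [B, C, T]
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # [1, C, 1]
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        return x + (1.0 / (beta + self.eps)) * torch.sin(alpha * x) ** 2
```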
@@ -120,6 +124,7 @@ def forward(self, spk, x, pos, f0):
         har_source = self.m_source(f0)
         har_source = har_source.transpose(1, 2)
         # pre conv
+        # x = x + torch.randn_like(x)  # for last train
         x = self.cond_pre(x)  # [B, L, D]
         p = self.cond_pos(pos)
         x = x + p
@@ -145,7 +150,7 @@ def forward(self, spk, x, pos, f0):
             x = xs / self.num_kernels

         # post conv
-        x = nn.functional.leaky_relu(x)
+        x = self.activation_post(x)
         x = self.conv_post(x)
         x = torch.tanh(x)
         return x
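`Activation1d` applies the activation at a higher sample rate (upsample, activate, downsample) so the periodic SnakeBeta does not introduce aliasing at the output rate. The sketch below shows that pattern only; it substitutes plain `nn.Upsample`/`nn.AvgPool1d` for the low-pass sinc resampling filters the real `.alias` module presumably uses, so treat it as an illustration rather than a drop-in replacement.

```python
import torch
import torch.nn as nn

class Activation1dSketch(nn.Module):
    """Rough anti-aliased activation wrapper: upsample -> activate -> downsample.

    A faithful alias-free module would resample with windowed-sinc low-pass
    filters; linear interpolation and average pooling are used here only to
    keep the sketch short and dependency-free.
    """

    def __init__(self, activation, up_ratio=2, down_ratio=2):
        super().__init__()
        self.up = nn.Upsample(scale_factor=up_ratio, mode="linear", align_corners=False)
        self.act = activation
        self.down = nn.AvgPool1d(kernel_size=down_ratio, stride=down_ratio)

    def forward(self, x):  # x: [B, C, T]
        return self.down(self.act(self.up(x)))

# Usage mirroring the diff: the final leaky_relu is replaced by the
# anti-aliased SnakeBeta before conv_post and tanh, e.g.
#   act = Activation1dSketch(SnakeBetaSketch(ch))
#   x = torch.tanh(conv_post(act(x)))
```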