# models.py

import torch
from torch import nn
from torch.nn import Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm

from fish_speech.models.vits_decoder.modules import attentions, commons, modules

from .commons import get_padding, init_weights
from .mrte import MRTE
from .vq_encoder import VQEncoder
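

# TextEncoder is the prior encoder: the 768-dim frame features coming out of
# the frozen VQ encoder are projected to hidden_channels and encoded, text
# tokens are embedded and encoded separately, and the MRTE module fuses the
# two streams together with the global reference embedding `ge`. The final
# 1x1 conv predicts the prior mean and log-variance.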
class TextEncoder(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        latent_channels=192,
        codebook_size=264,
    ):
        super().__init__()

        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.latent_channels = latent_channels

        self.ssl_proj = nn.Conv1d(768, hidden_channels, 1)
        self.encoder_ssl = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers // 2,
            kernel_size,
            p_dropout,
        )

        self.encoder_text = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.text_embedding = nn.Embedding(codebook_size, hidden_channels)

        self.mrte = MRTE()

        self.encoder2 = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers // 2,
            kernel_size,
            p_dropout,
        )

        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, y, y_lengths, text, text_lengths, ge):
        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, y.size(2)), 1).to(
            y.dtype
        )

        y = self.ssl_proj(y * y_mask) * y_mask
        y = self.encoder_ssl(y * y_mask, y_mask)

        text_mask = torch.unsqueeze(
            commons.sequence_mask(text_lengths, text.size(1)), 1
        ).to(y.dtype)

        text = self.text_embedding(text).transpose(1, 2)
        text = self.encoder_text(text * text_mask, text_mask)

        y = self.mrte(y, y_mask, text, text_mask, ge)
        y = self.encoder2(y * y_mask, y_mask)

        stats = self.proj(y) * y_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        return y, m, logs, y_mask
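

# ResidualCouplingBlock is the VITS normalizing flow: n_flows mean-only
# affine coupling layers interleaved with channel flips. With reverse=False
# it maps the posterior latent toward the prior space; with reverse=True it
# is inverted for inference.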
class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x
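

# PosteriorEncoder models q(z | spectrogram): a WaveNet-style (WN) stack
# predicts mean and log-variance per frame, and z is drawn with the
# reparameterization trick. The conditioning vector g is detached so
# posterior gradients do not flow back into the reference encoder.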
class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        if g is not None:
            g = g.detach()

        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        # Reparameterization: z ~ N(m, exp(logs)^2), masked to valid frames.
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask
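

# Generator is a HiFi-GAN style vocoder: transposed convolutions upsample the
# latent, and at each scale the outputs of num_kernels residual blocks with
# different kernel sizes/dilations are averaged (multi-receptive-field
# fusion). With upsample_rates=[8, 8, 2, 2, 2] (as in the __main__ test
# below), each latent frame expands to 8 * 8 * 2 * 2 * 2 = 512 audio samples.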
class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super().__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            # Average the parallel residual blocks at this scale.
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
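

# DiscriminatorP is a HiFi-GAN multi-period sub-discriminator: the waveform
# is padded to a multiple of `period` and reshaped to
# (batch, 1, time // period, period), so the strided 2D convolutions compare
# samples that lie `period` steps apart.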
class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super().__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d: pad to a multiple of the period, then fold the time axis.
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
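

# DiscriminatorS is the scale discriminator: large-kernel, grouped 1D
# convolutions applied directly to the raw waveform.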
class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super().__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
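

# EnsembledDiscriminator bundles one DiscriminatorS with one DiscriminatorP
# per (prime) period and returns logits and feature maps for both the real
# and generated waveforms. A minimal usage sketch with made-up shapes:
#
#   mpd = EnsembledDiscriminator()
#   real, fake = torch.randn(2, 1, 32000), torch.randn(2, 1, 32000)
#   y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(real, fake)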
class EnsembledDiscriminator(torch.nn.Module):
    def __init__(self, periods=(2, 3, 5, 7, 11), use_spectral_norm=False):
        super().__init__()
        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
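

# SynthesizerTrn wires everything into a VITS-style synthesizer:
#   enc_p   - prior encoder over frozen-VQ features + text (TextEncoder)
#   enc_q   - posterior encoder over ground-truth spectrograms
#   flow    - residual coupling flow between posterior and prior spaces
#   dec     - HiFi-GAN generator
#   ref_enc - mel style encoder producing the global speaker embedding ge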
class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(
        self,
        *,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
        codebook_size=264,
        vq_mask_ratio=0.0,
        ref_mask_ratio=0.0,
    ):
        super().__init__()

        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.vq_mask_ratio = vq_mask_ratio
        self.ref_mask_ratio = ref_mask_ratio

        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            codebook_size=codebook_size,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels
        )

        self.ref_enc = modules.MelStyleEncoder(
            spec_channels, style_vector_dim=gin_channels
        )

        # The VQ encoder is used as a frozen feature extractor.
        self.vq = VQEncoder()
        for param in self.vq.parameters():
            param.requires_grad = False
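
    # The training forward pass: encode the reference spectrogram into ge,
    # optionally zero the reference audio for part of the batch
    # (ref_mask_ratio) and block-mask the VQ features (vq_mask_ratio), then
    # run the prior and posterior encoders and decode a random latent slice
    # for the GAN losses.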
    def forward(
        self, audio, audio_lengths, gt_specs, gt_spec_lengths, text, text_lengths
    ):
        y_mask = torch.unsqueeze(
            commons.sequence_mask(gt_spec_lengths, gt_specs.size(2)), 1
        ).to(gt_specs.dtype)
        ge = self.ref_enc(gt_specs * y_mask, y_mask)

        # Speaker dropout: zero the reference audio (in place) for a random
        # subset of the batch so the model cannot over-rely on the reference.
        if self.training and self.ref_mask_ratio > 0:
            bs = audio.size(0)
            mask_speaker_len = int(bs * self.ref_mask_ratio)
            mask_indices = torch.randperm(bs)[:mask_speaker_len]
            audio[mask_indices] = 0

        quantized = self.vq(audio, audio_lengths)

        # Block masking, block_size = 4
        block_size = 4
        if self.training and self.vq_mask_ratio > 0:
            reduced_length = quantized.size(-1) // block_size
            mask_length = int(reduced_length * self.vq_mask_ratio)
            mask_indices = torch.randperm(reduced_length)[:mask_length]
            short_mask = torch.zeros(
                quantized.size(0),
                quantized.size(1),
                reduced_length,
                device=quantized.device,
                dtype=torch.float,
            )
            short_mask[:, :, mask_indices] = 1.0
            long_mask = short_mask.repeat_interleave(block_size, dim=-1)
            long_mask = F.interpolate(
                long_mask, size=quantized.size(-1), mode="nearest"
            )
            quantized = quantized.masked_fill(long_mask > 0.5, 0)

        x, m_p, logs_p, y_mask = self.enc_p(
            quantized, gt_spec_lengths, text, text_lengths, ge
        )
        z, m_q, logs_q, y_mask = self.enc_q(gt_specs, gt_spec_lengths, g=ge)
        z_p = self.flow(z, y_mask, g=ge)

        z_slice, ids_slice = commons.rand_slice_segments(
            z, gt_spec_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=ge)

        return (
            o,
            ids_slice,
            y_mask,
            (z, z_p, m_p, logs_p, m_q, logs_q),
        )
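
    # Inference utilities. The `audio_lengths // 512` below assumes the VQ
    # encoder emits one frame per 512 input samples (matching the generator's
    # total upsampling factor); this is inferred from the surrounding code,
    # not documented here.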
    @torch.no_grad()
    def infer(
        self,
        audio,
        audio_lengths,
        gt_specs,
        gt_spec_lengths,
        text,
        text_lengths,
        noise_scale=0.5,
    ):
        quantized = self.vq(audio, audio_lengths)
        quantized_lengths = audio_lengths // 512

        ge = self.encode_ref(gt_specs, gt_spec_lengths)

        return self.decode(
            quantized,
            quantized_lengths,
            text,
            text_lengths,
            noise_scale=noise_scale,
            ge=ge,
        )

    @torch.no_grad()
    def infer_posterior(
        self,
        gt_specs,
        gt_spec_lengths,
    ):
        y_mask = torch.unsqueeze(
            commons.sequence_mask(gt_spec_lengths, gt_specs.size(2)), 1
        ).to(gt_specs.dtype)
        ge = self.ref_enc(gt_specs * y_mask, y_mask)
        z, m_q, logs_q, y_mask = self.enc_q(gt_specs, gt_spec_lengths, g=ge)
        o = self.dec(z * y_mask, g=ge)

        return o

    @torch.no_grad()
    def decode(
        self,
        quantized,
        quantized_lengths,
        text,
        text_lengths,
        noise_scale=0.5,
        ge=None,
    ):
        x, m_p, logs_p, y_mask = self.enc_p(
            quantized, quantized_lengths, text, text_lengths, ge
        )
        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        z = self.flow(z_p, y_mask, g=ge, reverse=True)

        o = self.dec(z * y_mask, g=ge)
        return o

    @torch.no_grad()
    def encode_ref(self, gt_specs, gt_spec_lengths):
        y_mask = torch.unsqueeze(
            commons.sequence_mask(gt_spec_lengths, gt_specs.size(2)), 1
        ).to(gt_specs.dtype)
        ge = self.ref_enc(gt_specs * y_mask, y_mask)
        return ge
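

# Smoke test: loads a local Bert-VITS2 generator checkpoint and a 32 kHz
# sample utterance (both paths are machine-specific), tokenizes a Chinese
# test sentence, and runs end-to-end inference.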
if __name__ == "__main__":
    import librosa
    from transformers import AutoTokenizer

    from fish_speech.utils.spectrogram import LinearSpectrogram

    model = SynthesizerTrn(
        spec_channels=1025,
        segment_size=20480 // 640,
        inter_channels=192,
        hidden_channels=192,
        filter_channels=768,
        n_heads=2,
        n_layers=6,
        kernel_size=3,
        p_dropout=0.1,
        resblock="1",
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 8, 2, 2],
        gin_channels=512,
    )

    ckpt = "checkpoints/Bert-VITS2/G_0.pth"

    # Try to load the model
    print(f"Loading model from {ckpt}")
    checkpoint = torch.load(ckpt, map_location="cpu", weights_only=True)["model"]
    # d_checkpoint = torch.load(
    #     "checkpoints/Bert-VITS2/D_0.pth", map_location="cpu", weights_only=True
    # )["model"]

    # These conditioning weights do not match the current configuration, so
    # drop them and load with strict=False.
    checkpoint.pop("dec.cond.weight")
    checkpoint.pop("enc_q.enc.cond_layer.weight_v")

    # One-off conversion of the generator/discriminator weights into a single
    # ensemble checkpoint:
    # new_checkpoint = {}
    # for k, v in checkpoint.items():
    #     new_checkpoint["generator." + k] = v
    # for k, v in d_checkpoint.items():
    #     new_checkpoint["discriminator." + k] = v
    # torch.save(new_checkpoint, "checkpoints/Bert-VITS2/ensemble.pth")

    print(model.load_state_dict(checkpoint, strict=False))

    # Test: use the same utterance as both reference and input audio.
    input_audio = librosa.load(
        "data/source/云天河/云天河-旁白/《薄太太》第0025集-yth_24.wav", sr=32000
    )[0]
    ref_audio = input_audio

    # Chinese test sentence for the tokenizer.
    text = "博兴只知道身边的小女人没睡着,他又凑到她耳边压低了声线。阮苏眉睁眼,不觉得你老公像英雄吗?阮苏还是没反应,这男人是不是有病?刚才那冰冷又强势的样子,和现在这幼稚无赖的样子,根本就判若二人。"

    tokenizer = AutoTokenizer.from_pretrained("fishaudio/fish-speech-1")
    spec = LinearSpectrogram(n_fft=2048, hop_length=640, win_length=2048)

    ref_audio = torch.tensor(ref_audio).unsqueeze(0).unsqueeze(0)
    ref_spec = spec(ref_audio)
    input_audio = torch.tensor(input_audio).unsqueeze(0).unsqueeze(0)
    text = tokenizer(text, return_tensors="pt")["input_ids"]
    print(ref_audio.size(), ref_spec.size(), input_audio.size(), text.size())

    # infer() returns only the generated waveform.
    o = model.infer(
        input_audio,
        torch.LongTensor([input_audio.size(2)]),
        ref_spec,
        torch.LongTensor([ref_spec.size(2)]),
        text,
        torch.LongTensor([text.size(1)]),
    )
    print(o.size())

    # Save output
    # import soundfile as sf
    # sf.write("output.wav", o.squeeze().detach().numpy(), 32000)