import numpy as np
import onnxruntime

from text import text_to_sequence, sequence_to_text
import torch
import gradio as gr
import soundfile as sf
import tempfile
import yaml
import json
import os

from huggingface_hub import hf_hub_download
from time import perf_counter

DEFAULT_SPEAKER_ID = os.environ.get("DEFAULT_SPEAKER_ID", default="caf_08106")

def intersperse(lst, item):
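    """Return `lst` with `item` inserted between every element and at both ends,
    yielding a sequence of length 2 * len(lst) + 1. The text frontend below
    calls this with 0 to interleave a blank token between phoneme ids."""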
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def process_text(i: int, text: str, device: torch.device):
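    """Convert raw text to a batched tensor of interleaved phoneme ids.

    Returns the id matrix and its length, both as numpy arrays ready to be
    fed to the ONNX acoustic model."""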
    print(f"[{i}] - Input text: {text}")
    x = torch.tensor(
        intersperse(text_to_sequence(text, ["catalan_cleaners"]), 0),
        dtype=torch.long,
        device=device,
    )[None]
    x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device)
    x_phones = sequence_to_text(x.squeeze(0).tolist())
    print(x_phones)
    return x.numpy(), x_lengths.numpy()

MODEL_PATH_MATCHA_MEL = hf_hub_download(repo_id="BSC-LT/matcha-tts-cat-multispeaker", filename="matcha_multispeaker_cat_opset_15_10_steps_2399.onnx")
MODEL_PATH_MATCHA = "matcha_hifigan_multispeaker_cat.onnx"
MODEL_PATH_VOCOS = hf_hub_download(repo_id="BSC-LT/vocos-mel-22khz-cat", filename="mel_spec_22khz_cat.onnx")
CONFIG_PATH = hf_hub_download(repo_id="BSC-LT/vocos-mel-22khz-cat", filename="config.yaml")
SPEAKER_ID_DICT = "spk_to_id.json"

sess_options = onnxruntime.SessionOptions()
model_matcha_mel = onnxruntime.InferenceSession(str(MODEL_PATH_MATCHA_MEL), sess_options=sess_options, providers=["CPUExecutionProvider"])
model_vocos = onnxruntime.InferenceSession(str(MODEL_PATH_VOCOS), sess_options=sess_options, providers=["CPUExecutionProvider"])
#model_matcha = onnxruntime.InferenceSession(str(MODEL_PATH_MATCHA), sess_options=sess_options, providers=["CPUExecutionProvider"])

with open(SPEAKER_ID_DICT) as f:
    speaker_id_dict = json.load(f)
speakers = sorted(speaker_id_dict.keys())

def vocos_inference(mel, denoise):
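    """Run the Vocos ONNX vocoder on a mel spectrogram and return a waveform.

    The waveform is reconstructed with an inverse STFT; when `denoise` is set,
    the vocoder's bias (estimated from an all-zero mel input) is subtracted
    from the magnitude spectrogram first."""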

    with open(CONFIG_PATH, "r") as f:
        config = yaml.safe_load(f)

    params = config["feature_extractor"]["init_args"]
    sample_rate = params["sample_rate"]
    n_fft = params["n_fft"]
    hop_length = params["hop_length"]
    win_length = n_fft

    # ONNX inference
    mag, x, y = model_vocos.run(
        None,
        {
            "mels": mel
        },
    )

    # complex spectrogram from vocos output
    spectrogram = mag * (x + 1j * y)
    window = torch.hann_window(win_length)

    if denoise:
        # Estimate the vocoder's bias by running it on an all-zero (silent) mel input
        mel_zero = torch.zeros_like(torch.tensor(mel))
        mag_bias, x_bias, y_bias = model_vocos.run(
            None,
            {
                "mels": mel_zero.float().numpy()
            },
        )

        # complex spectrogram from vocos output
        spectrogram_bias = mag_bias * (x_bias + 1j * y_bias)

        # Denoising
        spec = torch.view_as_real(torch.tensor(spectrogram))
        # get magnitude of vocos spectrogram
        mag_spec = torch.sqrt(spec.pow(2).sum(-1))

        # get magnitude of bias spectrogram
        spec_bias = torch.view_as_real(torch.tensor(spectrogram_bias))
        mag_spec_bias = torch.sqrt(spec_bias.pow(2).sum(-1))

        # subtract a scaled copy of the bias magnitude from the signal magnitude
        strength = 0.0025
        mag_spec_denoised = mag_spec - mag_spec_bias * strength
        mag_spec_denoised = torch.clamp(mag_spec_denoised, 0.0)

        # rebuild the complex spectrogram from the denoised magnitude and the original phase
        angle = torch.atan2(spec[..., -1], spec[..., 0])
        spectrogram = torch.complex(mag_spec_denoised * torch.cos(angle), mag_spec_denoised * torch.sin(angle))

    # Inverse stft
    pad = (win_length - hop_length) // 2
    spectrogram = torch.as_tensor(spectrogram)  # no-op when denoising already produced a tensor
    B, N, T = spectrogram.shape

    print("Spectrogram synthesized shape", spectrogram.shape)
    # Inverse FFT
    ifft = torch.fft.irfft(spectrogram, n_fft, dim=1, norm="backward")
    ifft = ifft * window[None, :, None]

    # Overlap and Add
    output_size = (T - 1) * hop_length + win_length
    y = torch.nn.functional.fold(
        ifft, output_size=(1, output_size), kernel_size=(1, win_length), stride=(1, hop_length),
    )[:, 0, 0, pad:-pad]

    # Window envelope: sum of squared, overlapped windows, used to undo the windowing
    window_sq = window.square().expand(1, T, -1).transpose(1, 2)
    window_envelope = torch.nn.functional.fold(
        window_sq, output_size=(1, output_size), kernel_size=(1, win_length), stride=(1, hop_length),
    ).squeeze()[pad:-pad]

    # Normalize
    assert (window_envelope > 1e-11).all()
    y = y / window_envelope
    
    return y


def tts(text: str, spk_name: str, temperature: float, length_scale: float, denoise: bool):
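    """Synthesize `text` with the Matcha acoustic model and the Vocos vocoder
    and return the path to the generated wav file."""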
    spk_id = speaker_id_dict[spk_name]
    sid = np.array([int(spk_id)]) if spk_id is not None else None
    text_matcha, text_lengths = process_text(0, text, "cpu")

    # MATCHA VOCOS
    inputs = {
        "x": text_matcha,
        "x_lengths": text_lengths,
        "scales": np.array([temperature, length_scale], dtype=np.float32),
        "spks": sid
    }
    mel_t0 = perf_counter()
    # matcha mel inference
    mel, mel_lengths = model_matcha_mel.run(None, inputs)
    mel_infer_secs = perf_counter() - mel_t0
    print("Matcha Mel inference time", mel_infer_secs)

    vocos_t0 = perf_counter()
    # vocos inference
    wavs_vocos = vocos_inference(mel,denoise)
    vocos_infer_secs = perf_counter() - vocos_t0
    print("Vocos inference time", vocos_infer_secs)

    # write to a persistent temp file so Gradio can serve it after the function returns
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir="/home/user/app") as fp_matcha_vocos:
        sf.write(fp_matcha_vocos.name, wavs_vocos.squeeze(0), 22050, "PCM_24")

    print(f"RTF matcha + vocos { (mel_infer_secs + vocos_infer_secs) / (wavs_vocos.shape[1]/22050) }")
    return fp_matcha_vocos.name

## GUI space

title = """
<div style="text-align: center; max-width: 700px; margin: 0 auto;">
    <div
        style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;"
    > <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
        Natural and efficient TTS in Catalan
    </h1> </div>
</div>
 """

description = """
 
🍵 Matcha-TTS is a non-autoregressive neural TTS architecture that uses conditional flow matching (similar to rectified flows) to speed up ODE-based speech synthesis.

As vocoder we use [Vocos](https://huggingface.co/BSC-LT/vocos-mel-22khz-cat), trained on a Catalan dataset of ~28 hours.

[Matcha](https://huggingface.co/BSC-LT/matcha-tts-cat-onnx) was trained using the openslr69 and festcat datasets.

"""

about = """
## 📄 About
Natural and efficient TTS in Catalan, using Matcha-TTS adapted to the Catalan language.

Here you will find information about our model, which was trained using deep learning. For specifics on how the model was trained, see the [model card](https://huggingface.co/BSC-LT/matcha-tts-cat-multispeaker). The code we used is also on GitHub [here](https://github.com/langtech-bsc/Matcha-TTS/tree/dev-cat).

## Table of Contents
<details>
<summary>Click to expand</summary>

- [General Model Description](#general-model-description)
- [Adaptation to Catalan](#adaptation-to-catalan)
- [Intended Uses and Limitations](#intended-uses-and-limitations)
- [Samples](#samples)
- [Citation](#citation)  
- [Additional Information](#additional-information)

</details>

## General Model Description

**Matcha-TTS** is an encoder-decoder architecture designed for fast acoustic modelling in TTS. 
On the one hand, the encoder is based on a text encoder and a phoneme duration predictor. Together, they predict averaged acoustic features.
On the other hand, the decoder has a U-Net backbone inspired by [Grad-TTS](https://arxiv.org/pdf/2105.06337.pdf), which is based on the Transformer architecture. 
In the latter, replacing 2D CNNs with 1D CNNs yields a large reduction in memory consumption and fast synthesis.

**Matcha-TTS** is a non-autoregressive model trained with optimal-transport conditional flow matching (OT-CFM). 
This yields an ODE-based decoder capable of generating high output quality in fewer synthesis steps than models trained using score matching.
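
To make this concrete, here is a minimal, illustrative sketch (not the model's actual decoder) of how an ODE-based decoder generates a mel spectrogram: starting from noise, it integrates a learned vector field for a small, fixed number of steps. `euler_ode_decode` and `vector_field` are hypothetical names standing in for the trained network; the ONNX model exported for this demo uses 10 such steps.

```python
import torch

def euler_ode_decode(vector_field, x0, n_steps=10):
    # x0: a noise sample shaped like the target mel spectrogram (batch, n_mels, frames)
    x, dt = x0, 1.0 / n_steps
    for i in range(n_steps):
        t = torch.full((x.shape[0],), i * dt)  # current integration time for the whole batch
        x = x + dt * vector_field(x, t)        # one Euler step along the learned flow
    return x  # approximate mel spectrogram at t = 1
```

Fewer integration steps mean faster synthesis, which is the practical payoff of OT-CFM training over score matching.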

## Adaptation to Catalan

The original Matcha-TTS model excels in English; to bring its capabilities to Catalan, a multi-step process was undertaken. First, we fine-tuned the model from English to Central Catalan, which laid the groundwork for capturing the language's nuances. This first fine-tuning used two datasets:

 * [Our version of the openslr-slr69 dataset.](https://huggingface.co/datasets/projecte-aina/openslr-slr69-ca-trimmed-denoised) 
 
 * A studio-recorded dataset of Central Catalan, which will soon be published.

This soon-to-be-published dataset also includes recordings of three other dialects, with a male and a female speaker for each:

 * Valencian

 * Occidental

 * Balear

Then, through fine-tuning for these specific Catalan dialects, the model adapted to regional variations in pronunciation and cadence. This meticulous approach ensures that the model reflects the linguistic richness and cultural diversity within the Catalan-speaking community, offering seamless communication in previously underserved dialects.
 
In addition to training the Matcha-TTS model for Catalan, integrating the eSpeak phonemizer played a crucial role in enhancing the naturalness and accuracy of generated speech. A TTS (Text-to-Speech) system comprises several components, each contributing to the overall quality of synthesized speech. The first component involves text preprocessing, where the input text undergoes normalization and linguistic analysis to identify words, punctuation, and linguistic features. Next, the text is converted into phonemes, the smallest units of sound in a language, through a process called phonemization. This step is where the eSpeak phonemizer shines, as it accurately converts Catalan text into phonetic representations, capturing the subtle nuances of pronunciation specific to Catalan. You can find the eSpeak version we used [here](https://github.com/projecte-aina/espeak-ng/tree/dev-ca).

After phonemization, the phonemes are passed to the synthesis component, where they are transformed into audible speech. Here, the Matcha-TTS model takes center stage, generating high-quality speech output based on the phonetic input. The model's training, fine-tuning, and adaptation to Catalan ensure that the synthesized speech retains the natural rhythm, intonation, and pronunciation patterns of the language, thereby enhancing the overall user experience.

Finally, the synthesized speech undergoes post-processing, where prosodic features such as pitch, duration, and emphasis are applied to further refine the output and make it sound more natural and expressive. By integrating the eSpeak phonemizer into the TTS pipeline and adapting it for Catalan, alongside training the Matcha-TTS model for the language, we have created a comprehensive and effective system for generating high-quality Catalan speech. This combination of advanced techniques and meticulous attention to linguistic detail is instrumental in bridging language barriers and facilitating communication for Catalan speakers worldwide.
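
Putting these stages together, the minimal sketch below mirrors this demo's own pipeline, using the helpers defined in this Space's code (`process_text`, `model_matcha_mel`, `vocos_inference`); the input text, speaker id, and scale values are illustrative.

```python
import numpy as np

# Illustrative values; real speaker ids come from spk_to_id.json.
x, x_lengths = process_text(0, "Bon dia a tothom.", "cpu")  # normalization + eSpeak phonemization
mel, mel_lengths = model_matcha_mel.run(None, {
    "x": x,
    "x_lengths": x_lengths,
    "scales": np.array([0.667, 1.0], dtype=np.float32),     # temperature, length scale
    "spks": np.array([0]),
})
wav = vocos_inference(mel, denoise=True)                    # mel -> waveform via Vocos
```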

## Intended Uses and Limitations

This model is intended to serve as an acoustic feature generator for multispeaker text-to-speech systems for the Catalan language. 
It has been fine-tuned with a Catalan phonemizer, so if the model is used for other languages it may not produce intelligible samples 
after its output is mapped to a speech waveform. 

The quality of the samples can vary depending on the speaker. 
This may be due to the model's sensitivity to speaker-specific frequencies, as well as to the quality of each speaker's training samples.




## Samples
* Female samples 

<table style="font-size:16px">
  <col width="205">
  <col width="205">
  <thead>
    <tr>
      <td>Valencian</td>
      <td>Occidental</td>
      <td>Balear</td>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/valencia/spk1/0.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/occidental/spk1/0.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/balear/spk1/0.wav" type="audio/wav">
        </audio>
      </td>
    </tr>
    <tr>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/valencia/spk1/1.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/occidental/spk1/1.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/balear/spk1/1.wav" type="audio/wav">
        </audio>
      </td>
    </tr>
    <tr>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/valencia/spk1/2.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/occidental/spk1/2.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/balear/spk1/2.wav" type="audio/wav">
        </audio>
      </td>
    </tr>
  </tbody>
</table>

* Male samples:

<table style="font-size:16px">
  <col width="205">
  <col width="205">
<thead>
<tr>
  <td>Valencian</td>
  <td>Occidental</td>
  <td>Balear</td>
</tr>
</thead>
  <tbody>
    <tr>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/valencia/spk0/0.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/occidental/spk0/0.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/balear/spk0/0.wav" type="audio/wav">
        </audio>
      </td>
    </tr>
    <tr>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/valencia/spk0/1.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/occidental/spk0/1.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/balear/spk0/1.wav" type="audio/wav">
        </audio>
      </td>
    </tr>
    <tr>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/valencia/spk0/2.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/occidental/spk0/2.wav" type="audio/wav">
        </audio>
      </td>
      <td>
        <audio controls="" preload="none" style="width: 200px">
          audio not supported
          <source src="https://github.com/mllopartbsc/assets/raw/c6a393237e712851dd7cc7d10c70dde29d3412ac/matcha_tts_catalan/balear/spk0/2.wav" type="audio/wav">
        </audio>
      </td>
    </tr>
  </tbody>
</table>


## Citation

If this code contributes to your research, please cite the work:

```bibtex
@misc{mehta2024matchatts,
      title={Matcha-TTS: A fast TTS architecture with conditional flow matching}, 
      author={Shivam Mehta and Ruibo Tu and Jonas Beskow and Éva Székely and Gustav Eje Henter},
      year={2024},
      eprint={2309.03199},
      archivePrefix={arXiv},
      primaryClass={eess.AS}
}
```

## Additional Information

### Author
The Language Technologies Unit from Barcelona Supercomputing Center.

### Contact
For further information, please send an email to <langtech@bsc.es>.

### Copyright
Copyright(c) 2023 by Language Technologies Unit, Barcelona Supercomputing Center.

### License
[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)

### Funding
This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project](https://projecteaina.cat/).
"""

article = "Training and demo by The Language Technologies Unit from Barcelona Supercomputing Center."

matcha_inference = gr.Interface(
    fn=tts,
    inputs=[
        gr.Textbox(
            value="m'ha costat molt desenvolupar una veu, i ara que la tinc no estaré en silenci.",
            max_lines=1,
            label="Input text",
        ),
        gr.Dropdown(
            choices=speakers,
            label="Speaker id",
            value=DEFAULT_SPEAKER_ID,
            info=f"Models are trained on 47 speakers. You can prompt the model using one of these speaker ids."
        ),
        gr.Slider(
            0.1,
            2.0,
            value=0.667,
            step=0.01,
            label="Temperature",
            info=f"Temperature",
        ),
        gr.Slider(
            0.5,
            2.0,
            value=1.0,
            step=0.01,
            label="Length scale",
            info=f"Controls speech pace, larger values for slower pace and smaller values for faster pace",
        ),
        gr.Checkbox(label="Denoise", info="Removes the vocoder bias from the Vocos output", value=True),
    ],
    outputs=[gr.Audio(label="Matcha vocos", interactive=False, type="filepath")]
)

about_article = gr.Markdown(about)

demo = gr.Blocks()

with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.TabbedInterface([matcha_inference, about_article], ["Demo", "About"])
    gr.Markdown(article)

demo.queue(max_size=10)
demo.launch(show_api=False, server_name="0.0.0.0", server_port=7860)