"""Gradio demo: Catalan multispeaker TTS with Matcha-TTS (mel) + Vocos (vocoder), both via ONNX Runtime on CPU."""
import json
import os
import tempfile
from time import perf_counter

import gradio as gr
import numpy as np
import onnxruntime
import soundfile as sf
import torch
import yaml
from huggingface_hub import hf_hub_download

from text import text_to_sequence, sequence_to_text

# Default voice shown in the speaker dropdown; overridable via environment.
DEFAULT_SPEAKER_ID = os.environ.get("DEFAULT_SPEAKER_ID", default="caf_08106")


def intersperse(lst, item):
    """Return a new list with `item` inserted before, between and after every element of `lst`."""
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def process_text(i: int, text: str, device: torch.device):
    """Convert raw text to phoneme-id arrays for ONNX inference.

    Args:
        i: index used only for log prefixing.
        text: input sentence.
        device: torch device for the intermediate tensors. NOTE(review): the
            return path calls `.numpy()`, which requires CPU tensors, so this
            must effectively be "cpu".

    Returns:
        Tuple of (token ids shaped (1, T), lengths shaped (1,)) as numpy arrays.
    """
    print(f"[{i}] - Input text: {text}")
    # Interleave a blank token (id 0) between phonemes, as Matcha-TTS expects.
    x = torch.tensor(
        intersperse(text_to_sequence(text, ["catalan_cleaners"]), 0),
        dtype=torch.long,
        device=device,
    )[None]
    x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device)
    x_phones = sequence_to_text(x.squeeze(0).tolist())
    print(x_phones)
    return x.numpy(), x_lengths.numpy()


# Model and config artifacts (downloaded from the HF Hub, except the local speaker map).
MODEL_PATH_MATCHA_MEL = hf_hub_download(repo_id="BSC-LT/matcha-tts-cat-multispeaker", filename="matcha_multispeaker_cat_opset_15_10_steps_2399.onnx")
MODEL_PATH_MATCHA = "matcha_hifigan_multispeaker_cat.onnx"
MODEL_PATH_VOCOS = hf_hub_download(repo_id="BSC-LT/vocos-mel-22khz-cat", filename="mel_spec_22khz_cat.onnx")
CONFIG_PATH = hf_hub_download(repo_id="BSC-LT/vocos-mel-22khz-cat", filename="config.yaml")
SPEAKER_ID_DICT = "spk_to_id.json"

sess_options = onnxruntime.SessionOptions()
model_matcha_mel = onnxruntime.InferenceSession(str(MODEL_PATH_MATCHA_MEL), sess_options=sess_options, providers=["CPUExecutionProvider"])
model_vocos = onnxruntime.InferenceSession(str(MODEL_PATH_VOCOS), sess_options=sess_options, providers=["CPUExecutionProvider"])
#model_matcha = onnxruntime.InferenceSession(str(MODEL_PATH_MATCHA), sess_options=sess_options, providers=["CPUExecutionProvider"])

# Load the speaker-name -> id map; use a context manager so the handle is closed.
with open(SPEAKER_ID_DICT) as _spk_file:
    speaker_id_dict = json.load(_spk_file)
speakers = sorted(speaker_id_dict)


def vocos_inference(mel, denoise):
    """Run the Vocos vocoder on a mel spectrogram and reconstruct the waveform.

    Args:
        mel: numpy mel spectrogram from the Matcha acoustic model,
            presumably shaped (batch, n_mels, frames) — TODO confirm.
        denoise: if True, subtract a fraction of the vocoder's bias spectrum
            (its response to an all-zeros mel) before the inverse STFT.

    Returns:
        torch.Tensor waveform shaped (batch, samples).
    """
    with open(CONFIG_PATH, "r") as f:
        config = yaml.safe_load(f)
    params = config["feature_extractor"]["init_args"]
    sample_rate = params["sample_rate"]  # kept for reference; not used below
    n_fft = params["n_fft"]
    hop_length = params["hop_length"]
    win_length = n_fft

    # ONNX inference: the model emits magnitude and the (cos, sin) phase parts.
    mag, x, y = model_vocos.run(None, {"mels": mel})

    # Complex spectrogram from vocos output.
    spectrogram = mag * (x + 1j * y)
    window = torch.hann_window(win_length)

    if denoise:
        # Vocoder bias: run the model on an all-zeros mel of the same shape.
        mel_rand = torch.zeros_like(torch.as_tensor(mel))
        mag_bias, x_bias, y_bias = model_vocos.run(None, {"mels": mel_rand.float().numpy()})
        spectrogram_bias = mag_bias * (x_bias + 1j * y_bias)

        # Denoising: subtract a small fraction of the bias magnitude, keeping phase.
        spec = torch.view_as_real(torch.as_tensor(spectrogram))
        mag_spec = torch.sqrt(spec.pow(2).sum(-1))
        spec_bias = torch.view_as_real(torch.as_tensor(spectrogram_bias))
        mag_spec_bias = torch.sqrt(spec_bias.pow(2).sum(-1))
        strength = 0.0025  # empirical denoising strength
        mag_spec_denoised = mag_spec - mag_spec_bias * strength
        mag_spec_denoised = torch.clamp(mag_spec_denoised, 0.0)
        # Rebuild the complex spectrogram from denoised magnitude + original phase.
        angle = torch.atan2(spec[..., -1], spec[..., 0])
        spectrogram = torch.complex(mag_spec_denoised * torch.cos(angle), mag_spec_denoised * torch.sin(angle))

    # Inverse STFT (manual, via irfft + overlap-add).
    pad = (win_length - hop_length) // 2
    # as_tensor avoids the "copying a tensor" warning when the denoise branch
    # already produced a torch.Tensor; numpy input is converted as before.
    spectrogram = torch.as_tensor(spectrogram)
    B, N, T = spectrogram.shape
    print("Spectrogram synthesized shape", spectrogram.shape)

    # Inverse FFT per frame, then apply the synthesis window.
    ifft = torch.fft.irfft(spectrogram, n_fft, dim=1, norm="backward")
    ifft = ifft * window[None, :, None]

    # Overlap and add the windowed frames.
    output_size = (T - 1) * hop_length + win_length
    y = torch.nn.functional.fold(
        ifft,
        output_size=(1, output_size),
        kernel_size=(1, win_length),
        stride=(1, hop_length),
    )[:, 0, 0, pad:-pad]

    # Window envelope (sum of squared windows) for normalization.
    window_sq = window.square().expand(1, T, -1).transpose(1, 2)
    window_envelope = torch.nn.functional.fold(
        window_sq,
        output_size=(1, output_size),
        kernel_size=(1, win_length),
        stride=(1, hop_length),
    ).squeeze()[pad:-pad]

    # Normalize; the envelope must be strictly positive everywhere we divide.
    assert (window_envelope > 1e-11).all()
    y = y / window_envelope
    return y


def tts(text: str, spk_name: str, temperature: float, length_scale: float, denoise: bool):
    """Synthesize `text` with the given speaker and return a path to a 22.05 kHz WAV file."""
    spk_id = speaker_id_dict[spk_name]
    sid = np.array([int(spk_id)]) if spk_id is not None else None
    text_matcha, text_lengths = process_text(0, text, "cpu")

    # MATCHA + VOCOS pipeline.
    inputs = {
        "x": text_matcha,
        "x_lengths": text_lengths,
        "scales": np.array([temperature, length_scale], dtype=np.float32),
        "spks": sid,
    }

    # Matcha mel inference.
    mel_t0 = perf_counter()
    mel, mel_lengths = model_matcha_mel.run(None, inputs)
    mel_infer_secs = perf_counter() - mel_t0
    print("Matcha Mel inference time", mel_infer_secs)

    # Vocos inference.
    vocos_t0 = perf_counter()
    wavs_vocos = vocos_inference(mel, denoise)
    vocos_infer_secs = perf_counter() - vocos_t0
    print("Vocos inference time", vocos_infer_secs)

    # Persist the waveform where the HF Space can serve it (delete=False so
    # Gradio can stream the file after this function returns).
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir="/home/user/app") as fp_matcha_vocos:
        sf.write(fp_matcha_vocos.name, wavs_vocos.squeeze(0), 22050, "PCM_24")

    # Real-time factor: inference seconds per second of generated audio.
    print(f"RTF matcha + vocos { (mel_infer_secs + vocos_infer_secs) / (wavs_vocos.shape[1]/22050) }")
    return fp_matcha_vocos.name


## GUI space

title = """

Natural and efficient TTS in Catalan

"""

description = """
🍵 Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up ODE-based speech synthesis

For vocoders we use [Vocos](https://huggingface.co/BSC-LT/vocos-mel-22khz-cat) trained in a catalan set of ~28 hours.

[Matcha](https://huggingface.co/BSC-LT/matcha-tts-cat-onnx) was trained using openslr69 and festcat datasets
"""

about = """
## 📄 About

Natural and efficient TTS in Catalan: using Matcha-TTS with the Catalan language.

Here you'll be able to find all the information regarding our model, which has been trained with the use of deep learning.
If you want specific information on how to train the model you can find it [here](https://huggingface.co/BSC-LT/matcha-tts-cat-multispeaker).
The code we've used is also on Github [here](https://github.com/langtech-bsc/Matcha-TTS/tree/dev-cat).

## Table of Contents

Click to expand

- [General Model Description](#general-model-description)
- [Adaptation to Catalan](#adaptation-to-catalan)
- [Intended Uses and Limitations](#intended-uses-and-limitations)
- [Samples](#samples)
- [Citation](#citation)
- [Additional Information](#additional-information)

## General Model Description

**Matcha-TTS** is an encoder-decoder architecture designed for fast acoustic modelling in TTS.
On the one hand, the encoder part is based on a text encoder and a phoneme duration prediction. Together, they predict averaged acoustic features.
On the other hand, the decoder has essentially a U-Net backbone inspired by [Grad-TTS](https://arxiv.org/pdf/2105.06337.pdf), which is based on the Transformer architecture.
In the latter, by replacing 2D CNNs by 1D CNNs, a large reduction in memory consumption and fast synthesis is achieved.

**Matcha-TTS** is a non-autorregressive model trained with optimal-transport conditional flow matching (OT-CFM).
This yields an ODE-based decoder capable of generating high output quality in fewer synthesis steps than models trained using score matching.

## Adaptation to Catalan

The original Matcha-TTS model excels in English, but to bring its capabilities to Catalan, a multi-step process was undertaken.
Firstly, we fine-tuned the model from English to Catalan central, which laid the groundwork for understanding the language's nuances.
This first fine-tuning was done using two datasets:

* [Our version of the openslr-slr69 dataset.](https://huggingface.co/datasets/projecte-aina/openslr-slr69-ca-trimmed-denoised)
* A studio-recorded dataset of central catalan, which will soon be published.

This soon to be published dataset also included recordings of three different dialects:

* Valencian
* Occidental
* Balear

With a male and a female speaker for each dialect.
Then, through fine-tuning for these specific Catalan dialects, the model adapted to regional variations in pronunciation and cadence.
This meticulous approach ensures that the model reflects the linguistic richness and cultural diversity within the Catalan-speaking community, offering seamless communication in previously underserved dialects.

In addition to training the Matcha-TTS model for Catalan, integrating the eSpeak phonemizer played a crucial role in enhancing the naturalness and accuracy of generated speech.
A TTS (Text-to-Speech) system comprises several components, each contributing to the overall quality of synthesized speech.
The first component involves text preprocessing, where the input text undergoes normalization and linguistic analysis to identify words, punctuation, and linguistic features.
Next, the text is converted into phonemes, the smallest units of sound in a language, through a process called phonemization.
This step is where the eSpeak phonemizer shines, as it accurately converts Catalan text into phonetic representations, capturing the subtle nuances of pronunciation specific to Catalan.
You can find the espeak version we used [here](https://github.com/projecte-aina/espeak-ng/tree/dev-ca).

After phonemization, the phonemes are passed to the synthesis component, where they are transformed into audible speech.
Here, the Matcha-TTS model takes center stage, generating high-quality speech output based on the phonetic input.
The model's training, fine-tuning, and adaptation to Catalan ensure that the synthesized speech retains the natural rhythm, intonation, and pronunciation patterns of the language, thereby enhancing the overall user experience.
Finally, the synthesized speech undergoes post-processing, where prosodic features such as pitch, duration, and emphasis are applied to further refine the output and make it sound more natural and expressive.

By integrating the eSpeak phonemizer into the TTS pipeline and adapting it for Catalan, alongside training the Matcha-TTS model for the language, we have created a comprehensive and effective system for generating high-quality Catalan speech.
This combination of advanced techniques and meticulous attention to linguistic detail is instrumental in bridging language barriers and facilitating communication for Catalan speakers worldwide.

## Intended Uses and Limitations

This model is intended to serve as an acoustic feature generator for multispeaker text-to-speech systems for the Catalan language.
It has been finetuned using a Catalan phonemizer, therefore if the model is used for other languages it may not produce intelligible samples after mapping its output into a speech waveform.

The quality of the samples can vary depending on the speaker.
This may be due to the sensitivity of the model in learning specific frequencies and also due to the quality of samples for each speaker.

## Samples

* Female samples

Valencian Occidental Balear

* Male samples:

Valencian Occidental Balear

## Citation

If this code contributes to your research, please cite the work:

```
@misc{mehta2024matchatts,
      title={Matcha-TTS: A fast TTS architecture with conditional flow matching},
      author={Shivam Mehta and Ruibo Tu and Jonas Beskow and Éva Székely and Gustav Eje Henter},
      year={2024},
      eprint={2309.03199},
      archivePrefix={arXiv},
      primaryClass={eess.AS}
}
```

## Additional Information

### Author
The Language Technologies Unit from Barcelona Supercomputing Center.

### Contact
For further information, please send an email to .

### Copyright
Copyright(c) 2023 by Language Technologies Unit, Barcelona Supercomputing Center.

### License
[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)

### Funding
This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project](https://projecteaina.cat/).
"""

article = "Training and demo by The Language Technologies Unit from Barcelona Supercomputing Center."

vits2_inference = gr.Interface(
    fn=tts,
    inputs=[
        gr.Textbox(
            value="m'ha costat molt desenvolupar una veu, i ara que la tinc no estaré en silenci.",
            max_lines=1,
            label="Input text",
        ),
        gr.Dropdown(
            choices=speakers,
            label="Speaker id",
            value=DEFAULT_SPEAKER_ID,
            info="Models are trained on 47 speakers. You can prompt the model using one of these speaker ids.",
        ),
        gr.Slider(
            0.1,
            2.0,
            value=0.667,
            step=0.01,
            label="Temperature",
            info="Temperature",
        ),
        gr.Slider(
            0.5,
            2.0,
            value=1.0,
            step=0.01,
            label="Length scale",
            info="Controls speech pace, larger values for slower pace and smaller values for faster pace",
        ),
        gr.Checkbox(label="Denoise", info="Removes model bias from vocos", value=True),
    ],
    outputs=[gr.Audio(label="Matcha vocos", interactive=False, type="filepath")],
)

about_article = gr.Markdown(about)

demo = gr.Blocks()

with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.TabbedInterface([vits2_inference, about_article], ["Demo", "About"])
    gr.Markdown(article)

demo.queue(max_size=10)
demo.launch(show_api=False, server_name="0.0.0.0", server_port=7860)