|
|
|
|
|
|
|
import os |
|
import torch |
|
from openvoice import se_extractor |
|
from openvoice.api import BaseSpeakerTTS, ToneColorConverter |
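# OpenVoice section: BaseSpeakerTTS synthesizes English speech with a stock base
# speaker, and ToneColorConverter then re-voices that audio to match the tone color
# (speaker embedding) extracted from a user-supplied reference clip.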
|
|
|
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
ckpt_base = 'checkpoints/base_speakers/EN' |
|
ckpt_converter = 'checkpoints/converter' |
|
base_speaker_tts = BaseSpeakerTTS(f'{ckpt_base}/config.json', device=device) |
|
base_speaker_tts.load_ckpt(f'{ckpt_base}/checkpoint.pth') |
|
|
|
tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device) |
|
tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth') |
|
|
|
|
|
|
|
|
|
def vc_en(audio_ref, style_mode):
    text = "We have always tried to be at the intersection of technology and liberal arts, to be able to get the best of both, to make extremely advanced products from a technology point of view."

    # The "default" style uses the base speaker's default source embedding; any other
    # style uses the shared style embedding and passes the style name to the TTS.
    if style_mode == "default":
        source_se = torch.load(f'{ckpt_base}/en_default_se.pth').to(device)
        speaker = 'default'
    else:
        source_se = torch.load(f'{ckpt_base}/en_style_se.pth').to(device)
        speaker = style_mode

    # Tone color embedding of the reference audio (VAD trims leading/trailing silence).
    target_se, audio_name = se_extractor.get_se(audio_ref, tone_color_converter, target_dir='processed', vad=True)

    # Synthesize with the base speaker, then convert its tone color to the reference speaker.
    src_path = "tmp.wav"
    save_path = "output.wav"
    base_speaker_tts.tts(text, src_path, speaker=speaker, language='English', speed=1.0)

    encode_message = "@MyShell"
    tone_color_converter.convert(
        audio_src_path=src_path,
        src_se=source_se,
        tgt_se=target_se,
        output_path=save_path,
        message=encode_message)

    return save_path
|
|
|
|
|
|
|
import re, logging |
|
import LangSegment |
|
logging.getLogger("markdown_it").setLevel(logging.ERROR) |
|
logging.getLogger("urllib3").setLevel(logging.ERROR) |
|
logging.getLogger("httpcore").setLevel(logging.ERROR) |
|
logging.getLogger("httpx").setLevel(logging.ERROR) |
|
logging.getLogger("asyncio").setLevel(logging.ERROR) |
|
logging.getLogger("charset_normalizer").setLevel(logging.ERROR) |
|
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR) |
|
|
import json |
|
|
|
cnhubert_base_path = os.environ.get( |
|
"cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base" |
|
) |
|
bert_path = os.environ.get( |
|
"bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" |
|
) |
|
infer_ttswebui = os.environ.get("infer_ttswebui", 9872) |
|
infer_ttswebui = int(infer_ttswebui) |
|
is_share = os.environ.get("is_share", "False")
is_share = is_share.lower() in ("true", "1")
if "_CUDA_VISIBLE_DEVICES" in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
is_half = os.environ.get("is_half", "True").lower() in ("true", "1") and torch.cuda.is_available()
|
import gradio as gr |
|
from transformers import AutoModelForMaskedLM, AutoTokenizer |
|
import numpy as np |
|
import librosa |
|
from feature_extractor import cnhubert |
|
|
|
cnhubert.cnhubert_base_path = cnhubert_base_path |
|
|
|
from module.models import SynthesizerTrn |
|
from AR.models.t2s_lightning_module import Text2SemanticLightningModule |
|
from text import cleaned_text_to_sequence |
|
from text.cleaner import clean_text |
|
from time import time as ttime |
|
from module.mel_processing import spectrogram_torch |
|
from my_utils import load_audio |
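# GPT-SoVITS section. Rough inference pipeline:
#   1. clean_text / BERT  -> phoneme ids plus (for Chinese) phone-level BERT features
#   2. cnhubert (SSL)     -> features of the reference audio, quantized by the SoVITS VQ
#                            into "prompt" semantic tokens
#   3. Text2SemanticLightningModule (GPT) -> predicts semantic tokens for the target text
#   4. SynthesizerTrn (SoVITS)            -> decodes semantic tokens + reference spectrogram
#                                            into the output waveform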
|
|
|
|
|
|
|
tokenizer = AutoTokenizer.from_pretrained(bert_path) |
|
bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) |
|
if is_half == True: |
|
bert_model = bert_model.half().to(device) |
|
else: |
|
bert_model = bert_model.to(device) |
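# get_bert_feature(): phone-level BERT features. Each character's hidden state is
# repeated word2ph[i] times so the feature sequence lines up with the phoneme
# sequence. Only used for Chinese text; other languages get zeros (see get_bert_inf).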
|
|
|
|
|
def get_bert_feature(text, word2ph): |
|
with torch.no_grad(): |
|
inputs = tokenizer(text, return_tensors="pt") |
|
for i in inputs: |
|
inputs[i] = inputs[i].to(device) |
|
res = bert_model(**inputs, output_hidden_states=True) |
|
res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] |
|
assert len(word2ph) == len(text) |
|
phone_level_feature = [] |
|
for i in range(len(word2ph)): |
|
repeat_feature = res[i].repeat(word2ph[i], 1) |
|
phone_level_feature.append(repeat_feature) |
|
phone_level_feature = torch.cat(phone_level_feature, dim=0) |
|
return phone_level_feature.T |
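# DictToAttrRecursive: wraps nested config dicts so values can be read via attribute
# access (e.g. hps.data.sampling_rate) as well as item access.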
|
|
|
|
|
class DictToAttrRecursive(dict): |
|
def __init__(self, input_dict): |
|
super().__init__(input_dict) |
|
for key, value in input_dict.items(): |
|
if isinstance(value, dict): |
|
value = DictToAttrRecursive(value) |
|
self[key] = value |
|
setattr(self, key, value) |
|
|
|
def __getattr__(self, item): |
|
try: |
|
return self[item] |
|
except KeyError: |
|
raise AttributeError(f"Attribute {item} not found") |
|
|
|
def __setattr__(self, key, value): |
|
if isinstance(value, dict): |
|
value = DictToAttrRecursive(value) |
|
super(DictToAttrRecursive, self).__setitem__(key, value) |
|
super().__setattr__(key, value) |
|
|
|
def __delattr__(self, item): |
|
try: |
|
del self[item] |
|
except KeyError: |
|
raise AttributeError(f"Attribute {item} not found") |
|
|
|
|
|
ssl_model = cnhubert.get_model() |
|
if is_half == True: |
|
ssl_model = ssl_model.half().to(device) |
|
else: |
|
ssl_model = ssl_model.to(device) |
|
|
|
clm = ""  # name of the currently loaded voice; weights are only swapped when it changes
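# change_sovits_weights(): loads a SoVITS checkpoint, rebuilds SynthesizerTrn from the
# config stored inside it, and loads the weights. The posterior encoder (enc_q), which
# is only needed for training, is dropped for fine-tuned (non-"pretrained") checkpoints.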
|
|
|
def change_sovits_weights(sovits_path): |
|
global vq_model, hps |
|
dict_s2 = torch.load(sovits_path, map_location="cpu") |
|
hps = dict_s2["config"] |
|
hps = DictToAttrRecursive(hps) |
|
hps.model.semantic_frame_rate = "25hz" |
|
vq_model = SynthesizerTrn( |
|
hps.data.filter_length // 2 + 1, |
|
hps.train.segment_size // hps.data.hop_length, |
|
n_speakers=hps.data.n_speakers, |
|
**hps.model |
|
) |
|
if ("pretrained" not in sovits_path): |
|
del vq_model.enc_q |
|
if is_half == True: |
|
vq_model = vq_model.half().to(device) |
|
else: |
|
vq_model = vq_model.to(device) |
|
vq_model.eval() |
|
print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) |
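# change_gpt_weights(): loads a GPT (text-to-semantic) checkpoint and its training
# config; max_sec bounds how many seconds of semantic tokens may be generated
# (early_stop_num = hz * max_sec in get_tts_wav).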
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def change_gpt_weights(gpt_path): |
|
global hz, max_sec, t2s_model, config |
|
hz = 50 |
|
dict_s1 = torch.load(gpt_path, map_location="cpu") |
|
config = dict_s1["config"] |
|
max_sec = config["data"]["max_sec"] |
|
t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) |
|
t2s_model.load_state_dict(dict_s1["weight"]) |
|
if is_half == True: |
|
t2s_model = t2s_model.half() |
|
t2s_model = t2s_model.to(device) |
|
t2s_model.eval() |
|
    total = sum(param.nelement() for param in t2s_model.parameters())
    print("Number of GPT parameters: %.2fM" % (total / 1e6))
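# get_spepc(): linear spectrogram of the reference audio, passed to the SoVITS decoder
# as the speaker/timbre reference.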
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_spepc(hps, filename): |
|
audio = load_audio(filename, int(hps.data.sampling_rate)) |
|
audio = torch.FloatTensor(audio) |
|
audio_norm = audio |
|
audio_norm = audio_norm.unsqueeze(0) |
|
spec = spectrogram_torch( |
|
audio_norm, |
|
hps.data.filter_length, |
|
hps.data.sampling_rate, |
|
hps.data.hop_length, |
|
hps.data.win_length, |
|
center=False, |
|
) |
|
return spec |
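# Maps the language labels shown in the UI to the codes understood by the text cleaner
# and LangSegment ("all_*" = monolingual, plain codes = mixed with English).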
|
|
|
|
|
dict_language = { |
|
"ZH": "all_zh", |
|
"EN": "en", |
|
"JP": "all_ja", |
|
"ZH/EN": "zh", |
|
"JP/EN": "ja", |
|
"Automatic": "auto", |
|
} |
|
|
|
|
|
def clean_text_inf(text, language): |
|
phones, word2ph, norm_text = clean_text(text, language) |
|
phones = cleaned_text_to_sequence(phones) |
|
return phones, word2ph, norm_text |
|
|
|
dtype=torch.float16 if is_half == True else torch.float32 |
|
def get_bert_inf(phones, word2ph, norm_text, language): |
|
language=language.replace("all_","") |
|
if language == "zh": |
|
bert = get_bert_feature(norm_text, word2ph).to(device) |
|
else: |
|
bert = torch.zeros( |
|
(1024, len(phones)), |
|
dtype=torch.float16 if is_half == True else torch.float32, |
|
).to(device) |
|
|
|
return bert |
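# Punctuation treated as sentence boundaries by the slicing and padding logic below.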
|
|
|
|
|
splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", } |
|
|
|
|
|
def get_first(text): |
|
pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]" |
|
text = re.split(pattern, text)[0].strip() |
|
return text |
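# get_phones_and_bert(): text front-end for one segment. Monolingual modes clean the
# whole string at once; mixed/auto modes use LangSegment to split the text by language,
# process each run separately, then concatenate the phones and BERT features.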
|
|
|
|
|
def get_phones_and_bert(text,language): |
|
if language in {"en","all_zh","all_ja"}: |
|
language = language.replace("all_","") |
|
if language == "en": |
|
LangSegment.setfilters(["en"]) |
|
formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text)) |
|
else: |
|
|
|
formattext = text |
|
while " " in formattext: |
|
formattext = formattext.replace(" ", " ") |
|
phones, word2ph, norm_text = clean_text_inf(formattext, language) |
|
if language == "zh": |
|
bert = get_bert_feature(norm_text, word2ph).to(device) |
|
else: |
|
bert = torch.zeros( |
|
(1024, len(phones)), |
|
dtype=torch.float16 if is_half == True else torch.float32, |
|
).to(device) |
|
elif language in {"zh", "ja","auto"}: |
|
textlist=[] |
|
langlist=[] |
|
LangSegment.setfilters(["zh","ja","en","ko"]) |
|
if language == "auto": |
|
for tmp in LangSegment.getTexts(text): |
|
if tmp["lang"] == "ko": |
|
langlist.append("zh") |
|
textlist.append(tmp["text"]) |
|
else: |
|
langlist.append(tmp["lang"]) |
|
textlist.append(tmp["text"]) |
|
else: |
|
for tmp in LangSegment.getTexts(text): |
|
if tmp["lang"] == "en": |
|
langlist.append(tmp["lang"]) |
|
else: |
|
|
|
langlist.append(language) |
|
textlist.append(tmp["text"]) |
|
print(textlist) |
|
print(langlist) |
|
phones_list = [] |
|
bert_list = [] |
|
norm_text_list = [] |
|
for i in range(len(textlist)): |
|
lang = langlist[i] |
|
phones, word2ph, norm_text = clean_text_inf(textlist[i], lang) |
|
bert = get_bert_inf(phones, word2ph, norm_text, lang) |
|
phones_list.append(phones) |
|
norm_text_list.append(norm_text) |
|
bert_list.append(bert) |
|
bert = torch.cat(bert_list, dim=1) |
|
phones = sum(phones_list, []) |
|
norm_text = ''.join(norm_text_list) |
|
|
|
return phones,bert.to(dtype),norm_text |
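# merge_short_text_in_array(): glues consecutive slices together until each is at least
# `threshold` characters, so the model is not asked to synthesize tiny fragments.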
|
|
|
|
|
def merge_short_text_in_array(texts, threshold): |
|
if (len(texts)) < 2: |
|
return texts |
|
result = [] |
|
text = "" |
|
for ele in texts: |
|
text += ele |
|
if len(text) >= threshold: |
|
result.append(text) |
|
text = "" |
|
if (len(text) > 0): |
|
if len(result) == 0: |
|
result.append(text) |
|
else: |
|
result[len(result) - 1] += text |
|
return result |
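# get_tts_wav(): main synthesis entry point wired to the Gradio button. Falls back to
# the preset reference audio/transcript when none is uploaded, swaps model weights if a
# different voice was requested, slices the input text, and yields
# (sampling_rate, int16 waveform) for the output gr.Audio component.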
|
|
|
def get_tts_wav(name, gptmp, svmp, sty, ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut="None", top_k=20, top_p=0.6, temperature=0.6, ref_free = False): |
|
|
|
global clm |
|
if(not ref_wav_path): |
|
ref_wav_path=f"referenceaudio/{name}/"+referencedata[name][0][sty] |
|
prompt_text=referencedata[name][1][sty] |
|
if clm!=name: |
|
print(f"Switching to model {name}") |
|
clm=name |
|
change_gpt_weights(gptmp) |
|
change_sovits_weights(svmp) |
|
|
|
if prompt_text is None or len(prompt_text) == 0: |
|
ref_free = True |
|
t0 = ttime() |
|
prompt_language = dict_language[prompt_language] |
|
text_language = dict_language[text_language] |
|
if not ref_free: |
|
prompt_text = prompt_text.strip("\n") |
|
if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "." |
|
text = text.strip("\n") |
|
if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text |
|
|
|
print("Input text:", text) |
|
zero_wav = np.zeros( |
|
int(hps.data.sampling_rate * 0.3), |
|
dtype=np.float16 if is_half == True else np.float32, |
|
) |
|
with torch.no_grad(): |
|
wav16k, sr = librosa.load(ref_wav_path, sr=16000) |
|
        if (wav16k.shape[0] > 240000 or wav16k.shape[0] < 48000):
            raise OSError("Reference audio must be between 3 and 15 seconds long!")
|
wav16k = torch.from_numpy(wav16k) |
|
zero_wav_torch = torch.from_numpy(zero_wav) |
|
if is_half == True: |
|
wav16k = wav16k.half().to(device) |
|
zero_wav_torch = zero_wav_torch.half().to(device) |
|
else: |
|
wav16k = wav16k.to(device) |
|
zero_wav_torch = zero_wav_torch.to(device) |
|
wav16k = torch.cat([wav16k, zero_wav_torch]) |
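        # HuBERT features of the (silence-padded) reference audio; quantizing them with
        # the SoVITS VQ yields the prompt semantic tokens that condition the GPT.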
|
ssl_content = ssl_model.model(wav16k.unsqueeze(0))[ |
|
"last_hidden_state" |
|
].transpose( |
|
1, 2 |
|
) |
|
codes = vq_model.extract_latent(ssl_content) |
|
|
|
prompt_semantic = codes[0, 0] |
|
t1 = ttime() |
|
|
|
if (how_to_cut == "4 Sentences"): |
|
text = cut1(text) |
|
elif (how_to_cut == "50 Characters"): |
|
text = cut2(text) |
|
    elif (how_to_cut == "ZH/JP Punctuation"):
|
text = cut3(text) |
|
elif (how_to_cut == "EN Punctuation"): |
|
text = cut4(text) |
|
elif (how_to_cut == "All Punctuation"): |
|
text = cut5(text) |
|
while "\n\n" in text: |
|
text = text.replace("\n\n", "\n") |
|
texts = text.split("\n") |
|
texts = merge_short_text_in_array(texts, 5) |
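    # Synthesize each slice separately and concatenate, inserting ~0.3 s of silence
    # (zero_wav) between slices.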
|
audio_opt = [] |
|
if not ref_free: |
|
phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language) |
|
|
|
for text in texts: |
|
|
|
if (len(text.strip()) == 0): |
|
continue |
|
if (text[-1] not in splits): text += "。" if text_language != "en" else "." |
|
phones2,bert2,norm_text2=get_phones_and_bert(text, text_language) |
|
if not ref_free: |
|
bert = torch.cat([bert1, bert2], 1) |
|
all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0) |
|
else: |
|
bert = bert2 |
|
all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0) |
|
|
|
bert = bert.to(device).unsqueeze(0) |
|
all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device) |
|
prompt = prompt_semantic.unsqueeze(0).to(device) |
|
t2 = ttime() |
|
with torch.no_grad(): |
|
|
|
pred_semantic, idx = t2s_model.model.infer_panel( |
|
all_phoneme_ids, |
|
all_phoneme_len, |
|
None if ref_free else prompt, |
|
bert, |
|
|
|
top_k=top_k, |
|
top_p=top_p, |
|
temperature=temperature, |
|
early_stop_num=hz * max_sec, |
|
) |
|
t3 = ttime() |
|
|
|
pred_semantic = pred_semantic[:, -idx:].unsqueeze( |
|
0 |
|
) |
|
refer = get_spepc(hps, ref_wav_path) |
|
if is_half == True: |
|
refer = refer.half().to(device) |
|
else: |
|
refer = refer.to(device) |
|
|
|
audio = ( |
|
vq_model.decode( |
|
pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refer |
|
) |
|
.detach() |
|
.cpu() |
|
.numpy()[0, 0] |
|
) |
|
max_audio=np.abs(audio).max() |
|
if max_audio>1:audio/=max_audio |
|
audio_opt.append(audio) |
|
audio_opt.append(zero_wav) |
|
t4 = ttime() |
|
print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) |
|
yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype( |
|
np.int16 |
|
) |
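# Text slicing helpers. split() breaks text at the punctuation in `splits`; cut1 groups
# every 4 sentences, cut2 packs roughly 50 characters per slice, cut3/cut4 split on
# Chinese/English full stops, and cut5 splits on all punctuation.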
|
|
|
|
|
def split(todo_text): |
|
todo_text = todo_text.replace("……", "。").replace("——", ",") |
|
if todo_text[-1] not in splits: |
|
todo_text += "。" |
|
i_split_head = i_split_tail = 0 |
|
len_text = len(todo_text) |
|
todo_texts = [] |
|
while 1: |
|
if i_split_head >= len_text: |
|
break |
|
if todo_text[i_split_head] in splits: |
|
i_split_head += 1 |
|
todo_texts.append(todo_text[i_split_tail:i_split_head]) |
|
i_split_tail = i_split_head |
|
else: |
|
i_split_head += 1 |
|
return todo_texts |
|
|
|
|
|
def cut1(inp): |
|
inp = inp.strip("\n") |
|
inps = split(inp) |
|
split_idx = list(range(0, len(inps), 4)) |
|
split_idx[-1] = None |
|
if len(split_idx) > 1: |
|
opts = [] |
|
for idx in range(len(split_idx) - 1): |
|
opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]])) |
|
else: |
|
opts = [inp] |
|
return "\n".join(opts) |
|
|
|
|
|
def cut2(inp): |
|
inp = inp.strip("\n") |
|
inps = split(inp) |
|
if len(inps) < 2: |
|
return inp |
|
opts = [] |
|
summ = 0 |
|
tmp_str = "" |
|
for i in range(len(inps)): |
|
summ += len(inps[i]) |
|
tmp_str += inps[i] |
|
if summ > 50: |
|
summ = 0 |
|
opts.append(tmp_str) |
|
tmp_str = "" |
|
if tmp_str != "": |
|
opts.append(tmp_str) |
|
|
|
if len(opts) > 1 and len(opts[-1]) < 50: |
|
opts[-2] = opts[-2] + opts[-1] |
|
opts = opts[:-1] |
|
return "\n".join(opts) |
|
|
|
|
|
def cut3(inp): |
|
inp = inp.strip("\n") |
|
return "\n".join(["%s" % item for item in inp.strip("。").split("。")]) |
|
|
|
|
|
def cut4(inp): |
|
inp = inp.strip("\n") |
|
return "\n".join(["%s" % item for item in inp.strip(".").split(".")]) |
|
|
|
|
|
|
|
def cut5(inp): |
|
|
|
|
|
inp = inp.strip("\n") |
|
punds = r'[,.;?!、,。?!;:…]' |
|
items = re.split(f'({punds})', inp) |
|
mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])] |
|
|
|
if len(items)%2 == 1: |
|
mergeitems.append(items[-1]) |
|
opt = "\n".join(mergeitems) |
|
return opt |
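# custom_sort_key(): natural sort key, so "model2" sorts before "model10" in the weight
# dropdown lists.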
|
|
|
|
|
def custom_sort_key(s): |
|
|
|
    parts = re.split(r'(\d+)', s)
|
|
|
parts = [int(part) if part.isdigit() else part for part in parts] |
|
return parts |
|
|
|
|
|
def change_choices(): |
|
SoVITS_names, GPT_names = get_weights_names() |
|
return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"} |
|
|
|
|
|
pretrained_sovits_name = "GPT_SoVITS/pretrained_models/s2G488k.pth" |
|
pretrained_gpt_name = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" |
|
SoVITS_weight_root = "GPT_SoVITS/SoVITS_weights" |
|
GPT_weight_root = "GPT_SoVITS/GPT_weights" |
|
|
|
|
|
|
|
|
|
def get_weights_names(): |
|
SoVITS_names = [pretrained_sovits_name] |
|
for name in os.listdir(SoVITS_weight_root): |
|
if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (SoVITS_weight_root, name)) |
|
GPT_names = [pretrained_gpt_name] |
|
for name in os.listdir(GPT_weight_root): |
|
if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (GPT_weight_root, name)) |
|
return SoVITS_names, GPT_names |
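# load_models() reads voicelist.json; the keys below are the ones actually consumed.
# Illustrative sketch of one entry (names, paths, and values are made up):
#
#   "ExampleVoice": {
#       "enable": true,
#       "title": "Example Voice",
#       "gpt_model_path": "examplevoice-e15.ckpt",
#       "sovits_model_path": "examplevoice_e8_s200.pth",
#       "modelauthor": "Someone",
#       "cover": "examplevoice.png",
#       "styles": {"Neutral": "neutral.wav"},
#       "styletrans": {"Neutral": "Transcript of the neutral reference clip."}
#   }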
|
|
|
def load_models(): |
|
print("Loading models...") |
|
voices=[] |
|
ustyles={} |
|
with open("voicelist.json", "r", encoding="utf-8") as f: |
|
voc_info = json.load(f) |
|
for name, info in voc_info.items(): |
|
if not info['enable']: |
|
continue |
|
title= info['title'] |
|
gptmodelpath= "%s/%s" % (GPT_weight_root, info['gpt_model_path']) |
|
sovitsmodelpath= "%s/%s" % (SoVITS_weight_root, info['sovits_model_path']) |
|
author= info['modelauthor'] |
|
image = info['cover'] |
|
styles = info['styles'] |
|
styletrans = info['styletrans'] |
|
st=[styles, styletrans] |
|
voices.append((name, title, gptmodelpath, sovitsmodelpath, author, image)) |
|
ustyles[name]=st |
|
print(f"Indexed model {title}") |
|
return voices, ustyles |
|
|
|
modeldata, referencedata = load_models() |
|
|
|
SoVITS_names, GPT_names = get_weights_names() |
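# Gradio UI. The shared input components below are created up-front and rendered later
# (via .render()) inside the Blocks layout; each enabled voice gets its own tab whose
# Synthesize button calls get_tts_wav with that voice's model paths.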
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
text = gr.TextArea(label="Input Text", value="Hello there! This is test audio of a new text to speech tool.") |
|
text_language = gr.Dropdown(label="Language", choices=["EN", "JP", "ZH", "ZH/EN", "JP/EN", "Automatic"], value="EN") |
|
how_to_cut = gr.Dropdown(label="Slicing Method", |
|
choices=["None", "4 Sentences", "50 Characters", "ZH/JP Punctuation", "EN Punctuation", "All Punctuation" ], |
|
value="4 Sentences", |
|
interactive=True, |
|
) |
|
top_k = gr.Slider(minimum=1,maximum=100,step=1,label="top_k",value=5,interactive=True) |
|
top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label="top_p",value=1,interactive=True) |
|
temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label="temperature",value=1,interactive=True) |
|
|
|
|
|
with gr.Blocks(title="Lemonfoot GPT-SoVITS") as app: |
|
gr.Markdown( |
|
"# Lemonfoot GPT-SoVITS 🚀🍋\n" |
|
"### Space by Kit Lemonfoot / Noel Shirogane's High Flying Birds\n" |
|
"Based on code originally by RVC_Boss and kevinwang676\n\n" |
|
"Do no evil.\n\n" |
|
"**NOTE:** *This is more or less a test Space*. HuggingFace Spaces are not capable of running GPT-SoVITS efficiently; a single generation may take upwards of an hour to infer one sentence. " |
|
"If you wish to use these models for legitimate generation, it is recommended to [download the models individually](https://huggingface.co/Kit-Lemonfoot/kitlemonfoot_gptsovits_models) and run GPT-SoVITS locally." |
|
) |
|
for (name, title, gptmodelpath, sovitsmodelpath, author, image) in modeldata: |
|
with gr.TabItem(name): |
|
with gr.Row(): |
|
with gr.Column(): |
|
n = gr.Textbox(value=name, visible=False, interactive=False) |
|
gptmp = gr.Textbox(value=gptmodelpath, visible=False, interactive=False) |
|
svmp = gr.Textbox(value=sovitsmodelpath, visible=False, interactive=False) |
|
gr.Markdown(f"**{title}**\n\n Dataset author: {author}") |
|
gr.Image(f"images/{image}", label=None, show_label=False, width=300, show_download_button=False, container=False, show_share_button=False) |
|
with gr.Column(): |
|
with gr.TabItem("Style using a preset"): |
|
sty = gr.Dropdown( |
|
label="Current style", |
|
                            choices=list(referencedata[name][0].keys()),
|
value="Neutral", |
|
interactive=True |
|
) |
|
with gr.TabItem("Style using a different audio"): |
|
with gr.Column(): |
|
ref_audio_path = gr.Audio(label="Reference Audio", type="filepath") |
|
                            ref_text_free = gr.Checkbox(label="Text-free reference mode (use the reference audio without a transcript).", value=False, interactive=True)
|
prompt_text = gr.Textbox(label="Reference Audio Text", interactive=True) |
|
prompt_language = gr.Textbox(value="EN", visible=False, interactive=False) |
|
with gr.Column(): |
|
inference_button = gr.Button("Synthesize", variant="primary") |
|
output = gr.Audio(label="Output") |
|
|
|
inference_button.click( |
|
get_tts_wav, |
|
inputs=[n, gptmp, svmp, sty, ref_audio_path, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free], |
|
outputs=[output] |
|
) |
|
|
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
text.render() |
|
text_language.render() |
|
how_to_cut.render() |
|
with gr.Column(): |
|
gr.Markdown("### GPT Sampling Parameters") |
|
top_k.render() |
|
top_p.render() |
|
temperature.render() |
|
|
|
app.queue().launch() |
|
|