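# TalktoAI: a Gradio voice assistant that chains Whisper (speech-to-text),
# the OpenAI chat API, Coqui TTS voice cloning, VoiceFixer restoration, and
# SpeechBrain MetricGAN+ enhancement into a speech-in / speech-out pipeline.
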
import gradio as gr

import os
os.system('git clone https://github.com/Edresson/Coqui-TTS -b multilingual-torchaudio-SE TTS')
os.system('pip install -q -e TTS/')
os.system('pip install -q torchaudio==0.9.0')

os.system('pip install voicefixer --upgrade')
from voicefixer import VoiceFixer
voicefixer = VoiceFixer()
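# VoiceFixer restores the raw TTS output (denoising/restoration) before the final enhancement pass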

import sys
TTS_PATH = "TTS/"

# add libraries into environment
sys.path.append(TTS_PATH) # set this if TTS is not installed globally

import IPython
from IPython.display import Audio

import torch
import torchaudio
from speechbrain.pretrained import SpectralMaskEnhancement

import whisper
model1 = whisper.load_model("small")
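# `model1` handles ASR: language detection and decoding of the user's speech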

import openai

# pick the compute device once; everything below falls back to CPU when no GPU is present
USE_CUDA = torch.cuda.is_available()

enhance_model = SpectralMaskEnhancement.from_hparams(
    source="speechbrain/metricgan-plus-voicebank",
    savedir="pretrained_models/metricgan-plus-voicebank",
    run_opts={"device": "cuda" if USE_CUDA else "cpu"},
)

mes = [
    {"role": "system", "content": "You are my personal assistant. Try to be helpful."}
]

res = []
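# shared state across calls: `mes` is the running chat history sent to the
# OpenAI API; `res` accumulates Whisper transcripts (the last entry is newest)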

from TTS.tts.utils.synthesis import synthesis
from TTS.utils.audio import AudioProcessor


from TTS.tts.models import setup_model
from TTS.config import load_config
from TTS.tts.models.vits import *  

OUT_PATH = 'out/'

# create output path
os.makedirs(OUT_PATH, exist_ok=True)

# model vars 
MODEL_PATH = '/home/user/app/best_model_latest.pth.tar'
CONFIG_PATH = '/home/user/app/config.json'
TTS_LANGUAGES = "/home/user/app/language_ids.json"
TTS_SPEAKERS = "/home/user/app/speakers.json"

# load the config
C = load_config(CONFIG_PATH)


# load the audio processor
ap = AudioProcessor(**C.audio)

speaker_embedding = None

C.model_args['d_vector_file'] = TTS_SPEAKERS
C.model_args['use_speaker_encoder_as_loss'] = False

model = setup_model(C)
model.language_manager.set_language_ids_from_file(TTS_LANGUAGES)
cp = torch.load(MODEL_PATH, map_location=torch.device('cpu'))
# remove speaker encoder
model_weights = cp['model'].copy()
for key in list(model_weights.keys()):
  if "speaker_encoder" in key:
    del model_weights[key]

model.load_state_dict(model_weights)


model.eval()

if USE_CUDA:
    model = model.cuda()


os.system('pip install -q pydub ffmpeg-normalize')

CONFIG_SE_PATH = "config_se.json"
CHECKPOINT_SE_PATH = "SE_checkpoint.pth.tar"

from TTS.tts.utils.speakers import SpeakerManager
from pydub import AudioSegment
import librosa

# speaker encoder: computes d-vector speaker embeddings from reference audio clips
SE_speaker_manager = SpeakerManager(encoder_model_path=CHECKPOINT_SE_PATH, encoder_config_path=CONFIG_SE_PATH, use_cuda=USE_CUDA)

def compute_spec(ref_file):
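  """Load a reference clip and return its spectrogram as a (1, C, T) float tensor."""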
  y, sr = librosa.load(ref_file, sr=ap.sample_rate)
  spec = ap.spectrogram(y)
  spec = torch.FloatTensor(spec).unsqueeze(0)
  return spec
  

def greet(Text2, audio, Voicetoclone, VoiceMicrophone):
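    """End-to-end turn: transcribe `audio` with Whisper, get a gpt-3.5-turbo
    reply, synthesize it in the reference voice (`Voicetoclone` upload or
    `VoiceMicrophone` recording), then restore and enhance the result."""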
    
    openai.api_key = Text2

    # load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model1.device)

    # detect the spoken language
    _, probs = model1.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # decode the audio
    options = whisper.DecodingOptions()
    result = whisper.decode(model1, mel, options)
    res.append(result.text)

    messages = mes  # alias the global history so context persists across calls

    # forward the latest transcript to the chat model
    content = res[-1]
    messages.append({"role": "user", "content": content})

    completion = openai.ChatCompletion.create(
      model="gpt-3.5-turbo",
      messages=messages
    )

    chat_response = completion.choices[0].message.content

    messages.append({"role": "assistant", "content": chat_response})   
    
    text= "%s" % (chat_response)
    if Voicetoclone is not None:
      reference_files= "%s" % (Voicetoclone)
      print("path url")
      print(Voicetoclone)
      sample= str(Voicetoclone)
    else:
      reference_files= "%s" % (VoiceMicrophone)
      print("path url")
      print(VoiceMicrophone)
      sample= str(VoiceMicrophone)
    size_mb = os.path.getsize(reference_files) / (1024 * 1024)
    if size_mb > 30 or len(text) > 2000:
      message = "File is greater than 30 MB or the reply is longer than 2,000 characters. Please retry with a smaller file or shorter text."
      print(message)
      raise SystemExit(message)
    else:
      os.system(f'ffmpeg-normalize "{sample}" -nt rms -t=-27 -o "{sample}" -ar 16000 -f')
      reference_emb = SE_speaker_manager.compute_d_vector_from_clip(reference_files)
      model.length_scale = 1  # duration-predictor scaler; larger values slow the speech down
      model.inference_noise_scale = 0.3  # noise variance applied to the random z vector at inference
      model.inference_noise_scale_dp = 0.3  # noise variance applied to the duration-predictor z vector at inference
      language_id = 0  # index into model.language_manager.language_id_mapping
    
      print(" > text: {}".format(text))
      wav, alignment, _, _ = synthesis(
                        model,
                        text,
                        C,
                        "cuda" in str(next(model.parameters()).device),
                        ap,
                        speaker_id=None,
                        d_vector=reference_emb,
                        style_wav=None,
                        language_id=language_id,
                        enable_eos_bos_chars=C.enable_eos_bos_chars,
                        use_griffin_lim=True,
                        do_trim_silence=False,
                    ).values()
      print("Generated Audio")
      IPython.display.display(Audio(wav, rate=ap.sample_rate))
      file_name = "Audio.wav"
      out_path = os.path.join(OUT_PATH, file_name)
      print(" > Saving output to {}".format(out_path))
      ap.save_wav(wav, out_path)
    
      voicefixer.restore(input=out_path,    # raw TTS output wav
                      output="audio1.wav",  # restored wav file path
                      cuda=USE_CUDA,        # use GPU acceleration when available
                      mode=0)               # try mode 0, 1, or 2 to find the best result
    
      noisy = enhance_model.load_audio("audio1.wav").unsqueeze(0)

      enhanced = enhance_model.enhance_batch(noisy, lengths=torch.tensor([1.]))
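      # the MetricGAN+ voicebank model operates on 16 kHz audio, hence the fixed rate below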
      torchaudio.save("enhanced.wav", enhanced.cpu(), 16000)
      return [result.text, chat_response, "enhanced.wav"]

gr.Interface(
    fn=greet, 
    inputs=[
        gr.Textbox(label='Please enter your OpenAI API key', type="password"),
        gr.Audio(source="microphone", label='Speak to the assistant here - anytime, anywhere, about anything!', type="filepath"),
        gr.Audio(type="filepath", source="upload", label='Upload a voice you like (wav/mp3 file, max. 30 MB)'),
        gr.Audio(source="microphone", type="filepath", label='Or record a voice you like with the microphone (use either this or the upload above)'),
    ],
    outputs=["text", "text", "audio"],
    title="🥳💬💕 - TalktoAI: chat anytime, anywhere, about anything!",
    description="🤖 - Humane AI for the benefit of everyone! AI for good, a brighter civilization! TalktoAI - Enable the future!",
    article="🎶🖼️🎡 - It’s the intersection of technology and liberal arts that makes our hearts sing. - Steve Jobs"
    ).launch()
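
# A minimal sketch of calling the running app programmatically, assuming a
# gradio version with gradio_client support; the URL, file names, and API key
# below are illustrative placeholders, not part of this app:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   transcript, reply, wav_path = client.predict(
#       "sk-...", "question.wav", "reference.wav", None, api_name="/predict")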