mrmocciai committed
Commit 8af5222
1 parent: 2228b00

Delete infer_batch_rvc.py

Files changed (1)
  1. infer_batch_rvc.py +0 -215
infer_batch_rvc.py DELETED
@@ -1,215 +0,0 @@
- """
- v1
- runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33
- v2
- runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33
- """
- import os, sys, pdb, torch
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- import sys
- import torch
- import tqdm as tq
- from multiprocessing import cpu_count
-
-
- class Config:
-     def __init__(self, device, is_half):
-         self.device = device
-         self.is_half = is_half
-         self.n_cpu = 0
-         self.gpu_name = None
-         self.gpu_mem = None
-         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
-
-     def device_config(self) -> tuple:
-         if torch.cuda.is_available():
-             i_device = int(self.device.split(":")[-1])
-             self.gpu_name = torch.cuda.get_device_name(i_device)
-             if (
-                 ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
-                 or "P40" in self.gpu_name.upper()
-                 or "1060" in self.gpu_name
-                 or "1070" in self.gpu_name
-                 or "1080" in self.gpu_name
-             ):
-                 print("16-series/10-series GPUs and the P40 are forced to single precision")
-                 self.is_half = False
-                 for config_file in ["32k.json", "40k.json", "48k.json"]:
-                     with open(f"configs/{config_file}", "r") as f:
-                         strr = f.read().replace("true", "false")
-                     with open(f"configs/{config_file}", "w") as f:
-                         f.write(strr)
-                 with open("trainset_preprocess_pipeline_print.py", "r") as f:
-                     strr = f.read().replace("3.7", "3.0")
-                 with open("trainset_preprocess_pipeline_print.py", "w") as f:
-                     f.write(strr)
-             else:
-                 self.gpu_name = None
-             self.gpu_mem = int(
-                 torch.cuda.get_device_properties(i_device).total_memory
-                 / 1024
-                 / 1024
-                 / 1024
-                 + 0.4
-             )
-             if self.gpu_mem <= 4:
-                 with open("trainset_preprocess_pipeline_print.py", "r") as f:
-                     strr = f.read().replace("3.7", "3.0")
-                 with open("trainset_preprocess_pipeline_print.py", "w") as f:
-                     f.write(strr)
-         elif torch.backends.mps.is_available():
-             print("No supported NVIDIA GPU found; using MPS for inference")
-             self.device = "mps"
-         else:
-             print("No supported NVIDIA GPU found; using CPU for inference")
-             self.device = "cpu"
-             self.is_half = True
-
-         if self.n_cpu == 0:
-             self.n_cpu = cpu_count()
-
-         if self.is_half:
-             # settings for 6 GB of VRAM
-             x_pad = 3
-             x_query = 10
-             x_center = 60
-             x_max = 65
-         else:
-             # settings for 5 GB of VRAM
-             x_pad = 1
-             x_query = 6
-             x_center = 38
-             x_max = 41
-
-         if self.gpu_mem != None and self.gpu_mem <= 4:
-             x_pad = 1
-             x_query = 5
-             x_center = 30
-             x_max = 32
-
-         return x_pad, x_query, x_center, x_max
-
-
- f0up_key = sys.argv[1]
- input_path = sys.argv[2]
- index_path = sys.argv[3]
- f0method = sys.argv[4]  # harvest or pm
- opt_path = sys.argv[5]
- model_path = sys.argv[6]
- index_rate = float(sys.argv[7])
- device = sys.argv[8]
- is_half = sys.argv[9].lower() != "false"
- filter_radius = int(sys.argv[10])
- resample_sr = int(sys.argv[11])
- rms_mix_rate = float(sys.argv[12])
- protect = float(sys.argv[13])
- print(sys.argv)
- config = Config(device, is_half)
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from vc_infer_pipeline import VC
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from lib.audio import load_audio
- from fairseq import checkpoint_utils
- from scipy.io import wavfile
-
- hubert_model = None
-
-
- def load_hubert():
-     global hubert_model
-     models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(device)
-     if is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-
-
- def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
-     global tgt_sr, net_g, vc, hubert_model, version
-     if input_audio is None:
-         return "You need to upload an audio", None
-     f0_up_key = int(f0_up_key)
-     audio = load_audio(input_audio, 16000)
-     times = [0, 0, 0]
-     if hubert_model == None:
-         load_hubert()
-     if_f0 = cpt.get("f0", 1)
-     # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
-     audio_opt = vc.pipeline(
-         hubert_model,
-         net_g,
-         sid,
-         audio,
-         input_audio,
-         times,
-         f0_up_key,
-         f0_method,
-         file_index,
-         index_rate,
-         if_f0,
-         filter_radius,
-         tgt_sr,
-         resample_sr,
-         rms_mix_rate,
-         version,
-         protect,
-         f0_file=f0_file,
-     )
-     print(times)
-     return audio_opt
-
-
- def get_vc(model_path):
-     global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
-     print("loading pth %s" % model_path)
-     cpt = torch.load(model_path, map_location="cpu")
-     tgt_sr = cpt["config"][-1]
-     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-     if_f0 = cpt.get("f0", 1)
-     version = cpt.get("version", "v1")
-     if version == "v1":
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
-         else:
-             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-     elif version == "v2":
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
-         else:
-             net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-     del net_g.enc_q
-     print(net_g.load_state_dict(cpt["weight"], strict=False))  # state is not cleared properly without this line, oddly enough
-     net_g.eval().to(device)
-     if is_half:
-         net_g = net_g.half()
-     else:
-         net_g = net_g.float()
-     vc = VC(tgt_sr, config)
-     n_spk = cpt["config"][-3]
-     # return {"visible": True,"maximum": n_spk, "__type__": "update"}
-
-
- get_vc(model_path)
- audios = os.listdir(input_path)
- for file in tq.tqdm(audios):
-     if file.endswith(".wav"):
-         file_path = input_path + "/" + file
-         wav_opt = vc_single(
-             0, file_path, f0up_key, None, f0method, index_path, index_rate
-         )
-         out_path = opt_path + "/" + file
-         wavfile.write(out_path, tgt_sr, wav_opt)
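
For context, the deleted script read thirteen positional arguments (sys.argv[1] through sys.argv[13]) in the order shown above. A minimal sketch of an equivalent invocation from Python follows; it assumes the script is still available locally, uses placeholder paths, and reuses the example values from the script's own docstring.

import subprocess
import sys

# Positional arguments in the order the deleted script read them from sys.argv.
# Paths are placeholders; the numeric values mirror the docstring example.
args = [
    "0",                    # f0up_key
    "path/to/input_wavs",   # input_path: folder of .wav files to convert
    "path/to/added.index",  # index_path: feature index file
    "harvest",              # f0method: harvest or pm
    "path/to/output",       # opt_path: folder for converted audio
    "path/to/model.pth",    # model_path: RVC model checkpoint
    "0.66",                 # index_rate
    "cuda:0",               # device
    "True",                 # is_half (only the literal "false" disables half precision)
    "3",                    # filter_radius
    "0",                    # resample_sr
    "1",                    # rms_mix_rate
    "0.33",                 # protect
]
subprocess.run([sys.executable, "infer_batch_rvc.py", *args], check=True)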