Hev832 committed · verified
Commit b20156f · 1 Parent(s): c5989ff

Update rvc.py

Files changed (1): rvc.py +99 -32
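
This commit wires the infer_rvc_python package into the GUI: it imports BaseLoader, adds an "Inference v2!" tab whose "Convert Audio" button calls a new apply_conversion() helper, makes the sample-rate radio visible, switches the app to the "Hev832/Applio" theme, and moves the "1. Process" / "2. Extract Features" buttons so their click handlers report into the shared info3 textbox. Below is a rough standalone sketch of the conversion flow the new tab uses; the file paths and the "my_voice" tag are placeholders, and the keyword arguments simply mirror the ones used in the diff rather than the library's full option set.

    # Standalone sketch of the conversion flow behind the new "Inference v2!" tab.
    # "model.pth", "added.index", "input.wav" and the "my_voice" tag are placeholders.
    from infer_rvc_python import BaseLoader

    converter = BaseLoader(only_cpu=False, hubert_path=None, rmvpe_path=None)

    # Register a voice model under a tag and set pitch/index options,
    # mirroring the apply_conf() call added in this commit.
    converter.apply_conf(
        tag="my_voice",
        file_model="model.pth",      # RVC voice model (placeholder path)
        pitch_algo="rmvpe+",         # one of: pm, harvest, crepe, rmvpe, rmvpe+
        pitch_lvl=0,                 # integer pitch shift
        file_index="added.index",    # retrieval index for the model (placeholder path)
        index_influence=0.66,
        respiration_median_filtering=3,
        envelope_ratio=0.25,
        consonant_breath_protection=0.33,
    )

    # Convert one or more audio files; the second argument lists the tag to use per file.
    result = converter(
        ["input.wav"],
        ["my_voice"],
        overwrite=False,
        parallel_workers=4,
    )
    # The new tab's helper then saves result[0] to output_audio.wav and returns that path.

In the GUI, the new tab populates its dropdowns from the existing `names` and `index_paths` globals, so no separate model discovery is added.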
rvc.py CHANGED
@@ -1,10 +1,61 @@
  from original import *
  import shutil, glob
  from easyfuncs import download_from_url, CachedModels
  os.makedirs("dataset",exist_ok=True)
  model_library = CachedModels()

- with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue="zinc")) as app:
  with gr.Row():
  gr.HTML("<img src='file/a.png' alt='image'>")
  with gr.Tabs():
@@ -26,6 +77,21 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  value=0
  )
  but0 = gr.Button(value="Convert", variant="primary")
  with gr.Row():
  with gr.Column():
  with gr.Row():
@@ -198,7 +264,7 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  choices=["40k", "32k"],
  value="32k",
  interactive=True,
- visible=False
  )
  if_f0_3 = gr.Radio(
  label="Will your model be used for singing? If not, you can ignore this.",
@@ -212,14 +278,14 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  choices=["v1", "v2"],
  value="v2",
  interactive=True,
- visible=False,
  )
  dataset_folder = gr.Textbox(
  label="dataset folder", value='dataset'
  )
  easy_uploader = gr.Files(label="Drop your audio files here",file_types=['audio'])
- but1 = gr.Button("1. Process", variant="primary")
- info1 = gr.Textbox(label="Information", value="",visible=True)
  easy_uploader.upload(inputs=[dataset_folder],outputs=[],fn=lambda folder:os.makedirs(folder,exist_ok=True))
  easy_uploader.upload(
  fn=lambda files,folder: [shutil.copy2(f.name,os.path.join(folder,os.path.split(f.name)[1])) for f in files] if folder != "" else gr.Warning('Please enter a folder name for your dataset'),
@@ -243,12 +309,11 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  interactive=True,
  visible=False
  )
- but1.click(
-     preprocess_dataset,
-     [dataset_folder, training_name, sr2, np7],
-     [info1],
-     api_name="train_preprocess",
- )
  with gr.Column():
  f0method8 = gr.Radio(
  label="F0 extraction method",
@@ -262,27 +327,6 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  interactive=True,
  visible=F0GPUVisible,
  )
- but2 = gr.Button("2. Extract Features", variant="primary")
- info2 = gr.Textbox(label="Information", value="", max_lines=8)
- f0method8.change(
-     fn=change_f0_method,
-     inputs=[f0method8],
-     outputs=[gpus_rmvpe],
- )
- but2.click(
-     extract_f0_feature,
-     [
-         gpus6,
-         np7,
-         f0method8,
-         if_f0_3,
-         training_name,
-         version19,
-         gpus_rmvpe,
-     ],
-     [info2],
-     api_name="train_extract_f0_feature",
- )
  with gr.Column():
  total_epoch11 = gr.Slider(
  minimum=2,
@@ -292,6 +336,8 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  value=150,
  interactive=True,
  )
  but4 = gr.Button("3. Train Index", variant="primary")
  but3 = gr.Button("4. Train Model", variant="primary")
  info3 = gr.Textbox(label="Information", value="", max_lines=10)
@@ -380,6 +426,26 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  )
  with gr.Row():
  but5 = gr.Button("1 Click Training", variant="primary", visible=False)
  but3.click(
  click_train,
  [
@@ -401,6 +467,7 @@ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue=
  info3,
  api_name="train_start",
  )
  but4.click(train_index, [training_name, version19], info3)
  but5.click(
  train1key,
 
rvc.py UPDATED (new side of the same hunks; added lines marked with +):

  from original import *
  import shutil, glob
+ from infer_rvc_python import BaseLoader
  from easyfuncs import download_from_url, CachedModels
  os.makedirs("dataset",exist_ok=True)
  model_library = CachedModels()

+
+
+
+ # Initialize the converter
+ converter = BaseLoader(only_cpu=False, hubert_path=None, rmvpe_path=None)
+
+ import gradio as gr
+ import os
+ from infer_rvc_python import BaseLoader
+
+ # Initialize the converter
+ converter = BaseLoader(only_cpu=False, hubert_path=None, rmvpe_path=None)
+
+ def apply_conversion(audio_files, file_model, file_index, pitch_lvl, pitch_algo):
+     converter.apply_conf(
+         tag=file_model,
+         file_model=file_model,
+         pitch_algo=pitch_algo,
+         pitch_lvl=int(pitch_lvl),  # pitch_lvl should be an integer
+         file_index=file_index,
+         index_influence=0.66,
+         respiration_median_filtering=3,
+         envelope_ratio=0.25,
+         consonant_breath_protection=0.33
+     )
+
+     speakers_list = [file_model]  # It should be a list if multiple speakers are possible
+
+     result = converter(
+         audio_files,
+         speakers_list,
+         overwrite=False,
+         parallel_workers=4
+     )
+
+     output_path = "output_audio.wav"
+     # Assuming `result` is an array of audio data, save it to a file
+     result[0].export(output_path, format="wav")  # This is an example, modify as needed for your data type
+
+     return output_path
+
+
+
+
+
+
+
+
+
+
+ with gr.Blocks(title="Easy 🔊 GUI",theme="Hev832/Applio") as app:
  with gr.Row():
  gr.HTML("<img src='file/a.png' alt='image'>")
  with gr.Tabs():
 
  value=0
  )
  but0 = gr.Button(value="Convert", variant="primary")
+ with gr.TabItem("Inference v2!"):
+     audio_files_input = gr.Audio(label="your audios")
+     file_model_input = gr.Dropdown(label="Model Voice", choices=sorted(names), value=lambda:sorted(names)[0] if len(sorted(names)) > 0 else '', interactive=True)
+     file_index_input = gr.Dropdown(label="Change Index",choices=sorted(index_paths),interactive=True,value=sorted(index_paths)[0] if len(sorted(index_paths)) > 0 else '')
+     pitch_lvl_input = gr.Number(label="Pitch",value=0)
+     pitch_algo_input = gr.Dropdown(["pm", "harvest", "crepe", "rmvpe", "rmvpe+"], label="Pitch Algorithm")
+     submit_button = gr.Button("Convert Audio")
+     output_audio = gr.Audio(label="Conversion Result")
+     submit_button.click(
+         apply_conversion,
+         inputs=[audio_files_input, file_model_input, file_index_input, pitch_lvl_input, pitch_algo_input],
+         outputs=output_audio
+     )
+
+
  with gr.Row():
  with gr.Column():
  with gr.Row():
 
  choices=["40k", "32k"],
  value="32k",
  interactive=True,
+ visible=True
  )
  if_f0_3 = gr.Radio(
  label="Will your model be used for singing? If not, you can ignore this.",
 
  choices=["v1", "v2"],
  value="v2",
  interactive=True,
+ visible=False, # this is the default
  )
  dataset_folder = gr.Textbox(
  label="dataset folder", value='dataset'
  )
  easy_uploader = gr.Files(label="Drop your audio files here",file_types=['audio'])
+
+ #info1 = gr.Textbox(label="Information", value="",visible=True)
  easy_uploader.upload(inputs=[dataset_folder],outputs=[],fn=lambda folder:os.makedirs(folder,exist_ok=True))
  easy_uploader.upload(
  fn=lambda files,folder: [shutil.copy2(f.name,os.path.join(folder,os.path.split(f.name)[1])) for f in files] if folder != "" else gr.Warning('Please enter a folder name for your dataset'),
 
  interactive=True,
  visible=False
  )
+ f0method8.change(
+     fn=change_f0_method,
+     inputs=[f0method8],
+     outputs=[gpus_rmvpe],
+ )
  with gr.Column():
  f0method8 = gr.Radio(
  label="F0 extraction method",

  interactive=True,
  visible=F0GPUVisible,
  )
  with gr.Column():
  total_epoch11 = gr.Slider(
  minimum=2,

  value=150,
  interactive=True,
  )
+ but1 = gr.Button("1. Process", variant="primary")
+ but2 = gr.Button("2. Extract Features", variant="primary")
  but4 = gr.Button("3. Train Index", variant="primary")
  but3 = gr.Button("4. Train Model", variant="primary")
  info3 = gr.Textbox(label="Information", value="", max_lines=10)
 
  )
  with gr.Row():
  but5 = gr.Button("1 Click Training", variant="primary", visible=False)
+ but1.click(
+     preprocess_dataset,
+     [dataset_folder, training_name, sr2, np7],
+     [info3],
+     api_name="train_preprocess",
+ )
+ but2.click(
+     extract_f0_feature,
+     [
+         gpus6,
+         np7,
+         f0method8,
+         if_f0_3,
+         training_name,
+         version19,
+         gpus_rmvpe,
+     ],
+     info3,
+     api_name="train_extract_f0_feature",
+ )
  but3.click(
  click_train,
  [

  info3,
  api_name="train_start",
  )
+
  but4.click(train_index, [training_name, version19], info3)
  but5.click(
  train1key,