naveenk-ai committed on
Commit edd5fdd · verified · 1 Parent(s): 738d339

Update app.py

Files changed (1):
  1. app.py  (+75 -42)
app.py CHANGED
@@ -6,65 +6,94 @@ import langid
 from openvoice.api import BaseSpeakerTTS, ToneColorConverter
 import openvoice.se_extractor as se_extractor
 
-# Constants
-CKPT_BASE_PATH = "checkpoints"
+# Use environment variables or predefined paths
+CKPT_BASE_PATH = os.getenv('CHECKPOINT_PATH', './checkpoints')
 EN_SUFFIX = f"{CKPT_BASE_PATH}/base_speakers/EN"
 CONVERTER_SUFFIX = f"{CKPT_BASE_PATH}/converter"
-OUTPUT_DIR = "outputs/"
+OUTPUT_DIR = "./outputs"
+
+# Ensure directories exist
+os.makedirs(CKPT_BASE_PATH, exist_ok=True)
 os.makedirs(OUTPUT_DIR, exist_ok=True)
 
-# Download necessary files
-def download_from_hf_hub(filename, local_dir="./"):
-    os.makedirs(local_dir, exist_ok=True)
-    hf_hub_download(repo_id="myshell-ai/OpenVoice", filename=filename, local_dir=local_dir)
-
-for file in [f"{CONVERTER_SUFFIX}/checkpoint.pth", f"{CONVERTER_SUFFIX}/config.json",
-             f"{EN_SUFFIX}/checkpoint.pth", f"{EN_SUFFIX}/config.json",
-             f"{EN_SUFFIX}/en_default_se.pth", f"{EN_SUFFIX}/en_style_se.pth"]:
-    download_from_hf_hub(file)
-
-# Initialize models
-pt_device = "cpu"
-en_base_speaker_tts = BaseSpeakerTTS(f"{EN_SUFFIX}/config.json", device=pt_device)
-en_base_speaker_tts.load_ckpt(f"{EN_SUFFIX}/checkpoint.pth")
-
-tone_color_converter = ToneColorConverter(f"{CONVERTER_SUFFIX}/config.json", device=pt_device)
-tone_color_converter.load_ckpt(f"{CONVERTER_SUFFIX}/checkpoint.pth")
-
-en_source_default_se = torch.load(f"{EN_SUFFIX}/en_default_se.pth")
-en_source_style_se = torch.load(f"{EN_SUFFIX}/en_style_se.pth")
+def download_files():
+    """Centralized file download method with error handling"""
+    files_to_download = [
+        (f"{CONVERTER_SUFFIX}/checkpoint.pth", "converter/checkpoint.pth"),
+        (f"{CONVERTER_SUFFIX}/config.json", "converter/config.json"),
+        (f"{EN_SUFFIX}/checkpoint.pth", "base_speakers/EN/checkpoint.pth"),
+        (f"{EN_SUFFIX}/config.json", "base_speakers/EN/config.json"),
+        (f"{EN_SUFFIX}/en_default_se.pth", "base_speakers/EN/en_default_se.pth"),
+        (f"{EN_SUFFIX}/en_style_se.pth", "base_speakers/EN/en_style_se.pth")
+    ]
+
+    for local_path, remote_path in files_to_download:
+        try:
+            os.makedirs(os.path.dirname(local_path), exist_ok=True)
+            hf_hub_download(
+                repo_id="myshell-ai/OpenVoice",
+                filename=remote_path,
+                local_dir=CKPT_BASE_PATH
+            )
+        except Exception as e:
+            print(f"Error downloading {remote_path}: {e}")
+            raise
+
+# Download files early
+download_files()
+
+# Model Initialization with Error Handling
+try:
+    pt_device = "cpu"  # Explicitly use CPU for Hugging Face deployment
+    en_base_speaker_tts = BaseSpeakerTTS(f"{EN_SUFFIX}/config.json", device=pt_device)
+    en_base_speaker_tts.load_ckpt(f"{EN_SUFFIX}/checkpoint.pth")
+
+    tone_color_converter = ToneColorConverter(f"{CONVERTER_SUFFIX}/config.json", device=pt_device)
+    tone_color_converter.load_ckpt(f"{CONVERTER_SUFFIX}/checkpoint.pth")
+
+    en_source_default_se = torch.load(f"{EN_SUFFIX}/en_default_se.pth")
+    en_source_style_se = torch.load(f"{EN_SUFFIX}/en_style_se.pth")
+except Exception as model_init_error:
+    print(f"Model initialization error: {model_init_error}")
+    raise
 
-# Main prediction function
 def predict(prompt, style, audio_file_pth, tau):
     if len(prompt) < 2 or len(prompt) > 200:
         return "Text should be between 2 and 200 characters.", None
 
     try:
-        target_se, _ = se_extractor.get_se(audio_file_pth, tone_color_converter, target_dir=OUTPUT_DIR, vad=True)
+        target_se, _ = se_extractor.get_se(
+            audio_file_pth,
+            tone_color_converter,
+            target_dir=OUTPUT_DIR,
+            vad=True
+        )
     except Exception as e:
-        return f"Error getting target tone color: {str(e)}", None
-
-    src_path = f"{OUTPUT_DIR}/tmp.wav"
-    en_base_speaker_tts.tts(prompt, src_path, speaker=style, language="English")
+        return f"Error extracting tone: {str(e)}", None
 
-    save_path = f"{OUTPUT_DIR}/output.wav"
-    tone_color_converter.convert(
-        audio_src_path=src_path,
-        src_se=en_source_style_se if style != "default" else en_source_default_se,
-        tgt_se=target_se,
-        output_path=save_path,
-        tau=tau
-    )
+    try:
+        src_path = f"{OUTPUT_DIR}/tmp.wav"
+        en_base_speaker_tts.tts(prompt, src_path, speaker=style, language="English")
+
+        save_path = f"{OUTPUT_DIR}/output.wav"
+        tone_color_converter.convert(
+            audio_src_path=src_path,
+            src_se=en_source_style_se if style != "default" else en_source_default_se,
+            tgt_se=target_se,
+            output_path=save_path,
+            tau=tau
+        )
 
-    return "Voice cloning completed successfully.", save_path
+        return "Voice cloning completed successfully.", save_path
+    except Exception as conversion_error:
+        return f"Voice conversion error: {conversion_error}", None
 
-# Gradio interface
 def create_demo():
     with gr.Blocks() as demo:
-        gr.Markdown("# OpenVoice: Instant Voice Cloning with fine-tuning")
+        gr.Markdown("# OpenVoice: Instant Voice Cloning")
 
         with gr.Row():
-            input_text = gr.Textbox(label="Text to speak", placeholder="Enter text here (2-200 characters)")
+            input_text = gr.Textbox(label="Text to speak", placeholder="Enter text (2-200 chars)")
             style = gr.Dropdown(
                 label="Style",
                 choices=["default", "whispering", "cheerful", "terrified", "angry", "sad", "friendly"],
@@ -73,7 +102,13 @@ def create_demo():
 
         with gr.Row():
             reference_audio = gr.Audio(label="Reference Audio", type="filepath")
-            tau_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Tau (Voice similarity)", info="Higher values make the output more similar to the reference voice")
+            tau_slider = gr.Slider(
+                minimum=0.1,
+                maximum=1.0,
+                value=0.7,
+                label="Voice Similarity",
+                info="Higher values = more similar to reference"
+            )
 
         submit_button = gr.Button("Generate Voice")
 
@@ -88,7 +123,5 @@ def create_demo():
 
     return demo
 
-# Launch the demo
-if __name__ == "__main__":
-    demo = create_demo()
-    demo.launch()
+# Hugging Face Space compatibility
+demo = create_demo()
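
A note on the rewritten download step: `hf_hub_download` places `filename` under `local_dir` with the repository's folder structure preserved and returns the resulting local path, so the explicit `os.makedirs(os.path.dirname(local_path), ...)` in `download_files()` is largely a belt-and-braces measure. A minimal standalone sketch of the call (the repo-relative filename is taken from this commit and assumed to exist in myshell-ai/OpenVoice):

```python
from huggingface_hub import hf_hub_download

# Standalone illustration, not part of app.py.
# With local_dir set, the file lands at <local_dir>/<filename>,
# e.g. ./checkpoints/converter/config.json, and that path is returned.
local_path = hf_hub_download(
    repo_id="myshell-ai/OpenVoice",
    filename="converter/config.json",  # repo-relative path, as used in download_files()
    local_dir="./checkpoints",
)
print(local_path)
```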
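The hunks stop short of the click handler: the lines between them are unchanged and not shown, so the wiring of `submit_button` to `predict` is not visible in this diff. Below is a hedged, self-contained sketch of how such a Blocks app is typically wired, with a stub in place of the real `predict` and with hypothetical output components (`output_message`, `output_audio`) that are not taken from this commit:

```python
import gradio as gr

# Stub standing in for app.py's predict(); it returns (status message, audio path).
def predict(prompt, style, audio_file_pth, tau):
    return f"Would clone {prompt!r} with style={style}, tau={tau}", audio_file_pth

with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Text to speak", placeholder="Enter text (2-200 chars)")
    style = gr.Dropdown(label="Style", choices=["default", "whispering"], value="default")
    reference_audio = gr.Audio(label="Reference Audio", type="filepath")
    tau_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Voice Similarity")
    submit_button = gr.Button("Generate Voice")

    # Hypothetical output components; the real ones sit in app.py's unchanged lines.
    output_message = gr.Textbox(label="Status")
    output_audio = gr.Audio(label="Cloned Voice")

    submit_button.click(
        fn=predict,
        inputs=[input_text, style, reference_audio, tau_slider],
        outputs=[output_message, output_audio],
    )
```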
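The final hunk drops the `if __name__ == "__main__":` launch block and leaves only a module-level `demo = create_demo()`. The commit's comment suggests the Space runtime is expected to pick up and serve that module-level `demo`; if the Space instead executes app.py directly, or when running `python app.py` locally, an explicit launch is still needed. A minimal sketch of what could be appended for local runs (not part of this commit):

```python
# Appended to app.py only for local runs; guarded so it does not fire on import.
if __name__ == "__main__":
    demo.launch()  # `demo` is the Blocks object created above by create_demo()
```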