hivecorp committed on
Commit
24be842
·
verified ·
1 Parent(s): 022a24f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -5,7 +5,8 @@ import os
5
  import random
6
  import torch
7
 
8
- CHAR_LIMIT = None
 
9
 
10
  CUDA_AVAILABLE = torch.cuda.is_available()
11
  models = {gpu: KModel().to('cuda' if gpu else 'cpu').eval() for gpu in [False] + ([True] if CUDA_AVAILABLE else [])}
@@ -18,7 +19,7 @@ def forward_gpu(ps, ref_s, speed):
18
  return models[True](ps, ref_s, speed)
19
 
20
  def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
21
- text = text
22
  pipeline = pipelines[voice[0]]
23
  pack = pipeline.load_voice(voice)
24
  use_gpu = use_gpu and CUDA_AVAILABLE
@@ -49,7 +50,7 @@ def tokenize_first(text, voice='af_heart'):
49
  return words # Return a list of words
50
 
51
  def generate_all(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
52
- text = text
53
  pipeline = pipelines[voice[0]]
54
  pack = pipeline.load_voice(voice)
55
  use_gpu = use_gpu and CUDA_AVAILABLE
@@ -189,7 +190,7 @@ with gr.Blocks() as app:
189
  with gr.Column():
190
  gr.TabbedInterface([generate_tab], ['Generate'])
191
  random_btn.click(fn=get_random_text, inputs=[voice], outputs=[text], api_name=API_NAME)
192
- generate_btn.click(fn=generate_all, inputs=[text, voice, speed, use_gpu], outputs=[out_audio, out_ps], api_name=API_NAME)
193
  tokenize_btn.click(fn=tokenize_first, inputs=[text, voice], outputs=[out_ps], api_name=API_NAME)
194
  predict_btn.click(fn=predict, inputs=[text, voice, speed], outputs=[out_audio], api_name=API_NAME)
195
 
 
5
  import random
6
  import torch
7
 
8
+ IS_DUPLICATE = not os.getenv('SPACE_ID', 'hivecorp/keets2').startswith('hexgrad/')
9
+ CHAR_LIMIT = None if IS_DUPLICATE else 5000
10
 
11
  CUDA_AVAILABLE = torch.cuda.is_available()
12
  models = {gpu: KModel().to('cuda' if gpu else 'cpu').eval() for gpu in [False] + ([True] if CUDA_AVAILABLE else [])}
 
19
  return models[True](ps, ref_s, speed)
20
 
21
  def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
22
+ text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
23
  pipeline = pipelines[voice[0]]
24
  pack = pipeline.load_voice(voice)
25
  use_gpu = use_gpu and CUDA_AVAILABLE
 
50
  return words # Return a list of words
51
 
52
  def generate_all(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
53
+ text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
54
  pipeline = pipelines[voice[0]]
55
  pack = pipeline.load_voice(voice)
56
  use_gpu = use_gpu and CUDA_AVAILABLE
 
190
  with gr.Column():
191
  gr.TabbedInterface([generate_tab], ['Generate'])
192
  random_btn.click(fn=get_random_text, inputs=[voice], outputs=[text], api_name=API_NAME)
193
+ generate_btn.click(fn=generate_first, inputs=[text, voice, speed, use_gpu], outputs=[out_audio, out_ps], api_name=API_NAME)
194
  tokenize_btn.click(fn=tokenize_first, inputs=[text, voice], outputs=[out_ps], api_name=API_NAME)
195
  predict_btn.click(fn=predict, inputs=[text, voice, speed], outputs=[out_audio], api_name=API_NAME)
196