bcci committed
Commit 04e4550 · verified · 1 Parent(s): a807e9e

Update app.py

Files changed (1): app.py (+3, −3)
app.py CHANGED

@@ -153,7 +153,7 @@ def tokenizer(text: str):
     """
     phonemes_string, tokens = g2p(text)
     phonemes = [ph for ph in phonemes_string]
-    print(phonemes_string)
+    print(text + " " + phonemes_string)
     tokens = [phoneme_vocab[phoneme] for phoneme in phonemes if phoneme in phoneme_vocab]
     return tokens
 
@@ -197,7 +197,7 @@ def tts_streaming(text: str, voice: str = "af_heart", speed: float = 1.0, format
             if i == 0:
                 tokens_to_send = [0] + chunk_tokens + [0]
             else:
-                tokens_to_send = [prev_last_token] + [16] + chunk_tokens + [0]
+                tokens_to_send = [prev_last_token] + [16] + [0] + chunk_tokens + [0]
                 # token_to_send = [0] + chunk_tokens + [0]
 
             # Save the last token of this chunk for the next iteration.
@@ -205,7 +205,7 @@ def tts_streaming(text: str, voice: str = "af_heart", speed: float = 1.0, format
 
             # Prepare the model input (a batch of one sequence).
             final_token = [tokens_to_send]
-            print(final_token)
+            # print(final_token)
 
             # Use the number of tokens to select the appropriate style vector.
             style_index = len(chunk_tokens) + 2
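
For context, the first hunk only changes a debug print, but the surrounding tokenizer logic is worth spelling out: the g2p output string is split into individual phoneme characters and mapped through phoneme_vocab, and any phoneme missing from the vocab is silently dropped. The sketch below is illustrative only; the real g2p and phoneme_vocab live elsewhere in app.py, and the stand-in values here are assumptions.

```python
# Minimal sketch of the tokenizer touched by the first hunk.
# g2p and phoneme_vocab are stand-ins; the real ones are defined elsewhere
# in app.py, and the ids below are made up for illustration.

phoneme_vocab = {"h": 20, "ə": 21, "l": 22, "o": 24}  # hypothetical ids


def g2p(text: str):
    # Stand-in for the real grapheme-to-phoneme call, which returns a
    # phoneme string plus a token structure (unused here).
    return "həlˈoʊ", None


def tokenizer(text: str):
    phonemes_string, tokens = g2p(text)
    phonemes = [ph for ph in phonemes_string]
    print(text + " " + phonemes_string)  # debug line changed by this commit
    # Phonemes missing from the vocab (here "ˈ" and "ʊ") are silently dropped.
    tokens = [phoneme_vocab[phoneme] for phoneme in phonemes if phoneme in phoneme_vocab]
    return tokens


print(tokenizer("hello"))  # -> [20, 21, 22, 24]
```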
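
The substantive change is in the streaming path: later chunks are now framed as [prev_last_token, 16, 0] + chunk_tokens + [0] rather than [prev_last_token, 16] + chunk_tokens + [0], i.e. an extra 0 is inserted before the chunk body. Below is a minimal sketch of that framing together with the style_index selection from the last hunk. frame_chunk is a hypothetical helper, the example chunk ids are made up, and reading id 0 as a boundary/pad token and id 16 as a separator is an assumption based on the diff, not something the commit states.

```python
# Minimal sketch of the chunk framing changed by the second hunk, assuming
# id 0 is a pad/boundary token and id 16 a separator between chunks; the
# helper frame_chunk and the example chunk ids are hypothetical.

def frame_chunk(chunk_tokens, i, prev_last_token):
    """Wrap a chunk of phoneme token ids before it is sent to the model."""
    if i == 0:
        # First chunk: boundary token on both sides.
        return [0] + chunk_tokens + [0]
    # Later chunks: previous chunk's last token, separator 16, then the
    # extra boundary 0 added by this commit, the chunk, and a closing 0.
    return [prev_last_token] + [16] + [0] + chunk_tokens + [0]


chunks = [[5, 7, 9], [11, 13], [17]]  # made-up phoneme ids
prev_last_token = None
for i, chunk_tokens in enumerate(chunks):
    tokens_to_send = frame_chunk(chunk_tokens, i, prev_last_token)
    # Which token line 204 actually saves is not shown in this diff; using
    # the chunk's last raw token is an assumption here.
    prev_last_token = chunk_tokens[-1]
    final_token = [tokens_to_send]       # batch of one sequence, as in the diff
    style_index = len(chunk_tokens) + 2  # style vector index, as in the diff
    print(final_token, style_index)
```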