ecker committed
Commit 26f8f70 · verified · 1 Parent(s): b2b56d0

Update app.py

Files changed (1): app.py (+91 -20)
app.py CHANGED
@@ -87,21 +87,49 @@ def gradio_wrapper(inputs):
 	return decorated
 
 # returns a list of models, assuming the models are placed under ./training/ or ./models/ or ./data/models/
-def get_model_paths( paths=[Path("./training/"), Path("./models/"), Path("./data/models/")] ):
+def get_model_paths(paths=["./training/", "./models/", "./data/models/"] ):
 	configs = []
 
 	for path in paths:
+		if not isinstance( path, Path ):
+			path = Path(path)
+
 		if not path.exists():
 			continue
 
 		for yaml in path.glob("**/*.yaml"):
 			if "/logs/" in str(yaml):
 				continue
+			if "lora" in str(yaml):
+				continue
 			configs.append( yaml )
 
 		for sft in path.glob("**/*.sft"):
 			if "/logs/" in str(sft):
 				continue
+			if "lora" in str(sft):
+				continue
+			configs.append( sft )
+
+	configs = [ str(p) for p in configs ]
+
+	return configs
+
+def get_lora_paths(paths=["./training/", "./models/", "./data/models/"] ):
+	configs = []
+
+	for path in paths:
+		if not isinstance( path, Path ):
+			path = Path(path)
+
+		if not path.exists():
+			continue
+
+		for sft in path.glob("**/*.sft"):
+			if "/logs/" in str(sft):
+				continue
+			if "lora" not in str(sft):
+				continue
 			configs.append( sft )
 
 	configs = [ str(p) for p in configs ]
@@ -115,10 +143,10 @@ def get_attentions():
 	return AVAILABLE_ATTENTIONS + ["auto"]
 
 #@gradio_wrapper(inputs=layout["settings"]["inputs"].keys())
-def load_model( config, device, dtype, attention ):
+def load_model( config, lora, device, dtype, attention ):
 	gr.Info(f"Loading: {config}")
 	try:
-		init_tts( config=Path(config), restart=True, device=device, dtype=dtype, attention=attention )
+		init_tts( config=Path(config), lora=Path(lora) if lora is not None else None, restart=True, device=device, dtype=dtype, attention=attention )
 	except Exception as e:
 		raise gr.Error(e)
 	gr.Info(f"Loaded model")
@@ -130,7 +158,7 @@ def get_languages():
 	return list(get_lang_symmap().keys()) + ["auto"]
 
 def get_tasks():
-	return ["tts", "sr", "nr", "vc"]
+	return ["tts", "sr", "ns", "vc"]
 
 #@gradio_wrapper(inputs=layout["dataset"]["inputs"].keys())
 def load_sample( speaker ):
@@ -219,18 +247,20 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 	parser.add_argument("--voice-convert", type=str, default=kwargs["voice-convert"])
 	parser.add_argument("--language", type=str, default=kwargs["language"])
 	parser.add_argument("--text-language", type=str, default=kwargs["text-language"])
+	parser.add_argument("--no-phonemize", action="store_true")
+	parser.add_argument("--play", action="store_true")
 	parser.add_argument("--split-text-by", type=str, default=kwargs["split-text-by"])
 	parser.add_argument("--context-history", type=int, default=kwargs["context-history"])
 	parser.add_argument("--input-prompt-length", type=float, default=kwargs["input-prompt-length"])
-	parser.add_argument("--input-prompt-prefix", action='store_true', default=kwargs["input-prompt-prefix"])
+	#parser.add_argument("--input-prompt-prefix", action='store_true', default=kwargs["input-prompt-prefix"])
 	parser.add_argument("--max-duration", type=int, default=int(kwargs["max-duration"]*cfg.dataset.frames_per_second))
-	parser.add_argument("--max-levels", type=int, default=kwargs["max-levels"])
+	#parser.add_argument("--max-levels", type=int, default=kwargs["max-levels"])
 	parser.add_argument("--max-steps", type=int, default=kwargs["max-steps"])
 	parser.add_argument("--ar-temperature", type=float, default=kwargs["ar-temperature"])
 	parser.add_argument("--nar-temperature", type=float, default=kwargs["nar-temperature"])
 	parser.add_argument("--min-ar-temperature", type=float, default=kwargs["min-ar-temperature"])
 	parser.add_argument("--min-nar-temperature", type=float, default=kwargs["min-nar-temperature"])
-	parser.add_argument("--prefix-silence", type=float, default=kwargs["prefix-silence"])
+	#parser.add_argument("--prefix-silence", type=float, default=kwargs["prefix-silence"])
 	parser.add_argument("--top-p", type=float, default=kwargs["top-p"])
 	parser.add_argument("--top-k", type=int, default=kwargs["top-k"])
 	parser.add_argument("--top-no", type=float, default=kwargs["top-no"])
@@ -238,6 +268,7 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 	parser.add_argument("--repetition-penalty", type=float, default=kwargs["repetition-penalty"])
 	parser.add_argument("--repetition-penalty-decay", type=float, default=kwargs["repetition-penalty-decay"])
 	parser.add_argument("--length-penalty", type=float, default=kwargs["length-penalty"])
+	"""
 	parser.add_argument("--beam-width", type=int, default=kwargs["beam-width"])
 	parser.add_argument("--mirostat-tau", type=float, default=kwargs["mirostat-tau"])
 	parser.add_argument("--mirostat-eta", type=float, default=kwargs["mirostat-eta"])
@@ -249,10 +280,16 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 	parser.add_argument("--layer-skip-exit-layer", type=int, default=kwargs["layer-skip-exit-layer"])
 	parser.add_argument("--layer-skip-entropy-threshold", type=int, default=kwargs["layer-skip-entropy-threshold"])
 	parser.add_argument("--layer-skip-varentropy-threshold", type=int, default=kwargs["layer-skip-varentropy-threshold"])
+	"""
 	parser.add_argument("--refine-on-stop", action="store_true")
 	parser.add_argument("--denoise-start", type=float, default=0.0)
 	parser.add_argument("--cfg-strength", type=float, default=kwargs['cfg-strength'])
 	parser.add_argument("--cfg-rescale", type=float, default=kwargs['cfg-rescale'])
+
+	parser.add_argument("--sampling-scores-masked-only", action="store_true")
+	parser.add_argument("--sampling-scores-flatten", action="store_true")
+	parser.add_argument("--sampling-scores-remask", action="store_true")
+
 	args, unknown = parser.parse_known_args()
 
 	if is_windows:
@@ -274,6 +311,21 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 	if kwargs.pop("refine-on-stop", False):
 		args.refine_on_stop = True
 
+	if kwargs.pop("no-phonemize", False):
+		args.no_phonemize = True
+
+	if kwargs.pop("play", False):
+		args.play = True
+
+	if kwargs.pop("sampling-scores-masked-only", False):
+		args.sampling_scores_masked_only = True
+
+	if kwargs.pop("sampling-scores-flatten", False):
+		args.sampling_scores_flatten = True
+
+	if kwargs.pop("sampling-scores-remask", False):
+		args.sampling_scores_remask = True
+
 	if args.split_text_by == "lines":
 		args.split_text_by = "\n"
 	elif args.split_text_by == "none":
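The new boolean options follow the same bridge pattern already used for `refine-on-stop`: the checkbox values arrive through `kwargs`, each flag is declared as a plain `store_true` argument (which parses to `False` when no argv is given), and the parsed namespace is then patched from `kwargs.pop(...)`. A minimal standalone sketch of the pattern, with illustrative names:

```python
import argparse

kwargs = {"play": True}  # e.g. collected from a Gradio checkbox

parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--play", action="store_true")
# pass [] so the sketch ignores the interpreter's own argv
args, unknown = parser.parse_known_args([])

# store_true defaults to False, so the UI value is applied afterwards
if kwargs.pop("play", False):
    args.play = True

print(args.play)  # True
```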
@@ -289,30 +341,35 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 	sampling_kwargs = dict(
 		split_text_by=args.split_text_by,
 		context_history=args.context_history,
+		phonemize=not args.no_phonemize,
 		voice_convert=args.voice_convert,
 		max_steps=args.max_steps,
-		max_levels=args.max_levels,
+		#max_levels=args.max_levels,
 		max_duration=args.max_duration,
 		ar_temperature=args.ar_temperature, nar_temperature=args.nar_temperature,
 		min_ar_temperature=args.min_ar_temperature, min_nar_temperature=args.min_nar_temperature,
 		top_p=args.top_p, top_k=args.top_k, min_p=args.min_p, top_no=args.top_no,
 		repetition_penalty=args.repetition_penalty, repetition_penalty_decay=args.repetition_penalty_decay,
 		length_penalty=args.length_penalty,
-		beam_width=args.beam_width,
-		mirostat_tau=args.mirostat_tau, mirostat_eta=args.mirostat_eta,
-		dry_multiplier=args.dry_multiplier, dry_base=args.dry_base, dry_allowed_length=args.dry_allowed_length,
-		entropix_sampling=args.entropix_sampling,
-		layer_skip=args.layer_skip,
-		layer_skip_exit_layer=args.layer_skip_exit_layer,
-		layer_skip_entropy_threshold=args.layer_skip_entropy_threshold,
-		layer_skip_varentropy_threshold=args.layer_skip_varentropy_threshold,
-		refine_on_stop=args.refine_on_stop,
+		#beam_width=args.beam_width,
+		#mirostat_tau=args.mirostat_tau, mirostat_eta=args.mirostat_eta,
+		#dry_multiplier=args.dry_multiplier, dry_base=args.dry_base, dry_allowed_length=args.dry_allowed_length,
+		#entropix_sampling=args.entropix_sampling,
+		#layer_skip=args.layer_skip,
+		#layer_skip_exit_layer=args.layer_skip_exit_layer,
+		#layer_skip_entropy_threshold=args.layer_skip_entropy_threshold,
+		#layer_skip_varentropy_threshold=args.layer_skip_varentropy_threshold,
+		#refine_on_stop=args.refine_on_stop,
 		denoise_start=args.denoise_start,
-		prefix_silence=args.prefix_silence,
-		input_prompt_prefix=args.input_prompt_prefix,
+		#prefix_silence=args.prefix_silence,
+		#input_prompt_prefix=args.input_prompt_prefix,
 		input_prompt_length=args.input_prompt_length,
 		cfg_strength=args.cfg_strength,
 		cfg_rescale=args.cfg_rescale,
+
+		sampling_scores_masked_only=args.sampling_scores_masked_only,
+		sampling_scores_flatten=args.sampling_scores_flatten,
+		sampling_scores_remask=args.sampling_scores_remask,
 	)
 
 	with timer("Inferenced in", callback=lambda msg: gr.Info( msg )) as t:
@@ -321,6 +378,7 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 			language=args.language,
 			text_language=args.text_language,
 			task=args.task,
+			play=args.play,
 			modality=args.modality.lower(),
 			references=args.references.split(";") if args.references is not None else [],
 			**sampling_kwargs,
@@ -416,6 +474,7 @@ def do_training( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 	parser = argparse.ArgumentParser(allow_abbrev=False)
 	parser.add_argument("--yaml", type=Path, default=os.environ.get('VALLE_YAML', None)) # os environ so it can be specified in a HuggingFace Space too
 	parser.add_argument("--model", type=Path, default=os.environ.get('VALLE_MODEL', None)) # os environ so it can be specified in a HuggingFace Space too
+	parser.add_argument("--lora", type=Path, default=os.environ.get('VALLE_LORA', None)) # os environ so it can be specified in a HuggingFace Space too
 	parser.add_argument("--listen", default=None, help="Path for Gradio to listen on")
 	parser.add_argument("--share", action="store_true")
 	parser.add_argument("--render_markdown", action="store_true", default="VALLE_YAML" in os.environ)
@@ -469,6 +528,9 @@ with ui:
 				with gr.Row():
 					layout["inference_tts"]["inputs"]["split-text-by"] = gr.Dropdown(choices=["sentences", "lines"], label="Text Delimiter", info="How to split the text into utterances.", value="sentences")
 					layout["inference_tts"]["inputs"]["context-history"] = gr.Slider(value=0, minimum=0, maximum=4, step=1, label="(Rolling) Context History", info="How many prior lines to serve as the context/prefix (0 to disable).")
+				with gr.Row():
+					layout["inference_tts"]["inputs"]["no-phonemize"] = gr.Checkbox(label="No Phonemize", info="Use raw text rather than phonemize the text as the input prompt.")
+					layout["inference_tts"]["inputs"]["play"] = gr.Checkbox(label="Auto Play", info="Auto play on generation (using sounddevice).")
 			with gr.Tab("Sampler Settings"):
 				with gr.Row():
 					layout["inference_tts"]["inputs"]["ar-temperature"] = gr.Slider(value=1.0, minimum=0.0, maximum=1.5, step=0.05, label="Temperature (AR/NAR-len)", info="Adjusts the probabilities in the AR/NAR-len. (0 to greedy* sample)")
@@ -486,7 +548,12 @@ with ui:
 					layout["inference_tts"]["inputs"]["repetition-penalty"] = gr.Slider(value=1.0, minimum=0.0, maximum=5.0, step=0.05, label="Repetition Penalty", info="Incurs a penalty to tokens based on how often they appear in a sequence.")
 					layout["inference_tts"]["inputs"]["repetition-penalty-decay"] = gr.Slider(value=0.0, minimum=-2.0, maximum=2.0, step=0.05, label="Repetition Penalty Length Decay", info="Modifies the reptition penalty based on how far back in time the token appeared in the sequence.")
 					layout["inference_tts"]["inputs"]["length-penalty"] = gr.Slider(value=0.0, minimum=-2.0, maximum=2.0, step=0.05, label="Length Penalty", info="(AR only) Modifies the probability of a stop token based on the current length of the sequence.")
+				with gr.Row():
+					layout["inference_tts"]["inputs"]["sampling-scores-masked-only"] = gr.Checkbox(label="Sampled Scores: Masked Only", info="(NAR-len only) Update scores for newly generated tokens only")
+					layout["inference_tts"]["inputs"]["sampling-scores-flattened"] = gr.Checkbox(label="Sampled Scores: Flattened", info="(NAR-len only) Flattens the scores for all codebook levels")
+					layout["inference_tts"]["inputs"]["sampling-scores-remask"] = gr.Checkbox(label="Sampled Scores: Remask", info="(NAR-len only) Remasks P%% of existing tokens randomly after each step.")
 			# These settings are pretty much not supported anyways
+			"""
 			with gr.Tab("Experimental Settings", visible=cfg.experimental):
 				with gr.Row():
 					layout["inference_tts"]["inputs"]["max-levels"] = gr.Slider(value=7, minimum=0, maximum=7, step=1, label="Max NAR Levels", info="Limits how many steps to perform in the NAR pass.")
@@ -509,6 +576,7 @@ with ui:
 					layout["inference_tts"]["inputs"]["layer-skip-exit-layer"] = gr.Slider(value=11, minimum=0, maximum=11, step=1, label="Layer Skip Exit Layer", info="Maximum model layer to exit early from.")
 					layout["inference_tts"]["inputs"]["layer-skip-entropy-threshold"] = gr.Slider(value=0.1, minimum=0, maximum=1.0, step=0.01, label="Layer Skip Entropy Threshold", info="Entropy threshold for early-exit")
 					layout["inference_tts"]["inputs"]["layer-skip-varentropy-threshold"] = gr.Slider(value=0.1, minimum=0, maximum=1.0, step=0.01, label="Layer Skip Varentropy Threshold", info="Varentropy threshold for early-exit")
+			"""
 
 		layout["inference_tts"]["buttons"]["inference"].click(
 			fn=do_inference_tts,
@@ -554,6 +622,7 @@ with ui:
 					layout["inference_stt"]["inputs"]["repetition-penalty"] = gr.Slider(value=1.0, minimum=-2.0, maximum=2.0, step=0.05, label="Repetition Penalty", info="Incurs a penalty to tokens based on how often they appear in a sequence.")
 					layout["inference_stt"]["inputs"]["repetition-penalty-decay"] = gr.Slider(value=0.0, minimum=-2.0, maximum=2.0, step=0.05, label="Repetition Penalty Length Decay", info="Modifies the reptition penalty based on how far back in time the token appeared in the sequence.")
 					layout["inference_stt"]["inputs"]["length-penalty"] = gr.Slider(value=0.0, minimum=-2.0, maximum=2.0, step=0.05, label="Length Penalty", info="(AR only) Modifies the probability of a stop token based on the current length of the sequence.")
+				"""
 				with gr.Row():
 					layout["inference_stt"]["inputs"]["dynamic-sampling"] = gr.Checkbox(label="Dynamic Temperature", info="Dynamically adjusts the temperature based on the highest confident predicted token per sampling step.")
 					layout["inference_stt"]["inputs"]["mirostat-tau"] = gr.Slider(value=0.0, minimum=0.0, maximum=8.0, step=0.05, label="Mirostat τ (Tau)", info="The \"surprise\" value when performing mirostat sampling. 0 to disable.")
@@ -562,6 +631,7 @@ with ui:
 					layout["inference_stt"]["inputs"]["dry-multiplier"] = gr.Slider(value=0.0, minimum=0.0, maximum=8.0, step=0.05, label="DRY Multiplier", info="The multiplying factor for the DRY score penalty (0 to disable DRY sampling).")
 					layout["inference_stt"]["inputs"]["dry-base"] = gr.Slider(value=1.75, minimum=0.0, maximum=8.0, step=0.05, label="DRY Base", info="The base of the exponent in the DRY score penalty")
 					layout["inference_stt"]["inputs"]["dry-allowed-length"] = gr.Slider(value=2, minimum=0, maximum=75, step=1, label="Allowed Length", info="The maximimum length a token can be to perform DRY penalty with.")
+				"""
 
 		layout["inference_stt"]["buttons"]["inference"].click(
 			fn=do_inference_stt,
@@ -609,8 +679,9 @@ with ui:
 			with gr.Column(scale=7):
 				with gr.Row():
 					layout["settings"]["inputs"]["models"] = gr.Dropdown(choices=get_model_paths(), value=args.yaml or args.model, label="Model", info="Model to load. Can load from a config YAML or the weights itself.")
-					layout["settings"]["inputs"]["device"] = gr.Dropdown(choices=get_devices(), value="cuda:0", label="Device", info="Device to load the weights onto.")
+					layout["settings"]["inputs"]["loras"] = gr.Dropdown(choices=get_lora_paths(), value=args.yaml or args.lora, label="LoRA", info="LoRA to load. Can load from a config YAML or the weights itself.")
 				with gr.Row():
+					layout["settings"]["inputs"]["device"] = gr.Dropdown(choices=get_devices(), value="cuda:0", label="Device", info="Device to load the weights onto.")
 					layout["settings"]["inputs"]["dtype"] = gr.Dropdown(choices=get_dtypes(), value="auto", label="Precision", info="Tensor type to load the model under.")
 					layout["settings"]["inputs"]["attentions"] = gr.Dropdown(choices=get_attentions(), value="auto", label="Attentions", info="Attention mechanism to utilize.")
 