fffiloni committed (verified)
Commit 0d6e1ac · 1 Parent(s): 28a5c9e

check prompt non empty

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -58,7 +58,9 @@ def start_over(gallery_state, loaded_model_setup):
     return gallery_state, None, None, gr.update(visible=False), loaded_model_setup
 
 def setup_model(prompt, model, seed, num_iterations, learning_rate, hps_w, imgrw_w, pcks_w, clip_w, progress=gr.Progress(track_tqdm=True)):
-
+    if prompt is None:
+        raise gr.Error("You forgot to provide a prompt !")
+
     """Clear CUDA memory before starting the training."""
     torch.cuda.empty_cache()  # Free up cached memory
 
@@ -151,7 +153,7 @@ def generate_image(setup_args, num_iterations):
 
     if error_status["error_occurred"]:
         torch.cuda.empty_cache()  # Free up cached memory
-        yield (None, "CUDA out of memory. Please reduce your batch size or image resolution.", None)
+        yield (None, "CUDA out of memory.", None)
     else:
         main_thread.join()  # Ensure thread completion
         final_image_path = os.path.join(save_dir, "best_image.png")
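
Note that the committed guard only rejects a missing prompt: although the commit message says "check prompt non empty", an empty or whitespace-only string still passes `if prompt is None`. A minimal sketch of a stricter check, with the setup_model signature abbreviated for illustration (not the committed code):

import gradio as gr

def setup_model(prompt, *args, **kwargs):
    # Rejects None, "", and whitespace-only prompts alike;
    # a sketch of a stricter guard than the committed `is None` check.
    if prompt is None or not prompt.strip():
        raise gr.Error("You forgot to provide a prompt!")
    ...

Raising gr.Error surfaces the message in the Gradio UI rather than crashing the app, which is why the commit uses it for input validation.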
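The second hunk reads a shared error_status flag that a worker thread is expected to set when CUDA memory runs out; that thread's code sits outside this diff. A hypothetical sketch of the pattern, with run_training standing in for the real training loop:

import threading
import torch

error_status = {"error_occurred": False}

def run_training():
    # Hypothetical stand-in for the actual training loop (not in this diff)
    ...

def train_worker():
    try:
        run_training()
    except torch.cuda.OutOfMemoryError:
        # Flag the failure so the generator in generate_image can report it
        error_status["error_occurred"] = True

main_thread = threading.Thread(target=train_worker)
main_thread.start()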