John6666 committed on
Commit
dbaa5fe
·
verified ·
1 Parent(s): b3b3af9

Upload dc.py

Browse files
Files changed (1) hide show
  1. dc.py +4 -4
dc.py CHANGED
@@ -360,8 +360,8 @@ class GuiSD:
360
  retain_task_model_in_cache=False,
361
  device="cpu",
362
  )
363
- #self.model.load_beta_styles()
364
- self.model.device = torch.device("cpu") #
365
 
366
  def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
367
  #progress(0, desc="Start inference...")
@@ -679,7 +679,7 @@ class GuiSD:
679
  #progress(1, desc="Inference preparation completed. Starting inference...")
680
 
681
  info_state = "" # for yield version
682
- return self.infer_short(self.model, pipe_params), info_state
683
  ## END MOD
684
 
685
  def dynamic_gpu_duration(func, duration, *args):
@@ -817,7 +817,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
817
  sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
818
  progress(1, desc="Model loaded.")
819
  progress(0, desc="Starting Inference...")
820
- images, info = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
821
  guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
822
  lora4, lora4_wt, lora5, lora5_wt, sampler,
823
  height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
 
360
  retain_task_model_in_cache=False,
361
  device="cpu",
362
  )
363
+ self.model.load_beta_styles()
364
+ #self.model.device = torch.device("cpu") #
365
 
366
  def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
367
  #progress(0, desc="Start inference...")
 
679
  #progress(1, desc="Inference preparation completed. Starting inference...")
680
 
681
  info_state = "" # for yield version
682
+ return self.infer_short(self.model, pipe_params)
683
  ## END MOD
684
 
685
  def dynamic_gpu_duration(func, duration, *args):
 
817
  sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
818
  progress(1, desc="Model loaded.")
819
  progress(0, desc="Starting Inference...")
820
+ images = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
821
  guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
822
  lora4, lora4_wt, lora5, lora5_wt, sampler,
823
  height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,