omer11a committed
Commit 1e9f321 · 1 Parent(s): cc776b8

Fixed errors

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -134,7 +134,7 @@ def inference(
         raise gr.Error("cuda is not available")
 
     device = torch.device("cuda")
-    model = model.to(device)
+    model.to(device).half()
 
     seed_everything(seed)
     start_code = torch.randn([len(prompts), 4, 128, 128], device=device)
@@ -161,7 +161,7 @@ def inference(
     register_attention_editor_diffusers(model, editor)
     images = model(prompts, latents=start_code, guidance_scale=classifier_free_guidance_scale).images
     unregister_attention_editor_diffusers(model)
-    model.to(torch.device("cpu"))
+    model.double().to(torch.device("cpu"))
     return images
 
 
@@ -254,7 +254,7 @@ def clear(batch_size):
 def main():
     nltk.download("averaged_perceptron_tagger")
     scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
-    model = StableDiffusionXLPipeline.from_pretrained(MODEL_PATH, scheduler=scheduler, torch_dtype=torch.float16)
+    model = StableDiffusionXLPipeline.from_pretrained(MODEL_PATH, scheduler=scheduler)
     model.unet.set_default_attn_processor()
     model.enable_sequential_cpu_offload()
 
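For orientation, here is a sketch of how the changed part of inference() reads after this commit, assembled from the hunks above. The parameter list and the cuda-availability guard are assumed (the diff shows neither), and helpers such as gr, seed_everything, register_attention_editor_diffusers, and unregister_attention_editor_diffusers come from elsewhere in app.py; the lines between the two hunks are elided with `...`.

def inference(model, prompts, editor, seed, classifier_free_guidance_scale):
    # Post-commit flow (sketch only); earlier argument handling and editor setup omitted.
    if not torch.cuda.is_available():
        raise gr.Error("cuda is not available")

    device = torch.device("cuda")
    model.to(device).half()  # move the pipeline to the GPU and cast it to float16 for this run

    seed_everything(seed)
    start_code = torch.randn([len(prompts), 4, 128, 128], device=device)
    ...  # lines unchanged by this commit

    register_attention_editor_diffusers(model, editor)
    images = model(prompts, latents=start_code, guidance_scale=classifier_free_guidance_scale).images
    unregister_attention_editor_diffusers(model)
    model.double().to(torch.device("cpu"))  # cast back to float64 and park the pipeline on the CPU
    return images

Together with the main() hunk, the net effect is that the pipeline is no longer loaded with torch_dtype=torch.float16 at startup; it is only cast to half precision on the GPU for the duration of a generation call and moved back to the CPU afterwards.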