Spaces: Running on Zero

Fixed errors

app.py CHANGED
@@ -134,7 +134,7 @@ def inference(
         raise gr.Error("cuda is not available")
 
     device = torch.device("cuda")
-    model
+    model.to(device).half()
 
     seed_everything(seed)
     start_code = torch.randn([len(prompts), 4, 128, 128], device=device)
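The first hunk replaces a dangling `model` statement (a leftover expression that did nothing) with an explicit move of the pipeline onto the GPU in half precision at the start of each call. On a ZeroGPU Space the GPU is only attached while a decorated function runs, so the pipeline rests on CPU between requests and is shuttled to CUDA per call. A minimal sketch of that pattern, assuming the usual `spaces.GPU` decorator; the decorator, the `pipe` construction, and the checkpoint id are assumptions, only the device/dtype calls mirror the diff:

    import spaces                    # ZeroGPU helper; assumed, not shown in the diff
    import torch
    from diffusers import StableDiffusionXLPipeline

    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0"   # placeholder model id
    )

    @spaces.GPU
    def generate(prompts, seed):
        device = torch.device("cuda")
        pipe.to(device).half()       # fp16 weights on GPU, mirroring the diff
        torch.manual_seed(seed)      # stand-in for the app's seed_everything(seed)
        # one 4x128x128 latent per prompt; SDXL decodes this to a 1024x1024 image
        latents = torch.randn([len(prompts), 4, 128, 128],
                              device=device, dtype=torch.float16)
        return pipe(prompts, latents=latents).images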
@@ -161,7 +161,7 @@ def inference(
     register_attention_editor_diffusers(model, editor)
     images = model(prompts, latents=start_code, guidance_scale=classifier_free_guidance_scale).images
     unregister_attention_editor_diffusers(model)
-    model.to(torch.device("cpu"))
+    model.double().to(torch.device("cpu"))
     return images
 
 
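The second hunk changes the post-generation teardown: after the attention editor is unregistered, the pipeline is cast back (here via `.double()`) and moved to CPU so GPU memory is released before the ZeroGPU slot detaches. Since an exception during generation would skip the CPU move, a `try`/`finally` is a common way to harden this bracket; a sketch under that assumption, where the `editor` object and the register/unregister helpers are the app's own names taken from the diff:

    import torch

    def run_with_editor(model, editor, prompts, start_code, guidance_scale):
        # Hypothetical wrapper around the diff's register -> generate -> unregister bracket.
        register_attention_editor_diffusers(model, editor)
        try:
            images = model(prompts, latents=start_code,
                           guidance_scale=guidance_scale).images
        finally:
            unregister_attention_editor_diffusers(model)
            model.double().to(torch.device("cpu"))   # free GPU memory, as in the diff
        return images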
@@ -254,7 +254,7 @@ def clear(batch_size):
 def main():
     nltk.download("averaged_perceptron_tagger")
     scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
-    model = StableDiffusionXLPipeline.from_pretrained(MODEL_PATH, scheduler=scheduler
+    model = StableDiffusionXLPipeline.from_pretrained(MODEL_PATH, scheduler=scheduler)
     model.unet.set_default_attn_processor()
     model.enable_sequential_cpu_offload()
 
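The third hunk closes the parenthesis the previous commit had dropped from the `from_pretrained` call, which would have been a syntax error. For context, a self-contained sketch of the same pipeline construction; `MODEL_PATH` is the app's own constant defined elsewhere in app.py, so a public SDXL checkpoint is substituted here:

    from diffusers import DDIMScheduler, StableDiffusionXLPipeline

    MODEL_PATH = "stabilityai/stable-diffusion-xl-base-1.0"   # placeholder for the app's constant

    scheduler = DDIMScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
    )
    model = StableDiffusionXLPipeline.from_pretrained(MODEL_PATH, scheduler=scheduler)
    model.unet.set_default_attn_processor()   # drop any custom attention processors
    model.enable_sequential_cpu_offload()     # stream submodules to GPU only as needed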