Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ import gradio as gr
 import numpy as np
 from PIL import Image
 import torch
-import torch._dynamo
+#import torch._dynamo
 #import diffusers
 from diffusers import AutoencoderKL, StableDiffusionXLPipeline
 from diffusers import EulerAncestralDiscreteScheduler
@@ -198,7 +198,7 @@ def load_and_prepare_model():
     #pipe.unet = pipe.unet.to(memory_format=torch.contiguous_format)
 
     #Some typical diffusers pipeline optimizations
-    pipe.unet.to(memory_format=torch.channels_last) #Unsupported by hidet, but does not seem to make a difference if disabled.
+    #pipe.unet.to(memory_format=torch.channels_last) #Unsupported by hidet, but does not seem to make a difference if disabled.
     #pipe.enable_vae_tiling()
     #pipe.enable_xformers_memory_efficient_attention()
 
@@ -289,7 +289,7 @@ def generate_30(
     lora_scale: float = 0.5,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.set_default_device('cuda')
+    #torch.set_default_device('cuda')
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
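The first hunk comments out the direct `torch._dynamo` import. As a hedged note, that import is only needed when calling the dynamo utilities themselves, for example to clear compilation state; `torch.compile` works without it. A minimal sketch:

import torch
import torch._dynamo  # only needed for direct calls into the dynamo API

# Clear cached compiled graphs, e.g. after changing model configuration.
torch._dynamo.reset()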
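The second hunk disables the channels_last conversion that the inline comment flags as unsupported by the hidet backend. A sketch of gating that optimization behind a flag; `use_hidet` is an assumed configuration variable, not something from the original app:

import torch

def apply_memory_format(unet: torch.nn.Module, use_hidet: bool) -> torch.nn.Module:
    # channels_last can speed up convolution-heavy models on CUDA, but the
    # app's own comment notes it is unsupported when compiling with hidet.
    if not use_hidet:
        unet = unet.to(memory_format=torch.channels_last)
    return unet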
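The last hunk removes the global default-device override. A sketch only, but it illustrates why the surrounding code still runs on the GPU without it: the sampler uses a generator created directly on CUDA, as on the retained generator line in the same hunk, so no global default is required. The `pipe` object in the usage comment is assumed for illustration:

import torch

# Sketch, not the app's exact code: reproducible seeding without
# torch.set_default_device('cuda').
def seeded_generator(seed: int) -> torch.Generator:
    return torch.Generator(device='cuda').manual_seed(seed)

# Hypothetical usage with an already-loaded diffusers pipeline `pipe`:
# image = pipe(prompt, generator=seeded_generator(12345)).images[0]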