Update app.py
app.py
CHANGED
@@ -101,7 +101,6 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
     return p.replace("{prompt}", positive), n + negative
 
 def load_and_prepare_model():
-    torch.set_default_device('cuda')
     #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
     vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None,use_safetensors=False)
     #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
@@ -260,7 +259,7 @@ def generate_30(
     lora_scale: float = 0.5,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-
+    torch.set_default_device('cuda')
     pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -318,7 +317,7 @@ def generate_60(
     lora_scale: float = 0.5,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-
+    torch.set_default_device('cuda')
     pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -376,7 +375,7 @@ def generate_90(
     lora_scale: float = 0.5,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-
+    torch.set_default_device('cuda')
     pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
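This commit moves torch.set_default_device('cuda') out of load_and_prepare_model() and instead calls it at the top of generate_30, generate_60, and generate_90, so the CUDA default is (re)applied on each generation request rather than once at model load. Below is a minimal sketch of the resulting per-call pattern, assuming pipe, randomize_seed_fn, and the "skin" LoRA adapter are defined elsewhere in app.py; the prompt argument, the pipe(...) call, and the return value are illustrative assumptions, not part of the diff.

import torch

def generate_30(prompt: str, lora_scale: float = 0.5):
    # New in this commit: newly created tensors default to the GPU for this call.
    torch.set_default_device('cuda')
    # Apply the LoRA adapter weight chosen in the UI (defined elsewhere in app.py).
    pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
    # Fresh seed and a CUDA generator for reproducible sampling.
    seed = int(randomize_seed_fn())
    generator = torch.Generator(device='cuda').manual_seed(seed)
    # Hypothetical call for illustration; the real function passes more parameters.
    return pipe(prompt, generator=generator).images[0]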