Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -53,16 +53,16 @@ def upload_to_ftp(filename):
     except Exception as e:
         print(f"FTP upload error: {e}")
 
-device = torch.device("cuda:0"
+device = torch.device("cuda:0")
 torch_dtype = torch.bfloat16
 
 checkpoint = "microsoft/Phi-3.5-mini-instruct"
 #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
 #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
-vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",
+vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",safety_checker=None)
 
-pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16"
+pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(device=torch.device("cuda:0")).to(torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
 #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
@@ -76,7 +76,7 @@ pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-me
 #pipe = torch.compile(pipe)
 # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear")
 
-refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16",
+refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16", requires_aesthetics_score=True)
 refiner.vae=vae
 refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="sde-dpmsolver++")
 refiner.to(device=torch.device("cuda:0"))
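The hunks above only configure the pipelines; the commit does not show where app.py later calls pipe and refiner. As a minimal sketch of how the two pieces usually fit together, assuming the Space uses the SD 3.5 pipeline for the base render and the SDXL img2img refiner for a second pass (the prompt, step count, guidance scale, and strength below are illustrative, not taken from app.py):

import torch
from diffusers import StableDiffusion3Pipeline, StableDiffusionXLImg2ImgPipeline

device = torch.device("cuda:0")

# Base pass: SD 3.5 medium renders the initial image, moved to the GPU in
# bfloat16 the same way this commit sets up `pipe`.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-medium-bf16"
).to(device).to(torch.bfloat16)

# Refiner pass: the SDXL img2img refiner reprocesses the base output.
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "ford442/stable-diffusion-xl-refiner-1.0-bf16",
    requires_aesthetics_score=True,
    torch_dtype=torch.bfloat16,  # assumed dtype; the diff only shows .to(cuda:0)
).to(device)

prompt = "a lighthouse on a cliff at dusk"  # illustrative prompt
base = pipe(prompt, num_inference_steps=28, guidance_scale=4.5).images[0]
out = refiner(prompt=prompt, image=base, strength=0.3).images[0]
out.save("refined.png")

For brevity the sketch omits two steps the diff does perform on the refiner: swapping in the stabilityai/sdxl-vae VAE and rebuilding its scheduler with EulerAncestralDiscreteScheduler.from_config(..., algorithm_type="sde-dpmsolver++").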