Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -58,8 +58,9 @@ torch_dtype = torch.bfloat16
|
|
| 58 |
|
| 59 |
checkpoint = "microsoft/Phi-3.5-mini-instruct"
|
| 60 |
#vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
| 61 |
-vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
|
| 62 |
#vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
|
|
|
|
|
|
|
| 63 |
|
| 64 |
pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16") #.to(torch.bfloat16)
|
| 65 |
#pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
|
|
@@ -75,7 +76,7 @@ pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-me
|
|
| 75 |
#pipe = torch.compile(pipe)
|
| 76 |
# pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear")
|
| 77 |
|
| 78 |
-refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16",
|
| 79 |
#refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float32, requires_aesthetics_score=True, device_map='balanced')
|
| 80 |
refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
|
| 81 |
#refiner.enable_model_cpu_offload()
|
|
@@ -257,7 +258,7 @@ def infer(
|
|
| 257 |
#upload_to_ftp(latent_path)
|
| 258 |
#refiner.scheduler.set_timesteps(num_inference_steps,device)
|
| 259 |
pipe.to(torch.device('cpu'))
|
| 260 |
-refiner.to(torch.
|
| 261 |
refine = refiner(
|
| 262 |
prompt=f"{enhanced_prompt_2}, high quality masterpiece, complex details",
|
| 263 |
negative_prompt = negative_prompt,
|
|
|
|
| 58 |
|
| 59 |
checkpoint = "microsoft/Phi-3.5-mini-instruct"
|
| 60 |
#vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
|
|
|
| 61 |
#vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
|
| 62 |
+#vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
|
| 63 |
+vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
| 64 |
|
| 65 |
pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16") #.to(torch.bfloat16)
|
| 66 |
#pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
|
|
|
|
| 76 |
#pipe = torch.compile(pipe)
|
| 77 |
# pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear")
|
| 78 |
|
| 79 |
+refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16", vaeXL, requires_aesthetics_score=True) #.to(torch.bfloat16)
|
| 80 |
#refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float32, requires_aesthetics_score=True, device_map='balanced')
|
| 81 |
refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
|
| 82 |
#refiner.enable_model_cpu_offload()
|
|
|
|
| 258 |
#upload_to_ftp(latent_path)
|
| 259 |
#refiner.scheduler.set_timesteps(num_inference_steps,device)
|
| 260 |
pipe.to(torch.device('cpu'))
|
| 261 |
+refiner.to(device=device, dtype=torch.bfloat16)
|
| 262 |
refine = refiner(
|
| 263 |
prompt=f"{enhanced_prompt_2}, high quality masterpiece, complex details",
|
| 264 |
negative_prompt = negative_prompt,
|