Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -53,20 +53,20 @@ torch_dtype = torch.bfloat16
 
 checkpoint = "microsoft/Phi-3.5-mini-instruct"
 #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16
+vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16)
 
-pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16", torch_dtype=torch.bfloat16
+pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16", torch_dtype=torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
 
 # pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")
 
 #pipe.scheduler.config.requires_aesthetics_score = False
 #pipe.enable_model_cpu_offload()
-
+pipe.to(device)
 #pipe = torch.compile(pipe)
-
+pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="sde-dpmsolver++")
 
-refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16", vae=vae, torch_dtype=torch.bfloat16, use_safetensors=True, requires_aesthetics_score=True, device_map='balanced')
+#refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16", vae=vae, torch_dtype=torch.bfloat16, use_safetensors=True, requires_aesthetics_score=True, device_map='balanced')
 #refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float32, requires_aesthetics_score=True, device_map='balanced')
 
 #refiner.enable_model_cpu_offload()
@@ -74,7 +74,7 @@ refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffu
 #refiner.scheduler.config.requires_aesthetics_score=False
 #refiner.to(device)
 #refiner = torch.compile(refiner)
-refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config, beta_schedule="scaled_linear")
+#refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config, beta_schedule="scaled_linear")
 
 tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=False, device_map='balanced')
 tokenizer.tokenizer_legacy=False
@@ -90,7 +90,7 @@ def filter_text(text):
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 4096
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=90)
 def infer(
     prompt,
     negative_prompt,
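
For reference, a minimal sketch of the generator setup this commit converges on, assuming current diffusers APIs; it is not the Space's full app.py, and `device` is assumed to be defined earlier in the file. The extra `algorithm_type` kwarg in the committed scheduler line is a DPM-Solver++ option carried over from the commented-out DPMSolverMultistepScheduler call, so EulerAncestralDiscreteScheduler will most likely just ignore it when rebuilt from the config; it is dropped in the sketch below.

import torch
from diffusers import AutoencoderKL, StableDiffusion3Pipeline, EulerAncestralDiscreteScheduler

device = "cuda"  # assumption: app.py defines this near the top

# bf16 SDXL VAE, now only referenced by the commented-out SDXL refiner stage
vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16)

# bf16 repack of SD 3.5 medium; it ships its own VAE, so the SDXL one above is not passed in
pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-medium-bf16", torch_dtype=torch.bfloat16
)
pipe.to(device)

# scheduler swap, keeping only the option EulerAncestralDiscreteScheduler actually defines
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipe.scheduler.config, beta_schedule="scaled_linear"
)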
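
The last hunk pins the ZeroGPU allocation: on a Space "Running on Zero", `@spaces.GPU` requests a GPU only for the decorated call, and `duration` caps the allocation in seconds. A hedged usage sketch follows; the function body is a placeholder, not the Space's real infer().

import spaces

@spaces.GPU(duration=90)  # ask ZeroGPU for up to 90 seconds of GPU time per call
def infer(prompt, negative_prompt=""):
    # placeholder body; in the Space, the SD 3.5 pipeline runs here on the allocated GPU
    return prompt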