Update app.py
app.py CHANGED
@@ -62,7 +62,7 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
 #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
 vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 
-pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-
+pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-large-bf16").to(device=device, dtype=torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
 #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
@@ -76,9 +76,9 @@ pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-me
 #pipe = torch.compile(pipe)
 # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear")
 
-refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16",vae = vaeXL, requires_aesthetics_score=True) #.to(torch.bfloat16)
+#refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16",vae = vaeXL, requires_aesthetics_score=True) #.to(torch.bfloat16)
 #refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float32, requires_aesthetics_score=True, device_map='balanced')
-refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
+#refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
 #refiner.enable_model_cpu_offload()
 
 #pipe.to(device=device, dtype=torch.bfloat16)
@@ -261,6 +261,7 @@ def infer(
 #torch.save(generated_latents, latent_path)
 #upload_to_ftp(latent_path)
 #refiner.scheduler.set_timesteps(num_inference_steps,device)
+'''
 pipe.to(torch.device('cpu'))
 refiner.to(device=device, dtype=torch.bfloat16)
 refine = refiner(
@@ -276,16 +277,17 @@ def infer(
 refine.save(refine_path,optimize=False,compress_level=0)
 upload_to_ftp(refine_path)
 refiner.to(torch.device('cpu'))
+'''
 upscaler_2.to(torch.device('cuda'))
 with torch.no_grad():
-upscale2 = upscaler_2(
+upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
 print('-- got upscaled image --')
 upscaler_2.to(torch.device('cpu'))
 downscale2 = upscale2.resize((upscale2.width // 4, upscale2.height // 4),Image.LANCZOS)
 upscale_path = f"sd35_upscale_{seed}.png"
 downscale2.save(upscale_path,optimize=False,compress_level=0)
 upload_to_ftp(upscale_path)
-return
+return sd_image, seed, enhanced_prompt
 
 examples = [
 "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
@@ -335,7 +337,7 @@ def repeat_infer(
 
 with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
 with gr.Column(elem_id="col-container"):
-gr.Markdown(" # Text-to-Text-to-Image StableDiffusion 3.5
+gr.Markdown(" # Text-to-Text-to-Image StableDiffusion 3.5 Large")
 expanded_prompt_output = gr.Textbox(label="Expanded Prompt", lines=5) # Add this line
 with gr.Row():
 prompt = gr.Text(
@@ -343,7 +345,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
 show_label=False,
 max_lines=1,
 placeholder="Enter your prompt",
-value="A captivating Christmas scene.",
 container=False,
 )
 options = [True, False]
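Net effect inside infer(): the SDXL refiner pass is disabled by wrapping it in a triple-quoted string literal, and the raw SD3.5 output now goes straight to the auxiliary upscaler, which is called with tiling enabled, downscaled, uploaded over FTP, and returned to the UI along with the seed and expanded prompt. The following is a minimal sketch of that post-generation tail, assuming sd_image is the PIL image produced by the SD3.5 pipeline and that upscaler_2 and upload_to_ftp behave as they do elsewhere in app.py; the helper name finish_image is hypothetical and only illustrates the flow.

import torch
from PIL import Image

def finish_image(sd_image, seed, enhanced_prompt, upscaler_2, upload_to_ftp):
    # Hypothetical helper mirroring the tail of infer() after this commit.
    upscaler_2.to(torch.device('cuda'))  # upscaler occupies the GPU only for this step
    with torch.no_grad():
        # Tiled upscaling (tile sizes taken from the diff) keeps peak VRAM bounded.
        upscale2 = upscaler_2(sd_image, tiling=True, tile_width=256, tile_height=256)
    upscaler_2.to(torch.device('cpu'))  # release the GPU again
    # Shrink the upscaled result back down with LANCZOS before saving.
    downscale2 = upscale2.resize((upscale2.width // 4, upscale2.height // 4), Image.LANCZOS)
    upscale_path = f"sd35_upscale_{seed}.png"
    downscale2.save(upscale_path, optimize=False, compress_level=0)
    upload_to_ftp(upscale_path)
    # The un-refined SD3.5 image is what the Gradio UI now receives.
    return sd_image, seed, enhanced_prompt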