Update app.py
app.py CHANGED
@@ -55,8 +55,8 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
 #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16, device_map='balanced')
 
-
-pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
+pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16", torch_dtype=torch.bfloat16, device_map='balanced')
+#pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
 
 # pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")
 
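This hunk swaps the fp32 stabilityai/stable-diffusion-3.5-medium checkpoint for a bfloat16 repack, matching the bf16 VAE loaded just above; bf16 weights need roughly half the memory of float32. A minimal loading sketch, assuming the diffusers StableDiffusion3Pipeline API, a CUDA device, and the repo id exactly as named in the diff:

import torch
from diffusers import StableDiffusion3Pipeline

# Sketch only: load the bf16 repack referenced in the diff and let
# device_map='balanced' spread components across available devices.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-medium-bf16",
    torch_dtype=torch.bfloat16,   # roughly half the weight memory of torch.float32
    device_map="balanced",
)
print(pipe.transformer.dtype)     # expected: torch.bfloat16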
@@ -90,7 +90,7 @@ def filter_text(text):
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 4096
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=80)
 def infer(
     prompt,
     negative_prompt,
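On a ZeroGPU Space, the spaces.GPU decorator attaches a GPU only while the decorated function runs; the duration argument is the per-call time budget in seconds, set here to 80. A minimal sketch of the pattern, assuming the spaces package on ZeroGPU hardware:

import spaces

@spaces.GPU(duration=80)   # request a GPU for up to ~80 s per call
def infer(prompt):
    # all CUDA work must happen inside this function;
    # no GPU is attached outside it
    ...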
@@ -103,7 +103,7 @@ def infer(
     progress=gr.Progress(track_tqdm=True),
 ):
     seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator(device='
+    generator = torch.Generator(device='cuda').manual_seed(seed)
 
     system_prompt_rewrite = (
         "You are an AI assistant that rewrites image prompts to be more descriptive and detailed."
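The generator is now created on the CUDA device and seeded with the randomly drawn seed, so the run is reproducible and the returned seed can regenerate the same image. A small sketch of the idea, assuming a CUDA device is available:

import random
import torch

MAX_SEED = 2**31 - 1   # same bound as np.iinfo(np.int32).max
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)
# Passing generator=generator to the pipeline call fixes the initial latents,
# so re-running with the same seed reproduces the same image.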
@@ -150,7 +150,7 @@ def infer(
     ).images[0]
     print('-- got image --')
     image_path = f"sd35m_{seed}.png"
-    sd_image.save(image_path)
+    sd_image.save(image_path, optimize=False, compress_level=0)
     upload_to_ftp(image_path)
     refine = refiner(
         prompt=f"{prompt}, high quality masterpiece, complex details",
@@ -161,7 +161,7 @@ def infer(
         generator=generator,
     ).images[0]
     refine_path = f"refine_{seed}.png"
-    refine.save(refine_path)
+    refine.save(refine_path, optimize=False, compress_level=0)
     upload_to_ftp(refine_path)
     return refine, seed, refine_path, enhanced_prompt
 
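Both save calls now pass optimize=False and compress_level=0 to Pillow's PNG writer: compress_level=0 stores the PNG without zlib compression and optimize=False skips the extra optimization pass, trading a larger file for a faster save before upload_to_ftp. A small standalone sketch, assuming Pillow:

from PIL import Image

img = Image.new("RGB", (64, 64))   # stand-in for the generated image
# Fastest PNG write: no zlib compression, no optimization pass (largest file).
img.save("example.png", optimize=False, compress_level=0)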