Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -73,7 +73,7 @@ STYLE_NAMES = list(styles.keys())
 def apply_style(style_name: str, positive: str) -> str:
     return styles.get(style_name, styles[DEFAULT_STYLE_NAME]).replace("{prompt}", positive)
 
-@spaces.GPU
+@spaces.GPU(duration=60, enable_queue=True)
 def generate_image_flux(
     prompt: str,
     seed: int = 0,
@@ -85,27 +85,20 @@ def generate_image_flux(
     progress=gr.Progress(track_tqdm=True),
 ):
     """Generate an image using the Flux.1 pipeline with a chosen style."""
+    torch.cuda.empty_cache()  # Clear unused GPU memory to prevent allocation errors
     seed = int(randomize_seed_fn(seed, randomize_seed))
     positive_prompt = apply_style(style_name, prompt)
     if trigger_word:
         positive_prompt = f"{trigger_word} {positive_prompt}"
-
-
-
-
-
-
-
-
-
-        width=width,
-        height=height,
-        guidance_scale=guidance_scale,
-        num_inference_steps=28,
-        num_images_per_prompt=1,
-        output_type="pil",
-    )
-    images = outputs.images
+    images = pipe(
+        prompt=positive_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=28,
+        num_images_per_prompt=1,
+        output_type="pil",
+    ).images
     image_paths = [save_image(img) for img in images]
     return image_paths, seed
 
@@ -169,6 +162,7 @@ def generate(
     - "@image": triggers image generation using the Flux.1 pipeline.
     - "@tts1" or "@tts2": triggers text-to-speech after generation.
     """
+    torch.cuda.empty_cache()  # Clear unused GPU memory for consistency
     text = input_dict["text"]
     files = input_dict.get("files", [])
 
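For reference, the pattern this commit moves to — a spaces.GPU-decorated entry point that clears the CUDA cache before inference and reads .images straight off the pipeline call — looks roughly like the sketch below. The diff does not show how the pipeline or the save_image helper are constructed, so the checkpoint id, dtype, seed handling, and the helper itself are assumptions for illustration, and the sketch uses only the duration argument on the decorator.

    # Minimal sketch of the pattern in this commit; setup details are assumed,
    # since they are not part of the diff.
    import uuid

    import spaces
    import torch
    from diffusers import FluxPipeline

    # Assumed checkpoint and dtype; the Space's actual pipeline setup is not shown here.
    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.bfloat16,
    ).to("cuda")

    def save_image(img) -> str:
        """Assumed helper: write a PIL image to a unique file and return its path."""
        path = f"{uuid.uuid4()}.png"
        img.save(path)
        return path

    @spaces.GPU(duration=60)  # hold the ZeroGPU slot for at most ~60 s per call
    def generate_image_flux(prompt: str, seed: int = 0, width: int = 1024,
                            height: int = 1024, guidance_scale: float = 3.5):
        torch.cuda.empty_cache()  # drop cached allocations before a new run
        generator = torch.Generator("cuda").manual_seed(seed)  # assumed seed handling
        images = pipe(
            prompt=prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=28,
            num_images_per_prompt=1,
            output_type="pil",
            generator=generator,
        ).images
        return [save_image(img) for img in images], seed

Dropping the intermediate outputs variable is purely cosmetic, since the pipeline exposes its PIL images under .images either way; the behaviour-relevant parts of the commit are the decorator arguments and the cache clearing.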