Add lazy caching & negative prompt

#10
by multimodalart (HF Staff) - opened
Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -173,6 +173,7 @@ def get_image_size(aspect_ratio):
 @spaces.GPU(duration=120)
 def infer(
     prompt,
+    negative_prompt="text, watermark, copyright, blurry, low resolution",
     seed=42,
     randomize_seed=False,
     aspect_ratio="16:9",
@@ -184,8 +185,6 @@ def infer(
     """
     Generates an image using the local Qwen-Image diffusers pipeline.
     """
-    # Hardcode the negative prompt as requested
-    negative_prompt = "text, watermark, copyright, blurry, low resolution"

     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
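
Note on the two hunks above: the negative prompt moves from a hardcoded local variable into a defaulted keyword argument, so any caller that omits it (for example the examples, which only wire up the prompt) still gets the old value, while the UI can now override it. A minimal sketch with the signature trimmed to two parameters (the real infer also takes seed, aspect-ratio, guidance, and step settings):

```python
# Trimmed illustration of the defaulted keyword argument; not the app's full signature.
def infer(prompt, negative_prompt="text, watermark, copyright, blurry, low resolution"):
    return prompt, negative_prompt

print(infer("a corgi astronaut"))      # default negative prompt applies
print(infer("a corgi astronaut", ""))  # an explicit value from the UI overrides it
```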
@@ -212,7 +211,7 @@ def infer(
         num_inference_steps=num_inference_steps,
         generator=generator,
         true_cfg_scale=guidance_scale,
-        guidance_scale=1.0 # Use a fixed default for distilled guidance
+        guidance_scale=1.0
     ).images[0]

     return image, seed
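
For the guidance arguments: the UI slider feeds true_cfg_scale (true classifier-free guidance, which, as I understand the diffusers Qwen-Image pipeline, is what applies the negative prompt), while guidance_scale stays pinned at 1.0 as a fixed default for the distilled guidance; this hunk only drops the inline comment. A rough, self-contained sketch of such a call outside the Space, assuming the Qwen/Qwen-Image checkpoint, bfloat16, and an illustrative 16:9 size preset (all assumptions, not taken from this diff):

```python
# Hedged sketch of a Qwen-Image generation call; model id, dtype, and dimensions are assumptions.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16).to("cuda")

image = pipe(
    prompt="A capybara reading a newspaper at golden hour",
    negative_prompt="text, watermark, copyright, blurry, low resolution",
    width=1664,                          # assumed 16:9 preset; the app derives this via get_image_size()
    height=928,
    num_inference_steps=50,
    true_cfg_scale=4.0,                  # the app passes the UI "guidance scale" slider here
    guidance_scale=1.0,                  # fixed default for distilled guidance, as in the diff
    generator=torch.Generator(device="cuda").manual_seed(42),
).images[0]
image.save("qwen_image_example.png")
```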
@@ -271,6 +270,7 @@ with gr.Blocks(css=css) as demo:
                 choices=["1:1", "16:9", "9:16", "4:3", "3:4", "3:2", "2:3"],
                 value="16:9",
             )
+            negative_prompt = gr.Text(label="Negative Prompt", value="text, watermark, copyright, blurry, low resolution")
             prompt_enhance = gr.Checkbox(label="Prompt Enhance", value=True)

         with gr.Row():
@@ -290,14 +290,14 @@ with gr.Blocks(css=css) as demo:
                 value=50,
             )

-        gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
+        gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples="lazy")

     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
         inputs=[
             prompt,
-            # negative_prompt is no longer an input from the UI
+            negative_prompt,
             seed,
             randomize_seed,
             aspect_ratio,
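
On the UI side, the old hardcoded string becomes the default value of an editable Negative Prompt textbox that is wired into the gr.on inputs, and cache_examples switches from False to "lazy": clicking an example now runs infer once and caches the result for subsequent users, whereas False did not cache example outputs at all. A stripped-down, hypothetical stand-in for the app showing just this wiring:

```python
# Hypothetical minimal app; component set and example data are placeholders, not the Space's code.
import gradio as gr

def infer(prompt, negative_prompt="text, watermark, copyright, blurry, low resolution", seed=42):
    # stand-in for the real Qwen-Image pipeline call
    return f"prompt={prompt!r}, negative={negative_prompt!r}", seed

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    negative_prompt = gr.Text(label="Negative Prompt",
                              value="text, watermark, copyright, blurry, low resolution")
    seed = gr.Slider(0, 2**31 - 1, value=42, step=1, label="Seed")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")

    # Lazy caching: the example is run and cached on first click, not precomputed at launch.
    gr.Examples(examples=[["A cat in a spacesuit"]], inputs=[prompt],
                outputs=[result, seed], fn=infer, cache_examples="lazy")

    gr.on(triggers=[run_button.click, prompt.submit], fn=infer,
          inputs=[prompt, negative_prompt, seed], outputs=[result, seed])

demo.launch()
```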
 