prithivMLmods committed on
Commit 5a70430 · verified · 1 Parent(s): ee5186b

Update app.py

Files changed (1)
  1. app.py +436 -265
app.py CHANGED
@@ -1,24 +1,41 @@
- import spaces
- import gradio as gr
  import torch
  from PIL import Image
  from diffusers import DiffusionPipeline
- import random
- import uuid
- from typing import Union, List, Optional
- import numpy as np
- import time
- import zipfile
- import os
  import requests
  from urllib.parse import urlparse
  import tempfile
  import shutil

- # Description for the app
- DESCRIPTION = """## Qwen Image Hpc/."""

- # Helper functions
  def save_image(img):
      unique_name = str(uuid.uuid4()) + ".png"
      img.save(unique_name)
@@ -29,25 +46,138 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
      seed = random.randint(0, MAX_SEED)
      return seed

- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 2048

- # Load Qwen/Qwen-Image pipeline
  dtype = torch.bfloat16
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # --- Model Loading ---
- pipe_qwen = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype).to(device)

- # Aspect ratios
  aspect_ratios = {
-     "1:1": (1328, 1328),
-     "16:9": (1664, 928),
-     "9:16": (928, 1664),
-     "4:3": (1472, 1140),
-     "3:4": (1140, 1472)
  }

  def load_lora_opt(pipe, lora_input):
      lora_input = lora_input.strip()
      if not lora_input:
@@ -60,7 +190,6 @@ def load_lora_opt(pipe, lora_input):

      if lora_input.startswith("http"):
          url = lora_input
-
          # Repo page (no blob/resolve)
          if "huggingface.co" in url and "/blob/" not in url and "/resolve/" not in url:
              repo_id = urlparse(url).path.strip("/")
@@ -74,7 +203,6 @@ def load_lora_opt(pipe, lora_input):
          # Download direct file
          tmp_dir = tempfile.mkdtemp()
          local_path = os.path.join(tmp_dir, os.path.basename(urlparse(url).path))
-
          try:
              print(f"Downloading LoRA from {url}...")
              resp = requests.get(url, stream=True)
@@ -87,272 +215,315 @@ def load_lora_opt(pipe, lora_input):
      finally:
          shutil.rmtree(tmp_dir, ignore_errors=True)

- # Generation function for Qwen/Qwen-Image
- @spaces.GPU(duration=120)
- def generate_qwen(
-     prompt: str,
-     negative_prompt: str = "",
-     seed: int = 0,
-     width: int = 1024,
-     height: int = 1024,
-     guidance_scale: float = 4.0,
-     randomize_seed: bool = False,
-     num_inference_steps: int = 50,
-     num_images: int = 1,
-     zip_images: bool = False,
-     lora_input: str = "",
-     lora_scale: float = 1.0,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     generator = torch.Generator(device).manual_seed(seed)
-
-     start_time = time.time()
-
-     current_adapters = pipe_qwen.get_list_adapters()
-     for adapter in current_adapters:
-         pipe_qwen.delete_adapters(adapter)
-     pipe_qwen.disable_lora()
-
-     use_lora = False
-     if lora_input and lora_input.strip() != "":
-         load_lora_opt(pipe_qwen, lora_input)
-         pipe_qwen.set_adapters(["default"], adapter_weights=[lora_scale])
-         use_lora = True
-
-     images = pipe_qwen(
-         prompt=prompt,
-         negative_prompt=negative_prompt if negative_prompt else "",
-         height=height,
-         width=width,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         num_images_per_prompt=num_images,
-         generator=generator,
-         output_type="pil",
-     ).images

-     end_time = time.time()
-     duration = end_time - start_time

-     image_paths = [save_image(img) for img in images]
-     zip_path = None
-     if zip_images:
-         zip_name = str(uuid.uuid4()) + ".zip"
-         with zipfile.ZipFile(zip_name, 'w') as zipf:
-             for i, img_path in enumerate(image_paths):
-                 zipf.write(img_path, arcname=f"Img_{i}.png")
-         zip_path = zip_name
-
-     # Clean up adapters
-     current_adapters = pipe_qwen.get_list_adapters()
-     for adapter in current_adapters:
-         pipe_qwen.delete_adapters(adapter)
-     pipe_qwen.disable_lora()

-     return image_paths, seed, f"{duration:.2f}", zip_path

- # Wrapper function to handle UI logic
  @spaces.GPU(duration=120)
- def generate(
-     prompt: str,
-     negative_prompt: str,
-     use_negative_prompt: bool,
-     seed: int,
-     width: int,
-     height: int,
-     guidance_scale: float,
-     randomize_seed: bool,
-     num_inference_steps: int,
-     num_images: int,
-     zip_images: bool,
-     lora_input: str,
-     lora_scale: float,
-     progress=gr.Progress(track_tqdm=True),
- ):
      final_negative_prompt = negative_prompt if use_negative_prompt else ""
-     return generate_qwen(
-         prompt=prompt,
-         negative_prompt=final_negative_prompt,
-         seed=seed,
-         width=width,
-         height=height,
-         guidance_scale=guidance_scale,
-         randomize_seed=randomize_seed,
-         num_inference_steps=num_inference_steps,
-         num_images=num_images,
-         zip_images=zip_images,
-         lora_input=lora_input,
-         lora_scale=lora_scale,
-         progress=progress,
-     )

- # Examples
- examples = [
-     "A decadent slice of layered chocolate cake on a ceramic plate with a drizzle of chocolate syrup and powdered sugar dusted on top. photographed from a slightly low angle with high resolution, natural soft lighting, rich contrast, shallow depth of field, and professional color grading to highlight the dessert’s textures --ar 85:128 --v 6.0 --style raw",
-     "A beautifully decorated round chocolate birthday cake with rich chocolate frosting and elegant piping, topped with the name 'Qwen' written in white icing. placed on a wooden cake stand with scattered chocolate shavings around, softly lit with natural light, high resolution, professional food photography, clean background, no branding --ar 85:128 --v 6.0 --style raw",
-     "Realistic still life photography style: A single, fresh apple, resting on a clean, soft-textured surface. The apple is slightly off-center, softly backlit to highlight its natural gloss and subtle color gradients—deep crimson red blending into light golden hues. Fine details such as small blemishes, dew drops, and a few light highlights enhance its lifelike appearance. A shallow depth of field gently blurs the neutral background, drawing full attention to the apple. Hyper-detailed 8K resolution, studio lighting, photorealistic render, emphasizing texture and form.",
-     "一幅精致细腻的工笔画,画面中心是一株蓬勃生长的红色牡丹,花朵繁茂,既有盛开的硕大花瓣,也有含苞待放的花蕾,层次丰富,色彩艳丽而不失典雅。牡丹枝叶舒展,叶片浓绿饱满,脉络清晰可见,与红花相映成趣。一只蓝紫色蝴蝶仿佛被画中花朵吸引,停驻在画面中央的一朵盛开牡丹上,流连忘返,蝶翼轻展,细节逼真,仿佛随时会随风飞舞。整幅画作笔触工整严谨,色彩浓郁鲜明,展现出中国传统工笔画的精妙与神韵,画面充满生机与灵动之感。",
-     "A young girl wearing school uniform stands in a classroom, writing on a chalkboard. The text Introducing Qwen-Image, a foundational image generation model that excels in complex text rendering and precise image editing appears in neat white chalk at the center of the blackboard. Soft natural light filters through windows, casting gentle shadows. The scene is rendered in a realistic photography style with fine details, shallow depth of field, and warm tones. The girl's focused expression and chalk dust in the air add dynamism. Background elements include desks and educational posters, subtly blurred to emphasize the central action. Ultra-detailed 32K resolution, DSLR-quality, soft bokeh effect, documentary-style composition",
-     "手绘风格的水循环示意图,整体画面呈现出一幅生动形象的水循环过程图解。画面中央是一片起伏的山脉和山谷,山谷中流淌着一条清澈的河流,河流最终汇入一片广阔的海洋。山体和陆地上绘制有绿色植被。画面下方为地下水层,用蓝色渐变色块表现,与地表水形成层次分明的空间关系。太阳位于画面右上角,促使地表水蒸发,用上升的曲线箭头表示蒸发过程。云朵漂浮在空中,由白色棉絮状绘制而成,部分云层厚重,表示水汽凝结成雨,用向下箭头连接表示降雨过程。雨水以蓝色线条和点状符号表示,从云中落下,补充河流与地下水。整幅图以卡通手绘风格呈现,线条柔和,色彩明亮,标注清晰。背景为浅黄色纸张质感,带有轻微的手绘纹理。"
- ]

  css = '''
- .gradio-container {
-     max-width: 590px !important;
-     margin: 0 auto !important;
- }
- h1 {
-     text-align: center;
- }
- footer {
-     visibility: hidden;
- }
  '''

- # Gradio interface
- with gr.Blocks(css=css, theme="bethecloud/storj_theme", delete_cache=(240, 240)) as demo:
-     gr.Markdown(DESCRIPTION)
-     with gr.Row():
-         prompt = gr.Text(
-             label="Prompt",
-             show_label=False,
-             max_lines=1,
-             placeholder="✦︎ Enter your prompt",
-             container=False,
-         )
-         run_button = gr.Button("Run", scale=0, variant="primary")
-     result = gr.Gallery(label="Result", columns=1, show_label=False, preview=True)

      with gr.Row():
-         aspect_ratio = gr.Dropdown(
-             label="Aspect Ratio",
-             choices=list(aspect_ratios.keys()),
-             value="1:1",
-         )
-         lora = gr.Textbox(label="qwen image lora (optional)", placeholder="enter the path...")
-     with gr.Accordion("Additional Options", open=False):
-         use_negative_prompt = gr.Checkbox(
-             label="Use negative prompt",
-             value=True,
-             visible=True
-         )
-         negative_prompt = gr.Text(
-             label="Negative prompt",
-             max_lines=1,
-             placeholder="Enter a negative prompt",
-             value="text, watermark, copyright, blurry, low resolution",
-             visible=True,
-         )
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row():
-             width = gr.Slider(
-                 label="Width",
-                 minimum=512,
-                 maximum=2048,
-                 step=64,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=512,
-                 maximum=2048,
-                 step=64,
-                 value=1024,
-             )
-         guidance_scale = gr.Slider(
-             label="Guidance Scale",
-             minimum=0.0,
-             maximum=20.0,
-             step=0.1,
-             value=4.0,
-         )
-         num_inference_steps = gr.Slider(
-             label="Number of inference steps",
-             minimum=1,
-             maximum=100,
-             step=1,
-             value=50,
-         )
-         num_images = gr.Slider(
-             label="Number of images",
-             minimum=1,
-             maximum=5,
-             step=1,
-             value=1,
-         )
-         zip_images = gr.Checkbox(label="Zip generated images", value=False)
-         with gr.Row():
-             lora_scale = gr.Slider(
-                 label="LoRA Scale",
-                 minimum=0,
-                 maximum=2,
-                 step=0.01,
-                 value=1,
              )
-
-     gr.Markdown("### Output Information")
-     seed_display = gr.Textbox(label="Seed used", interactive=False)
-     generation_time = gr.Textbox(label="Generation time (seconds)", interactive=False)
-     zip_file = gr.File(label="Download ZIP")
-
-     # Update aspect ratio
-     def set_dimensions(ar):
-         w, h = aspect_ratios[ar]
-         return gr.update(value=w), gr.update(value=h)
-
      aspect_ratio.change(
          fn=set_dimensions,
          inputs=aspect_ratio,
          outputs=[width, height]
      )

-     # Negative prompt visibility
      use_negative_prompt.change(
          fn=lambda x: gr.update(visible=x),
          inputs=use_negative_prompt,
          outputs=negative_prompt
      )

-     # Run button and prompt submit
-     gr.on(
-         triggers=[prompt.submit, run_button.click],
-         fn=generate,
-         inputs=[
-             prompt,
-             negative_prompt,
-             use_negative_prompt,
-             seed,
-             width,
-             height,
-             guidance_scale,
-             randomize_seed,
-             num_inference_steps,
-             num_images,
-             zip_images,
-             lora,
-             lora_scale,
-         ],
-         outputs=[result, seed_display, generation_time, zip_file],
-         api_name="run",
      )

-     # Examples
-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         outputs=[result, seed_display, generation_time, zip_file],
-         fn=generate,
-         cache_examples=False,
      )

- if __name__ == "__main__":
-     demo.queue(max_size=50).launch(share=False, mcp_server=True, ssr_mode=False, debug=True, show_error=True)
+ import os
+ import json
+ import copy
+ import time
+ import random
+ import logging
+ import numpy as np
+ from typing import Any, Dict, List, Optional, Union
  import torch
  from PIL import Image
+ import gradio as gr
+ import spaces
  from diffusers import DiffusionPipeline
+ from huggingface_hub import (
+     hf_hub_download,
+     HfFileSystem,
+     ModelCard,
+     snapshot_download)
+ from diffusers.utils import load_image
  import requests
  from urllib.parse import urlparse
  import tempfile
  import shutil
+ import uuid
+ import zipfile

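+ # Presumably mirrors the timestep-shift helper from diffusers' flow-matching (Flux-style)
+ # pipelines: it linearly maps the image token count to the scheduler shift `mu`.
+ # Note: nothing else in this file calls it.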
+ def calculate_shift(
+     image_seq_len,
+     base_seq_len: int = 256,
+     max_seq_len: int = 4096,
+     base_shift: float = 0.5,
+     max_shift: float = 1.16,
+ ):
+     m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+     b = base_shift - m * base_seq_len
+     mu = image_seq_len * m + b
+     return mu

  def save_image(img):
      unique_name = str(uuid.uuid4()) + ".png"
      img.save(unique_name)
 
      seed = random.randint(0, MAX_SEED)
      return seed

+ # Qwen Image pipeline with live preview capability
+ @torch.inference_mode()
+ def qwen_pipe_call_that_returns_an_iterable_of_images(
+     self,
+     prompt: Union[str, List[str]] = None,
+     negative_prompt: Optional[Union[str, List[str]]] = None,
+     height: Optional[int] = None,
+     width: Optional[int] = None,
+     num_inference_steps: int = 50,
+     guidance_scale: float = 4.0,
+     num_images_per_prompt: Optional[int] = 1,
+     generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+     output_type: Optional[str] = "pil",
+ ):
+     height = height or 1024
+     width = width or 1024
+
+     batch_size = 1 if isinstance(prompt, str) else len(prompt)
+     device = self._execution_device
+
+ # Generate intermediate images during the process
70
+ for i in range(num_inference_steps):
71
+ if i % 5 == 0: # Show progress every 5 steps
72
+ # Generate partial result
73
+ temp_result = self(
74
+ prompt=prompt,
75
+ negative_prompt=negative_prompt,
76
+ height=height,
77
+ width=width,
78
+ guidance_scale=guidance_scale,
79
+ num_inference_steps=max(1, i + 1),
80
+ num_images_per_prompt=num_images_per_prompt,
81
+ generator=generator,
82
+ output_type=output_type,
83
+ ).images[0]
84
+ yield temp_result
85
+ torch.cuda.empty_cache()
86
+
87
+ # Final high-quality result
88
+ final_result = self(
89
+ prompt=prompt,
90
+ negative_prompt=negative_prompt,
91
+ height=height,
92
+ width=width,
93
+ guidance_scale=guidance_scale,
94
+ num_inference_steps=num_inference_steps,
95
+ num_images_per_prompt=num_images_per_prompt,
96
+ generator=generator,
97
+ output_type=output_type,
98
+ ).images[0]
99
+
100
+ yield final_result

+ loras = [
+     # Sample Qwen-compatible LoRAs
+     {
+         "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Studio-Realism/resolve/main/images/2.png",
+         "title": "Studio Realism",
+         "repo": "prithivMLmods/Qwen-Image-Studio-Realism",
+         "weights": "qwen-studio-realism.safetensors",
+         "trigger_word": "Studio Realism"
+     },
+     {
+         "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Sketch-Smudge/resolve/main/images/1.png",
+         "title": "Sketch Smudge",
+         "repo": "prithivMLmods/Qwen-Image-Sketch-Smudge",
+         "weights": "qwen-sketch-smudge.safetensors",
+         "trigger_word": "Sketch Smudge"
+     },
+     {
+         "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Anime-LoRA/resolve/main/images/1.png",
+         "title": "Qwen Anime",
+         "repo": "prithivMLmods/Qwen-Image-Anime-LoRA",
+         "weights": "qwen-anime.safetensors",
+         "trigger_word": "Qwen Anime"
+     },
+     {
+         "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Synthetic-Face/resolve/main/images/2.png",
+         "title": "Synthetic Face",
+         "repo": "prithivMLmods/Qwen-Image-Synthetic-Face",
+         "weights": "qwen-synthetic-face.safetensors",
+         "trigger_word": "Synthetic Face"
+     },
+     {
+         "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Fragmented-Portraiture/resolve/main/images/3.png",
+         "title": "Fragmented Portraiture",
+         "repo": "prithivMLmods/Qwen-Image-Fragmented-Portraiture",
+         "weights": "qwen-fragmented-portraiture.safetensors",
+         "trigger_word": "Fragmented Portraiture"
+     },
+ ]
+
+ # -------------------------------------------------- Model Initialization --------------------------------------------------
  dtype = torch.bfloat16
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ base_model = "Qwen/Qwen-Image"

+ # Load Qwen Image pipeline
+ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)

+ # Add aspect ratios for Qwen
  aspect_ratios = {
+     "1:1": (1024, 1024),
+     "16:9": (1344, 768),
+     "9:16": (768, 1344),
+     "4:3": (1152, 896),
+     "3:4": (896, 1152),
+     "3:2": (1216, 832),
+     "2:3": (832, 1216)
  }

+ MAX_SEED = 2**32 - 1
+
+ # Add the custom method to the pipeline
+ pipe.qwen_pipe_call_that_returns_an_iterable_of_images = qwen_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+
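+ # Context manager that prints the wall-clock time spent in the enclosed block.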
+ class calculateDuration:
+     def __init__(self, activity_name=""):
+         self.activity_name = activity_name
+
+     def __enter__(self):
+         self.start_time = time.time()
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.end_time = time.time()
+         self.elapsed_time = self.end_time - self.start_time
+         if self.activity_name:
+             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
+         else:
+             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
+
  def load_lora_opt(pipe, lora_input):
      lora_input = lora_input.strip()
      if not lora_input:

      if lora_input.startswith("http"):
          url = lora_input
          # Repo page (no blob/resolve)
          if "huggingface.co" in url and "/blob/" not in url and "/resolve/" not in url:
              repo_id = urlparse(url).path.strip("/")

          # Download direct file
          tmp_dir = tempfile.mkdtemp()
          local_path = os.path.join(tmp_dir, os.path.basename(urlparse(url).path))
          try:
              print(f"Downloading LoRA from {url}...")
              resp = requests.get(url, stream=True)

      finally:
          shutil.rmtree(tmp_dir, ignore_errors=True)

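+ # Gallery click handler: updates the prompt placeholder and selection banner, and
+ # returns the selected index plus (optionally LoRA-specific) output dimensions.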
+ def update_selection(evt: gr.SelectData, width, height):
+     selected_lora = loras[evt.index]
+     new_placeholder = f"Type a prompt for {selected_lora['title']}"
+     lora_repo = selected_lora["repo"]
+     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
+
+     if "aspect" in selected_lora:
+         if selected_lora["aspect"] == "portrait":
+             width = 768
+             height = 1024
+         elif selected_lora["aspect"] == "landscape":
+             width = 1024
+             height = 768
+         else:
+             width = 1024
+             height = 1024
+
+     return (
+         gr.update(placeholder=new_placeholder),
+         updated_text,
+         evt.index,
+         width,
+         height,
+     )
+
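+ # Generator that streams preview frames from the live-preview method bound above.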
+ @spaces.GPU(duration=120)
+ def generate_image(prompt_mash, negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress):
+     pipe.to("cuda")
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+
+     with calculateDuration("Generating image"):
+         # Generate image with live preview
+         for img in pipe.qwen_pipe_call_that_returns_an_iterable_of_images(
+             prompt=prompt_mash,
+             negative_prompt=negative_prompt,
+             num_inference_steps=steps,
+             guidance_scale=cfg_scale,
+             width=width,
+             height=height,
+             generator=generator,
+         ):
+             yield img
+
+ def set_dimensions(ar):
+     w, h = aspect_ratios[ar]
+     return gr.update(value=w), gr.update(value=h)

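+ # Main handler: validates the LoRA selection, merges the trigger word into the prompt,
+ # swaps adapters, then re-yields preview images together with an HTML progress bar.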
  @spaces.GPU(duration=120)
+ def run_lora(prompt, negative_prompt, use_negative_prompt, aspect_ratio, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+     if selected_index is None:
+         raise gr.Error("You must select a LoRA before proceeding. 🧨")
+
+     selected_lora = loras[selected_index]
+     lora_path = selected_lora["repo"]
+     trigger_word = selected_lora["trigger_word"]
+
+     # Set dimensions based on aspect ratio
+     width, height = aspect_ratios[aspect_ratio]
+
+     if trigger_word:
+         if "trigger_position" in selected_lora:
+             if selected_lora["trigger_position"] == "prepend":
+                 prompt_mash = f"{trigger_word} {prompt}"
+             else:
+                 prompt_mash = f"{prompt} {trigger_word}"
+         else:
+             prompt_mash = f"{trigger_word} {prompt}"
+     else:
+         prompt_mash = prompt
+
+     # Handle negative prompt
      final_negative_prompt = negative_prompt if use_negative_prompt else ""

+     with calculateDuration("Unloading LoRA"):
+         # Clear existing adapters
+         current_adapters = pipe.get_list_adapters() if hasattr(pipe, 'get_list_adapters') else []
+         for adapter in current_adapters:
+             if hasattr(pipe, 'delete_adapters'):
+                 pipe.delete_adapters(adapter)
+         if hasattr(pipe, 'disable_lora'):
+             pipe.disable_lora()
+
+     # Load new LoRA weights
+     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
+         weight_name = selected_lora.get("weights", None)
+         load_lora_opt(pipe, lora_path)
+         if hasattr(pipe, 'set_adapters'):
+             pipe.set_adapters(["default"], adapter_weights=[lora_scale])
+
+     with calculateDuration("Randomizing seed"):
+         if randomize_seed:
+             seed = random.randint(0, MAX_SEED)
+
+     image_generator = generate_image(prompt_mash, final_negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress)
+
+     final_image = None
+     step_counter = 0
+     progress_bar = ""  # initialized so the final yield is safe even if no previews arrive
+     for image in image_generator:
+         step_counter += 1
+         final_image = image
+         progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
+         yield image, seed, gr.update(value=progress_bar, visible=True)
+
+     yield final_image, seed, gr.update(value=progress_bar, visible=False)
+
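+ # Resolves a "user/repo" id to (title, repo, weight filename, trigger word, preview image URL),
+ # rejecting repos whose model card does not declare a Qwen base model.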
+ def get_huggingface_safetensors(link):
+     split_link = link.split("/")
+     if len(split_link) == 2:
+         model_card = ModelCard.load(link)
+         base_model = model_card.data.get("base_model")
+         print(base_model)
+
+         # Allow Qwen models
+         if base_model and "qwen" not in base_model.lower():
+             raise Exception("Qwen-compatible LoRA Not Found!")
+
+         image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+         trigger_word = model_card.data.get("instance_prompt", "")
+         image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+
+         fs = HfFileSystem()
+         safetensors_name = None  # avoid NameError when no .safetensors file is found
+         try:
+             list_of_files = fs.ls(link, detail=False)
+             for file in list_of_files:
+                 if file.endswith(".safetensors"):
+                     safetensors_name = file.split("/")[-1]
+                 if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
+                     image_elements = file.split("/")
+                     image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+         except Exception as e:
+             print(e)
+             gr.Warning("You didn't include a link or a valid Hugging Face repository with a *.safetensors LoRA")
+             raise Exception("You didn't include a link or a valid Hugging Face repository with a *.safetensors LoRA")
+
+         return split_link[1], link, safetensors_name, trigger_word, image_url
+
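+ # Accepts either a full huggingface.co URL or a bare "user/repo" id.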
+ def check_custom_model(link):
+     if link.startswith("https://"):
+         if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
+             link_split = link.split("huggingface.co/")
+             return get_huggingface_safetensors(link_split[1])
+     else:
+         return get_huggingface_safetensors(link)
+
+ def add_custom_lora(custom_lora):
+     global loras
+     if custom_lora:
+         try:
+             title, repo, path, trigger_word, image = check_custom_model(custom_lora)
+             print(f"Loaded custom LoRA: {repo}")
+             card = f'''
+             <div class="custom_lora_card">
+                 <span>Loaded custom LoRA:</span>
+                 <div class="card_internal">
+                     <img src="{image}" />
+                     <div>
+                         <h3>{title}</h3>
+                         <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
+                     </div>
+                 </div>
+             </div>
+             '''
+             existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
+             if existing_item_index is None:
+                 new_item = {
+                     "image": image,
+                     "title": title,
+                     "repo": repo,
+                     "weights": path,
+                     "trigger_word": trigger_word
+                 }
+                 print(new_item)
+                 existing_item_index = len(loras)
+                 loras.append(new_item)
+
+             return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
+         except Exception as e:
+             gr.Warning("Invalid LoRA: either you entered an invalid link or a non-Qwen-compatible LoRA")
+             return gr.update(visible=True, value="Invalid LoRA: either you entered an invalid link or a non-Qwen-compatible LoRA"), gr.update(visible=False), gr.update(), "", None, ""
+     else:
+         return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
+
+ def remove_custom_lora():
+     return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
+
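+ # Assumption: attribute read by the Spaces ZeroGPU runtime to mark this callable for GPU dispatch.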
+ run_lora.zerogpu = True

  css = '''
+ #gen_btn{height: 100%}
+ #gen_column{align-self: stretch}
+ #title{text-align: center}
+ #title h1{font-size: 3em; display:inline-flex; align-items:center}
+ #title img{width: 100px; margin-right: 0.5em}
+ #gallery .grid-wrap{height: 10vh}
+ #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
+ .card_internal{display: flex;height: 100px;margin-top: .5em}
+ .card_internal img{margin-right: 1em}
+ .styler{--form-gap-width: 0px !important}
+ #progress{height:30px}
+ #progress .generating{display:none}
+ .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
+ .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
  '''

+ with gr.Blocks(theme="bethecloud/storj_theme", css=css, delete_cache=(120, 120)) as app:
+     title = gr.HTML("""<h1>Qwen Image LoRA DLC🥳</h1>""", elem_id="title")
+     selected_index = gr.State(None)

      with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="✦︎ Choose the LoRA and type the prompt")
+         with gr.Column(scale=1, elem_id="gen_column"):
+             generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
+
+     with gr.Row():
+         with gr.Column():
+             selected_info = gr.Markdown("")
+             gallery = gr.Gallery(
+                 [(item["image"], item["title"]) for item in loras],
+                 label="Qwen LoRA Collection",
+                 allow_preview=False,
+                 columns=3,
+                 elem_id="gallery",
+                 show_share_button=False
              )
+
+             with gr.Group():
+                 custom_lora = gr.Textbox(label="Enter Custom Qwen LoRA", placeholder="prithivMLmods/Qwen-Image-Sketch-Smudge")
+                 gr.Markdown("[Check the list of Qwen-compatible LoRAs](https://huggingface.co/models?search=qwen+lora)", elem_id="lora_list")
+
+             custom_lora_info = gr.HTML(visible=False)
+             custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
+
+         with gr.Column():
+             progress_bar = gr.Markdown(elem_id="progress", visible=False)
+             result = gr.Image(label="Generated Image", format="png")
+
+     with gr.Row():
+         aspect_ratio = gr.Dropdown(
+             label="Aspect Ratio",
+             choices=list(aspect_ratios.keys()),
+             value="1:1",
+         )
+
+     with gr.Row():
+         with gr.Accordion("Advanced Settings", open=False):
+
+             with gr.Row():
+                 use_negative_prompt = gr.Checkbox(
+                     label="Use negative prompt", value=True, visible=True
+                 )
+                 negative_prompt = gr.Text(
+                     label="Negative prompt",
+                     max_lines=1,
+                     placeholder="Enter a negative prompt",
+                     value="text, watermark, copyright, blurry, low resolution",
+                     visible=True,
+                 )
+
+             with gr.Column():
+                 with gr.Row():
+                     cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=4.0)
+                     steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=50)
+
+                 with gr.Row():
+                     width = gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024)
+                     height = gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024)
+
+                 with gr.Row():
+                     randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+                     lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2, step=0.01, value=1.0)
+
+     # Event handlers
+     gallery.select(
+         update_selection,
+         inputs=[width, height],
+         outputs=[prompt, selected_info, selected_index, width, height]
+     )
+
      aspect_ratio.change(
          fn=set_dimensions,
          inputs=aspect_ratio,
          outputs=[width, height]
      )

      use_negative_prompt.change(
          fn=lambda x: gr.update(visible=x),
          inputs=use_negative_prompt,
          outputs=negative_prompt
      )

+     custom_lora.input(
+         add_custom_lora,
+         inputs=[custom_lora],
+         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
      )

+     custom_lora_button.click(
+         remove_custom_lora,
+         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
+     )
+
+     gr.on(
+         triggers=[generate_button.click, prompt.submit],
+         fn=run_lora,
+         inputs=[prompt, negative_prompt, use_negative_prompt, aspect_ratio, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+         outputs=[result, seed, progress_bar]
      )

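+ # Queue incoming requests and launch the app (no public share link; SSR disabled).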
+ app.queue()
+ app.launch(share=False, ssr_mode=False, show_error=True)