Aatricks committed
Commit 1329d8c (verified) · Parent: 1264e6e

increased duration
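The change behind this commit message is the ZeroGPU decorator on generate_images in app.py: the bare @spaces.GPU becomes @spaces.GPU(duration=300), requesting up to 300 seconds of GPU time per call instead of the default allocation, presumably so that heavier runs (hires_fix, adetailer, several images per batch) are not cut short by the shorter default limit. A minimal sketch of the decorator's use, assuming the Hugging Face spaces package inside a ZeroGPU Space; the function body here is a placeholder, not the app's real pipeline call:

import spaces
import torch


@spaces.GPU(duration=300)  # hold the GPU for up to 300 seconds per call
def generate_images(prompt: str) -> str:
    # Placeholder body: the real app runs the LightDiffusion pipeline here.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"would generate '{prompt}' on {device}"

In the diff below, this decorator line is the only one that differs between the removed and re-added copies of app.py; the rest of the file is unchanged.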

Files changed (1)
  1. app.py +216 -216
app.py CHANGED
@@ -1,216 +1,216 @@
-import glob
-import gradio as gr
-import sys
-import os
-from PIL import Image
-import numpy as np
-import spaces
-
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
-
-from modules.user.pipeline import pipeline
-import torch
-
-
-def load_generated_images():
-    """Load generated images with given prefix from disk"""
-    image_files = glob.glob("./_internal/output/**/*.png")
-
-    # If there are no image files, return
-    if not image_files:
-        return []
-
-    # Sort files by modification time in descending order
-    image_files.sort(key=os.path.getmtime, reverse=True)
-
-    # Get most recent timestamp
-    latest_time = os.path.getmtime(image_files[0])
-
-    # Get all images from same batch (within 1 second of most recent)
-    batch_images = []
-    for file in image_files:
-        if abs(os.path.getmtime(file) - latest_time) < 1.0:
-            try:
-                img = Image.open(file)
-                batch_images.append(img)
-            except:
-                continue
-
-    if not batch_images:
-        return []
-    return batch_images
-
-
-@spaces.GPU
-def generate_images(
-    prompt: str,
-    width: int = 512,
-    height: int = 512,
-    num_images: int = 1,
-    batch_size: int = 1,
-    hires_fix: bool = False,
-    adetailer: bool = False,
-    enhance_prompt: bool = False,
-    img2img_enabled: bool = False,
-    img2img_image: str = None,
-    stable_fast: bool = False,
-    reuse_seed: bool = False,
-    flux_enabled: bool = False,
-    prio_speed: bool = False,
-    realistic_model: bool = False,
-    progress=gr.Progress(),
-):
-    """Generate images using the LightDiffusion pipeline"""
-    try:
-        if img2img_enabled and img2img_image is not None:
-            # Convert numpy array to PIL Image
-            if isinstance(img2img_image, np.ndarray):
-                img_pil = Image.fromarray(img2img_image)
-                img_pil.save("temp_img2img.png")
-                prompt = "temp_img2img.png"
-
-        # Run pipeline and capture saved images
-        with torch.inference_mode():
-            pipeline(
-                prompt=prompt,
-                w=width,
-                h=height,
-                number=num_images,
-                batch=batch_size,
-                hires_fix=hires_fix,
-                adetailer=adetailer,
-                enhance_prompt=enhance_prompt,
-                img2img=img2img_enabled,
-                stable_fast=stable_fast,
-                reuse_seed=reuse_seed,
-                flux_enabled=flux_enabled,
-                prio_speed=prio_speed,
-                autohdr=True,
-                realistic_model=realistic_model,
-            )
-
-        # Clean up temporary file if it exists
-        if os.path.exists("temp_img2img.png"):
-            os.remove("temp_img2img.png")
-
-        return load_generated_images()
-
-    except Exception:
-        import traceback
-
-        print(traceback.format_exc())
-        # Clean up temporary file if it exists
-        if os.path.exists("temp_img2img.png"):
-            os.remove("temp_img2img.png")
-        return [Image.new("RGB", (512, 512), color="black")]
-
-
-# Create Gradio interface
-with gr.Blocks(title="LightDiffusion Web UI") as demo:
-    gr.Markdown("# LightDiffusion Web UI")
-    gr.Markdown("Generate AI images using LightDiffusion")
-    gr.Markdown(
-        "This is the demo for LightDiffusion, the fastest diffusion backend for generating images. https://github.com/LightDiffusion/LightDiffusion-Next"
-    )
-
-    with gr.Row():
-        with gr.Column():
-            # Input components
-            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
-
-            with gr.Row():
-                width = gr.Slider(
-                    minimum=64, maximum=2048, value=512, step=64, label="Width"
-                )
-                height = gr.Slider(
-                    minimum=64, maximum=2048, value=512, step=64, label="Height"
-                )
-
-            with gr.Row():
-                num_images = gr.Slider(
-                    minimum=1, maximum=10, value=1, step=1, label="Number of Images"
-                )
-                batch_size = gr.Slider(
-                    minimum=1, maximum=4, value=1, step=1, label="Batch Size"
-                )
-
-            with gr.Row():
-                hires_fix = gr.Checkbox(label="HiRes Fix")
-                adetailer = gr.Checkbox(label="Auto Face/Body Enhancement")
-                enhance_prompt = gr.Checkbox(label="Enhance Prompt")
-                stable_fast = gr.Checkbox(label="Stable Fast Mode")
-
-            with gr.Row():
-                reuse_seed = gr.Checkbox(label="Reuse Seed")
-                flux_enabled = gr.Checkbox(label="Flux Mode")
-                prio_speed = gr.Checkbox(label="Prioritize Speed")
-                realistic_model = gr.Checkbox(label="Realistic Model")
-
-            with gr.Row():
-                img2img_enabled = gr.Checkbox(label="Image to Image Mode")
-                img2img_image = gr.Image(label="Input Image for img2img", visible=False)
-
-            # Make input image visible only when img2img is enabled
-            img2img_enabled.change(
-                fn=lambda x: gr.update(visible=x),
-                inputs=[img2img_enabled],
-                outputs=[img2img_image],
-            )
-
-            generate_btn = gr.Button("Generate")
-
-    # Output gallery
-    gallery = gr.Gallery(
-        label="Generated Images",
-        show_label=True,
-        elem_id="gallery",
-        columns=[2],
-        rows=[2],
-        object_fit="contain",
-        height="auto",
-    )
-
-    # Connect generate button to pipeline
-    generate_btn.click(
-        fn=generate_images,
-        inputs=[
-            prompt,
-            width,
-            height,
-            num_images,
-            batch_size,
-            hires_fix,
-            adetailer,
-            enhance_prompt,
-            img2img_enabled,
-            img2img_image,
-            stable_fast,
-            reuse_seed,
-            flux_enabled,
-            prio_speed,
-            realistic_model,
-        ],
-        outputs=gallery,
-    )
-
-
-def is_huggingface_space():
-    return "SPACE_ID" in os.environ
-
-
-# For local testing
-if __name__ == "__main__":
-    if is_huggingface_space():
-        demo.launch(
-            debug=False,
-            server_name="0.0.0.0",
-            server_port=7860, # Standard HF Spaces port
-        )
-    else:
-        demo.launch(
-            server_name="0.0.0.0",
-            server_port=8000,
-            auth=None,
-            share=True, # Only enable sharing locally
-            debug=True,
-        )
+import glob
+import gradio as gr
+import sys
+import os
+from PIL import Image
+import numpy as np
+import spaces
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
+
+from modules.user.pipeline import pipeline
+import torch
+
+
+def load_generated_images():
+    """Load generated images with given prefix from disk"""
+    image_files = glob.glob("./_internal/output/**/*.png")
+
+    # If there are no image files, return
+    if not image_files:
+        return []
+
+    # Sort files by modification time in descending order
+    image_files.sort(key=os.path.getmtime, reverse=True)
+
+    # Get most recent timestamp
+    latest_time = os.path.getmtime(image_files[0])
+
+    # Get all images from same batch (within 1 second of most recent)
+    batch_images = []
+    for file in image_files:
+        if abs(os.path.getmtime(file) - latest_time) < 1.0:
+            try:
+                img = Image.open(file)
+                batch_images.append(img)
+            except:
+                continue
+
+    if not batch_images:
+        return []
+    return batch_images
+
+
+@spaces.GPU(duration=300)
+def generate_images(
+    prompt: str,
+    width: int = 512,
+    height: int = 512,
+    num_images: int = 1,
+    batch_size: int = 1,
+    hires_fix: bool = False,
+    adetailer: bool = False,
+    enhance_prompt: bool = False,
+    img2img_enabled: bool = False,
+    img2img_image: str = None,
+    stable_fast: bool = False,
+    reuse_seed: bool = False,
+    flux_enabled: bool = False,
+    prio_speed: bool = False,
+    realistic_model: bool = False,
+    progress=gr.Progress(),
+):
+    """Generate images using the LightDiffusion pipeline"""
+    try:
+        if img2img_enabled and img2img_image is not None:
+            # Convert numpy array to PIL Image
+            if isinstance(img2img_image, np.ndarray):
+                img_pil = Image.fromarray(img2img_image)
+                img_pil.save("temp_img2img.png")
+                prompt = "temp_img2img.png"
+
+        # Run pipeline and capture saved images
+        with torch.inference_mode():
+            pipeline(
+                prompt=prompt,
+                w=width,
+                h=height,
+                number=num_images,
+                batch=batch_size,
+                hires_fix=hires_fix,
+                adetailer=adetailer,
+                enhance_prompt=enhance_prompt,
+                img2img=img2img_enabled,
+                stable_fast=stable_fast,
+                reuse_seed=reuse_seed,
+                flux_enabled=flux_enabled,
+                prio_speed=prio_speed,
+                autohdr=True,
+                realistic_model=realistic_model,
+            )
+
+        # Clean up temporary file if it exists
+        if os.path.exists("temp_img2img.png"):
+            os.remove("temp_img2img.png")
+
+        return load_generated_images()
+
+    except Exception:
+        import traceback
+
+        print(traceback.format_exc())
+        # Clean up temporary file if it exists
+        if os.path.exists("temp_img2img.png"):
+            os.remove("temp_img2img.png")
+        return [Image.new("RGB", (512, 512), color="black")]
+
+
+# Create Gradio interface
+with gr.Blocks(title="LightDiffusion Web UI") as demo:
+    gr.Markdown("# LightDiffusion Web UI")
+    gr.Markdown("Generate AI images using LightDiffusion")
+    gr.Markdown(
+        "This is the demo for LightDiffusion, the fastest diffusion backend for generating images. https://github.com/LightDiffusion/LightDiffusion-Next"
+    )
+
+    with gr.Row():
+        with gr.Column():
+            # Input components
+            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
+
+            with gr.Row():
+                width = gr.Slider(
+                    minimum=64, maximum=2048, value=512, step=64, label="Width"
+                )
+                height = gr.Slider(
+                    minimum=64, maximum=2048, value=512, step=64, label="Height"
+                )
+
+            with gr.Row():
+                num_images = gr.Slider(
+                    minimum=1, maximum=10, value=1, step=1, label="Number of Images"
+                )
+                batch_size = gr.Slider(
+                    minimum=1, maximum=4, value=1, step=1, label="Batch Size"
+                )
+
+            with gr.Row():
+                hires_fix = gr.Checkbox(label="HiRes Fix")
+                adetailer = gr.Checkbox(label="Auto Face/Body Enhancement")
+                enhance_prompt = gr.Checkbox(label="Enhance Prompt")
+                stable_fast = gr.Checkbox(label="Stable Fast Mode")
+
+            with gr.Row():
+                reuse_seed = gr.Checkbox(label="Reuse Seed")
+                flux_enabled = gr.Checkbox(label="Flux Mode")
+                prio_speed = gr.Checkbox(label="Prioritize Speed")
+                realistic_model = gr.Checkbox(label="Realistic Model")
+
+            with gr.Row():
+                img2img_enabled = gr.Checkbox(label="Image to Image Mode")
+                img2img_image = gr.Image(label="Input Image for img2img", visible=False)
+
+            # Make input image visible only when img2img is enabled
+            img2img_enabled.change(
+                fn=lambda x: gr.update(visible=x),
+                inputs=[img2img_enabled],
+                outputs=[img2img_image],
+            )
+
+            generate_btn = gr.Button("Generate")
+
+    # Output gallery
+    gallery = gr.Gallery(
+        label="Generated Images",
+        show_label=True,
+        elem_id="gallery",
+        columns=[2],
+        rows=[2],
+        object_fit="contain",
+        height="auto",
+    )
+
+    # Connect generate button to pipeline
+    generate_btn.click(
+        fn=generate_images,
+        inputs=[
+            prompt,
+            width,
+            height,
+            num_images,
+            batch_size,
+            hires_fix,
+            adetailer,
+            enhance_prompt,
+            img2img_enabled,
+            img2img_image,
+            stable_fast,
+            reuse_seed,
+            flux_enabled,
+            prio_speed,
+            realistic_model,
+        ],
+        outputs=gallery,
+    )
+
+
+def is_huggingface_space():
+    return "SPACE_ID" in os.environ
+
+
+# For local testing
+if __name__ == "__main__":
+    if is_huggingface_space():
+        demo.launch(
+            debug=False,
+            server_name="0.0.0.0",
+            server_port=7860, # Standard HF Spaces port
+        )
+    else:
+        demo.launch(
+            server_name="0.0.0.0",
+            server_port=8000,
+            auth=None,
+            share=True, # Only enable sharing locally
+            debug=True,
+        )