Meismaxandmaxisme committed on
Commit
97e2b41
·
verified ·
1 Parent(s): 30ccbc3

Upload 9 files

Browse files
Files changed (9) hide show
  1. src/__init__.py +0 -0
  2. src/app.py +554 -0
  3. src/app_settings.py +124 -0
  4. src/constants.py +26 -0
  5. src/context.py +109 -0
  6. src/image_ops.py +15 -0
  7. src/paths.py +110 -0
  8. src/state.py +42 -0
  9. src/utils.py +38 -0
src/__init__.py ADDED
File without changes
src/app.py ADDED
@@ -0,0 +1,554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""FastSD CPU entry point: imports and command-line argument definitions.

Builds the ArgumentParser shared by every launch mode (desktop GUI, Web UI,
Web API, MCP server, realtime UI and plain CLI generation).
"""
import json
from argparse import ArgumentParser

from PIL import Image

import constants
from backend.controlnet import controlnet_settings_from_dict
from backend.device import get_device_name
from backend.models.gen_images import ImageFormat
from backend.models.lcmdiffusion_setting import DiffusionTask
from backend.upscale.tiled_upscale import generate_upscaled_image
from constants import APP_VERSION, DEVICE
from frontend.webui.image_variations_ui import generate_image_variations
from models.interface_types import InterfaceType
from paths import FastStableDiffusionPaths, ensure_path
from state import get_context, get_settings
from utils import show_system_info

parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
parser.add_argument(
    "-s",
    "--share",
    action="store_true",
    help="Create sharable link(Web UI)",
    required=False,
)

# Launch modes are mutually exclusive: only one frontend can run at a time.
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
    "-g",
    "--gui",
    action="store_true",
    help="Start desktop GUI",
)
group.add_argument(
    "-w",
    "--webui",
    action="store_true",
    help="Start Web UI",
)
group.add_argument(
    "-a",
    "--api",
    action="store_true",
    help="Start Web API server",
)
group.add_argument(
    "-m",
    "--mcp",
    action="store_true",
    help="Start MCP(Model Context Protocol) server",
)
group.add_argument(
    "-r",
    "--realtime",
    action="store_true",
    help="Start realtime inference UI(experimental)",
)
group.add_argument(
    "-v",
    "--version",
    action="store_true",
    help="Version",
)

parser.add_argument(
    "-b",
    "--benchmark",
    action="store_true",
    help="Run inference benchmark on the selected device",
)
parser.add_argument(
    "--lcm_model_id",
    type=str,
    help="Model ID or path,Default stabilityai/sd-turbo",
    default="stabilityai/sd-turbo",
)
parser.add_argument(
    "--openvino_lcm_model_id",
    type=str,
    help="OpenVINO Model ID or path,Default rupeshs/sd-turbo-openvino",
    default="rupeshs/sd-turbo-openvino",
)
parser.add_argument(
    "--prompt",
    type=str,
    help="Describe the image you want to generate",
    default="",
)
parser.add_argument(
    "--negative_prompt",
    type=str,
    help="Describe what you want to exclude from the generation",
    default="",
)
parser.add_argument(
    "--image_height",
    type=int,
    help="Height of the image",
    default=512,
)
parser.add_argument(
    "--image_width",
    type=int,
    help="Width of the image",
    default=512,
)
parser.add_argument(
    "--inference_steps",
    type=int,
    help="Number of steps,default : 1",
    default=1,
)
parser.add_argument(
    "--guidance_scale",
    type=float,
    help="Guidance scale,default : 1.0",
    default=1.0,
)

parser.add_argument(
    "--number_of_images",
    type=int,
    help="Number of images to generate ,default : 1",
    default=1,
)
parser.add_argument(
    "--seed",
    type=int,
    help="Seed,default : -1 (disabled) ",
    default=-1,
)
parser.add_argument(
    "--use_openvino",
    action="store_true",
    help="Use OpenVINO model",
)

parser.add_argument(
    "--use_offline_model",
    action="store_true",
    help="Use offline model",
)
parser.add_argument(
    "--clip_skip",
    type=int,
    help="CLIP Skip (1-12), default : 1 (disabled) ",
    default=1,
)
parser.add_argument(
    "--token_merging",
    type=float,
    help="Token merging scale, 0.0 - 1.0, default : 0.0",
    default=0.0,
)

parser.add_argument(
    "--use_safety_checker",
    action="store_true",
    help="Use safety checker",
)
parser.add_argument(
    "--use_lcm_lora",
    action="store_true",
    help="Use LCM-LoRA",
)
parser.add_argument(
    "--base_model_id",
    type=str,
    help="LCM LoRA base model ID,Default Lykon/dreamshaper-8",
    default="Lykon/dreamshaper-8",
)
parser.add_argument(
    "--lcm_lora_id",
    type=str,
    help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5",
    default="latent-consistency/lcm-lora-sdv1-5",
)
parser.add_argument(
    "-i",
    "--interactive",
    action="store_true",
    help="Interactive CLI mode",
)
parser.add_argument(
    "-t",
    "--use_tiny_auto_encoder",
    action="store_true",
    help="Use Tiny AutoEncoder for TAESD/TAESDXL/TAEF1",
)
parser.add_argument(
    "-f",
    "--file",
    type=str,
    help="Input image for img2img mode",
    default="",
)
parser.add_argument(
    "--img2img",
    action="store_true",
    help="img2img mode; requires input file via -f argument",
)
parser.add_argument(
    "--batch_count",
    type=int,
    help="Number of sequential generations",
    default=1,
)
parser.add_argument(
    "--strength",
    type=float,
    help="Denoising strength for img2img and Image variations",
    default=0.3,
)
parser.add_argument(
    "--sdupscale",
    action="store_true",
    help="Tiled SD upscale,works only for the resolution 512x512,(2x upscale)",
)
parser.add_argument(
    "--upscale",
    action="store_true",
    help="EDSR SD upscale ",
)
parser.add_argument(
    "--custom_settings",
    type=str,
    help="JSON file containing custom generation settings",
    default=None,
)
parser.add_argument(
    "--usejpeg",
    action="store_true",
    help="Images will be saved as JPEG format",
)
parser.add_argument(
    "--noimagesave",
    action="store_true",
    help="Disable image saving",
)
parser.add_argument(
    "--imagequality", type=int, help="Output image quality [0 to 100]", default=90
)
parser.add_argument(
    "--lora",
    type=str,
    # Raw string: the Windows example path contains backslashes that would
    # otherwise be (invalid) escape sequences in a regular string literal.
    help=r"LoRA model full path e.g D:\lora_models\CuteCartoon15V-LiberteRedmodModel-Cartoon-CuteCartoonAF.safetensors",
    default=None,
)
parser.add_argument(
    "--lora_weight",
    type=float,
    help="LoRA adapter weight [0 to 1.0]",
    default=0.5,
)
parser.add_argument(
    "--port",
    type=int,
    help="Web server port",
    default=8000,
)
args = parser.parse_args()

if args.version:
    print(APP_VERSION)
    exit()

# parser.print_help()
print("FastSD CPU - ", APP_VERSION)
show_system_info()
print(f"Using device : {constants.DEVICE}")

# Settings come from the same singleton regardless of frontend; the previous
# `if args.webui: ... else: ...` had two identical branches.
app_settings = get_settings()

print(f"Output path : {app_settings.settings.generated_images.path}")
ensure_path(app_settings.settings.generated_images.path)

print(f"Found {len(app_settings.lcm_models)} LCM models in config/lcm-models.txt")
print(
    f"Found {len(app_settings.stable_diffsuion_models)} stable diffusion models in config/stable-diffusion-models.txt"
)
print(
    f"Found {len(app_settings.lcm_lora_models)} LCM-LoRA models in config/lcm-lora-models.txt"
)
print(
    f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in config/openvino-lcm-models.txt"
)

app_settings.settings.generated_images.save_image = not args.noimagesave
app_settings.settings.generated_images.save_image_quality = args.imagequality

if not args.realtime:
    # To minimize realtime mode dependencies
    from backend.upscale.upscaler import upscale_image
    from frontend.cli_interactive import interactive_mode

if args.gui:
    from frontend.gui.ui import start_gui

    print("Starting desktop GUI mode(Qt)")
    start_gui(
        [],
        app_settings,
    )
elif args.webui:
    from frontend.webui.ui import start_webui

    print("Starting web UI mode")
    start_webui(
        args.share,
    )
elif args.realtime:
    from frontend.webui.realtime_ui import start_realtime_text_to_image

    print("Starting realtime text to image(EXPERIMENTAL)")
    start_realtime_text_to_image(args.share)
elif args.api:
    from backend.api.web import start_web_server

    start_web_server(args.port)
elif args.mcp:
    from backend.api.mcp_server import start_mcp_server

    start_mcp_server(args.port)
else:
    # Plain CLI generation mode: copy every CLI flag into the settings object.
    context = get_context(InterfaceType.CLI)
    config = app_settings.settings

    if args.use_openvino:
        config.lcm_diffusion_setting.openvino_lcm_model_id = args.openvino_lcm_model_id
    else:
        config.lcm_diffusion_setting.lcm_model_id = args.lcm_model_id

    config.lcm_diffusion_setting.prompt = args.prompt
    config.lcm_diffusion_setting.negative_prompt = args.negative_prompt
    config.lcm_diffusion_setting.image_height = args.image_height
    config.lcm_diffusion_setting.image_width = args.image_width
    config.lcm_diffusion_setting.guidance_scale = args.guidance_scale
    config.lcm_diffusion_setting.number_of_images = args.number_of_images
    config.lcm_diffusion_setting.inference_steps = args.inference_steps
    config.lcm_diffusion_setting.strength = args.strength
    config.lcm_diffusion_setting.seed = args.seed
    config.lcm_diffusion_setting.use_openvino = args.use_openvino
    config.lcm_diffusion_setting.use_tiny_auto_encoder = args.use_tiny_auto_encoder
    config.lcm_diffusion_setting.use_lcm_lora = args.use_lcm_lora
    config.lcm_diffusion_setting.lcm_lora.base_model_id = args.base_model_id
    config.lcm_diffusion_setting.lcm_lora.lcm_lora_id = args.lcm_lora_id
    config.lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
    config.lcm_diffusion_setting.lora.enabled = False
    config.lcm_diffusion_setting.lora.path = args.lora
    config.lcm_diffusion_setting.lora.weight = args.lora_weight
    config.lcm_diffusion_setting.lora.fuse = True
    if config.lcm_diffusion_setting.lora.path:
        config.lcm_diffusion_setting.lora.enabled = True
    if args.usejpeg:
        config.generated_images.format = ImageFormat.JPEG.value.upper()
    # A non-negative seed means "use a fixed seed".
    config.lcm_diffusion_setting.use_seed = args.seed > -1
    config.lcm_diffusion_setting.use_offline_model = args.use_offline_model
    config.lcm_diffusion_setting.clip_skip = args.clip_skip
    config.lcm_diffusion_setting.token_merging = args.token_merging
    config.lcm_diffusion_setting.use_safety_checker = args.use_safety_checker

    # Read custom settings from JSON file
    custom_settings = {}
    if args.custom_settings:
        with open(args.custom_settings) as f:
            custom_settings = json.load(f)

    # Basic ControlNet settings; if ControlNet is enabled, an image is
    # required even in txt2img mode
    config.lcm_diffusion_setting.controlnet = None
    controlnet_settings_from_dict(
        config.lcm_diffusion_setting,
        custom_settings,
    )

    # Interactive mode
    if args.interactive:
        # wrapper(interactive_mode, config, context)
        config.lcm_diffusion_setting.lora.fuse = False
        interactive_mode(config, context)

    # Start of non-interactive CLI image generation
    if args.img2img and args.file != "":
        config.lcm_diffusion_setting.init_image = Image.open(args.file)
        config.lcm_diffusion_setting.diffusion_task = DiffusionTask.image_to_image.value
    elif args.img2img and args.file == "":
        print("Error : You need to specify a file in img2img mode")
        exit()
    elif args.upscale and args.file == "" and args.custom_settings is None:
        print("Error : You need to specify a file in SD upscale mode")
        exit()
    elif (
        args.prompt == ""
        and args.file == ""
        and args.custom_settings is None
        and not args.benchmark
    ):
        print("Error : You need to provide a prompt")
        exit()

    if args.upscale:
        # image = Image.open(args.file)
        output_path = FastStableDiffusionPaths.get_upscale_filepath(
            args.file,
            2,
            config.generated_images.format,
        )
        upscale_image(
            context,
            args.file,
            output_path,
            2,
        )
    # Perform Tiled SD upscale (EXPERIMENTAL)
    elif args.sdupscale:
        if args.use_openvino:
            config.lcm_diffusion_setting.strength = 0.3
        upscale_settings = None
        if custom_settings != {}:
            upscale_settings = custom_settings
        filepath = args.file
        output_format = config.generated_images.format
        if upscale_settings:
            filepath = upscale_settings["source_file"]
            output_format = upscale_settings["output_format"].upper()
        output_path = FastStableDiffusionPaths.get_upscale_filepath(
            filepath,
            2,
            output_format,
        )

        generate_upscaled_image(
            config,
            filepath,
            config.lcm_diffusion_setting.strength,
            upscale_settings=upscale_settings,
            context=context,
            tile_overlap=32 if config.lcm_diffusion_setting.use_openvino else 16,
            output_path=output_path,
            image_format=output_format,
        )
        exit()
    # If img2img argument is set and prompt is empty, use image variations mode
    elif args.img2img and args.prompt == "":
        for _ in range(args.batch_count):
            generate_image_variations(
                config.lcm_diffusion_setting.init_image, args.strength
            )
    else:
        if args.benchmark:
            print("Initializing benchmark...")
            bench_lcm_setting = config.lcm_diffusion_setting
            bench_lcm_setting.prompt = "a cat"
            bench_lcm_setting.use_tiny_auto_encoder = False
            # Warm-up pass so model load/compile time is excluded from timing.
            context.generate_text_to_image(
                settings=config,
                device=DEVICE,
            )

            latencies = []

            print("Starting benchmark please wait...")
            for _ in range(3):
                context.generate_text_to_image(
                    settings=config,
                    device=DEVICE,
                )
                latencies.append(context.latency)

            avg_latency = sum(latencies) / 3

            # Second run with Tiny AutoEncoder enabled (warm-up again).
            bench_lcm_setting.use_tiny_auto_encoder = True

            context.generate_text_to_image(
                settings=config,
                device=DEVICE,
            )
            latencies = []
            for _ in range(3):
                context.generate_text_to_image(
                    settings=config,
                    device=DEVICE,
                )
                latencies.append(context.latency)

            avg_latency_taesd = sum(latencies) / 3

            benchmark_name = (
                "OpenVINO" if config.lcm_diffusion_setting.use_openvino else "PyTorch"
            )

            if bench_lcm_setting.use_openvino:
                bench_model_id = bench_lcm_setting.openvino_lcm_model_id
            elif bench_lcm_setting.use_lcm_lora:
                bench_model_id = bench_lcm_setting.lcm_lora.base_model_id
            else:
                bench_model_id = bench_lcm_setting.lcm_model_id

            benchmark_result = [
                ["Device", f"{DEVICE.upper()},{get_device_name()}"],
                ["Stable Diffusion Model", bench_model_id],
                [
                    "Image Size ",
                    f"{bench_lcm_setting.image_width}x{bench_lcm_setting.image_height}",
                ],
                [
                    "Inference Steps",
                    f"{bench_lcm_setting.inference_steps}",
                ],
                [
                    "Benchmark Passes",
                    3,
                ],
                [
                    "Average Latency",
                    f"{round(avg_latency, 3)} sec",
                ],
                [
                    "Average Latency(TAESD* enabled)",
                    f"{round(avg_latency_taesd, 3)} sec",
                ],
            ]
            print()
            print(
                f" FastSD Benchmark - {benchmark_name:8} "
            )
            print("-" * 80)
            for benchmark in benchmark_result:
                print(f"{benchmark[0]:35} - {benchmark[1]}")
            print("-" * 80)
            print("*TAESD - Tiny AutoEncoder for Stable Diffusion")

        else:
            for _ in range(args.batch_count):
                context.generate_text_to_image(
                    settings=config,
                    device=DEVICE,
                )
src/app_settings.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from copy import deepcopy
from os import makedirs, path

import yaml
from constants import (
    LCM_LORA_MODELS_FILE,
    LCM_MODELS_FILE,
    OPENVINO_LCM_MODELS_FILE,
    SD_MODELS_FILE,
)
from paths import FastStableDiffusionPaths, join_paths
from utils import get_files_in_dir, get_models_from_text_file

from models.settings import Settings


class AppSettings:
    """Loads, holds and persists the application settings and model lists.

    Model lists are read once at construction time from the text files under
    the configs directory and from the GGUF model folders; the YAML settings
    file itself is only read when ``load()`` is called.
    """

    def __init__(self):
        self.config_path = FastStableDiffusionPaths().get_app_settings_path()
        # NOTE: attribute name keeps the historical "diffsuion" typo because
        # the matching public property is part of the external interface.
        self._stable_diffsuion_models = get_models_from_text_file(
            FastStableDiffusionPaths().get_models_config_path(SD_MODELS_FILE)
        )
        self._lcm_lora_models = get_models_from_text_file(
            FastStableDiffusionPaths().get_models_config_path(LCM_LORA_MODELS_FILE)
        )
        self._openvino_lcm_models = get_models_from_text_file(
            FastStableDiffusionPaths().get_models_config_path(OPENVINO_LCM_MODELS_FILE)
        )
        self._lcm_models = get_models_from_text_file(
            FastStableDiffusionPaths().get_models_config_path(LCM_MODELS_FILE)
        )
        self._gguf_diffusion_models = get_files_in_dir(
            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "diffusion")
        )
        self._gguf_clip_models = get_files_in_dir(
            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "clip")
        )
        self._gguf_vae_models = get_files_in_dir(
            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "vae")
        )
        self._gguf_t5xxl_models = get_files_in_dir(
            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "t5xxl")
        )
        # Parsed Settings object; populated by load().
        self._config = None

    @property
    def settings(self):
        """The parsed Settings object (None until load() succeeds)."""
        return self._config

    @property
    def stable_diffsuion_models(self):
        return self._stable_diffsuion_models

    @property
    def openvino_lcm_models(self):
        return self._openvino_lcm_models

    @property
    def lcm_models(self):
        return self._lcm_models

    @property
    def lcm_lora_models(self):
        return self._lcm_lora_models

    @property
    def gguf_diffusion_models(self):
        return self._gguf_diffusion_models

    @property
    def gguf_clip_models(self):
        return self._gguf_clip_models

    @property
    def gguf_vae_models(self):
        return self._gguf_vae_models

    @property
    def gguf_t5xxl_models(self):
        return self._gguf_t5xxl_models

    def load(self, skip_file=False):
        """Load settings from the YAML config file.

        skip_file: when True, ignore the file entirely and use defaults.
        Creates a default config file (and its directory) if none exists;
        exits the process if the default file cannot be written.
        """
        if skip_file:
            print("Skipping config file")
            settings_dict = self._load_default()
            self._config = Settings.model_validate(settings_dict)
        else:
            if not path.exists(self.config_path):
                base_dir = path.dirname(self.config_path)
                if not path.exists(base_dir):
                    # exist_ok guards against a concurrent process creating
                    # the directory between the check and the call.
                    makedirs(base_dir, exist_ok=True)
                try:
                    print("Settings not found creating default settings")
                    with open(self.config_path, "w") as file:
                        yaml.dump(
                            self._load_default(),
                            file,
                        )
                except Exception as ex:
                    print(f"Error in creating settings : {ex}")
                    exit()
            try:
                with open(self.config_path) as file:
                    settings_dict = yaml.safe_load(file)
                    self._config = Settings.model_validate(settings_dict)
            except Exception as ex:
                print(f"Error in loading settings : {ex}")

    def save(self):
        """Persist the current settings to the YAML config file.

        The (potentially large, non-serializable) init_image is cleared on a
        deep copy before dumping so the original settings stay untouched.
        """
        try:
            with open(self.config_path, "w") as file:
                tmp_cfg = deepcopy(self._config)
                tmp_cfg.lcm_diffusion_setting.init_image = None
                configurations = tmp_cfg.model_dump(
                    exclude=["init_image"],
                )
                if configurations:
                    yaml.dump(configurations, file)
        except Exception as ex:
            print(f"Error in saving settings : {ex}")

    def _load_default(self) -> dict:
        """Return the default settings as a plain dict."""
        default_config = Settings()
        return default_config.model_dump()
src/constants.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Application-wide constants for FastSD CPU."""
from os import environ, cpu_count

cpu_cores = cpu_count()
# Use half the logical cores (0 if the count is unknown).
cpus = cpu_cores // 2 if cpu_cores else 0
APP_VERSION = "v1.0.0 beta 252"
LCM_DEFAULT_MODEL = "stabilityai/sd-turbo"
LCM_DEFAULT_MODEL_OPENVINO = "rupeshs/sd-turbo-openvino"
APP_NAME = "FastSD CPU"
APP_SETTINGS_FILE = "settings.yaml"
RESULTS_DIRECTORY = "results"
CONFIG_DIRECTORY = "configs"
# Inference device; overridable via the DEVICE environment variable.
DEVICE = environ.get("DEVICE", "cpu")
SD_MODELS_FILE = "stable-diffusion-models.txt"
LCM_LORA_MODELS_FILE = "lcm-lora-models.txt"
OPENVINO_LCM_MODELS_FILE = "openvino-lcm-models.txt"
TAESD_MODEL = "madebyollin/taesd"
TAESDXL_MODEL = "madebyollin/taesdxl"
TAESD_MODEL_OPENVINO = "rupeshs/taesd-ov"
LCM_MODELS_FILE = "lcm-models.txt"
TAESDXL_MODEL_OPENVINO = "rupeshs/taesdxl-openvino"
LORA_DIRECTORY = "lora_models"
CONTROLNET_DIRECTORY = "controlnet_models"
MODELS_DIRECTORY = "models"
# int(...) keeps the type consistent: environ.get returns a str when the
# variable is set, but the fallback default is an int.
GGUF_THREADS = int(environ.get("GGUF_THREADS", cpus))
TAEF1_MODEL_OPENVINO = "rupeshs/taef1-openvino"
SAFETY_CHECKER_MODEL = "Falconsai/nsfw_image_detection"
src/context.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from pprint import pprint
from time import perf_counter
from traceback import print_exc
from typing import Any

# NOTE(review): Settings is re-exported through app_settings (it is defined
# in models.settings) — confirm before changing this import.
from app_settings import Settings
from backend.image_saver import ImageSaver
from backend.lcm_text_to_image import LCMTextToImage
from backend.models.lcmdiffusion_setting import DiffusionTask
from backend.utils import get_blank_image
from models.interface_types import InterfaceType


class Context:
    """Owns a text-to-image pipeline and tracks the last run's latency/error."""

    def __init__(
        self,
        interface_type: InterfaceType,
        device="cpu",
    ):
        self.interface_type = interface_type.value
        self.lcm_text_to_image = LCMTextToImage(device)
        self._latency = 0  # seconds taken by the most recent generate call
        self._error = ""  # error message of the most recent failed call, "" on success

    @property
    def latency(self):
        """Wall-clock seconds of the most recent generation."""
        return self._latency

    @property
    def error(self):
        """Error message of the most recent generation ("" if it succeeded)."""
        return self._error

    def generate_text_to_image(
        self,
        settings: Settings,
        reshape: bool = False,
        device: str = "cpu",
        save_config=True,
    ) -> Any:
        """Run one generation pass and return the images, or None on failure.

        Side effects: updates self.latency and self.error, optionally saves
        the settings file, and replaces unsafe images with a blank image when
        the safety checker is enabled.
        """
        try:
            self._error = ""
            tick = perf_counter()
            # Local import avoids a circular dependency (state imports Context).
            from state import get_settings

            if (
                settings.lcm_diffusion_setting.diffusion_task
                == DiffusionTask.text_to_image.value
            ):
                settings.lcm_diffusion_setting.init_image = None

            if save_config:
                get_settings().save()

            pprint(settings.lcm_diffusion_setting.model_dump())
            if not settings.lcm_diffusion_setting.lcm_lora:
                return None
            self.lcm_text_to_image.init(
                device,
                settings.lcm_diffusion_setting,
            )

            images = self.lcm_text_to_image.generate(
                settings.lcm_diffusion_setting,
                reshape,
            )

            elapsed = perf_counter() - tick
            self._latency = elapsed
            print(f"Latency : {elapsed:.2f} seconds")
            if settings.lcm_diffusion_setting.controlnet:
                if settings.lcm_diffusion_setting.controlnet.enabled:
                    # Append the control image so the caller can display it.
                    images.append(
                        settings.lcm_diffusion_setting.controlnet._control_image
                    )

            if settings.lcm_diffusion_setting.use_safety_checker:
                print("Safety Checker is enabled")
                from state import get_safety_checker

                safety_checker = get_safety_checker()
                blank_image = get_blank_image(
                    settings.lcm_diffusion_setting.image_width,
                    settings.lcm_diffusion_setting.image_height,
                )
                for idx, image in enumerate(images):
                    if not safety_checker.is_safe(image):
                        images[idx] = blank_image
        except Exception as exception:
            print(f"Error in generating images: {exception}")
            self._error = str(exception)
            print_exc()
            return None
        return images

    def save_images(
        self,
        images: Any,
        settings: Settings,
    ) -> list[str]:
        """Persist *images* to disk if saving is enabled; return the saved paths."""
        saved_images = []
        if images and settings.generated_images.save_image:
            saved_images = ImageSaver.save_images(
                settings.generated_images.path,
                images=images,
                lcm_diffusion_setting=settings.lcm_diffusion_setting,
                format=settings.generated_images.format,
                jpeg_quality=settings.generated_images.save_image_quality,
            )
        return saved_images
src/image_ops.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from PIL import Image


def resize_pil_image(
    pil_image: Image.Image,
    image_width: int,
    image_height: int,
) -> Image.Image:
    """Return *pil_image* converted to RGB and resized to the given size.

    Uses LANCZOS resampling for high-quality downscaling; the input image is
    not modified (convert/resize return new images).
    """
    return pil_image.convert("RGB").resize(
        (
            image_width,
            image_height,
        ),
        Image.Resampling.LANCZOS,
    )
src/paths.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import constants
from pathlib import Path
from time import time
from utils import get_image_file_extension


def join_paths(
    first_path: str,
    second_path: str,
) -> str:
    """Join two path components using the OS separator."""
    return os.path.join(first_path, second_path)


def get_file_name(file_path: str) -> str:
    """Return the file name of *file_path* without its extension."""
    return Path(file_path).stem


def get_app_path() -> str:
    """Return the application root (the parent of this source directory)."""
    app_dir = os.path.dirname(__file__)
    work_dir = os.path.dirname(app_dir)
    return work_dir


def get_configs_path() -> str:
    """Return the absolute path of the configs directory."""
    config_path = join_paths(get_app_path(), constants.CONFIG_DIRECTORY)
    return config_path


class FastStableDiffusionPaths:
    """Namespace of path helpers for settings, results and model folders."""

    @staticmethod
    def get_app_settings_path() -> str:
        """Path of the YAML settings file inside the configs directory."""
        configs_path = get_configs_path()
        settings_path = join_paths(
            configs_path,
            constants.APP_SETTINGS_FILE,
        )
        return settings_path

    @staticmethod
    def get_results_path() -> str:
        """Directory where generated images are written."""
        results_path = join_paths(get_app_path(), constants.RESULTS_DIRECTORY)
        return results_path

    @staticmethod
    def get_css_path() -> str:
        """Path of the web UI stylesheet."""
        app_dir = os.path.dirname(__file__)
        css_path = os.path.join(
            app_dir,
            "frontend",
            "webui",
            "css",
            "style.css",
        )
        return css_path

    @staticmethod
    def get_models_config_path(model_config_file: str) -> str:
        """Path of a model-list text file inside the configs directory."""
        configs_path = get_configs_path()
        models_path = join_paths(
            configs_path,
            model_config_file,
        )
        return models_path

    @staticmethod
    def get_upscale_filepath(
        file_path_src: str,
        scale_factor: int,
        format: str,
    ) -> str:
        """Build a timestamped output path for an upscaled image.

        Falls back to the "fastsdcpu" base name when no source file is given.
        """
        if file_path_src:
            file_name_src = get_file_name(file_path_src)
        else:
            file_name_src = "fastsdcpu"

        extension = get_image_file_extension(format)
        upscaled_filepath = join_paths(
            FastStableDiffusionPaths.get_results_path(),
            f"{file_name_src}_{int(scale_factor)}x_upscale_{int(time())}{extension}",
        )
        return upscaled_filepath

    @staticmethod
    def get_lora_models_path() -> str:
        """Directory containing LoRA model files."""
        lora_models_path = join_paths(get_app_path(), constants.LORA_DIRECTORY)
        return lora_models_path

    @staticmethod
    def get_controlnet_models_path() -> str:
        """Directory containing ControlNet model files."""
        controlnet_models_path = join_paths(
            get_app_path(), constants.CONTROLNET_DIRECTORY
        )
        return controlnet_models_path

    @staticmethod
    def get_gguf_models_path() -> str:
        """Directory containing GGUF model files (<app>/models/gguf)."""
        models_path = join_paths(get_app_path(), constants.MODELS_DIRECTORY)
        gguf_models_path = join_paths(models_path, "gguf")
        return gguf_models_path


def get_base_folder_name(path: str) -> str:
    """Return the last component of *path*."""
    return os.path.basename(path)


def ensure_path(path: str) -> None:
    """Ensure that the directory exists."""
    # exist_ok makes the pre-check unnecessary and avoids a create race.
    os.makedirs(path, exist_ok=True)
src/state.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from typing import Optional

from app_settings import AppSettings
from backend.safety_checker import SafetyChecker
from context import Context
from models.interface_types import InterfaceType


class _AppState:
    """Process-wide singleton holding lazily created shared objects."""

    _instance: Optional["_AppState"] = None
    # Shared objects, created on first use by the get_* accessors below.
    settings: Optional[AppSettings] = None
    context: Optional[Context] = None
    safety_checker: Optional[SafetyChecker] = None


def get_state() -> _AppState:
    """Return the singleton _AppState, creating it on first call."""
    if _AppState._instance is None:
        _AppState._instance = _AppState()
    return _AppState._instance


def get_settings(skip_file: bool = False) -> AppSettings:
    """Return the shared AppSettings, loading it on first call.

    skip_file only takes effect on the call that creates the settings;
    later calls return the already-loaded instance unchanged.
    """
    state = get_state()
    if state.settings is None:
        state.settings = AppSettings()
        state.settings.load(skip_file)
    return state.settings


def get_context(interface_type: InterfaceType) -> Context:
    """Return the shared Context, creating it for *interface_type* on first call."""
    state = get_state()
    if state.context is None:
        state.context = Context(interface_type)
    return state.context


def get_safety_checker() -> SafetyChecker:
    """Return the shared SafetyChecker, creating it on first call."""
    state = get_state()
    if state.safety_checker is None:
        print("Initializing safety checker")
        state.safety_checker = SafetyChecker()
    return state.safety_checker
src/utils.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from os import path, listdir
2
+ import platform
3
+ from typing import List
4
+
5
+
6
def show_system_info():
    """Print basic platform diagnostics; best-effort, never raises."""
    try:
        print(f"Running on {platform.system()} platform")
        print(f"OS: {platform.platform()}")
        print(f"Processor: {platform.processor()}")
    except Exception as ex:
        print(f"Error occurred while getting system information {ex}")
13
+
14
+
15
def get_models_from_text_file(file_path: str) -> List:
    """Read model repo ids from *file_path*, one per line.

    Blank (or whitespace-only) lines are skipped; surrounding whitespace is
    stripped from each entry.
    """
    with open(file_path, "r") as file:
        return [line.strip() for line in file if line.strip() != ""]
23
+
24
+
25
def get_image_file_extension(image_format: str) -> str:
    """Map an image format name ("JPEG" or "PNG") to its file extension.

    NOTE(review): falls through and returns None for any other format —
    callers appear to pass only the two supported formats; confirm before
    relying on other values.
    """
    if image_format == "JPEG":
        return ".jpg"
    elif image_format == "PNG":
        return ".png"
30
+
31
+
32
def get_files_in_dir(root_dir: str) -> List:
    """List model files (.gguf/.safetensors) in *root_dir*.

    The literal string "None" is always the first entry — presumably the
    "no model selected" option for UI lists (verify against callers).
    Raises FileNotFoundError if *root_dir* does not exist.
    """
    models = ["None"]
    for file_name in listdir(root_dir):
        if file_name.endswith((".gguf", ".safetensors")):
            models.append(path.join(root_dir, file_name))
    return models