Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- __pycache__/app_settings.cpython-310.pyc +0 -0
- __pycache__/constants.cpython-310.pyc +0 -0
- __pycache__/context.cpython-310.pyc +0 -0
- __pycache__/image_ops.cpython-310.pyc +0 -0
- __pycache__/paths.cpython-310.pyc +0 -0
- __pycache__/state.cpython-310.pyc +0 -0
- __pycache__/utils.cpython-310.pyc +0 -0
- configs/lcm-lora-models.txt +3 -0
- configs/lcm-models.txt +5 -0
- configs/openvino-lcm-models.txt +4 -0
- configs/settings.yaml +24 -0
- configs/stable-diffusion-models.txt +7 -0
- testlcm/.gitattributes +35 -0
- testlcm/README.md +12 -0
- testlcm/__init__.py +0 -0
- testlcm/__pycache__/app_settings.cpython-310.pyc +0 -0
- testlcm/__pycache__/constants.cpython-310.pyc +0 -0
- testlcm/__pycache__/context.cpython-310.pyc +0 -0
- testlcm/__pycache__/image_ops.cpython-310.pyc +0 -0
- testlcm/__pycache__/paths.cpython-310.pyc +0 -0
- testlcm/__pycache__/state.cpython-310.pyc +0 -0
- testlcm/__pycache__/utils.cpython-310.pyc +0 -0
- testlcm/app.py +161 -0
- testlcm/app_settings.py +89 -0
- testlcm/backend/__init__.py +0 -0
- testlcm/backend/__pycache__/__init__.cpython-310.pyc +0 -0
- testlcm/backend/__pycache__/device.cpython-310.pyc +0 -0
- testlcm/backend/__pycache__/image_saver.cpython-310.pyc +0 -0
- testlcm/backend/__pycache__/lcm_text_to_image.cpython-310.pyc +0 -0
- testlcm/backend/__pycache__/tiny_decoder.cpython-310.pyc +0 -0
- testlcm/backend/device.py +23 -0
- testlcm/backend/image_saver.py +40 -0
- testlcm/backend/lcm_text_to_image.py +352 -0
- testlcm/backend/models/__pycache__/lcmdiffusion_setting.cpython-310.pyc +0 -0
- testlcm/backend/models/lcmdiffusion_setting.py +39 -0
- testlcm/backend/openvino/__pycache__/custom_ov_model_vae_decoder.cpython-310.pyc +0 -0
- testlcm/backend/openvino/__pycache__/pipelines.cpython-310.pyc +0 -0
- testlcm/backend/openvino/custom_ov_model_vae_decoder.py +21 -0
- testlcm/backend/openvino/pipelines.py +75 -0
- testlcm/backend/pipelines/__pycache__/lcm.cpython-310.pyc +0 -0
- testlcm/backend/pipelines/__pycache__/lcm_lora.cpython-310.pyc +0 -0
- testlcm/backend/pipelines/lcm.py +90 -0
- testlcm/backend/pipelines/lcm_lora.py +25 -0
- testlcm/backend/tiny_decoder.py +30 -0
- testlcm/constants.py +18 -0
- testlcm/context.py +46 -0
- testlcm/frontend/__pycache__/utils.cpython-310.pyc +0 -0
- testlcm/frontend/gui/app_window.py +604 -0
- testlcm/frontend/gui/image_generator_worker.py +37 -0
- testlcm/frontend/gui/ui.py +15 -0
__pycache__/app_settings.cpython-310.pyc
ADDED
Binary file (3.19 kB)
__pycache__/constants.cpython-310.pyc
ADDED
Binary file (866 Bytes)
__pycache__/context.cpython-310.pyc
ADDED
Binary file (1.49 kB)
__pycache__/image_ops.cpython-310.pyc
ADDED
Binary file (402 Bytes)
__pycache__/paths.cpython-310.pyc
ADDED
Binary file (2.06 kB)
__pycache__/state.cpython-310.pyc
ADDED
Binary file (830 Bytes)
__pycache__/utils.cpython-310.pyc
ADDED
Binary file (877 Bytes)
configs/lcm-lora-models.txt
ADDED
@@ -0,0 +1,3 @@
+latent-consistency/lcm-lora-sdv1-5
+latent-consistency/lcm-lora-sdxl
+latent-consistency/lcm-lora-ssd-1b
configs/lcm-models.txt
ADDED
@@ -0,0 +1,5 @@
+stabilityai/sd-turbo
+stabilityai/sdxl-turbo
+SimianLuo/LCM_Dreamshaper_v7
+latent-consistency/lcm-sdxl
+latent-consistency/lcm-ssd-1b
configs/openvino-lcm-models.txt
ADDED
@@ -0,0 +1,4 @@
+rupeshs/sd-turbo-openvino
+rupeshs/sdxl-turbo-openvino-int8
+rupeshs/LCM-dreamshaper-v7-openvino
+Disty0/LCM_SoteMix
configs/settings.yaml
ADDED
@@ -0,0 +1,24 @@
+lcm_diffusion_setting:
+  diffusion_task: text_to_image
+  guidance_scale: 1.0
+  image_height: 512
+  image_width: 512
+  inference_steps: 1
+  init_image: null
+  lcm_lora:
+    base_model_id: Lykon/dreamshaper-8
+    lcm_lora_id: latent-consistency/lcm-lora-sdv1-5
+  lcm_model_id: stabilityai/sd-turbo
+  negative_prompt: ''
+  number_of_images: 1
+  openvino_lcm_model_id: rupeshs/sd-turbo-openvino
+  prompt: a girl dance
+  seed: 123123
+  strength: 0.6
+  use_lcm_lora: false
+  use_offline_model: false
+  use_openvino: true
+  use_safety_checker: false
+  use_seed: false
+  use_tiny_auto_encoder: false
+results_path: /workspaces/fastsdcpu/results
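
A minimal sketch (not part of this commit) of how this file is consumed: AppSettings.load() below parses it with yaml.safe_load and validates the resulting dict against the pydantic Settings model from models/settings.py (not shown in this diff):

import yaml

# Parse the YAML into a plain dict, exactly as AppSettings.load() does.
with open("configs/settings.yaml") as f:
    settings_dict = yaml.safe_load(f)

print(settings_dict["lcm_diffusion_setting"]["openvino_lcm_model_id"])  # rupeshs/sd-turbo-openvino
print(settings_dict["results_path"])  # /workspaces/fastsdcpu/results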
configs/stable-diffusion-models.txt
ADDED
@@ -0,0 +1,7 @@
+Lykon/dreamshaper-8
+Fictiverse/Stable_Diffusion_PaperCut_Model
+stabilityai/stable-diffusion-xl-base-1.0
+runwayml/stable-diffusion-v1-5
+segmind/SSD-1B
+stablediffusionapi/anything-v5
+prompthero/openjourney-v4
testlcm/.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
testlcm/README.md
ADDED
@@ -0,0 +1,12 @@
+---
+title: Testlcm
+emoji: 🏢
+colorFrom: pink
+colorTo: purple
+sdk: gradio
+sdk_version: 4.7.1
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
testlcm/__init__.py
ADDED
File without changes
testlcm/__pycache__/app_settings.cpython-310.pyc
ADDED
Binary file (3.19 kB)
testlcm/__pycache__/constants.cpython-310.pyc
ADDED
Binary file (874 Bytes)
testlcm/__pycache__/context.cpython-310.pyc
ADDED
Binary file (1.5 kB)
testlcm/__pycache__/image_ops.cpython-310.pyc
ADDED
Binary file (410 Bytes)
testlcm/__pycache__/paths.cpython-310.pyc
ADDED
Binary file (2.06 kB)
testlcm/__pycache__/state.cpython-310.pyc
ADDED
Binary file (838 Bytes)
testlcm/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (885 Bytes)
testlcm/app.py
ADDED
@@ -0,0 +1,161 @@
+from app_settings import AppSettings
+from utils import show_system_info
+import constants
+from argparse import ArgumentParser
+from context import Context
+from constants import APP_VERSION, LCM_DEFAULT_MODEL_OPENVINO
+from models.interface_types import InterfaceType
+from constants import DEVICE
+
+parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
+parser.add_argument(
+    "-s",
+    "--share",
+    action="store_true",
+    help="Create shareable link (Web UI)",
+    required=False,
+)
+group = parser.add_mutually_exclusive_group(required=False)
+group.add_argument(
+    "-g",
+    "--gui",
+    action="store_true",
+    help="Start desktop GUI",
+)
+group.add_argument(
+    "-w",
+    "--webui",
+    action="store_true",
+    help="Start Web UI",
+)
+group.add_argument(
+    "-r",
+    "--realtime",
+    action="store_true",
+    help="Start realtime inference UI (experimental)",
+)
+group.add_argument(
+    "-v",
+    "--version",
+    action="store_true",
+    help="Version",
+)
+parser.add_argument(
+    "--lcm_model_id",
+    type=str,
+    help="Model ID or path, default: SimianLuo/LCM_Dreamshaper_v7",
+    default="SimianLuo/LCM_Dreamshaper_v7",
+)
+parser.add_argument(
+    "--prompt",
+    type=str,
+    help="Describe the image you want to generate",
+)
+parser.add_argument(
+    "--image_height",
+    type=int,
+    help="Height of the image",
+    default=512,
+)
+parser.add_argument(
+    "--image_width",
+    type=int,
+    help="Width of the image",
+    default=512,
+)
+parser.add_argument(
+    "--inference_steps",
+    type=int,
+    help="Number of steps, default: 4",
+    default=4,
+)
+parser.add_argument(
+    "--guidance_scale",
+    type=float,
+    help="Guidance scale, default: 1.0",
+    default=1.0,
+)
+
+parser.add_argument(
+    "--number_of_images",
+    type=int,
+    help="Number of images to generate, default: 1",
+    default=1,
+)
+parser.add_argument(
+    "--seed",
+    type=int,
+    help="Seed, default: -1 (disabled)",
+    default=-1,
+)
+parser.add_argument(
+    "--use_openvino",
+    action="store_true",
+    help="Use OpenVINO model",
+)
+
+parser.add_argument(
+    "--use_offline_model",
+    action="store_true",
+    help="Use offline model",
+)
+parser.add_argument(
+    "--use_safety_checker",
+    action="store_false",
+    help="Use safety checker",
+)
+parser.add_argument(
+    "--use_lcm_lora",
+    action="store_true",
+    help="Use LCM-LoRA",
+)
+parser.add_argument(
+    "--base_model_id",
+    type=str,
+    help="LCM LoRA base model ID, default: Lykon/dreamshaper-8",
+    default="Lykon/dreamshaper-8",
+)
+parser.add_argument(
+    "--lcm_lora_id",
+    type=str,
+    help="LCM LoRA model ID, default: latent-consistency/lcm-lora-sdv1-5",
+    default="latent-consistency/lcm-lora-sdv1-5",
+)
+parser.add_argument(
+    "-i",
+    "--interactive",
+    action="store_true",
+    help="Interactive CLI mode",
+)
+parser.add_argument(
+    "--use_tiny_auto_encoder",
+    action="store_true",
+    help="Use tiny auto encoder for SD (TAESD)",
+)
+args = parser.parse_args()
+
+if args.version:
+    print(APP_VERSION)
+    exit()
+
+# parser.print_help()
+show_system_info()
+print(f"Using device : {constants.DEVICE}")
+app_settings = AppSettings()
+app_settings.load()
+print(
+    f"Found {len(app_settings.stable_diffusion_models)} stable diffusion models in configs/stable-diffusion-models.txt"
+)
+print(
+    f"Found {len(app_settings.lcm_lora_models)} LCM-LoRA models in configs/lcm-lora-models.txt"
+)
+print(
+    f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in configs/openvino-lcm-models.txt"
+)
+
+from frontend.webui.ui import start_webui
+
+print("Starting web UI mode")
+start_webui(
+    args.share,
+)
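
A small sketch (not part of this commit) of how the parser above resolves a typical invocation; it must run in the same module so `parser` is in scope. The flags -g, -w, -r, and -v share a mutually exclusive group and cannot be combined:

# Illustrative only; mirrors the parser defined above.
args = parser.parse_args(["-w", "--use_openvino", "--prompt", "a cat"])
print(args.webui)           # True
print(args.use_openvino)    # True
print(args.guidance_scale)  # 1.0 (default)

# parser.parse_args(["-g", "-w"]) exits with an error:
# argument -w/--webui: not allowed with argument -g/--gui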
testlcm/app_settings.py
ADDED
@@ -0,0 +1,89 @@
+import yaml
+from os import path, makedirs
+from models.settings import Settings
+from paths import FastStableDiffusionPaths
+from utils import get_models_from_text_file
+from constants import (
+    OPENVINO_LCM_MODELS_FILE,
+    LCM_LORA_MODELS_FILE,
+    SD_MODELS_FILE,
+    LCM_MODELS_FILE,
+)
+from copy import deepcopy
+
+
+class AppSettings:
+    def __init__(self):
+        self.config_path = FastStableDiffusionPaths().get_app_settings_path()
+        self._stable_diffusion_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(SD_MODELS_FILE)
+        )
+        self._lcm_lora_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(LCM_LORA_MODELS_FILE)
+        )
+        self._openvino_lcm_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(OPENVINO_LCM_MODELS_FILE)
+        )
+        self._lcm_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(LCM_MODELS_FILE)
+        )
+
+    @property
+    def settings(self):
+        return self._config
+
+    @property
+    def stable_diffusion_models(self):
+        return self._stable_diffusion_models
+
+    @property
+    def openvino_lcm_models(self):
+        return self._openvino_lcm_models
+
+    @property
+    def lcm_models(self):
+        return self._lcm_models
+
+    @property
+    def lcm_lora_models(self):
+        return self._lcm_lora_models
+
+    def load(self, skip_file=False):
+        if skip_file:
+            print("Skipping config file")
+            settings_dict = self._load_default()
+            self._config = Settings.parse_obj(settings_dict)
+        else:
+            if not path.exists(self.config_path):
+                base_dir = path.dirname(self.config_path)
+                if not path.exists(base_dir):
+                    makedirs(base_dir)
+                try:
+                    print("Settings not found, creating default settings")
+                    with open(self.config_path, "w") as file:
+                        yaml.dump(
+                            self._load_default(),
+                            file,
+                        )
+                except Exception as ex:
+                    print(f"Error in creating settings : {ex}")
+                    exit()
+            try:
+                with open(self.config_path) as file:
+                    settings_dict = yaml.safe_load(file)
+                    self._config = Settings.parse_obj(settings_dict)
+            except Exception as ex:
+                print(f"Error in loading settings : {ex}")
+
+    def save(self):
+        try:
+            with open(self.config_path, "w") as file:
+                tmp_cfg = deepcopy(self._config)
+                tmp_cfg.lcm_diffusion_setting.init_image = None
+                yaml.dump(tmp_cfg.dict(), file)
+        except Exception as ex:
+            print(f"Error in saving settings : {ex}")
+
+    def _load_default(self) -> dict:
+        default_config = Settings()
+        return default_config.dict()
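
A usage sketch (not part of this commit), assuming the configs/ files above are on disk and that paths.py and utils.py behave as referenced here:

from app_settings import AppSettings

app_settings = AppSettings()
app_settings.load()  # writes a default configs/settings.yaml on first run
print(len(app_settings.lcm_models))  # 5 entries from configs/lcm-models.txt
app_settings.settings.lcm_diffusion_setting.prompt = "a sunset"
app_settings.save()  # persists everything except init_image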
testlcm/backend/__init__.py
ADDED
File without changes
testlcm/backend/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (138 Bytes)
testlcm/backend/__pycache__/device.cpython-310.pyc
ADDED
Binary file (817 Bytes)
testlcm/backend/__pycache__/image_saver.cpython-310.pyc
ADDED
Binary file (1.27 kB)
testlcm/backend/__pycache__/lcm_text_to_image.cpython-310.pyc
ADDED
Binary file (6.4 kB)
testlcm/backend/__pycache__/tiny_decoder.cpython-310.pyc
ADDED
Binary file (878 Bytes)
testlcm/backend/device.py
ADDED
@@ -0,0 +1,23 @@
+import platform
+from constants import DEVICE
+import torch
+import openvino as ov
+
+core = ov.Core()
+
+
+def is_openvino_device() -> bool:
+    if DEVICE.lower() == "cpu" or DEVICE.lower()[0] == "g":
+        return True
+    else:
+        return False
+
+
+def get_device_name() -> str:
+    if DEVICE == "cuda":
+        default_gpu_index = torch.cuda.current_device()
+        return torch.cuda.get_device_name(default_gpu_index)
+    elif platform.system().lower() == "darwin":
+        return platform.processor()
+    elif is_openvino_device():
+        return core.get_property(DEVICE.upper(), "FULL_DEVICE_NAME")
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from os import path, mkdir
|
| 2 |
+
from typing import Any
|
| 3 |
+
from uuid import uuid4
|
| 4 |
+
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ImageSaver:
|
| 9 |
+
@staticmethod
|
| 10 |
+
def save_images(
|
| 11 |
+
output_path: str,
|
| 12 |
+
images: Any,
|
| 13 |
+
folder_name: str = "",
|
| 14 |
+
format: str = ".png",
|
| 15 |
+
lcm_diffusion_setting: LCMDiffusionSetting = None,
|
| 16 |
+
) -> None:
|
| 17 |
+
gen_id = uuid4()
|
| 18 |
+
|
| 19 |
+
for index, image in enumerate(images):
|
| 20 |
+
if not path.exists(output_path):
|
| 21 |
+
mkdir(output_path)
|
| 22 |
+
|
| 23 |
+
if folder_name:
|
| 24 |
+
out_path = path.join(
|
| 25 |
+
output_path,
|
| 26 |
+
folder_name,
|
| 27 |
+
)
|
| 28 |
+
else:
|
| 29 |
+
out_path = output_path
|
| 30 |
+
|
| 31 |
+
if not path.exists(out_path):
|
| 32 |
+
mkdir(out_path)
|
| 33 |
+
image.save(path.join(out_path, f"{gen_id}-{index+1}{format}"))
|
| 34 |
+
if lcm_diffusion_setting:
|
| 35 |
+
with open(path.join(out_path, f"{gen_id}.json"), "w") as json_file:
|
| 36 |
+
json.dump(
|
| 37 |
+
lcm_diffusion_setting.model_dump(exclude="init_image"),
|
| 38 |
+
json_file,
|
| 39 |
+
indent=4,
|
| 40 |
+
)
|
testlcm/backend/lcm_text_to_image.py
ADDED
|
@@ -0,0 +1,352 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
from diffusers import LCMScheduler
|
| 3 |
+
import torch
|
| 4 |
+
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
|
| 5 |
+
import numpy as np
|
| 6 |
+
from constants import DEVICE
|
| 7 |
+
from backend.models.lcmdiffusion_setting import LCMLora
|
| 8 |
+
from backend.device import is_openvino_device
|
| 9 |
+
from backend.openvino.pipelines import (
|
| 10 |
+
get_ov_text_to_image_pipeline,
|
| 11 |
+
ov_load_taesd,
|
| 12 |
+
get_ov_image_to_image_pipeline,
|
| 13 |
+
)
|
| 14 |
+
from backend.pipelines.lcm import (
|
| 15 |
+
get_lcm_model_pipeline,
|
| 16 |
+
load_taesd,
|
| 17 |
+
get_image_to_image_pipeline,
|
| 18 |
+
)
|
| 19 |
+
from backend.pipelines.lcm_lora import get_lcm_lora_pipeline
|
| 20 |
+
from backend.models.lcmdiffusion_setting import DiffusionTask
|
| 21 |
+
from image_ops import resize_pil_image
|
| 22 |
+
from math import ceil
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class LCMTextToImage:
|
| 26 |
+
def __init__(
|
| 27 |
+
self,
|
| 28 |
+
device: str = "cpu",
|
| 29 |
+
) -> None:
|
| 30 |
+
self.pipeline = None
|
| 31 |
+
self.use_openvino = False
|
| 32 |
+
self.device = ""
|
| 33 |
+
self.previous_model_id = None
|
| 34 |
+
self.previous_use_tae_sd = False
|
| 35 |
+
self.previous_use_lcm_lora = False
|
| 36 |
+
self.previous_ov_model_id = ""
|
| 37 |
+
self.previous_safety_checker = False
|
| 38 |
+
self.previous_use_openvino = False
|
| 39 |
+
self.img_to_img_pipeline = None
|
| 40 |
+
self.is_openvino_init = False
|
| 41 |
+
self.torch_data_type = (
|
| 42 |
+
torch.float32 if is_openvino_device() or DEVICE == "mps" else torch.float16
|
| 43 |
+
)
|
| 44 |
+
print(f"Torch datatype : {self.torch_data_type}")
|
| 45 |
+
|
| 46 |
+
def _pipeline_to_device(self):
|
| 47 |
+
print(f"Pipeline device : {DEVICE}")
|
| 48 |
+
print(f"Pipeline dtype : {self.torch_data_type}")
|
| 49 |
+
self.pipeline.to(
|
| 50 |
+
torch_device=DEVICE,
|
| 51 |
+
torch_dtype=self.torch_data_type,
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
def _add_freeu(self):
|
| 55 |
+
pipeline_class = self.pipeline.__class__.__name__
|
| 56 |
+
if isinstance(self.pipeline.scheduler, LCMScheduler):
|
| 57 |
+
if pipeline_class == "StableDiffusionPipeline":
|
| 58 |
+
print("Add FreeU - SD")
|
| 59 |
+
self.pipeline.enable_freeu(
|
| 60 |
+
s1=0.9,
|
| 61 |
+
s2=0.2,
|
| 62 |
+
b1=1.2,
|
| 63 |
+
b2=1.4,
|
| 64 |
+
)
|
| 65 |
+
elif pipeline_class == "StableDiffusionXLPipeline":
|
| 66 |
+
print("Add FreeU - SDXL")
|
| 67 |
+
self.pipeline.enable_freeu(
|
| 68 |
+
s1=0.6,
|
| 69 |
+
s2=0.4,
|
| 70 |
+
b1=1.1,
|
| 71 |
+
b2=1.2,
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
def _update_lcm_scheduler_params(self):
|
| 75 |
+
if isinstance(self.pipeline.scheduler, LCMScheduler):
|
| 76 |
+
self.pipeline.scheduler = LCMScheduler.from_config(
|
| 77 |
+
self.pipeline.scheduler.config,
|
| 78 |
+
beta_start=0.001,
|
| 79 |
+
beta_end=0.01,
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
def init(
|
| 83 |
+
self,
|
| 84 |
+
device: str = "cpu",
|
| 85 |
+
lcm_diffusion_setting: LCMDiffusionSetting = LCMDiffusionSetting(),
|
| 86 |
+
) -> None:
|
| 87 |
+
self.device = device
|
| 88 |
+
self.use_openvino = lcm_diffusion_setting.use_openvino
|
| 89 |
+
model_id = lcm_diffusion_setting.lcm_model_id
|
| 90 |
+
use_local_model = lcm_diffusion_setting.use_offline_model
|
| 91 |
+
use_tiny_auto_encoder = lcm_diffusion_setting.use_tiny_auto_encoder
|
| 92 |
+
use_lora = lcm_diffusion_setting.use_lcm_lora
|
| 93 |
+
lcm_lora: LCMLora = lcm_diffusion_setting.lcm_lora
|
| 94 |
+
ov_model_id = lcm_diffusion_setting.openvino_lcm_model_id
|
| 95 |
+
|
| 96 |
+
if lcm_diffusion_setting.diffusion_task == DiffusionTask.image_to_image.value:
|
| 97 |
+
lcm_diffusion_setting.init_image = resize_pil_image(
|
| 98 |
+
lcm_diffusion_setting.init_image,
|
| 99 |
+
lcm_diffusion_setting.image_width,
|
| 100 |
+
lcm_diffusion_setting.image_height,
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
if (
|
| 104 |
+
self.pipeline is None
|
| 105 |
+
or self.previous_model_id != model_id
|
| 106 |
+
or self.previous_use_tae_sd != use_tiny_auto_encoder
|
| 107 |
+
or self.previous_lcm_lora_base_id != lcm_lora.base_model_id
|
| 108 |
+
or self.previous_lcm_lora_id != lcm_lora.lcm_lora_id
|
| 109 |
+
or self.previous_use_lcm_lora != use_lora
|
| 110 |
+
or self.previous_ov_model_id != ov_model_id
|
| 111 |
+
or self.previous_safety_checker != lcm_diffusion_setting.use_safety_checker
|
| 112 |
+
or self.previous_use_openvino != lcm_diffusion_setting.use_openvino
|
| 113 |
+
):
|
| 114 |
+
if self.use_openvino and is_openvino_device():
|
| 115 |
+
if self.pipeline:
|
| 116 |
+
del self.pipeline
|
| 117 |
+
self.pipeline = None
|
| 118 |
+
self.is_openvino_init = True
|
| 119 |
+
if (
|
| 120 |
+
lcm_diffusion_setting.diffusion_task
|
| 121 |
+
== DiffusionTask.text_to_image.value
|
| 122 |
+
):
|
| 123 |
+
print(f"***** Init Text to image (OpenVINO) - {ov_model_id} *****")
|
| 124 |
+
self.pipeline = get_ov_text_to_image_pipeline(
|
| 125 |
+
ov_model_id,
|
| 126 |
+
use_local_model,
|
| 127 |
+
)
|
| 128 |
+
elif (
|
| 129 |
+
lcm_diffusion_setting.diffusion_task
|
| 130 |
+
== DiffusionTask.image_to_image.value
|
| 131 |
+
):
|
| 132 |
+
print(f"***** Image to image (OpenVINO) - {ov_model_id} *****")
|
| 133 |
+
self.pipeline = get_ov_image_to_image_pipeline(
|
| 134 |
+
ov_model_id,
|
| 135 |
+
use_local_model,
|
| 136 |
+
)
|
| 137 |
+
else:
|
| 138 |
+
if self.pipeline:
|
| 139 |
+
del self.pipeline
|
| 140 |
+
self.pipeline = None
|
| 141 |
+
if self.img_to_img_pipeline:
|
| 142 |
+
del self.img_to_img_pipeline
|
| 143 |
+
self.img_to_img_pipeline = None
|
| 144 |
+
|
| 145 |
+
if use_lora:
|
| 146 |
+
print(
|
| 147 |
+
f"***** Init LCM-LoRA pipeline - {lcm_lora.base_model_id} *****"
|
| 148 |
+
)
|
| 149 |
+
self.pipeline = get_lcm_lora_pipeline(
|
| 150 |
+
lcm_lora.base_model_id,
|
| 151 |
+
lcm_lora.lcm_lora_id,
|
| 152 |
+
use_local_model,
|
| 153 |
+
torch_data_type=self.torch_data_type,
|
| 154 |
+
)
|
| 155 |
+
else:
|
| 156 |
+
print(f"***** Init LCM Model pipeline - {model_id} *****")
|
| 157 |
+
self.pipeline = get_lcm_model_pipeline(
|
| 158 |
+
model_id,
|
| 159 |
+
use_local_model,
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
if (
|
| 163 |
+
lcm_diffusion_setting.diffusion_task
|
| 164 |
+
== DiffusionTask.image_to_image.value
|
| 165 |
+
):
|
| 166 |
+
self.img_to_img_pipeline = get_image_to_image_pipeline(
|
| 167 |
+
self.pipeline
|
| 168 |
+
)
|
| 169 |
+
self._pipeline_to_device()
|
| 170 |
+
|
| 171 |
+
if use_tiny_auto_encoder:
|
| 172 |
+
if self.use_openvino and is_openvino_device():
|
| 173 |
+
print("Using Tiny Auto Encoder (OpenVINO)")
|
| 174 |
+
ov_load_taesd(
|
| 175 |
+
self.pipeline,
|
| 176 |
+
use_local_model,
|
| 177 |
+
)
|
| 178 |
+
else:
|
| 179 |
+
print("Using Tiny Auto Encoder")
|
| 180 |
+
if (
|
| 181 |
+
lcm_diffusion_setting.diffusion_task
|
| 182 |
+
== DiffusionTask.text_to_image.value
|
| 183 |
+
):
|
| 184 |
+
load_taesd(
|
| 185 |
+
self.pipeline,
|
| 186 |
+
use_local_model,
|
| 187 |
+
self.torch_data_type,
|
| 188 |
+
)
|
| 189 |
+
elif (
|
| 190 |
+
lcm_diffusion_setting.diffusion_task
|
| 191 |
+
== DiffusionTask.image_to_image.value
|
| 192 |
+
):
|
| 193 |
+
load_taesd(
|
| 194 |
+
self.img_to_img_pipeline,
|
| 195 |
+
use_local_model,
|
| 196 |
+
self.torch_data_type,
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
if (
|
| 200 |
+
lcm_diffusion_setting.diffusion_task
|
| 201 |
+
== DiffusionTask.image_to_image.value
|
| 202 |
+
and lcm_diffusion_setting.use_openvino
|
| 203 |
+
):
|
| 204 |
+
self.pipeline.scheduler = LCMScheduler.from_config(
|
| 205 |
+
self.pipeline.scheduler.config,
|
| 206 |
+
)
|
| 207 |
+
else:
|
| 208 |
+
self._update_lcm_scheduler_params()
|
| 209 |
+
|
| 210 |
+
if use_lora:
|
| 211 |
+
self._add_freeu()
|
| 212 |
+
|
| 213 |
+
self.previous_model_id = model_id
|
| 214 |
+
self.previous_ov_model_id = ov_model_id
|
| 215 |
+
self.previous_use_tae_sd = use_tiny_auto_encoder
|
| 216 |
+
self.previous_lcm_lora_base_id = lcm_lora.base_model_id
|
| 217 |
+
self.previous_lcm_lora_id = lcm_lora.lcm_lora_id
|
| 218 |
+
self.previous_use_lcm_lora = use_lora
|
| 219 |
+
self.previous_safety_checker = lcm_diffusion_setting.use_safety_checker
|
| 220 |
+
self.previous_use_openvino = lcm_diffusion_setting.use_openvino
|
| 221 |
+
if (
|
| 222 |
+
lcm_diffusion_setting.diffusion_task
|
| 223 |
+
== DiffusionTask.text_to_image.value
|
| 224 |
+
):
|
| 225 |
+
print(f"Pipeline : {self.pipeline}")
|
| 226 |
+
elif (
|
| 227 |
+
lcm_diffusion_setting.diffusion_task
|
| 228 |
+
== DiffusionTask.image_to_image.value
|
| 229 |
+
):
|
| 230 |
+
if self.use_openvino and is_openvino_device():
|
| 231 |
+
print(f"Pipeline : {self.pipeline}")
|
| 232 |
+
else:
|
| 233 |
+
print(f"Pipeline : {self.img_to_img_pipeline}")
|
| 234 |
+
|
| 235 |
+
def generate(
|
| 236 |
+
self,
|
| 237 |
+
lcm_diffusion_setting: LCMDiffusionSetting,
|
| 238 |
+
reshape: bool = False,
|
| 239 |
+
) -> Any:
|
| 240 |
+
guidance_scale = lcm_diffusion_setting.guidance_scale
|
| 241 |
+
img_to_img_inference_steps = lcm_diffusion_setting.inference_steps
|
| 242 |
+
check_step_value = int(
|
| 243 |
+
lcm_diffusion_setting.inference_steps * lcm_diffusion_setting.strength
|
| 244 |
+
)
|
| 245 |
+
if (
|
| 246 |
+
lcm_diffusion_setting.diffusion_task == DiffusionTask.image_to_image.value
|
| 247 |
+
and check_step_value < 1
|
| 248 |
+
):
|
| 249 |
+
img_to_img_inference_steps = ceil(1 / lcm_diffusion_setting.strength)
|
| 250 |
+
print(
|
| 251 |
+
f"Strength: {lcm_diffusion_setting.strength},{img_to_img_inference_steps}"
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
if lcm_diffusion_setting.use_seed:
|
| 255 |
+
cur_seed = lcm_diffusion_setting.seed
|
| 256 |
+
if self.use_openvino:
|
| 257 |
+
np.random.seed(cur_seed)
|
| 258 |
+
else:
|
| 259 |
+
torch.manual_seed(cur_seed)
|
| 260 |
+
|
| 261 |
+
is_openvino_pipe = lcm_diffusion_setting.use_openvino and is_openvino_device()
|
| 262 |
+
if is_openvino_pipe:
|
| 263 |
+
print("Using OpenVINO")
|
| 264 |
+
if reshape and not self.is_openvino_init:
|
| 265 |
+
print("Reshape and compile")
|
| 266 |
+
self.pipeline.reshape(
|
| 267 |
+
batch_size=-1,
|
| 268 |
+
height=lcm_diffusion_setting.image_height,
|
| 269 |
+
width=lcm_diffusion_setting.image_width,
|
| 270 |
+
num_images_per_prompt=lcm_diffusion_setting.number_of_images,
|
| 271 |
+
)
|
| 272 |
+
self.pipeline.compile()
|
| 273 |
+
|
| 274 |
+
if self.is_openvino_init:
|
| 275 |
+
self.is_openvino_init = False
|
| 276 |
+
|
| 277 |
+
if not lcm_diffusion_setting.use_safety_checker:
|
| 278 |
+
self.pipeline.safety_checker = None
|
| 279 |
+
if (
|
| 280 |
+
lcm_diffusion_setting.diffusion_task
|
| 281 |
+
== DiffusionTask.image_to_image.value
|
| 282 |
+
and not is_openvino_pipe
|
| 283 |
+
):
|
| 284 |
+
self.img_to_img_pipeline.safety_checker = None
|
| 285 |
+
|
| 286 |
+
if (
|
| 287 |
+
not lcm_diffusion_setting.use_lcm_lora
|
| 288 |
+
and not lcm_diffusion_setting.use_openvino
|
| 289 |
+
and lcm_diffusion_setting.guidance_scale != 1.0
|
| 290 |
+
):
|
| 291 |
+
print("Not using LCM-LoRA so setting guidance_scale 1.0")
|
| 292 |
+
guidance_scale = 1.0
|
| 293 |
+
|
| 294 |
+
if lcm_diffusion_setting.use_openvino:
|
| 295 |
+
if (
|
| 296 |
+
lcm_diffusion_setting.diffusion_task
|
| 297 |
+
== DiffusionTask.text_to_image.value
|
| 298 |
+
):
|
| 299 |
+
result_images = self.pipeline(
|
| 300 |
+
prompt=lcm_diffusion_setting.prompt,
|
| 301 |
+
negative_prompt=lcm_diffusion_setting.negative_prompt,
|
| 302 |
+
num_inference_steps=lcm_diffusion_setting.inference_steps,
|
| 303 |
+
guidance_scale=guidance_scale,
|
| 304 |
+
width=lcm_diffusion_setting.image_width,
|
| 305 |
+
height=lcm_diffusion_setting.image_height,
|
| 306 |
+
num_images_per_prompt=lcm_diffusion_setting.number_of_images,
|
| 307 |
+
).images
|
| 308 |
+
elif (
|
| 309 |
+
lcm_diffusion_setting.diffusion_task
|
| 310 |
+
== DiffusionTask.image_to_image.value
|
| 311 |
+
):
|
| 312 |
+
result_images = self.pipeline(
|
| 313 |
+
image=lcm_diffusion_setting.init_image,
|
| 314 |
+
strength=lcm_diffusion_setting.strength,
|
| 315 |
+
prompt=lcm_diffusion_setting.prompt,
|
| 316 |
+
negative_prompt=lcm_diffusion_setting.negative_prompt,
|
| 317 |
+
num_inference_steps=img_to_img_inference_steps * 3,
|
| 318 |
+
guidance_scale=guidance_scale,
|
| 319 |
+
num_images_per_prompt=lcm_diffusion_setting.number_of_images,
|
| 320 |
+
).images
|
| 321 |
+
|
| 322 |
+
else:
|
| 323 |
+
if (
|
| 324 |
+
lcm_diffusion_setting.diffusion_task
|
| 325 |
+
== DiffusionTask.text_to_image.value
|
| 326 |
+
):
|
| 327 |
+
result_images = self.pipeline(
|
| 328 |
+
prompt=lcm_diffusion_setting.prompt,
|
| 329 |
+
negative_prompt=lcm_diffusion_setting.negative_prompt,
|
| 330 |
+
num_inference_steps=lcm_diffusion_setting.inference_steps,
|
| 331 |
+
guidance_scale=guidance_scale,
|
| 332 |
+
width=lcm_diffusion_setting.image_width,
|
| 333 |
+
height=lcm_diffusion_setting.image_height,
|
| 334 |
+
num_images_per_prompt=lcm_diffusion_setting.number_of_images,
|
| 335 |
+
).images
|
| 336 |
+
elif (
|
| 337 |
+
lcm_diffusion_setting.diffusion_task
|
| 338 |
+
== DiffusionTask.image_to_image.value
|
| 339 |
+
):
|
| 340 |
+
result_images = self.img_to_img_pipeline(
|
| 341 |
+
image=lcm_diffusion_setting.init_image,
|
| 342 |
+
strength=lcm_diffusion_setting.strength,
|
| 343 |
+
prompt=lcm_diffusion_setting.prompt,
|
| 344 |
+
negative_prompt=lcm_diffusion_setting.negative_prompt,
|
| 345 |
+
num_inference_steps=img_to_img_inference_steps,
|
| 346 |
+
guidance_scale=guidance_scale,
|
| 347 |
+
width=lcm_diffusion_setting.image_width,
|
| 348 |
+
height=lcm_diffusion_setting.image_height,
|
| 349 |
+
num_images_per_prompt=lcm_diffusion_setting.number_of_images,
|
| 350 |
+
).images
|
| 351 |
+
|
| 352 |
+
return result_images
|
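
An end-to-end sketch (not part of this commit); model weights are fetched from the Hugging Face Hub on first use:

from backend.lcm_text_to_image import LCMTextToImage
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting

setting = LCMDiffusionSetting(
    prompt="a girl dance",
    use_openvino=True,
    inference_steps=1,
)
lcm = LCMTextToImage()
lcm.init(device="cpu", lcm_diffusion_setting=setting)
images = lcm.generate(setting)
images[0].save("out.png")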
testlcm/backend/models/__pycache__/lcmdiffusion_setting.cpython-310.pyc
ADDED
Binary file (1.87 kB)
testlcm/backend/models/lcmdiffusion_setting.py
ADDED
@@ -0,0 +1,39 @@
+from typing import Optional, Any
+from enum import Enum
+from pydantic import BaseModel
+from constants import LCM_DEFAULT_MODEL, LCM_DEFAULT_MODEL_OPENVINO
+
+
+class LCMLora(BaseModel):
+    base_model_id: str = "Lykon/dreamshaper-8"
+    lcm_lora_id: str = "latent-consistency/lcm-lora-sdv1-5"
+
+
+class DiffusionTask(str, Enum):
+    """Diffusion task types"""
+
+    text_to_image = "text_to_image"
+    image_to_image = "image_to_image"
+
+
+class LCMDiffusionSetting(BaseModel):
+    lcm_model_id: str = LCM_DEFAULT_MODEL
+    openvino_lcm_model_id: str = LCM_DEFAULT_MODEL_OPENVINO
+    use_offline_model: bool = False
+    use_lcm_lora: bool = False
+    lcm_lora: Optional[LCMLora] = LCMLora()
+    use_tiny_auto_encoder: bool = False
+    use_openvino: bool = False
+    prompt: str = ""
+    negative_prompt: str = ""
+    init_image: Any = None
+    strength: Optional[float] = 0.6
+    image_height: Optional[int] = 512
+    image_width: Optional[int] = 512
+    inference_steps: Optional[int] = 1
+    guidance_scale: Optional[float] = 1
+    number_of_images: Optional[int] = 1
+    seed: Optional[int] = 123123
+    use_seed: bool = False
+    use_safety_checker: bool = False
+    diffusion_task: str = DiffusionTask.text_to_image.value
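
Every field has a default, so the model can be built empty or field-by-field; this is also what configs/settings.yaml above deserializes into. A sketch (not part of this commit):

from backend.models.lcmdiffusion_setting import LCMDiffusionSetting

setting = LCMDiffusionSetting(prompt="a sunset", seed=42, use_seed=True)
print(setting.lcm_lora.base_model_id)  # Lykon/dreamshaper-8
print(setting.diffusion_task)          # text_to_image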
testlcm/backend/openvino/__pycache__/custom_ov_model_vae_decoder.cpython-310.pyc
ADDED
Binary file (772 Bytes)
testlcm/backend/openvino/__pycache__/pipelines.cpython-310.pyc
ADDED
Binary file (1.86 kB)
testlcm/backend/openvino/custom_ov_model_vae_decoder.py
ADDED
@@ -0,0 +1,21 @@
+from backend.device import is_openvino_device
+
+if is_openvino_device():
+    from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder
+
+
+class CustomOVModelVaeDecoder(OVModelVaeDecoder):
+    def __init__(
+        self,
+        model,
+        parent_model,
+        ov_config=None,
+        model_dir=None,
+    ):
+        super(OVModelVaeDecoder, self).__init__(
+            model,
+            parent_model,
+            ov_config,
+            "vae_decoder",
+            model_dir,
+        )
testlcm/backend/openvino/pipelines.py
ADDED
@@ -0,0 +1,75 @@
+from constants import DEVICE, LCM_DEFAULT_MODEL_OPENVINO
+from backend.tiny_decoder import get_tiny_decoder_vae_model
+from typing import Any
+from backend.device import is_openvino_device
+from paths import get_base_folder_name
+
+if is_openvino_device():
+    from huggingface_hub import snapshot_download
+    from optimum.intel.openvino.modeling_diffusion import OVBaseModel
+
+    from optimum.intel.openvino.modeling_diffusion import (
+        OVStableDiffusionPipeline,
+        OVStableDiffusionImg2ImgPipeline,
+        OVStableDiffusionXLPipeline,
+        OVStableDiffusionXLImg2ImgPipeline,
+    )
+    from backend.openvino.custom_ov_model_vae_decoder import CustomOVModelVaeDecoder
+
+
+def ov_load_taesd(
+    pipeline: Any,
+    use_local_model: bool = False,
+):
+    taesd_dir = snapshot_download(
+        repo_id=get_tiny_decoder_vae_model(pipeline.__class__.__name__),
+        local_files_only=use_local_model,
+    )
+    pipeline.vae_decoder = CustomOVModelVaeDecoder(
+        model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
+        parent_model=pipeline,
+        model_dir=taesd_dir,
+    )
+
+
+def get_ov_text_to_image_pipeline(
+    model_id: str = LCM_DEFAULT_MODEL_OPENVINO,
+    use_local_model: bool = False,
+) -> Any:
+    if "xl" in get_base_folder_name(model_id).lower():
+        pipeline = OVStableDiffusionXLPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+    else:
+        pipeline = OVStableDiffusionPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+
+    return pipeline
+
+
+def get_ov_image_to_image_pipeline(
+    model_id: str = LCM_DEFAULT_MODEL_OPENVINO,
+    use_local_model: bool = False,
+) -> Any:
+    if "xl" in get_base_folder_name(model_id).lower():
+        pipeline = OVStableDiffusionXLImg2ImgPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+    else:
+        pipeline = OVStableDiffusionImg2ImgPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+    return pipeline
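
A usage sketch for the OpenVINO path (not part of this commit); it assumes DEVICE resolves to an OpenVINO-capable device so the conditional imports above are executed:

from backend.openvino.pipelines import (
    get_ov_text_to_image_pipeline,
    ov_load_taesd,
)

pipeline = get_ov_text_to_image_pipeline("rupeshs/sd-turbo-openvino")
ov_load_taesd(pipeline)  # optional: swap in the tiny VAE decoder
images = pipeline(prompt="a cat", num_inference_steps=1, guidance_scale=1.0).images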
testlcm/backend/pipelines/__pycache__/lcm.cpython-310.pyc
ADDED
Binary file (2.1 kB)
testlcm/backend/pipelines/__pycache__/lcm_lora.cpython-310.pyc
ADDED
Binary file (867 Bytes)
testlcm/backend/pipelines/lcm.py
ADDED
@@ -0,0 +1,90 @@
+from constants import LCM_DEFAULT_MODEL
+from diffusers import (
+    DiffusionPipeline,
+    AutoencoderTiny,
+    UNet2DConditionModel,
+    LCMScheduler,
+)
+import torch
+from backend.tiny_decoder import get_tiny_decoder_vae_model
+from typing import Any
+from diffusers import (
+    LCMScheduler,
+    StableDiffusionImg2ImgPipeline,
+    StableDiffusionXLImg2ImgPipeline,
+)
+
+
+def _get_lcm_pipeline_from_base_model(
+    lcm_model_id: str,
+    base_model_id: str,
+    use_local_model: bool,
+):
+    pipeline = None
+    unet = UNet2DConditionModel.from_pretrained(
+        lcm_model_id,
+        torch_dtype=torch.float32,
+        local_files_only=use_local_model,
+    )
+    pipeline = DiffusionPipeline.from_pretrained(
+        base_model_id,
+        unet=unet,
+        torch_dtype=torch.float32,
+        local_files_only=use_local_model,
+    )
+    pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
+    return pipeline
+
+
+def load_taesd(
+    pipeline: Any,
+    use_local_model: bool = False,
+    torch_data_type: torch.dtype = torch.float32,
+):
+    vae_model = get_tiny_decoder_vae_model(pipeline.__class__.__name__)
+    pipeline.vae = AutoencoderTiny.from_pretrained(
+        vae_model,
+        torch_dtype=torch_data_type,
+        local_files_only=use_local_model,
+    )
+
+
+def get_lcm_model_pipeline(
+    model_id: str = LCM_DEFAULT_MODEL,
+    use_local_model: bool = False,
+):
+    pipeline = None
+    if model_id == "latent-consistency/lcm-sdxl":
+        pipeline = _get_lcm_pipeline_from_base_model(
+            model_id,
+            "stabilityai/stable-diffusion-xl-base-1.0",
+            use_local_model,
+        )
+
+    elif model_id == "latent-consistency/lcm-ssd-1b":
+        pipeline = _get_lcm_pipeline_from_base_model(
+            model_id,
+            "segmind/SSD-1B",
+            use_local_model,
+        )
+    else:
+        pipeline = DiffusionPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+        )
+
+    return pipeline
+
+
+def get_image_to_image_pipeline(pipeline: Any) -> Any:
+    components = pipeline.components
+    pipeline_class = pipeline.__class__.__name__
+    if (
+        pipeline_class == "LatentConsistencyModelPipeline"
+        or pipeline_class == "StableDiffusionPipeline"
+    ):
+        return StableDiffusionImg2ImgPipeline(**components)
+    elif pipeline_class == "StableDiffusionXLPipeline":
+        return StableDiffusionXLImg2ImgPipeline(**components)
+    else:
+        raise Exception(f"Unknown pipeline {pipeline_class}")
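
get_image_to_image_pipeline() rebuilds an img2img pipeline from the components of an existing text-to-image pipeline, so the two share weights instead of loading the model twice. A sketch (not part of this commit):

from backend.pipelines.lcm import get_lcm_model_pipeline, get_image_to_image_pipeline

pipeline = get_lcm_model_pipeline("SimianLuo/LCM_Dreamshaper_v7")
img2img = get_image_to_image_pipeline(pipeline)
print(img2img.__class__.__name__)  # StableDiffusionImg2ImgPipeline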
testlcm/backend/pipelines/lcm_lora.py
ADDED
@@ -0,0 +1,25 @@
+from diffusers import DiffusionPipeline, LCMScheduler
+import torch
+
+
+def get_lcm_lora_pipeline(
+    base_model_id: str,
+    lcm_lora_id: str,
+    use_local_model: bool,
+    torch_data_type: torch.dtype,
+):
+    pipeline = DiffusionPipeline.from_pretrained(
+        base_model_id,
+        torch_dtype=torch_data_type,
+        local_files_only=use_local_model,
+    )
+    pipeline.load_lora_weights(
+        lcm_lora_id,
+        local_files_only=use_local_model,
+    )
+    if "lcm" in lcm_lora_id.lower():
+        print("LCM LoRA model detected, so using the recommended LCMScheduler")
+        pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
+    pipeline.fuse_lora()
+    pipeline.unet.to(memory_format=torch.channels_last)
+    return pipeline
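
A usage sketch (not part of this commit); fuse_lora() bakes the adapter weights into the UNet, so inference pays no per-step LoRA overhead:

import torch
from backend.pipelines.lcm_lora import get_lcm_lora_pipeline

pipeline = get_lcm_lora_pipeline(
    base_model_id="Lykon/dreamshaper-8",
    lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
    use_local_model=False,
    torch_data_type=torch.float32,
)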
testlcm/backend/tiny_decoder.py
ADDED
@@ -0,0 +1,30 @@
+from constants import (
+    TAESD_MODEL,
+    TAESDXL_MODEL,
+    TAESD_MODEL_OPENVINO,
+    TAESDXL_MODEL_OPENVINO,
+)
+
+
+def get_tiny_decoder_vae_model(pipeline_class) -> str:
+    print(f"Pipeline class : {pipeline_class}")
+    if (
+        pipeline_class == "LatentConsistencyModelPipeline"
+        or pipeline_class == "StableDiffusionPipeline"
+        or pipeline_class == "StableDiffusionImg2ImgPipeline"
+    ):
+        return TAESD_MODEL
+    elif (
+        pipeline_class == "StableDiffusionXLPipeline"
+        or pipeline_class == "StableDiffusionXLImg2ImgPipeline"
+    ):
+        return TAESDXL_MODEL
+    elif (
+        pipeline_class == "OVStableDiffusionPipeline"
+        or pipeline_class == "OVStableDiffusionImg2ImgPipeline"
+    ):
+        return TAESD_MODEL_OPENVINO
+    elif pipeline_class == "OVStableDiffusionXLPipeline":
+        return TAESDXL_MODEL_OPENVINO
+    else:
+        raise Exception("No valid pipeline class found!")
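
The mapping is keyed on the pipeline class name string, not the class itself. A sketch (not part of this commit):

from backend.tiny_decoder import get_tiny_decoder_vae_model

print(get_tiny_decoder_vae_model("StableDiffusionPipeline"))    # madebyollin/taesd
print(get_tiny_decoder_vae_model("StableDiffusionXLPipeline"))  # madebyollin/taesdxl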
testlcm/constants.py
ADDED
@@ -0,0 +1,18 @@
+from os import environ
+
+APP_VERSION = "v1.0.0 beta 22"
+LCM_DEFAULT_MODEL = "stabilityai/sd-turbo"
+LCM_DEFAULT_MODEL_OPENVINO = "rupeshs/sd-turbo-openvino"
+APP_NAME = "FastSD CPU"
+APP_SETTINGS_FILE = "settings.yaml"
+RESULTS_DIRECTORY = "results"
+CONFIG_DIRECTORY = "configs"
+DEVICE = environ.get("DEVICE", "cpu")
+SD_MODELS_FILE = "stable-diffusion-models.txt"
+LCM_LORA_MODELS_FILE = "lcm-lora-models.txt"
+OPENVINO_LCM_MODELS_FILE = "openvino-lcm-models.txt"
+TAESD_MODEL = "madebyollin/taesd"
+TAESDXL_MODEL = "madebyollin/taesdxl"
+TAESD_MODEL_OPENVINO = "deinferno/taesd-openvino"
+LCM_MODELS_FILE = "lcm-models.txt"
+TAESDXL_MODEL_OPENVINO = "rupeshs/taesdxl-openvino"
testlcm/context.py
ADDED
@@ -0,0 +1,46 @@
+from typing import Any
+from app_settings import Settings
+from models.interface_types import InterfaceType
+from backend.lcm_text_to_image import LCMTextToImage
+from time import perf_counter
+from backend.image_saver import ImageSaver
+from pprint import pprint
+from state import get_settings
+
+
+class Context:
+    def __init__(
+        self,
+        interface_type: InterfaceType,
+        device="cpu",
+    ):
+        self.interface_type = interface_type
+        self.lcm_text_to_image = LCMTextToImage(device)
+
+    def generate_text_to_image(
+        self,
+        settings: Settings,
+        reshape: bool = False,
+        device: str = "cpu",
+    ) -> Any:
+        get_settings().save()
+        tick = perf_counter()
+        pprint(settings.lcm_diffusion_setting.model_dump())
+        if not settings.lcm_diffusion_setting.lcm_lora:
+            return None
+        self.lcm_text_to_image.init(
+            device,
+            settings.lcm_diffusion_setting,
+        )
+        images = self.lcm_text_to_image.generate(
+            settings.lcm_diffusion_setting,
+            reshape,
+        )
+        elapsed = perf_counter() - tick
+        ImageSaver.save_images(
+            settings.results_path,
+            images=images,
+            lcm_diffusion_setting=settings.lcm_diffusion_setting,
+        )
+        print(f"Latency : {elapsed:.2f} seconds")
+        return images
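
A usage sketch (not part of this commit), assuming get_settings() from state.py (not shown in this diff) returns the loaded AppSettings instance:

from context import Context
from models.interface_types import InterfaceType
from state import get_settings

context = Context(InterfaceType.GUI)
images = context.generate_text_to_image(get_settings().settings)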
testlcm/frontend/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (1.4 kB)
testlcm/frontend/gui/app_window.py
ADDED
|
@@ -0,0 +1,604 @@
+from PyQt5.QtWidgets import (
+    QWidget,
+    QPushButton,
+    QHBoxLayout,
+    QVBoxLayout,
+    QLabel,
+    QLineEdit,
+    QMainWindow,
+    QSlider,
+    QTabWidget,
+    QSpacerItem,
+    QSizePolicy,
+    QComboBox,
+    QCheckBox,
+    QTextEdit,
+    QToolButton,
+    QFileDialog,
+)
+from PyQt5 import QtWidgets, QtCore
+from PyQt5.QtGui import QPixmap, QDesktopServices
+from PyQt5.QtCore import QSize, QThreadPool, Qt, QUrl
+
+from PIL.ImageQt import ImageQt
+from constants import (
+    LCM_DEFAULT_MODEL,
+    LCM_DEFAULT_MODEL_OPENVINO,
+    APP_NAME,
+    APP_VERSION,
+    DEVICE,
+)
+from frontend.gui.image_generator_worker import ImageGeneratorWorker
+from app_settings import AppSettings
+from paths import FastStableDiffusionPaths
+from frontend.utils import (
+    enable_openvino_controls,
+    get_valid_model_id,
+    is_reshape_required,
+)
+from context import Context
+from models.interface_types import InterfaceType
+from backend.models.lcmdiffusion_setting import DiffusionTask
+
+# DPI scale fix
+QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
+QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
+
+
+class MainWindow(QMainWindow):
+    def __init__(self, config: AppSettings):
+        super().__init__()
+        self.config = config
+        self.setWindowTitle(APP_NAME)
+        self.setFixedSize(QSize(600, 670))
+        self.init_ui()
+        self.pipeline = None
+        self.threadpool = QThreadPool()
+        self.device = "cpu"
+        # Last-used generation settings; the OpenVINO pipeline must be
+        # reshaped whenever any of these change (see generate_image).
+        self.previous_width = 0
+        self.previous_height = 0
+        self.previous_model = ""
+        self.previous_num_of_images = 0
+        self.context = Context(InterfaceType.GUI)
+        self.init_ui_values()
+        self.gen_images = []
+        self.image_index = 0
+        print(f"Output path : {self.config.settings.results_path}")
+
+    def init_ui_values(self):
+        self.lcm_model.setEnabled(
+            not self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+        self.guidance.setValue(
+            int(self.config.settings.lcm_diffusion_setting.guidance_scale * 10)
+        )
+        self.seed_value.setEnabled(self.config.settings.lcm_diffusion_setting.use_seed)
+        self.safety_checker.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_safety_checker
+        )
+        self.use_openvino_check.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+        self.width.setCurrentText(
+            str(self.config.settings.lcm_diffusion_setting.image_width)
+        )
+        self.height.setCurrentText(
+            str(self.config.settings.lcm_diffusion_setting.image_height)
+        )
+        self.inference_steps.setValue(
+            int(self.config.settings.lcm_diffusion_setting.inference_steps)
+        )
+        self.seed_check.setChecked(self.config.settings.lcm_diffusion_setting.use_seed)
+        self.seed_value.setText(str(self.config.settings.lcm_diffusion_setting.seed))
+        self.use_local_model_folder.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_offline_model
+        )
+        self.results_path.setText(self.config.settings.results_path)
+        self.num_images.setValue(
+            self.config.settings.lcm_diffusion_setting.number_of_images
+        )
+        self.use_tae_sd.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder
+        )
+        self.use_lcm_lora.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora
+        )
+        self.lcm_model.setCurrentText(
+            get_valid_model_id(
+                self.config.lcm_models,
+                self.config.settings.lcm_diffusion_setting.lcm_model_id,
+                LCM_DEFAULT_MODEL,
+            )
+        )
+        self.base_model_id.setCurrentText(
+            get_valid_model_id(
+                self.config.stable_diffsuion_models,
+                self.config.settings.lcm_diffusion_setting.lcm_lora.base_model_id,
+            )
+        )
+        self.lcm_lora_id.setCurrentText(
+            get_valid_model_id(
+                self.config.lcm_lora_models,
+                self.config.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id,
+            )
+        )
+        self.openvino_lcm_model_id.setCurrentText(
+            get_valid_model_id(
+                self.config.openvino_lcm_models,
+                self.config.settings.lcm_diffusion_setting.openvino_lcm_model_id,
+                LCM_DEFAULT_MODEL_OPENVINO,
+            )
+        )
+        self.neg_prompt.setEnabled(
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora
+            or self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+        self.openvino_lcm_model_id.setEnabled(
+            self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+
+    def init_ui(self):
+        self.create_main_tab()
+        self.create_settings_tab()
+        self.create_about_tab()
+        self.show()
+
+    def create_main_tab(self):
+        self.img = QLabel("<<Image>>")
+        self.img.setAlignment(Qt.AlignCenter)
+        self.img.setFixedSize(QSize(512, 512))
+        self.vspacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
+
+        self.prompt = QTextEdit()
+        self.prompt.setPlaceholderText("A fantasy landscape")
+        self.prompt.setAcceptRichText(False)
+        self.neg_prompt = QTextEdit()
+        self.neg_prompt.setPlaceholderText("")
+        self.neg_prompt.setAcceptRichText(False)
+        self.neg_prompt_label = QLabel("Negative prompt (Set guidance scale > 1.0):")
+        self.generate = QPushButton("Generate")
+        self.generate.clicked.connect(self.text_to_image)
+        self.prompt.setFixedHeight(40)
+        self.neg_prompt.setFixedHeight(35)
+        self.browse_results = QPushButton("...")
+        self.browse_results.setFixedWidth(30)
+        self.browse_results.clicked.connect(self.on_open_results_folder)
+        self.browse_results.setToolTip("Open output folder")
+
+        hlayout = QHBoxLayout()
+        hlayout.addWidget(self.neg_prompt)
+        hlayout.addWidget(self.generate)
+        hlayout.addWidget(self.browse_results)
+
+        self.previous_img_btn = QToolButton()
+        self.previous_img_btn.setText("<")
+        self.previous_img_btn.clicked.connect(self.on_show_previous_image)
+        self.next_img_btn = QToolButton()
+        self.next_img_btn.setText(">")
+        self.next_img_btn.clicked.connect(self.on_show_next_image)
+        hlayout_nav = QHBoxLayout()
+        hlayout_nav.addWidget(self.previous_img_btn)
+        hlayout_nav.addWidget(self.img)
+        hlayout_nav.addWidget(self.next_img_btn)
+
+        vlayout = QVBoxLayout()
+        vlayout.addLayout(hlayout_nav)
+        vlayout.addItem(self.vspacer)
+        vlayout.addWidget(self.prompt)
+        vlayout.addWidget(self.neg_prompt_label)
+        vlayout.addLayout(hlayout)
+
+        self.tab_widget = QTabWidget(self)
+        self.tab_main = QWidget()
+        self.tab_settings = QWidget()
+        self.tab_about = QWidget()
+        self.tab_main.setLayout(vlayout)
+
+        self.tab_widget.addTab(self.tab_main, "Text to Image")
+        self.tab_widget.addTab(self.tab_settings, "Settings")
+        self.tab_widget.addTab(self.tab_about, "About")
+
+        self.setCentralWidget(self.tab_widget)
+        self.use_seed = False
+
+    def create_settings_tab(self):
+        self.lcm_model_label = QLabel("Latent Consistency Model:")
+        # self.lcm_model = QLineEdit(LCM_DEFAULT_MODEL)
+        self.lcm_model = QComboBox(self)
+        self.lcm_model.addItems(self.config.lcm_models)
+        self.lcm_model.currentIndexChanged.connect(self.on_lcm_model_changed)
+
+        self.use_lcm_lora = QCheckBox("Use LCM LoRA")
+        self.use_lcm_lora.setChecked(False)
+        self.use_lcm_lora.stateChanged.connect(self.use_lcm_lora_changed)
+
+        self.lora_base_model_id_label = QLabel("Lora base model ID :")
+        self.base_model_id = QComboBox(self)
+        self.base_model_id.addItems(self.config.stable_diffsuion_models)
+        self.base_model_id.currentIndexChanged.connect(self.on_base_model_id_changed)
+
+        self.lcm_lora_model_id_label = QLabel("LCM LoRA model ID :")
+        self.lcm_lora_id = QComboBox(self)
+        self.lcm_lora_id.addItems(self.config.lcm_lora_models)
+        self.lcm_lora_id.currentIndexChanged.connect(self.on_lcm_lora_id_changed)
+
+        self.inference_steps_value = QLabel("Number of inference steps: 4")
+        self.inference_steps = QSlider(orientation=Qt.Orientation.Horizontal)
+        self.inference_steps.setMaximum(25)
+        self.inference_steps.setMinimum(1)
+        self.inference_steps.setValue(4)
+        self.inference_steps.valueChanged.connect(self.update_steps_label)
+
+        self.num_images_value = QLabel("Number of images: 1")
+        self.num_images = QSlider(orientation=Qt.Orientation.Horizontal)
+        self.num_images.setMaximum(100)
+        self.num_images.setMinimum(1)
+        self.num_images.setValue(1)
+        self.num_images.valueChanged.connect(self.update_num_images_label)
+
+        # Guidance slider works in tenths: 10..20 maps to a scale of 1.0..2.0
+        self.guidance_value = QLabel("Guidance scale: 1")
+        self.guidance = QSlider(orientation=Qt.Orientation.Horizontal)
+        self.guidance.setMaximum(20)
+        self.guidance.setMinimum(10)
+        self.guidance.setValue(10)
+        self.guidance.valueChanged.connect(self.update_guidance_label)
+
+        self.width_value = QLabel("Width :")
+        self.width = QComboBox(self)
+        self.width.addItem("256")
+        self.width.addItem("512")
+        self.width.addItem("768")
+        self.width.addItem("1024")
+        self.width.setCurrentText("512")
+        self.width.currentIndexChanged.connect(self.on_width_changed)
+
+        self.height_value = QLabel("Height :")
+        self.height = QComboBox(self)
+        self.height.addItem("256")
+        self.height.addItem("512")
+        self.height.addItem("768")
+        self.height.addItem("1024")
+        self.height.setCurrentText("512")
+        self.height.currentIndexChanged.connect(self.on_height_changed)
+
+        self.seed_check = QCheckBox("Use seed")
+        self.seed_value = QLineEdit()
+        self.seed_value.setInputMask("9999999999")
+        self.seed_value.setText("123123")
+        self.seed_check.stateChanged.connect(self.seed_changed)
+
+        self.safety_checker = QCheckBox("Use safety checker")
+        self.safety_checker.setChecked(True)
+        self.safety_checker.stateChanged.connect(self.use_safety_checker_changed)
+
+        self.use_openvino_check = QCheckBox("Use OpenVINO")
+        self.use_openvino_check.setChecked(False)
+        self.openvino_model_label = QLabel("OpenVINO LCM model:")
+        self.use_local_model_folder = QCheckBox(
+            "Use locally cached model or downloaded model folder(offline)"
+        )
+        self.openvino_lcm_model_id = QComboBox(self)
+        self.openvino_lcm_model_id.addItems(self.config.openvino_lcm_models)
+        self.openvino_lcm_model_id.currentIndexChanged.connect(
+            self.on_openvino_lcm_model_id_changed
+        )
+
+        self.use_openvino_check.setEnabled(enable_openvino_controls())
+        self.use_local_model_folder.setChecked(False)
+        self.use_local_model_folder.stateChanged.connect(self.use_offline_model_changed)
+        self.use_openvino_check.stateChanged.connect(self.use_openvino_changed)
+
+        self.use_tae_sd = QCheckBox(
+            "Use Tiny Auto Encoder - TAESD (Fast, moderate quality)"
+        )
+        self.use_tae_sd.setChecked(False)
+        self.use_tae_sd.stateChanged.connect(self.use_tae_sd_changed)
+
+        hlayout = QHBoxLayout()
+        hlayout.addWidget(self.seed_check)
+        hlayout.addWidget(self.seed_value)
+        hspacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
+        slider_hspacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
+
+        self.results_path_label = QLabel("Output path:")
+        self.results_path = QLineEdit()
+        self.results_path.textChanged.connect(self.on_path_changed)
+        self.browse_folder_btn = QToolButton()
+        self.browse_folder_btn.setText("...")
+        self.browse_folder_btn.clicked.connect(self.on_browse_folder)
+
+        self.reset = QPushButton("Reset All")
+        self.reset.clicked.connect(self.reset_all_settings)
+
+        vlayout = QVBoxLayout()
+        vspacer = QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
+        vlayout.addItem(hspacer)
+        vlayout.setSpacing(3)
+        vlayout.addWidget(self.lcm_model_label)
+        vlayout.addWidget(self.lcm_model)
+        vlayout.addWidget(self.use_local_model_folder)
+        vlayout.addWidget(self.use_lcm_lora)
+        vlayout.addWidget(self.lora_base_model_id_label)
+        vlayout.addWidget(self.base_model_id)
+        vlayout.addWidget(self.lcm_lora_model_id_label)
+        vlayout.addWidget(self.lcm_lora_id)
+        vlayout.addWidget(self.use_openvino_check)
+        vlayout.addWidget(self.openvino_model_label)
+        vlayout.addWidget(self.openvino_lcm_model_id)
+        vlayout.addWidget(self.use_tae_sd)
+        vlayout.addItem(slider_hspacer)
+        vlayout.addWidget(self.inference_steps_value)
+        vlayout.addWidget(self.inference_steps)
+        vlayout.addWidget(self.num_images_value)
+        vlayout.addWidget(self.num_images)
+        vlayout.addWidget(self.width_value)
+        vlayout.addWidget(self.width)
+        vlayout.addWidget(self.height_value)
+        vlayout.addWidget(self.height)
+        vlayout.addWidget(self.guidance_value)
+        vlayout.addWidget(self.guidance)
+        vlayout.addLayout(hlayout)
+        vlayout.addWidget(self.safety_checker)
+
+        vlayout.addWidget(self.results_path_label)
+        hlayout_path = QHBoxLayout()
+        hlayout_path.addWidget(self.results_path)
+        hlayout_path.addWidget(self.browse_folder_btn)
+        vlayout.addLayout(hlayout_path)
+        self.tab_settings.setLayout(vlayout)
+        hlayout_reset = QHBoxLayout()
+        hspacer = QSpacerItem(20, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
+        hlayout_reset.addItem(hspacer)
+        hlayout_reset.addWidget(self.reset)
+        vlayout.addLayout(hlayout_reset)
+        vlayout.addItem(vspacer)
+
+    def create_about_tab(self):
+        self.label = QLabel()
+        self.label.setAlignment(Qt.AlignCenter)
+        self.label.setText(
+            f"""<h1>FastSD CPU {APP_VERSION}</h1>
+            <h3>(c)2023 - Rupesh Sreeraman</h3>
+            <h3>Faster stable diffusion on CPU</h3>
+            <h3>Based on Latent Consistency Models</h3>
+            <h3>GitHub : https://github.com/rupeshs/fastsdcpu/</h3>"""
+        )
+
+        vlayout = QVBoxLayout()
+        vlayout.addWidget(self.label)
+        self.tab_about.setLayout(vlayout)
+
+    def show_image(self, pixmap):
+        # Scale previews larger than the fixed 512x512 label, keeping aspect ratio
+        image_width = self.config.settings.lcm_diffusion_setting.image_width
+        image_height = self.config.settings.lcm_diffusion_setting.image_height
+        if image_width > 512 or image_height > 512:
+            new_width = 512 if image_width > 512 else image_width
+            new_height = 512 if image_height > 512 else image_height
+            self.img.setPixmap(
+                pixmap.scaled(
+                    new_width,
+                    new_height,
+                    Qt.KeepAspectRatio,
+                )
+            )
+        else:
+            self.img.setPixmap(pixmap)
+
+    def on_show_next_image(self):
+        if self.image_index != len(self.gen_images) - 1 and len(self.gen_images) > 0:
+            self.previous_img_btn.setEnabled(True)
+            self.image_index += 1
+            self.show_image(self.gen_images[self.image_index])
+            if self.image_index == len(self.gen_images) - 1:
+                self.next_img_btn.setEnabled(False)
+
+    def on_open_results_folder(self):
+        QDesktopServices.openUrl(QUrl.fromLocalFile(self.config.settings.results_path))
+
+    def on_show_previous_image(self):
+        if self.image_index != 0:
+            self.next_img_btn.setEnabled(True)
+            self.image_index -= 1
+            self.show_image(self.gen_images[self.image_index])
+            if self.image_index == 0:
+                self.previous_img_btn.setEnabled(False)
+
+    def on_path_changed(self, text):
+        self.config.settings.results_path = text
+
+    def on_browse_folder(self):
+        options = QFileDialog.Options()
+        options |= QFileDialog.ShowDirsOnly
+
+        folder_path = QFileDialog.getExistingDirectory(
+            self, "Select a Folder", "", options=options
+        )
+
+        if folder_path:
+            self.config.settings.results_path = folder_path
+            self.results_path.setText(folder_path)
+
+    def on_width_changed(self, index):
+        width_txt = self.width.itemText(index)
+        self.config.settings.lcm_diffusion_setting.image_width = int(width_txt)
+
+    def on_height_changed(self, index):
+        height_txt = self.height.itemText(index)
+        self.config.settings.lcm_diffusion_setting.image_height = int(height_txt)
+
+    def on_lcm_model_changed(self, index):
+        model_id = self.lcm_model.itemText(index)
+        self.config.settings.lcm_diffusion_setting.lcm_model_id = model_id
+
+    def on_base_model_id_changed(self, index):
+        model_id = self.base_model_id.itemText(index)
+        self.config.settings.lcm_diffusion_setting.lcm_lora.base_model_id = model_id
+
+    def on_lcm_lora_id_changed(self, index):
+        model_id = self.lcm_lora_id.itemText(index)
+        self.config.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = model_id
+
+    def on_openvino_lcm_model_id_changed(self, index):
+        model_id = self.openvino_lcm_model_id.itemText(index)
+        self.config.settings.lcm_diffusion_setting.openvino_lcm_model_id = model_id
+
+    def use_openvino_changed(self, state):
+        # stateChanged delivers the new Qt.CheckState as an int (2 == Qt.Checked)
+        if state == 2:
+            self.lcm_model.setEnabled(False)
+            self.use_lcm_lora.setEnabled(False)
+            self.lcm_lora_id.setEnabled(False)
+            self.base_model_id.setEnabled(False)
+            self.neg_prompt.setEnabled(True)
+            self.openvino_lcm_model_id.setEnabled(True)
+            self.config.settings.lcm_diffusion_setting.use_openvino = True
+        else:
+            self.lcm_model.setEnabled(True)
+            self.use_lcm_lora.setEnabled(True)
+            self.lcm_lora_id.setEnabled(True)
+            self.base_model_id.setEnabled(True)
+            self.neg_prompt.setEnabled(False)
+            self.openvino_lcm_model_id.setEnabled(False)
+            self.config.settings.lcm_diffusion_setting.use_openvino = False
+
+    def use_tae_sd_changed(self, state):
+        if state == 2:
+            self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder = True
+        else:
+            self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder = False
+
+    def use_offline_model_changed(self, state):
+        if state == 2:
+            self.config.settings.lcm_diffusion_setting.use_offline_model = True
+        else:
+            self.config.settings.lcm_diffusion_setting.use_offline_model = False
+
+    def use_lcm_lora_changed(self, state):
+        if state == 2:
+            self.lcm_model.setEnabled(False)
+            self.lcm_lora_id.setEnabled(True)
+            self.base_model_id.setEnabled(True)
+            self.neg_prompt.setEnabled(True)
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora = True
+        else:
+            self.lcm_model.setEnabled(True)
+            self.lcm_lora_id.setEnabled(False)
+            self.base_model_id.setEnabled(False)
+            self.neg_prompt.setEnabled(False)
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora = False
+
+    def use_safety_checker_changed(self, state):
+        if state == 2:
+            self.config.settings.lcm_diffusion_setting.use_safety_checker = True
+        else:
+            self.config.settings.lcm_diffusion_setting.use_safety_checker = False
+
+    def update_steps_label(self, value):
+        self.inference_steps_value.setText(f"Number of inference steps: {value}")
+        self.config.settings.lcm_diffusion_setting.inference_steps = value
+
+    def update_num_images_label(self, value):
+        self.num_images_value.setText(f"Number of images: {value}")
+        self.config.settings.lcm_diffusion_setting.number_of_images = value
+
+    def update_guidance_label(self, value):
+        val = round(int(value) / 10, 1)
+        self.guidance_value.setText(f"Guidance scale: {val}")
+        self.config.settings.lcm_diffusion_setting.guidance_scale = val
+
+    def seed_changed(self, state):
+        if state == 2:
+            self.seed_value.setEnabled(True)
+            self.config.settings.lcm_diffusion_setting.use_seed = True
+        else:
+            self.seed_value.setEnabled(False)
+            self.config.settings.lcm_diffusion_setting.use_seed = False
+
+    def get_seed_value(self) -> int:
+        use_seed = self.config.settings.lcm_diffusion_setting.use_seed
+        # -1 means no fixed seed when seeding is disabled
+        seed_value = int(self.seed_value.text()) if use_seed else -1
+        return seed_value
+
+    def generate_image(self):
+        self.config.settings.lcm_diffusion_setting.seed = self.get_seed_value()
+        self.config.settings.lcm_diffusion_setting.prompt = self.prompt.toPlainText()
+        self.config.settings.lcm_diffusion_setting.negative_prompt = (
+            self.neg_prompt.toPlainText()
+        )
+        self.config.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = (
+            self.lcm_lora_id.currentText()
+        )
+        self.config.settings.lcm_diffusion_setting.lcm_lora.base_model_id = (
+            self.base_model_id.currentText()
+        )
+
+        if self.config.settings.lcm_diffusion_setting.use_openvino:
+            model_id = self.openvino_lcm_model_id.currentText()
+        else:
+            model_id = self.lcm_model.currentText()
+
+        self.config.settings.lcm_diffusion_setting.lcm_model_id = model_id
+
+        reshape_required = False
+        if self.config.settings.lcm_diffusion_setting.use_openvino:
+            # Detect dimension change
+            reshape_required = is_reshape_required(
+                self.previous_width,
+                self.config.settings.lcm_diffusion_setting.image_width,
+                self.previous_height,
+                self.config.settings.lcm_diffusion_setting.image_height,
+                self.previous_model,
+                model_id,
+                self.previous_num_of_images,
+                self.config.settings.lcm_diffusion_setting.number_of_images,
+            )
+        self.config.settings.lcm_diffusion_setting.diffusion_task = (
+            DiffusionTask.text_to_image.value
+        )
+        images = self.context.generate_text_to_image(
+            self.config.settings,
+            reshape_required,
+            DEVICE,
+        )
+        self.image_index = 0
+        self.gen_images = []
+        for img in images:
+            im = ImageQt(img).copy()
+            pixmap = QPixmap.fromImage(im)
+            self.gen_images.append(pixmap)
+
+        if len(self.gen_images) > 1:
+            self.next_img_btn.setEnabled(True)
+            self.previous_img_btn.setEnabled(False)
+        else:
+            self.next_img_btn.setEnabled(False)
+            self.previous_img_btn.setEnabled(False)
+
+        self.show_image(self.gen_images[0])
+
+        self.previous_width = self.config.settings.lcm_diffusion_setting.image_width
+        self.previous_height = self.config.settings.lcm_diffusion_setting.image_height
+        self.previous_model = model_id
+        self.previous_num_of_images = (
+            self.config.settings.lcm_diffusion_setting.number_of_images
+        )
+
+    def text_to_image(self):
+        self.img.setText("Please wait...")
+        worker = ImageGeneratorWorker(self.generate_image)
+        self.threadpool.start(worker)
+
+    def closeEvent(self, event):
+        self.config.settings.lcm_diffusion_setting.seed = self.get_seed_value()
+        print(self.config.settings.lcm_diffusion_setting)
+        print("Saving settings")
+        self.config.save()
+
+    def reset_all_settings(self):
+        self.use_local_model_folder.setChecked(False)
+        self.width.setCurrentText("512")
+        self.height.setCurrentText("512")
+        self.inference_steps.setValue(4)
+        self.guidance.setValue(10)
+        self.use_openvino_check.setChecked(False)
+        self.seed_check.setChecked(False)
+        self.safety_checker.setChecked(False)
+        self.results_path.setText(FastStableDiffusionPaths().get_results_path())
+        self.use_tae_sd.setChecked(False)
+        self.use_lcm_lora.setChecked(False)
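
A note on the state == 2 checks in the handlers above: PyQt5's stateChanged signal passes the new Qt.CheckState as a plain int, where 0 is Qt.Unchecked and 2 is Qt.Checked. A minimal equivalent sketch of one handler written against the named constant, shown only to make the magic number explicit:

    from PyQt5.QtCore import Qt

    def use_tae_sd_changed(self, state):
        # True exactly when the box is ticked (Qt.Checked == 2)
        self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder = (
            state == Qt.Checked
        )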
testlcm/frontend/gui/image_generator_worker.py
ADDED
@@ -0,0 +1,37 @@
+from PyQt5.QtCore import (
+    QObject,
+    QRunnable,
+    pyqtSignal,
+    pyqtSlot,
+)
+import traceback
+import sys
+
+
+class WorkerSignals(QObject):
+    finished = pyqtSignal()
+    error = pyqtSignal(tuple)
+    result = pyqtSignal(object)
+
+
+class ImageGeneratorWorker(QRunnable):
+    def __init__(self, fn, *args, **kwargs):
+        super(ImageGeneratorWorker, self).__init__()
+        self.fn = fn
+        self.args = args
+        self.kwargs = kwargs
+        self.signals = WorkerSignals()
+
+    @pyqtSlot()
+    def run(self):
+        # Run the wrapped callable off the GUI thread and report the outcome
+        # through Qt signals, so connected slots execute on the receiver's thread.
+        try:
+            result = self.fn(*self.args, **self.kwargs)
+        except Exception:
+            traceback.print_exc()
+            exctype, value = sys.exc_info()[:2]
+            self.signals.error.emit((exctype, value, traceback.format_exc()))
+        else:
+            self.signals.result.emit(result)
+        finally:
+            self.signals.finished.emit()
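
MainWindow.text_to_image above shows the intended use of this worker: wrap the long-running callable and hand it to a QThreadPool so image generation never blocks the UI thread. A minimal sketch with the optional signal hooks attached (the print callbacks are illustrative, not part of the app):

    worker = ImageGeneratorWorker(self.generate_image)
    worker.signals.result.connect(lambda result: print("result:", result))
    worker.signals.error.connect(lambda err: print("generation failed:", err[1]))
    worker.signals.finished.connect(lambda: print("worker finished"))
    self.threadpool.start(worker)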
testlcm/frontend/gui/ui.py
ADDED
@@ -0,0 +1,15 @@
+from typing import List
+from frontend.gui.app_window import MainWindow
+from PyQt5.QtWidgets import QApplication
+from app_settings import AppSettings
+
+
+def start_gui(
+    argv: List[str],
+    app_settings: AppSettings,
+):
+    # Use the argv the caller passed in rather than re-reading sys.argv
+    app = QApplication(argv)
+    window = MainWindow(app_settings)
+    window.show()
+    app.exec()
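
A minimal launch sketch for start_gui; the AppSettings construction and the load() call are assumptions about app_settings.py elsewhere in this upload, not a documented API:

    import sys
    from app_settings import AppSettings
    from frontend.gui.ui import start_gui

    if __name__ == "__main__":
        app_settings = AppSettings()
        app_settings.load()  # assumed to read configs/settings.yaml
        start_gui(sys.argv, app_settings)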