Upload 115 files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full set of changes.
- .gitattributes +9 -0
- .gitignore +8 -0
- Dockerfile +7 -0
- LICENSE +21 -0
- THIRD-PARTY-LICENSES +143 -0
- benchmark-openvino.bat +23 -0
- benchmark.bat +23 -0
- configs/lcm-lora-models.txt +4 -0
- configs/lcm-models.txt +8 -0
- configs/openvino-lcm-models.txt +10 -0
- configs/stable-diffusion-models.txt +7 -0
- controlnet_models/Readme.txt +3 -0
- docs/images/2steps-inference.jpg +0 -0
- docs/images/ARCGPU.png +0 -0
- docs/images/comfyui-workflow.png +3 -0
- docs/images/fastcpu-cli.png +0 -0
- docs/images/fastcpu-webui.png +3 -0
- docs/images/fastsdcpu-android-termux-pixel7.png +3 -0
- docs/images/fastsdcpu-api.png +0 -0
- docs/images/fastsdcpu-gui.jpg +3 -0
- docs/images/fastsdcpu-mac-gui.jpg +0 -0
- docs/images/fastsdcpu-screenshot.png +3 -0
- docs/images/fastsdcpu-webui.png +3 -0
- docs/images/fastsdcpu_claude.jpg +3 -0
- docs/images/fastsdcpu_flux_on_cpu.png +3 -0
- docs/images/openwebui-fastsd.jpg +3 -0
- docs/images/openwebui-settings.png +0 -0
- install-mac.sh +36 -0
- install.bat +38 -0
- install.sh +44 -0
- lora_models/Readme.txt +3 -0
- models/gguf/clip/readme.txt +1 -0
- models/gguf/diffusion/readme.txt +1 -0
- models/gguf/t5xxl/readme.txt +1 -0
- models/gguf/vae/readme.txt +1 -0
- requirements.txt +21 -0
- src/__init__.py +0 -0
- src/app.py +554 -0
- src/app_settings.py +124 -0
- src/backend/__init__.py +0 -0
- src/backend/annotators/canny_control.py +15 -0
- src/backend/annotators/control_interface.py +12 -0
- src/backend/annotators/depth_control.py +15 -0
- src/backend/annotators/image_control_factory.py +31 -0
- src/backend/annotators/lineart_control.py +11 -0
- src/backend/annotators/mlsd_control.py +10 -0
- src/backend/annotators/normal_control.py +10 -0
- src/backend/annotators/pose_control.py +10 -0
- src/backend/annotators/shuffle_control.py +10 -0
- src/backend/annotators/softedge_control.py +10 -0
.gitattributes
CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+docs/images/comfyui-workflow.png filter=lfs diff=lfs merge=lfs -text
+docs/images/fastcpu-webui.png filter=lfs diff=lfs merge=lfs -text
+docs/images/fastsdcpu_claude.jpg filter=lfs diff=lfs merge=lfs -text
+docs/images/fastsdcpu_flux_on_cpu.png filter=lfs diff=lfs merge=lfs -text
+docs/images/fastsdcpu-android-termux-pixel7.png filter=lfs diff=lfs merge=lfs -text
+docs/images/fastsdcpu-gui.jpg filter=lfs diff=lfs merge=lfs -text
+docs/images/fastsdcpu-screenshot.png filter=lfs diff=lfs merge=lfs -text
+docs/images/fastsdcpu-webui.png filter=lfs diff=lfs merge=lfs -text
+docs/images/openwebui-fastsd.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,8 @@
+env
+env_old
+*.bak
+*.pyc
+__pycache__
+results
+# excluding user settings for the GUI frontend
+configs/settings.yaml
Dockerfile
ADDED
@@ -0,0 +1,7 @@
+FROM python:3.13.0-slim
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
+EXPOSE 8000
+CMD ["python", "src/app.py", "--api"]
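Usage sketch (not part of the diff): the Dockerfile installs requirements.txt, exposes port 8000, and starts the API server, so the image could be built and run roughly as follows; the image tag fastsdcpu is an assumption, not something defined in this commit.

    # build from the repository root (tag name is illustrative)
    docker build -t fastsdcpu .
    # run the API server started by CMD ["python", "src/app.py", "--api"]
    docker run -p 8000:8000 fastsdcpu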
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Rupesh Sreeraman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
THIRD-PARTY-LICENSES
ADDED
@@ -0,0 +1,143 @@
+stablediffusion.cpp - MIT
+
+OpenVINO stablediffusion engine - Apache 2
+
+SD Turbo - STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT
+
+MIT License
+
+Copyright (c) 2023 leejet
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following
+boilerplate notice, with the fields enclosed by brackets "[]"
+replaced with your own identifying information. (Don't include
+the brackets!) The text should be enclosed in the appropriate
+comment syntax for the file format. We also recommend that a
+file or class name and description of purpose be included on the
+same "printed page" as the copyright notice for easier
+identification within third-party archives.
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+<http://www.apache.org/licenses/LICENSE-2.0>
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+
+STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT
+Dated: November 28, 2023
+
+By using or distributing any portion or element of the Models, Software, Software Products or Derivative Works, you agree to be bound by this Agreement.
+
+"Agreement" means this Stable Non-Commercial Research Community License Agreement.
+
+“AUP” means the Stability AI Acceptable Use Policy available at <https://stability.ai/use-policy>, as may be updated from time to time.
+
+"Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model.
+
+“Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software.
+
+"Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
+
+“Model(s)" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement.
+
+“Non-Commercial Uses” means exercising any of the rights granted herein for the purpose of research or non-commercial purposes. Non-Commercial Uses does not include any production use of the Software Products or any Derivative Works.
+
+"Stability AI" or "we" means Stability AI Ltd. and its affiliates.
+
+"Software" means Stability AI’s proprietary software made available under this Agreement.
+
+“Software Products” means the Models, Software and Documentation, individually or in any combination.
+
+1. License Rights and Redistribution.
+
+a. Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned or controlled by Stability AI embodied in the Software Products to use, reproduce, distribute, and create Derivative Works of, the Software Products, in each case for Non-Commercial Uses only.
+
+b. You may not use the Software Products or Derivative Works to enable third parties to use the Software Products or Derivative Works as part of your hosted service or via your APIs, whether you are adding substantial additional functionality thereto or not. Merely distributing the Software Products or Derivative Works for download online without offering any related service (ex. by distributing the Models on HuggingFace) is not a violation of this subsection. If you wish to use the Software Products or any Derivative Works for commercial or production use or you wish to make the Software Products or any Derivative Works available to third parties via your hosted service or your APIs, contact Stability AI at <https://stability.ai/contact>.
+
+c. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Non-Commercial Research Community License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified.
+
+2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THERE FROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS.
+
+3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
+
+4. Intellectual Property.
+
+a. No trademark licenses are granted under this Agreement, and in connection with the Software Products or Derivative Works, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products or Derivative Works.
+
+b. Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works
+
+c. If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products or Derivative Works in violation of this Agreement.
+
+5. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of any Software Products or Derivative Works. Sections 2-4 shall survive the termination of this Agreement.
benchmark-openvino.bat
ADDED
@@ -0,0 +1,23 @@
+@echo off
+setlocal
+
+set "PYTHON_COMMAND=python"
+
+call python --version > nul 2>&1
+if %errorlevel% equ 0 (
+    echo Python command check :OK
+) else (
+    echo "Error: Python command not found, please install Python (Recommended : Python 3.10 or Python 3.11) and try again"
+    pause
+    exit /b 1
+
+)
+
+:check_python_version
+for /f "tokens=2" %%I in ('%PYTHON_COMMAND% --version 2^>^&1') do (
+    set "python_version=%%I"
+)
+
+echo Python version: %python_version%
+
+call "%~dp0env\Scripts\activate.bat" && %PYTHON_COMMAND% src/app.py -b --use_openvino --openvino_lcm_model_id "rupeshs/sd-turbo-openvino"
benchmark.bat
ADDED
@@ -0,0 +1,23 @@
+@echo off
+setlocal
+
+set "PYTHON_COMMAND=python"
+
+call python --version > nul 2>&1
+if %errorlevel% equ 0 (
+    echo Python command check :OK
+) else (
+    echo "Error: Python command not found, please install Python (Recommended : Python 3.10 or Python 3.11) and try again"
+    pause
+    exit /b 1
+
+)
+
+:check_python_version
+for /f "tokens=2" %%I in ('%PYTHON_COMMAND% --version 2^>^&1') do (
+    set "python_version=%%I"
+)
+
+echo Python version: %python_version%
+
+call "%~dp0env\Scripts\activate.bat" && %PYTHON_COMMAND% src/app.py -b
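Note (not part of the diff): both benchmark scripts activate the env virtual environment created by the installer and call src/app.py with the -b flag; benchmark-openvino.bat additionally passes --use_openvino and an OpenVINO model ID. A rough Linux/macOS equivalent, assuming install.sh has already created env/, would be:

    # activate the virtual environment created by install.sh (assumed location)
    source env/bin/activate
    # PyTorch CPU benchmark
    python src/app.py -b
    # OpenVINO benchmark with the SD Turbo OpenVINO model
    python src/app.py -b --use_openvino --openvino_lcm_model_id "rupeshs/sd-turbo-openvino"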
configs/lcm-lora-models.txt
ADDED
@@ -0,0 +1,4 @@
+latent-consistency/lcm-lora-sdv1-5
+latent-consistency/lcm-lora-sdxl
+latent-consistency/lcm-lora-ssd-1b
+rupeshs/hypersd-sd1-5-1-step-lora
configs/lcm-models.txt
ADDED
@@ -0,0 +1,8 @@
+stabilityai/sd-turbo
+rupeshs/sdxs-512-0.9-orig-vae
+rupeshs/hyper-sd-sdxl-1-step
+rupeshs/SDXL-Lightning-2steps
+stabilityai/sdxl-turbo
+SimianLuo/LCM_Dreamshaper_v7
+latent-consistency/lcm-sdxl
+latent-consistency/lcm-ssd-1b
configs/openvino-lcm-models.txt
ADDED
@@ -0,0 +1,10 @@
+rupeshs/sd-turbo-openvino
+rupeshs/sdxs-512-0.9-openvino
+rupeshs/hyper-sd-sdxl-1-step-openvino-int8
+rupeshs/SDXL-Lightning-2steps-openvino-int8
+rupeshs/sdxl-turbo-openvino-int8
+rupeshs/LCM-dreamshaper-v7-openvino
+Disty0/LCM_SoteMix
+rupeshs/sd15-lcm-square-openvino-int8
+OpenVINO/FLUX.1-schnell-int4-ov
+rupeshs/sana-sprint-0.6b-openvino-int4
configs/stable-diffusion-models.txt
ADDED
@@ -0,0 +1,7 @@
+Lykon/dreamshaper-8
+Fictiverse/Stable_Diffusion_PaperCut_Model
+stabilityai/stable-diffusion-xl-base-1.0
+runwayml/stable-diffusion-v1-5
+segmind/SSD-1B
+stablediffusionapi/anything-v5
+prompthero/openjourney-v4
controlnet_models/Readme.txt
ADDED
@@ -0,0 +1,3 @@
+Place your ControlNet models in this folder.
+You can download controlnet model (.safetensors) from https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/tree/main
+E.g: https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/blob/main/control_v11p_sd15_canny_fp16.safetensors
docs/images/2steps-inference.jpg
ADDED
docs/images/ARCGPU.png
ADDED
docs/images/comfyui-workflow.png
ADDED (Git LFS)
docs/images/fastcpu-cli.png
ADDED
docs/images/fastcpu-webui.png
ADDED (Git LFS)
docs/images/fastsdcpu-android-termux-pixel7.png
ADDED (Git LFS)
docs/images/fastsdcpu-api.png
ADDED
docs/images/fastsdcpu-gui.jpg
ADDED (Git LFS)
docs/images/fastsdcpu-mac-gui.jpg
ADDED
docs/images/fastsdcpu-screenshot.png
ADDED (Git LFS)
docs/images/fastsdcpu-webui.png
ADDED (Git LFS)
docs/images/fastsdcpu_claude.jpg
ADDED (Git LFS)
docs/images/fastsdcpu_flux_on_cpu.png
ADDED (Git LFS)
docs/images/openwebui-fastsd.jpg
ADDED (Git LFS)
docs/images/openwebui-settings.png
ADDED
install-mac.sh
ADDED
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+echo Starting FastSD CPU env installation...
+set -e
+PYTHON_COMMAND="python3"
+
+if ! command -v python3 &>/dev/null; then
+    if ! command -v python &>/dev/null; then
+        echo "Error: Python not found, please install python 3.8 or higher and try again"
+        exit 1
+    fi
+fi
+
+if command -v python &>/dev/null; then
+    PYTHON_COMMAND="python"
+fi
+
+echo "Found $PYTHON_COMMAND command"
+
+python_version=$($PYTHON_COMMAND --version 2>&1 | awk '{print $2}')
+echo "Python version : $python_version"
+
+if ! command -v uv &>/dev/null; then
+    echo "Error: uv command not found,please install https://docs.astral.sh/uv/getting-started/installation/#__tabbed_1_1 and try again."
+    exit 1
+fi
+
+BASEDIR=$(pwd)
+
+uv venv --python 3.11.6 "$BASEDIR/env"
+# shellcheck disable=SC1091
+source "$BASEDIR/env/bin/activate"
+uv pip install torch
+uv pip install -r "$BASEDIR/requirements.txt"
+chmod +x "start.sh"
+chmod +x "start-webui.sh"
+read -n1 -r -p "FastSD CPU installation completed,press any key to continue..." key
install.bat
ADDED
@@ -0,0 +1,38 @@
+
+@echo off
+setlocal
+echo Starting FastSD CPU env installation...
+
+set "PYTHON_COMMAND=python"
+
+call python --version > nul 2>&1
+if %errorlevel% equ 0 (
+    echo Python command check :OK
+) else (
+    echo "Error: Python command not found,please install Python(Recommended : Python 3.10 or Python 3.11) and try again."
+    pause
+    exit /b 1
+
+)
+
+call uv --version > nul 2>&1
+if %errorlevel% equ 0 (
+    echo uv command check :OK
+) else (
+    echo "Error: uv command not found,please install https://docs.astral.sh/uv/getting-started/installation/#__tabbed_1_2 and try again."
+    pause
+    exit /b 1
+
+)
+:check_python_version
+for /f "tokens=2" %%I in ('%PYTHON_COMMAND% --version 2^>^&1') do (
+    set "python_version=%%I"
+)
+
+echo Python version: %python_version%
+
+uv venv --python 3.11.6 "%~dp0env"
+call "%~dp0env\Scripts\activate.bat" && uv pip install torch --index-url https://download.pytorch.org/whl/cpu
+call "%~dp0env\Scripts\activate.bat" && uv pip install -r "%~dp0requirements.txt"
+echo FastSD CPU env installation completed.
+pause
install.sh
ADDED
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+echo Starting FastSD CPU env installation...
+set -e
+PYTHON_COMMAND="python3"
+
+if ! command -v python3 &>/dev/null; then
+    if ! command -v python &>/dev/null; then
+        echo "Error: Python not found, please install python 3.8 or higher and try again"
+        exit 1
+    fi
+fi
+
+if command -v python &>/dev/null; then
+    PYTHON_COMMAND="python"
+fi
+
+echo "Found $PYTHON_COMMAND command"
+
+python_version=$($PYTHON_COMMAND --version 2>&1 | awk '{print $2}')
+echo "Python version : $python_version"
+
+if ! command -v uv &>/dev/null; then
+    echo "Error: uv command not found,please install https://docs.astral.sh/uv/getting-started/installation/#__tabbed_1_1 and try again."
+    exit 1
+fi
+
+BASEDIR=$(pwd)
+
+uv venv --python 3.11.6 "$BASEDIR/env"
+# shellcheck disable=SC1091
+source "$BASEDIR/env/bin/activate"
+uv pip install torch --index-url https://download.pytorch.org/whl/cpu
+if [[ "$1" == "--disable-gui" ]]; then
+    #! For termux , we don't need Qt based GUI
+    packages="$(grep -v "^ *#\|^PyQt5" requirements.txt | grep .)"
+    # shellcheck disable=SC2086
+    uv pip install $packages
+else
+    uv pip install -r "$BASEDIR/requirements.txt"
+fi
+
+chmod +x "start.sh"
+chmod +x "start-webui.sh"
+read -n1 -r -p "FastSD CPU installation completed,press any key to continue..." key
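Usage sketch (not part of the diff): install.sh is run from the repository root after installing uv; the --disable-gui branch filters PyQt5 (and comment lines) out of requirements.txt for Termux or other headless setups.

    chmod +x install.sh
    ./install.sh                 # full install, including the PyQt5 desktop GUI
    ./install.sh --disable-gui   # headless/Termux install without PyQt5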
lora_models/Readme.txt
ADDED
@@ -0,0 +1,3 @@
+Place your lora models in this folder.
+You can download lora model (.safetensors/Safetensor) from Civitai (https://civitai.com/) or Hugging Face(https://huggingface.co/)
+E.g: https://civitai.com/models/207984/cutecartoonredmond-15v-cute-cartoon-lora-for-liberteredmond-sd-15?modelVersionId=234192
models/gguf/clip/readme.txt
ADDED
@@ -0,0 +1 @@
+Place CLIP model files here
models/gguf/diffusion/readme.txt
ADDED
@@ -0,0 +1 @@
+Place your diffusion gguf model files here
models/gguf/t5xxl/readme.txt
ADDED
@@ -0,0 +1 @@
+Place T5-XXL model files here
models/gguf/vae/readme.txt
ADDED
@@ -0,0 +1 @@
+Place VAE model files here
requirements.txt
ADDED
@@ -0,0 +1,21 @@
+accelerate==1.6.0
+diffusers==0.33.0
+transformers==4.48.0
+PyQt5
+Pillow==9.4.0
+openvino==2025.1.0
+optimum-intel==1.23.0
+onnx==1.16.0
+numpy==1.26.4
+onnxruntime==1.17.3
+pydantic
+typing-extensions==4.8.0
+pyyaml==6.0.1
+gradio==5.6.0
+peft==0.6.1
+opencv-python==4.8.1.78
+omegaconf==2.3.0
+controlnet-aux==0.0.7
+mediapipe>=0.10.9
+tomesd==0.1.3
+fastapi-mcp==0.3.0
src/__init__.py
ADDED
File without changes
src/app.py
ADDED
@@ -0,0 +1,554 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
from argparse import ArgumentParser
|
3 |
+
|
4 |
+
from PIL import Image
|
5 |
+
|
6 |
+
import constants
|
7 |
+
from backend.controlnet import controlnet_settings_from_dict
|
8 |
+
from backend.device import get_device_name
|
9 |
+
from backend.models.gen_images import ImageFormat
|
10 |
+
from backend.models.lcmdiffusion_setting import DiffusionTask
|
11 |
+
from backend.upscale.tiled_upscale import generate_upscaled_image
|
12 |
+
from constants import APP_VERSION, DEVICE
|
13 |
+
from frontend.webui.image_variations_ui import generate_image_variations
|
14 |
+
from models.interface_types import InterfaceType
|
15 |
+
from paths import FastStableDiffusionPaths, ensure_path
|
16 |
+
from state import get_context, get_settings
|
17 |
+
from utils import show_system_info
|
18 |
+
|
19 |
+
parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
|
20 |
+
parser.add_argument(
|
21 |
+
"-s",
|
22 |
+
"--share",
|
23 |
+
action="store_true",
|
24 |
+
help="Create sharable link(Web UI)",
|
25 |
+
required=False,
|
26 |
+
)
|
27 |
+
group = parser.add_mutually_exclusive_group(required=False)
|
28 |
+
group.add_argument(
|
29 |
+
"-g",
|
30 |
+
"--gui",
|
31 |
+
action="store_true",
|
32 |
+
help="Start desktop GUI",
|
33 |
+
)
|
34 |
+
group.add_argument(
|
35 |
+
"-w",
|
36 |
+
"--webui",
|
37 |
+
action="store_true",
|
38 |
+
help="Start Web UI",
|
39 |
+
)
|
40 |
+
group.add_argument(
|
41 |
+
"-a",
|
42 |
+
"--api",
|
43 |
+
action="store_true",
|
44 |
+
help="Start Web API server",
|
45 |
+
)
|
46 |
+
group.add_argument(
|
47 |
+
"-m",
|
48 |
+
"--mcp",
|
49 |
+
action="store_true",
|
50 |
+
help="Start MCP(Model Context Protocol) server",
|
51 |
+
)
|
52 |
+
group.add_argument(
|
53 |
+
"-r",
|
54 |
+
"--realtime",
|
55 |
+
action="store_true",
|
56 |
+
help="Start realtime inference UI(experimental)",
|
57 |
+
)
|
58 |
+
group.add_argument(
|
59 |
+
"-v",
|
60 |
+
"--version",
|
61 |
+
action="store_true",
|
62 |
+
help="Version",
|
63 |
+
)
|
64 |
+
|
65 |
+
parser.add_argument(
|
66 |
+
"-b",
|
67 |
+
"--benchmark",
|
68 |
+
action="store_true",
|
69 |
+
help="Run inference benchmark on the selected device",
|
70 |
+
)
|
71 |
+
parser.add_argument(
|
72 |
+
"--lcm_model_id",
|
73 |
+
type=str,
|
74 |
+
help="Model ID or path,Default stabilityai/sd-turbo",
|
75 |
+
default="stabilityai/sd-turbo",
|
76 |
+
)
|
77 |
+
parser.add_argument(
|
78 |
+
"--openvino_lcm_model_id",
|
79 |
+
type=str,
|
80 |
+
help="OpenVINO Model ID or path,Default rupeshs/sd-turbo-openvino",
|
81 |
+
default="rupeshs/sd-turbo-openvino",
|
82 |
+
)
|
83 |
+
parser.add_argument(
|
84 |
+
"--prompt",
|
85 |
+
type=str,
|
86 |
+
help="Describe the image you want to generate",
|
87 |
+
default="",
|
88 |
+
)
|
89 |
+
parser.add_argument(
|
90 |
+
"--negative_prompt",
|
91 |
+
type=str,
|
92 |
+
help="Describe what you want to exclude from the generation",
|
93 |
+
default="",
|
94 |
+
)
|
95 |
+
parser.add_argument(
|
96 |
+
"--image_height",
|
97 |
+
type=int,
|
98 |
+
help="Height of the image",
|
99 |
+
default=512,
|
100 |
+
)
|
101 |
+
parser.add_argument(
|
102 |
+
"--image_width",
|
103 |
+
type=int,
|
104 |
+
help="Width of the image",
|
105 |
+
default=512,
|
106 |
+
)
|
107 |
+
parser.add_argument(
|
108 |
+
"--inference_steps",
|
109 |
+
type=int,
|
110 |
+
help="Number of steps,default : 1",
|
111 |
+
default=1,
|
112 |
+
)
|
113 |
+
parser.add_argument(
|
114 |
+
"--guidance_scale",
|
115 |
+
type=float,
|
116 |
+
help="Guidance scale,default : 1.0",
|
117 |
+
default=1.0,
|
118 |
+
)
|
119 |
+
|
120 |
+
parser.add_argument(
|
121 |
+
"--number_of_images",
|
122 |
+
type=int,
|
123 |
+
help="Number of images to generate ,default : 1",
|
124 |
+
default=1,
|
125 |
+
)
|
126 |
+
parser.add_argument(
|
127 |
+
"--seed",
|
128 |
+
type=int,
|
129 |
+
help="Seed,default : -1 (disabled) ",
|
130 |
+
default=-1,
|
131 |
+
)
|
132 |
+
parser.add_argument(
|
133 |
+
"--use_openvino",
|
134 |
+
action="store_true",
|
135 |
+
help="Use OpenVINO model",
|
136 |
+
)
|
137 |
+
|
138 |
+
parser.add_argument(
|
139 |
+
"--use_offline_model",
|
140 |
+
action="store_true",
|
141 |
+
help="Use offline model",
|
142 |
+
)
|
143 |
+
parser.add_argument(
|
144 |
+
"--clip_skip",
|
145 |
+
type=int,
|
146 |
+
help="CLIP Skip (1-12), default : 1 (disabled) ",
|
147 |
+
default=1,
|
148 |
+
)
|
149 |
+
parser.add_argument(
|
150 |
+
"--token_merging",
|
151 |
+
type=float,
|
152 |
+
help="Token merging scale, 0.0 - 1.0, default : 0.0",
|
153 |
+
default=0.0,
|
154 |
+
)
|
155 |
+
|
156 |
+
parser.add_argument(
|
157 |
+
"--use_safety_checker",
|
158 |
+
action="store_true",
|
159 |
+
help="Use safety checker",
|
160 |
+
)
|
161 |
+
parser.add_argument(
|
162 |
+
"--use_lcm_lora",
|
163 |
+
action="store_true",
|
164 |
+
help="Use LCM-LoRA",
|
165 |
+
)
|
166 |
+
parser.add_argument(
|
167 |
+
"--base_model_id",
|
168 |
+
type=str,
|
169 |
+
help="LCM LoRA base model ID,Default Lykon/dreamshaper-8",
|
170 |
+
default="Lykon/dreamshaper-8",
|
171 |
+
)
|
172 |
+
parser.add_argument(
|
173 |
+
"--lcm_lora_id",
|
174 |
+
type=str,
|
175 |
+
help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5",
|
176 |
+
default="latent-consistency/lcm-lora-sdv1-5",
|
177 |
+
)
|
178 |
+
parser.add_argument(
|
179 |
+
"-i",
|
180 |
+
"--interactive",
|
181 |
+
action="store_true",
|
182 |
+
help="Interactive CLI mode",
|
183 |
+
)
|
184 |
+
parser.add_argument(
|
185 |
+
"-t",
|
186 |
+
"--use_tiny_auto_encoder",
|
187 |
+
action="store_true",
|
188 |
+
help="Use Tiny AutoEncoder for TAESD/TAESDXL/TAEF1",
|
189 |
+
)
|
190 |
+
parser.add_argument(
|
191 |
+
"-f",
|
192 |
+
"--file",
|
193 |
+
type=str,
|
194 |
+
help="Input image for img2img mode",
|
195 |
+
default="",
|
196 |
+
)
|
197 |
+
parser.add_argument(
|
198 |
+
"--img2img",
|
199 |
+
action="store_true",
|
200 |
+
help="img2img mode; requires input file via -f argument",
|
201 |
+
)
|
202 |
+
parser.add_argument(
|
203 |
+
"--batch_count",
|
204 |
+
type=int,
|
205 |
+
help="Number of sequential generations",
|
206 |
+
default=1,
|
207 |
+
)
|
208 |
+
parser.add_argument(
|
209 |
+
"--strength",
|
210 |
+
type=float,
|
211 |
+
help="Denoising strength for img2img and Image variations",
|
212 |
+
default=0.3,
|
213 |
+
)
|
214 |
+
parser.add_argument(
|
215 |
+
"--sdupscale",
|
216 |
+
action="store_true",
|
217 |
+
help="Tiled SD upscale,works only for the resolution 512x512,(2x upscale)",
|
218 |
+
)
|
219 |
+
parser.add_argument(
|
220 |
+
"--upscale",
|
221 |
+
action="store_true",
|
222 |
+
help="EDSR SD upscale ",
|
223 |
+
)
|
224 |
+
parser.add_argument(
|
225 |
+
"--custom_settings",
|
226 |
+
type=str,
|
227 |
+
help="JSON file containing custom generation settings",
|
228 |
+
default=None,
|
229 |
+
)
|
230 |
+
parser.add_argument(
|
231 |
+
"--usejpeg",
|
232 |
+
action="store_true",
|
233 |
+
help="Images will be saved as JPEG format",
|
234 |
+
)
|
235 |
+
parser.add_argument(
|
236 |
+
"--noimagesave",
|
237 |
+
action="store_true",
|
238 |
+
help="Disable image saving",
|
239 |
+
)
|
240 |
+
parser.add_argument(
|
241 |
+
"--imagequality", type=int, help="Output image quality [0 to 100]", default=90
|
242 |
+
)
|
243 |
+
parser.add_argument(
|
244 |
+
"--lora",
|
245 |
+
type=str,
|
246 |
+
help="LoRA model full path e.g D:\lora_models\CuteCartoon15V-LiberteRedmodModel-Cartoon-CuteCartoonAF.safetensors",
|
247 |
+
default=None,
|
248 |
+
)
|
249 |
+
parser.add_argument(
|
250 |
+
"--lora_weight",
|
251 |
+
type=float,
|
252 |
+
help="LoRA adapter weight [0 to 1.0]",
|
253 |
+
default=0.5,
|
254 |
+
)
|
255 |
+
parser.add_argument(
|
256 |
+
"--port",
|
257 |
+
type=int,
|
258 |
+
help="Web server port",
|
259 |
+
default=8000,
|
260 |
+
)
|
261 |
+
|
262 |
+
args = parser.parse_args()
|
263 |
+
|
264 |
+
if args.version:
|
265 |
+
print(APP_VERSION)
|
266 |
+
exit()
|
267 |
+
|
268 |
+
# parser.print_help()
|
269 |
+
print("FastSD CPU - ", APP_VERSION)
|
270 |
+
show_system_info()
|
271 |
+
print(f"Using device : {constants.DEVICE}")
|
272 |
+
|
273 |
+
|
274 |
+
if args.webui:
|
275 |
+
app_settings = get_settings()
|
276 |
+
else:
|
277 |
+
app_settings = get_settings()
|
278 |
+
|
279 |
+
print(f"Output path : {app_settings.settings.generated_images.path}")
|
280 |
+
ensure_path(app_settings.settings.generated_images.path)
|
281 |
+
|
282 |
+
print(f"Found {len(app_settings.lcm_models)} LCM models in config/lcm-models.txt")
|
283 |
+
print(
|
284 |
+
f"Found {len(app_settings.stable_diffsuion_models)} stable diffusion models in config/stable-diffusion-models.txt"
|
285 |
+
)
|
286 |
+
print(
|
287 |
+
f"Found {len(app_settings.lcm_lora_models)} LCM-LoRA models in config/lcm-lora-models.txt"
|
288 |
+
)
|
289 |
+
print(
|
290 |
+
f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in config/openvino-lcm-models.txt"
|
291 |
+
)
|
292 |
+
|
293 |
+
if args.noimagesave:
|
294 |
+
app_settings.settings.generated_images.save_image = False
|
295 |
+
else:
|
296 |
+
app_settings.settings.generated_images.save_image = True
|
297 |
+
|
298 |
+
app_settings.settings.generated_images.save_image_quality = args.imagequality
|
299 |
+
|
300 |
+
if not args.realtime:
|
301 |
+
# To minimize realtime mode dependencies
|
302 |
+
from backend.upscale.upscaler import upscale_image
|
303 |
+
from frontend.cli_interactive import interactive_mode
|
304 |
+
|
305 |
+
if args.gui:
|
306 |
+
from frontend.gui.ui import start_gui
|
307 |
+
|
308 |
+
print("Starting desktop GUI mode(Qt)")
|
309 |
+
start_gui(
|
310 |
+
[],
|
311 |
+
app_settings,
|
312 |
+
)
|
313 |
+
elif args.webui:
|
314 |
+
from frontend.webui.ui import start_webui
|
315 |
+
|
316 |
+
print("Starting web UI mode")
|
317 |
+
start_webui(
|
318 |
+
args.share,
|
319 |
+
)
|
320 |
+
elif args.realtime:
|
321 |
+
from frontend.webui.realtime_ui import start_realtime_text_to_image
|
322 |
+
|
323 |
+
print("Starting realtime text to image(EXPERIMENTAL)")
|
324 |
+
start_realtime_text_to_image(args.share)
|
325 |
+
elif args.api:
|
326 |
+
from backend.api.web import start_web_server
|
327 |
+
|
328 |
+
start_web_server(args.port)
|
329 |
+
elif args.mcp:
|
330 |
+
from backend.api.mcp_server import start_mcp_server
|
331 |
+
|
332 |
+
start_mcp_server(args.port)
|
333 |
+
else:
|
334 |
+
context = get_context(InterfaceType.CLI)
|
335 |
+
config = app_settings.settings
|
336 |
+
|
337 |
+
if args.use_openvino:
|
338 |
+
config.lcm_diffusion_setting.openvino_lcm_model_id = args.openvino_lcm_model_id
|
339 |
+
else:
|
340 |
+
config.lcm_diffusion_setting.lcm_model_id = args.lcm_model_id
|
341 |
+
|
342 |
+
config.lcm_diffusion_setting.prompt = args.prompt
|
343 |
+
config.lcm_diffusion_setting.negative_prompt = args.negative_prompt
|
344 |
+
config.lcm_diffusion_setting.image_height = args.image_height
|
345 |
+
config.lcm_diffusion_setting.image_width = args.image_width
|
346 |
+
config.lcm_diffusion_setting.guidance_scale = args.guidance_scale
|
347 |
+
config.lcm_diffusion_setting.number_of_images = args.number_of_images
|
348 |
+
config.lcm_diffusion_setting.inference_steps = args.inference_steps
|
349 |
+
config.lcm_diffusion_setting.strength = args.strength
|
350 |
+
config.lcm_diffusion_setting.seed = args.seed
|
351 |
+
config.lcm_diffusion_setting.use_openvino = args.use_openvino
|
352 |
+
config.lcm_diffusion_setting.use_tiny_auto_encoder = args.use_tiny_auto_encoder
|
353 |
+
config.lcm_diffusion_setting.use_lcm_lora = args.use_lcm_lora
|
354 |
+
config.lcm_diffusion_setting.lcm_lora.base_model_id = args.base_model_id
|
355 |
+
config.lcm_diffusion_setting.lcm_lora.lcm_lora_id = args.lcm_lora_id
|
356 |
+
config.lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
|
357 |
+
config.lcm_diffusion_setting.lora.enabled = False
|
358 |
+
config.lcm_diffusion_setting.lora.path = args.lora
|
359 |
+
config.lcm_diffusion_setting.lora.weight = args.lora_weight
|
360 |
+
config.lcm_diffusion_setting.lora.fuse = True
|
361 |
+
if config.lcm_diffusion_setting.lora.path:
|
362 |
+
config.lcm_diffusion_setting.lora.enabled = True
|
363 |
+
if args.usejpeg:
|
364 |
+
config.generated_images.format = ImageFormat.JPEG.value.upper()
|
365 |
+
if args.seed > -1:
|
366 |
+
config.lcm_diffusion_setting.use_seed = True
|
367 |
+
else:
|
368 |
+
config.lcm_diffusion_setting.use_seed = False
|
369 |
+
config.lcm_diffusion_setting.use_offline_model = args.use_offline_model
|
370 |
+
config.lcm_diffusion_setting.clip_skip = args.clip_skip
|
371 |
+
config.lcm_diffusion_setting.token_merging = args.token_merging
|
372 |
+
config.lcm_diffusion_setting.use_safety_checker = args.use_safety_checker
|
373 |
+
|
374 |
+
# Read custom settings from JSON file
|
375 |
+
custom_settings = {}
|
376 |
+
if args.custom_settings:
|
377 |
+
with open(args.custom_settings) as f:
|
378 |
+
custom_settings = json.load(f)
|
379 |
+
|
380 |
+
# Basic ControlNet settings; if ControlNet is enabled, an image is
|
381 |
+
# required even in txt2img mode
|
382 |
+
config.lcm_diffusion_setting.controlnet = None
|
383 |
+
controlnet_settings_from_dict(
|
384 |
+
config.lcm_diffusion_setting,
|
385 |
+
custom_settings,
|
386 |
+
)
|
387 |
+
|
388 |
+
# Interactive mode
|
389 |
+
if args.interactive:
|
390 |
+
# wrapper(interactive_mode, config, context)
|
391 |
+
config.lcm_diffusion_setting.lora.fuse = False
|
392 |
+
interactive_mode(config, context)
|
393 |
+
|
394 |
+
# Start of non-interactive CLI image generation
|
395 |
+
if args.img2img and args.file != "":
|
396 |
+
config.lcm_diffusion_setting.init_image = Image.open(args.file)
|
397 |
+
config.lcm_diffusion_setting.diffusion_task = DiffusionTask.image_to_image.value
|
398 |
+
elif args.img2img and args.file == "":
|
399 |
+
print("Error : You need to specify a file in img2img mode")
|
400 |
+
exit()
|
401 |
+
elif args.upscale and args.file == "" and args.custom_settings == None:
|
402 |
+
print("Error : You need to specify a file in SD upscale mode")
|
403 |
+
exit()
|
404 |
+
elif (
|
405 |
+
args.prompt == ""
|
406 |
+
and args.file == ""
|
407 |
+
and args.custom_settings == None
|
408 |
+
and not args.benchmark
|
409 |
+
):
|
410 |
+
print("Error : You need to provide a prompt")
|
411 |
+
exit()
|
412 |
+
|
413 |
+
if args.upscale:
|
414 |
+
# image = Image.open(args.file)
|
415 |
+
output_path = FastStableDiffusionPaths.get_upscale_filepath(
|
416 |
+
args.file,
|
417 |
+
2,
|
418 |
+
config.generated_images.format,
|
419 |
+
)
|
420 |
+
result = upscale_image(
|
421 |
+
context,
|
422 |
+
args.file,
|
423 |
+
output_path,
|
424 |
+
2,
|
425 |
+
)
|
426 |
+
# Perform Tiled SD upscale (EXPERIMENTAL)
|
427 |
+
elif args.sdupscale:
|
428 |
+
if args.use_openvino:
|
429 |
+
+    config.lcm_diffusion_setting.strength = 0.3
+    upscale_settings = None
+    if custom_settings != {}:
+        upscale_settings = custom_settings
+    filepath = args.file
+    output_format = config.generated_images.format
+    if upscale_settings:
+        filepath = upscale_settings["source_file"]
+        output_format = upscale_settings["output_format"].upper()
+    output_path = FastStableDiffusionPaths.get_upscale_filepath(
+        filepath,
+        2,
+        output_format,
+    )
+
+    generate_upscaled_image(
+        config,
+        filepath,
+        config.lcm_diffusion_setting.strength,
+        upscale_settings=upscale_settings,
+        context=context,
+        tile_overlap=32 if config.lcm_diffusion_setting.use_openvino else 16,
+        output_path=output_path,
+        image_format=output_format,
+    )
+    exit()
+# If img2img argument is set and prompt is empty, use image variations mode
+elif args.img2img and args.prompt == "":
+    for i in range(0, args.batch_count):
+        generate_image_variations(
+            config.lcm_diffusion_setting.init_image, args.strength
+        )
+else:
+    if args.benchmark:
+        print("Initializing benchmark...")
+        bench_lcm_setting = config.lcm_diffusion_setting
+        bench_lcm_setting.prompt = "a cat"
+        bench_lcm_setting.use_tiny_auto_encoder = False
+        context.generate_text_to_image(
+            settings=config,
+            device=DEVICE,
+        )
+
+        latencies = []
+
+        print("Starting benchmark please wait...")
+        for _ in range(3):
+            context.generate_text_to_image(
+                settings=config,
+                device=DEVICE,
+            )
+            latencies.append(context.latency)
+
+        avg_latency = sum(latencies) / 3
+
+        bench_lcm_setting.use_tiny_auto_encoder = True
+
+        context.generate_text_to_image(
+            settings=config,
+            device=DEVICE,
+        )
+        latencies = []
+        for _ in range(3):
+            context.generate_text_to_image(
+                settings=config,
+                device=DEVICE,
+            )
+            latencies.append(context.latency)
+
+        avg_latency_taesd = sum(latencies) / 3
+
+        benchmark_name = ""
+
+        if config.lcm_diffusion_setting.use_openvino:
+            benchmark_name = "OpenVINO"
+        else:
+            benchmark_name = "PyTorch"
+
+        bench_model_id = ""
+        if bench_lcm_setting.use_openvino:
+            bench_model_id = bench_lcm_setting.openvino_lcm_model_id
+        elif bench_lcm_setting.use_lcm_lora:
+            bench_model_id = bench_lcm_setting.lcm_lora.base_model_id
+        else:
+            bench_model_id = bench_lcm_setting.lcm_model_id
+
+        benchmark_result = [
+            ["Device", f"{DEVICE.upper()},{get_device_name()}"],
+            ["Stable Diffusion Model", bench_model_id],
+            [
+                "Image Size ",
+                f"{bench_lcm_setting.image_width}x{bench_lcm_setting.image_height}",
+            ],
+            [
+                "Inference Steps",
+                f"{bench_lcm_setting.inference_steps}",
+            ],
+            [
+                "Benchmark Passes",
+                3,
+            ],
+            [
+                "Average Latency",
+                f"{round(avg_latency, 3)} sec",
+            ],
+            [
+                "Average Latency(TAESD* enabled)",
+                f"{round(avg_latency_taesd, 3)} sec",
+            ],
+        ]
+        print()
+        print(
+            f" FastSD Benchmark - {benchmark_name:8} "
+        )
+        print(f"-" * 80)
+        for benchmark in benchmark_result:
+            print(f"{benchmark[0]:35} - {benchmark[1]}")
+        print(f"-" * 80)
+        print("*TAESD - Tiny AutoEncoder for Stable Diffusion")
+
+    else:
+        for i in range(0, args.batch_count):
+            context.generate_text_to_image(
+                settings=config,
+                device=DEVICE,
+            )
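Note on the hunk above: it closes out app.py's command-line dispatch with upscaling, image-variation mode, a benchmark mode (three averaged text-to-image passes, first without and then with the tiny autoencoder), and plain batched text-to-image. The argument parser itself is defined earlier in app.py and is not shown in this hunk; assuming the benchmark switch is exposed as a flag named --benchmark, a run would look roughly like:

python src/app.py --benchmark

The averaged latencies are then printed as the benchmark_result table, with TAESD marking the tiny-autoencoder passes.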
src/app_settings.py
ADDED
@@ -0,0 +1,124 @@
+from copy import deepcopy
+from os import makedirs, path
+
+import yaml
+from constants import (
+    LCM_LORA_MODELS_FILE,
+    LCM_MODELS_FILE,
+    OPENVINO_LCM_MODELS_FILE,
+    SD_MODELS_FILE,
+)
+from paths import FastStableDiffusionPaths, join_paths
+from utils import get_files_in_dir, get_models_from_text_file
+
+from models.settings import Settings
+
+
+class AppSettings:
+    def __init__(self):
+        self.config_path = FastStableDiffusionPaths().get_app_settings_path()
+        self._stable_diffsuion_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(SD_MODELS_FILE)
+        )
+        self._lcm_lora_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(LCM_LORA_MODELS_FILE)
+        )
+        self._openvino_lcm_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(OPENVINO_LCM_MODELS_FILE)
+        )
+        self._lcm_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(LCM_MODELS_FILE)
+        )
+        self._gguf_diffusion_models = get_files_in_dir(
+            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "diffusion")
+        )
+        self._gguf_clip_models = get_files_in_dir(
+            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "clip")
+        )
+        self._gguf_vae_models = get_files_in_dir(
+            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "vae")
+        )
+        self._gguf_t5xxl_models = get_files_in_dir(
+            join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "t5xxl")
+        )
+        self._config = None
+
+    @property
+    def settings(self):
+        return self._config
+
+    @property
+    def stable_diffsuion_models(self):
+        return self._stable_diffsuion_models
+
+    @property
+    def openvino_lcm_models(self):
+        return self._openvino_lcm_models
+
+    @property
+    def lcm_models(self):
+        return self._lcm_models
+
+    @property
+    def lcm_lora_models(self):
+        return self._lcm_lora_models
+
+    @property
+    def gguf_diffusion_models(self):
+        return self._gguf_diffusion_models
+
+    @property
+    def gguf_clip_models(self):
+        return self._gguf_clip_models
+
+    @property
+    def gguf_vae_models(self):
+        return self._gguf_vae_models
+
+    @property
+    def gguf_t5xxl_models(self):
+        return self._gguf_t5xxl_models
+
+    def load(self, skip_file=False):
+        if skip_file:
+            print("Skipping config file")
+            settings_dict = self._load_default()
+            self._config = Settings.model_validate(settings_dict)
+        else:
+            if not path.exists(self.config_path):
+                base_dir = path.dirname(self.config_path)
+                if not path.exists(base_dir):
+                    makedirs(base_dir)
+                try:
+                    print("Settings not found creating default settings")
+                    with open(self.config_path, "w") as file:
+                        yaml.dump(
+                            self._load_default(),
+                            file,
+                        )
+                except Exception as ex:
+                    print(f"Error in creating settings : {ex}")
+                    exit()
+            try:
+                with open(self.config_path) as file:
+                    settings_dict = yaml.safe_load(file)
+                    self._config = Settings.model_validate(settings_dict)
+            except Exception as ex:
+                print(f"Error in loading settings : {ex}")
+
+    def save(self):
+        try:
+            with open(self.config_path, "w") as file:
+                tmp_cfg = deepcopy(self._config)
+                tmp_cfg.lcm_diffusion_setting.init_image = None
+                configurations = tmp_cfg.model_dump(
+                    exclude=["init_image"],
+                )
+                if configurations:
+                    yaml.dump(configurations, file)
+        except Exception as ex:
+            print(f"Error in saving settings : {ex}")
+
+    def _load_default(self) -> dict:
+        default_config = Settings()
+        return default_config.model_dump()
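AppSettings (above) wraps the YAML settings file plus the model list files read at construction time. A minimal usage sketch, assuming it is imported with src on the path so the constants, paths, utils and models.settings imports resolve; the image_width assignment is illustrative only, since the available fields are defined by models.settings.Settings:

from app_settings import AppSettings

app_settings = AppSettings()
app_settings.load()  # writes a default settings file first if none exists yet
print(app_settings.lcm_models)  # model ids read from the LCM models list file
app_settings.settings.lcm_diffusion_setting.image_width = 768  # illustrative field
app_settings.save()  # dumps settings back to YAML, clearing the init image first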
src/backend/__init__.py
ADDED
File without changes
src/backend/annotators/canny_control.py
ADDED
@@ -0,0 +1,15 @@
+import numpy as np
+from backend.annotators.control_interface import ControlInterface
+from cv2 import Canny
+from PIL import Image
+
+
+class CannyControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        low_threshold = 100
+        high_threshold = 200
+        image = np.array(image)
+        image = Canny(image, low_threshold, high_threshold)
+        image = image[:, :, None]
+        image = np.concatenate([image, image, image], axis=2)
+        return Image.fromarray(image)
src/backend/annotators/control_interface.py
ADDED
@@ -0,0 +1,12 @@
+from abc import ABC, abstractmethod
+
+from PIL import Image
+
+
+class ControlInterface(ABC):
+    @abstractmethod
+    def get_control_image(
+        self,
+        image: Image,
+    ) -> Image:
+        pass
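ControlInterface (above) is the abstract base every annotator in this directory implements: a single get_control_image method that maps a PIL image to its control image. A hypothetical extra annotator, shown only to illustrate the contract and not part of this upload, would follow the same shape:

from backend.annotators.control_interface import ControlInterface
from PIL import Image


class IdentityControl(ControlInterface):
    # Illustrative only: returns the input unchanged as its control image.
    def get_control_image(self, image: Image) -> Image:
        return image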
src/backend/annotators/depth_control.py
ADDED
@@ -0,0 +1,15 @@
+import numpy as np
+from backend.annotators.control_interface import ControlInterface
+from PIL import Image
+from transformers import pipeline
+
+
+class DepthControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        depth_estimator = pipeline("depth-estimation")
+        image = depth_estimator(image)["depth"]
+        image = np.array(image)
+        image = image[:, :, None]
+        image = np.concatenate([image, image, image], axis=2)
+        image = Image.fromarray(image)
+        return image
src/backend/annotators/image_control_factory.py
ADDED
@@ -0,0 +1,31 @@
+from backend.annotators.canny_control import CannyControl
+from backend.annotators.depth_control import DepthControl
+from backend.annotators.lineart_control import LineArtControl
+from backend.annotators.mlsd_control import MlsdControl
+from backend.annotators.normal_control import NormalControl
+from backend.annotators.pose_control import PoseControl
+from backend.annotators.shuffle_control import ShuffleControl
+from backend.annotators.softedge_control import SoftEdgeControl
+
+
+class ImageControlFactory:
+    def create_control(self, controlnet_type: str):
+        if controlnet_type == "Canny":
+            return CannyControl()
+        elif controlnet_type == "Pose":
+            return PoseControl()
+        elif controlnet_type == "MLSD":
+            return MlsdControl()
+        elif controlnet_type == "Depth":
+            return DepthControl()
+        elif controlnet_type == "LineArt":
+            return LineArtControl()
+        elif controlnet_type == "Shuffle":
+            return ShuffleControl()
+        elif controlnet_type == "NormalBAE":
+            return NormalControl()
+        elif controlnet_type == "SoftEdge":
+            return SoftEdgeControl()
+        else:
+            print("Error: Control type not implemented!")
+            raise Exception("Error: Control type not implemented!")
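ImageControlFactory (above) maps a control type name ("Canny", "Pose", "MLSD", "Depth", "LineArt", "Shuffle", "NormalBAE", "SoftEdge") to the matching annotator and raises for anything else. A minimal usage sketch; the image file names are placeholders:

from PIL import Image

from backend.annotators.image_control_factory import ImageControlFactory

control = ImageControlFactory().create_control("Canny")
control_image = control.get_control_image(Image.open("input.png"))
control_image.save("canny_control.png")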
src/backend/annotators/lineart_control.py
ADDED
@@ -0,0 +1,11 @@
+import numpy as np
+from backend.annotators.control_interface import ControlInterface
+from controlnet_aux import LineartDetector
+from PIL import Image
+
+
+class LineArtControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+        control_image = processor(image)
+        return control_image
src/backend/annotators/mlsd_control.py
ADDED
@@ -0,0 +1,10 @@
+from backend.annotators.control_interface import ControlInterface
+from controlnet_aux import MLSDdetector
+from PIL import Image
+
+
+class MlsdControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
+        image = mlsd(image)
+        return image
src/backend/annotators/normal_control.py
ADDED
@@ -0,0 +1,10 @@
+from backend.annotators.control_interface import ControlInterface
+from controlnet_aux import NormalBaeDetector
+from PIL import Image
+
+
+class NormalControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
+        control_image = processor(image)
+        return control_image
src/backend/annotators/pose_control.py
ADDED
@@ -0,0 +1,10 @@
+from backend.annotators.control_interface import ControlInterface
+from controlnet_aux import OpenposeDetector
+from PIL import Image
+
+
+class PoseControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+        image = openpose(image)
+        return image
src/backend/annotators/shuffle_control.py
ADDED
@@ -0,0 +1,10 @@
+from backend.annotators.control_interface import ControlInterface
+from controlnet_aux import ContentShuffleDetector
+from PIL import Image
+
+
+class ShuffleControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        shuffle_processor = ContentShuffleDetector()
+        image = shuffle_processor(image)
+        return image
src/backend/annotators/softedge_control.py
ADDED
@@ -0,0 +1,10 @@
+from backend.annotators.control_interface import ControlInterface
+from controlnet_aux import PidiNetDetector
+from PIL import Image
+
+
+class SoftEdgeControl(ControlInterface):
+    def get_control_image(self, image: Image) -> Image:
+        processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
+        control_image = processor(image)
+        return control_image