Initial commit
Browse files

Files changed:
- requirements.txt  (+0 −1)
- src/utils/gradio_utils.py  (+16 −0)
requirements.txt
CHANGED
@@ -1,4 +1,3 @@
-https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/T4/xformers-0.0.13.dev0-py3-none-any.whl
 accelerate==0.12.0
 diffusers==0.9.0
 tokenizers==0.13.2
src/utils/gradio_utils.py
CHANGED
@@ -19,6 +19,8 @@ from transformers import CLIPTextModel, CLIPTokenizer
 from torch import autocast
 from src.diffusers_ import StableDiffusionPipeline
 
+from diffusers.utils.import_utils import is_xformers_available
+from packaging import version
 
 
 def launch_source():
@@ -72,9 +74,23 @@ def launch_optimize(img_in_real, prompt, n_hiper):
     CLIP_text_encoder = CLIPTextModel.from_pretrained(pretrained_model_name, subfolder="text_encoder")#, use_auth_token=True)
     vae = AutoencoderKL.from_pretrained(pretrained_model_name, subfolder="vae")#, use_auth_token=True)
     unet = UNet2DConditionModel.from_pretrained(pretrained_model_name, subfolder="unet")#, use_auth_token=True)
+    unet.enable_xformers_memory_efficient_attention()
     noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
 
 
+    if is_xformers_available():
+        import xformers
+
+        xformers_version = version.parse(xformers.__version__)
+        if xformers_version == version.parse("0.0.16"):
+            print(
+                "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
+            )
+        unet.enable_xformers_memory_efficient_attention()
+    else:
+        raise ValueError("xformers is not available. Make sure it is installed correctly")
+
+
     # Encode the input image.
     vae.to(accelerator.device, dtype=weight_dtype)
     input_image = img_in_real.convert("RGB")