Update app.py
app.py CHANGED
@@ -1,49 +1,4 @@
-import subprocess
-
-# Remove existing submodule
-subprocess.run(["git", "submodule", "deinit", "-f", "--", "PASD"])
-subprocess.run(["git", "rm", "-f", "PASD"])
-subprocess.run(["rm", "-rf", ".git/modules/PASD"])
-
-# Add submodule
-subprocess.run(["git", "submodule", "add", "https://github.com/fffiloni/PASD.git", "PASD"])
-subprocess.run(["git", "submodule", "update", "--init", "--recursive"])
-
-# Ensure submodule is up-to-date
-subprocess.run(["git", "submodule", "update", "--recursive", "--remote"])
-
-import torch
-import types
-torch.cuda.get_device_capability = lambda *args, **kwargs: (8, 6)
-torch.cuda.get_device_properties = lambda *args, **kwargs: types.SimpleNamespace(name='NVIDIA A10G', major=8, minor=6, total_memory=23836033024, multi_processor_count=80)
-
-import huggingface_hub
-huggingface_hub.snapshot_download(
-    repo_id='camenduru/PASD',
-    allow_patterns=[
-        'pasd/**',
-        'pasd_light/**',
-        'pasd_light_rrdb/**',
-        'pasd_rrdb/**',
-    ],
-    local_dir='PASD/runs',
-    local_dir_use_symlinks=False,
-)
-huggingface_hub.hf_hub_download(
-    repo_id='camenduru/PASD',
-    filename='majicmixRealistic_v6.safetensors',
-    local_dir='PASD/checkpoints/personalized_models',
-    local_dir_use_symlinks=False,
-)
-huggingface_hub.hf_hub_download(
-    repo_id='akhaliq/RetinaFace-R50',
-    filename='RetinaFace-R50.pth',
-    local_dir='PASD/annotator/ckpts',
-    local_dir_use_symlinks=False,
-)
-
-import sys;
-#sys.path.append('./PASD')
+
 import spaces
 import os
 import datetime
@@ -78,10 +33,10 @@ else:
 from models.pasd.unet_2d_condition import UNet2DConditionModel
 from models.pasd.controlnet import ControlNetModel
 
-pretrained_model_path = "
-ckpt_path = "
+pretrained_model_path = "checkpoints/stable-diffusion-v1-5"
+ckpt_path = "runs/pasd/checkpoint-100000"
 #dreambooth_lora_path = "checkpoints/personalized_models/toonyou_beta3.safetensors"
-dreambooth_lora_path = "
+dreambooth_lora_path = "checkpoints/personalized_models/majicmixRealistic_v6.safetensors"
 #dreambooth_lora_path = "checkpoints/personalized_models/Realistic_Vision_V5.1.safetensors"
 weight_dtype = torch.float16
 device = "cuda"
@@ -90,7 +45,7 @@ scheduler = UniPCMultistepScheduler.from_pretrained(pretrained_model_path, subfo
 text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
 tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
 vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
-feature_extractor = CLIPImageProcessor.from_pretrained(pretrained_model_path
+feature_extractor = CLIPImageProcessor.from_pretrained(f"{pretrained_model_path}/feature_extractor")
 unet = UNet2DConditionModel.from_pretrained(ckpt_path, subfolder="unet")
 controlnet = ControlNetModel.from_pretrained(ckpt_path, subfolder="controlnet")
 vae.requires_grad_(False)
@@ -237,7 +192,7 @@ with gr.Blocks(css=css) as demo:
     """)
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(type="filepath", sources=["upload"], value="
+            input_image = gr.Image(type="filepath", sources=["upload"], value="samples/frog.png")
             prompt_in = gr.Textbox(label="Prompt", value="Frog")
             with gr.Accordion(label="Advanced settings", open=False):
                 added_prompt = gr.Textbox(label="Added Prompt", value='clean, high-resolution, 8k, best quality, masterpiece')