Spaces: Running on Zero
File size: 8,321 Bytes
import gradio as gr
import os
import sys
from typing import List
# sys.path.append(os.getcwd())
import numpy as np
from PIL import Image
import torch
import torch.utils.checkpoint
from pytorch_lightning import seed_everything
from diffusers import AutoencoderKL, DDPMScheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor
from huggingface_hub import hf_hub_download, snapshot_download
from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline
from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix
from ram.models.ram_lora import ram
from ram import inference_ram as inference
from torchvision import transforms
from models.controlnet import ControlNetModel
from models.unet_2d_condition import UNet2DConditionModel
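# SeeSR Gradio demo. Overall flow (a summary comment, not upstream code):
# download weights, build a ControlNet-style super-resolution pipeline on top
# of sd-turbo, tag the low-resolution input with RAM/DAPE, and condition the
# diffusion on both the tags (as prompt text) and RAM's image embeddings.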
tensor_transforms = transforms.Compose([
    transforms.ToTensor(),
])

ram_transforms = transforms.Compose([
    transforms.Resize((384, 384)),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
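# The mean/std above are the standard ImageNet normalization statistics, and
# 384x384 matches the `image_size` the RAM tagger is instantiated with below.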
# Fetch the SeeSR, sd-turbo and RAM weights from the Hugging Face Hub.
snapshot_download(
    repo_id="alexnasa/SEESR",
    local_dir="preset/models",
)
snapshot_download(
    repo_id="stabilityai/sd-turbo",
    local_dir="preset/models/sd-turbo",
)
snapshot_download(
    repo_id="xinyu1205/recognize_anything_model",
    local_dir="preset/models/",
)
# Load scheduler, tokenizer and models.
pretrained_model_path = 'preset/models/sd-turbo'
seesr_model_path = 'preset/models/seesr'
scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
# feature_extractor = CLIPImageProcessor.from_pretrained(f"{pretrained_model_path}/feature_extractor")
# `from_pretrained_orig` is a SeeSR-specific loader defined in models/unet_2d_condition.py;
# it builds the UNet from the base sd-turbo weights plus the SeeSR weights.
unet = UNet2DConditionModel.from_pretrained_orig(pretrained_model_path, seesr_model_path, subfolder="unet")
controlnet = ControlNetModel.from_pretrained(seesr_model_path, subfolder="controlnet")
# Freeze the VAE, text encoder, UNet and ControlNet; this app only runs inference.
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
controlnet.requires_grad_(False)
if is_xformers_available():
    unet.enable_xformers_memory_efficient_attention()
    controlnet.enable_xformers_memory_efficient_attention()
else:
    raise ValueError("xformers is not available. Make sure it is installed correctly")
# Get the validation pipeline
validation_pipeline = StableDiffusionControlNetPipeline(
    vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=None,
    unet=unet, controlnet=controlnet, scheduler=scheduler,
    safety_checker=None, requires_safety_checker=False,
)
validation_pipeline._init_tiled_vae(encoder_tile_size=1024, decoder_tile_size=224)
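# Tiled VAE runs the encode/decode passes tile by tile so peak GPU memory stays
# bounded on large inputs; the sizes above (1024 encoder, 224 decoder) are the
# demo's defaults.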
weight_dtype = torch.float16
device = "cuda"
# Move the text encoder, VAE, UNet and ControlNet to the GPU and cast to fp16.
text_encoder.to(device, dtype=weight_dtype)
vae.to(device, dtype=weight_dtype)
unet.to(device, dtype=weight_dtype)
controlnet.to(device, dtype=weight_dtype)
tag_model = ram(
    pretrained='preset/models/ram_swin_large_14m.pth',
    pretrained_condition='preset/models/DAPE.pth',
    image_size=384,
    vit='swin_l',
)
tag_model.eval()
tag_model.to(device, dtype=weight_dtype)
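# RAM (with the DAPE weights loaded via `pretrained_condition`) supplies two
# conditioning signals used in `process` below: text tags for the prompt and
# image embeddings (`generate_image_embeds`) fed to the diffusion pipeline.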
@torch.no_grad()
def process(
    input_image: Image.Image,
    user_prompt: str,
    positive_prompt: str,
    negative_prompt: str,
    num_inference_steps: int,
    scale_factor: int,
    cfg_scale: float,
    seed: int,
    latent_tiled_size: int,
    latent_tiled_overlap: int,
    sample_times: int,
) -> List[np.ndarray]:
    process_size = 512
    resize_preproc = transforms.Compose([
        transforms.Resize(process_size, interpolation=transforms.InterpolationMode.BILINEAR),
    ])
    seed_everything(seed)
    generator = torch.Generator(device=device)

    # Tag the low-quality input with RAM and extract its image embeddings.
    lq = tensor_transforms(input_image).unsqueeze(0).to(device).half()
    lq = ram_transforms(lq)
    res = inference(lq, tag_model)
    ram_encoder_hidden_states = tag_model.generate_image_embeds(lq)

    validation_prompt = f"{res[0]}, {positive_prompt},"
    validation_prompt = validation_prompt if user_prompt == '' else f"{user_prompt}, {validation_prompt}"
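    # Prompt assembly order: optional user prompt first, then the RAM tags,
    # then the positive-quality keywords from the UI.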
    # Upscale by the requested factor, ensure the working size is at least
    # `process_size`, and snap dimensions to multiples of 8 (the VAE's
    # spatial downsampling factor).
    ori_width, ori_height = input_image.size
    resize_flag = False
    rscale = scale_factor
    input_image = input_image.resize((int(input_image.size[0] * rscale), int(input_image.size[1] * rscale)))

    if min(input_image.size) < process_size:
        input_image = resize_preproc(input_image)

    input_image = input_image.resize((input_image.size[0] // 8 * 8, input_image.size[1] // 8 * 8))
    width, height = input_image.size
    resize_flag = True
    images = []
    for _ in range(sample_times):
        try:
            with torch.autocast("cuda"):
                image = validation_pipeline(
                    validation_prompt, input_image, negative_prompt=negative_prompt,
                    num_inference_steps=num_inference_steps, generator=generator,
                    height=height, width=width,
                    guidance_scale=cfg_scale, conditioning_scale=1,
                    start_point='lr', start_steps=999,
                    ram_encoder_hidden_states=ram_encoder_hidden_states,
                    latent_tiled_size=latent_tiled_size,
                    latent_tiled_overlap=latent_tiled_overlap,
                ).images[0]

            # Match the output's color statistics to the (upscaled) input.
            image = wavelet_color_fix(image, input_image)

            if resize_flag:
                image = image.resize((int(ori_width * rscale), int(ori_height * rscale)))
        except Exception as e:
            print(e)
            image = Image.new(mode="RGB", size=(512, 512))

        images.append(np.array(image))

    return images
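# A minimal sketch of driving `process` without the Gradio UI (assumptions: a
# local test image "example_lr.png" exists and values roughly mirror the UI
# defaults below; kept commented out so it never runs inside the Space):
#
#     img = Image.open("example_lr.png").convert("RGB")
#     outputs = process(
#         img,
#         user_prompt="",
#         positive_prompt="clean, high-resolution, 8k, best quality, masterpiece",
#         negative_prompt="dotted, noise, blur, lowres, worst quality, low quality",
#         num_inference_steps=2, scale_factor=4, cfg_scale=1.0, seed=231,
#         latent_tiled_size=320, latent_tiled_overlap=4, sample_times=1,
#     )
#     Image.fromarray(outputs[0]).save("example_sr.png")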
MARKDOWN = """
## SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution

[GitHub](https://github.com/cswry/SeeSR) | [Paper](https://arxiv.org/abs/2311.16518)

If SeeSR is helpful to you, please help star the GitHub repo. Thanks!
"""
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown(MARKDOWN)
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil")
            run_button = gr.Button("Run")
            with gr.Accordion("Options", open=True):
                user_prompt = gr.Textbox(label="User Prompt", value="")
                positive_prompt = gr.Textbox(label="Positive Prompt", value="clean, high-resolution, 8k, best quality, masterpiece")
                negative_prompt = gr.Textbox(
                    label="Negative Prompt",
                    value="dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"
                )
                cfg_scale = gr.Slider(label="Classifier-Free Guidance Scale (fixed at 1.0 for sd-turbo)", minimum=1, maximum=1, value=1, step=0)
                num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=8, value=2, step=1)
                seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=231)
                sample_times = gr.Slider(label="Sample Times", minimum=1, maximum=10, step=1, value=1)
                latent_tiled_size = gr.Slider(label="Diffusion Tile Size", minimum=128, maximum=480, value=320, step=1)
                latent_tiled_overlap = gr.Slider(label="Diffusion Tile Overlap", minimum=4, maximum=16, value=4, step=1)
                scale_factor = gr.Number(label="SR Scale", value=4)
        with gr.Column():
            result_gallery = gr.Gallery(label="Output", show_label=False, elem_id="gallery")

    # Input order must match the `process` signature.
    inputs = [
        input_image,
        user_prompt,
        positive_prompt,
        negative_prompt,
        num_inference_steps,
        scale_factor,
        cfg_scale,
        seed,
        latent_tiled_size,
        latent_tiled_overlap,
        sample_times,
    ]
    run_button.click(fn=process, inputs=inputs, outputs=[result_gallery])

block.launch(share=True)