Upload 11 files

- .gitattributes +1 -0
- data/catstatue_rgba.png +0 -0
- data/csm_luigi_rgba.png +0 -0
- data/test.png +3 -0
- data/zelda_rgba.png +0 -0
- guidance/sd_utils.py +334 -0
- guidance/zero123_utils.py +226 -0
- scripts/convert_obj_to_video.py +20 -0
- scripts/run.sh +5 -0
- scripts/run_sd.sh +31 -0
- scripts/runall.py +48 -0
- scripts/runall_sd.py +45 -0
.gitattributes
CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+data/test.png filter=lfs diff=lfs merge=lfs -text

data/catstatue_rgba.png
ADDED

data/csm_luigi_rgba.png
ADDED

data/test.png
ADDED (stored via Git LFS)

data/zelda_rgba.png
ADDED

guidance/sd_utils.py
ADDED
@@ -0,0 +1,334 @@
from transformers import CLIPTextModel, CLIPTokenizer, logging
from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
    PNDMScheduler,
    DDIMScheduler,
    StableDiffusionPipeline,
)
from diffusers.utils.import_utils import is_xformers_available

# suppress partial model loading warning
logging.set_verbosity_error()

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def seed_everything(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True


class StableDiffusion(nn.Module):
    def __init__(
        self,
        device,
        fp16=True,
        vram_O=False,
        sd_version="2.1",
        hf_key=None,
        t_range=[0.02, 0.98],
    ):
        super().__init__()

        self.device = device
        self.sd_version = sd_version

        if hf_key is not None:
            print(f"[INFO] using hugging face custom model key: {hf_key}")
            model_key = hf_key
        elif self.sd_version == "2.1":
            model_key = "stabilityai/stable-diffusion-2-1-base"
        elif self.sd_version == "2.0":
            model_key = "stabilityai/stable-diffusion-2-base"
        elif self.sd_version == "1.5":
            model_key = "runwayml/stable-diffusion-v1-5"
        else:
            raise ValueError(
                f"Stable-diffusion version {self.sd_version} not supported."
            )

        self.dtype = torch.float16 if fp16 else torch.float32

        # Create model
        pipe = StableDiffusionPipeline.from_pretrained(
            model_key, torch_dtype=self.dtype
        )

        if vram_O:
            pipe.enable_sequential_cpu_offload()
            pipe.enable_vae_slicing()
            pipe.unet.to(memory_format=torch.channels_last)
            pipe.enable_attention_slicing(1)
            # pipe.enable_model_cpu_offload()
        else:
            pipe.to(device)

        self.vae = pipe.vae
        self.tokenizer = pipe.tokenizer
        self.text_encoder = pipe.text_encoder
        self.unet = pipe.unet

        self.scheduler = DDIMScheduler.from_pretrained(
            model_key, subfolder="scheduler", torch_dtype=self.dtype
        )

        del pipe

        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.min_step = int(self.num_train_timesteps * t_range[0])
        self.max_step = int(self.num_train_timesteps * t_range[1])
        self.alphas = self.scheduler.alphas_cumprod.to(self.device)  # for convenience

        self.embeddings = None

    @torch.no_grad()
    def get_text_embeds(self, prompts, negative_prompts):
        pos_embeds = self.encode_text(prompts)  # [1, 77, 768]
        neg_embeds = self.encode_text(negative_prompts)
        self.embeddings = torch.cat([neg_embeds, pos_embeds], dim=0)  # [2, 77, 768]

    def encode_text(self, prompt):
        # prompt: [str]
        inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        embeddings = self.text_encoder(inputs.input_ids.to(self.device))[0]
        return embeddings

    @torch.no_grad()
    def refine(self, pred_rgb,
               guidance_scale=100, steps=50, strength=0.8,
        ):

        batch_size = pred_rgb.shape[0]
        pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False)
        latents = self.encode_imgs(pred_rgb_512.to(self.dtype))
        # latents = torch.randn((1, 4, 64, 64), device=self.device, dtype=self.dtype)

        self.scheduler.set_timesteps(steps)
        init_step = int(steps * strength)
        latents = self.scheduler.add_noise(latents, torch.randn_like(latents), self.scheduler.timesteps[init_step])

        for i, t in enumerate(self.scheduler.timesteps[init_step:]):

            latent_model_input = torch.cat([latents] * 2)

            noise_pred = self.unet(
                latent_model_input, t, encoder_hidden_states=self.embeddings,
            ).sample

            noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        imgs = self.decode_latents(latents)  # [1, 3, 512, 512]
        return imgs

    def train_step(
        self,
        pred_rgb,
        step_ratio=None,
        guidance_scale=100,
        as_latent=False,
    ):

        batch_size = pred_rgb.shape[0]
        pred_rgb = pred_rgb.to(self.dtype)

        if as_latent:
            latents = F.interpolate(pred_rgb, (64, 64), mode="bilinear", align_corners=False) * 2 - 1
        else:
            # interp to 512x512 to be fed into vae.
            pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode="bilinear", align_corners=False)
            # encode image into latents with vae, requires grad!
            latents = self.encode_imgs(pred_rgb_512)

        if step_ratio is not None:
            # dreamtime-like
            # t = self.max_step - (self.max_step - self.min_step) * np.sqrt(step_ratio)
            t = np.round((1 - step_ratio) * self.num_train_timesteps).clip(self.min_step, self.max_step)
            t = torch.full((batch_size,), t, dtype=torch.long, device=self.device)
        else:
            t = torch.randint(self.min_step, self.max_step + 1, (batch_size,), dtype=torch.long, device=self.device)

        # w(t), sigma_t^2
        w = (1 - self.alphas[t]).view(batch_size, 1, 1, 1)

        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # pred noise
            latent_model_input = torch.cat([latents_noisy] * 2)
            tt = torch.cat([t] * 2)

            noise_pred = self.unet(
                latent_model_input, tt, encoder_hidden_states=self.embeddings.repeat(batch_size, 1, 1)
            ).sample

            # perform guidance (high scale from paper!)
            noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (
                noise_pred_pos - noise_pred_uncond
            )

        grad = w * (noise_pred - noise)
        grad = torch.nan_to_num(grad)

        # seems important to avoid NaN...
        # grad = grad.clamp(-1, 1)

        target = (latents - grad).detach()
        loss = 0.5 * F.mse_loss(latents.float(), target, reduction='sum') / latents.shape[0]

        return loss

    @torch.no_grad()
    def produce_latents(
        self,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        latents=None,
    ):
        if latents is None:
            latents = torch.randn(
                (
                    self.embeddings.shape[0] // 2,
                    self.unet.in_channels,
                    height // 8,
                    width // 8,
                ),
                device=self.device,
            )

        self.scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(self.scheduler.timesteps):
            # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
            latent_model_input = torch.cat([latents] * 2)
            # predict the noise residual
            noise_pred = self.unet(
                latent_model_input, t, encoder_hidden_states=self.embeddings
            ).sample

            # perform guidance
            noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (
                noise_pred_cond - noise_pred_uncond
            )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        return latents

    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents

        imgs = self.vae.decode(latents).sample
        imgs = (imgs / 2 + 0.5).clamp(0, 1)

        return imgs

    def encode_imgs(self, imgs):
        # imgs: [B, 3, H, W]

        imgs = 2 * imgs - 1

        posterior = self.vae.encode(imgs).latent_dist
        latents = posterior.sample() * self.vae.config.scaling_factor

        return latents

    def prompt_to_img(
        self,
        prompts,
        negative_prompts="",
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        latents=None,
    ):
        if isinstance(prompts, str):
            prompts = [prompts]

        if isinstance(negative_prompts, str):
            negative_prompts = [negative_prompts]

        # Prompts -> text embeds
        self.get_text_embeds(prompts, negative_prompts)

        # Text embeds -> img latents
        latents = self.produce_latents(
            height=height,
            width=width,
            latents=latents,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        )  # [1, 4, 64, 64]

        # Img latents -> imgs
        imgs = self.decode_latents(latents)  # [1, 3, 512, 512]

        # Img to Numpy
        imgs = imgs.detach().cpu().permute(0, 2, 3, 1).numpy()
        imgs = (imgs * 255).round().astype("uint8")

        return imgs


if __name__ == "__main__":
    import argparse
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser()
    parser.add_argument("prompt", type=str)
    parser.add_argument("--negative", default="", type=str)
    parser.add_argument(
        "--sd_version",
        type=str,
        default="2.1",
        choices=["1.5", "2.0", "2.1"],
        help="stable diffusion version",
    )
    parser.add_argument(
        "--hf_key",
        type=str,
        default=None,
        help="hugging face Stable diffusion model key",
    )
    parser.add_argument("--fp16", action="store_true", help="use float16 for training")
    parser.add_argument(
        "--vram_O", action="store_true", help="optimization for low VRAM usage"
    )
    parser.add_argument("-H", type=int, default=512)
    parser.add_argument("-W", type=int, default=512)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--steps", type=int, default=50)
    opt = parser.parse_args()

    seed_everything(opt.seed)

    device = torch.device("cuda")

    sd = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key)

    imgs = sd.prompt_to_img(opt.prompt, opt.negative, opt.H, opt.W, opt.steps)

    # visualize image
    plt.imshow(imgs[0])
    plt.show()
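
Note: train_step above implements score distillation sampling (SDS): it noises the latent of the rendered image, queries the frozen UNet with classifier-free guidance, and turns the weighted noise residual into a loss whose gradient flows back through the VAE encoder into the renderer. A minimal driver sketch, assuming the file layout in this upload; the toy parameter tensor and its optimizer are hypothetical stand-ins for the actual differentiable 3D renderer, which is not part of this commit:

import torch
from guidance.sd_utils import StableDiffusion, seed_everything

seed_everything(0)
device = torch.device("cuda")

guidance = StableDiffusion(device, fp16=True)
guidance.get_text_embeds(["a photo of a hamburger"], [""])

# hypothetical stand-in for a differentiable renderer's output
params = torch.nn.Parameter(torch.rand(1, 3, 128, 128, device=device))
optimizer = torch.optim.Adam([params], lr=1e-2)

for step in range(100):
    optimizer.zero_grad()
    pred_rgb = params.clamp(0, 1)  # rendered RGB in [0, 1]
    loss = guidance.train_step(pred_rgb, step_ratio=step / 100)
    loss.backward()  # SDS gradient reaches params through encode_imgs
    optimizer.step()
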
guidance/zero123_utils.py
ADDED
@@ -0,0 +1,226 @@
from transformers import CLIPTextModel, CLIPTokenizer, logging
from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
    DDIMScheduler,
    StableDiffusionPipeline,
)
import torchvision.transforms.functional as TF

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import sys
sys.path.append('./')

from zero123 import Zero123Pipeline


class Zero123(nn.Module):
    def __init__(self, device, fp16=True, t_range=[0.02, 0.98]):
        super().__init__()

        self.device = device
        self.fp16 = fp16
        self.dtype = torch.float16 if fp16 else torch.float32

        self.pipe = Zero123Pipeline.from_pretrained(
            # "bennyguo/zero123-diffusers",
            "bennyguo/zero123-xl-diffusers",
            # './model_cache/zero123_xl',
            variant="fp16_ema" if self.fp16 else None,
            torch_dtype=self.dtype,
        ).to(self.device)

        # for param in self.pipe.parameters():
        #     param.requires_grad = False

        self.pipe.image_encoder.eval()
        self.pipe.vae.eval()
        self.pipe.unet.eval()
        self.pipe.clip_camera_projection.eval()

        self.vae = self.pipe.vae
        self.unet = self.pipe.unet

        self.pipe.set_progress_bar_config(disable=True)

        self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps

        self.min_step = int(self.num_train_timesteps * t_range[0])
        self.max_step = int(self.num_train_timesteps * t_range[1])
        self.alphas = self.scheduler.alphas_cumprod.to(self.device)  # for convenience

        self.embeddings = None

    @torch.no_grad()
    def get_img_embeds(self, x):
        # x: image tensor in [0, 1]
        x = F.interpolate(x, (256, 256), mode='bilinear', align_corners=False)
        x_pil = [TF.to_pil_image(image) for image in x]
        x_clip = self.pipe.feature_extractor(images=x_pil, return_tensors="pt").pixel_values.to(device=self.device, dtype=self.dtype)
        c = self.pipe.image_encoder(x_clip).image_embeds
        v = self.encode_imgs(x.to(self.dtype)) / self.vae.config.scaling_factor
        self.embeddings = [c, v]

    @torch.no_grad()
    def refine(self, pred_rgb, polar, azimuth, radius,
               guidance_scale=5, steps=50, strength=0.8,
        ):

        batch_size = pred_rgb.shape[0]

        self.scheduler.set_timesteps(steps)

        if strength == 0:
            init_step = 0
            latents = torch.randn((1, 4, 32, 32), device=self.device, dtype=self.dtype)
        else:
            init_step = int(steps * strength)
            pred_rgb_256 = F.interpolate(pred_rgb, (256, 256), mode='bilinear', align_corners=False)
            latents = self.encode_imgs(pred_rgb_256.to(self.dtype))
            latents = self.scheduler.add_noise(latents, torch.randn_like(latents), self.scheduler.timesteps[init_step])

        T = np.stack([np.deg2rad(polar), np.sin(np.deg2rad(azimuth)), np.cos(np.deg2rad(azimuth)), radius], axis=-1)
        T = torch.from_numpy(T).unsqueeze(1).to(self.dtype).to(self.device)  # [8, 1, 4]
        cc_emb = torch.cat([self.embeddings[0].repeat(batch_size, 1, 1), T], dim=-1)
        cc_emb = self.pipe.clip_camera_projection(cc_emb)
        cc_emb = torch.cat([cc_emb, torch.zeros_like(cc_emb)], dim=0)

        vae_emb = self.embeddings[1].repeat(batch_size, 1, 1, 1)
        vae_emb = torch.cat([vae_emb, torch.zeros_like(vae_emb)], dim=0)

        for i, t in enumerate(self.scheduler.timesteps[init_step:]):

            x_in = torch.cat([latents] * 2)
            t_in = torch.cat([t.view(1)] * 2).to(self.device)

            noise_pred = self.unet(
                torch.cat([x_in, vae_emb], dim=1),
                t_in.to(self.unet.dtype),
                encoder_hidden_states=cc_emb,
            ).sample

            noise_pred_cond, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        imgs = self.decode_latents(latents)  # [1, 3, 256, 256]
        return imgs

    def train_step(self, pred_rgb, polar, azimuth, radius, step_ratio=None, guidance_scale=5, as_latent=False):
        # pred_rgb: tensor [1, 3, H, W] in [0, 1]

        batch_size = pred_rgb.shape[0]

        if as_latent:
            latents = F.interpolate(pred_rgb, (32, 32), mode='bilinear', align_corners=False) * 2 - 1
        else:
            pred_rgb_256 = F.interpolate(pred_rgb, (256, 256), mode='bilinear', align_corners=False)
            latents = self.encode_imgs(pred_rgb_256.to(self.dtype))

        if step_ratio is not None:
            # dreamtime-like
            # t = self.max_step - (self.max_step - self.min_step) * np.sqrt(step_ratio)
            t = np.round((1 - step_ratio) * self.num_train_timesteps).clip(self.min_step, self.max_step)
            t = torch.full((batch_size,), t, dtype=torch.long, device=self.device)
        else:
            t = torch.randint(self.min_step, self.max_step + 1, (batch_size,), dtype=torch.long, device=self.device)

        w = (1 - self.alphas[t]).view(batch_size, 1, 1, 1)

        with torch.no_grad():
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)

            x_in = torch.cat([latents_noisy] * 2)
            t_in = torch.cat([t] * 2)

            T = np.stack([np.deg2rad(polar), np.sin(np.deg2rad(azimuth)), np.cos(np.deg2rad(azimuth)), radius], axis=-1)
            T = torch.from_numpy(T).unsqueeze(1).to(self.dtype).to(self.device)  # [8, 1, 4]
            cc_emb = torch.cat([self.embeddings[0].repeat(batch_size, 1, 1), T], dim=-1)
            cc_emb = self.pipe.clip_camera_projection(cc_emb)
            cc_emb = torch.cat([cc_emb, torch.zeros_like(cc_emb)], dim=0)

            vae_emb = self.embeddings[1].repeat(batch_size, 1, 1, 1)
            vae_emb = torch.cat([vae_emb, torch.zeros_like(vae_emb)], dim=0)

            noise_pred = self.unet(
                torch.cat([x_in, vae_emb], dim=1),
                t_in.to(self.unet.dtype),
                encoder_hidden_states=cc_emb,
            ).sample

        noise_pred_cond, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

        grad = w * (noise_pred - noise)
        grad = torch.nan_to_num(grad)

        target = (latents - grad).detach()
        loss = 0.5 * F.mse_loss(latents.float(), target, reduction='sum')

        return loss


    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents

        imgs = self.vae.decode(latents).sample
        imgs = (imgs / 2 + 0.5).clamp(0, 1)

        return imgs

    def encode_imgs(self, imgs, mode=False):
        # imgs: [B, 3, H, W]

        imgs = 2 * imgs - 1

        posterior = self.vae.encode(imgs).latent_dist
        if mode:
            latents = posterior.mode()
        else:
            latents = posterior.sample()
        latents = latents * self.vae.config.scaling_factor

        return latents


if __name__ == '__main__':
    import cv2
    import argparse
    import numpy as np
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser()

    parser.add_argument('input', type=str)
    parser.add_argument('--polar', type=float, default=0, help='delta polar angle in [-90, 90]')
    parser.add_argument('--azimuth', type=float, default=0, help='delta azimuth angle in [-180, 180]')
    parser.add_argument('--radius', type=float, default=0, help='delta camera radius multiplier in [-0.5, 0.5]')

    opt = parser.parse_args()

    device = torch.device('cuda')

    print(f'[INFO] loading image from {opt.input} ...')
    image = cv2.imread(opt.input, cv2.IMREAD_UNCHANGED)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)
    image = image.astype(np.float32) / 255.0
    image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).contiguous().to(device)

    print(f'[INFO] loading model ...')
    zero123 = Zero123(device)

    print(f'[INFO] running model ...')
    zero123.get_img_embeds(image)

    while True:
        outputs = zero123.refine(image, polar=[opt.polar], azimuth=[opt.azimuth], radius=[opt.radius], strength=0)
        plt.imshow(outputs.float().cpu().numpy().transpose(0, 2, 3, 1)[0])
        plt.show()
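
Note: Zero123.train_step follows the same SDS recipe as sd_utils, but instead of a text prompt it conditions the UNet on a CLIP image embedding of a reference view concatenated with the relative camera offsets (polar, azimuth, radius), so novel views of a single input image can be supervised. A minimal sketch of one guidance call, assuming the module above; the "rendered" view here is a random placeholder rather than an actual render:

import torch
from guidance.zero123_utils import Zero123

device = torch.device("cuda")
zero123 = Zero123(device, fp16=True)

# reference image in [0, 1], e.g. a processed data/*_rgba.png composited on white
ref = torch.rand(1, 3, 256, 256, device=device)
zero123.get_img_embeds(ref)

# placeholder for a differentiable render from a camera 30 degrees to the side
render = torch.rand(1, 3, 256, 256, device=device, requires_grad=True)
loss = zero123.train_step(render, polar=[0.0], azimuth=[30.0], radius=[0.0], step_ratio=0.5)
loss.backward()  # gradient w.r.t. the rendered view
print(render.grad.shape)  # torch.Size([1, 3, 256, 256])
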
scripts/convert_obj_to_video.py
ADDED
@@ -0,0 +1,20 @@
import os
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dir', default='logs', type=str, help='Directory where obj files are stored')
parser.add_argument('--out', default='videos', type=str, help='Directory where videos will be saved')
args = parser.parse_args()

out = args.out
os.makedirs(out, exist_ok=True)

files = glob.glob(f'{args.dir}/*.obj')
for f in files:
    name = os.path.basename(f)
    # first stage model, ignore
    if name.endswith('_mesh.obj'):
        continue
    print(f'[INFO] process {name}')
    os.system(f"python -m kiui.render {f} --save_video {os.path.join(out, name.replace('.obj', '.mp4'))} ")
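
Note: the script shells out with os.system, so an .obj path containing spaces or shell metacharacters would break the command. A hedged variation (not what the script above does) is to call kiui.render through subprocess with an argument list, which sidesteps quoting; same module and flags as used elsewhere in this upload:

import os
import subprocess

def render_video(obj_path: str, out_dir: str) -> None:
    # render a turntable video for one mesh, mirroring the kiui.render call above
    video_path = os.path.join(out_dir, os.path.basename(obj_path).replace('.obj', '.mp4'))
    subprocess.run(
        ["python", "-m", "kiui.render", obj_path, "--save_video", video_path],
        check=True,
    )
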
scripts/run.sh
ADDED
@@ -0,0 +1,5 @@
export CUDA_VISIBLE_DEVICES=5

python main.py --config configs/image.yaml input=data/anya_rgba.png save_path=anya
python main2.py --config configs/image.yaml input=data/anya_rgba.png save_path=anya
python -m kiui.render logs/anya.obj --save_video videos/anya.mp4 --wogui

scripts/run_sd.sh
ADDED
@@ -0,0 +1,31 @@
export CUDA_VISIBLE_DEVICES=6

# easy samples
python main.py --config configs/text.yaml prompt="a photo of an icecream" save_path=icecream
python main2.py --config configs/text.yaml prompt="a photo of an icecream" save_path=icecream
python main.py --config configs/text.yaml prompt="a ripe strawberry" save_path=strawberry
python main2.py --config configs/text.yaml prompt="a ripe strawberry" save_path=strawberry
python main.py --config configs/text.yaml prompt="a blue tulip" save_path=tulip
python main2.py --config configs/text.yaml prompt="a blue tulip" save_path=tulip

python main.py --config configs/text.yaml prompt="a golden goblet" save_path=goblet
python main2.py --config configs/text.yaml prompt="a golden goblet" save_path=goblet
python main.py --config configs/text.yaml prompt="a photo of a hamburger" save_path=hamburger
python main2.py --config configs/text.yaml prompt="a photo of a hamburger" save_path=hamburger
python main.py --config configs/text.yaml prompt="a delicious croissant" save_path=croissant
python main2.py --config configs/text.yaml prompt="a delicious croissant" save_path=croissant

# hard samples
python main.py --config configs/text.yaml prompt="a baby bunny sitting on top of a stack of pancake" save_path=bunny_pancake
python main2.py --config configs/text.yaml prompt="a baby bunny sitting on top of a stack of pancake" save_path=bunny_pancake
python main.py --config configs/text.yaml prompt="a typewriter" save_path=typewriter
python main2.py --config configs/text.yaml prompt="a typewriter" save_path=typewriter
python main.py --config configs/text.yaml prompt="a pineapple" save_path=pineapple
python main2.py --config configs/text.yaml prompt="a pineapple" save_path=pineapple

python main.py --config configs/text.yaml prompt="a model of a house in Tudor style" save_path=tudor_house
python main2.py --config configs/text.yaml prompt="a model of a house in Tudor style" save_path=tudor_house
python main.py --config configs/text.yaml prompt="a lionfish" save_path=lionfish
python main2.py --config configs/text.yaml prompt="a lionfish" save_path=lionfish
python main.py --config configs/text.yaml prompt="a bunch of yellow rose, highly detailed" save_path=rose
python main2.py --config configs/text.yaml prompt="a bunch of yellow rose, highly detailed" save_path=rose

scripts/runall.py
ADDED
@@ -0,0 +1,48 @@
import os
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dir', default='data', type=str, help='Directory where processed images are stored')
parser.add_argument('--out', default='logs', type=str, help='Directory where obj files will be saved')
parser.add_argument('--video-out', default='videos', type=str, help='Directory where videos will be saved')
parser.add_argument('--gpu', default=0, type=int, help='ID of GPU to use')
parser.add_argument('--elevation', default=0, type=int, help='Elevation angle of view in degrees')
parser.add_argument('--config', default='configs', type=str, help='Path to config directory, which contains image.yaml')
args = parser.parse_args()

files = glob.glob(f'{args.dir}/*_rgba.png')
configs_dir = args.config

# check if image.yaml exists
if not os.path.exists(os.path.join(configs_dir, 'image.yaml')):
    raise FileNotFoundError(
        f'image.yaml not found in {configs_dir} directory. Please check if the directory is correct.'
    )

# create output directories if not exists
out_dir = args.out
os.makedirs(out_dir, exist_ok=True)
video_dir = args.video_out
os.makedirs(video_dir, exist_ok=True)


for file in files:
    name = os.path.basename(file).replace("_rgba.png", "")
    print(f'======== processing {name} ========')
    # first stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main.py '
              f'--config {configs_dir}/image.yaml '
              f'input={file} '
              f'save_path={name} elevation={args.elevation}')
    # second stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main2.py '
              f'--config {configs_dir}/image.yaml '
              f'input={file} '
              f'save_path={name} elevation={args.elevation}')
    # export video
    mesh_path = os.path.join(out_dir, f'{name}.obj')
    os.system(f'python -m kiui.render {mesh_path} '
              f'--save_video {video_dir}/{name}.mp4 '
              f'--wogui '
              f'--elevation {args.elevation}')
scripts/runall_sd.py
ADDED
@@ -0,0 +1,45 @@
import os
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=0, type=int)
args = parser.parse_args()

prompts = [
    ('strawberry', 'a ripe strawberry'),
    ('cactus_pot', 'a small saguaro cactus planted in a clay pot'),
    ('hamburger', 'a delicious hamburger'),
    ('icecream', 'an icecream'),
    ('tulip', 'a blue tulip'),
    ('pineapple', 'a ripe pineapple'),
    ('goblet', 'a golden goblet'),
    # ('squitopus', 'a squirrel-octopus hybrid'),
    # ('astronaut', 'Michelangelo style statue of an astronaut'),
    # ('teddy_bear', 'a teddy bear'),
    # ('corgi_nurse', 'a plush toy of a corgi nurse'),
    # ('teapot', 'a blue and white porcelain teapot'),
    # ('skull', "a human skull"),
    # ('penguin', 'a penguin'),
    # ('campfire', 'a campfire'),
    # ('donut', 'a donut with pink icing'),
    # ('cupcake', 'a birthday cupcake'),
    # ('pie', 'shepherds pie'),
    # ('cone', 'a traffic cone'),
    # ('schoolbus', 'a schoolbus'),
    # ('avocado_chair', 'a chair that looks like an avocado'),
    # ('glasses', 'a pair of sunglasses')
    # ('potion', 'a bottle of green potion'),
    # ('chalice', 'a delicate chalice'),
]

for name, prompt in prompts:
    print(f'======== processing {name} ========')
    # first stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main.py --config configs/text.yaml prompt="{prompt}" save_path={name}')
    # second stage
    os.system(f'CUDA_VISIBLE_DEVICES={args.gpu} python main2.py --config configs/text.yaml prompt="{prompt}" save_path={name}')
    # export video
    mesh_path = os.path.join('logs', f'{name}.obj')
    os.makedirs('videos', exist_ok=True)
    os.system(f'python -m kiui.render {mesh_path} --save_video videos/{name}.mp4 --wogui')