Create pintar.py
pintar.py
ADDED
@@ -0,0 +1,112 @@
import os
import numpy as np
from skimage import color, io

import torch
import torch.nn.functional as F

from PIL import Image
from models import ColorEncoder, ColorUNet
from extractor.manga_panel_extractor import PanelExtractor

os.environ["CUDA_VISIBLE_DEVICES"] = '0'

def mkdirs(path):
    if not os.path.exists(path):
        os.makedirs(path)

def Lab2RGB_out(img_lab):
    # Undo the L-channel shift applied in Normalize() and convert a 1x3xHxW Lab tensor to an RGB uint8 image.
    img_lab = img_lab.detach().cpu()
    img_l = img_lab[:, :1, :, :]
    img_ab = img_lab[:, 1:, :, :]
    img_l = img_l + 50
    pred_lab = torch.cat((img_l, img_ab), 1)[0, ...].numpy()
    out = (np.clip(color.lab2rgb(pred_lab.transpose(1, 2, 0)), 0, 1) * 255).astype("uint8")
    return out

def RGB2Lab(inputs):
    return color.rgb2lab(inputs)

def Normalize(inputs):
    # Shift L from [0, 100] to [-50, 50]; leave the ab channels untouched.
    l = inputs[:, :, 0:1]
    ab = inputs[:, :, 1:3]
    l = l - 50
    lab = np.concatenate((l, ab), 2)
    return lab.astype('float32')

def numpy2tensor(inputs):
    out = torch.from_numpy(inputs.transpose(2, 0, 1))
    return out

def tensor2numpy(inputs):
    out = inputs[0, ...].detach().cpu().numpy().transpose(1, 2, 0)
    return out

def preprocessing(inputs):
    # Return the RGB image (0-255) and its normalized Lab version as 1x3xHxW float tensors.
    img_lab = Normalize(RGB2Lab(inputs))
    img = np.array(inputs, 'float32')
    img = numpy2tensor(img)
    img_lab = numpy2tensor(img_lab)
    return img.unsqueeze(0), img_lab.unsqueeze(0)

if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"

    ckpt_path = 'experiments/Color2Manga_gray/074000_gray.pt'
    test_dir_path = 'test_datasets/gray_test'
    no_extractor = False  # panel extraction (PanelExtractor) is imported but not used in this script
    ref_img_path = 'path_to_your_reference_image.jpg'  # specify the path to your reference image here

    ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)

    colorEncoder = ColorEncoder().to(device)
    colorEncoder.load_state_dict(ckpt["colorEncoder"])
    colorEncoder.eval()

    colorUNet = ColorUNet().to(device)
    colorUNet.load_state_dict(ckpt["colorUNet"])
    colorUNet.eval()

    # The reference image supplies the colour statistics; it is encoded once and reused for every page.
    img1 = Image.open(ref_img_path).convert("RGB")

    img1, img1_lab = preprocessing(img1)
    img1 = img1.to(device)
    img1_lab = img1_lab.to(device)

    while True:
        print(f'Make sure the manga images are under this path: {test_dir_path}')
        img_path = input("Please input the name of the image to be colorized (with file extension): ")
        img_path = os.path.join(test_dir_path, img_path)
        img_name = os.path.splitext(os.path.basename(img_path))[0]

        # Grayscale manga page: its L channel is the content that gets colorized.
        img2 = Image.open(img_path).convert("RGB")
        width, height = img2.size
        img2, img2_lab = preprocessing(img2)
        img2 = img2.to(device)
        img2_lab = img2_lab.to(device)

        with torch.no_grad():
            # Reference RGB scaled to [0, 1] for the colour encoder; page L channel scaled to [-1, 1] for the U-Net.
            img1_resize = F.interpolate(img1 / 255., size=(256, 256), mode='bilinear',
                                        recompute_scale_factor=False, align_corners=False)
            img2_L_resize = F.interpolate(img2_lab[:, :1, :, :] / 50., size=(256, 256), mode='bilinear',
                                          recompute_scale_factor=False, align_corners=False)

            color_vector = colorEncoder(img1_resize)

            fake_ab = colorUNet((img2_L_resize, color_vector))
            # The network predicts ab in [-1, 1]; scale back to the Lab range and to the page's original size.
            fake_ab = F.interpolate(fake_ab * 110, size=(height, width), mode='bilinear',
                                    recompute_scale_factor=False, align_corners=False)

        # Combine the page's original L channel with the predicted ab channels and convert back to RGB.
        fake_img = torch.cat((img2_lab[:, :1, :, :], fake_ab), 1)
        fake_img = Lab2RGB_out(fake_img)

        out_folder = os.path.join(os.path.dirname(img_path), 'color')
        out_img_path = os.path.join(out_folder, f'{img_name}_color.png')

        # show the result
        Image.fromarray(fake_img).show()
        # save the result next to the input page, under a 'color' subfolder
        mkdirs(out_folder)
        io.imsave(out_img_path, fake_img)