utils
Browse files- flagged/log.csv +2 -0
- util/__init__.py +1 -0
- util/__pycache__/__init__.cpython-310.pyc +0 -0
- util/__pycache__/__init__.cpython-36.pyc +0 -0
- util/__pycache__/__init__.cpython-37.pyc +0 -0
- util/__pycache__/__init__.cpython-38.pyc +0 -0
- util/__pycache__/flow_util.cpython-310.pyc +0 -0
- util/__pycache__/flow_util.cpython-38.pyc +0 -0
- util/__pycache__/util.cpython-36.pyc +0 -0
- util/__pycache__/util.cpython-37.pyc +0 -0
- util/flow_util.py +297 -0
- util/image_pool.py +31 -0
- util/util.py +94 -0
flagged/log.csv
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
Input Text,Input Text2,Generated Text,flag,username,timestamp
|
2 |
+
hhdjvbdfhv,bala,hhdjvbdfhv hello bala,,,2022-11-08 04:59:13.367059
|
util/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# util_init
|
util/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (152 Bytes). View file
|
|
util/__pycache__/__init__.cpython-36.pyc
ADDED
Binary file (130 Bytes). View file
|
|
util/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (134 Bytes). View file
|
|
util/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (150 Bytes). View file
|
|
util/__pycache__/flow_util.cpython-310.pyc
ADDED
Binary file (9.75 kB). View file
|
|
util/__pycache__/flow_util.cpython-38.pyc
ADDED
Binary file (9.85 kB). View file
|
|
util/__pycache__/util.cpython-36.pyc
ADDED
Binary file (4.02 kB). View file
|
|
util/__pycache__/util.cpython-37.pyc
ADDED
Binary file (3.7 kB). View file
|
|
util/flow_util.py
ADDED
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import os
|
3 |
+
import imageio
|
4 |
+
import math
|
5 |
+
import torch
|
6 |
+
import importlib
|
7 |
+
import re
|
8 |
+
import argparse
|
9 |
+
from natsort import natsorted
|
10 |
+
|
11 |
+
|
12 |
+
|
13 |
+
# convert a tensor into a numpy array
|
14 |
+
def tensor2im(image_tensor, bytes=255.0, imtype=np.uint8):
    """Convert a (C, H, W) tensor in [-1, 1] — or the first item of a
    (B, C, H, W) batch — into an (H, W, C) numpy image scaled to [0, bytes].
    """
    source = image_tensor if image_tensor.dim() == 3 else image_tensor[0]
    array = source.cpu().float().numpy()
    # HWC layout, then map [-1, 1] -> [0, bytes].
    array = (np.transpose(array, (1, 2, 0)) + 1) / 2.0 * bytes
    return array.astype(imtype)
|
22 |
+
|
23 |
+
|
24 |
+
# conver a tensor into a numpy array
|
25 |
+
def tensor2array(value_tensor):
    """Flatten a 3-D tensor — or the first item of a batched tensor — into a
    1-D float numpy array.
    """
    source = value_tensor if value_tensor.dim() == 3 else value_tensor[0]
    return source.view(-1).cpu().float().numpy()
|
31 |
+
|
32 |
+
# label color map
|
33 |
+
def uint82bin(n, count=8):
    """Return the low *count* bits of integer n as a binary string,
    most-significant bit first."""
    bits = []
    for shift in range(count - 1, -1, -1):
        bits.append(str((n >> shift) & 1))
    return ''.join(bits)
|
36 |
+
|
37 |
+
def labelcolormap(N):
    """Return an (N, 3) uint8 color map for label visualization.

    N == 19 uses the fixed CelebAMask-HQ palette; any other N derives colors
    from the label index via the standard bit-interleaving scheme.
    """
    if N == 19:  # CelebAMask-HQ
        return np.array([(0, 0, 0), (204, 0, 0), (76, 153, 0),
                         (204, 204, 0), (51, 51, 255), (204, 0, 204), (0, 255, 255),
                         (51, 255, 255), (102, 51, 0), (255, 0, 0), (102, 204, 0),
                         (255, 255, 0), (0, 0, 153), (0, 0, 204), (255, 51, 153),
                         (0, 204, 204), (0, 51, 0), (255, 153, 51), (0, 204, 0)],
                        dtype=np.uint8)
    cmap = np.zeros((N, 3), dtype=np.uint8)
    for i in range(N):
        r = g = b = 0
        value = i
        # Spread the bits of the label index across the three channels,
        # three bits per round, from the high bit positions downwards.
        for shift in range(7):
            r ^= ((value >> 0) & 1) << (7 - shift)
            g ^= ((value >> 1) & 1) << (7 - shift)
            b ^= ((value >> 2) & 1) << (7 - shift)
            value >>= 3
    # channel values stay within uint8 range by construction
        cmap[i] = (r, g, b)
    return cmap
|
60 |
+
|
61 |
+
class Colorize(object):
    """Map an integer label map to a 3-channel color image in [-1, 1],
    using the palette produced by labelcolormap."""

    def __init__(self, n):
        palette = labelcolormap(n)
        self.cmap = torch.from_numpy(palette[:n])

    def __call__(self, gray_image):
        # Accept a batched input by taking its first element.
        if len(gray_image.size()) != 3:
            gray_image = gray_image[0]
        dims = gray_image.size()
        colored = torch.ByteTensor(3, dims[1], dims[2]).fill_(0)
        labels = gray_image[0]
        for index in range(len(self.cmap)):
            selected = (index == labels).cpu()
            for channel in range(3):
                colored[channel][selected] = self.cmap[index][channel]
        # Normalize uint8 colors to [-1, 1].
        return colored.float() / 255.0 * 2 - 1
|
80 |
+
|
81 |
+
|
82 |
+
def make_colorwheel():
    """Build the optical-flow color wheel presented in:
    Baker et al. "A Database and Evaluation Methodology for Optical Flow"
    (ICCV, 2007), http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
    Follows the C++ code of Daniel Scharstein and the Matlab code of
    Deqing Sun.

    Returns a (55, 3) float array of RGB colors.
    """
    RY, YG, GC, CB, BM, MR = 15, 6, 4, 11, 13, 6
    ncols = RY + YG + GC + CB + BM + MR
    wheel = np.zeros((ncols, 3))

    # Each hue segment keeps one channel saturated while another channel
    # ramps linearly up (ascending) or down (descending).
    segments = [
        (RY, 0, 1, False),  # red -> yellow: G ramps up
        (YG, 1, 0, True),   # yellow -> green: R ramps down
        (GC, 1, 2, False),  # green -> cyan: B ramps up
        (CB, 2, 1, True),   # cyan -> blue: G ramps down
        (BM, 2, 0, False),  # blue -> magenta: R ramps up
        (MR, 0, 2, True),   # magenta -> red: B ramps down
    ]
    col = 0
    for length, full_ch, ramp_ch, descending in segments:
        wheel[col:col + length, full_ch] = 255
        ramp = np.floor(255 * np.arange(length) / length)
        wheel[col:col + length, ramp_ch] = (255 - ramp) if descending else ramp
        col += length
    return wheel
|
125 |
+
|
126 |
+
|
127 |
+
class flow2color():
    """Render a 2-channel optical-flow tensor as a color image in [-1, 1]."""
    # code from: https://github.com/tomrunia/OpticalFlow_Visualization
    # MIT License
    #
    # Copyright (c) 2018 Tom Runia
    #
    # Permission is hereby granted, free of charge, to any person obtaining a copy
    # of this software and associated documentation files (the "Software"), to deal
    # in the Software without restriction, including without limitation the rights
    # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    # copies of the Software, and to permit persons to whom the Software is
    # furnished to do so, subject to conditions.
    #
    # Author: Tom Runia
    # Date Created: 2018-08-03
    def __init__(self):
        # Precompute the 55-entry hue wheel once; shared by all calls.
        self.colorwheel = make_colorwheel()


    def flow_compute_color(self, u, v, convert_to_bgr=False):
        '''
        Applies the flow color wheel to (possibly clipped) flow components u and v.
        According to the C++ source code of Daniel Scharstein
        According to the Matlab source code of Deqing Sun
        :param u: np.ndarray, input horizontal flow
        :param v: np.ndarray, input vertical flow
        :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
        :return: (H, W, 3) uint8 color image
        '''
        flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
        ncols = self.colorwheel.shape[0]

        # Magnitude controls saturation; angle selects the hue wheel entry.
        rad = np.sqrt(np.square(u) + np.square(v))
        a = np.arctan2(-v, -u)/np.pi
        # Fractional wheel position in [0, ncols-1]; k0/k1 are the two
        # neighboring entries blended by fraction f.
        fk = (a+1) / 2*(ncols-1)
        k0 = np.floor(fk).astype(np.int32)
        k1 = k0 + 1
        k1[k1 == ncols] = 0
        f = fk - k0

        for i in range(self.colorwheel.shape[1]):

            tmp = self.colorwheel[:,i]
            col0 = tmp[k0] / 255.0
            col1 = tmp[k1] / 255.0
            # Linear interpolation between the two wheel colors.
            col = (1-f)*col0 + f*col1

            # In-range flow desaturates toward white as magnitude shrinks.
            idx = (rad <= 1)
            col[idx] = 1 - rad[idx] * (1-col[idx])
            col[~idx] = col[~idx] * 0.75   # out of range?

            # Note the 2-i => BGR instead of RGB
            ch_idx = 2-i if convert_to_bgr else i
            flow_image[:,:,ch_idx] = np.floor(255 * col)

        return flow_image


    def __call__(self, flow_uv, clip_flow=None, convert_to_bgr=False):
        '''
        Expects a two dimensional flow image of shape [H,W,2]
        According to the C++ source code of Daniel Scharstein
        According to the Matlab source code of Deqing Sun
        :param flow_uv: np.ndarray of shape [H,W,2]
        :param clip_flow: float, maximum clipping value for flow
        :return: (3, H, W) float tensor scaled to [-1, 1]
        '''
        # Batched (B, 2, H, W) input: take the first sample.
        if len(flow_uv.size()) != 3:
            flow_uv = flow_uv[0]
        flow_uv = flow_uv.permute(1,2,0).cpu().detach().numpy()

        assert flow_uv.ndim == 3, 'input flow must have three dimensions'
        assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'

        if clip_flow is not None:
            flow_uv = np.clip(flow_uv, 0, clip_flow)

        # NOTE(review): channel 1 is used as u and channel 0 as v — the
        # opposite of the usual (u, v) ordering; presumably deliberate for
        # this project's flow layout — confirm against the flow producer.
        u = flow_uv[:,:,1]
        v = flow_uv[:,:,0]


        # Normalize by the maximum magnitude so colors span the full wheel.
        rad = np.sqrt(np.square(u) + np.square(v))
        rad_max = np.max(rad)

        epsilon = 1e-5
        u = u / (rad_max + epsilon)
        v = v / (rad_max + epsilon)
        image = self.flow_compute_color(u, v, convert_to_bgr)
        # uint8 HWC -> float CHW in [-1, 1] to match the image tensors used elsewhere.
        image = torch.tensor(image).float().permute(2,0,1)/255.0 * 2 - 1
        return image
|
217 |
+
|
218 |
+
|
219 |
+
def save_image(image_numpy, image_path):
    """Write an (H, W, C) numpy image to *image_path* via imageio,
    squeezing a single-channel image down to (H, W) first."""
    if image_numpy.shape[2] == 1:
        image_numpy = image_numpy.reshape(image_numpy.shape[:2])
    imageio.imwrite(image_path, image_numpy)
|
224 |
+
|
225 |
+
|
226 |
+
def mkdirs(paths):
    """Create every directory in *paths*, which may be a single path or a
    list of paths."""
    targets = paths if isinstance(paths, list) else [paths]
    for target in targets:
        mkdir(target)
|
232 |
+
|
233 |
+
|
234 |
+
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses exist_ok=True instead of the original os.path.exists() guard,
    which was racy: another process could create the directory between
    the check and the makedirs call and trigger FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
|
237 |
+
|
238 |
+
|
239 |
+
def find_class_in_module(target_cls_name, module):
    """Import *module* and return the attribute whose name equals
    *target_cls_name* when compared case-insensitively and ignoring
    underscores. Prints a message and exits if nothing matches."""
    wanted = target_cls_name.replace('_', '').lower()
    module_lib = importlib.import_module(module)
    found = None
    # Keep the last match, as the original did.
    for attr_name, attr in module_lib.__dict__.items():
        if attr_name.lower() == wanted:
            found = attr

    if found is None:
        print("In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, wanted))
        exit(0)

    return found
|
252 |
+
|
253 |
+
|
254 |
+
|
255 |
+
def atoi(text):
    """Return int(text) when *text* is all digits, else *text* unchanged."""
    if text.isdigit():
        return int(text)
    return text
|
257 |
+
|
258 |
+
|
259 |
+
def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    # Raw string: '(\d+)' without the r-prefix relies on an invalid escape,
    # which emits a DeprecationWarning and is slated to become a SyntaxError.
    return [atoi(c) for c in re.split(r'(\d+)', text)]
|
266 |
+
|
267 |
+
|
268 |
+
def natural_sort(items):
    """Sort *items* in place in human (natural) order; returns None."""
    items[:] = sorted(items, key=natural_keys)
|
270 |
+
|
271 |
+
class StoreDictKeyPair(argparse.Action):
    """argparse action turning "k1=v1,k2=v2" into {"k1": int(v1), ...}."""

    def __call__(self, parser, namespace, values, option_string=None):
        pairs = (entry.split("=") for entry in values.split(","))
        setattr(namespace, self.dest, {key: int(val) for key, val in pairs})
|
279 |
+
|
280 |
+
class StoreList(argparse.Action):
    """argparse action turning a comma-separated string into a list of ints."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = []
        for token in values.split(','):
            parsed.append(int(token))
        setattr(namespace, self.dest, parsed)
|
284 |
+
#
|
285 |
+
def get_iteration(dir_name, file_name, net_name):
    """Return the iteration number encoded in a checkpoint file name.

    Returns None when dir_name/file_name does not exist. For a 'latest'
    checkpoint, the highest-numbered '<iter>_net_<net_name>.pth' file in
    dir_name determines the iteration (0 when none exist).
    """
    if not os.path.exists(os.path.join(dir_name, file_name)):
        return None
    suffix = '_net_' + net_name + '.pth'
    if 'latest' in file_name:
        candidates = []
        for entry in os.listdir(dir_name):
            full = os.path.join(dir_name, entry)
            if os.path.isfile(full) and 'latest' not in entry and suffix in entry:
                candidates.append(full)
        if candidates == []:
            return 0
        model_name = os.path.basename(natsorted(candidates)[-1])
    else:
        model_name = file_name
    return int(model_name.replace(suffix, ''))
|
util/image_pool.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import random
|
2 |
+
import torch
|
3 |
+
from torch.autograd import Variable
|
4 |
+
class ImagePool():
    """History buffer of previously generated images.

    query() returns, per incoming image, either the image itself or a
    randomly selected older image from the pool (50/50 once the pool is
    full), reducing oscillation when training GAN discriminators.
    """

    def __init__(self, pool_size):
        # pool_size == 0 disables buffering entirely.
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch mixing current and historical images.

        :param images: batched image tensor
        :return: tensor of the same shape; the input object itself when
                 pooling is disabled
        """
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images.data:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                # Pool not yet full: store the new image and return it.
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    # Swap: return a stored image, keep the new one instead.
                    random_id = random.randint(0, self.pool_size - 1)
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        # The original wrapped this in torch.autograd.Variable, which is a
        # deprecated no-op since PyTorch 0.4; returning the tensor directly
        # is equivalent and drops the dependency.
        return torch.cat(return_images, 0)
|
util/util.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import print_function
|
2 |
+
|
3 |
+
import torch
|
4 |
+
from PIL import Image
|
5 |
+
import numpy as np
|
6 |
+
import os
|
7 |
+
|
8 |
+
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
    # Convert a tensor (or a list of tensors, recursively) to a numpy array.
    # NOTE(review): despite the imtype parameter, this version returns a
    # float array in [0, 1] and never casts to imtype; `normalize` is also
    # unused. Confirm callers rely on this before changing it.
    if isinstance(image_tensor, list):
        image_numpy = []
        for i in range(len(image_tensor)):
            image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
        return image_numpy
    image_numpy = image_tensor.cpu().float().numpy()

    # Map from [-1, 1] to [0, 1] and clamp.
    image_numpy = (image_numpy + 1) / 2.0
    image_numpy = np.clip(image_numpy, 0, 1)
    # NOTE(review): no transpose happens above, so for a channel-first
    # (C, H, W) tensor shape[2] is the width, not the channel count — this
    # branch looks like a leftover from an HWC variant; verify intent.
    if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
        image_numpy = image_numpy[:,:,0]

    return image_numpy
|
22 |
+
|
23 |
+
def tensor2label(label_tensor, n_label, imtype=np.uint8):
    """Convert a label tensor into a colorized numpy array scaled to [0, 1].

    Falls back to tensor2im when n_label == 0. One-hot (C > 1) inputs are
    reduced to an index map via argmax over the channel dimension.
    """
    if n_label == 0:
        return tensor2im(label_tensor, imtype)
    label_tensor = label_tensor.cpu().float()
    if label_tensor.size()[0] > 1:
        label_tensor = label_tensor.max(0, keepdim=True)[1]
    colorizer = Colorize(n_label)
    colorized = colorizer(label_tensor)
    return colorized.numpy() / 255.0
|
34 |
+
|
35 |
+
def save_image(image_numpy, image_path):
    """Save a numpy image to *image_path* via PIL."""
    Image.fromarray(image_numpy).save(image_path)
|
38 |
+
|
39 |
+
def mkdirs(paths):
    """Create one directory, or each directory in a list of paths."""
    if not isinstance(paths, list):
        mkdir(paths)
        return
    for path in paths:
        mkdir(path)
|
45 |
+
|
46 |
+
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    exist_ok=True replaces the original os.path.exists() check, which was
    racy: a concurrent process creating the directory between the check
    and makedirs would raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
|
49 |
+
|
50 |
+
|
51 |
+
def uint82bin(n, count=8):
    """Return the low *count* bits of integer n as a binary string,
    most-significant bit first."""
    return ''.join(str((n >> shift) & 1) for shift in reversed(range(count)))
|
54 |
+
|
55 |
+
def labelcolormap(N):
    """Return an (N, 3) uint8 label color map.

    N == 35 uses the fixed Cityscapes palette; any other N derives colors
    from the label index via the standard bit-interleaving scheme.
    """
    if N == 35:  # cityscape
        return np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (111, 74, 0), (81, 0, 81),
                         (128, 64, 128), (244, 35, 232), (250, 170, 160), (230, 150, 140), (70, 70, 70), (102, 102, 156), (190, 153, 153),
                         (180, 165, 180), (150, 100, 100), (150, 120, 90), (153, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
                         (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
                         (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 142)],
                        dtype=np.uint8)
    cmap = np.zeros((N, 3), dtype=np.uint8)
    for i in range(N):
        r = g = b = 0
        value = i
        # Spread the bits of the label index across the three channels,
        # three bits per round, from the high bit positions downwards.
        for shift in range(7):
            r ^= ((value >> 0) & 1) << (7 - shift)
            g ^= ((value >> 1) & 1) << (7 - shift)
            b ^= ((value >> 2) & 1) << (7 - shift)
            value >>= 3
        cmap[i] = (r, g, b)
    return cmap
|
78 |
+
|
79 |
+
class Colorize(object):
    """Map an integer label map (1, H, W) to a uint8 color image (3, H, W)
    using the palette produced by labelcolormap."""

    def __init__(self, n=35):
        palette = labelcolormap(n)
        self.cmap = torch.from_numpy(palette[:n])

    def __call__(self, gray_image):
        dims = gray_image.size()
        colored = torch.ByteTensor(3, dims[1], dims[2]).fill_(0)
        labels = gray_image[0]
        for index in range(len(self.cmap)):
            selected = (index == labels).cpu()
            for channel in range(3):
                colored[channel][selected] = self.cmap[index][channel]

        return colored
|