repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
FishFSRNet
|
FishFSRNet-main/parsing/test_parsingnet.py
|
from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import dataset_parsingnet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
import torchvision
import parsingnet
net = parsingnet.ParsingNet(args)
net = util.prepare(net)
print(util.get_parameter_number(net))
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
testdata = dataset_parsingnet.Data(root=os.path.join(args.dir_data, args.data_test), args=args, train=False)
testset = DataLoader(testdata, batch_size=1, shuffle=False, num_workers=1)
pretrained_dict = torch.load('./epoch.pth', map_location='cuda:0')
net.load_state_dict(pretrained_dict)
net = util.prepare(net)
net.eval()
val_psnr = 0
val_ssim = 0
with torch.no_grad():
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result-test'), exist_ok=True)
net.eval()
timer_test = util.timer()
for batch, (lr, _, filename) in enumerate(testset):
lr = util.prepare(lr)
p = net(lr)
torchvision.utils.save_image(p[0],
os.path.join(args.save_path, args.writer_name, 'result-test',
'{}'.format(str(filename[0])[:-4] + ".png")))
print("Tesing over.")
| 1,315 | 36.6 | 108 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/parsingnet.py
|
import common
import torch.nn as nn
class ParsingNet(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(ParsingNet, self).__init__()
n_resblocks = 8
n_feats = 64
kernel_size = 3
act = nn.ReLU(True)
self.args = args
m_head = [conv(args.n_colors, n_feats, kernel_size)]
# define body module
m_body = [
common.ResBlock(
conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
m_feature = [
conv(n_feats, 3, kernel_size)
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.feature = nn.Sequential(*m_feature)
def forward(self, x):
x = self.head(x)
res = self.body(x)
feature = self.feature(res)
return feature
| 906 | 24.914286 | 77 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/option.py
|
import argparse
# import os
parser = argparse.ArgumentParser(description='FaceSR')
parser.add_argument('--cpu', action='store_true',
help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
help='number of GPUs')
parser.add_argument('--cuda_name', type=str, default='1')
parser.add_argument('--gpu_ids', type=int, default=1)
parser.add_argument('--seed', type=int, default=1,
help='random seed')
parser.add_argument('--dir_data', type=str, default='/userhome/data/CelebA',
help='dataset directory')
parser.add_argument('--data_train', type=str, default='train',
help='train dataset name')
parser.add_argument('--data_test', type=str, default='test',
help='test dataset name')
parser.add_argument('--data_val', type=str, default='val',
help='val dataset name')
parser.add_argument('--scale', type=int, default=8,
help='super resolution scale')
parser.add_argument('--rgb_range', type=int, default=255,
help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
help='number of color channels to use')
parser.add_argument('--augment', action='store_true',
help='use data augmentation')
parser.add_argument('--PCSR1', action="store_true",
help='PCSR1')
parser.add_argument('--refine2', action="store_true",
help='refine2')
# Model specifications
parser.add_argument('--model', default='fishfsrnet',
help='model name')
parser.add_argument('--act', type=str, default='relu',
help='activation function')
parser.add_argument('--n_resblocks', type=int, default=8,
help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=0.2,
help='residual scaling')
parser.add_argument('--large', action="store_true",
help='the input is as large as output or not')
parser.add_argument('--epochs', type=int, default=400,
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=8,
help='input batch size for training')
parser.add_argument('--test_only', action='store_true',# default=True,
help='set this option to test the model')
# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate')
parser.add_argument('--save_path', type=str, default='/userhome/experiment',
help='file path to save model')
parser.add_argument('--load', type=str, default='',
help='file name to load')
parser.add_argument("--writer_name", type=str, default="fishfsrnet",
help="the name of the writer")
args = parser.parse_args()
if args.epochs == 0:
    args.epochs = int(1e8)
for arg in vars(args):
if vars(args)[arg] == 'True':
vars(args)[arg] = True
elif vars(args)[arg] == 'False':
vars(args)[arg] = False
| 3,257 | 36.883721 | 76 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/cbam.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
super(ChannelGate, self).__init__()
if pool_types is None:
pool_types = ['avg', 'max']
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, int(gate_channels // reduction_ratio)),
nn.ReLU(),
nn.Linear(int(gate_channels // reduction_ratio), gate_channels)
)
self.pool_types = pool_types
def forward(self, x):
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type == 'avg':
avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(avg_pool)
elif pool_type == 'max':
max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(max_pool)
elif pool_type == 'lp':
lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(lp_pool)
elif pool_type == 'var':
var_pool = variance_pool(x)
channel_att_raw = self.mlp(var_pool)
elif pool_type == 'lse':
# LSE pool only
lse_pool = logsumexp_2d(x)
channel_att_raw = self.mlp(lse_pool)
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
return x * scale
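# Numerically stable log-sum-exp over the spatial dimensions, used by the 'lse' pooling option above.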
def logsumexp_2d(tensor):
tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
return outputs
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
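# Spatial attention gate (CSAR style): a 1x1 conv expands the channels by a factor of gama, a ReLU and a
# second 1x1 conv map them back to n_feats, and the resulting sigmoid mask rescales the input.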
class CSAR_SpatialGate(nn.Module):
def __init__(self, n_feats, gama=2):
super(CSAR_SpatialGate, self).__init__()
self.spatial_layer1 = nn.Conv2d(in_channels=n_feats, out_channels=gama * n_feats, kernel_size=1, stride=1)
self.spatial_layer2 = nn.ReLU()
self.spatial_layer3 = nn.Conv2d(in_channels=gama * n_feats, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x):
x_compress = self.spatial_layer1(x)
x_out = self.spatial_layer2(x_compress)
x_out = self.spatial_layer3(x_out)
scale = torch.sigmoid(x_out) # broadcasting
return x * scale
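# Per-channel spatial variance pooling, returned with shape (B, C, 1, 1) so it can feed the ChannelGate MLP.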
def variance_pool(x):
my_mean = x.mean(dim=3, keepdim=True).mean(dim=2, keepdim=True)
return (x - my_mean).pow(2).mean(dim=3, keepdim=False).mean(dim=2, keepdim=False).view(x.size()[0], x.size()[1], 1,
1)
| 3,309 | 34.978261 | 119 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/common.py
|
import torch.nn as nn
import torch
import cbam
import math
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
class BasicBlock(nn.Sequential):
def __init__(
self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True,
bn=False, act=nn.PReLU()):
m = [conv(in_channels, out_channels, kernel_size, bias=bias)]
if bn:
m.append(nn.BatchNorm2d(out_channels))
if act is not None:
m.append(act)
super(BasicBlock, self).__init__(*m)
def get_parameters(model, bias):
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
if bias:
yield m.bias
else:
yield m.weight
elif isinstance(m, nn.BatchNorm2d):
if bias:
yield m.bias
else:
yield m.weight
def default_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
stride=stride, padding=(kernel_size // 2), bias=bias)
class ResBlock(nn.Module):
def __init__(
self, conv, n_feats, kernel_size,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(ResBlock, self).__init__()
m = []
for i in range(2):
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if i == 0:
m.append(act)
self.body = nn.Sequential(*m)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x).mul(self.res_scale)
res += x
return res
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias))
m.append(nn.PixelShuffle(2))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias))
m.append(nn.PixelShuffle(3))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
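# Inverse of nn.PixelShuffle: a space-to-depth rearrangement that trades spatial resolution for channel depth by the given ratio.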
class invPixelShuffle(nn.Module):
def __init__(self, ratio=2):
super(invPixelShuffle, self).__init__()
self.ratio = ratio
def forward(self, tensor):
ratio = self.ratio
b = tensor.size(0)
ch = tensor.size(1)
y = tensor.size(2)
x = tensor.size(3)
assert x % ratio == 0 and y % ratio == 0, 'x, y, ratio : {}, {}, {}'.format(x, y, ratio)
return tensor.view(b, ch, y // ratio, ratio, x // ratio, ratio).permute(0, 1, 3, 5, 2, 4).contiguous().view(b,
-1,
y // ratio,
x // ratio)
class invUpsampler(nn.Sequential):
def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(invPixelShuffle(2))
m.append(conv(n_feat * 4, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
elif scale == 3:
m.append(invPixelShuffle(3))
m.append(conv(n_feat * 9, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
else:
raise NotImplementedError
super(invUpsampler, self).__init__(*m)
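# Refines a feature map: the residual between the two inputs is passed through two ResBlocks and added back to the second input.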
class Refine(nn.Module):
def __init__(self, n_feats, conv=default_conv):
super(Refine, self).__init__()
kernel_size = 3
act = nn.ReLU(True)
self.conv = nn.Sequential(*[ResBlock(conv, n_feats, kernel_size, act=act),
ResBlock(conv, n_feats, kernel_size, act=act)])
def forward(self, first, second):
        residual = second - first
        res = self.conv(residual)
res = res + second
return res
class Multi_scale_fusion_block(nn.Module):
def __init__(self, n_feats, scale):
super(Multi_scale_fusion_block, self).__init__()
self.scale = scale
if scale == 2:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.down2 = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)])
elif scale == 4:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
elif scale == 8:
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
self.up2 = nn.UpsamplingNearest2d(scale_factor=4)
self.refine2 = Refine(n_feats)
self.refine4 = Refine(n_feats)
self.refine8 = Refine(n_feats)
# self.attention = CA(conv=default_conv, n_feats=n_feats, kernel_size=1)
self.attention = cbam.ChannelGate(n_feats, reduction_ratio=4, pool_types=['avg', 'max', 'var'])
self.conv = nn.Conv2d(in_channels=n_feats * 3, out_channels=n_feats, kernel_size=1)
def forward(self, scale2, scale4, scale8, now):
if self.scale == 2:
scale4 = self.down1(scale4)
scale8 = self.down2(scale8)
elif self.scale == 4:
scale8 = self.down1(scale8)
scale2 = self.up1(scale2)
elif self.scale == 8:
scale4 = self.up1(scale4)
scale2 = self.up2(scale2)
feature1 = self.refine2(scale2, now)
feature2 = self.refine4(scale4, now)
feature3 = self.refine8(scale8, now)
fea = torch.cat((feature1, feature2, feature3), 1)
fea = self.conv(fea)
fea = self.attention(fea)
fea = fea + now
return fea
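# Parsing-conditioned residual block: one branch applies spatial and channel attention to the features,
# the other fuses the features with the parsing map; the two results are merged by a 1x1 conv and added to the input.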
class PCSR1(nn.Module):
def __init__(self, conv, n_feats, kernel_size, bias=True, act=nn.ReLU(True), res_scale=1, gama=2, lamb=4):
super(PCSR1, self).__init__()
# First branch
m = []
for i in range(2):
if i == 0:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
m.append(act)
if i == 1:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
self.body = nn.Sequential(*m)
self.attention_layer1 = cbam.CSAR_SpatialGate(n_feats, gama=gama)
self.attention_layer2 = cbam.ChannelGate(n_feats, reduction_ratio=lamb, pool_types=['avg', 'max', 'var'])
self.conv = conv(2 * n_feats, n_feats, 1, bias=bias)
self.res_scale = res_scale
# Second branch
self.conv_feature = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_parsing = nn.Sequential(
*[nn.Conv2d(in_channels=3, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=3, stride=1,
padding=1)
self.attention_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x, p):
# First branch
res = self.body(x)
res1 = self.attention_layer1(res)
res2 = self.attention_layer2(res)
res = torch.cat((res1, res2), 1)
res = self.conv(res)
# Second branch
fea = self.conv_feature(x)
par = self.conv_parsing(p)
fea = torch.cat((fea, par), 1)
fea = self.conv_fusion(fea)
fea_fusion = torch.cat((fea, res), 1)
res = self.attention_fusion(fea_fusion)
res += x
return res
| 8,996 | 34.007782 | 127 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/util.py
|
import torch
import numpy as np
import math
import cv2
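# Moves a tensor or nn.Module onto the GPU when CUDA is available; used for both models and data batches.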
def prepare(arg):
if torch.cuda.is_available():
# print(1)
arg = arg.cuda()
return arg
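# test_parsingnet.py calls util.get_parameter_number(), which is missing from this excerpt of util.py.
# A minimal sketch of the parameter-counting helper it presumably provides:
def get_parameter_number(net):
    total_num = sum(p.numel() for p in net.parameters())
    trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
    return {'Total': total_num, 'Trainable': trainable_num}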
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def calc_metrics(img1, img2, crop_border=8, test_Y=True):
#
# print(img1.shape, img1.shape[2])
img1 = np.transpose(img1, (1, 2, 0))
img2 = np.transpose(img2, (1, 2, 0))
img1 = np.array(img1)
img2 = np.array(img2)
# print(img1.shape, img1.shape[2])
if test_Y and img1.shape[2] == 3: # evaluate on Y channel in YCbCr color space
im1_in = rgb2ycbcr(img1)
im2_in = rgb2ycbcr(img2)
else:
im1_in = img1
im2_in = img2
# print("img1_in.ndim: ", im1_in.ndim)
if im1_in.ndim == 3:
# cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border, :]
# cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_im1 = im1_in[:, crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[:, crop_border:-crop_border, crop_border:-crop_border]
elif im1_in.ndim == 2:
cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border]
else:
raise ValueError('Wrong image dimension: {}. Should be 2 or 3.'.format(im1_in.ndim))
# print("cropped: ", cropped_im1.shape, cropped_im2.shape)
psnr = calc_psnr(cropped_im1 * 255, cropped_im2 * 255)
ssim = calc_ssim(cropped_im1 * 255, cropped_im2 * 255)
# print(type(ssim))
return psnr, ssim
def calc_psnr(img1, img2):
# img1 and img2 have range [0, 255]
#
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
mse = np.mean((img1_np - img2_np)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
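# Single-image SSIM with an 11x11 Gaussian window (sigma=1.5); the constants follow the standard SSIM definition with L=255.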
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1_np, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2_np, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1_np**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2_np**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1_np * img2_np, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calc_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
# print("img2: ", img2.shape)
# img1 = np.transpose(img1, (1, 2, 0))
# img2 = np.transpose(img2, (1, 2, 0))
# print("img2_np_trans", img2.shape)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
# print(img1.shape)
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
# print(img1.shape[2])
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1, img2))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
| 4,380 | 28.601351 | 92 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/main_parsingnet.py
|
import torch
import torch.optim as optim
from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch.nn as nn
import dataset_parsingnet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
import torchvision
from parsingnet import ParsingNet
net = ParsingNet(args)
net = util.prepare(net)
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
traindata = dataset_parsingnet.Data(root=os.path.join(args.dir_data, args.data_train), args=args, train=True)
trainset = DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=16)
valdata = dataset_parsingnet.Data(root=os.path.join(args.dir_data, args.data_val), args=args, train=False)
valset = DataLoader(valdata, batch_size=1, shuffle=False, num_workers=1)
criterion1 = nn.L1Loss()
optimizer = optim.Adam(params=net.parameters(), lr=args.lr, betas=(0.9, 0.99), eps=1e-8)
for i in range(args.epochs):
net.train()
train_loss = 0
bum = len(trainset)
for batch, (lr, hr, _) in enumerate(trainset):
lr, hr = util.prepare(lr), util.prepare(hr)
sr = net(lr)
loss = criterion1(sr, hr)
train_loss = train_loss + loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Epoch:{} loss: {:.3f}".format(i + 1, train_loss / (len(trainset)) * 255))
writer.add_scalar('train_loss', train_loss / (len(trainset)) * 255, i + 1)
os.makedirs(os.path.join(args.save_path, args.writer_name), exist_ok=True)
os.makedirs(os.path.join(args.save_path, args.writer_name, 'model'), exist_ok=True)
torch.save(net.state_dict(),
os.path.join(args.save_path, args.writer_name, 'model', 'epoch{}.pth'.format(i + 1)))
net.eval()
val_psnr_my = 0
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result'), exist_ok=True)
for batch, (lr, hr, filename) in enumerate(valset):
lr, hr = util.prepare(lr), util.prepare(hr)
sr = net(lr)
        psnr_c, _ = util.calc_metrics(hr[0].data.cpu(), sr[0].data.cpu())
        val_psnr_my = val_psnr_my + psnr_c
print("Epoch:{} val psnr: {:.3f}".format(i + 1, val_psnr_my / (len(valset))))
writer.add_scalar("val_psnr_my", val_psnr_my / len(valset), i + 1)
| 2,264 | 40.181818 | 109 |
py
|
FishFSRNet
|
FishFSRNet-main/parsing/dataset_parsingnet.py
|
from torch.utils import data
import os
from PIL import Image
from torchvision import transforms
from torchvision.transforms import ToTensor
import numpy
import glob
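# Dataset yielding (LR image, estimated parsing map, filename); the LR and parsing sub-directories depend on the SR scale (x4/x8/x16).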
class Data(data.Dataset):
def __init__(self, root, args, train=False):
        # Return the list of files and folders under the specified path.
self.args = args
if args.scale == 4:
self.imgs_LR_path = os.path.join(root, 'LR_x4')
self.imgs_parsing_path = os.path.join(root, 'global_2_LR_x4')
elif args.scale == 8:
self.imgs_LR_path = os.path.join(root, 'LR')
self.imgs_parsing_path = os.path.join(root, 'global_2_LR')
elif args.scale == 16:
self.imgs_LR_path = os.path.join(root, 'LR_x16')
self.imgs_parsing_path = os.path.join(root, 'global_2_LR_x16')
self.imgs_LR = sorted(
glob.glob(os.path.join(self.imgs_LR_path, '*.png'))
)
self.imgs_parsing = sorted(
glob.glob(os.path.join(self.imgs_parsing_path, '*.png'))
)
self.transform = transforms.ToTensor()
self.train = train
def __getitem__(self, item):
img_path_LR = os.path.join(self.imgs_LR_path, self.imgs_LR[item])
img_path_parsing = os.path.join(self.imgs_parsing_path, self.imgs_parsing[item])
LR = Image.open(img_path_LR)
parsing = Image.open(img_path_parsing)
LR = numpy.array(LR)
parsing = numpy.array(parsing)
LR = ToTensor()(LR)
parsing = ToTensor()(parsing)
filename = os.path.basename(img_path_LR)
return LR, parsing, filename
def __len__(self):
return len(self.imgs_LR)
| 1,638 | 31.78 | 88 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/fishfsrnet.py
|
import common
import torch.nn.functional as F
import torch.nn as nn
import torch
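# Builds the two-block body used at every stage of the fish-shaped network: PCSR1 blocks when parsing guidance is enabled, plain ResBlocks otherwise.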
def fish_block(args, conv=common.default_conv, n_feats=64, PCSR1=False):
kernel_size = 3
res = []
act = nn.ReLU(True)
if PCSR1:
res.append(common.PCSR1(
conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
))
res.append(common.PCSR1(
conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
))
else:
res.append(common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
res.append(common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
return res
class FISHNET(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(FISHNET, self).__init__()
n_resblocks = 8
n_feats = 64
kernel_size = 3
scale = 8
act = nn.ReLU(True)
self.args = args
# define head module
m_head = [conv(args.n_colors, n_feats, kernel_size)]
if args.refine2:
self.refine2 = nn.Sequential(*[common.Multi_scale_fusion_block(n_feats, scale=8),
common.Multi_scale_fusion_block(n_feats, scale=4),
common.Multi_scale_fusion_block(n_feats, scale=2),
common.Multi_scale_fusion_block(n_feats, scale=2),
common.Multi_scale_fusion_block(n_feats, scale=4),
common.Multi_scale_fusion_block(n_feats, scale=8),
])
# define body module
self.up1 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.up_stage1 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up2 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.up_stage2 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up3 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.up_stage3 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.down1 = nn.Sequential(*common.invUpsampler(conv, 2, n_feats, act=False))
self.down_stage1 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.down2 = nn.Sequential(*common.invUpsampler(conv, 2, n_feats, act=False))
self.down_stage2 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.down3 = nn.Sequential(*common.invUpsampler(conv, 2, n_feats, act=False))
self.down_stage3 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.conv_tail1 = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
self.conv = conv(n_feats, n_feats, 3)
self.up21 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.conv_tail2 = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
self.up2_stage1 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up22 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False)) # n_feats*3
self.up2_stage2 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
self.up23 = nn.Sequential(*common.Upsampler(conv, 2, n_feats, act=False))
self.conv_tail3 = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
self.up2_stage3 = nn.Sequential(
*fish_block(args, n_feats=args.n_feats, PCSR1=args.PCSR1))
# define tail module
m_tail = [
conv(n_feats, args.n_colors, kernel_size)
]
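        # channelReduction is not defined in this common.py excerpt; it is expected to map the 2*n_feats concatenated features back to n_feats.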
self.reduc = common.channelReduction()
self.head = nn.Sequential(*m_head)
self.tail = nn.Sequential(*m_tail)
def forward(self, x, parsing=None):
intp = x
# print(parsing.shape)
if parsing is not None:
p2 = F.interpolate(parsing, scale_factor=2, mode='nearest')
p4 = F.interpolate(parsing, scale_factor=4, mode='nearest')
p8 = F.interpolate(parsing, scale_factor=8, mode='nearest')
# for i in range(len(parsing_list)):
# print(i, parsing_list[i].shape)
x = self.head(intp)
# print(x.shape)
x1 = self.up1(x)
if self.args.PCSR1:
x = self.up_stage1[0](x1, p2)
x = self.up_stage1[1](x, p2)
else:
x = self.up_stage1(x1)
x2 = self.up2(x)
if self.args.PCSR1:
x = self.up_stage2[0](x2, p4)
x = self.up_stage2[1](x, p4)
else:
x = self.up_stage2(x2)
x3 = self.up3(x)
if self.args.PCSR1:
res1 = self.up_stage3[0](x3, p8)
res1 = self.up_stage3[1](res1, p8)
else:
res1 = self.up_stage3(x3)
# if self.args.shift_mean:
# res1 = self.add_mean(res1)
if self.args.refine2:
inp = self.refine2[0](x1, x2, x3, res1)
else:
inp = torch.cat((x3, res1), 1)
inp = self.reduc(inp)
x4 = self.down1(inp)
if self.args.PCSR1:
x = self.down_stage1[0](x4, p4)
x = self.down_stage1[1](x, p4)
else:
x = self.down_stage1(x4)
if self.args.refine2:
inp1 = self.refine2[1](x1, x2, x3, x)
else:
inp1 = torch.cat((x, x2), 1)
inp1 = self.reduc(inp1)
x5 = self.down2(inp1)
if self.args.PCSR1:
x = self.down_stage2[0](x5, p2)
x = self.down_stage2[1](x, p2)
else:
x = self.down_stage2(x5)
if self.args.refine2:
inp2 = self.refine2[2](x1, x2, x3, x)
else:
inp2 = torch.cat((x, x1), 1)
inp2 = self.reduc(inp2)
x6 = self.down3(inp2)
if self.args.PCSR1:
x = self.down_stage3[0](x6, parsing)
x = self.down_stage3[1](x, parsing)
else:
x = self.down_stage3(x6)
if self.args.refine2:
inp3 = self.refine2[3](x6, x5, x4, x)
else:
inp3 = torch.cat((x, x6), 1)
inp3 = self.conv_tail1(inp3)
inp3 = self.conv(inp3)
x = self.up21(inp3)
if self.args.PCSR1:
x = self.up2_stage1[0](x, p2)
x = self.up2_stage1[1](x, p2)
else:
x = self.up2_stage1(x)
if self.args.refine2:
inp4 = self.refine2[4](x6, x5, x4, x)
else:
inp4 = torch.cat((x, x5), 1)
inp4 = self.conv_tail2(inp4)
x = self.up22(inp4)
if self.args.PCSR1:
x = self.up2_stage2[0](x, p4)
x = self.up2_stage2[1](x, p4)
else:
x = self.up2_stage2(x)
if self.args.refine2:
inp5 = self.refine2[5](x6, x5, x4, x)
else:
inp5 = torch.cat((x, x4), 1)
inp5 = self.conv_tail3(inp5)
x = self.up23(inp5)
if self.args.PCSR1:
res = self.up2_stage3[0](x, p8)
            res = self.up2_stage3[1](res, p8)
else:
res = self.up2_stage3(x)
x = self.tail(res)
return x
| 7,540 | 32.665179 | 107 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/test.py
|
from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import dataset_parsing
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
import torchvision
from fishfsrnet import FISHNET
net = FISHNET(args)
net = util.prepare(net)
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
testdata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_test), args=args, train=False)
testset = DataLoader(testdata, batch_size=1, shuffle=False, num_workers=1)
pretrained_dict = torch.load('./epoch.pth', map_location='cuda:0')
net.load_state_dict(pretrained_dict)
net = util.prepare(net)
net.eval()
val_psnr = 0
val_ssim = 0
with torch.no_grad():
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result-test'), exist_ok=True)
net.eval()
for batch, (lr, hr, parsing, filename) in enumerate(testset):
lr, hr, parsing = util.prepare(lr), util.prepare(hr), util.prepare(parsing)
sr = net(lr, parsing)
        psnr1, ssim1 = util.calc_metrics(hr[0].data.cpu(), sr[0].data.cpu(), crop_border=8)
        val_psnr = val_psnr + psnr1
        val_ssim = val_ssim + ssim1
torchvision.utils.save_image(sr[0],
os.path.join(args.save_path, args.writer_name, 'result-test',
'{}'.format(str(filename[0])[:-4] + ".png")))
print("Test psnr: {:.3f}".format(val_psnr / (len(testset))))
    print("Test ssim: {:.4f}".format(val_ssim / (len(testset))))
| 1,520 | 39.026316 | 105 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/option.py
|
import argparse
# import os
parser = argparse.ArgumentParser(description='FaceSR')
parser.add_argument('--cpu', action='store_true',
help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
help='number of GPUs')
parser.add_argument('--cuda_name', type=str, default='1')
parser.add_argument('--gpu_ids', type=int, default=1)
parser.add_argument('--seed', type=int, default=1,
help='random seed')
parser.add_argument('--dir_data', type=str, default='/userhome/data/CelebA',
help='dataset directory')
parser.add_argument('--data_train', type=str, default='train',
help='train dataset name')
parser.add_argument('--data_test', type=str, default='test',
help='test dataset name')
parser.add_argument('--data_val', type=str, default='val',
help='val dataset name')
parser.add_argument('--scale', type=int, default=8,
help='super resolution scale')
parser.add_argument('--rgb_range', type=int, default=255,
help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
help='number of color channels to use')
parser.add_argument('--augment', action='store_true',
help='use data augmentation')
parser.add_argument('--PCSR1', action="store_true",
help='PCSR1')
parser.add_argument('--refine2', action="store_true",
help='refine2')
# Model specifications
parser.add_argument('--model', default='fishfsrnet',
help='model name')
parser.add_argument('--act', type=str, default='relu',
help='activation function')
parser.add_argument('--n_resblocks', type=int, default=8,
help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=0.2,
help='residual scaling')
parser.add_argument('--large', action="store_true",
help='the input is as large as output or not')
parser.add_argument('--epochs', type=int, default=400,
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=8,
help='input batch size for training')
parser.add_argument('--test_only', action='store_true',# default=True,
help='set this option to test the model')
# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate')
parser.add_argument('--save_path', type=str, default='/userhome/experiment',
help='file path to save model')
parser.add_argument('--load', type=str, default='',
help='file name to load')
parser.add_argument("--writer_name", type=str, default="fishfsrnet",
help="the name of the writer")
args = parser.parse_args()
if args.epochs == 0:
    args.epochs = int(1e8)
for arg in vars(args):
if vars(args)[arg] == 'True':
vars(args)[arg] = True
elif vars(args)[arg] == 'False':
vars(args)[arg] = False
| 3,257 | 36.883721 | 76 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/cbam.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
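# CBAM-style channel attention: a shared two-layer MLP is applied to several global poolings (avg, max, lp, var, lse),
# the results are summed, passed through a sigmoid, and broadcast to rescale the input channels.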
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
super(ChannelGate, self).__init__()
if pool_types is None:
pool_types = ['avg', 'max']
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, int(gate_channels // reduction_ratio)),
nn.ReLU(),
nn.Linear(int(gate_channels // reduction_ratio), gate_channels)
)
self.pool_types = pool_types
def forward(self, x):
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type == 'avg':
avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(avg_pool)
elif pool_type == 'max':
max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(max_pool)
elif pool_type == 'lp':
lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp(lp_pool)
elif pool_type == 'var':
var_pool = variance_pool(x)
channel_att_raw = self.mlp(var_pool)
elif pool_type == 'lse':
# LSE pool only
lse_pool = logsumexp_2d(x)
channel_att_raw = self.mlp(lse_pool)
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
return x * scale
def logsumexp_2d(tensor):
tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
return outputs
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
class CSAR_SpatialGate(nn.Module):
def __init__(self, n_feats, gama=2):
super(CSAR_SpatialGate, self).__init__()
self.spatial_layer1 = nn.Conv2d(in_channels=n_feats, out_channels=gama * n_feats, kernel_size=1, stride=1)
self.spatial_layer2 = nn.ReLU()
self.spatial_layer3 = nn.Conv2d(in_channels=gama * n_feats, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x):
x_compress = self.spatial_layer1(x)
x_out = self.spatial_layer2(x_compress)
x_out = self.spatial_layer3(x_out)
scale = torch.sigmoid(x_out) # broadcasting
return x * scale
def variance_pool(x):
my_mean = x.mean(dim=3, keepdim=True).mean(dim=2, keepdim=True)
return (x - my_mean).pow(2).mean(dim=3, keepdim=False).mean(dim=2, keepdim=False).view(x.size()[0], x.size()[1], 1,
1)
| 3,309 | 34.978261 | 119 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/common.py
|
import torch.nn as nn
import torch
import cbam
import math
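# Gathers, for every batch element, the rows of values selected by indices along dimension 1.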
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
class BasicBlock(nn.Sequential):
def __init__(
self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True,
bn=False, act=nn.PReLU()):
m = [conv(in_channels, out_channels, kernel_size, bias=bias)]
if bn:
m.append(nn.BatchNorm2d(out_channels))
if act is not None:
m.append(act)
super(BasicBlock, self).__init__(*m)
def get_parameters(model, bias):
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
if bias:
yield m.bias
else:
yield m.weight
elif isinstance(m, nn.BatchNorm2d):
if bias:
yield m.bias
else:
yield m.weight
def default_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
stride=stride, padding=(kernel_size // 2), bias=bias)
class ResBlock(nn.Module):
def __init__(
self, conv, n_feats, kernel_size,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(ResBlock, self).__init__()
m = []
for i in range(2):
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if i == 0:
m.append(act)
self.body = nn.Sequential(*m)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x).mul(self.res_scale)
res += x
return res
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias))
m.append(nn.PixelShuffle(2))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias))
m.append(nn.PixelShuffle(3))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
class invPixelShuffle(nn.Module):
def __init__(self, ratio=2):
super(invPixelShuffle, self).__init__()
self.ratio = ratio
def forward(self, tensor):
ratio = self.ratio
b = tensor.size(0)
ch = tensor.size(1)
y = tensor.size(2)
x = tensor.size(3)
assert x % ratio == 0 and y % ratio == 0, 'x, y, ratio : {}, {}, {}'.format(x, y, ratio)
return tensor.view(b, ch, y // ratio, ratio, x // ratio, ratio).permute(0, 1, 3, 5, 2, 4).contiguous().view(b,
-1,
y // ratio,
x // ratio)
class invUpsampler(nn.Sequential):
def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(invPixelShuffle(2))
m.append(conv(n_feat * 4, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
elif scale == 3:
m.append(invPixelShuffle(3))
m.append(conv(n_feat * 9, n_feat, 3, bias))
if bn: m.append(nn.BatchNorm2d(n_feat))
if act: m.append(act())
else:
raise NotImplementedError
super(invUpsampler, self).__init__(*m)
class Refine(nn.Module):
def __init__(self, n_feats, conv=default_conv):
super(Refine, self).__init__()
kernel_size = 3
act = nn.ReLU(True)
self.conv = nn.Sequential(*[ResBlock(conv, n_feats, kernel_size, act=act),
ResBlock(conv, n_feats, kernel_size, act=act)])
def forward(self, first, second):
        residual = second - first
        res = self.conv(residual)
res = res + second
return res
class Multi_scale_fusion_block(nn.Module):
def __init__(self, n_feats, scale):
super(Multi_scale_fusion_block, self).__init__()
self.scale = scale
if scale == 2:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.down2 = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)])
elif scale == 4:
self.down1 = nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=2, padding=1)
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
elif scale == 8:
self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
self.up2 = nn.UpsamplingNearest2d(scale_factor=4)
self.refine2 = Refine(n_feats)
self.refine4 = Refine(n_feats)
self.refine8 = Refine(n_feats)
# self.attention = CA(conv=default_conv, n_feats=n_feats, kernel_size=1)
self.attention = cbam.ChannelGate(n_feats, reduction_ratio=4, pool_types=['avg', 'max', 'var'])
self.conv = nn.Conv2d(in_channels=n_feats * 3, out_channels=n_feats, kernel_size=1)
def forward(self, scale2, scale4, scale8, now):
if self.scale == 2:
scale4 = self.down1(scale4)
scale8 = self.down2(scale8)
elif self.scale == 4:
scale8 = self.down1(scale8)
scale2 = self.up1(scale2)
elif self.scale == 8:
scale4 = self.up1(scale4)
scale2 = self.up2(scale2)
feature1 = self.refine2(scale2, now)
feature2 = self.refine4(scale4, now)
feature3 = self.refine8(scale8, now)
fea = torch.cat((feature1, feature2, feature3), 1)
fea = self.conv(fea)
fea = self.attention(fea)
fea = fea + now
return fea
class PCSR1(nn.Module):
def __init__(self, conv, n_feats, kernel_size, bias=True, act=nn.ReLU(True), res_scale=1, gama=2, lamb=4):
super(PCSR1, self).__init__()
# First branch
m = []
for i in range(2):
if i == 0:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
m.append(act)
if i == 1:
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
self.body = nn.Sequential(*m)
self.attention_layer1 = cbam.CSAR_SpatialGate(n_feats, gama=gama)
self.attention_layer2 = cbam.ChannelGate(n_feats, reduction_ratio=lamb, pool_types=['avg', 'max', 'var'])
self.conv = conv(2 * n_feats, n_feats, 1, bias=bias)
self.res_scale = res_scale
# Second branch
self.conv_feature = nn.Sequential(
*[nn.Conv2d(in_channels=n_feats, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_parsing = nn.Sequential(
*[nn.Conv2d(in_channels=3, out_channels=n_feats, kernel_size=3, stride=1, padding=1),
act])
self.conv_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=3, stride=1,
padding=1)
self.attention_fusion = nn.Conv2d(in_channels=n_feats * 2, out_channels=n_feats, kernel_size=1, stride=1)
def forward(self, x, p):
# First branch
res = self.body(x)
res1 = self.attention_layer1(res)
res2 = self.attention_layer2(res)
res = torch.cat((res1, res2), 1)
res = self.conv(res)
# Second branch
fea = self.conv_feature(x)
par = self.conv_parsing(p)
fea = torch.cat((fea, par), 1)
fea = self.conv_fusion(fea)
fea_fusion = torch.cat((fea, res), 1)
res = self.attention_fusion(fea_fusion)
res += x
return res
| 8,996 | 34.007782 | 127 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/util.py
|
import torch
import numpy as np
import math
import cv2
def prepare(arg):
if torch.cuda.is_available():
# print(1)
arg = arg.cuda()
return arg
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def calc_metrics(img1, img2, crop_border=8, test_Y=True):
#
# print(img1.shape, img1.shape[2])
img1 = np.transpose(img1, (1, 2, 0))
img2 = np.transpose(img2, (1, 2, 0))
img1 = np.array(img1)
img2 = np.array(img2)
# print(img1.shape, img1.shape[2])
if test_Y and img1.shape[2] == 3: # evaluate on Y channel in YCbCr color space
im1_in = rgb2ycbcr(img1)
im2_in = rgb2ycbcr(img2)
else:
im1_in = img1
im2_in = img2
# print("img1_in.ndim: ", im1_in.ndim)
if im1_in.ndim == 3:
# cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border, :]
# cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_im1 = im1_in[:, crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[:, crop_border:-crop_border, crop_border:-crop_border]
elif im1_in.ndim == 2:
cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border]
else:
raise ValueError('Wrong image dimension: {}. Should be 2 or 3.'.format(im1_in.ndim))
# print("cropped: ", cropped_im1.shape, cropped_im2.shape)
psnr = calc_psnr(cropped_im1 * 255, cropped_im2 * 255)
ssim = calc_ssim(cropped_im1 * 255, cropped_im2 * 255)
# print(type(ssim))
return psnr, ssim
def calc_psnr(img1, img2):
# img1 and img2 have range [0, 255]
#
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
mse = np.mean((img1_np - img2_np)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
img1_np = np.array(img1)
img2_np = np.array(img2)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1_np, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2_np, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1_np**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2_np**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1_np * img2_np, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calc_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
# print("img2: ", img2.shape)
# img1 = np.transpose(img1, (1, 2, 0))
# img2 = np.transpose(img2, (1, 2, 0))
# print("img2_np_trans", img2.shape)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
# print(img1.shape)
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
# print(img1.shape[2])
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1, img2))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
| 4,380 | 28.601351 | 92 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/dataset_parsing.py
|
from torch.utils import data
import os
from PIL import Image
from torchvision.transforms import ToTensor
import numpy
import glob
import random
import numpy as np
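# Applies the same random horizontal flip and 90-degree rotations to the LR image, HR image and parsing map so they stay aligned.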
def augment(lr, hr, p, hflip=True, rot=True):
# def _augment(img):
# if hflip: img = img[:, ::-1, :]
# if vflip: img = img[::-1, :, :]
# if rot90: img = img.transpose(1, 0, 2)
# return img
if random.random() > 0.5 and hflip:
lr = lr[:, ::-1, :]
hr = hr[:, ::-1, :]
p = p[:, ::-1, :]
# print("hflip")
if rot:
rot_rand = random.random()
if rot_rand > 0.75:
lr = np.rot90(lr, k=1, axes=(0, 1))
hr = np.rot90(hr, k=1, axes=(0, 1))
p = np.rot90(p, k=1, axes=(0, 1))
elif rot_rand > 0.5:
lr = np.rot90(lr, k=2, axes=(0, 1))
hr = np.rot90(hr, k=2, axes=(0, 1))
p = np.rot90(p, k=2, axes=(0, 1))
elif rot_rand > 0.25:
lr = np.rot90(lr, k=3, axes=(0, 1))
hr = np.rot90(hr, k=3, axes=(0, 1))
p = np.rot90(p, k=3, axes=(0, 1))
# print("rot")
return lr, hr, p
class Data(data.Dataset):
def __init__(self, root, args, train=False):
        # Return the list of files and folders under the specified path.
self.args = args
self.imgs_HR_path = os.path.join(root, 'HR')
self.imgs_HR = sorted(
glob.glob(os.path.join(self.imgs_HR_path, '*.png'))
)
if self.args.scale == 8:
self.imgs_LR_path = os.path.join(root, 'LR')
elif self.args.scale == 16:
self.imgs_LR_path = os.path.join(root, 'LR_x16')
elif self.args.scale == 4:
self.imgs_LR_path = os.path.join(root, 'LR_x4')
self.imgs_LR = sorted(
glob.glob(os.path.join(self.imgs_LR_path, '*.png'))
)
if self.args.scale == 8:
self.imgs_parsing_path = os.path.join(root, 'Es_parsing')
elif self.args.scale == 16:
self.imgs_parsing_path = os.path.join(root, 'Es_parsing_x16')
elif self.args.scale == 4:
self.imgs_parsing_path = os.path.join(root, 'Es_parsing_x4')
self.imgs_parsing = sorted(
glob.glob(os.path.join(self.imgs_parsing_path, '*.png'))
)
self.train = train
def __getitem__(self, item):
img_path_LR = os.path.join(self.imgs_LR_path, self.imgs_LR[item])
img_path_HR = os.path.join(self.imgs_HR_path, self.imgs_HR[item])
img_path_parsing = os.path.join(self.imgs_parsing_path, self.imgs_parsing[item])
LR = Image.open(img_path_LR)
HR = Image.open(img_path_HR)
parsing = Image.open(img_path_parsing)
HR = numpy.array(HR)
LR = numpy.array(LR)
parsing = numpy.array(parsing)
if self.args.augment and self.train:
LR, HR, parsing = augment(LR, HR, parsing)
LR = np.ascontiguousarray(LR)
HR = np.ascontiguousarray(HR)
parsing = np.ascontiguousarray(parsing)
HR = ToTensor()(HR)
LR = ToTensor()(LR)
res = ToTensor()(parsing)
filename = os.path.basename(img_path_HR)
return LR, HR, res, filename
def __len__(self):
return len(self.imgs_HR)
| 3,235 | 31.36 | 88 |
py
|
FishFSRNet
|
FishFSRNet-main/fsr/main_parsing.py
|
from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import torch.optim as optim
import torch.nn as nn
import dataset_parsing
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
from fishfsrnet import FISHNET
net = FISHNET(args)
net = util.prepare(net)
# print(net)
writer = SummaryWriter('./logs/{}'.format(args.writer_name))
traindata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_train), args=args, train=True)
trainset = DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=16)
valdata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_val), args=args, train=False)
valset = DataLoader(valdata, batch_size=1, shuffle=False, num_workers=1)
testdata = dataset_parsing.Data(root=os.path.join(args.dir_data, args.data_test), args=args, train=False)
testset = DataLoader(testdata, batch_size=1, shuffle=False, num_workers=1)
criterion1 = nn.L1Loss()
optimizer = optim.Adam(params=net.parameters(), lr=args.lr, betas=(0.9, 0.99), eps=1e-8)
for i in range(args.epochs):
net.train()
train_loss = 0
bum = len(trainset)
for batch, (lr, hr, parsing, _) in enumerate(trainset):
lr, hr, parsing = util.prepare(lr), util.prepare(hr), util.prepare(parsing)
sr = net(lr, parsing)
loss = criterion1(sr, hr)
train_loss = train_loss + loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Epoch:{} loss: {:.3f}".format(i + 1, train_loss / (len(trainset)) * 255))
writer.add_scalar('train_loss', train_loss / (len(trainset)) * 255, i + 1)
os.makedirs(os.path.join(args.save_path, args.writer_name), exist_ok=True)
os.makedirs(os.path.join(args.save_path, args.writer_name, 'model'), exist_ok=True)
torch.save(net.state_dict(),
os.path.join(args.save_path, args.writer_name, 'model', 'epoch{}.pth'.format(i + 1)))
net.eval()
val_psnr = 0
val_ssim = 0
os.makedirs(os.path.join(args.save_path, args.writer_name, 'result'), exist_ok=True)
for batch, (lr, hr, parsing, filename) in enumerate(valset):
lr, hr, parsing = util.prepare(lr), util.prepare(hr), util.prepare(parsing)
sr = net(lr, parsing)
psnr_c, ssim_c = util.calc_metrics(hr[0].data.cpu(), sr[0].data.cpu())
val_psnr = val_psnr + psnr_c
val_ssim = val_ssim + ssim_c
print("Epoch:{} val psnr: {:.3f}".format(i + 1, val_psnr / (len(valset))))
writer.add_scalar("val_psnr_DIC", val_psnr / len(valset), i + 1)
writer.add_scalar("val_ssim_DIC", val_ssim / len(valset), i + 1)
| 2,671 | 40.75 | 106 |
py
|
omni3d
|
omni3d-main/tools/__init__.py
| 0 | 0 | 0 |
py
|
|
omni3d
|
omni3d-main/tools/train_net.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import os
import sys
import numpy as np
import copy
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import (
default_argument_parser,
default_setup,
default_writers,
launch
)
from detectron2.solver import build_lr_scheduler
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("cubercnn")
sys.dont_write_bytecode = True
sys.path.append(os.getcwd())
np.set_printoptions(suppress=True)
from cubercnn.solver import build_optimizer, freeze_bn, PeriodicCheckpointerOnlyOne
from cubercnn.config import get_cfg_defaults
from cubercnn.data import (
load_omni3d_json,
DatasetMapper3D,
build_detection_train_loader,
build_detection_test_loader,
get_omni3d_categories,
simple_register
)
from cubercnn.evaluation import (
Omni3DEvaluator, Omni3Deval,
Omni3DEvaluationHelper,
inference_on_dataset
)
from cubercnn.modeling.proposal_generator import RPNWithIgnore
from cubercnn.modeling.roi_heads import ROIHeads3D
from cubercnn.modeling.meta_arch import RCNN3D, build_model
from cubercnn.modeling.backbone import build_dla_from_vision_fpn_backbone
from cubercnn import util, vis, data
import cubercnn.vis.logperf as utils_logperf
MAX_TRAINING_ATTEMPTS = 10
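# Runs inference on every test dataset, evaluates each one individually, optionally visualizes predictions,
# and finally summarizes the Omni3D metrics on the main process.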
def do_test(cfg, model, iteration='final', storage=None):
filter_settings = data.get_filter_settings_from_cfg(cfg)
filter_settings['visibility_thres'] = cfg.TEST.VISIBILITY_THRES
filter_settings['truncation_thres'] = cfg.TEST.TRUNCATION_THRES
filter_settings['min_height_thres'] = 0.0625
filter_settings['max_depth'] = 1e8
dataset_names_test = cfg.DATASETS.TEST
only_2d = cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_3D == 0.0
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", 'iter_{}'.format(iteration))
eval_helper = Omni3DEvaluationHelper(
dataset_names_test,
filter_settings,
output_folder,
iter_label=iteration,
only_2d=only_2d,
)
for dataset_name in dataset_names_test:
"""
Cycle through each dataset and test them individually.
This loop keeps track of each per-image evaluation result,
so that it doesn't need to be re-computed for the collective.
"""
'''
Distributed Cube R-CNN inference
'''
data_loader = build_detection_test_loader(cfg, dataset_name)
results_json = inference_on_dataset(model, data_loader)
if comm.is_main_process():
'''
Individual dataset evaluation
'''
eval_helper.add_predictions(dataset_name, results_json)
eval_helper.save_predictions(dataset_name)
eval_helper.evaluate(dataset_name)
'''
Optionally, visualize some instances
'''
instances = torch.load(os.path.join(output_folder, dataset_name, 'instances_predictions.pth'))
log_str = vis.visualize_from_instances(
instances, data_loader.dataset, dataset_name,
cfg.INPUT.MIN_SIZE_TEST, os.path.join(output_folder, dataset_name),
MetadataCatalog.get('omni3d_model').thing_classes, iteration
)
logger.info(log_str)
if comm.is_main_process():
'''
Summarize each Omni3D Evaluation metric
'''
eval_helper.summarize_all()
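# Training loop with divergence protection: abnormally high or non-finite losses are clipped and their updates skipped,
# and the function returns False (so training can be restarted from the last checkpoint) when too many recent iterations exploded.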
def do_train(cfg, model, dataset_id_to_unknown_cats, dataset_id_to_src, resume=False):
max_iter = cfg.SOLVER.MAX_ITER
do_eval = cfg.TEST.EVAL_PERIOD > 0
model.train()
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
# bookkeeping
checkpointer = DetectionCheckpointer(model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler)
periodic_checkpointer = PeriodicCheckpointerOnlyOne(checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter)
writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
# create the dataloader
data_mapper = DatasetMapper3D(cfg, is_train=True)
data_loader = build_detection_train_loader(cfg, mapper=data_mapper, dataset_id_to_src=dataset_id_to_src)
# give the mapper access to dataset_ids
data_mapper.dataset_id_to_unknown_cats = dataset_id_to_unknown_cats
if cfg.MODEL.WEIGHTS_PRETRAIN != '':
# load ONLY the model, no checkpointables.
checkpointer.load(cfg.MODEL.WEIGHTS_PRETRAIN, checkpointables=[])
# determine the starting iteration, if resuming
start_iter = (checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
iteration = start_iter
logger.info("Starting training from iteration {}".format(start_iter))
if not cfg.MODEL.USE_BN:
freeze_bn(model)
world_size = comm.get_world_size()
# if the loss diverges for more than the below TOLERANCE
# as a percent of the iterations, the training will stop.
# This is only enabled if "STABILIZE" is on, which
# prevents a single example from exploding the training.
iterations_success = 0
iterations_explode = 0
# when loss > recent_loss * TOLERANCE, then it could be a
# diverging/failing model, which we should skip all updates for.
TOLERANCE = 4.0
GAMMA = 0.02 # rolling average weight gain
recent_loss = None # stores the most recent loss magnitude
data_iter = iter(data_loader)
# model.parameters() is surprisingly expensive at 150ms, so cache it
named_params = list(model.named_parameters())
with EventStorage(start_iter) as storage:
while True:
data = next(data_iter)
storage.iter = iteration
# forward
loss_dict = model(data)
losses = sum(loss_dict.values())
# reduce
loss_dict_reduced = {k: v.item() for k, v in allreduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
# sync up
comm.synchronize()
if recent_loss is None:
# init recent loss fairly high
recent_loss = losses_reduced*2.0
# Is stabilization enabled, and loss high or NaN?
diverging_model = cfg.MODEL.STABILIZE > 0 and \
(losses_reduced > recent_loss*TOLERANCE or \
not (np.isfinite(losses_reduced)) or np.isnan(losses_reduced))
if diverging_model:
# clip and warn the user.
losses = losses.clip(0, 1)
logger.warning('Skipping gradient update due to higher than normal loss {:.2f} vs. rolling mean {:.2f}, Dict-> {}'.format(
losses_reduced, recent_loss, loss_dict_reduced
))
else:
# compute rolling average of loss
recent_loss = recent_loss * (1-GAMMA) + losses_reduced*GAMMA
if comm.is_main_process():
# send loss scalars to tensorboard.
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
# backward and step
optimizer.zero_grad()
losses.backward()
# if the loss is not too high,
# we still want to check gradients.
if not diverging_model:
if cfg.MODEL.STABILIZE > 0:
for name, param in named_params:
if param.grad is not None:
diverging_model = torch.isnan(param.grad).any() or torch.isinf(param.grad).any()
if diverging_model:
logger.warning('Skipping gradient update due to inf/nan detection, loss is {}'.format(loss_dict_reduced))
break
# convert the divergence flag to a float, then allreduce it,
# if any process gradients have exploded then we skip together.
diverging_model = torch.tensor(float(diverging_model)).cuda()
if world_size > 1:
dist.all_reduce(diverging_model)
# sync up
comm.synchronize()
if diverging_model > 0:
optimizer.zero_grad()
iterations_explode += 1
else:
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
iterations_success += 1
total_iterations = iterations_success + iterations_explode
# Only retry if we have trained sufficiently long relative
# to the latest checkpoint, which we would otherwise revert back to.
retry = (iterations_explode / total_iterations) >= cfg.MODEL.STABILIZE \
and (total_iterations > cfg.SOLVER.CHECKPOINT_PERIOD*1/2)
# Important for dist training. Convert to a float, then allreduce it;
# if any process needs to retry then all processes must restart together.
retry = torch.tensor(float(retry)).cuda()
if world_size > 1:
dist.all_reduce(retry)
# sync up
comm.synchronize()
# any processes need to retry
if retry > 0:
# instead of failing, try to resume the iteration instead.
logger.warning('!! Restarting training at {} iters. Exploding loss {:d}% of iters !!'.format(
iteration, int(100*(iterations_explode / (iterations_success + iterations_explode)))
))
# send these to garbage, for ideally a cleaner restart.
del data_mapper
del data_loader
del optimizer
del checkpointer
del periodic_checkpointer
return False
scheduler.step()
# Evaluate only when the loss is not diverging.
if not (diverging_model > 0) and \
(do_eval and ((iteration + 1) % cfg.TEST.EVAL_PERIOD) == 0 and iteration != (max_iter - 1)):
logger.info('Starting test for iteration {}'.format(iteration+1))
do_test(cfg, model, iteration=iteration+1, storage=storage)
comm.synchronize()
if not cfg.MODEL.USE_BN:
freeze_bn(model)
# Flush events
if iteration - start_iter > 5 and ((iteration + 1) % 20 == 0 or iteration == max_iter - 1):
for writer in writers:
writer.write()
# Do not bother checkpointing if there is potential for a diverging model.
if not (diverging_model > 0) and \
(iterations_explode / total_iterations) < 0.5*cfg.MODEL.STABILIZE:
periodic_checkpointer.step(iteration)
iteration += 1
if iteration >= max_iter:
break
# success
return True
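# Illustrative sketch (not part of the original file): the loss-stabilization logic used
# inside do_train above, isolated as standalone helpers. GAMMA and TOLERANCE mirror the
# constants defined in the training loop; the function names are hypothetical.
def _is_diverging(loss_value, recent_loss, tolerance=4.0):
    # a loss is treated as diverging when it is non-finite or far above the rolling mean
    return (not np.isfinite(loss_value)) or loss_value > recent_loss * tolerance
def _update_rolling_loss(recent_loss, loss_value, gamma=0.02):
    # exponential moving average of the loss magnitude, matching recent_loss in do_train
    return recent_loss * (1 - gamma) + loss_value * gamma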
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
get_cfg_defaults(cfg)
config_file = args.config_file
# store locally if needed
if config_file.startswith(util.CubeRCNNHandler.PREFIX):
config_file = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, config_file)
cfg.merge_from_file(config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="cubercnn")
filter_settings = data.get_filter_settings_from_cfg(cfg)
for dataset_name in cfg.DATASETS.TRAIN:
simple_register(dataset_name, filter_settings, filter_empty=True)
dataset_names_test = cfg.DATASETS.TEST
for dataset_name in dataset_names_test:
if not(dataset_name in cfg.DATASETS.TRAIN):
simple_register(dataset_name, filter_settings, filter_empty=False)
return cfg
def main(args):
cfg = setup(args)
logger.info('Preprocessing Training Datasets')
filter_settings = data.get_filter_settings_from_cfg(cfg)
priors = None
if args.eval_only:
category_path = os.path.join(util.file_parts(args.config_file)[0], 'category_meta.json')
# store locally if needed
if category_path.startswith(util.CubeRCNNHandler.PREFIX):
category_path = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, category_path)
metadata = util.load_json(category_path)
# register the categories
thing_classes = metadata['thing_classes']
id_map = {int(key):val for key, val in metadata['thing_dataset_id_to_contiguous_id'].items()}
MetadataCatalog.get('omni3d_model').thing_classes = thing_classes
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id = id_map
else:
# setup and join the data.
dataset_paths = [os.path.join('datasets', 'Omni3D', name + '.json') for name in cfg.DATASETS.TRAIN]
datasets = data.Omni3D(dataset_paths, filter_settings=filter_settings)
# determine the meta data given the datasets used.
data.register_and_store_model_metadata(datasets, cfg.OUTPUT_DIR, filter_settings)
thing_classes = MetadataCatalog.get('omni3d_model').thing_classes
dataset_id_to_contiguous_id = MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
'''
It may be useful to keep track of which categories are annotated/known
for each dataset in use, in case a method wants to use this information.
'''
infos = datasets.dataset['info']
if type(infos) == dict:
infos = [datasets.dataset['info']]
dataset_id_to_unknown_cats = {}
possible_categories = set(i for i in range(cfg.MODEL.ROI_HEADS.NUM_CLASSES + 1))
dataset_id_to_src = {}
for info in infos:
dataset_id = info['id']
known_category_training_ids = set()
if not dataset_id in dataset_id_to_src:
dataset_id_to_src[dataset_id] = info['source']
for id in info['known_category_ids']:
if id in dataset_id_to_contiguous_id:
known_category_training_ids.add(dataset_id_to_contiguous_id[id])
# determine and store the unknown categories.
unknown_categories = possible_categories - known_category_training_ids
dataset_id_to_unknown_cats[dataset_id] = unknown_categories
# log the per-dataset categories
logger.info('Available categories for {}'.format(info['name']))
logger.info([thing_classes[i] for i in (possible_categories & known_category_training_ids)])
# compute priors given the training data.
priors = util.compute_priors(cfg, datasets)
'''
The training loop can be attempted up to N times.
This catches divergence or other failure modes.
'''
remaining_attempts = MAX_TRAINING_ATTEMPTS
while remaining_attempts > 0:
# build the training model.
model = build_model(cfg, priors=priors)
if remaining_attempts == MAX_TRAINING_ATTEMPTS:
# log the first attempt's settings.
logger.info("Model:\n{}".format(model))
if args.eval_only:
# skip straight to eval mode
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
return do_test(cfg, model)
# setup distributed training.
distributed = comm.get_world_size() > 1
if distributed:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()],
broadcast_buffers=False, find_unused_parameters=True
)
# train full model, potentially with resume.
if do_train(cfg, model, dataset_id_to_unknown_cats, dataset_id_to_src, resume=args.resume):
break
else:
# allow restart when a model fails to train.
remaining_attempts -= 1
del model
if remaining_attempts == 0:
# Exit if the model could not finish without diverging.
raise ValueError('Training failed')
return do_test(cfg, model)
def allreduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary across all processes so that every
process ends up with the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = comm.get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
# all_reduce sums the values across processes, so divide by
# world_size to obtain the average
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
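# Minimal usage sketch (assumption, not part of the original file): allreduce_dict is how
# the training loop averages its per-process loss dict before logging; the argument below
# is a hypothetical dict of scalar CUDA tensors, e.g. {"loss_cls": ..., "loss_box": ...}.
def _example_allreduce_usage(loss_dict):
    # average the scalars across processes, then sum them into a single logging value
    loss_dict_reduced = {k: v.item() for k, v in allreduce_dict(loss_dict).items()}
    return sum(loss_dict_reduced.values())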
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 18,388 | 35.056863 | 138 |
py
|
omni3d
|
omni3d-main/demo/demo.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import os
import argparse
import sys
import numpy as np
from collections import OrderedDict
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.data import transforms as T
logger = logging.getLogger("detectron2")
sys.dont_write_bytecode = True
sys.path.append(os.getcwd())
np.set_printoptions(suppress=True)
from cubercnn.config import get_cfg_defaults
from cubercnn.modeling.proposal_generator import RPNWithIgnore
from cubercnn.modeling.roi_heads import ROIHeads3D
from cubercnn.modeling.meta_arch import RCNN3D, build_model
from cubercnn.modeling.backbone import build_dla_from_vision_fpn_backbone
from cubercnn import util, vis
def do_test(args, cfg, model):
list_of_ims = util.list_files(os.path.join(args.input_folder, ''), '*')
model.eval()
focal_length = args.focal_length
principal_point = args.principal_point
thres = args.threshold
output_dir = cfg.OUTPUT_DIR
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
augmentations = T.AugmentationList([T.ResizeShortestEdge(min_size, max_size, "choice")])
util.mkdir_if_missing(output_dir)
category_path = os.path.join(util.file_parts(args.config_file)[0], 'category_meta.json')
# store locally if needed
if category_path.startswith(util.CubeRCNNHandler.PREFIX):
category_path = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, category_path)
metadata = util.load_json(category_path)
cats = metadata['thing_classes']
for path in list_of_ims:
im_name = util.file_parts(path)[1]
im = util.imread(path)
if im is None:
continue
image_shape = im.shape[:2] # h, w
h, w = image_shape
if focal_length == 0:
focal_length_ndc = 4.0
focal_length = focal_length_ndc * h / 2
if len(principal_point) == 0:
px, py = w/2, h/2
else:
px, py = principal_point
K = np.array([
[focal_length, 0.0, px],
[0.0, focal_length, py],
[0.0, 0.0, 1.0]
])
aug_input = T.AugInput(im)
_ = augmentations(aug_input)
image = aug_input.image
batched = [{
'image': torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))).cuda(),
'height': image_shape[0], 'width': image_shape[1], 'K': K
}]
dets = model(batched)[0]['instances']
n_det = len(dets)
meshes = []
meshes_text = []
if n_det > 0:
for idx, (corners3D, center_cam, center_2D, dimensions, pose, score, cat_idx) in enumerate(zip(
dets.pred_bbox3D, dets.pred_center_cam, dets.pred_center_2D, dets.pred_dimensions,
dets.pred_pose, dets.scores, dets.pred_classes
)):
# skip
if score < thres:
continue
cat = cats[cat_idx]
bbox3D = center_cam.tolist() + dimensions.tolist()
meshes_text.append('{} {:.2f}'.format(cat, score))
color = [c/255.0 for c in util.get_color(idx)]
box_mesh = util.mesh_cuboid(bbox3D, pose.tolist(), color=color)
meshes.append(box_mesh)
print('File: {} with {} dets'.format(im_name, len(meshes)))
if len(meshes) > 0:
im_drawn_rgb, im_topdown, _ = vis.draw_scene_view(im, K, meshes, text=meshes_text, scale=im.shape[0], blend_weight=0.5, blend_weight_overlay=0.85)
if args.display:
im_concat = np.concatenate((im_drawn_rgb, im_topdown), axis=1)
vis.imshow(im_concat)
util.imwrite(im_drawn_rgb, os.path.join(output_dir, im_name+'_boxes.jpg'))
util.imwrite(im_topdown, os.path.join(output_dir, im_name+'_novel.jpg'))
else:
util.imwrite(im, os.path.join(output_dir, im_name+'_boxes.jpg'))
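# Small sketch (assumption, not in the original file) of the default intrinsics built in
# do_test above: when no focal length is given, it is derived from an NDC focal length of
# 4.0 and the image height, and the principal point defaults to the image center.
def _default_intrinsics(h, w, focal_length_ndc=4.0):
    focal_length = focal_length_ndc * h / 2
    px, py = w / 2, h / 2
    return np.array([
        [focal_length, 0.0, px],
        [0.0, focal_length, py],
        [0.0, 0.0, 1.0]
    ])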
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
get_cfg_defaults(cfg)
config_file = args.config_file
# store locally if needed
if config_file.startswith(util.CubeRCNNHandler.PREFIX):
config_file = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, config_file)
cfg.merge_from_file(config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=True
)
with torch.no_grad():
do_test(args, cfg, model)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
epilog=None, formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument('--input-folder', type=str, help='folder of input images to process', required=True)
parser.add_argument("--focal-length", type=float, default=0, help="focal length for image inputs (in px)")
parser.add_argument("--principal-point", type=float, default=[], nargs=2, help="principal point for image inputs (in px)")
parser.add_argument("--threshold", type=float, default=0.25, help="threshold on score for visualizing")
parser.add_argument("--display", default=False, action="store_true", help="Whether to show the images in matplotlib",)
parser.add_argument("--eval-only", default=True, action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="Modify config options by adding 'KEY VALUE' pairs at the end of the command. "
"See config references at "
"https://detectron2.readthedocs.io/modules/config.html#config-references",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 7,175 | 34.349754 | 158 |
py
|
omni3d
|
omni3d-main/cubercnn/solver/checkpoint.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.checkpoint import PeriodicCheckpointer
from typing import Any
class PeriodicCheckpointerOnlyOne(PeriodicCheckpointer):
def step(self, iteration: int, **kwargs: Any) -> None:
"""
Perform the appropriate action at the given iteration.
Args:
iteration (int): the current iteration, ranged in [0, max_iter-1].
kwargs (Any): extra data to save, same as in
:meth:`Checkpointer.save`.
"""
iteration = int(iteration)
additional_state = {"iteration": iteration}
additional_state.update(kwargs)
if (iteration + 1) % self.period == 0:
# simply save a single recent model
self.checkpointer.save(
"{}_recent".format(self.file_prefix), **additional_state
)
if self.max_iter is not None:
if iteration >= self.max_iter - 1:
self.checkpointer.save(f"{self.file_prefix}_final", **additional_state)
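# Usage sketch (assumption, mirroring how train.py builds its checkpointer); the helper
# name and arguments below are hypothetical.
def _example_build_checkpointer(model, output_dir, period, max_iter):
    from detectron2.checkpoint import DetectionCheckpointer
    # the wrapped checkpointer keeps a single "<prefix>_recent" file plus a final one
    checkpointer = DetectionCheckpointer(model, output_dir)
    return PeriodicCheckpointerOnlyOne(checkpointer, period, max_iter=max_iter)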
| 1,060 | 36.892857 | 87 |
py
|
omni3d
|
omni3d-main/cubercnn/solver/__init__.py
|
from .build import *
from .checkpoint import *
| 46 | 22.5 | 25 |
py
|
omni3d
|
omni3d-main/cubercnn/solver/build.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from typing import Any, Dict, List, Set
from detectron2.solver.build import maybe_add_gradient_clipping
def build_optimizer(cfg, model):
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for key, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if isinstance(module, norm_module_types) and (cfg.SOLVER.WEIGHT_DECAY_NORM is not None):
weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM
elif key == "bias":
if (cfg.SOLVER.BIAS_LR_FACTOR is not None):
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
if (cfg.SOLVER.WEIGHT_DECAY_BIAS is not None):
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
# these params do not need weight decay at all
# TODO parameterize these in configs instead.
if key in ['priors_dims_per_cat', 'priors_z_scales', 'priors_z_stats']:
weight_decay = 0.0
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if cfg.SOLVER.TYPE == 'sgd':
optimizer = torch.optim.SGD(
params,
cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY
)
elif cfg.SOLVER.TYPE == 'adam':
optimizer = torch.optim.Adam(params, cfg.SOLVER.BASE_LR, eps=1e-02)
elif cfg.SOLVER.TYPE == 'adam+amsgrad':
optimizer = torch.optim.Adam(params, cfg.SOLVER.BASE_LR, amsgrad=True, eps=1e-02)
elif cfg.SOLVER.TYPE == 'adamw':
optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR, eps=1e-02)
elif cfg.SOLVER.TYPE == 'adamw+amsgrad':
optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR, amsgrad=True, eps=1e-02)
else:
raise ValueError('{} is not supported as an optimizer.'.format(cfg.SOLVER.TYPE))
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
def freeze_bn(network):
for _, module in network.named_modules():
if isinstance(module, torch.nn.BatchNorm2d):
module.eval()
module.track_running_stats = False
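# Minimal sketch (assumption, mirrors the calls made in train.py): build the optimizer from
# the config and optionally freeze all BatchNorm layers before training.
def _example_setup_solver(cfg, model):
    optimizer = build_optimizer(cfg, model)
    if not cfg.MODEL.USE_BN:
        # keep BatchNorm statistics fixed during training
        freeze_bn(model)
    return optimizer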
| 2,963 | 37.493506 | 100 |
py
|
omni3d
|
omni3d-main/cubercnn/evaluation/omni3d_evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import contextlib
import copy
import datetime
import io
import itertools
import json
import logging
import os
import time
from collections import defaultdict
from typing import List, Union
from typing import Tuple
import numpy as np
import pycocotools.mask as maskUtils
import torch
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.evaluation.coco_evaluation import COCOEvaluator
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table, log_every_n_seconds
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
from detectron2.utils.comm import get_world_size, is_main_process
import detectron2.utils.comm as comm
from detectron2.evaluation import (
DatasetEvaluators, inference_context, DatasetEvaluator
)
from collections import OrderedDict, abc
from contextlib import ExitStack, contextmanager
from torch import nn
import logging
from cubercnn.data import Omni3D
from pytorch3d import _C
import torch.nn.functional as F
from pytorch3d.ops.iou_box3d import _box_planes, _box_triangles
import cubercnn.vis.logperf as utils_logperf
from cubercnn.data import (
get_omni3d_categories,
simple_register
)
"""
This file contains
* Omni3DEvaluationHelper: a helper object to accumulate and summarize evaluation results
* Omni3Deval: a wrapper around COCOeval to perform 2D/3D bounding box evaluation in the detection setting
* Omni3DEvaluator: a wrapper around COCOEvaluator to collect results on each dataset
* Omni3DParams: parameters for the evaluation API
"""
logger = logging.getLogger(__name__)
# Defines the maximum product len(dts) * len(gts)
# that we will attempt to compute on a GPU.
# The fallback is safer computation on a CPU.
# 0 disables GPU computation entirely.
MAX_DTS_CROSS_GTS_FOR_IOU3D = 0
def _check_coplanar(boxes: torch.Tensor, eps: float = 1e-4) -> torch.BoolTensor:
"""
Checks that plane vertices are coplanar.
Returns a bool tensor of size B, where True indicates a box is coplanar.
"""
faces = torch.tensor(_box_planes, dtype=torch.int64, device=boxes.device)
verts = boxes.index_select(index=faces.view(-1), dim=1)
B = boxes.shape[0]
P, V = faces.shape
# (B, P, 4, 3) -> (B, P, 3)
v0, v1, v2, v3 = verts.reshape(B, P, V, 3).unbind(2)
# Compute the normal
e0 = F.normalize(v1 - v0, dim=-1)
e1 = F.normalize(v2 - v0, dim=-1)
normal = F.normalize(torch.cross(e0, e1, dim=-1), dim=-1)
# Check the fourth vertex is also on the same plane
mat1 = (v3 - v0).view(B, 1, -1) # (B, 1, P*3)
mat2 = normal.view(B, -1, 1) # (B, P*3, 1)
return (mat1.bmm(mat2).abs() < eps).view(B)
def _check_nonzero(boxes: torch.Tensor, eps: float = 1e-8) -> torch.BoolTensor:
"""
Checks that the sides of the box have a non zero area.
Returns a bool tensor of size B, where True indicates a box is nonzero.
"""
faces = torch.tensor(_box_triangles, dtype=torch.int64, device=boxes.device)
verts = boxes.index_select(index=faces.view(-1), dim=1)
B = boxes.shape[0]
T, V = faces.shape
# (B, T, 3, 3) -> (B, T, 3)
v0, v1, v2 = verts.reshape(B, T, V, 3).unbind(2)
normals = torch.cross(v1 - v0, v2 - v0, dim=-1) # (B, T, 3)
face_areas = normals.norm(dim=-1) / 2
return (face_areas > eps).all(1).view(B)
def box3d_overlap(
boxes_dt: torch.Tensor, boxes_gt: torch.Tensor,
eps_coplanar: float = 1e-4, eps_nonzero: float = 1e-8
) -> torch.Tensor:
"""
Computes the intersection of 3D boxes_dt and boxes_gt.
Inputs boxes_dt, boxes_gt are tensors of shape (B, 8, 3)
(where B doesn't have to be the same for boxes_dt and boxes_gt),
containing the 8 corners of the boxes, as follows:
(4) +---------+. (5)
| ` . | ` .
| (0) +---+-----+ (1)
| | | |
(7) +-----+---+. (6)|
` . | ` . |
(3) ` +---------+ (2)
NOTE: Throughout this implementation, we assume that boxes
are defined by their 8 corners exactly in the order specified in the
diagram above for the function to give correct results. In addition
the vertices on each plane must be coplanar.
As an alternative to the diagram, this is a unit bounding
box which has the correct vertex ordering:
box_corner_vertices = [
[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1],
]
Args:
boxes_dt: tensor of shape (N, 8, 3) of the coordinates of the 1st boxes
boxes_gt: tensor of shape (M, 8, 3) of the coordinates of the 2nd boxes
Returns:
iou: (N, M) tensor of the intersection over union which is
defined as: `iou = vol / (vol1 + vol2 - vol)`
"""
# Make sure predictions are coplanar and nonzero
invalid_coplanar = ~_check_coplanar(boxes_dt, eps=eps_coplanar)
invalid_nonzero = ~_check_nonzero(boxes_dt, eps=eps_nonzero)
ious = _C.iou_box3d(boxes_dt, boxes_gt)[1]
# Offending boxes are set to zero IoU
if invalid_coplanar.any():
ious[invalid_coplanar] = 0
print('Warning: skipping {:d} non-coplanar boxes at eval.'.format(int(invalid_coplanar.float().sum())))
if invalid_nonzero.any():
ious[invalid_nonzero] = 0
print('Warning: skipping {:d} zero volume boxes at eval.'.format(int(invalid_nonzero.float().sum())))
return ious
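# Tiny sketch (assumption, not part of the original file): calling box3d_overlap with the
# documented corner ordering. Identical unit cubes give an IoU of 1; the offset box below is
# a partially overlapping hypothetical example.
def _example_box3d_overlap():
    unit_box = torch.tensor([
        [0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.],
        [0., 0., 1.], [1., 0., 1.], [1., 1., 1.], [0., 1., 1.],
    ])
    boxes_dt = unit_box[None]            # (1, 8, 3) predicted box
    boxes_gt = unit_box[None] + 0.5      # (1, 8, 3) shifted ground-truth box
    return box3d_overlap(boxes_dt, boxes_gt)  # (1, 1) IoU matrix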
class Omni3DEvaluationHelper:
def __init__(self,
dataset_names,
filter_settings,
output_folder,
iter_label='-',
only_2d=False,
):
"""
A helper class to initialize, evaluate and summarize Omni3D metrics.
The evaluator relies on the detectron2 MetadataCatalog for keeping track
of category names and contiguous IDs. Hence, it is important to set
these variables appropriately.
# (list[str]) the category names in their contiguous order
MetadataCatalog.get('omni3d_model').thing_classes = ...
# (dict[int: int]) the mapping from Omni3D category IDs to the contiguous order
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
Args:
dataset_names (list[str]): the individual dataset splits for evaluation
filter_settings (dict): the filter settings used for evaluation, see
cubercnn/data/datasets.py get_filter_settings_from_cfg
output_folder (str): the output folder where results can be stored to disk.
iter_label (str): an optional iteration/label used within the summary
only_2d (bool): whether the evaluation mode should be 2D or 2D and 3D.
"""
self.dataset_names = dataset_names
self.filter_settings = filter_settings
self.output_folder = output_folder
self.iter_label = iter_label
self.only_2d = only_2d
# Each dataset evaluator is stored here
self.evaluators = OrderedDict()
# These are the main evaluation results
self.results = OrderedDict()
# These store per-dataset results to be printed
self.results_analysis = OrderedDict()
self.results_omni3d = OrderedDict()
self.overall_imgIds = set()
self.overall_catIds = set()
# These store the evaluations for each category and area,
# concatenated from ALL evaluated datasets. Doing so avoids
# the need to re-compute them when accumulating results.
self.evals_per_cat_area2D = {}
self.evals_per_cat_area3D = {}
self.output_folders = {
dataset_name: os.path.join(self.output_folder, dataset_name)
for dataset_name in dataset_names
}
for dataset_name in self.dataset_names:
# register any datasets that need it
if MetadataCatalog.get(dataset_name).get('json_file') is None:
simple_register(dataset_name, filter_settings, filter_empty=False)
# create an individual dataset evaluator
self.evaluators[dataset_name] = Omni3DEvaluator(
dataset_name, output_dir=self.output_folders[dataset_name],
filter_settings=self.filter_settings, only_2d=self.only_2d,
eval_prox=('Objectron' in dataset_name or 'SUNRGBD' in dataset_name),
distributed=False, # actual evaluation should be single process
)
self.evaluators[dataset_name].reset()
self.overall_imgIds.update(set(self.evaluators[dataset_name]._omni_api.getImgIds()))
self.overall_catIds.update(set(self.evaluators[dataset_name]._omni_api.getCatIds()))
def add_predictions(self, dataset_name, predictions):
"""
Adds predictions to the evaluator for dataset_name. This can be any number of
predictions, including all predictions passed in at once or in batches.
Args:
dataset_name (str): the dataset split name which the predictions belong to
predictions (list[dict]): each item in the list is a dict as follows:
{
"image_id": <int> the unique image identifier from Omni3D,
"K": <np.array> 3x3 intrinsics matrix for the image,
"width": <int> image width,
"height": <int> image height,
"instances": [
{
"image_id": <int> the unique image identifier from Omni3D,
"category_id": <int> the contiguous category prediction IDs,
which can be mapped from Omni3D's category ID's using
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
"bbox": [float] 2D box as [x1, y1, x2, y2] used for IoU2D,
"score": <float> the confidence score for the object,
"depth": <float> the depth of the center of the object,
"bbox3D": list[list[float]] 8x3 corner vertices used for IoU3D,
}
...
]
}
"""
# concatenate incoming predictions
self.evaluators[dataset_name]._predictions += predictions
def save_predictions(self, dataset_name):
"""
Saves the predictions from dataset_name to disk, under self.output_folder.
Args:
dataset_name (str): the dataset split name which should be saved.
"""
# save predictions to disk
output_folder_dataset = self.output_folders[dataset_name]
PathManager.mkdirs(output_folder_dataset)
file_path = os.path.join(output_folder_dataset, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(self.evaluators[dataset_name]._predictions, f)
def evaluate(self, dataset_name):
"""
Runs the evaluation for an individual dataset split, assuming all
predictions have been passed in.
Args:
dataset_name (str): the dataset split name which should be evaluated.
"""
if not dataset_name in self.results:
# run evaluation and cache
self.results[dataset_name] = self.evaluators[dataset_name].evaluate()
results = self.results[dataset_name]
logger.info('\n'+results['log_str_2D'].replace('mode=2D', '{} iter={} mode=2D'.format(dataset_name, self.iter_label)))
# store the partially accumulated evaluations per category per area
for key, item in results['bbox_2D_evals_per_cat_area'].items():
if not key in self.evals_per_cat_area2D:
self.evals_per_cat_area2D[key] = []
self.evals_per_cat_area2D[key] += item
if not self.only_2d:
# store the partially accumulated evaluations per category per area
for key, item in results['bbox_3D_evals_per_cat_area'].items():
if not key in self.evals_per_cat_area3D:
self.evals_per_cat_area3D[key] = []
self.evals_per_cat_area3D[key] += item
logger.info('\n'+results['log_str_3D'].replace('mode=3D', '{} iter={} mode=3D'.format(dataset_name, self.iter_label)))
# full model category names
category_names = self.filter_settings['category_names']
# The set of categories present in the dataset; there should be no duplicates
categories = {cat for cat in category_names if 'AP-{}'.format(cat) in results['bbox_2D']}
assert len(categories) == len(set(categories))
# default are all NaN
general_2D, general_3D, omni_2D, omni_3D = (np.nan,) * 4
# 2D and 3D performance for categories in dataset; and log
general_2D = np.mean([results['bbox_2D']['AP-{}'.format(cat)] for cat in categories])
if not self.only_2d:
general_3D = np.mean([results['bbox_3D']['AP-{}'.format(cat)] for cat in categories])
# 2D and 3D performance on Omni3D categories
omni3d_dataset_categories = get_omni3d_categories(dataset_name) # dataset-specific categories
if len(omni3d_dataset_categories - categories) == 0: # omni3d_dataset_categories is a subset of categories
omni_2D = np.mean([results['bbox_2D']['AP-{}'.format(cat)] for cat in omni3d_dataset_categories])
if not self.only_2d:
omni_3D = np.mean([results['bbox_3D']['AP-{}'.format(cat)] for cat in omni3d_dataset_categories])
self.results_omni3d[dataset_name] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Performance analysis
extras_AP15, extras_AP25, extras_AP50, extras_APn, extras_APm, extras_APf = (np.nan,)*6
if not self.only_2d:
extras_AP15 = results['bbox_3D']['AP15']
extras_AP25 = results['bbox_3D']['AP25']
extras_AP50 = results['bbox_3D']['AP50']
extras_APn = results['bbox_3D']['APn']
extras_APm = results['bbox_3D']['APm']
extras_APf = results['bbox_3D']['APf']
self.results_analysis[dataset_name] = {
"iters": self.iter_label,
"AP2D": general_2D, "AP3D": general_3D,
"AP3D@15": extras_AP15, "AP3D@25": extras_AP25, "AP3D@50": extras_AP50,
"AP3D-N": extras_APn, "AP3D-M": extras_APm, "AP3D-F": extras_APf
}
# Performance per category
results_cat = OrderedDict()
for cat in category_names:
cat_2D, cat_3D = (np.nan,) * 2
if 'AP-{}'.format(cat) in results['bbox_2D']:
cat_2D = results['bbox_2D']['AP-{}'.format(cat)]
if not self.only_2d:
cat_3D = results['bbox_3D']['AP-{}'.format(cat)]
if not np.isnan(cat_2D) or not np.isnan(cat_3D):
results_cat[cat] = {"AP2D": cat_2D, "AP3D": cat_3D}
utils_logperf.print_ap_category_histogram(dataset_name, results_cat)
def summarize_all(self,):
'''
Report collective metrics when possible for the Omni3D dataset.
This uses pre-computed evaluation results from each dataset,
which were aggregated and cached while evaluating individually.
This process simply re-accumulates and summarizes them.
'''
# First, double check that we have all the evaluations
for dataset_name in self.dataset_names:
if not dataset_name in self.results:
self.evaluate(dataset_name)
thing_classes = MetadataCatalog.get('omni3d_model').thing_classes
catId2contiguous = MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id
ordered_things = [thing_classes[catId2contiguous[cid]] for cid in self.overall_catIds]
categories = set(ordered_things)
evaluator2D = Omni3Deval(mode='2D')
evaluator2D.params.catIds = list(self.overall_catIds)
evaluator2D.params.imgIds = list(self.overall_imgIds)
evaluator2D.evalImgs = True
evaluator2D.evals_per_cat_area = self.evals_per_cat_area2D
evaluator2D._paramsEval = copy.deepcopy(evaluator2D.params)
evaluator2D.accumulate()
summarize_str2D = evaluator2D.summarize()
precisions = evaluator2D.eval['precision']
metrics = ["AP", "AP50", "AP75", "AP95", "APs", "APm", "APl"]
results2D = {
metric: float(
evaluator2D.stats[idx] * 100 if evaluator2D.stats[idx] >= 0 else "nan"
)
for idx, metric in enumerate(metrics)
}
for idx, name in enumerate(ordered_things):
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results2D.update({"AP-" + "{}".format(name): float(ap * 100)})
evaluator3D = Omni3Deval(mode='3D')
evaluator3D.params.catIds = list(self.overall_catIds)
evaluator3D.params.imgIds = list(self.overall_imgIds)
evaluator3D.evalImgs = True
evaluator3D.evals_per_cat_area = self.evals_per_cat_area3D
evaluator3D._paramsEval = copy.deepcopy(evaluator3D.params)
evaluator3D.accumulate()
summarize_str3D = evaluator3D.summarize()
precisions = evaluator3D.eval['precision']
metrics = ["AP", "AP15", "AP25", "AP50", "APn", "APm", "APf"]
results3D = {
metric: float(
evaluator3D.stats[idx] * 100 if evaluator3D.stats[idx] >= 0 else "nan"
)
for idx, metric in enumerate(metrics)
}
for idx, name in enumerate(ordered_things):
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results3D.update({"AP-" + "{}".format(name): float(ap * 100)})
# All concat categories
general_2D, general_3D = (np.nan,) * 2
general_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in categories])
if not self.only_2d:
general_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in categories])
# Analysis performance
extras_AP15, extras_AP25, extras_AP50, extras_APn, extras_APm, extras_APf = (np.nan,) * 6
if not self.only_2d:
extras_AP15 = results3D['AP15']
extras_AP25 = results3D['AP25']
extras_AP50 = results3D['AP50']
extras_APn = results3D['APn']
extras_APm = results3D['APm']
extras_APf = results3D['APf']
self.results_analysis["<Concat>"] = {
"iters": self.iter_label,
"AP2D": general_2D, "AP3D": general_3D,
"AP3D@15": extras_AP15, "AP3D@25": extras_AP25, "AP3D@50": extras_AP50,
"AP3D-N": extras_APn, "AP3D-M": extras_APm, "AP3D-F": extras_APf
}
# Omni3D Outdoor performance
omni_2D, omni_3D = (np.nan,) * 2
omni3d_outdoor_categories = get_omni3d_categories("omni3d_out")
if len(omni3d_outdoor_categories - categories) == 0:
omni_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in omni3d_outdoor_categories])
if not self.only_2d:
omni_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in omni3d_outdoor_categories])
self.results_omni3d["Omni3D_Out"] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Omni3D Indoor performance
omni_2D, omni_3D = (np.nan,) * 2
omni3d_indoor_categories = get_omni3d_categories("omni3d_in")
if len(omni3d_indoor_categories - categories) == 0:
omni_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in omni3d_indoor_categories])
if not self.only_2d:
omni_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in omni3d_indoor_categories])
self.results_omni3d["Omni3D_In"] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Omni3D performance
omni_2D, omni_3D = (np.nan,) * 2
omni3d_categories = get_omni3d_categories("omni3d")
if len(omni3d_categories - categories) == 0:
omni_2D = np.mean([results2D['AP-{}'.format(cat)] for cat in omni3d_categories])
if not self.only_2d:
omni_3D = np.mean([results3D['AP-{}'.format(cat)] for cat in omni3d_categories])
self.results_omni3d["Omni3D"] = {"iters": self.iter_label, "AP2D": omni_2D, "AP3D": omni_3D}
# Per-category performance for the cumulative datasets
results_cat = OrderedDict()
for cat in self.filter_settings['category_names']:
cat_2D, cat_3D = (np.nan,) * 2
if 'AP-{}'.format(cat) in results2D:
cat_2D = results2D['AP-{}'.format(cat)]
if not self.only_2d:
cat_3D = results3D['AP-{}'.format(cat)]
if not np.isnan(cat_2D) or not np.isnan(cat_3D):
results_cat[cat] = {"AP2D": cat_2D, "AP3D": cat_3D}
utils_logperf.print_ap_category_histogram("<Concat>", results_cat)
utils_logperf.print_ap_analysis_histogram(self.results_analysis)
utils_logperf.print_ap_omni_histogram(self.results_omni3d)
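# Workflow sketch (assumption, mirroring do_test in the training script): how the helper is
# typically driven per dataset before the final cross-dataset summary.
def _example_eval_workflow(eval_helper, predictions_per_dataset):
    # predictions_per_dataset: dict mapping dataset_name -> list[dict], in the format
    # documented in add_predictions above
    for dataset_name, predictions in predictions_per_dataset.items():
        eval_helper.add_predictions(dataset_name, predictions)
        eval_helper.save_predictions(dataset_name)
        eval_helper.evaluate(dataset_name)
    # finally, summarize the collective Omni3D metrics across all datasets
    eval_helper.summarize_all()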
def inference_on_dataset(model, data_loader):
"""
Run model on the data_loader.
Also benchmark the inference speed of `model.__call__` accurately.
The model will be used in eval mode.
Args:
model (callable): a callable which takes an object from
`data_loader` and returns some outputs.
If it's an nn.Module, it will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
Returns:
a list of per-image prediction dicts in COCO json format,
gathered onto the main process when running distributed.
"""
num_devices = get_world_size()
distributed = num_devices > 1
logger.info("Start inference on {} batches".format(len(data_loader)))
total = len(data_loader) # inference data loader must have a fixed length
num_warmup = min(5, total - 1)
start_time = time.perf_counter()
total_data_time = 0
total_compute_time = 0
total_eval_time = 0
inference_json = []
with ExitStack() as stack:
if isinstance(model, nn.Module):
stack.enter_context(inference_context(model))
stack.enter_context(torch.no_grad())
start_data_time = time.perf_counter()
for idx, inputs in enumerate(data_loader):
total_data_time += time.perf_counter() - start_data_time
if idx == num_warmup:
start_time = time.perf_counter()
total_data_time = 0
total_compute_time = 0
total_eval_time = 0
start_compute_time = time.perf_counter()
outputs = model(inputs)
if torch.cuda.is_available():
torch.cuda.synchronize()
total_compute_time += time.perf_counter() - start_compute_time
start_eval_time = time.perf_counter()
for input, output in zip(inputs, outputs):
prediction = {
"image_id": input["image_id"],
"K": input["K"],
"width": input["width"],
"height": input["height"],
}
# convert to json format
instances = output["instances"].to('cpu')
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
# store in overall predictions
inference_json.append(prediction)
total_eval_time += time.perf_counter() - start_eval_time
iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
data_seconds_per_iter = total_data_time / iters_after_start
compute_seconds_per_iter = total_compute_time / iters_after_start
eval_seconds_per_iter = total_eval_time / iters_after_start
total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1)))
log_every_n_seconds(
logging.INFO,
(
f"Inference done {idx + 1}/{total}. "
f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
f"Total: {total_seconds_per_iter:.4f} s/iter. "
f"ETA={eta}"
),
n=5,
)
start_data_time = time.perf_counter()
# Measure the time only for this worker (before the synchronization barrier)
total_time = time.perf_counter() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
# NOTE this format is parsed by grep
logger.info(
"Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
total_time_str, total_time / (total - num_warmup), num_devices
)
)
total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
logger.info(
"Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
)
)
if distributed:
comm.synchronize()
inference_json = comm.gather(inference_json, dst=0)
inference_json = list(itertools.chain(*inference_json))
if not comm.is_main_process():
return []
return inference_json
class Omni3DEvaluator(COCOEvaluator):
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
max_dets_per_image=None,
use_fast_impl=False,
eval_prox=False,
only_2d=False,
filter_settings={},
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. For now, support only for "bbox".
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
max_dets_per_image (int): limit on the maximum number of detections per image.
By default in COCO, this limit is to 100, but this can be customized
to be greater, as is needed in evaluation metrics AP fixed and AP pool
(see https://arxiv.org/pdf/2102.01066.pdf)
This doesn't affect keypoint evaluation.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
eval_prox (bool): whether to perform proximity evaluation. For datasets that are not
exhaustively annotated.
only_2d (bool): evaluates only 2D performance if set to True
filter_settings (dict): settings for the dataset loader.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
self._eval_prox = eval_prox
self._only_2d = only_2d
self._filter_settings = filter_settings
# COCOeval requires the limit on the number of detections per image (maxDets) to be a list
# with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
# 3rd element (100) is used as the limit on the number of detections per image when
# evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
# we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
if max_dets_per_image is None:
max_dets_per_image = [1, 10, 100]
else:
max_dets_per_image = [1, 10, max_dets_per_image]
self._max_dets_per_image = max_dets_per_image
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._omni_api = Omni3D([json_file], filter_settings)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._omni_api.dataset
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
# Optional image keys to keep when available
img_keys_optional = ["p2"]
for input, output in zip(inputs, outputs):
prediction = {
"image_id": input["image_id"],
"K": input["K"],
"width": input["width"],
"height": input["height"],
}
# store optional keys when available
for img_key in img_keys_optional:
if img_key in input:
prediction.update({img_key: input[img_key]})
# already in COCO format
if type(output["instances"]) == list:
prediction["instances"] = output["instances"]
# tensor instances format
else:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(
instances, input["image_id"]
)
if len(prediction) > 1:
self._predictions.append(prediction)
def _derive_omni_results(self, omni_eval, iou_type, mode, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
omni_eval (None or Omni3Deval): None represents no predictions from model.
iou_type (str):
mode (str): either "2D" or "3D"
class_names (None or list[str]): if provided, will use it to compute
per-category AP.
Returns:
a dict of {metric name: score}
"""
assert mode in ["2D", "3D"]
metrics = {
"2D": ["AP", "AP50", "AP75", "AP95", "APs", "APm", "APl"],
"3D": ["AP", "AP15", "AP25", "AP50", "APn", "APm", "APf"],
}[mode]
if iou_type != "bbox":
raise ValueError("Support only for bbox evaluation.")
if omni_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(
omni_eval.stats[idx] * 100 if omni_eval.stats[idx] >= 0 else "nan"
)
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {} in {} mode: \n".format(iou_type, mode)
+ create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = omni_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_table = itertools.zip_longest(
*[results_flatten[i::N_COLS] for i in range(N_COLS)]
)
table = tabulate(
results_table,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info(
"Per-category {} AP in {} mode: \n".format(iou_type, mode) + table
)
results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
omni_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(omni_results)
omni3d_global_categories = MetadataCatalog.get('omni3d_model').thing_classes
# the dataset results will store only the categories that are present
# in the corresponding dataset, all others will be dropped.
dataset_results = []
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = (
self._metadata.thing_dataset_id_to_contiguous_id
)
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert (
min(all_contiguous_ids) == 0
and max(all_contiguous_ids) == num_classes - 1
)
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in omni_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
cat_name = omni3d_global_categories[category_id]
if cat_name in self._metadata.thing_classes:
dataset_results.append(result)
# replace the results with the filtered
# instances that are in vocabulary.
omni_results = dataset_results
if self._output_dir:
file_path = os.path.join(self._output_dir, "omni_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(omni_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox"}, f"Got unknown task: {task}!"
evals, log_strs = (
_evaluate_predictions_on_omni(
self._omni_api,
omni_results,
task,
img_ids=img_ids,
only_2d=self._only_2d,
eval_prox=self._eval_prox,
)
if len(omni_results) > 0
else None # cocoapi does not handle empty results very well
)
modes = evals.keys()
for mode in modes:
res = self._derive_omni_results(
evals[mode],
task,
mode,
class_names=self._metadata.get("thing_classes"),
)
self._results[task + "_" + format(mode)] = res
self._results[task + "_" + format(mode) + '_evalImgs'] = evals[mode].evalImgs
self._results[task + "_" + format(mode) + '_evals_per_cat_area'] = evals[mode].evals_per_cat_area
self._results["log_str_2D"] = log_strs["2D"]
if "3D" in log_strs:
self._results["log_str_3D"] = log_strs["3D"]
def _evaluate_predictions_on_omni(
omni_gt,
omni_results,
iou_type,
img_ids=None,
only_2d=False,
eval_prox=False,
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(omni_results) > 0
log_strs, evals = {}, {}
omni_dt = omni_gt.loadRes(omni_results)
modes = ["2D"] if only_2d else ["2D", "3D"]
for mode in modes:
omni_eval = Omni3Deval(
omni_gt, omni_dt, iouType=iou_type, mode=mode, eval_prox=eval_prox
)
if img_ids is not None:
omni_eval.params.imgIds = img_ids
omni_eval.evaluate()
omni_eval.accumulate()
log_str = omni_eval.summarize()
log_strs[mode] = log_str
evals[mode] = omni_eval
return evals, log_strs
def instances_to_coco_json(instances, img_id):
num_instances = len(instances)
if num_instances == 0:
return []
boxes = BoxMode.convert(
instances.pred_boxes.tensor.numpy(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
).tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
if hasattr(instances, "pred_bbox3D"):
bbox3D = instances.pred_bbox3D.tolist()
center_cam = instances.pred_center_cam.tolist()
center_2D = instances.pred_center_2D.tolist()
dimensions = instances.pred_dimensions.tolist()
pose = instances.pred_pose.tolist()
else:
# dummy
bbox3D = np.ones([num_instances, 8, 3]).tolist()
center_cam = np.ones([num_instances, 3]).tolist()
center_2D = np.ones([num_instances, 2]).tolist()
dimensions = np.ones([num_instances, 3]).tolist()
pose = np.ones([num_instances, 3, 3]).tolist()
results = []
for k in range(num_instances):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
"depth": np.array(bbox3D[k])[:, 2].mean(),
"bbox3D": bbox3D[k],
"center_cam": center_cam[k],
"center_2D": center_2D[k],
"dimensions": dimensions[k],
"pose": pose[k],
}
results.append(result)
return results
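# Minimal sketch (assumption, not in the original file): converting a 2D-only detectron2
# Instances object; without pred_bbox3D the 3D fields fall back to the dummy values above.
def _example_instances_to_coco_json():
    from detectron2.structures import Instances, Boxes
    inst = Instances((480, 640))  # (height, width) of a hypothetical image
    inst.pred_boxes = Boxes(torch.tensor([[10., 20., 50., 80.]]))
    inst.scores = torch.tensor([0.9])
    inst.pred_classes = torch.tensor([0])
    return instances_to_coco_json(inst, img_id=0)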
# ---------------------------------------------------------------------
# Omni3DParams
# ---------------------------------------------------------------------
class Omni3DParams:
"""
Params for the Omni evaluation API
"""
def setDet2DParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble: the data points it generates can be slightly larger than the true values
self.iouThrs = np.linspace(
0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True
)
self.recThrs = np.linspace(
0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True
)
self.maxDets = [1, 10, 100]
self.areaRng = [
[0 ** 2, 1e5 ** 2],
[0 ** 2, 32 ** 2],
[32 ** 2, 96 ** 2],
[96 ** 2, 1e5 ** 2],
]
self.areaRngLbl = ["all", "small", "medium", "large"]
self.useCats = 1
def setDet3DParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble: the data points it generates can be slightly larger than the true values
self.iouThrs = np.linspace(
0.05, 0.5, int(np.round((0.5 - 0.05) / 0.05)) + 1, endpoint=True
)
self.recThrs = np.linspace(
0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True
)
self.maxDets = [1, 10, 100]
self.areaRng = [[0, 1e5], [0, 10], [10, 35], [35, 1e5]]
self.areaRngLbl = ["all", "near", "medium", "far"]
self.useCats = 1
def __init__(self, mode="2D"):
"""
Args:
iouType (str): defines 2D or 3D evaluation parameters.
One of {"2D", "3D"}
"""
if mode == "2D":
self.setDet2DParams()
elif mode == "3D":
self.setDet3DParams()
else:
raise Exception("mode %s not supported" % (mode))
self.iouType = "bbox"
self.mode = mode
# the proximity threshold defines the neighborhood
# when evaluating on non-exhaustively annotated datasets
self.proximity_thresh = 0.3
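# Quick sketch (assumption): the two parameter modes differ mainly in their IoU thresholds
# and "area" ranges (pixel area in 2D vs. object distance ranges, i.e. near/medium/far, in 3D).
def _example_params_by_mode():
    params_2d = Omni3DParams("2D")   # IoU thresholds 0.50:0.05:0.95
    params_3d = Omni3DParams("3D")   # IoU thresholds 0.05:0.05:0.50
    return params_2d.iouThrs, params_3d.iouThrs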
# ---------------------------------------------------------------------
# Omni3Deval
# ---------------------------------------------------------------------
class Omni3Deval(COCOeval):
"""
Wraps COCOeval for 2D or 3D box evaluation depending on mode
"""
def __init__(
self, cocoGt=None, cocoDt=None, iouType="bbox", mode="2D", eval_prox=False
):
"""
Initialize COCOeval using coco APIs for Gt and Dt
Args:
cocoGt: COCO object with ground truth annotations
cocoDt: COCO object with detection results
iouType: (str) defines the evaluation type. Supports only "bbox" now.
mode: (str) defines whether to evaluate 2D or 3D performance.
One of {"2D", "3D"}
eval_prox: (bool) if True, performs "Proximity Evaluation", i.e.
evaluates detections in the proximity of the ground truth 2D boxes.
This is used for datasets which are not exhaustively annotated.
"""
if not iouType:
print("iouType not specified. use default iouType bbox")
elif iouType != "bbox":
print("no support for %s iouType" % (iouType))
self.mode = mode
if mode not in ["2D", "3D"]:
raise Exception("mode %s not supported" % (mode))
self.eval_prox = eval_prox
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
# per-image per-category evaluation results [KxAxI] elements
self.evalImgs = defaultdict(list)
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Omni3DParams(mode) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
self.evals_per_cat_area = None
def _prepare(self):
"""
Prepare ._gts and ._dts for evaluation based on params
"""
p = self.params
if p.useCats:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# set ignore flag
ignore_flag = "ignore2D" if self.mode == "2D" else "ignore3D"
for gt in gts:
gt[ignore_flag] = gt[ignore_flag] if ignore_flag in gt else 0
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt["image_id"], gt["category_id"]].append(gt)
for dt in dts:
self._dts[dt["image_id"], dt["category_id"]].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
assert self.evalImgs, 'Please run evaluate() first'
tic = time.time()
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
scores = -np.ones((T,R,K,A,M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
catid_list = [k for n, k in enumerate(p.catIds) if k in setK]
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
has_precomputed_evals = not (self.evals_per_cat_area is None)
if has_precomputed_evals:
evals_per_cat_area = self.evals_per_cat_area
else:
evals_per_cat_area = {}
# retrieve E at each category, area range, and max number of detections
for k, (k0, catId) in enumerate(zip(k_list, catid_list)):
Nk = k0*A0*I0
for a, a0 in enumerate(a_list):
Na = a0*I0
if has_precomputed_evals:
E = evals_per_cat_area[(catId, a)]
else:
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
evals_per_cat_area[(catId, a)] = E
if len(E) == 0:
continue
for m, maxDet in enumerate(m_list):
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
# different sorting method generates slightly different results.
# mergesort is used to be consistent as Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0)
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg) )
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
# numpy is slow without cython optimization for accessing elements
                        # using python lists gives a significant speed improvement
pr = pr.tolist(); q = q.tolist()
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except:
pass
precision[t,:,k,a,m] = np.array(q)
scores[t,:,k,a,m] = np.array(ss)
self.evals_per_cat_area = evals_per_cat_area
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
"""
print("Running per image evaluation...")
p = self.params
print("Evaluate annotation type *{}*".format(p.iouType))
tic = time.time()
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
catIds = p.catIds if p.useCats else [-1]
# loop through images, area range, max detection number
self.ious = {
(imgId, catId): self.computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds
}
maxDet = p.maxDets[-1]
self.evalImgs = [
self.evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print("DONE (t={:0.2f}s).".format(toc - tic))
def computeIoU(self, imgId, catId):
"""
        Computes the IoUs between detections (sorted by "score") and ground truths,
        for either 2D boxes (in 2D mode) or 3D boxes (in 3D mode)
"""
device = (torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu"))
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0 : p.maxDets[-1]]
if p.iouType == "bbox":
if self.mode == "2D":
g = [g["bbox"] for g in gt]
d = [d["bbox"] for d in dt]
elif self.mode == "3D":
g = [g["bbox3D"] for g in gt]
d = [d["bbox3D"] for d in dt]
else:
raise Exception("unknown iouType for iou computation")
# compute iou between each dt and gt region
# iscrowd is required in builtin maskUtils so we
# use a dummy buffer for it
iscrowd = [0 for o in gt]
if self.mode == "2D":
ious = maskUtils.iou(d, g, iscrowd)
elif len(d) > 0 and len(g) > 0:
# For 3D eval, we want to run IoU in CUDA if available
if torch.cuda.is_available() and len(d) * len(g) < MAX_DTS_CROSS_GTS_FOR_IOU3D:
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
dd = torch.tensor(d, device=device, dtype=torch.float32)
gg = torch.tensor(g, device=device, dtype=torch.float32)
ious = box3d_overlap(dd, gg).cpu().numpy()
else:
ious = []
in_prox = None
if self.eval_prox:
g = [g["bbox"] for g in gt]
d = [d["bbox"] for d in dt]
iscrowd = [0 for o in gt]
ious2d = maskUtils.iou(d, g, iscrowd)
if type(ious2d) == list:
in_prox = []
else:
in_prox = ious2d > p.proximity_thresh
return ious, in_prox
def evaluateImg(self, imgId, catId, aRng, maxDet):
"""
Perform evaluation for single category and image
Returns:
dict (single image results)
"""
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return None
flag_range = "area" if self.mode == "2D" else "depth"
flag_ignore = "ignore2D" if self.mode == "2D" else "ignore3D"
for g in gt:
if g[flag_ignore] or (g[flag_range] < aRng[0] or g[flag_range] > aRng[1]):
g["_ignore"] = 1
else:
g["_ignore"] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in dtind[0:maxDet]]
# load computed ious
ious = (
self.ious[imgId, catId][0][:, gtind]
if len(self.ious[imgId, catId][0]) > 0
else self.ious[imgId, catId][0]
)
if self.eval_prox:
in_prox = (
self.ious[imgId, catId][1][:, gtind]
if len(self.ious[imgId, catId][1]) > 0
else self.ious[imgId, catId][1]
)
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T, G))
dtm = np.zeros((T, D))
gtIg = np.array([g["_ignore"] for g in gt])
dtIg = np.zeros((T, D))
if not len(ious) == 0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t, 1 - 1e-10])
m = -1
for gind, g in enumerate(gt):
# in case of proximity evaluation, if not in proximity continue
if self.eval_prox and not in_prox[dind, gind]:
continue
# if this gt already matched, continue
if gtm[tind, gind] > 0:
continue
                        # if dt already matched to a regular gt, and this gt is an ignore gt, stop
if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
break
# continue to next gt unless better match made
if ious[dind, gind] < iou:
continue
# if match successful and best so far, store appropriately
iou = ious[dind, gind]
m = gind
# if match made store id of match for both dt and gt
if m == -1:
continue
dtIg[tind, dind] = gtIg[m]
dtm[tind, dind] = gt[m]["id"]
gtm[tind, m] = d["id"]
# set unmatched detections outside of area range to ignore
a = np.array(
[d[flag_range] < aRng[0] or d[flag_range] > aRng[1] for d in dt]
).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
# in case of proximity evaluation, ignore detections which are far from gt regions
if self.eval_prox and len(in_prox) > 0:
dt_far = in_prox.any(1) == 0
dtIg = np.logical_or(dtIg, np.repeat(dt_far.reshape((1, len(dt))), T, 0))
# store results for given image and category
return {
"image_id": imgId,
"category_id": catId,
"aRng": aRng,
"maxDet": maxDet,
"dtIds": [d["id"] for d in dt],
"gtIds": [g["id"] for g in gt],
"dtMatches": dtm,
"gtMatches": gtm,
"dtScores": [d["score"] for d in dt],
"gtIgnore": gtIg,
"dtIgnore": dtIg,
}
def summarize(self):
"""
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
"""
def _summarize(mode, ap=1, iouThr=None, areaRng="all", maxDets=100, log_str=""):
p = self.params
eval = self.eval
if mode == "2D":
iStr = (" {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}")
elif mode == "3D":
iStr = " {:<18} {} @[ IoU={:<9} | depth={:>6s} | maxDets={:>3d} ] = {:0.3f}"
titleStr = "Average Precision" if ap == 1 else "Average Recall"
typeStr = "(AP)" if ap == 1 else "(AR)"
iouStr = (
"{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
if iouThr is None
else "{:0.2f}".format(iouThr)
)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = eval["precision"]
# IoU
if iouThr is not None:
t = np.where(np.isclose(iouThr, p.iouThrs.astype(float)))[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = eval["recall"]
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
if log_str != "":
log_str += "\n"
log_str += "mode={} ".format(mode) + \
iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)
return mean_s, log_str
def _summarizeDets(mode):
params = self.params
            # the thresholds here define the thresholds printed in `derive_omni_results`
thres = [0.5, 0.75, 0.95] if mode == "2D" else [0.15, 0.25, 0.50]
stats = np.zeros((13,))
stats[0], log_str = _summarize(mode, 1)
stats[1], log_str = _summarize(
mode, 1, iouThr=thres[0], maxDets=params.maxDets[2], log_str=log_str
)
stats[2], log_str = _summarize(
mode, 1, iouThr=thres[1], maxDets=params.maxDets[2], log_str=log_str
)
stats[3], log_str = _summarize(
mode, 1, iouThr=thres[2], maxDets=params.maxDets[2], log_str=log_str
)
stats[4], log_str = _summarize(
mode,
1,
areaRng=params.areaRngLbl[1],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[5], log_str = _summarize(
mode,
1,
areaRng=params.areaRngLbl[2],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[6], log_str = _summarize(
mode,
1,
areaRng=params.areaRngLbl[3],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[7], log_str = _summarize(
mode, 0, maxDets=params.maxDets[0], log_str=log_str
)
stats[8], log_str = _summarize(
mode, 0, maxDets=params.maxDets[1], log_str=log_str
)
stats[9], log_str = _summarize(
mode, 0, maxDets=params.maxDets[2], log_str=log_str
)
stats[10], log_str = _summarize(
mode,
0,
areaRng=params.areaRngLbl[1],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[11], log_str = _summarize(
mode,
0,
areaRng=params.areaRngLbl[2],
maxDets=params.maxDets[2],
log_str=log_str,
)
stats[12], log_str = _summarize(
mode,
0,
areaRng=params.areaRngLbl[3],
maxDets=params.maxDets[2],
log_str=log_str,
)
return stats, log_str
if not self.eval:
raise Exception("Please run accumulate() first")
stats, log_str = _summarizeDets(self.mode)
self.stats = stats
return log_str
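# A minimal usage sketch (not part of the original API; "evaluator" is assumed to be an
# instance of the evaluation class defined above, constructed from ground-truth and
# detection COCO-style objects): the flow mirrors pycocotools' COCOeval.
def _example_eval_flow(evaluator):
    evaluator.evaluate()     # per-image, per-category matching; fills evalImgs
    evaluator.accumulate()   # builds the [T, R, K, A, M] precision/recall arrays
    return evaluator.summarize()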
| 65,081 | 37.171261 | 168 |
py
|
omni3d
|
omni3d-main/cubercnn/evaluation/__init__.py
|
from .omni3d_evaluation import *
| 32 | 32 | 32 |
py
|
omni3d
|
omni3d-main/cubercnn/config/config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.config import CfgNode as CN
def get_cfg_defaults(cfg):
# A list of category names which will be used
cfg.DATASETS.CATEGORY_NAMES = []
# The category names which will be treated as ignore
# e.g., not counting as background during training
# or as false positives during evaluation.
cfg.DATASETS.IGNORE_NAMES = []
# Should the datasets appear with the same probabilty
# in batches (e.g., the imbalance from small and large
# datasets will be accounted for during sampling)
cfg.DATALOADER.BALANCE_DATASETS = False
# The thresholds for when to treat a known box
# as ignore based on too heavy of truncation or
# too low of visibility in the image. This affects
# both training and evaluation ignores.
cfg.DATASETS.TRUNCATION_THRES = 0.99
cfg.DATASETS.VISIBILITY_THRES = 0.01
cfg.DATASETS.MIN_HEIGHT_THRES = 0.00
cfg.DATASETS.MAX_DEPTH = 1e8
# Whether modal 2D boxes should be loaded,
# or if the full 3D projected boxes should be used.
cfg.DATASETS.MODAL_2D_BOXES = False
# Whether truncated 2D boxes should be loaded,
# or if the 3D full projected boxes should be used.
cfg.DATASETS.TRUNC_2D_BOXES = True
# Threshold used for matching and filtering boxes
# inside of ignore regions, within the RPN and ROIHeads
cfg.MODEL.RPN.IGNORE_THRESHOLD = 0.5
# Configuration for cube head
cfg.MODEL.ROI_CUBE_HEAD = CN()
cfg.MODEL.ROI_CUBE_HEAD.NAME = "CubeHead"
cfg.MODEL.ROI_CUBE_HEAD.POOLER_RESOLUTION = 7
cfg.MODEL.ROI_CUBE_HEAD.POOLER_SAMPLING_RATIO = 0
cfg.MODEL.ROI_CUBE_HEAD.POOLER_TYPE = "ROIAlignV2"
# Settings for the cube head features
cfg.MODEL.ROI_CUBE_HEAD.NUM_CONV = 0
cfg.MODEL.ROI_CUBE_HEAD.CONV_DIM = 256
cfg.MODEL.ROI_CUBE_HEAD.NUM_FC = 2
cfg.MODEL.ROI_CUBE_HEAD.FC_DIM = 1024
# the style to predict Z with currently supported
# options --> ['direct', 'sigmoid', 'log', 'clusters']
cfg.MODEL.ROI_CUBE_HEAD.Z_TYPE = "direct"
# the style to predict pose with currently supported
# options --> ['6d', 'euler', 'quaternion']
cfg.MODEL.ROI_CUBE_HEAD.POSE_TYPE = "6d"
# Whether to scale all 3D losses by inverse depth
cfg.MODEL.ROI_CUBE_HEAD.INVERSE_Z_WEIGHT = False
# Virtual depth puts all predictions of depth into
# a shared virtual space with a shared focal length.
cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_DEPTH = True
cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_FOCAL = 512.0
# If true, then all losses are computed using the 8 corners
# such that they are all in a shared scale space.
# E.g., their scale correlates with their impact on 3D IoU.
# This way no manual weights need to be set.
cfg.MODEL.ROI_CUBE_HEAD.DISENTANGLED_LOSS = True
# When > 1, the outputs of the 3D head will be based on
# a 2D scale clustering, based on 2D proposal height/width.
# This parameter describes the number of bins to cluster.
cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS = 1
# Whether batch norm is enabled during training.
# If false, all BN weights will be frozen.
cfg.MODEL.USE_BN = True
# Whether to predict the pose in allocentric space.
# The allocentric space may correlate better with 2D
# images compared to egocentric poses.
cfg.MODEL.ROI_CUBE_HEAD.ALLOCENTRIC_POSE = True
# Whether to use chamfer distance for disentangled losses
# of pose. This avoids periodic issues of rotation but
# may prevent the pose "direction" from being interpretable.
cfg.MODEL.ROI_CUBE_HEAD.CHAMFER_POSE = True
# Should the prediction heads share FC features or not.
# These include groups of uv, z, whl, pose.
cfg.MODEL.ROI_CUBE_HEAD.SHARED_FC = True
# Check for stable gradients. When inf is detected, skip the update.
# This prevents an occasional bad sample from exploding the model.
# The threshold below is the allows percent of bad samples.
# 0.0 is off, and 0.01 is recommended for minor robustness to exploding.
cfg.MODEL.STABILIZE = 0.01
# Whether or not to use the dimension priors
cfg.MODEL.ROI_CUBE_HEAD.DIMS_PRIORS_ENABLED = True
# How prior dimensions should be computed?
# The supported modes are ["exp", "sigmoid"]
# where exp is unbounded and sigmoid is bounded
# between +- 3 standard deviations from the mean.
cfg.MODEL.ROI_CUBE_HEAD.DIMS_PRIORS_FUNC = 'exp'
# weight for confidence loss. 0 is off.
cfg.MODEL.ROI_CUBE_HEAD.USE_CONFIDENCE = 1.0
# Loss weights for XY, Z, Dims, Pose
cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_3D = 1.0
cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_XY = 1.0
cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_Z = 1.0
cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_DIMS = 1.0
cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_POSE = 1.0
cfg.MODEL.DLA = CN()
# Supported types for DLA backbones are...
# dla34, dla46_c, dla46x_c, dla60x_c, dla60, dla60x, dla102x, dla102x2, dla169
cfg.MODEL.DLA.TYPE = 'dla34'
# Only available for dla34, dla60, dla102
cfg.MODEL.DLA.TRICKS = False
# A joint loss for the disentangled loss.
# All predictions are computed using a corner
# or chamfers loss depending on chamfer_pose!
# Recommened to keep this weight small: [0.05, 0.5]
cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_JOINT = 1.0
# sgd, adam, adam+amsgrad, adamw, adamw+amsgrad
cfg.SOLVER.TYPE = 'sgd'
cfg.MODEL.RESNETS.TORCHVISION = True
cfg.TEST.DETECTIONS_PER_IMAGE = 100
cfg.TEST.VISIBILITY_THRES = 1/2.0
cfg.TEST.TRUNCATION_THRES = 1/2.0
cfg.INPUT.RANDOM_FLIP = "horizontal"
# When True, we will use localization uncertainty
# as the new IoUness score in the RPN.
cfg.MODEL.RPN.OBJECTNESS_UNCERTAINTY = 'IoUness'
# If > 0.0 this is the scaling factor that will be applied to
# an RoI 2D box before doing any pooling to give more context.
# Ex. 1.5 makes width and height 50% larger.
cfg.MODEL.ROI_CUBE_HEAD.SCALE_ROI_BOXES = 0.0
# weight path specifically for pretraining (no checkpointables will be loaded)
cfg.MODEL.WEIGHTS_PRETRAIN = ''
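# A minimal sketch (not part of the original file) of how these defaults are meant to be
# used: they extend a stock detectron2 config in place, after which the caller would
# merge an experiment yaml on top.
def _example_extended_cfg():
    from detectron2.config import get_cfg
    cfg = get_cfg()
    get_cfg_defaults(cfg)   # adds the Cube R-CNN keys defined above
    return cfg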
| 6,155 | 37.716981 | 82 |
py
|
omni3d
|
omni3d-main/cubercnn/config/__init__.py
|
from .config import *
| 21 | 21 | 21 |
py
|
omni3d
|
omni3d-main/cubercnn/vis/vis.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import math
import torch
from copy import deepcopy
from pytorch3d.structures.meshes import join_meshes_as_scene
from pytorch3d.transforms.so3 import (
so3_relative_angle,
)
from matplotlib.path import Path
from cubercnn import util
def interp_color(dist, bounds=[0, 1], color_lo=(0,0, 250), color_hi=(0, 250, 250)):
percent = (dist - bounds[0]) / (bounds[1] - bounds[0])
b = color_lo[0] * (1 - percent) + color_hi[0] * percent
g = color_lo[1] * (1 - percent) + color_hi[1] * percent
r = color_lo[2] * (1 - percent) + color_hi[2] * percent
return (b, g, r)
def draw_bev(canvas_bev, z3d, l3d, w3d, x3d, ry3d, color=(0, 200, 200), scale=1, thickness=2):
w = l3d * scale
l = w3d * scale
x = x3d * scale
z = z3d * scale
r = ry3d*-1
corners1 = np.array([
[-w / 2, -l / 2, 1],
[+w / 2, -l / 2, 1],
[+w / 2, +l / 2, 1],
[-w / 2, +l / 2, 1]
])
ry = np.array([
[+math.cos(r), -math.sin(r), 0],
[+math.sin(r), math.cos(r), 0],
[0, 0, 1],
])
corners2 = ry.dot(corners1.T).T
corners2[:, 0] += w/2 + x + canvas_bev.shape[1] / 2
corners2[:, 1] += l/2 + z
draw_line(canvas_bev, corners2[0], corners2[1], color=color, thickness=thickness)
draw_line(canvas_bev, corners2[1], corners2[2], color=color, thickness=thickness)
draw_line(canvas_bev, corners2[2], corners2[3], color=color, thickness=thickness)
draw_line(canvas_bev, corners2[3], corners2[0], color=color, thickness=thickness)
def draw_line(im, v0, v1, color=(0, 200, 200), thickness=1):
cv2.line(im, (int(v0[0]), int(v0[1])), (int(v1[0]), int(v1[1])), color, thickness)
def create_colorbar(height, width, color_lo=(0,0, 250), color_hi=(0, 250, 250)):
im = np.zeros([height, width, 3])
for h in range(0, height):
color = interp_color(h + 0.5, [0, height], color_hi, color_lo)
im[h, :, 0] = (color[0])
im[h, :, 1] = (color[1])
im[h, :, 2] = (color[2])
return im.astype(np.uint8)
def visualize_from_instances(detections, dataset, dataset_name, min_size_test, output_folder, category_names_official, iteration=''):
vis_folder = os.path.join(output_folder, 'vis')
util.mkdir_if_missing(vis_folder)
log_str = ''
xy_errors = []
z_errors = []
w3d_errors = []
h3d_errors = []
l3d_errors = []
dim_errors = []
ry_errors = []
n_cats = len(category_names_official)
thres = np.sqrt(1/n_cats)
for imind, im_obj in enumerate(detections):
write_sample = ((imind % 50) == 0)
annos = dataset._dataset[imind]['annotations']
gt_boxes_2d = np.array([anno['bbox'] for anno in annos])
if len(gt_boxes_2d)==0:
continue
gt_boxes_2d[:, 2] += gt_boxes_2d[:, 0]
gt_boxes_2d[:, 3] += gt_boxes_2d[:, 1]
gt_boxes_cat = np.array([anno['category_id'] for anno in annos])
if write_sample:
data_obj = dataset[imind]
assert(data_obj['image_id'] == im_obj['image_id'])
im = util.imread(data_obj['file_name'])
K = np.array(im_obj['K'])
K_inv = np.linalg.inv(K)
sf = im_obj['height'] / min_size_test
for instance in im_obj['instances']:
cat = category_names_official[instance['category_id']]
score = instance['score']
x1, y1, w, h = instance['bbox']
x2 = x1 + w
y2 = y1 + h
alpha, h3d, w3d, l3d, x3d, y3d, z3d, ry3d = (-1,)*8
w3d, h3d, l3d = instance['dimensions']
# unproject
cen_2d = np.array(instance['center_2D'] + [1])
z3d = instance['center_cam'][2]
# get rotation (y-axis only)
ry3d = np.array(instance['pose'])
valid_gt_inds = np.flatnonzero(instance['category_id'] == gt_boxes_cat)
if len(valid_gt_inds) > 0:
quality_matrix = util.iou(np.array([[x1, y1, x2, y2]]), gt_boxes_2d[valid_gt_inds])
nearest_gt = quality_matrix.argmax(axis=1)[0]
nearest_gt_iou = quality_matrix.max(axis=1)[0]
valid_match = nearest_gt_iou >= 0.5
else:
valid_match = False
if valid_match:
gt_x1, gt_y1, gt_w, gt_h = annos[valid_gt_inds[nearest_gt]]['bbox']
gt_x3d, gt_y3d, gt_z3d = annos[valid_gt_inds[nearest_gt]]['center_cam']
gt_w3d, gt_h3d, gt_l3d = annos[valid_gt_inds[nearest_gt]]['dimensions']
gt_cen_2d = K @ np.array([gt_x3d, gt_y3d, gt_z3d])
gt_cen_2d /= gt_cen_2d[2]
gt_pose = annos[valid_gt_inds[nearest_gt]]['pose']
gt_ry3d = np.array(gt_pose)
if valid_match:
# compute errors
xy_errors.append(np.sqrt(((cen_2d[:2] - gt_cen_2d[:2])**2).sum()))
z_errors.append(np.abs(z3d - gt_z3d))
w3d_errors.append(np.abs(w3d - gt_w3d))
h3d_errors.append(np.abs(h3d - gt_h3d))
l3d_errors.append(np.abs(l3d - gt_l3d))
dim_errors.append(np.sqrt((w3d - gt_w3d)**2 + (h3d - gt_h3d)**2 + (l3d - gt_l3d)**2))
try:
ry_errors.append(so3_relative_angle(torch.from_numpy(ry3d).unsqueeze(0), torch.from_numpy(gt_ry3d).unsqueeze(0), cos_bound=1).item())
except:
pass
# unproject point to 3D
x3d, y3d, z3d = (K_inv @ (z3d*cen_2d))
# let us visualize the detections now
if write_sample and score > thres:
color = util.get_color(instance['category_id'])
draw_3d_box(im, K, [x3d, y3d, z3d, w3d, h3d, l3d], ry3d, color=color, thickness=int(np.round(3*im.shape[0]/500)), draw_back=False)
draw_text(im, '{}, z={:.1f}, s={:.2f}'.format(cat, z3d, score), [x1, y1, w, h], scale=0.50*im.shape[0]/500, bg_color=color)
if write_sample:
util.imwrite(im, os.path.join(vis_folder, '{:06d}.jpg'.format(imind)))
# safety in case all rotation matrices failed.
if len(ry_errors) == 0:
ry_errors = [1000, 1000]
log_str += dataset_name + 'iter={}, xy({:.2f}), z({:.2f}), whl({:.2f}, {:.2f}, {:.2f}), ry({:.2f})\n'.format(
iteration,
np.mean(xy_errors), np.mean(z_errors),
np.mean(w3d_errors), np.mean(h3d_errors), np.mean(l3d_errors),
np.mean(ry_errors),
)
return log_str
def imshow(im, fig_num=None):
if fig_num is not None: plt.figure(fig_num)
if len(im.shape) == 2:
im = np.tile(im, [3, 1, 1]).transpose([1, 2, 0])
plt.imshow(cv2.cvtColor(im.astype(np.uint8), cv2.COLOR_RGB2BGR))
plt.show()
def draw_scene_view(im, K, meshes, text=None, scale=1000, R=None, T=None, zoom_factor=1.0, mode='front_and_novel', blend_weight=0.80, blend_weight_overlay=1.0, ground_bounds=None, canvas=None, zplane=0.05):
"""
Draws a scene from multiple different modes.
Args:
im (array): the image to draw onto
K (array): the 3x3 matrix for projection to camera to screen
meshes ([Mesh]): a list of meshes to draw into the scene
text ([str]): optional strings to draw per mesh
scale (int): the size of the square novel view canvas (pixels)
R (array): a single 3x3 matrix defining the novel view
T (array): a 3x vector defining the position of the novel view
zoom_factor (float): an optional amount to zoom out (>1) or in (<1)
mode (str): supports ['2D_only', 'front', 'novel', 'front_and_novel'] where
front implies the front-facing camera view and novel is based on R,T
blend_weight (float): blend factor for box edges over the RGB
blend_weight_overlay (float): blends the RGB image with the rendered meshes
        ground_bounds (tuple): max_y3d, x3d_start, x3d_end, z3d_start, z3d_end for the ground floor, or
            None to let the renderer estimate the ground bounds in the novel view itself.
canvas (array): if the canvas doesn't change it can be faster to re-use it. Optional.
zplane (float): a plane of depth to solve intersection when
vertex points project behind the camera plane.
"""
if R is None:
R = util.euler2mat([np.pi/3, 0, 0])
if mode == '2D_only':
im_drawn_rgb = deepcopy(im)
# go in order of reverse depth
for mesh_idx in reversed(np.argsort([mesh.verts_padded().cpu().mean(1)[0, 1] for mesh in meshes])):
mesh = meshes[mesh_idx]
verts3D = mesh.verts_padded()[0].numpy()
verts2D = (K @ verts3D.T) / verts3D[:, -1]
color = [min(255, c*255*1.25) for c in mesh.textures.verts_features_padded()[0,0].tolist()]
x1 = verts2D[0, :].min()
y1 = verts2D[1, :].min()
x2 = verts2D[0, :].max()
y2 = verts2D[1, :].max()
draw_2d_box(im_drawn_rgb, [x1, y1, x2-x1, y2-y1], color=color, thickness=max(2, int(np.round(3*im_drawn_rgb.shape[0]/1250))))
if text is not None:
draw_text(im_drawn_rgb, '{}'.format(text[mesh_idx]), [x1, y1], scale=0.50*im_drawn_rgb.shape[0]/500, bg_color=color)
return im_drawn_rgb
else:
meshes_scene = join_meshes_as_scene(meshes).cuda()
device = meshes_scene.device
meshes_scene.textures = meshes_scene.textures.to(device)
cameras = util.get_camera(K, im.shape[1], im.shape[0]).to(device)
renderer = util.get_basic_renderer(cameras, im.shape[1], im.shape[0], use_color=True).to(device)
if mode in ['front_and_novel', 'front']:
'''
Render full scene from image view
'''
im_drawn_rgb = deepcopy(im)
# save memory if not blending the render
if blend_weight > 0:
rendered_img, _ = renderer(meshes_scene)
sil_mask = rendered_img[0, :, :, 3].cpu().numpy() > 0.1
rendered_img = (rendered_img[0, :, :, :3].cpu().numpy() * 255).astype(np.uint8)
im_drawn_rgb[sil_mask] = rendered_img[sil_mask] * blend_weight + im_drawn_rgb[sil_mask] * (1 - blend_weight)
'''
Draw edges for image view
'''
# go in order of reverse depth
for mesh_idx in reversed(np.argsort([mesh.verts_padded().cpu().mean(1)[0, 1] for mesh in meshes])):
mesh = meshes[mesh_idx]
verts3D = mesh.verts_padded()[0].cpu().numpy()
verts2D = (K @ verts3D.T) / verts3D[:, -1]
color = [min(255, c*255*1.25) for c in mesh.textures.verts_features_padded()[0,0].tolist()]
draw_3d_box_from_verts(
im_drawn_rgb, K, verts3D, color=color,
thickness=max(2, int(np.round(3*im_drawn_rgb.shape[0]/1250))),
draw_back=False, draw_top=False, zplane=zplane
)
x1 = verts2D[0, :].min() #min(verts2D[0, (verts2D[0, :] > 0) & (verts2D[0, :] < im_drawn_rgb.shape[1])])
y1 = verts2D[1, :].min() #min(verts2D[1, (verts2D[1, :] > 0) & (verts2D[1, :] < im_drawn_rgb.shape[0])])
if text is not None:
draw_text(im_drawn_rgb, '{}'.format(text[mesh_idx]), [x1, y1], scale=0.50*im_drawn_rgb.shape[0]/500, bg_color=color)
if blend_weight_overlay < 1.0 and blend_weight_overlay > 0.0:
im_drawn_rgb = im_drawn_rgb * blend_weight_overlay + deepcopy(im) * (1 - blend_weight_overlay)
if mode == 'front':
return im_drawn_rgb
elif mode in ['front_and_novel', 'novel']:
'''
Render from a new view
'''
has_canvas_already = canvas is not None
if not has_canvas_already:
canvas = np.ones((scale, scale, 3))
view_R = torch.from_numpy(R).float().to(device)
if T is None:
center = (meshes_scene.verts_padded().min(1).values + meshes_scene.verts_padded().max(1).values).unsqueeze(0)/2
else:
center = torch.from_numpy(T).float().to(device).view(1, 1, 3)
verts_rotated = meshes_scene.verts_padded().clone()
verts_rotated -= center
verts_rotated = (view_R @ verts_rotated[0].T).T.unsqueeze(0)
K_novelview = deepcopy(K)
K_novelview[0, -1] *= scale / im.shape[1]
K_novelview[1, -1] *= scale / im.shape[0]
cameras = util.get_camera(K_novelview, scale, scale).to(device)
renderer = util.get_basic_renderer(cameras, scale, scale, use_color=True).to(device)
margin = 0.01
if T is None:
max_trials = 10000
zoom_factor = 100.0
zoom_factor_in = zoom_factor
while max_trials:
zoom_factor_in = zoom_factor_in*0.95
verts = verts_rotated.clone()
verts[:, :, -1] += center[:, :, -1]*zoom_factor_in
verts_np = verts.cpu().numpy()
proj = ((K_novelview @ verts_np[0].T) / verts_np[:, :, -1])
# some vertices are extremely close or negative...
# this implies we have zoomed in too much
if (verts[0, :, -1] < 0.25).any():
break
# left or above image
elif (proj[:2, :] < scale*margin).any():
break
# right or below borders
elif (proj[:2, :] > scale*(1 - margin)).any():
break
# everything is in view.
zoom_factor = zoom_factor_in
max_trials -= 1
zoom_out_bias = center[:, :, -1].item()
else:
zoom_out_bias = 1.0
verts_rotated[:, :, -1] += zoom_out_bias*zoom_factor
meshes_novel_view = meshes_scene.clone().update_padded(verts_rotated)
rendered_img, _ = renderer(meshes_novel_view)
im_novel_view = (rendered_img[0, :, :, :3].cpu().numpy() * 255).astype(np.uint8)
sil_mask = rendered_img[0, :, :, 3].cpu().numpy() > 0.1
center_np = center.cpu().numpy()
view_R_np = view_R.cpu().numpy()
if not has_canvas_already:
if ground_bounds is None:
min_x3d, _, min_z3d = meshes_scene.verts_padded().min(1).values[0, :].tolist()
max_x3d, max_y3d, max_z3d = meshes_scene.verts_padded().max(1).values[0, :].tolist()
# go for grid projection, but with extremely bad guess at bounds
x3d_start = np.round(min_x3d - (max_x3d - min_x3d)*50)
x3d_end = np.round(max_x3d + (max_x3d - min_x3d)*50)
z3d_start = np.round(min_z3d - (max_z3d - min_z3d)*50)
z3d_end = np.round(max_z3d + (max_z3d - min_z3d)*50)
grid_xs = np.arange(x3d_start, x3d_end)
grid_zs = np.arange(z3d_start, z3d_end)
xs_mesh, zs_mesh = np.meshgrid(grid_xs, grid_zs)
ys_mesh = np.ones_like(xs_mesh)*max_y3d
point_mesh = np.concatenate((xs_mesh[:, :, np.newaxis], ys_mesh[:, :, np.newaxis], zs_mesh[:, :, np.newaxis]), axis=2)
point_mesh_orig = deepcopy(point_mesh)
mesh_shape = point_mesh.shape
point_mesh = view_R_np @ (point_mesh - center_np).transpose(2, 0, 1).reshape(3, -1)
point_mesh[-1] += zoom_out_bias*zoom_factor
point_mesh[-1, :] = point_mesh[-1, :].clip(0.25)
point_mesh_2D = (K_novelview @ point_mesh) / point_mesh[-1]
point_mesh_2D[-1] = point_mesh[-1]
point_mesh = point_mesh.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
point_mesh_2D = point_mesh_2D.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
maskx = (point_mesh_2D[:, :, 0].T >= -50) & (point_mesh_2D[:, :, 0].T < scale+50) & (point_mesh_2D[:, :, 2].T > 0)
maskz = (point_mesh_2D[:, :, 1].T >= -50) & (point_mesh_2D[:, :, 1].T < scale+50) & (point_mesh_2D[:, :, 2].T > 0)
# invalid scene?
if (not maskz.any()) or (not maskx.any()):
return im, im, canvas
# go for grid projection again!! but with sensible bounds
x3d_start = np.round(point_mesh[:, :, 0].T[maskx].min() - 10)
x3d_end = np.round(point_mesh[:, :, 0].T[maskx].max() + 10)
z3d_start = np.round(point_mesh_orig[:, :, 2].T[maskz].min() - 10)
z3d_end = np.round(point_mesh_orig[:, :, 2].T[maskz].max() + 10)
else:
max_y3d, x3d_start, x3d_end, z3d_start, z3d_end = ground_bounds
grid_xs = np.arange(x3d_start, x3d_end)
grid_zs = np.arange(z3d_start, z3d_end)
xs_mesh, zs_mesh = np.meshgrid(grid_xs, grid_zs)
ys_mesh = np.ones_like(xs_mesh)*max_y3d
point_mesh = np.concatenate((xs_mesh[:, :, np.newaxis], ys_mesh[:, :, np.newaxis], zs_mesh[:, :, np.newaxis]), axis=2)
mesh_shape = point_mesh.shape
point_mesh = view_R_np @ (point_mesh - center_np).transpose(2, 0, 1).reshape(3, -1)
point_mesh[-1] += zoom_out_bias*zoom_factor
point_mesh[-1, :] = point_mesh[-1, :].clip(0.25)
point_mesh_2D = (K_novelview @ point_mesh) / point_mesh[-1]
point_mesh_2D[-1] = point_mesh[-1]
point_mesh = point_mesh.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
point_mesh_2D = point_mesh_2D.reshape(3, mesh_shape[0], mesh_shape[1]).transpose(1, 2, 0)
bg_color = (225,)*3
line_color = (175,)*3
canvas[:, :, 0] = bg_color[0]
canvas[:, :, 1] = bg_color[1]
canvas[:, :, 2] = bg_color[2]
lines_to_draw = set()
for grid_row_idx in range(1, len(grid_zs)):
pre_z = grid_zs[grid_row_idx-1]
cur_z = grid_zs[grid_row_idx]
for grid_col_idx in range(1, len(grid_xs)):
pre_x = grid_xs[grid_col_idx-1]
cur_x = grid_xs[grid_col_idx]
p1 = point_mesh_2D[grid_row_idx-1, grid_col_idx-1]
valid1 = p1[-1] > 0
p2 = point_mesh_2D[grid_row_idx-1, grid_col_idx]
valid2 = p2[-1] > 0
if valid1 and valid2:
line = (tuple(p1[:2].astype(int).tolist()), tuple(p2[:2].astype(int).tolist()))
lines_to_draw.add(line)
# draw vertical line from the previous row
p1 = point_mesh_2D[grid_row_idx-1, grid_col_idx-1]
valid1 = p1[-1] > 0
p2 = point_mesh_2D[grid_row_idx, grid_col_idx-1]
valid2 = p2[-1] > 0
if valid1 and valid2:
line = (tuple(p1[:2].astype(int).tolist()), tuple(p2[:2].astype(int).tolist()))
lines_to_draw.add(line)
for line in lines_to_draw:
draw_line(canvas, line[0], line[1], color=line_color, thickness=max(1, int(np.round(3*scale/1250))))
im_novel_view[~sil_mask] = canvas[~sil_mask]
'''
Draw edges for novel view
'''
# apply novel view to meshes
meshes_novel = []
for mesh in meshes:
mesh_novel = mesh.clone().to(device)
verts_rotated = mesh_novel.verts_padded()
verts_rotated -= center
verts_rotated = (view_R @ verts_rotated[0].T).T.unsqueeze(0)
verts_rotated[:, :, -1] += zoom_out_bias*zoom_factor
mesh_novel = mesh_novel.update_padded(verts_rotated)
meshes_novel.append(mesh_novel)
# go in order of reverse depth
for mesh_idx in reversed(np.argsort([mesh.verts_padded().cpu().mean(1)[0, 1] for mesh in meshes_novel])):
mesh = meshes_novel[mesh_idx]
verts3D = mesh.verts_padded()[0].cpu().numpy()
verts2D = (K_novelview @ verts3D.T) / verts3D[:, -1]
color = [min(255, c*255*1.25) for c in mesh.textures.verts_features_padded()[0,0].tolist()]
draw_3d_box_from_verts(
im_novel_view, K_novelview, verts3D, color=color,
thickness=max(2, int(np.round(3*im_novel_view.shape[0]/1250))),
draw_back=False, draw_top=False, zplane=zplane
)
x1 = verts2D[0, :].min()
y1 = verts2D[1, :].min()
if text is not None:
draw_text(im_novel_view, '{}'.format(text[mesh_idx]), [x1, y1], scale=0.50*im_novel_view.shape[0]/500, bg_color=color)
if mode == 'front_and_novel':
return im_drawn_rgb, im_novel_view, canvas
else:
return im_novel_view, canvas
else:
raise ValueError('No visualization written for {}'.format(mode))
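# A minimal usage sketch (hedged: "im", "K", "meshes" and "labels" must be prepared by
# the caller, and the mesh construction helpers are not shown here; 'vis_front.jpg' is a
# placeholder path): rendering only the camera-facing view.
#
#   im_front = draw_scene_view(im, K, meshes, text=labels, mode='front')
#   util.imwrite(im_front, 'vis_front.jpg')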
def get_polygon_grid(im, poly_verts):
nx = im.shape[1]
ny = im.shape[0]
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x, y = x.flatten(), y.flatten()
points = np.vstack((x, y)).T
path = Path(poly_verts)
grid = path.contains_points(points)
grid = grid.reshape((ny, nx))
return grid
def draw_circle(im, pos, radius=5, thickness=1, color=(250, 100, 100), fill=True):
if fill: thickness = -1
cv2.circle(im, (int(pos[0]), int(pos[1])), radius, color=color, thickness=thickness)
def draw_transparent_polygon(im, verts, blend=0.5, color=(0, 255, 255)):
mask = get_polygon_grid(im, verts[:4, :])
im[mask, 0] = im[mask, 0] * blend + (1 - blend) * color[0]
im[mask, 1] = im[mask, 1] * blend + (1 - blend) * color[1]
im[mask, 2] = im[mask, 2] * blend + (1 - blend) * color[2]
def draw_3d_box_from_verts(im, K, verts3d, color=(0, 200, 200), thickness=1, draw_back=False, draw_top=False, zplane=0.05, eps=1e-4):
"""
    Draws a 3D cuboid onto the image given its 8 vertices in camera space.
Args:
im (array): the image to draw onto
K (array): the 3x3 matrix for projection to camera to screen
verts3d (array): the 8x3 matrix of vertices in camera space
color (tuple): color in RGB scaled [0, 255)
thickness (float): the line thickness for opencv lines
draw_back (bool): whether a backface should be highlighted
draw_top (bool): whether the top face should be highlighted
zplane (float): a plane of depth to solve intersection when
vertex points project behind the camera plane.
"""
if isinstance(K, torch.Tensor):
K = K.detach().cpu().numpy()
if isinstance(verts3d, torch.Tensor):
verts3d = verts3d.detach().cpu().numpy()
# reorder
bb3d_lines_verts = [[0, 1], [1, 2], [2, 3], [3, 0], [1, 5], [5, 6], [6, 2], [4, 5], [4, 7], [6, 7], [0, 4], [3, 7]]
    # define the back and top vertex planes
back_idxs = [4, 0, 3, 7]
top_idxs = [4, 0, 1, 5]
for (i, j) in bb3d_lines_verts:
v0 = verts3d[i]
v1 = verts3d[j]
z0, z1 = v0[-1], v1[-1]
if (z0 >= zplane or z1 >= zplane):
            # compute the intersection of v0, v1 and the zplane
s = (zplane - z0) / max((z1 - z0), eps)
new_v = v0 + s * (v1 - v0)
if (z0 < zplane) and (z1 >= zplane):
# i0 vertex is behind the plane
v0 = new_v
elif (z0 >= zplane) and (z1 < zplane):
# i1 vertex is behind the plane
v1 = new_v
v0_proj = (K @ v0)/max(v0[-1], eps)
v1_proj = (K @ v1)/max(v1[-1], eps)
# project vertices
cv2.line(im,
(int(v0_proj[0]), int(v0_proj[1])),
(int(v1_proj[0]), int(v1_proj[1])),
color, thickness
)
# dont draw the planes if a vertex is out of bounds
draw_back &= np.all(verts3d[back_idxs, -1] >= zplane)
draw_top &= np.all(verts3d[top_idxs, -1] >= zplane)
if draw_back or draw_top:
# project to image
verts2d = (K @ verts3d.T).T
verts2d /= verts2d[:, -1][:, np.newaxis]
if type(verts2d) == torch.Tensor:
verts2d = verts2d.detach().cpu().numpy()
if draw_back:
draw_transparent_polygon(im, verts2d[back_idxs, :2], blend=0.5, color=color)
if draw_top:
draw_transparent_polygon(im, verts2d[top_idxs, :2], blend=0.5, color=color)
def draw_3d_box(im, K, box3d, R, color=(0, 200, 200), thickness=1, draw_back=False, draw_top=False, view_R=None, view_T=None):
verts2d, verts3d = util.get_cuboid_verts(K, box3d, R, view_R=view_R, view_T=view_T)
draw_3d_box_from_verts(im, K, verts3d, color=color, thickness=thickness, draw_back=draw_back, draw_top=draw_top)
def draw_text(im, text, pos, scale=0.4, color='auto', font=cv2.FONT_HERSHEY_SIMPLEX, bg_color=(0, 255, 255),
blend=0.33, lineType=1):
text = str(text)
pos = [int(pos[0]), int(pos[1])]
if color == 'auto':
if bg_color is not None:
color = (0, 0, 0) if ((bg_color[0] + bg_color[1] + bg_color[2])/3) > 127.5 else (255, 255, 255)
else:
color = (0, 0, 0)
if bg_color is not None:
text_size, _ = cv2.getTextSize(text, font, scale, lineType)
x_s = int(np.clip(pos[0], a_min=0, a_max=im.shape[1]))
x_e = int(np.clip(x_s + text_size[0] - 1 + 4, a_min=0, a_max=im.shape[1]))
y_s = int(np.clip(pos[1] - text_size[1] - 2, a_min=0, a_max=im.shape[0]))
y_e = int(np.clip(pos[1] + 1 - 2, a_min=0, a_max=im.shape[0]))
im[y_s:y_e + 1, x_s:x_e + 1, 0] = im[y_s:y_e + 1, x_s:x_e + 1, 0]*blend + bg_color[0] * (1 - blend)
im[y_s:y_e + 1, x_s:x_e + 1, 1] = im[y_s:y_e + 1, x_s:x_e + 1, 1]*blend + bg_color[1] * (1 - blend)
im[y_s:y_e + 1, x_s:x_e + 1, 2] = im[y_s:y_e + 1, x_s:x_e + 1, 2]*blend + bg_color[2] * (1 - blend)
pos[0] = int(np.clip(pos[0] + 2, a_min=0, a_max=im.shape[1]))
pos[1] = int(np.clip(pos[1] - 2, a_min=0, a_max=im.shape[0]))
cv2.putText(im, text, tuple(pos), font, scale, color, lineType)
def draw_transparent_square(im, pos, alpha=1, radius=5, color=(250, 100, 100)):
l = pos[1] - radius
r = pos[1] + radius
t = pos[0] - radius
b = pos[0] + radius
if (np.array([l, r, t, b]) >= 0).any():
l = np.clip(np.floor(l), 0, im.shape[0]).astype(int)
r = np.clip(np.floor(r), 0, im.shape[0]).astype(int)
t = np.clip(np.floor(t), 0, im.shape[1]).astype(int)
b = np.clip(np.floor(b), 0, im.shape[1]).astype(int)
# blend
im[l:r + 1, t:b + 1, 0] = im[l:r + 1, t:b + 1, 0] * alpha + color[0] * (1 - alpha)
im[l:r + 1, t:b + 1, 1] = im[l:r + 1, t:b + 1, 1] * alpha + color[1] * (1 - alpha)
im[l:r + 1, t:b + 1, 2] = im[l:r + 1, t:b + 1, 2] * alpha + color[2] * (1 - alpha)
def draw_2d_box(im, box, color=(0, 200, 200), thickness=1):
x = box[0]
y = box[1]
w = box[2]
h = box[3]
x2 = (x + w) - 1
y2 = (y + h) - 1
cv2.rectangle(im, (int(x), int(y)), (int(x2), int(y2)), color, thickness)
def imhstack(im1, im2):
sf = im1.shape[0] / im2.shape[0]
if sf > 1:
im2 = cv2.resize(im2, (int(im2.shape[1] / sf), im1.shape[0]))
elif sf < 1:
im1 = cv2.resize(im1, (int(im1.shape[1] / sf), im2.shape[0]))
im_concat = np.hstack((im1, im2))
return im_concat
def imvstack(im1, im2):
sf = im1.shape[1] / im2.shape[1]
if sf > 1:
im2 = cv2.resize(im2, (int(im2.shape[0] / sf), im1.shape[1]))
elif sf < 1:
im1 = cv2.resize(im1, (int(im1.shape[0] / sf), im2.shape[1]))
im_concat = np.vstack((im1, im2))
return im_concat
| 29,091 | 38.154778 | 206 |
py
|
omni3d
|
omni3d-main/cubercnn/vis/logperf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from termcolor import colored
import itertools
from tabulate import tabulate
import logging
logger = logging.getLogger(__name__)
def print_ap_category_histogram(dataset, results):
"""
Prints AP performance for each category.
Args:
results: dictionary; each entry contains information for a dataset
"""
num_classes = len(results)
N_COLS = 9
data = list(
itertools.chain(
*[
[
cat,
out["AP2D"],
out["AP3D"],
]
for cat, out in results.items()
]
)
)
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "AP2D", "AP3D"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
logger.info(
"Performance for each of {} categories on {}:\n".format(num_classes, dataset)
+ colored(table, "cyan")
)
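# A minimal sketch (the dataset name and numbers are made up, not real results) of the
# mapping this function expects: one entry per category with "AP2D"/"AP3D" values.
#
#   print_ap_category_histogram("Omni3D_Test", {
#       "chair": {"AP2D": 42.1, "AP3D": 30.5},
#       "table": {"AP2D": 38.7, "AP3D": 27.2},
#   })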
def print_ap_analysis_histogram(results):
"""
Prints AP performance for various IoU thresholds and (near, medium, far) objects.
Args:
results: dictionary. Each entry in results contains outputs for a dataset
"""
metric_names = ["AP2D", "AP3D", "AP3D@15", "AP3D@25", "AP3D@50", "AP3D-N", "AP3D-M", "AP3D-F"]
N_COLS = 10
data = []
for name, metrics in results.items():
data_item = [name, metrics["iters"], metrics["AP2D"], metrics["AP3D"], metrics["AP3D@15"], metrics["AP3D@25"], metrics["AP3D@50"], metrics["AP3D-N"], metrics["AP3D-M"], metrics["AP3D-F"]]
data.append(data_item)
table = tabulate(
data,
headers=["Dataset", "#iters", "AP2D", "AP3D", "AP3D@15", "AP3D@25", "AP3D@50", "AP3D-N", "AP3D-M", "AP3D-F"],
tablefmt="grid",
numalign="left",
stralign="center",
)
logger.info(
"Per-dataset performance analysis on test set:\n"
+ colored(table, "cyan")
)
def print_ap_dataset_histogram(results):
"""
Prints AP performance for each dataset.
Args:
        results: dictionary. Each entry in results contains outputs for a dataset
"""
metric_names = ["AP2D", "AP3D"]
N_COLS = 4
data = []
for name, metrics in results.items():
data_item = [name, metrics["iters"], metrics["AP2D"], metrics["AP3D"]]
data.append(data_item)
table = tabulate(
data,
headers=["Dataset", "#iters", "AP2D", "AP3D"],
tablefmt="grid",
numalign="left",
stralign="center",
)
logger.info(
"Per-dataset performance on test set:\n"
+ colored(table, "cyan")
)
def print_ap_omni_histogram(results):
"""
Prints AP performance for Omni3D dataset.
Args:
results: list of dicts. Each entry in results contains outputs for a dataset
"""
metric_names = ["AP2D", "AP3D"]
N_COLS = 4
data = []
for name, metrics in results.items():
data_item = [name, metrics["iters"], metrics["AP2D"], metrics["AP3D"]]
data.append(data_item)
table = tabulate(
data,
headers=["Dataset", "#iters", "AP2D", "AP3D"],
tablefmt="grid",
numalign="left",
stralign="center",
)
logger.info("Omni3D performance on test set. The numbers below should be used to compare to others approaches on Omni3D, such as Cube R-CNN")
logger.info(
"Performance on Omni3D:\n"
+ colored(table, "magenta")
)
| 3,654 | 29.974576 | 195 |
py
|
omni3d
|
omni3d-main/cubercnn/vis/__init__.py
|
from .vis import *
| 19 | 19 | 19 |
py
|
omni3d
|
omni3d-main/cubercnn/util/model_zoo.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.utils.file_io import PathHandler, PathManager
__all__ = ["CubeRCNNHandler"]
class CubeRCNNHandler(PathHandler):
"""
Resolves CubeRCNN's model zoo files.
"""
PREFIX = "cubercnn://"
CUBERCNN_PREFIX = "https://dl.fbaipublicfiles.com/cubercnn/"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _get_local_path(self, path):
name = path[len(self.PREFIX) :]
return PathManager.get_local_path(self.CUBERCNN_PREFIX + name)
def _open(self, path, mode="r", **kwargs):
return PathManager.open(self._get_local_path(path), mode, **kwargs)
PathManager.register_handler(CubeRCNNHandler())
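# A minimal usage sketch (hedged: the checkpoint name below is a placeholder, not a
# verified entry in the model zoo): once registered, the handler lets PathManager
# resolve "cubercnn://" paths against the public download URL.
#
#   local_path = PathManager.get_local_path("cubercnn://some_model/model_final.pth")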
| 724 | 28 | 75 |
py
|
omni3d
|
omni3d-main/cubercnn/util/math_util.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import numpy as np
import pandas as pd
from typing import Tuple, List
from copy import copy
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import SoftPhongShader
import cv2
import torch
from pytorch3d.structures import Meshes
from detectron2.structures import BoxMode
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures.meshes import (
Meshes,
)
from pytorch3d.renderer import (
PerspectiveCameras,
RasterizationSettings,
MeshRasterizer
)
from pytorch3d.renderer import (
PerspectiveCameras,
SoftSilhouetteShader,
RasterizationSettings,
MeshRasterizer
)
from detectron2.data import (
MetadataCatalog,
)
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.renderer import MeshRenderer as MR
UNIT_CUBE = np.array([
[-0.5, -0.5, -0.5],
[ 0.5, -0.5, -0.5],
[ 0.5, 0.5, -0.5],
[-0.5, 0.5, -0.5],
[-0.5, -0.5, 0.5],
[ 0.5, -0.5, 0.5],
[ 0.5, 0.5, 0.5],
[-0.5, 0.5, 0.5]
])
def upto_2Pi(val):
out = val
# constrain between [0, 2pi)
while out >= 2*math.pi: out -= math.pi * 2
while out < 0: out += math.pi * 2
return out
def upto_Pi(val):
out = val
# constrain between [0, pi)
while out >= math.pi: out -= math.pi
while out < 0: out += math.pi
return out
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
# adapted from https://www.learnopencv.com/rotation-matrix-to-euler-angles/
def mat2euler(R):
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
#singular = sy < 1e-6
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
return np.array([x, y, z])
# Calculates Rotation Matrix given euler angles.
# adapted from https://www.learnopencv.com/rotation-matrix-to-euler-angles/
def euler2mat(euler):
R_x = np.array([[1, 0, 0],
[0, math.cos(euler[0]), -math.sin(euler[0])],
[0, math.sin(euler[0]), math.cos(euler[0])]
])
R_y = np.array([[math.cos(euler[1]), 0, math.sin(euler[1])],
[0, 1, 0],
[-math.sin(euler[1]), 0, math.cos(euler[1])]
])
R_z = np.array([[math.cos(euler[2]), -math.sin(euler[2]), 0],
[math.sin(euler[2]), math.cos(euler[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
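# A small self-check sketch (not part of the original API): away from the gimbal-lock
# singularity, mat2euler inverts euler2mat.
def _example_euler_roundtrip():
    angles = np.array([0.1, -0.4, 0.25])
    return np.allclose(mat2euler(euler2mat(angles)), angles, atol=1e-6)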
def to_float_tensor(input):
data_type = type(input)
if data_type != torch.Tensor:
input = torch.tensor(input)
return input.float()
def get_cuboid_verts_faces(box3d=None, R=None):
"""
Computes vertices and faces from a 3D cuboid representation.
Args:
        box3d (flexible): [[X Y Z W H L]]
R (flexible): [np.array(3x3)]
Returns:
verts: the 3D vertices of the cuboid in camera space
faces: the vertex indices per face
"""
if box3d is None:
box3d = [0, 0, 0, 1, 1, 1]
# make sure types are correct
box3d = to_float_tensor(box3d)
if R is not None:
R = to_float_tensor(R)
squeeze = len(box3d.shape) == 1
if squeeze:
box3d = box3d.unsqueeze(0)
if R is not None:
R = R.unsqueeze(0)
n = len(box3d)
x3d = box3d[:, 0].unsqueeze(1)
y3d = box3d[:, 1].unsqueeze(1)
z3d = box3d[:, 2].unsqueeze(1)
w3d = box3d[:, 3].unsqueeze(1)
h3d = box3d[:, 4].unsqueeze(1)
l3d = box3d[:, 5].unsqueeze(1)
'''
v4_____________________v5
/| /|
/ | / |
/ | / |
/___|_________________/ |
v0| | |v1 |
| | | |
| | | |
| | | |
| |_________________|___|
| / v7 | /v6
| / | /
| / | /
|/_____________________|/
v3 v2
'''
verts = to_float_tensor(torch.zeros([n, 3, 8], device=box3d.device))
# setup X
verts[:, 0, [0, 3, 4, 7]] = -l3d / 2
verts[:, 0, [1, 2, 5, 6]] = l3d / 2
# setup Y
verts[:, 1, [0, 1, 4, 5]] = -h3d / 2
verts[:, 1, [2, 3, 6, 7]] = h3d / 2
# setup Z
verts[:, 2, [0, 1, 2, 3]] = -w3d / 2
verts[:, 2, [4, 5, 6, 7]] = w3d / 2
if R is not None:
# rotate
verts = R @ verts
# translate
verts[:, 0, :] += x3d
verts[:, 1, :] += y3d
verts[:, 2, :] += z3d
verts = verts.transpose(1, 2)
faces = torch.tensor([
[0, 1, 2], # front TR
[2, 3, 0], # front BL
[1, 5, 6], # right TR
[6, 2, 1], # right BL
[4, 0, 3], # left TR
[3, 7, 4], # left BL
[5, 4, 7], # back TR
[7, 6, 5], # back BL
[4, 5, 1], # top TR
[1, 0, 4], # top BL
[3, 2, 6], # bottom TR
[6, 7, 3], # bottom BL
]).float().unsqueeze(0).repeat([n, 1, 1])
if squeeze:
verts = verts.squeeze()
faces = faces.squeeze()
return verts, faces.to(verts.device)
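# A minimal sketch (not part of the original API): an axis-aligned unit cuboid at the
# origin; its vertices match UNIT_CUBE above up to ordering.
def _example_unit_cuboid():
    verts, faces = get_cuboid_verts_faces([0, 0, 0, 1, 1, 1], R=None)
    return verts.shape, faces.shape   # torch.Size([8, 3]), torch.Size([12, 3])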
def get_cuboid_verts(K, box3d, R=None, view_R=None, view_T=None):
# make sure types are correct
K = to_float_tensor(K)
box3d = to_float_tensor(box3d)
if R is not None:
R = to_float_tensor(R)
squeeze = len(box3d.shape) == 1
if squeeze:
box3d = box3d.unsqueeze(0)
if R is not None:
R = R.unsqueeze(0)
n = len(box3d)
if len(K.shape) == 2:
K = K.unsqueeze(0).repeat([n, 1, 1])
corners_3d, _ = get_cuboid_verts_faces(box3d, R)
if view_T is not None:
corners_3d -= view_T.view(1, 1, 3)
if view_R is not None:
corners_3d = (view_R @ corners_3d[0].T).T.unsqueeze(0)
if view_T is not None:
corners_3d[:, :, -1] += view_T.view(1, 1, 3)[:, :, -1]*1.25
# project to 2D
corners_2d = K @ corners_3d.transpose(1, 2)
corners_2d[:, :2, :] = corners_2d[:, :2, :] / corners_2d[:, 2, :].unsqueeze(1)
corners_2d = corners_2d.transpose(1, 2)
if squeeze:
corners_3d = corners_3d.squeeze()
corners_2d = corners_2d.squeeze()
return corners_2d, corners_3d
def approx_eval_resolution(h, w, scale_min=0, scale_max=1e10):
"""
    Approximates the resolution that an image with h x w resolution would
    be resized to when run through a model which constrains the scale to a min and max.
Args:
h (int): input resolution height
w (int): input resolution width
        scale_min (int): minimum scale allowed to resize to
        scale_max (int): maximum scale allowed to resize to
Returns:
h (int): output resolution height
w (int): output resolution width
sf (float): scaling factor that was applied
which can convert from original --> network resolution.
"""
orig_h = h
# first resize to min
sf = scale_min / min(h, w)
h *= sf
w *= sf
# next resize to max
sf = min(scale_max / max(h, w), 1.0)
h *= sf
w *= sf
return h, w, h/orig_h
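# A worked sketch (the 512/1333 sizes are assumed typical test settings, not values
# taken from this file): a 480x640 image is first scaled by 512/480, stays under the
# maximum, and the returned scaling factor is 512/480 ~= 1.067.
def _example_eval_resolution():
    return approx_eval_resolution(480, 640, scale_min=512, scale_max=1333)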
def compute_priors(cfg, datasets, max_cluster_rounds=1000, min_points_for_std=5):
"""
Computes priors via simple averaging or a custom K-Means clustering.
"""
annIds = datasets.getAnnIds()
anns = datasets.loadAnns(annIds)
data_raw = []
category_names = MetadataCatalog.get('omni3d_model').thing_classes
virtual_depth = cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_DEPTH
virtual_focal = cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_FOCAL
test_scale_min = cfg.INPUT.MIN_SIZE_TEST
test_scale_max = cfg.INPUT.MAX_SIZE_TEST
'''
Accumulate the annotations while discarding the 2D center information
(hence, keeping only the 2D and 3D scale information, and properties.)
'''
for ann_idx, ann in enumerate(anns):
category_name = ann['category_name'].lower()
ignore = ann['ignore']
dataset_id = ann['dataset_id']
image_id = ann['image_id']
fy = datasets.imgs[image_id]['K'][1][1]
im_h = datasets.imgs[image_id]['height']
im_w = datasets.imgs[image_id]['width']
f = 2 * fy / im_h
if cfg.DATASETS.MODAL_2D_BOXES and 'bbox2D_tight' in ann and ann['bbox2D_tight'][0] != -1:
x, y, w, h = BoxMode.convert(ann['bbox2D_tight'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif cfg.DATASETS.TRUNC_2D_BOXES and 'bbox2D_trunc' in ann and not np.all([val==-1 for val in ann['bbox2D_trunc']]):
x, y, w, h = BoxMode.convert(ann['bbox2D_trunc'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif 'bbox2D_proj' in ann:
x, y, w, h = BoxMode.convert(ann['bbox2D_proj'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
else:
continue
x3d, y3d, z3d = ann['center_cam']
w3d, h3d, l3d = ann['dimensions']
test_h, test_w, sf = approx_eval_resolution(im_h, im_w, test_scale_min, test_scale_max)
# scale everything to test resolution
h *= sf
w *= sf
if virtual_depth:
virtual_to_real = compute_virtual_scale_from_focal_spaces(fy, im_h, virtual_focal, test_h)
real_to_virtual = 1/virtual_to_real
z3d *= real_to_virtual
scale = np.sqrt(h**2 + w**2)
if (not ignore) and category_name in category_names:
data_raw.append([category_name, w, h, x3d, y3d, z3d, w3d, h3d, l3d, w3d*h3d*l3d, dataset_id, image_id, fy, f, scale])
# TODO pandas is fairly inefficient to rely on for large scale.
df_raw = pd.DataFrame(data_raw, columns=[
'name',
'w', 'h', 'x3d', 'y3d', 'z3d',
'w3d', 'h3d', 'l3d', 'volume',
'dataset', 'image',
'fy', 'f', 'scale'
])
priors_bins = []
priors_dims_per_cat = []
priors_z3d_per_cat = []
priors_y3d_per_cat = []
# compute priors for z and y globally
priors_z3d = [df_raw.z3d.mean(), df_raw.z3d.std()]
priors_y3d = [df_raw.y3d.mean(), df_raw.y3d.std()]
n_bins = cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS
# Each prior is pre-computed per category
for cat in category_names:
df_cat = df_raw[df_raw.name == cat]
'''
First compute static variable statistics
'''
scales = torch.FloatTensor(np.array(df_cat.scale))
n = len(scales)
if n > 0:
priors_dims_per_cat.append([[df_cat.w3d.mean(), df_cat.h3d.mean(), df_cat.l3d.mean()], [df_cat.w3d.std(), df_cat.h3d.std(), df_cat.l3d.std()]])
priors_z3d_per_cat.append([df_cat.z3d.mean(), df_cat.z3d.std()])
priors_y3d_per_cat.append([df_cat.y3d.mean(), df_cat.y3d.std()])
else:
# dummy data.
priors_dims_per_cat.append([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
priors_z3d_per_cat.append([50, 50])
priors_y3d_per_cat.append([1, 10])
'''
Next compute Z cluster statistics based on y and area
'''
def compute_cluster_scale_mean(scales, assignments, n_bins, match_quality):
cluster_scales = []
for bin in range(n_bins):
in_cluster = assignments==bin
if in_cluster.sum() < min_points_for_std:
in_cluster[match_quality[:, bin].topk(min_points_for_std)[1]] = True
scale = scales[in_cluster].mean()
cluster_scales.append(scale.item())
return torch.FloatTensor(cluster_scales)
if n_bins > 1:
if n < min_points_for_std:
print('Warning {} category has only {} valid samples...'.format(cat, n))
# dummy data since category doesn't have available samples.
max_scale = cfg.MODEL.ANCHOR_GENERATOR.SIZES[-1][-1]
min_scale = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0][0]
base = (max_scale / min_scale) ** (1 / (n_bins - 1))
cluster_scales = np.array([min_scale * (base ** i) for i in range(0, n_bins)])
                # default values are unused anyway in training, but range linearly
                # from 100 to 1 and ascend with 2D scale.
bin_priors_z = [[b, 15] for b in np.arange(100, 1, -(100-1)/n_bins)]
priors_bins.append((cat, cluster_scales.tolist(), bin_priors_z))
assert len(bin_priors_z) == n_bins, 'Broken default bin scaling.'
else:
max_scale = scales.max()
min_scale = scales.min()
base = (max_scale / min_scale) ** (1 / (n_bins - 1))
cluster_scales = torch.FloatTensor([min_scale * (base ** i) for i in range(0, n_bins)])
best_score = -np.inf
for round in range(max_cluster_rounds):
# quality scores for gts and clusters (n x n_bins)
match_quality = -(cluster_scales.unsqueeze(0) - scales.unsqueeze(1)).abs()
# assign to best clusters
scores, assignments_round = match_quality.max(1)
round_score = scores.mean().item()
if np.round(round_score, 5) > best_score:
best_score = round_score
assignments = assignments_round
# make new clusters
cluster_scales = compute_cluster_scale_mean(scales, assignments, n_bins, match_quality)
else:
break
bin_priors_z = []
for bin in range(n_bins):
in_cluster = assignments == bin
# not enough in the cluster to compute reliable stats?
# fill it with the topk others
if in_cluster.sum() < min_points_for_std:
in_cluster[match_quality[:, bin].topk(min_points_for_std)[1]] = True
# move to numpy for indexing pandas
in_cluster = in_cluster.numpy()
z3d_mean = df_cat.z3d[in_cluster].mean()
z3d_std = df_cat.z3d[in_cluster].std()
bin_priors_z.append([z3d_mean, z3d_std])
priors_bins.append((cat, cluster_scales.numpy().tolist(), bin_priors_z))
priors = {
'priors_dims_per_cat': priors_dims_per_cat,
'priors_z3d_per_cat': priors_z3d_per_cat,
'priors_y3d_per_cat': priors_y3d_per_cat,
'priors_bins': priors_bins,
'priors_y3d': priors_y3d,
'priors_z3d': priors_z3d,
}
return priors
def convert_3d_box_to_2d(K, box3d, R=None, clipw=0, cliph=0, XYWH=True, min_z=0.20):
"""
Converts a 3D box to a 2D box via projection.
Args:
K (np.array): intrinsics matrix 3x3
        box3d (flexible): [[X Y Z W H L]]
R (flexible): [np.array(3x3)]
clipw (int): clip invalid X to the image bounds. Image width is usually used here.
cliph (int): clip invalid Y to the image bounds. Image height is usually used here.
XYWH (bool): returns in XYWH if true, otherwise XYXY format.
min_z: the threshold for how close a vertex is allowed to be before being
considered as invalid for projection purposes.
Returns:
box2d (flexible): the 2D box results.
behind_camera (bool): whether the projection has any points behind the camera plane.
fully_behind (bool): all points are behind the camera plane.
"""
# bounds used for vertices behind image plane
topL_bound = torch.tensor([[0, 0, 0]]).float()
topR_bound = torch.tensor([[clipw-1, 0, 0]]).float()
botL_bound = torch.tensor([[0, cliph-1, 0]]).float()
botR_bound = torch.tensor([[clipw-1, cliph-1, 0]]).float()
# make sure types are correct
K = to_float_tensor(K)
box3d = to_float_tensor(box3d)
if R is not None:
R = to_float_tensor(R)
squeeze = len(box3d.shape) == 1
if squeeze:
box3d = box3d.unsqueeze(0)
if R is not None:
R = R.unsqueeze(0)
n = len(box3d)
verts2d, verts3d = get_cuboid_verts(K, box3d, R)
# any boxes behind camera plane?
verts_behind = verts2d[:, :, 2] <= min_z
behind_camera = verts_behind.any(1)
verts_signs = torch.sign(verts3d)
# check for any boxes projected behind image plane corners
topL = verts_behind & (verts_signs[:, :, 0] < 0) & (verts_signs[:, :, 1] < 0)
topR = verts_behind & (verts_signs[:, :, 0] > 0) & (verts_signs[:, :, 1] < 0)
botL = verts_behind & (verts_signs[:, :, 0] < 0) & (verts_signs[:, :, 1] > 0)
botR = verts_behind & (verts_signs[:, :, 0] > 0) & (verts_signs[:, :, 1] > 0)
# clip values to be in bounds for invalid points
verts2d[topL] = topL_bound
verts2d[topR] = topR_bound
verts2d[botL] = botL_bound
verts2d[botR] = botR_bound
x, xi = verts2d[:, :, 0].min(1)
y, yi = verts2d[:, :, 1].min(1)
x2, x2i = verts2d[:, :, 0].max(1)
y2, y2i = verts2d[:, :, 1].max(1)
fully_behind = verts_behind.all(1)
width = x2 - x
height = y2 - y
if XYWH:
box2d = torch.cat((x.unsqueeze(1), y.unsqueeze(1), width.unsqueeze(1), height.unsqueeze(1)), dim=1)
else:
box2d = torch.cat((x.unsqueeze(1), y.unsqueeze(1), x2.unsqueeze(1), y2.unsqueeze(1)), dim=1)
if squeeze:
box2d = box2d.squeeze()
behind_camera = behind_camera.squeeze()
fully_behind = fully_behind.squeeze()
return box2d, behind_camera, fully_behind
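# Illustrative usage (an added sketch, not part of the original file; the intrinsics
# and cuboid values below are made-up assumptions):
#   >>> K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
#   >>> box3d = [0., 0., 10., 2., 2., 2.]   # [X Y Z W H L], centered 10m ahead
#   >>> box2d, behind, fully_behind = convert_3d_box_to_2d(K, box3d, np.eye(3))
#   >>> bool(fully_behind)                  # the cuboid sits fully in front of the camera
#   False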
#
def compute_virtual_scale_from_focal_spaces(f, H, f0, H0):
"""
Computes the scaling factor of depth from f0, H0 to f, H
Args:
f (float): the desired [virtual] focal length (px)
H (float): the desired [virtual] height (px)
f0 (float): the initial [real] focal length (px)
H0 (float): the initial [real] height (px)
Returns:
        the scaling factor float to convert from (f0, H0) --> (f, H)
"""
return (H0 * f) / (f0 * H)
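# Worked example (illustrative only; the focal lengths and heights are assumptions):
# mapping a real camera with f0=1400px, H0=1080px to a virtual one with f=512px,
# H=512px scales depth by (1080*512)/(1400*512) = 1080/1400.
#   >>> round(compute_virtual_scale_from_focal_spaces(512, 512, 1400, 1080), 3)
#   0.771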
def R_to_allocentric(K, R, u=None, v=None):
"""
Convert a rotation matrix or series of rotation matrices to allocentric
representation given a 2D location (u, v) in pixels.
When u or v are not available, we fall back on the principal point of K.
"""
if type(K) == torch.Tensor:
fx = K[:, 0, 0]
fy = K[:, 1, 1]
sx = K[:, 0, 2]
sy = K[:, 1, 2]
n = len(K)
oray = torch.stack(((u - sx)/fx, (v - sy)/fy, torch.ones_like(u))).T
oray = oray / torch.linalg.norm(oray, dim=1).unsqueeze(1)
angle = torch.acos(oray[:, -1])
axis = torch.zeros_like(oray)
axis[:, 0] = axis[:, 0] - oray[:, 1]
axis[:, 1] = axis[:, 1] + oray[:, 0]
norms = torch.linalg.norm(axis, dim=1)
valid_angle = angle > 0
M = axis_angle_to_matrix(angle.unsqueeze(1)*axis/norms.unsqueeze(1))
R_view = R.clone()
R_view[valid_angle] = torch.bmm(M[valid_angle].transpose(2, 1), R[valid_angle])
else:
fx = K[0][0]
fy = K[1][1]
sx = K[0][2]
sy = K[1][2]
if u is None:
u = sx
if v is None:
v = sy
oray = np.array([(u - sx)/fx, (v - sy)/fy, 1])
oray = oray / np.linalg.norm(oray)
cray = np.array([0, 0, 1])
angle = math.acos(cray.dot(oray))
if angle != 0:
axis = np.cross(cray, oray)
axis_torch = torch.from_numpy(angle*axis/np.linalg.norm(axis)).float()
R_view = np.dot(axis_angle_to_matrix(axis_torch).numpy().T, R)
else:
R_view = R
return R_view
def R_from_allocentric(K, R_view, u=None, v=None):
"""
Convert a rotation matrix or series of rotation matrices to egocentric
representation given a 2D location (u, v) in pixels.
When u or v are not available, we fall back on the principal point of K.
"""
if type(K) == torch.Tensor:
fx = K[:, 0, 0]
fy = K[:, 1, 1]
sx = K[:, 0, 2]
sy = K[:, 1, 2]
n = len(K)
oray = torch.stack(((u - sx)/fx, (v - sy)/fy, torch.ones_like(u))).T
oray = oray / torch.linalg.norm(oray, dim=1).unsqueeze(1)
angle = torch.acos(oray[:, -1])
axis = torch.zeros_like(oray)
axis[:, 0] = axis[:, 0] - oray[:, 1]
axis[:, 1] = axis[:, 1] + oray[:, 0]
norms = torch.linalg.norm(axis, dim=1)
valid_angle = angle > 0
M = axis_angle_to_matrix(angle.unsqueeze(1)*axis/norms.unsqueeze(1))
R = R_view.clone()
R[valid_angle] = torch.bmm(M[valid_angle], R_view[valid_angle])
else:
fx = K[0][0]
fy = K[1][1]
sx = K[0][2]
sy = K[1][2]
if u is None:
u = sx
if v is None:
v = sy
oray = np.array([(u - sx)/fx, (v - sy)/fy, 1])
oray = oray / np.linalg.norm(oray)
cray = np.array([0, 0, 1])
angle = math.acos(cray.dot(oray))
if angle != 0:
#axis = np.cross(cray, oray)
axis = np.array([-oray[1], oray[0], 0])
axis_torch = torch.from_numpy(angle*axis/np.linalg.norm(axis)).float()
R = np.dot(axis_angle_to_matrix(axis_torch).numpy(), R_view)
else:
R = R_view
return R
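# Note (added for clarity): R_from_allocentric inverts R_to_allocentric for the same
# (K, u, v). A quick round-trip check with assumed values:
#   >>> K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
#   >>> R_ego = np.eye(3)
#   >>> R_allo = R_to_allocentric(K, R_ego, u=400., v=300.)
#   >>> np.allclose(R_ego, R_from_allocentric(K, R_allo, u=400., v=300.), atol=1e-5)
#   True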
def render_depth_map(K, box3d, pose, width, height, device=None):
cameras = get_camera(K, width, height)
renderer = get_basic_renderer(cameras, width, height)
mesh = mesh_cuboid(box3d, pose)
if device is not None:
cameras = cameras.to(device)
renderer = renderer.to(device)
mesh = mesh.to(device)
im_rendered, fragment = renderer(mesh)
silhouettes = im_rendered[:, :, :, -1] > 0
zbuf = fragment.zbuf[:, :, :, 0]
zbuf[zbuf==-1] = math.inf
depth_map, depth_map_inds = zbuf.min(dim=0)
return silhouettes, depth_map, depth_map_inds
def estimate_visibility(K, box3d, pose, width, height, device=None):
silhouettes, depth_map, depth_map_inds = render_depth_map(K, box3d, pose, width, height, device=device)
n = silhouettes.shape[0]
visibilies = []
for annidx in range(n):
area = silhouettes[annidx].sum()
visible = (depth_map_inds[silhouettes[annidx]] == annidx).sum()
visibilies.append((visible / area).item())
return visibilies
def estimate_truncation(K, box3d, R, imW, imH):
box2d, out_of_bounds, fully_behind = convert_3d_box_to_2d(K, box3d, R, imW, imH)
if fully_behind:
return 1.0
box2d = box2d.detach().cpu().numpy().tolist()
box2d_XYXY = BoxMode.convert(box2d, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
image_box = np.array([0, 0, imW-1, imH-1])
truncation = 1 - iou(np.array(box2d_XYXY)[np.newaxis], image_box[np.newaxis], ign_area_b=True)
return truncation.item()
def mesh_cuboid(box3d=None, R=None, color=None):
verts, faces = get_cuboid_verts_faces(box3d, R)
if verts.ndim == 2:
verts = to_float_tensor(verts).unsqueeze(0)
faces = to_float_tensor(faces).unsqueeze(0)
ninstances = len(verts)
if (isinstance(color, Tuple) or isinstance(color, List)) and len(color) == 3:
color = torch.tensor(color).view(1, 1, 3).expand(ninstances, 8, 3).float()
# pass in a tensor of colors per box
elif color.ndim == 2:
color = to_float_tensor(color).unsqueeze(1).expand(ninstances, 8, 3).float()
device = verts.device
mesh = Meshes(verts=verts, faces=faces, textures=None if color is None else TexturesVertex(verts_features=color).to(device))
return mesh
def get_camera(K, width, height, switch_hands=True, R=None, T=None):
K = to_float_tensor(K)
if switch_hands:
K = K @ torch.tensor([
[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]
]).float()
fx = K[0, 0]
fy = K[1, 1]
px = K[0, 2]
py = K[1, 2]
if R is None:
camera = PerspectiveCameras(
focal_length=((fx, fy),), principal_point=((px, py),),
image_size=((height, width),), in_ndc=False
)
else:
camera = PerspectiveCameras(
focal_length=((fx, fy),), principal_point=((px, py),),
image_size=((height, width),), in_ndc=False, R=R, T=T
)
return camera
def get_basic_renderer(cameras, width, height, use_color=False):
raster_settings = RasterizationSettings(
image_size=(height, width),
blur_radius=0 if use_color else np.log(1. / 1e-4 - 1.) * 1e-4,
faces_per_pixel=1,
perspective_correct=False,
)
if use_color:
# SoftPhongShader, HardPhongShader, HardFlatShader, SoftGouraudShader
lights = PointLights(location=[[0.0, 0.0, 0.0]])
shader = SoftPhongShader(cameras=cameras, lights=lights)
else:
shader = SoftSilhouetteShader()
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings,
),
shader=shader
)
return renderer
class MeshRenderer(MR):
def __init__(self, rasterizer, shader):
super().__init__(rasterizer, shader)
def forward(self, meshes_world, **kwargs) -> torch.Tensor:
fragments = self.rasterizer(meshes_world, **kwargs)
images = self.shader(fragments, meshes_world, **kwargs)
return images, fragments
def iou(box_a, box_b, mode='cross', ign_area_b=False):
"""
Computes the amount of Intersection over Union (IoU) between two different sets of boxes.
Args:
box_a (array or tensor): Mx4 boxes, defined by [x1, y1, x2, y2]
        box_b (array or tensor): Nx4 boxes, defined by [x1, y1, x2, y2]
mode (str): either 'cross' or 'list', where cross will check all combinations of box_a and
box_b hence MxN array, and list expects the same size list M == N, hence returns Mx1 array.
ign_area_b (bool): if true then we ignore area of b. e.g., checking % box a is inside b
"""
data_type = type(box_a)
# this mode computes the IoU in the sense of cross.
# i.e., box_a = M x 4, box_b = N x 4 then the output is M x N
if mode == 'cross':
inter = intersect(box_a, box_b, mode=mode)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1]))
area_b = ((box_b[:, 2] - box_b[:, 0]) *
(box_b[:, 3] - box_b[:, 1]))
# torch.Tensor
if data_type == torch.Tensor:
union = area_a.unsqueeze(0)
if not ign_area_b:
union = union + area_b.unsqueeze(1) - inter
return (inter / union).permute(1, 0)
# np.ndarray
elif data_type == np.ndarray:
union = np.expand_dims(area_a, 0)
if not ign_area_b:
union = union + np.expand_dims(area_b, 1) - inter
return (inter / union).T
# unknown type
else:
raise ValueError('unknown data type {}'.format(data_type))
# this mode compares every box in box_a with target in box_b
# i.e., box_a = M x 4 and box_b = M x 4 then output is M x 1
elif mode == 'list':
inter = intersect(box_a, box_b, mode=mode)
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
union = area_a + area_b - inter
return inter / union
else:
raise ValueError('unknown mode {}'.format(mode))
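# Worked example (illustrative, not from the original file): two axis-aligned 10x10
# boxes overlapping in a 5x10 strip intersect with area 50, so
# IoU = 50 / (100 + 100 - 50) = 1/3.
#   >>> a = np.array([[0., 0., 10., 10.]])
#   >>> b = np.array([[5., 0., 15., 10.]])
#   >>> float(iou(a, b)[0, 0])
#   0.3333333333333333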
def intersect(box_a, box_b, mode='cross'):
"""
Computes the amount of intersect between two different sets of boxes.
Args:
box_a (nparray): Mx4 boxes, defined by [x1, y1, x2, y2]
        box_b (nparray): Nx4 boxes, defined by [x1, y1, x2, y2]
mode (str): either 'cross' or 'list', where cross will check all combinations of box_a and
box_b hence MxN array, and list expects the same size list M == N, hence returns Mx1 array.
data_type (type): either torch.Tensor or np.ndarray, we automatically determine otherwise
"""
# determine type
data_type = type(box_a)
# this mode computes the intersect in the sense of cross.
# i.e., box_a = M x 4, box_b = N x 4 then the output is M x N
if mode == 'cross':
# np.ndarray
if data_type == np.ndarray:
max_xy = np.minimum(box_a[:, 2:4], np.expand_dims(box_b[:, 2:4], axis=1))
min_xy = np.maximum(box_a[:, 0:2], np.expand_dims(box_b[:, 0:2], axis=1))
inter = np.clip((max_xy - min_xy), a_min=0, a_max=None)
elif data_type == torch.Tensor:
max_xy = torch.min(box_a[:, 2:4], box_b[:, 2:4].unsqueeze(1))
min_xy = torch.max(box_a[:, 0:2], box_b[:, 0:2].unsqueeze(1))
inter = torch.clamp((max_xy - min_xy), 0)
# unknown type
else:
raise ValueError('type {} is not implemented'.format(data_type))
return inter[:, :, 0] * inter[:, :, 1]
# this mode computes the intersect in the sense of list_a vs. list_b.
# i.e., box_a = M x 4, box_b = M x 4 then the output is Mx1
elif mode == 'list':
        # torch.Tensor
if data_type == torch.Tensor:
max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])
min_xy = torch.max(box_a[:, :2], box_b[:, :2])
inter = torch.clamp((max_xy - min_xy), 0)
# np.ndarray
elif data_type == np.ndarray:
            # element-wise minimum/maximum (np.min/np.max would reduce over an axis)
            max_xy = np.minimum(box_a[:, 2:], box_b[:, 2:])
            min_xy = np.maximum(box_a[:, :2], box_b[:, :2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=None)
# unknown type
else:
raise ValueError('unknown data type {}'.format(data_type))
return inter[:, 0] * inter[:, 1]
else:
raise ValueError('unknown mode {}'.format(mode))
def scaled_sigmoid(vals, min=0.0, max=1.0):
"""
Simple helper function for a scaled sigmoid.
The output is bounded by (min, max)
Args:
vals (Tensor): input logits to scale
min (Tensor or float): the minimum value to scale to.
max (Tensor or float): the maximum value to scale to.
"""
return min + (max-min)*torch.sigmoid(vals)
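# Illustrative usage (added example): logits of 0 land at the midpoint of (min, max)
# because sigmoid(0) = 0.5.
#   >>> scaled_sigmoid(torch.zeros(1), min=2.0, max=6.0)
#   tensor([4.])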
| 31,079 | 30.779141 | 167 |
py
|
omni3d
|
omni3d-main/cubercnn/util/util.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import json
import pickle
import cv2
from time import time
import numpy as np
import os
import shutil
import scipy.io
from PIL import Image
from glob import glob
from difflib import SequenceMatcher
import matplotlib.colors as mplc
def file_parts(file_path):
base_path, tail = os.path.split(file_path)
name, ext = os.path.splitext(tail)
return base_path, name, ext
def save_json(path, data):
with open(path, 'w') as fp:
json.dump(data, fp)
def load_json(path):
with open(path, 'r') as fp:
data = json.load(fp)
return data
def load_mat(path):
data = scipy.io.loadmat(path, struct_as_record=False, squeeze_me=True)
return data
def pickle_write(file_path, obj):
with open(file_path, 'wb') as file:
pickle.dump(obj, file)
def pickle_read(file_path, latin=False, iso8859=False, bytes=False):
with open(file_path, 'rb') as file:
if bytes:
obj = pickle.load(file, encoding='bytes')
elif latin:
obj = pickle.load(file, encoding='latin1')
elif iso8859:
obj = pickle.load(file, encoding='iso-8859-1')
# default encoding
else:
obj = pickle.load(file)
return obj
def imread(path):
return cv2.imread(path)
# much faster than reading the entire image, just to get the width, height
def imreadstats(path):
im = Image.open(path)
width, height = im.size
return width, height
def imwrite(im, path):
cv2.imwrite(path, im)
def compute_eta(start_time, idx, total):
"""
Computes estimated time left for an iterative function to finish.
Args:
start_time (int): the time the function started at (e.g from time())
idx (int): the index the function is currently on, or has completed.
total (int): the total amount that needs to pass for completion.
Returns:
time_str (str): convenient string to display the time remaining
in seconds, minutes or hours depending on magnitude.
dt (float): the average change in seconds per iteration.
"""
# cannot be less than 1
idx = max(idx, 1)
dt = (time() - start_time)/idx
timeleft = np.max([dt * (total - idx), 0])
    if timeleft > 3600:
        time_str = '{:.1f}h'.format(timeleft / 3600)
    elif timeleft > 60:
        time_str = '{:.1f}m'.format(timeleft / 60)
    else:
        time_str = '{:.1f}s'.format(timeleft)
return time_str, dt
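# Illustrative usage (a sketch with assumed timings): if the first 10 of 100
# iterations took ~20 seconds, then dt is ~2.0 s/iter and roughly 180 s remain.
#   >>> time_str, dt = compute_eta(time() - 20, 10, 100)
#   >>> time_str, round(dt, 1)
#   ('3.0m', 2.0)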
def list_files(base_dir, file_pattern):
"""
Returns a list of files given a directory and pattern
The results are sorted alphabetically
Example:
files = list_files('path/to/images/', '*.jpg')
"""
return sorted(glob(os.path.join(base_dir) + file_pattern))
def list_subdirectories(path, include_files=False):
# this lists everything.
if include_files:
return sorted(glob(os.path.join(path, '*')))
# only subdirectories.
else:
return [fpath for fpath in glob(os.path.join(path, '*')) if os.path.isdir(fpath)]
def mkdir_if_missing(directory, delete_if_exist=False):
if delete_if_exist and os.path.exists(directory): shutil.rmtree(directory)
# check if not exist, then make
if not os.path.exists(directory):
os.makedirs(directory)
# All coco categories, together with their nice-looking visualization colors
# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},]
_colors = [cat['color'] for cat in COCO_CATEGORIES]
def _jitter(color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = [c/255.0 for c in color]
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return [c*255.0 for c in res]
def get_color(ind=None, hex=False):
if ind is None:
ind = np.random.randint(len(_colors))
color = _jitter(_colors[ind % len(_colors)])
if hex:
        # _jitter returns float channels; cast to int so the hex format is valid
        return '#%02x%02x%02x' % (int(color[0]), int(color[1]), int(color[2]))
else:
return color
def string_similarity(text1, text2):
return SequenceMatcher(None, text1, text2).ratio()
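# Illustrative usage (added example): the ratio is 2*M/T over matched characters,
# so 'abc' vs 'abd' gives 2*2/(3+3) ~= 0.667.
#   >>> round(string_similarity('abc', 'abd'), 3)
#   0.667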
| 14,651 | 47.356436 | 96 |
py
|
omni3d
|
omni3d-main/cubercnn/util/__init__.py
|
from .util import *
from .model_zoo import *
from .math_util import *
| 69 | 22.333333 | 24 |
py
|
omni3d
|
omni3d-main/cubercnn/data/builtin.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
def get_omni3d_categories(dataset="omni3d"):
"""
Returns the Omni3D categories for dataset
Args:
dataset: str
Returns:
cats: set of strings with category names
"""
if dataset == "omni3d":
cats = set({'chair', 'table', 'cabinet', 'car', 'lamp', 'books', 'sofa', 'pedestrian', 'picture', 'window', 'pillow', 'truck', 'door', 'blinds', 'sink', 'shelves', 'television', 'shoes', 'cup', 'bottle', 'bookcase', 'laptop', 'desk', 'cereal box', 'floor mat', 'traffic cone', 'mirror', 'barrier', 'counter', 'camera', 'bicycle', 'toilet', 'bus', 'bed', 'refrigerator', 'trailer', 'box', 'oven', 'clothes', 'van', 'towel', 'motorcycle', 'night stand', 'stove', 'machine', 'stationery', 'bathtub', 'cyclist', 'curtain', 'bin'})
assert len(cats) == 50
elif dataset == "omni3d_in":
cats = set({'stationery', 'sink', 'table', 'floor mat', 'bottle', 'bookcase', 'bin', 'blinds', 'pillow', 'bicycle', 'refrigerator', 'night stand', 'chair', 'sofa', 'books', 'oven', 'towel', 'cabinet', 'window', 'curtain', 'bathtub', 'laptop', 'desk', 'television', 'clothes', 'stove', 'cup', 'shelves', 'box', 'shoes', 'mirror', 'door', 'picture', 'lamp', 'machine', 'counter', 'bed', 'toilet'})
assert len(cats) == 38
elif dataset == "omni3d_out":
cats = set({'cyclist', 'pedestrian', 'trailer', 'bus', 'motorcycle', 'car', 'barrier', 'truck', 'van', 'traffic cone', 'bicycle'})
assert len(cats) == 11
elif dataset in ["SUNRGBD_train", "SUNRGBD_val", "SUNRGBD_test"]:
cats = set({'bicycle', 'books', 'bottle', 'chair', 'cup', 'laptop', 'shoes', 'towel', 'blinds', 'window', 'lamp', 'shelves', 'mirror', 'sink', 'cabinet', 'bathtub', 'door', 'toilet', 'desk', 'box', 'bookcase', 'picture', 'table', 'counter', 'bed', 'night stand', 'pillow', 'sofa', 'television', 'floor mat', 'curtain', 'clothes', 'stationery', 'refrigerator', 'bin', 'stove', 'oven', 'machine'})
assert len(cats) == 38
elif dataset in ["Hypersim_train", "Hypersim_val"]:
cats = set({'books', 'chair', 'towel', 'blinds', 'window', 'lamp', 'shelves', 'mirror', 'sink', 'cabinet', 'bathtub', 'door', 'toilet', 'desk', 'box', 'bookcase', 'picture', 'table', 'counter', 'bed', 'night stand', 'pillow', 'sofa', 'television', 'floor mat', 'curtain', 'clothes', 'stationery', 'refrigerator'})
assert len(cats) == 29
elif dataset == "Hypersim_test":
# Hypersim test annotation does not contain toilet
cats = set({'books', 'chair', 'towel', 'blinds', 'window', 'lamp', 'shelves', 'mirror', 'sink', 'cabinet', 'bathtub', 'door', 'desk', 'box', 'bookcase', 'picture', 'table', 'counter', 'bed', 'night stand', 'pillow', 'sofa', 'television', 'floor mat', 'curtain', 'clothes', 'stationery', 'refrigerator'})
assert len(cats) == 28
elif dataset in ["ARKitScenes_train", "ARKitScenes_val", "ARKitScenes_test"]:
cats = set({'table', 'bed', 'sofa', 'television', 'refrigerator', 'chair', 'oven', 'machine', 'stove', 'shelves', 'sink', 'cabinet', 'bathtub', 'toilet'})
assert len(cats) == 14
elif dataset in ["Objectron_train", "Objectron_val", "Objectron_test"]:
cats = set({'bicycle', 'books', 'bottle', 'camera', 'cereal box', 'chair', 'cup', 'laptop', 'shoes'})
assert len(cats) == 9
elif dataset in ["KITTI_train", "KITTI_val", "KITTI_test"]:
cats = set({'pedestrian', 'car', 'cyclist', 'van', 'truck'})
assert len(cats) == 5
elif dataset in ["nuScenes_train", "nuScenes_val", "nuScenes_test"]:
cats = set({'pedestrian', 'car', 'truck', 'traffic cone', 'barrier', 'motorcycle', 'bicycle', 'bus', 'trailer'})
assert len(cats) == 9
else:
raise ValueError("%s dataset is not registered." % (dataset))
return cats
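# Illustrative usage (added example): the KITTI splits expose 5 categories.
#   >>> cats = get_omni3d_categories("KITTI_train")
#   >>> len(cats), 'car' in cats
#   (5, True)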
| 3,848 | 82.673913 | 534 |
py
|
omni3d
|
omni3d-main/cubercnn/data/dataset_mapper.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import copy
import torch
import numpy as np
from detectron2.structures import BoxMode, Keypoints
from detectron2.data import detection_utils
from detectron2.data import transforms as T
from detectron2.data import (
DatasetMapper
)
from detectron2.structures import (
Boxes,
BoxMode,
Instances,
)
class DatasetMapper3D(DatasetMapper):
def __call__(self, dataset_dict):
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = detection_utils.read_image(dataset_dict["file_name"], format=self.image_format)
detection_utils.check_image_size(dataset_dict, image)
aug_input = T.AugInput(image)
transforms = self.augmentations(aug_input)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        # no need for additional processing at inference
if not self.is_train:
return dataset_dict
if "annotations" in dataset_dict:
dataset_id = dataset_dict['dataset_id']
K = np.array(dataset_dict['K'])
unknown_categories = self.dataset_id_to_unknown_cats[dataset_id]
# transform and pop off annotations
annos = [
transform_instance_annotations(obj, transforms, K=K)
for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0
]
# convert to instance format
instances = annotations_to_instances(annos, image_shape, unknown_categories)
dataset_dict["instances"] = detection_utils.filter_empty_instances(instances)
return dataset_dict
'''
Cached for mirroring annotations
'''
_M1 = np.array([
[1, 0, 0],
[0, -1, 0],
[0, 0, -1]
])
_M2 = np.array([
[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]
])
def transform_instance_annotations(annotation, transforms, *, K):
if isinstance(transforms, (tuple, list)):
transforms = T.TransformList(transforms)
# bbox is 1d (per-instance bounding box)
bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
bbox = transforms.apply_box(np.array([bbox]))[0]
annotation["bbox"] = bbox
annotation["bbox_mode"] = BoxMode.XYXY_ABS
if annotation['center_cam'][2] != 0:
# project the 3D box annotation XYZ_3D to screen
point3D = annotation['center_cam']
point2D = K @ np.array(point3D)
point2D[:2] = point2D[:2] / point2D[-1]
annotation["center_cam_proj"] = point2D.tolist()
# apply coords transforms to 2D box
annotation["center_cam_proj"][0:2] = transforms.apply_coords(
point2D[np.newaxis][:, :2]
)[0].tolist()
keypoints = (K @ np.array(annotation["bbox3D_cam"]).T).T
keypoints[:, 0] /= keypoints[:, -1]
keypoints[:, 1] /= keypoints[:, -1]
if annotation['ignore']:
# all keypoints marked as not visible
# 0 - unknown, 1 - not visible, 2 visible
keypoints[:, 2] = 1
else:
valid_keypoints = keypoints[:, 2] > 0
# 0 - unknown, 1 - not visible, 2 visible
keypoints[:, 2] = 2
keypoints[valid_keypoints, 2] = 2
# in place
transforms.apply_coords(keypoints[:, :2])
annotation["keypoints"] = keypoints.tolist()
# manually apply mirror for pose
for transform in transforms:
        # horizontal flip?
if isinstance(transform, T.HFlipTransform):
pose = _M1 @ np.array(annotation["pose"]) @ _M2
annotation["pose"] = pose.tolist()
annotation["R_cam"] = pose.tolist()
return annotation
def annotations_to_instances(annos, image_size, unknown_categories):
# init
target = Instances(image_size)
# add classes, 2D boxes, 3D boxes and poses
target.gt_classes = torch.tensor([int(obj["category_id"]) for obj in annos], dtype=torch.int64)
target.gt_boxes = Boxes([BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos])
target.gt_boxes3D = torch.FloatTensor([anno['center_cam_proj'] + anno['dimensions'] + anno['center_cam'] for anno in annos])
target.gt_poses = torch.FloatTensor([anno['pose'] for anno in annos])
n = len(target.gt_classes)
# do keypoints?
target.gt_keypoints = Keypoints(torch.FloatTensor([anno['keypoints'] for anno in annos]))
gt_unknown_category_mask = torch.zeros(max(unknown_categories)+1, dtype=bool)
gt_unknown_category_mask[torch.tensor(list(unknown_categories))] = True
# include available category indices as tensor with GTs
target.gt_unknown_category_mask = gt_unknown_category_mask.unsqueeze(0).repeat([n, 1])
return target
| 5,231 | 32.538462 | 128 |
py
|
omni3d
|
omni3d-main/cubercnn/data/datasets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import json
import time
import os
import contextlib
import io
import logging
import numpy as np
from pycocotools.coco import COCO
from collections import defaultdict
from fvcore.common.timer import Timer
from detectron2.utils.file_io import PathManager
from detectron2.structures import BoxMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from cubercnn import util
VERSION = '0.1'
logger = logging.getLogger(__name__)
def get_version():
return VERSION
def get_global_dataset_stats(path_to_stats=None, reset=False):
if path_to_stats is None:
path_to_stats = os.path.join('datasets', 'Omni3D', 'stats.json')
if os.path.exists(path_to_stats) and not reset:
stats = util.load_json(path_to_stats)
else:
stats = {
'n_datasets': 0,
'n_ims': 0,
'n_anns': 0,
'categories': []
}
return stats
def save_global_dataset_stats(stats, path_to_stats=None):
if path_to_stats is None:
path_to_stats = os.path.join('datasets', 'Omni3D', 'stats.json')
util.save_json(path_to_stats, stats)
def get_filter_settings_from_cfg(cfg=None):
if cfg is None:
return {
'category_names': [],
'ignore_names': [],
'truncation_thres': 0.99,
'visibility_thres': 0.01,
'min_height_thres': 0.00,
'max_height_thres': 1.50,
'modal_2D_boxes': False,
'trunc_2D_boxes': False,
'max_depth': 1e8,
}
else:
return {
'category_names': cfg.DATASETS.CATEGORY_NAMES,
'ignore_names': cfg.DATASETS.IGNORE_NAMES,
'truncation_thres': cfg.DATASETS.TRUNCATION_THRES,
'visibility_thres': cfg.DATASETS.VISIBILITY_THRES,
'min_height_thres': cfg.DATASETS.MIN_HEIGHT_THRES,
'modal_2D_boxes': cfg.DATASETS.MODAL_2D_BOXES,
'trunc_2D_boxes': cfg.DATASETS.TRUNC_2D_BOXES,
'max_depth': cfg.DATASETS.MAX_DEPTH,
# TODO expose as a config
'max_height_thres': 1.50,
}
def is_ignore(anno, filter_settings, image_height):
ignore = anno['behind_camera']
ignore |= (not bool(anno['valid3D']))
if ignore:
return ignore
ignore |= anno['dimensions'][0] <= 0
ignore |= anno['dimensions'][1] <= 0
ignore |= anno['dimensions'][2] <= 0
ignore |= anno['center_cam'][2] > filter_settings['max_depth']
ignore |= (anno['lidar_pts'] == 0)
ignore |= (anno['segmentation_pts'] == 0)
ignore |= (anno['depth_error'] > 0.5)
# tightly annotated 2D boxes are not always available.
if filter_settings['modal_2D_boxes'] and 'bbox2D_tight' in anno and anno['bbox2D_tight'][0] != -1:
bbox2D = BoxMode.convert(anno['bbox2D_tight'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
# truncated projected 2D boxes are also not always available.
elif filter_settings['trunc_2D_boxes'] and 'bbox2D_trunc' in anno and not np.all([val==-1 for val in anno['bbox2D_trunc']]):
bbox2D = BoxMode.convert(anno['bbox2D_trunc'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
# use the projected 3D --> 2D box, which requires a visible 3D cuboid.
elif 'bbox2D_proj' in anno:
bbox2D = BoxMode.convert(anno['bbox2D_proj'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
else:
bbox2D = anno['bbox']
ignore |= bbox2D[3] <= filter_settings['min_height_thres']*image_height
ignore |= bbox2D[3] >= filter_settings['max_height_thres']*image_height
ignore |= (anno['truncation'] >=0 and anno['truncation'] >= filter_settings['truncation_thres'])
ignore |= (anno['visibility'] >= 0 and anno['visibility'] <= filter_settings['visibility_thres'])
if 'ignore_names' in filter_settings:
ignore |= anno['category_name'] in filter_settings['ignore_names']
return ignore
def simple_register(dataset_name, filter_settings, filter_empty=False, datasets_root_path=None):
if datasets_root_path is None:
        datasets_root_path = os.path.join('datasets', 'Omni3D')
path_to_json = os.path.join(datasets_root_path, dataset_name + '.json')
path_to_image_root = 'datasets'
DatasetCatalog.register(dataset_name, lambda: load_omni3d_json(
path_to_json, path_to_image_root,
dataset_name, filter_settings, filter_empty=filter_empty
))
MetadataCatalog.get(dataset_name).set(json_file=path_to_json, image_root=path_to_image_root, evaluator_type="coco")
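# Illustrative usage (a sketch; assumes datasets/Omni3D/SUNRGBD_test.json exists and
# that the model metadata, e.g. via register_and_store_model_metadata, was set up):
#   >>> settings = get_filter_settings_from_cfg()       # default filter settings
#   >>> simple_register('SUNRGBD_test', settings, filter_empty=False)
#   >>> dicts = DatasetCatalog.get('SUNRGBD_test')      # lazily runs load_omni3d_json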
class Omni3D(COCO):
'''
Class for COCO-like dataset object. Not inherently related to
use with Detectron2 or training per se.
'''
def __init__(self, annotation_files, filter_settings=None):
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if isinstance(annotation_files, str):
annotation_files = [annotation_files,]
cats_ids_master = []
cats_master = []
for annotation_file in annotation_files:
_, name, _ = util.file_parts(annotation_file)
print('loading {} annotations into memory...'.format(name))
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
if type(dataset['info']) == list:
dataset['info'] = dataset['info'][0]
dataset['info']['known_category_ids'] = [cat['id'] for cat in dataset['categories']]
# first dataset
if len(self.dataset) == 0:
self.dataset = dataset
# concatenate datasets
else:
if type(self.dataset['info']) == dict:
self.dataset['info'] = [self.dataset['info']]
self.dataset['info'] += [dataset['info']]
self.dataset['annotations'] += dataset['annotations']
self.dataset['images'] += dataset['images']
# sort through categories
for cat in dataset['categories']:
if not cat['id'] in cats_ids_master:
cats_ids_master.append(cat['id'])
cats_master.append(cat)
if filter_settings is None:
# include every category in the master list
self.dataset['categories'] = [
cats_master[i]
for i in np.argsort(cats_ids_master)
]
else:
# determine which categories we may actually use for filtering.
trainable_cats = set(filter_settings['ignore_names']) | set(filter_settings['category_names'])
# category names are provided to us
if len(filter_settings['category_names']) > 0:
self.dataset['categories'] = [
cats_master[i]
for i in np.argsort(cats_ids_master)
if cats_master[i]['name'] in filter_settings['category_names']
]
            # no categories are provided, so assume we should use ALL available.
else:
self.dataset['categories'] = [
cats_master[i]
for i in np.argsort(cats_ids_master)
]
filter_settings['category_names'] = [cat['name'] for cat in self.dataset['categories']]
trainable_cats = trainable_cats | set(filter_settings['category_names'])
valid_anns = []
im_height_map = {}
for im_obj in self.dataset['images']:
im_height_map[im_obj['id']] = im_obj['height']
# Filter out annotations
for anno_idx, anno in enumerate(self.dataset['annotations']):
im_height = im_height_map[anno['image_id']]
ignore = is_ignore(anno, filter_settings, im_height)
if filter_settings['trunc_2D_boxes'] and 'bbox2D_trunc' in anno and not np.all([val==-1 for val in anno['bbox2D_trunc']]):
bbox2D = BoxMode.convert(anno['bbox2D_trunc'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif anno['bbox2D_proj'][0] != -1:
bbox2D = BoxMode.convert(anno['bbox2D_proj'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif anno['bbox2D_tight'][0] != -1:
bbox2D = BoxMode.convert(anno['bbox2D_tight'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
else:
continue
width = bbox2D[2]
height = bbox2D[3]
self.dataset['annotations'][anno_idx]['area'] = width*height
self.dataset['annotations'][anno_idx]['iscrowd'] = False
self.dataset['annotations'][anno_idx]['ignore'] = ignore
self.dataset['annotations'][anno_idx]['ignore2D'] = ignore
self.dataset['annotations'][anno_idx]['ignore3D'] = ignore
if filter_settings['modal_2D_boxes'] and anno['bbox2D_tight'][0] != -1:
self.dataset['annotations'][anno_idx]['bbox'] = BoxMode.convert(anno['bbox2D_tight'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
else:
self.dataset['annotations'][anno_idx]['bbox'] = bbox2D
self.dataset['annotations'][anno_idx]['bbox3D'] = anno['bbox3D_cam']
self.dataset['annotations'][anno_idx]['depth'] = anno['center_cam'][2]
category_name = anno["category_name"]
# category is part of trainable categories?
if category_name in trainable_cats:
valid_anns.append(self.dataset['annotations'][anno_idx])
self.dataset['annotations'] = valid_anns
self.createIndex()
def info(self):
infos = self.dataset['info']
if type(infos) == dict:
infos = [infos]
for i, info in enumerate(infos):
            print('Dataset {}/{}'.format(i+1, len(infos)))
for key, value in info.items():
print('{}: {}'.format(key, value))
def register_and_store_model_metadata(datasets, output_dir, filter_settings=None):
output_file = os.path.join(output_dir, 'category_meta.json')
if os.path.exists(output_file):
metadata = util.load_json(output_file)
thing_classes = metadata['thing_classes']
id_map = metadata['thing_dataset_id_to_contiguous_id']
# json saves id map as strings rather than ints
id_map = {int(idA):idB for idA, idB in id_map.items()}
else:
omni3d_stats = util.load_json(os.path.join('datasets', 'Omni3D', 'stats.json'))
thing_classes = filter_settings['category_names']
cat_ids = []
for cat in thing_classes:
cat_idx = omni3d_stats['category_names'].index(cat)
cat_id = omni3d_stats['categories'][cat_idx]['id']
cat_ids.append(cat_id)
cat_order = np.argsort(cat_ids)
cat_ids = [cat_ids[i] for i in cat_order]
thing_classes = [thing_classes[i] for i in cat_order]
id_map = {id: i for i, id in enumerate(cat_ids)}
util.save_json(output_file, {
'thing_classes': thing_classes,
'thing_dataset_id_to_contiguous_id': id_map,
})
MetadataCatalog.get('omni3d_model').thing_classes = thing_classes
MetadataCatalog.get('omni3d_model').thing_dataset_id_to_contiguous_id = id_map
def load_omni3d_json(json_file, image_root, dataset_name, filter_settings, filter_empty=False):
# read in the dataset
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
# the global meta information for the full dataset
meta_model = MetadataCatalog.get('omni3d_model')
# load the meta information
meta = MetadataCatalog.get(dataset_name)
cat_ids = sorted(coco_api.getCatIds(filter_settings['category_names']))
cats = coco_api.loadCats(cat_ids)
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
meta.thing_classes = thing_classes
# the id mapping must be based on the model!
id_map = meta_model.thing_dataset_id_to_contiguous_id
meta.thing_dataset_id_to_contiguous_id = id_map
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
total_num_valid_anns = sum([len(x) for x in anns])
total_num_anns = len(coco_api.anns)
if total_num_valid_anns < total_num_anns:
logger.info(
f"{json_file} contains {total_num_anns} annotations, but only "
f"{total_num_valid_anns} of them match to images in the file."
)
imgs_anns = list(zip(imgs, anns))
logger.info("Loaded {} images in Omni3D format from {}".format(len(imgs_anns), json_file))
dataset_dicts = []
# annotation keys to pass along
ann_keys = [
"bbox", "bbox3D_cam", "bbox2D_proj", "bbox2D_trunc", "bbox2D_tight",
"center_cam", "dimensions", "pose", "R_cam", "category_id",
]
# optional per image keys to pass if exists
# this property is unique to KITTI.
img_keys_optional = ['p2']
invalid_count = 0
for (img_dict, anno_dict_list) in imgs_anns:
has_valid_annotation = False
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_path"])
record["dataset_id"] = img_dict["dataset_id"]
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["K"] = img_dict["K"]
# store optional keys when available
for img_key in img_keys_optional:
if img_key in img_dict:
record[img_key] = img_dict[img_key]
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
assert anno["image_id"] == image_id
obj = {key: anno[key] for key in ann_keys if key in anno}
obj["bbox_mode"] = BoxMode.XYWH_ABS
annotation_category_id = obj["category_id"]
# category is not part of ids and is not in the ignore category?
if not (annotation_category_id in id_map) and not (anno['category_name'] in filter_settings['ignore_names']):
continue
ignore = is_ignore(anno, filter_settings, img_dict["height"])
obj['iscrowd'] = False
obj['ignore'] = ignore
if filter_settings['modal_2D_boxes'] and 'bbox2D_tight' in anno and anno['bbox2D_tight'][0] != -1:
obj['bbox'] = BoxMode.convert(anno['bbox2D_tight'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif filter_settings['trunc_2D_boxes'] and 'bbox2D_trunc' in anno and not np.all([val==-1 for val in anno['bbox2D_trunc']]):
obj['bbox'] = BoxMode.convert(anno['bbox2D_trunc'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
elif 'bbox2D_proj' in anno:
obj['bbox'] = BoxMode.convert(anno['bbox2D_proj'], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
else:
continue
obj['pose'] = anno['R_cam']
# store category as -1 for ignores!
obj["category_id"] = -1 if ignore else id_map[annotation_category_id]
objs.append(obj)
has_valid_annotation |= (not ignore)
if has_valid_annotation or (not filter_empty):
record["annotations"] = objs
dataset_dicts.append(record)
else:
invalid_count += 1
logger.info("Filtered out {}/{} images without valid annotations".format(invalid_count, len(imgs_anns)))
return dataset_dicts
| 16,471 | 35.685969 | 141 |
py
|
omni3d
|
omni3d-main/cubercnn/data/__init__.py
|
from .datasets import *
from .dataset_mapper import *
from .build import *
from .builtin import *
| 97 | 23.5 | 29 |
py
|
omni3d
|
omni3d-main/cubercnn/data/build.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import itertools
import logging
import numpy as np
import math
from collections import defaultdict
import torch.utils.data
from detectron2.config import configurable
from detectron2.utils.logger import _log_api_usage
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import (
InferenceSampler,
RepeatFactorTrainingSampler,
TrainingSampler
)
from detectron2.data.build import (
filter_images_with_only_crowd_annotations,
build_batch_data_loader,
trivial_batch_collator
)
def get_detection_dataset_dicts(names, filter_empty=True, **kwargs):
if isinstance(names, str):
names = [names]
assert len(names), names
dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
for dataset_name, dicts in zip(names, dataset_dicts):
assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
has_instances = "annotations" in dataset_dicts[0]
if filter_empty and has_instances:
dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
return dataset_dicts
def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None, dataset_id_to_src=None):
if dataset is None:
dataset = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
_log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
if mapper is None:
mapper = DatasetMapper(cfg, True)
if sampler is None:
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
balance_datasets = cfg.DATALOADER.BALANCE_DATASETS
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if balance_datasets:
assert dataset_id_to_src is not None, 'Need dataset sources.'
dataset_source_to_int = {val:i for i, val in enumerate(set(dataset_id_to_src.values()))}
dataset_ids_per_img = [dataset_source_to_int[dataset_id_to_src[img['dataset_id']]] for img in dataset]
dataset_ids = np.unique(dataset_ids_per_img)
# only one source? don't re-weight then.
if len(dataset_ids) == 1:
weights_per_img = torch.ones(len(dataset_ids_per_img)).float()
# compute per-dataset weights.
else:
counts = np.bincount(dataset_ids_per_img)
counts = [counts[id] for id in dataset_ids]
weights = [1 - count/np.sum(counts) for count in counts]
weights = [weight/np.min(weights) for weight in weights]
weights_per_img = torch.zeros(len(dataset_ids_per_img)).float()
dataset_ids_per_img = torch.FloatTensor(dataset_ids_per_img).long()
# copy weights
for dataset_id, weight in zip(dataset_ids, weights):
weights_per_img[dataset_ids_per_img == dataset_id] = weight
# no special sampling whatsoever
if sampler_name == "TrainingSampler" and not balance_datasets:
sampler = TrainingSampler(len(dataset))
# balance the weight sampling by datasets
elif sampler_name == "TrainingSampler" and balance_datasets:
sampler = RepeatFactorTrainingSampler(weights_per_img)
# balance the weight sampling by categories
elif sampler_name == "RepeatFactorTrainingSampler" and not balance_datasets:
repeat_factors = repeat_factors_from_category_frequency(
dataset, cfg.DATALOADER.REPEAT_THRESHOLD
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
# balance the weight sampling by categories AND by dataset frequency
elif sampler_name == "RepeatFactorTrainingSampler" and balance_datasets:
repeat_factors = repeat_factors_from_category_frequency(
dataset, cfg.DATALOADER.REPEAT_THRESHOLD
)
repeat_factors *= weights_per_img
repeat_factors /= repeat_factors.min().item()
sampler = RepeatFactorTrainingSampler(repeat_factors)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return {
"dataset": dataset,
"sampler": sampler,
"mapper": mapper,
"total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
"aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
"num_workers": cfg.DATALOADER.NUM_WORKERS,
}
def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
"""
Compute (fractional) per-image repeat factors based on category frequency.
The repeat factor for an image is a function of the frequency of the rarest
category labeled in that image. The "frequency of category c" in [0, 1] is defined
as the fraction of images in the training set (without repeats) in which category c
appears.
See :paper:`lvis` (>= v2) Appendix B.2.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
repeat_thresh (float): frequency threshold below which data is repeated.
If the frequency is half of `repeat_thresh`, the image will be
repeated twice.
Returns:
torch.Tensor:
the i-th element is the repeat factor for the dataset image at index i.
"""
# 1. For each category c, compute the fraction of images that contain it: f(c)
category_freq = defaultdict(int)
for dataset_dict in dataset_dicts: # For each image (without repeats)
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
for cat_id in cat_ids:
if cat_id < 0: continue
category_freq[cat_id] += 1
num_images = len(dataset_dicts)
for k, v in category_freq.items():
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t / f(c)))
category_rep = {
cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I, compute the image-level repeat factor:
# r(I) = max_{c in I} r(c)
rep_factors = []
for dataset_dict in dataset_dicts:
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
rep_factor = max({category_rep[cat_id] for cat_id in cat_ids if cat_id >= 0}, default=1.0)
rep_factors.append(rep_factor)
return torch.tensor(rep_factors, dtype=torch.float32)
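# Worked example (illustrative; the toy annotations are assumptions): with
# repeat_thresh=0.5, a category seen in 1 of 8 images has f(c)=0.125, so
# r(c) = max(1, sqrt(0.5/0.125)) = 2 and the image containing it is repeated ~2x.
#   >>> dd = [{'annotations': [{'category_id': 0}]}] + [{'annotations': [{'category_id': 1}]}] * 7
#   >>> repeat_factors_from_category_frequency(dd, repeat_thresh=0.5)
#   tensor([2., 1., 1., 1., 1., 1., 1., 1.])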
@configurable(from_config=_train_loader_from_config)
def build_detection_train_loader(dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0):
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = TrainingSampler(len(dataset))
assert isinstance(sampler, torch.utils.data.sampler.Sampler)
return build_batch_data_loader(
dataset,
sampler,
total_batch_size,
aspect_ratio_grouping=aspect_ratio_grouping,
num_workers=num_workers
)
def _test_loader_from_config(cfg, dataset_name, mapper=None):
if isinstance(dataset_name, str):
dataset_name = [dataset_name]
dataset = get_detection_dataset_dicts(
dataset_name,
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
if mapper is None:
mapper = DatasetMapper(cfg, False)
return {"dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS}
@configurable(from_config=_test_loader_from_config)
def build_detection_test_loader(dataset, *, mapper, sampler=None, num_workers=0):
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = InferenceSampler(len(dataset))
# Always use 1 image per worker during inference since this is the
# standard when reporting inference time in papers.
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
| 9,407 | 39.551724 | 128 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/backbone/dla.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import detectron2.utils.comm as comm
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone.fpn import FPN
BatchNorm = nn.BatchNorm2d
"""
Adapted models from repositories
Deep Layer Aggregation CVPR 2018
https://github.com/ucbdrive/dla
BSD-3 Licence https://github.com/ucbdrive/dla/blob/master/LICENSE
Geometry Uncertainty Projection Network for Monocular 3D Object Detection, ICCV 2021
https://github.com/SuperMHP/GUPNet/blob/main/code/lib/backbones/dla.py
MIT Licence https://github.com/SuperMHP/GUPNet/blob/main/LICENSE
"""
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return os.path.join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(planes)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = BatchNorm(out_channels)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_channels)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, return_levels=False,
pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = return_levels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
self.avgpool = nn.AvgPool2d(pool_size)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
BatchNorm(planes),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
# load model only on main process
# to prevent redundent model caching
if comm.is_main_process():
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
del model_weights['fc.weight']
del model_weights['fc.bias']
self.load_state_dict(model_weights)
def dla34(pretrained=False, tricks=False, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
if tricks:
model.load_pretrained_model(data='imagenet', name='dla34+tricks', hash='24a49e58')
else:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
def dla46_c(pretrained=False, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla46_c', hash='2bfd52c3')
return model
def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla46x_c', hash='d761bae7')
return model
def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=False, tricks=False, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained:
if tricks:
model.load_pretrained_model(data='imagenet', name='dla60+tricks', hash='14488826')
else:
model.load_pretrained_model(data='imagenet', name='dla60', hash='24839fc4')
return model
def dla60x(pretrained=False, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x', hash='d15cacda')
return model
def dla102(pretrained=False, tricks=False, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
if tricks:
model.load_pretrained_model(data='imagenet', name='dla102+tricks', hash='27a30eac')
else:
model.load_pretrained_model(data='imagenet', name='dla102', hash='d94d9790')
return model
def dla102x(pretrained=False, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102x', hash='ad62be81')
return model
def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102x2', hash='262837b6')
return model
def dla169(pretrained=False, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla169', hash='0914e092')
return model
class DLABackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
if cfg.MODEL.DLA.TYPE == 'dla34':
base = dla34(pretrained=pretrained, tricks=cfg.MODEL.DLA.TRICKS)
self._out_feature_channels = {'p2': 64, 'p3': 128, 'p4': 256, 'p5': 512, 'p6': 512}
elif cfg.MODEL.DLA.TYPE == 'dla46_c':
base = dla46_c(pretrained=pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 64, 'p4': 128, 'p5': 256, 'p6': 256}
elif cfg.MODEL.DLA.TYPE == 'dla46x_c':
base = dla46x_c(pretrained=pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 64, 'p4': 128, 'p5': 256, 'p6': 256}
elif cfg.MODEL.DLA.TYPE == 'dla60x_c':
base = dla60x_c(pretrained=pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 64, 'p4': 128, 'p5': 256, 'p6': 256}
elif cfg.MODEL.DLA.TYPE == 'dla60':
base = dla60(pretrained=pretrained, tricks=cfg.MODEL.DLA.TRICKS)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla60x':
base = dla60x(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla102':
base = dla102(pretrained=pretrained, tricks=cfg.MODEL.DLA.TRICKS)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla102x':
base = dla102x(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla102x2':
base = dla102x2(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
elif cfg.MODEL.DLA.TYPE == 'dla169':
base = dla169(pretrained=pretrained)
self._out_feature_channels = {'p2': 128, 'p3': 256, 'p4': 512, 'p5': 1024, 'p6': 1024}
self.base_layer = base.base_layer
self.level0 = base.level0
self.level1 = base.level1
self.level2 = base.level2
self.level3 = base.level3
self.level4 = base.level4
self.level5 = base.level5
self._out_feature_strides ={'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
base_layer = self.base_layer(x)
level0 = self.level0(base_layer)
level1 = self.level1(level0)
level2 = self.level2(level1)
level3 = self.level3(level2)
level4 = self.level4(level3)
level5 = self.level5(level4)
level6 = F.max_pool2d(level5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = level2
outputs['p3'] = level3
outputs['p4'] = level4
outputs['p5'] = level5
outputs['p6'] = level6
return outputs
@BACKBONE_REGISTRY.register()
def build_dla_from_vision_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = DLABackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
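# --- Illustrative usage (not part of the original file): a minimal sketch showing how the
# registered DLA+FPN backbone is built and what it exposes. `cfg` is assumed to be a fully
# populated cubercnn CfgNode; the printout is for illustration only.
def _example_build_dla_fpn(cfg):
    input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
    backbone = build_dla_from_vision_fpn_backbone(cfg, input_shape)
    # The FPN wraps DLABackbone, so output_shape() reports one ShapeSpec per pyramid level
    # (here derived from the bottom-up p2..p6 features at strides 4..64).
    for name, spec in backbone.output_shape().items():
        print(name, spec.channels, spec.stride)
    return backbone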
| 18,904 | 36.287968 | 98 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/backbone/resnet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone.resnet import build_resnet_backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class ResNet(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
if cfg.MODEL.RESNETS.DEPTH == 18:
base = models.resnet18(pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 128, 'p4': 256, 'p5': 512, 'p6': 512}
elif cfg.MODEL.RESNETS.DEPTH == 34:
base = models.resnet34(pretrained)
self._out_feature_channels = {'p2': 64, 'p3': 128, 'p4': 256, 'p5': 512, 'p6': 512}
elif cfg.MODEL.RESNETS.DEPTH == 50:
base = models.resnet50(pretrained)
self._out_feature_channels = {'p2': 256, 'p3': 512, 'p4': 1024, 'p5': 2048, 'p6': 2048}
elif cfg.MODEL.RESNETS.DEPTH == 101:
base = models.resnet101(pretrained)
self._out_feature_channels = {'p2': 256, 'p3': 512, 'p4': 1024, 'p5': 2048, 'p6': 2048}
else:
raise ValueError('No configuration currently supporting depth of {}'.format(cfg.MODEL.RESNETS.DEPTH))
self.conv1 = base.conv1
self.bn1 = base.bn1
self.relu = base.relu
self.maxpool = base.maxpool
self.layer1 = base.layer1
self.layer2 = base.layer2
self.layer3 = base.layer3
self.layer4 = base.layer4
self._out_feature_strides ={'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
p2 = self.layer1(x)
p3 = self.layer2(p2)
p4 = self.layer3(p3)
p5 = self.layer4(p4)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = p2
outputs['p3'] = p3
outputs['p4'] = p4
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_resnet_from_vision_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
if cfg.MODEL.RESNETS.TORCHVISION:
bottom_up = ResNet(cfg, input_shape, pretrained=imagenet_pretrain)
else:
# use the MSRA modeling logic to build the backbone.
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
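# --- Illustrative note (not part of the original file): a minimal sketch of the
# MODEL.RESNETS.TORCHVISION toggle used above. The cfg edits are assumptions made purely
# for demonstration on a fully populated cubercnn CfgNode.
def _example_toggle_resnet_impl(cfg, input_shape):
    cfg.MODEL.RESNETS.TORCHVISION = True    # torchvision ResNet wrapped by the class above
    tv_fpn = build_resnet_from_vision_fpn_backbone(cfg, input_shape)
    cfg.MODEL.RESNETS.TORCHVISION = False   # detectron2's own MSRA-style ResNet builder
    d2_fpn = build_resnet_from_vision_fpn_backbone(cfg, input_shape)
    return tv_fpn, d2_fpn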
| 3,333 | 33.371134 | 113 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/backbone/mnasnet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class MNASNetBackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
base = models.mnasnet1_0(pretrained)
base = base.layers
self.base = base
self._out_feature_channels = {'p2': 24, 'p3': 40, 'p4': 96, 'p5': 320, 'p6': 320}
self._out_feature_strides ={'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
p2 = self.base[0:9](x)
p3 = self.base[9](p2)
p4 = self.base[10:12](p3)
p5 = self.base[12:14](p4)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = p2
outputs['p3'] = p3
outputs['p4'] = p4
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_mnasnet_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = MNASNetBackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| 1,936 | 29.265625 | 89 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/backbone/densenet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class DenseNetBackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
base = models.densenet121(pretrained)
base = base.features
self.base = base
self._out_feature_channels = {'p2': 256, 'p3': 512, 'p4': 1024, 'p5': 1024, 'p6': 1024}
self._out_feature_strides ={'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
db1 = self.base[0:5](x)
db2 = self.base[5:7](db1)
db3 = self.base[7:9](db2)
p5 = self.base[9:](db3)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = db1
outputs['p3'] = db2
outputs['p4'] = db3
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_densenet_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = DenseNetBackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE
)
return backbone
| 1,952 | 29.515625 | 95 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/backbone/shufflenet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
class ShufflenetBackbone(Backbone):
def __init__(self, cfg, input_shape, pretrained=True):
super().__init__()
base = models.shufflenet_v2_x1_0(pretrained)
self.conv1 = base.conv1
self.maxpool = base.maxpool
self.stage2 = base.stage2
self.stage3 = base.stage3
self.stage4 = base.stage4
self.conv5 = base.conv5
self._out_feature_channels = {'p2': 24, 'p3': 116, 'p4': 232, 'p5': 464, 'p6': 464}
self._out_feature_strides ={'p2': 4, 'p3': 8, 'p4': 16, 'p5': 32, 'p6': 64}
self._out_features = ['p2', 'p3', 'p4', 'p5', 'p6']
def forward(self, x):
outputs = {}
x = self.conv1(x)
p2 = self.maxpool(x)
p3 = self.stage2(p2)
p4 = self.stage3(p3)
p5 = self.stage4(p4)
p6 = F.max_pool2d(p5, kernel_size=1, stride=2, padding=0)
outputs['p2'] = p2
outputs['p3'] = p3
outputs['p4'] = p4
outputs['p5'] = p5
outputs['p6'] = p6
return outputs
@BACKBONE_REGISTRY.register()
def build_shufflenet_fpn_backbone(cfg, input_shape: ShapeSpec, priors=None):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
    imagenet_pretrain = (cfg.MODEL.WEIGHTS_PRETRAIN + cfg.MODEL.WEIGHTS) == ''
bottom_up = ShufflenetBackbone(cfg, input_shape, pretrained=imagenet_pretrain)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| 2,113 | 29.2 | 91 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/backbone/__init__.py
|
from .densenet import *
from .mnasnet import *
from .resnet import *
from .shufflenet import *
from .dla import *
| 118 | 22.8 | 26 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/meta_arch/__init__.py
|
from .rcnn3d import *
| 21 | 21 | 21 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/meta_arch/rcnn3d.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from typing import Dict, List, Optional
import torch
import numpy as np
from detectron2.layers import ShapeSpec, batched_nms
from detectron2.utils.visualizer import Visualizer
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.data import MetadataCatalog
from detectron2.modeling.backbone import Backbone, BACKBONE_REGISTRY
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.utils.logger import _log_api_usage
from detectron2.modeling.meta_arch import (
META_ARCH_REGISTRY, GeneralizedRCNN
)
from cubercnn.modeling.roi_heads import build_roi_heads
from pytorch3d.transforms import rotation_6d_to_matrix
from cubercnn import util, vis
@META_ARCH_REGISTRY.register()
class RCNN3D(GeneralizedRCNN):
@classmethod
def from_config(cls, cfg, priors=None):
backbone = build_backbone(cfg, priors=priors)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape(), priors=priors),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
# scaling factor for the sample relative to its original scale
# e.g., how much has the image been upsampled by? or downsampled?
im_scales_ratio = [info['height'] / im.shape[1] for (info, im) in zip(batched_inputs, images)]
# The unmodified intrinsics for the image
Ks = [torch.FloatTensor(info['K']) for info in batched_inputs]
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
instances, detector_losses = self.roi_heads(
images, features, proposals,
Ks, im_scales_ratio,
gt_instances
)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0 and storage.iter > 0:
self.visualize_training(batched_inputs, proposals, instances)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: List[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
assert not self.training
images = self.preprocess_image(batched_inputs)
# scaling factor for the sample relative to its original scale
# e.g., how much has the image been upsampled by? or downsampled?
im_scales_ratio = [info['height'] / im.shape[1] for (info, im) in zip(batched_inputs, images)]
# The unmodified intrinsics for the image
Ks = [torch.FloatTensor(info['K']) for info in batched_inputs]
features = self.backbone(images.tensor)
# Pass oracle 2D boxes into the RoI heads
        if isinstance(batched_inputs, list) and np.any(['oracle2D' in b for b in batched_inputs]):
oracles = [b['oracle2D'] for b in batched_inputs]
results, _ = self.roi_heads(images, features, oracles, Ks, im_scales_ratio, None)
# normal inference
else:
proposals, _ = self.proposal_generator(images, features, None)
results, _ = self.roi_heads(images, features, proposals, Ks, im_scales_ratio, None)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def visualize_training(self, batched_inputs, proposals, instances):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
instances (list): a list that contains predicted RoIhead instances. Both
batched_inputs and proposals should have the same length.
"""
storage = get_event_storage()
# minimum number of boxes to try to visualize per image
max_vis_prop = 20
if not hasattr(self, 'thing_classes'):
self.thing_classes = MetadataCatalog.get('omni3d_model').thing_classes
self.num_classes = len(self.thing_classes)
for input, prop, instances_i in zip(batched_inputs, proposals, instances):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
img_3DGT = np.ascontiguousarray(img.copy()[:, :, [2, 1, 1]]) # BGR
img_3DPR = np.ascontiguousarray(img.copy()[:, :, [2, 1, 1]]) # BGR
'''
Visualize the 2D GT and proposal predictions
'''
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img_rpn = np.concatenate((anno_img, prop_img), axis=1)
vis_img_rpn = vis_img_rpn.transpose(2, 0, 1)
storage.put_image("Left: GT 2D bounding boxes; Right: Predicted 2D proposals", vis_img_rpn)
'''
Visualize the 3D GT and predictions
'''
K = torch.tensor(input['K'], device=self.device)
scale = input['height']/img.shape[0]
fx, sx = (val.item()/scale for val in K[0, [0, 2]])
fy, sy = (val.item()/scale for val in K[1, [1, 2]])
K_scaled = torch.tensor(
[[1/scale, 0 , 0], [0, 1/scale, 0], [0, 0, 1.0]],
dtype=torch.float32, device=self.device
) @ K
gts_per_image = input["instances"]
gt_classes = gts_per_image.gt_classes
# Filter out irrelevant groundtruth
fg_selection_mask = (gt_classes != -1) & (gt_classes < self.num_classes)
gt_classes = gt_classes[fg_selection_mask]
gt_class_names = [self.thing_classes[cls_idx] for cls_idx in gt_classes]
gt_boxes = gts_per_image.gt_boxes.tensor[fg_selection_mask] # 2D boxes
gt_poses = gts_per_image.gt_poses[fg_selection_mask] # GT poses
# projected 2D center, depth, w, h, l, 3D center
gt_boxes3D = gts_per_image.gt_boxes3D[fg_selection_mask]
# this box may have been mirrored and scaled so
# we need to recompute XYZ in 3D by backprojecting.
gt_z = gt_boxes3D[:, 2]
gt_x3D = gt_z * (gt_boxes3D[:, 0] - sx)/fx
gt_y3D = gt_z * (gt_boxes3D[:, 1] - sy)/fy
# put together the GT boxes
gt_center_3D = torch.stack((gt_x3D, gt_y3D, gt_z)).T
gt_boxes3D_XYZ_WHL = torch.cat((gt_center_3D, gt_boxes3D[:, 3:6]), dim=1)
gt_colors = torch.tensor(
[util.get_color(i) for i in range(len(gt_boxes3D_XYZ_WHL))],
device=self.device
)/255.0
gt_meshes = util.mesh_cuboid(gt_boxes3D_XYZ_WHL, gt_poses, gt_colors)
# perform a simple NMS, which is not cls dependent.
keep = batched_nms(
instances_i.pred_boxes.tensor,
instances_i.scores,
torch.zeros(len(instances_i.scores), dtype=torch.long, device=instances_i.scores.device),
self.roi_heads.box_predictor.test_nms_thresh
)
keep = keep[:max_vis_prop]
num_to_visualize = len(keep)
pred_xyzwhl = torch.cat((instances_i.pred_center_cam[keep], instances_i.pred_dimensions[keep]), dim=1)
pred_pose = instances_i.pred_pose[keep]
pred_colors = torch.tensor(
[util.get_color(i) for i in range(num_to_visualize)],
device=self.device
)/255.0
pred_boxes = instances_i.pred_boxes[keep]
pred_scores = instances_i.scores[keep]
pred_classes = instances_i.pred_classes[keep]
pred_class_names = ['{} {:.2f}'.format(self.thing_classes[cls_idx], score) for cls_idx, score in zip(pred_classes, pred_scores)]
pred_meshes = util.mesh_cuboid(pred_xyzwhl, pred_pose, pred_colors)
# convert to lists
pred_meshes = [pred_meshes.__getitem__(i).detach() for i in range(len(pred_meshes))]
gt_meshes = [gt_meshes.__getitem__(i) for i in range(len(gt_meshes))]
img_3DPR = vis.draw_scene_view(img_3DPR, K_scaled.cpu().numpy(), pred_meshes, text=pred_class_names, mode='front', blend_weight=0.0, blend_weight_overlay=0.85)
img_3DGT = vis.draw_scene_view(img_3DGT, K_scaled.cpu().numpy(), gt_meshes, text=gt_class_names, mode='front', blend_weight=0.0, blend_weight_overlay=0.85)
# horizontal stack 3D GT and pred left/right
vis_img_3d = np.concatenate((img_3DGT, img_3DPR), axis=1)
vis_img_3d = vis_img_3d[:, :, [2, 1, 0]] # RGB
vis_img_3d = vis_img_3d.astype(np.uint8).transpose(2, 0, 1)
storage.put_image("Left: GT 3D cuboids; Right: Predicted 3D cuboids", vis_img_3d)
break # only visualize one image in a batch
def build_model(cfg, priors=None):
"""
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
Note that it does not load any weights from ``cfg``.
"""
meta_arch = cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(cfg, priors=priors)
model.to(torch.device(cfg.MODEL.DEVICE))
_log_api_usage("modeling.meta_arch." + meta_arch)
return model
def build_backbone(cfg, input_shape=None, priors=None):
"""
Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
Returns:
an instance of :class:`Backbone`
"""
if input_shape is None:
input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
backbone_name = cfg.MODEL.BACKBONE.NAME
backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape, priors)
assert isinstance(backbone, Backbone)
return backbone
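# --- Illustrative usage (not part of the original file): a minimal sketch tying the two
# builders together. `cfg` is assumed to be a fully populated cubercnn CfgNode with
# MODEL.META_ARCHITECTURE set to "RCNN3D"; no checkpoint weights are loaded here.
def _example_build_cube_rcnn(cfg, priors=None):
    model = build_model(cfg, priors=priors)   # RCNN3D.from_config -> build_backbone(...)
    model.eval()
    return model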
| 11,688 | 41.974265 | 171 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/roi_heads/fast_rcnn.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from torch.nn import functional as F
from typing import List, Tuple
from fvcore.nn import giou_loss, smooth_l1_loss
from detectron2.utils.events import get_event_storage
from detectron2.layers import cat, cross_entropy, nonzero_tuple, batched_nms
from detectron2.structures import Instances, Boxes
from detectron2.modeling.roi_heads.fast_rcnn import (
FastRCNNOutputLayers, _log_classification_stats
)
from cubercnn.modeling.proposal_generator.rpn import matched_pairwise_iou
def fast_rcnn_inference(
boxes: List[torch.Tensor],
scores: List[torch.Tensor],
image_shapes: List[Tuple[int, int]],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Call `fast_rcnn_inference_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_inference_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
)
for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
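# --- Illustrative usage (not part of the original file): a minimal sketch exercising the
# per-image contract documented above with random tensors. R, K, the box ranges and the
# thresholds are arbitrary assumptions.
def _example_fast_rcnn_inference():
    R, K = 10, 3
    xy = torch.rand(R, K, 2) * 300.0
    wh = torch.rand(R, K, 2) * 50.0 + 1.0
    boxes = [torch.cat([xy, xy + wh], dim=2).reshape(R, K * 4)]   # valid (x1, y1, x2, y2) per class
    scores = [torch.rand(R, K + 1).softmax(dim=1)]                # K foreground columns + background
    image_shapes = [(480, 640)]
    instances, kept_indices = fast_rcnn_inference(
        boxes, scores, image_shapes,
        score_thresh=0.05, nms_thresh=0.5, topk_per_image=100,
    )
    return instances[0], kept_indices[0]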
def fast_rcnn_inference_single_image(
boxes,
scores,
image_shape: Tuple[int, int],
score_thresh: float,
nms_thresh: float,
topk_per_image: int,
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Args:
Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_inference`, but for only one image.
"""
valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
if not valid_mask.all():
boxes = boxes[valid_mask]
scores = scores[valid_mask]
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // 4
# Convert to Boxes to use the `clip` function ...
boxes = Boxes(boxes.reshape(-1, 4))
boxes.clip(image_shape)
boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
# 1. Filter results based on detection scores. It can make NMS more efficient
# by filtering out low-confidence detections.
filter_mask = scores > score_thresh # R x K
# R' x 2. First column contains indices of the R predictions;
# Second column contains indices of classes.
filter_inds = filter_mask.nonzero()
if num_bbox_reg_classes == 1:
boxes = boxes[filter_inds[:, 0], 0]
else:
boxes = boxes[filter_mask]
scores_full = scores[filter_inds[:, 0]]
scores = scores[filter_mask]
# 2. Apply NMS for each class independently.
keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
if topk_per_image >= 0:
keep = keep[:topk_per_image]
boxes, scores, filter_inds, scores_full = boxes[keep], scores[keep], filter_inds[keep], scores_full[keep]
result = Instances(image_shape)
result.pred_boxes = Boxes(boxes)
result.scores = scores
result.scores_full = scores_full
result.pred_classes = filter_inds[:, 1]
return result, filter_inds[:, 0]
class FastRCNNOutputs(FastRCNNOutputLayers):
def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def losses(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were used
to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
``gt_classes`` are expected.
Returns:
Dict[str, Tensor]: dict of losses
"""
scores, proposal_deltas = predictions
# parse classification outputs
gt_classes = (
cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
)
# parse box regression outputs
if len(proposals):
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
# If "gt_boxes" does not exist, the proposals must be all negative and
# should not be included in regression loss computation.
# Here we just use proposal_boxes as an arbitrary placeholder because its
# value won't be used in self.box_reg_loss().
gt_boxes = cat(
[(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
dim=0,
)
else:
proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
normalize_factor = max(gt_classes.numel(), 1.0)
'''
Standard Faster R-CNN losses
'''
_log_classification_stats(scores, gt_classes)
loss_cls = cross_entropy(scores, gt_classes, reduction="mean")
loss_box_reg = self.box_reg_loss(proposal_boxes, gt_boxes, proposal_deltas, gt_classes, reduction="none")
loss_box_reg = (loss_box_reg).sum() / normalize_factor
losses = {
"BoxHead/loss_cls": loss_cls,
"BoxHead/loss_box_reg": loss_box_reg,
}
return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes, reduction='mean'):
"""
Args:
All boxes are tensors with the same shape Rx(4 or 5).
gt_classes is a long tensor of shape R, the gt class label of each proposal.
R shall be the number of proposals.
"""
box_dim = proposal_boxes.shape[1] # 4 or 5
# Regression loss is only computed for foreground proposals (those matched to a GT)
fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]
if pred_deltas.shape[1] == box_dim: # cls-agnostic regression
fg_pred_deltas = pred_deltas[fg_inds]
else:
fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
fg_inds, gt_classes[fg_inds]
]
if reduction == 'mean':
if self.box_reg_loss_type == "smooth_l1":
gt_pred_deltas = self.box2box_transform.get_deltas(
proposal_boxes[fg_inds],
gt_boxes[fg_inds],
)
loss_box_reg = smooth_l1_loss(
fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
)
elif self.box_reg_loss_type == "giou":
fg_pred_boxes = self.box2box_transform.apply_deltas(
fg_pred_deltas, proposal_boxes[fg_inds]
)
loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
else:
raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
# The reg loss is normalized using the total number of regions (R), not the number
# of foreground regions even though the box regression loss is only defined on
# foreground regions. Why? Because doing so gives equal training influence to
# each foreground example. To see how, consider two different minibatches:
# (1) Contains a single foreground region
# (2) Contains 100 foreground regions
# If we normalize by the number of foreground regions, the single example in
# minibatch (1) will be given 100 times as much influence as each foreground
# example in minibatch (2). Normalizing by the total number of regions, R,
# means that the single example in minibatch (1) and each of the 100 examples
# in minibatch (2) are given equal influence.
return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty
elif reduction == 'none':
if self.box_reg_loss_type == "smooth_l1":
gt_pred_deltas = self.box2box_transform.get_deltas(
proposal_boxes[fg_inds],
gt_boxes[fg_inds],
)
loss_box_reg = smooth_l1_loss(
fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="none"
)
else:
raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
# return non-reduced type
return loss_box_reg
else:
raise ValueError(f"Invalid bbox reg reduction type '{reduction}'")
| 11,154 | 41.576336 | 113 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/roi_heads/cube_head.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.utils.registry import Registry
from typing import Dict
from detectron2.layers import ShapeSpec
from torch import nn
import torch
import numpy as np
import fvcore.nn.weight_init as weight_init
from pytorch3d.transforms.rotation_conversions import _copysign
from pytorch3d.transforms import (
rotation_6d_to_matrix,
euler_angles_to_matrix,
quaternion_to_matrix
)
ROI_CUBE_HEAD_REGISTRY = Registry("ROI_CUBE_HEAD")
@ROI_CUBE_HEAD_REGISTRY.register()
class CubeHead(nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
#-------------------------------------------
# Settings
#-------------------------------------------
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
self.use_conf = cfg.MODEL.ROI_CUBE_HEAD.USE_CONFIDENCE
self.z_type = cfg.MODEL.ROI_CUBE_HEAD.Z_TYPE
self.pose_type = cfg.MODEL.ROI_CUBE_HEAD.POSE_TYPE
self.cluster_bins = cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS
self.shared_fc = cfg.MODEL.ROI_CUBE_HEAD.SHARED_FC
#-------------------------------------------
# Feature generator
#-------------------------------------------
num_conv = cfg.MODEL.ROI_CUBE_HEAD.NUM_CONV
conv_dim = cfg.MODEL.ROI_CUBE_HEAD.CONV_DIM
num_fc = cfg.MODEL.ROI_CUBE_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_CUBE_HEAD.FC_DIM
conv_dims = [conv_dim] * num_conv
fc_dims = [fc_dim] * num_fc
assert len(conv_dims) + len(fc_dims) > 0
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
if self.shared_fc:
self.feature_generator = nn.Sequential()
else:
self.feature_generator_XY = nn.Sequential()
self.feature_generator_dims = nn.Sequential()
self.feature_generator_pose = nn.Sequential()
self.feature_generator_Z = nn.Sequential()
if self.use_conf:
self.feature_generator_conf = nn.Sequential()
# create fully connected layers for Cube Head
for k, fc_dim in enumerate(fc_dims):
fc_dim_in = int(np.prod(self._output_size))
self._output_size = fc_dim
if self.shared_fc:
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator.add_module("fc{}".format(k + 1), fc)
self.feature_generator.add_module("fc_relu{}".format(k + 1), nn.ReLU())
else:
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_dims.add_module("fc{}".format(k + 1), fc)
self.feature_generator_dims.add_module("fc_relu{}".format(k + 1), nn.ReLU())
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_XY.add_module("fc{}".format(k + 1), fc)
self.feature_generator_XY.add_module("fc_relu{}".format(k + 1), nn.ReLU())
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_pose.add_module("fc{}".format(k + 1), fc)
self.feature_generator_pose.add_module("fc_relu{}".format(k + 1), nn.ReLU())
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_Z.add_module("fc{}".format(k + 1), fc)
self.feature_generator_Z.add_module("fc_relu{}".format(k + 1), nn.ReLU())
if self.use_conf:
fc = nn.Linear(fc_dim_in, fc_dim)
weight_init.c2_xavier_fill(fc)
self.feature_generator_conf.add_module("fc{}".format(k + 1), fc)
self.feature_generator_conf.add_module("fc_relu{}".format(k + 1), nn.ReLU())
#-------------------------------------------
# 3D outputs
#-------------------------------------------
# Dimensions in meters (width, height, length)
self.bbox_3D_dims = nn.Linear(self._output_size, self.num_classes*3)
nn.init.normal_(self.bbox_3D_dims.weight, std=0.001)
nn.init.constant_(self.bbox_3D_dims.bias, 0)
cluster_bins = self.cluster_bins if self.cluster_bins > 1 else 1
# XY
self.bbox_3D_center_deltas = nn.Linear(self._output_size, self.num_classes*2)
nn.init.normal_(self.bbox_3D_center_deltas.weight, std=0.001)
nn.init.constant_(self.bbox_3D_center_deltas.bias, 0)
# Pose
if self.pose_type == '6d':
self.bbox_3D_pose = nn.Linear(self._output_size, self.num_classes*6)
elif self.pose_type == 'quaternion':
self.bbox_3D_pose = nn.Linear(self._output_size, self.num_classes*4)
elif self.pose_type == 'euler':
self.bbox_3D_pose = nn.Linear(self._output_size, self.num_classes*3)
else:
raise ValueError('Cuboid pose type {} is not recognized'.format(self.pose_type))
nn.init.normal_(self.bbox_3D_pose.weight, std=0.001)
nn.init.constant_(self.bbox_3D_pose.bias, 0)
# Z
self.bbox_3D_center_depth = nn.Linear(self._output_size, self.num_classes*cluster_bins)
nn.init.normal_(self.bbox_3D_center_depth.weight, std=0.001)
nn.init.constant_(self.bbox_3D_center_depth.bias, 0)
# Optionally, box confidence
if self.use_conf:
self.bbox_3D_uncertainty = nn.Linear(self._output_size, self.num_classes*1)
nn.init.normal_(self.bbox_3D_uncertainty.weight, std=0.001)
nn.init.constant_(self.bbox_3D_uncertainty.bias, 5)
def forward(self, x):
n = x.shape[0]
box_z = None
box_uncert = None
box_2d_deltas = None
if self.shared_fc:
features = self.feature_generator(x)
box_2d_deltas = self.bbox_3D_center_deltas(features)
box_dims = self.bbox_3D_dims(features)
box_pose = self.bbox_3D_pose(features)
box_z = self.bbox_3D_center_depth(features)
if self.use_conf:
box_uncert = self.bbox_3D_uncertainty(features).clip(0.01)
else:
box_2d_deltas = self.bbox_3D_center_deltas(self.feature_generator_XY(x))
box_dims = self.bbox_3D_dims(self.feature_generator_dims(x))
box_pose = self.bbox_3D_pose(self.feature_generator_pose(x))
box_z = self.bbox_3D_center_depth(self.feature_generator_Z(x))
if self.use_conf:
box_uncert = self.bbox_3D_uncertainty(self.feature_generator_conf(x)).clip(0.01)
# Pose
if self.pose_type == '6d':
box_pose = rotation_6d_to_matrix(box_pose.view(-1, 6))
elif self.pose_type == 'quaternion':
quats = box_pose.view(-1, 4)
quats_scales = (quats * quats).sum(1)
quats = quats / _copysign(torch.sqrt(quats_scales), quats[:, 0])[:, None]
box_pose = quaternion_to_matrix(quats)
elif self.pose_type == 'euler':
box_pose = euler_angles_to_matrix(box_pose.view(-1, 3), 'XYZ')
box_2d_deltas = box_2d_deltas.view(n, self.num_classes, 2)
box_dims = box_dims.view(n, self.num_classes, 3)
box_pose = box_pose.view(n, self.num_classes, 3, 3)
if self.cluster_bins > 1:
box_z = box_z.view(n, self.cluster_bins, self.num_classes, -1)
else:
box_z = box_z.view(n, self.num_classes, -1)
return box_2d_deltas, box_z, box_dims, box_pose, box_uncert
def build_cube_head(cfg, input_shape: Dict[str, ShapeSpec]):
name = cfg.MODEL.ROI_CUBE_HEAD.NAME
return ROI_CUBE_HEAD_REGISTRY.get(name)(cfg, input_shape)
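# --- Illustrative usage (not part of the original file): a minimal sketch of the tensor
# shapes produced by CubeHead.forward. The 256x7x7 pooled RoI resolution and the batch of
# 4 RoIs are assumptions; `cfg` is assumed to be a fully populated cubercnn CfgNode.
def _example_cube_head_shapes(cfg):
    pooled_shape = ShapeSpec(channels=256, width=7, height=7)
    head = build_cube_head(cfg, pooled_shape)
    x = torch.rand(4, 256 * 7 * 7)              # RoI features are flattened before the head
    deltas, z, dims, pose, uncert = head(x)
    # deltas: (4, num_classes, 2)       projected 2D centre deltas
    # z:      (4, num_classes, 1), or (4, CLUSTER_BINS, num_classes, 1) when binned
    # dims:   (4, num_classes, 3)       (width, height, length) outputs
    # pose:   (4, num_classes, 3, 3)    rotation matrices
    # uncert: (4, num_classes) or None depending on USE_CONFIDENCE
    return deltas, z, dims, pose, uncert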
| 8,064 | 38.925743 | 96 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/roi_heads/roi_heads.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import numpy as np
import cv2
from typing import Dict, List, Tuple
import torch
from torch import nn
import torch.nn.functional as F
from pytorch3d.transforms.so3 import (
so3_relative_angle
)
from detectron2.config import configurable
from detectron2.structures import Instances, Boxes, pairwise_iou, pairwise_ioa
from detectron2.layers import ShapeSpec, nonzero_tuple
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads import (
StandardROIHeads, ROI_HEADS_REGISTRY, select_foreground_proposals,
)
from detectron2.modeling.poolers import ROIPooler
from cubercnn.modeling.roi_heads.cube_head import build_cube_head
from cubercnn.modeling.proposal_generator.rpn import subsample_labels
from cubercnn.modeling.roi_heads.fast_rcnn import FastRCNNOutputs
from cubercnn import util
logger = logging.getLogger(__name__)
E_CONSTANT = 2.71828183
SQRT_2_CONSTANT = 1.41421356
def build_roi_heads(cfg, input_shape, priors=None):
"""
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
"""
name = cfg.MODEL.ROI_HEADS.NAME
return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape, priors=priors)
@ROI_HEADS_REGISTRY.register()
class ROIHeads3D(StandardROIHeads):
@configurable
def __init__(
self,
*,
ignore_thresh: float,
cube_head: nn.Module,
cube_pooler: nn.Module,
loss_w_3d: float,
loss_w_xy: float,
loss_w_z: float,
loss_w_dims: float,
loss_w_pose: float,
loss_w_joint: float,
use_confidence: float,
inverse_z_weight: bool,
z_type: str,
pose_type: str,
cluster_bins: int,
priors = None,
dims_priors_enabled = None,
dims_priors_func = None,
disentangled_loss=None,
virtual_depth=None,
virtual_focal=None,
test_scale=None,
allocentric_pose=None,
chamfer_pose=None,
scale_roi_boxes=None,
**kwargs,
):
super().__init__(**kwargs)
self.scale_roi_boxes = scale_roi_boxes
# rotation settings
self.allocentric_pose = allocentric_pose
self.chamfer_pose = chamfer_pose
# virtual settings
self.virtual_depth = virtual_depth
self.virtual_focal = virtual_focal
# loss weights, <=0 is off
self.loss_w_3d = loss_w_3d
self.loss_w_xy = loss_w_xy
self.loss_w_z = loss_w_z
self.loss_w_dims = loss_w_dims
self.loss_w_pose = loss_w_pose
self.loss_w_joint = loss_w_joint
# loss modes
self.disentangled_loss = disentangled_loss
self.inverse_z_weight = inverse_z_weight
# misc
self.test_scale = test_scale
self.ignore_thresh = ignore_thresh
# related to network outputs
self.z_type = z_type
self.pose_type = pose_type
self.use_confidence = use_confidence
# related to priors
self.cluster_bins = cluster_bins
self.dims_priors_enabled = dims_priors_enabled
self.dims_priors_func = dims_priors_func
# if there is no 3D loss, then we don't need any heads.
if loss_w_3d > 0:
self.cube_head = cube_head
self.cube_pooler = cube_pooler
# the dimensions could rely on pre-computed priors
if self.dims_priors_enabled and priors is not None:
self.priors_dims_per_cat = nn.Parameter(torch.FloatTensor(priors['priors_dims_per_cat']).unsqueeze(0))
else:
self.priors_dims_per_cat = nn.Parameter(torch.ones(1, self.num_classes, 2, 3))
# Optionally, refactor priors and store them in the network params
if self.cluster_bins > 1 and priors is not None:
# the depth could have been clustered based on 2D scales
priors_z_scales = torch.stack([torch.FloatTensor(prior[1]) for prior in priors['priors_bins']])
self.priors_z_scales = nn.Parameter(priors_z_scales)
else:
self.priors_z_scales = nn.Parameter(torch.ones(self.num_classes, self.cluster_bins))
# the depth can be based on priors
if self.z_type == 'clusters':
assert self.cluster_bins > 1, 'To use z_type of priors, there must be more than 1 cluster bin'
if priors is None:
self.priors_z_stats = nn.Parameter(torch.ones(self.num_classes, self.cluster_bins, 2).float())
else:
# stats
priors_z_stats = torch.cat([torch.FloatTensor(prior[2]).unsqueeze(0) for prior in priors['priors_bins']])
self.priors_z_stats = nn.Parameter(priors_z_stats)
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec], priors=None):
ret = super().from_config(cfg, input_shape)
# pass along priors
ret["box_predictor"] = FastRCNNOutputs(cfg, ret['box_head'].output_shape)
ret.update(cls._init_cube_head(cfg, input_shape))
ret["priors"] = priors
return ret
@classmethod
def _init_cube_head(self, cfg, input_shape: Dict[str, ShapeSpec]):
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
pooler_resolution = cfg.MODEL.ROI_CUBE_HEAD.POOLER_RESOLUTION
pooler_sampling_ratio = cfg.MODEL.ROI_CUBE_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_CUBE_HEAD.POOLER_TYPE
cube_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=pooler_sampling_ratio,
pooler_type=pooler_type,
)
in_channels = [input_shape[f].channels for f in in_features][0]
shape = ShapeSpec(
channels=in_channels, width=pooler_resolution, height=pooler_resolution
)
cube_head = build_cube_head(cfg, shape)
return {
'cube_head': cube_head,
'cube_pooler': cube_pooler,
'use_confidence': cfg.MODEL.ROI_CUBE_HEAD.USE_CONFIDENCE,
'inverse_z_weight': cfg.MODEL.ROI_CUBE_HEAD.INVERSE_Z_WEIGHT,
'loss_w_3d': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_3D,
'loss_w_xy': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_XY,
'loss_w_z': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_Z,
'loss_w_dims': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_DIMS,
'loss_w_pose': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_POSE,
'loss_w_joint': cfg.MODEL.ROI_CUBE_HEAD.LOSS_W_JOINT,
'z_type': cfg.MODEL.ROI_CUBE_HEAD.Z_TYPE,
'pose_type': cfg.MODEL.ROI_CUBE_HEAD.POSE_TYPE,
'dims_priors_enabled': cfg.MODEL.ROI_CUBE_HEAD.DIMS_PRIORS_ENABLED,
'dims_priors_func': cfg.MODEL.ROI_CUBE_HEAD.DIMS_PRIORS_FUNC,
'disentangled_loss': cfg.MODEL.ROI_CUBE_HEAD.DISENTANGLED_LOSS,
'virtual_depth': cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_DEPTH,
'virtual_focal': cfg.MODEL.ROI_CUBE_HEAD.VIRTUAL_FOCAL,
'test_scale': cfg.INPUT.MIN_SIZE_TEST,
'chamfer_pose': cfg.MODEL.ROI_CUBE_HEAD.CHAMFER_POSE,
'allocentric_pose': cfg.MODEL.ROI_CUBE_HEAD.ALLOCENTRIC_POSE,
'cluster_bins': cfg.MODEL.ROI_CUBE_HEAD.CLUSTER_BINS,
'ignore_thresh': cfg.MODEL.RPN.IGNORE_THRESHOLD,
'scale_roi_boxes': cfg.MODEL.ROI_CUBE_HEAD.SCALE_ROI_BOXES,
}
def forward(self, images, features, proposals, Ks, im_scales_ratio, targets=None):
im_dims = [image.shape[1:] for image in images]
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
if self.training:
losses = self._forward_box(features, proposals)
if self.loss_w_3d > 0:
instances_3d, losses_cube = self._forward_cube(features, proposals, Ks, im_dims, im_scales_ratio)
losses.update(losses_cube)
return instances_3d, losses
else:
# when oracle is available, by pass the box forward.
# simulate the predicted instances by creating a new
# instance for each passed in image.
            if isinstance(proposals, list) and not np.any([isinstance(p, Instances) for p in proposals]):
pred_instances = []
for proposal, im_dim in zip(proposals, im_dims):
pred_instances_i = Instances(im_dim)
pred_instances_i.pred_boxes = Boxes(proposal['gt_bbox2D'])
pred_instances_i.pred_classes = proposal['gt_classes']
pred_instances_i.scores = torch.ones_like(proposal['gt_classes']).float()
pred_instances.append(pred_instances_i)
else:
pred_instances = self._forward_box(features, proposals)
if self.loss_w_3d > 0:
pred_instances = self._forward_cube(features, pred_instances, Ks, im_dims, im_scales_ratio)
return pred_instances, {}
def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]):
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
del box_features
if self.training:
losses = self.box_predictor.losses(
predictions, proposals,
)
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.pred_boxes = Boxes(pred_boxes_per_image)
# proposals is modified in-place below, so losses must be computed first.
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals, )
return pred_instances
def l1_loss(self, vals, target):
return F.smooth_l1_loss(vals, target, reduction='none', beta=0.0)
def chamfer_loss(self, vals, target):
B = vals.shape[0]
xx = vals.view(B, 8, 1, 3)
yy = target.view(B, 1, 8, 3)
l1_dist = (xx - yy).abs().sum(-1)
l1 = (l1_dist.min(1).values.mean(-1) + l1_dist.min(2).values.mean(-1))
return l1
# optionally, scale proposals to zoom RoI in (<1.0) our out (>1.0)
def scale_proposals(self, proposal_boxes):
if self.scale_roi_boxes > 0:
proposal_boxes_scaled = []
for boxes in proposal_boxes:
centers = boxes.get_centers()
widths = boxes.tensor[:, 2] - boxes.tensor[:, 0]
                heights = boxes.tensor[:, 3] - boxes.tensor[:, 1]
x1 = centers[:, 0] - 0.5*widths*self.scale_roi_boxes
x2 = centers[:, 0] + 0.5*widths*self.scale_roi_boxes
y1 = centers[:, 1] - 0.5*heights*self.scale_roi_boxes
y2 = centers[:, 1] + 0.5*heights*self.scale_roi_boxes
boxes_scaled = Boxes(torch.stack([x1, y1, x2, y2], dim=1))
proposal_boxes_scaled.append(boxes_scaled)
else:
proposal_boxes_scaled = proposal_boxes
return proposal_boxes_scaled
def _forward_cube(self, features, instances, Ks, im_current_dims, im_scales_ratio):
features = [features[f] for f in self.in_features]
# training on foreground
if self.training:
losses = {}
# add up the amount we should normalize the losses by.
# this follows the same logic as the BoxHead, where each FG proposal
# is able to contribute the same amount of supervision. Technically,
# this value doesn't change during training unless the batch size is dynamic.
self.normalize_factor = max(sum([i.gt_classes.numel() for i in instances]), 1.0)
# The loss is only defined on positive proposals
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
pred_boxes = [x.pred_boxes for x in proposals]
box_classes = (torch.cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0))
gt_boxes3D = torch.cat([p.gt_boxes3D for p in proposals], dim=0,)
gt_poses = torch.cat([p.gt_poses for p in proposals], dim=0,)
assert len(gt_poses) == len(gt_boxes3D) == len(box_classes)
# eval on all instances
else:
proposals = instances
pred_boxes = [x.pred_boxes for x in instances]
proposal_boxes = pred_boxes
box_classes = torch.cat([x.pred_classes for x in instances])
proposal_boxes_scaled = self.scale_proposals(proposal_boxes)
# forward features
cube_features = self.cube_pooler(features, proposal_boxes_scaled).flatten(1)
n = cube_features.shape[0]
# nothing to do..
if n == 0:
return instances if not self.training else (instances, {})
num_boxes_per_image = [len(i) for i in proposals]
# scale the intrinsics according to the ratio the image has been scaled.
# this means the projections at the current scale are in sync.
Ks_scaled_per_box = torch.cat([
(Ks[i]/im_scales_ratio[i]).unsqueeze(0).repeat([num, 1, 1])
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
Ks_scaled_per_box[:, -1, -1] = 1
focal_lengths_per_box = torch.cat([
(Ks[i][1, 1]).unsqueeze(0).repeat([num])
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
im_ratios_per_box = torch.cat([
torch.FloatTensor([im_scales_ratio[i]]).repeat(num)
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
# scaling factor for Network resolution -> Original
im_scales_per_box = torch.cat([
torch.FloatTensor([im_current_dims[i][0]]).repeat(num)
for (i, num) in enumerate(num_boxes_per_image)
]).to(cube_features.device)
im_scales_original_per_box = im_scales_per_box * im_ratios_per_box
if self.virtual_depth:
virtual_to_real = util.compute_virtual_scale_from_focal_spaces(
focal_lengths_per_box, im_scales_original_per_box,
self.virtual_focal, im_scales_per_box
)
real_to_virtual = 1 / virtual_to_real
else:
real_to_virtual = virtual_to_real = 1.0
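        # Sketch of the intent behind virtual depth: depth is predicted in a "virtual"
        # camera space tied to a canonical focal length (self.virtual_focal) and image
        # scale, which reduces sensitivity to per-image intrinsics; virtual_to_real is
        # the factor that maps such a prediction back to metric depth (applied to cube_z
        # further below when self.virtual_depth is enabled).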
# 2D boxes are needed to apply deltas
src_boxes = torch.cat([box_per_im.tensor for box_per_im in proposal_boxes], dim=0)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_scales = (src_heights**2 + src_widths**2).sqrt()
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
# For some methods, we need the predicted 2D box,
# e.g., the differentiable tensors from the 2D box head.
pred_src_boxes = torch.cat([box_per_im.tensor for box_per_im in pred_boxes], dim=0)
pred_widths = pred_src_boxes[:, 2] - pred_src_boxes[:, 0]
pred_heights = pred_src_boxes[:, 3] - pred_src_boxes[:, 1]
pred_src_x = (pred_src_boxes[:, 2] + pred_src_boxes[:, 0]) * 0.5
pred_src_y = (pred_src_boxes[:, 3] + pred_src_boxes[:, 1]) * 0.5
# forward predictions
cube_2d_deltas, cube_z, cube_dims, cube_pose, cube_uncert = self.cube_head(cube_features)
# simple indexing re-used commonly for selection purposes
fg_inds = torch.arange(n)
# Z when clusters are used
if cube_z is not None and self.cluster_bins > 1:
# compute closest bin assignments per batch per category (batch x n_category)
scales_diff = (self.priors_z_scales.detach().T.unsqueeze(0) - src_scales.unsqueeze(1).unsqueeze(2)).abs()
# assign the correct scale prediction.
# (the others are not used / thrown away)
assignments = scales_diff.argmin(1)
# select FG, category, and correct cluster
cube_z = cube_z[fg_inds, :, box_classes, :][fg_inds, assignments[fg_inds, box_classes]]
elif cube_z is not None:
# if z is available, collect the per-category predictions.
cube_z = cube_z[fg_inds, box_classes, :]
cube_dims = cube_dims[fg_inds, box_classes, :]
cube_pose = cube_pose[fg_inds, box_classes, :, :]
if self.use_confidence:
# if uncertainty is available, collect the per-category predictions.
cube_uncert = cube_uncert[fg_inds, box_classes]
cube_2d_deltas = cube_2d_deltas[fg_inds, box_classes, :]
# apply our predicted deltas based on src boxes.
cube_x = src_ctr_x + src_widths * cube_2d_deltas[:, 0]
cube_y = src_ctr_y + src_heights * cube_2d_deltas[:, 1]
cube_xy = torch.cat((cube_x.unsqueeze(1), cube_y.unsqueeze(1)), dim=1)
cube_dims_norm = cube_dims
if self.dims_priors_enabled:
# gather prior dimensions
prior_dims = self.priors_dims_per_cat.detach().repeat([n, 1, 1, 1])[fg_inds, box_classes]
prior_dims_mean = prior_dims[:, 0, :]
prior_dims_std = prior_dims[:, 1, :]
if self.dims_priors_func == 'sigmoid':
prior_dims_min = (prior_dims_mean - 3*prior_dims_std).clip(0.0)
prior_dims_max = (prior_dims_mean + 3*prior_dims_std)
cube_dims = util.scaled_sigmoid(cube_dims_norm, min=prior_dims_min, max=prior_dims_max)
elif self.dims_priors_func == 'exp':
cube_dims = torch.exp(cube_dims_norm.clip(max=5)) * prior_dims_mean
else:
# no priors are used
cube_dims = torch.exp(cube_dims_norm.clip(max=5))
if self.allocentric_pose:
# To compare with GTs, we need the pose to be egocentric, not allocentric
cube_pose_allocentric = cube_pose
cube_pose = util.R_from_allocentric(Ks_scaled_per_box, cube_pose, u=cube_x.detach(), v=cube_y.detach())
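            # Note: R_from_allocentric needs the projected box center (u, v) and the
            # intrinsics because an allocentric rotation is defined relative to the
            # viewing ray through that center; the GT poses are egocentric (camera frame).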
cube_z = cube_z.squeeze()
        if self.z_type == 'sigmoid':
cube_z_norm = torch.sigmoid(cube_z)
cube_z = cube_z_norm * 100
elif self.z_type == 'log':
cube_z_norm = cube_z
cube_z = torch.exp(cube_z)
elif self.z_type == 'clusters':
# gather the mean depth, same operation as above, for a n x c result
z_means = self.priors_z_stats[:, :, 0].T.unsqueeze(0).repeat([n, 1, 1])
z_means = torch.gather(z_means, 1, assignments.unsqueeze(1)).squeeze(1)
# gather the std depth, same operation as above, for a n x c result
z_stds = self.priors_z_stats[:, :, 1].T.unsqueeze(0).repeat([n, 1, 1])
z_stds = torch.gather(z_stds, 1, assignments.unsqueeze(1)).squeeze(1)
# do not learn these, they are static
z_means = z_means.detach()
z_stds = z_stds.detach()
z_means = z_means[fg_inds, box_classes]
z_stds = z_stds[fg_inds, box_classes]
z_mins = (z_means - 3*z_stds).clip(0)
z_maxs = (z_means + 3*z_stds)
cube_z_norm = cube_z
cube_z = util.scaled_sigmoid(cube_z, min=z_mins, max=z_maxs)
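            # The raw prediction is squashed into the assigned cluster's plausible range,
            # roughly mean +/- 3 std (clipped at 0). Illustrative numbers only: a cluster
            # with mean depth 20 and std 5 constrains cube_z to about [5, 35].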
if self.virtual_depth:
cube_z = (cube_z * virtual_to_real)
if self.training:
prefix = 'Cube/'
storage = get_event_storage()
# Pull off necessary GT information
# let lowercase->2D and uppercase->3D
# [x, y, Z, W, H, L]
gt_2d = gt_boxes3D[:, :2]
gt_z = gt_boxes3D[:, 2]
gt_dims = gt_boxes3D[:, 3:6]
# this box may have been mirrored and scaled so
# we need to recompute XYZ in 3D by backprojecting.
gt_x3d = gt_z * (gt_2d[:, 0] - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
gt_y3d = gt_z * (gt_2d[:, 1] - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
gt_3d = torch.stack((gt_x3d, gt_y3d, gt_z)).T
# put together the GT boxes
gt_box3d = torch.cat((gt_3d, gt_dims), dim=1)
# These are the corners which will be the target for all losses!!
gt_corners = util.get_cuboid_verts_faces(gt_box3d, gt_poses)[0]
# project GT corners
gt_proj_boxes = torch.bmm(Ks_scaled_per_box, gt_corners.transpose(1,2))
gt_proj_boxes /= gt_proj_boxes[:, -1, :].clone().unsqueeze(1)
gt_proj_x1 = gt_proj_boxes[:, 0, :].min(1)[0]
gt_proj_y1 = gt_proj_boxes[:, 1, :].min(1)[0]
gt_proj_x2 = gt_proj_boxes[:, 0, :].max(1)[0]
gt_proj_y2 = gt_proj_boxes[:, 1, :].max(1)[0]
gt_widths = gt_proj_x2 - gt_proj_x1
gt_heights = gt_proj_y2 - gt_proj_y1
gt_x = gt_proj_x1 + 0.5 * gt_widths
gt_y = gt_proj_y1 + 0.5 * gt_heights
gt_proj_boxes = torch.stack((gt_proj_x1, gt_proj_y1, gt_proj_x2, gt_proj_y2), dim=1)
if self.disentangled_loss:
'''
                Disentangled loss compares each variable group to the
                cuboid corners, which is generally more robust to hyperparameters.
'''
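                # Concretely, each variable group below is evaluated by swapping only that
                # group's prediction into an otherwise ground-truth cuboid, computing its 8
                # corners, and comparing them against gt_corners (e.g. dis_z_corners uses
                # the predicted depth together with the GT center, dims and pose).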
# compute disentangled Z corners
cube_dis_x3d_from_z = cube_z * (gt_2d[:, 0] - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_dis_y3d_from_z = cube_z * (gt_2d[:, 1] - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_dis_z = torch.cat((torch.stack((cube_dis_x3d_from_z, cube_dis_y3d_from_z, cube_z)).T, gt_dims), dim=1)
dis_z_corners = util.get_cuboid_verts_faces(cube_dis_z, gt_poses)[0]
# compute disentangled XY corners
cube_dis_x3d = gt_z * (cube_x - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_dis_y3d = gt_z * (cube_y - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_dis_XY = torch.cat((torch.stack((cube_dis_x3d, cube_dis_y3d, gt_z)).T, gt_dims), dim=1)
dis_XY_corners = util.get_cuboid_verts_faces(cube_dis_XY, gt_poses)[0]
loss_xy = self.l1_loss(dis_XY_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Pose
dis_pose_corners = util.get_cuboid_verts_faces(gt_box3d, cube_pose)[0]
# Dims
dis_dims_corners = util.get_cuboid_verts_faces(torch.cat((gt_3d, cube_dims), dim=1), gt_poses)[0]
# Loss dims
loss_dims = self.l1_loss(dis_dims_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Loss z
loss_z = self.l1_loss(dis_z_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Rotation uses chamfer or l1 like others
if self.chamfer_pose:
loss_pose = self.chamfer_loss(dis_pose_corners, gt_corners)
else:
loss_pose = self.l1_loss(dis_pose_corners, gt_corners).contiguous().view(n, -1).mean(dim=1)
# Non-disentangled training losses
else:
'''
These loss functions are fairly arbitrarily designed.
Generally, they are in some normalized space but there
are many alternative implementations for most functions.
'''
# XY
gt_deltas = (gt_2d.clone() - torch.cat((src_ctr_x.unsqueeze(1), src_ctr_y.unsqueeze(1)), dim=1)) \
/ torch.cat((src_widths.unsqueeze(1), src_heights.unsqueeze(1)), dim=1)
loss_xy = self.l1_loss(cube_2d_deltas, gt_deltas).mean(1)
# Dims
if self.dims_priors_enabled:
cube_dims_gt_normspace = torch.log(gt_dims/prior_dims)
loss_dims = self.l1_loss(cube_dims_norm, cube_dims_gt_normspace).mean(1)
else:
loss_dims = self.l1_loss(cube_dims_norm, torch.log(gt_dims)).mean(1)
# Pose
try:
if self.allocentric_pose:
gt_poses_allocentric = util.R_to_allocentric(Ks_scaled_per_box, gt_poses, u=cube_x.detach(), v=cube_y.detach())
loss_pose = 1-so3_relative_angle(cube_pose_allocentric, gt_poses_allocentric, eps=0.1, cos_angle=True)
else:
loss_pose = 1-so3_relative_angle(cube_pose, gt_poses, eps=0.1, cos_angle=True)
# Can fail with bad EPS values/instability
except:
loss_pose = None
if self.z_type == 'direct':
loss_z = self.l1_loss(cube_z, gt_z)
elif self.z_type == 'sigmoid':
loss_z = self.l1_loss(cube_z_norm, (gt_z * real_to_virtual / 100).clip(0, 1))
elif self.z_type == 'log':
loss_z = self.l1_loss(cube_z_norm, torch.log((gt_z * real_to_virtual).clip(0.01)))
elif self.z_type == 'clusters':
loss_z = self.l1_loss(cube_z_norm, (((gt_z * real_to_virtual) - z_means)/(z_stds)))
total_3D_loss_for_reporting = loss_dims*self.loss_w_dims
            if loss_pose is not None:
                total_3D_loss_for_reporting += loss_pose*self.loss_w_pose
            if cube_2d_deltas is not None:
                total_3D_loss_for_reporting += loss_xy*self.loss_w_xy
            if loss_z is not None:
                total_3D_loss_for_reporting += loss_z*self.loss_w_z
# reporting does not need gradients
total_3D_loss_for_reporting = total_3D_loss_for_reporting.detach()
if self.loss_w_joint > 0:
'''
If we are using joint [entangled] loss, then we also need to pair all
predictions together and compute a chamfer or l1 loss vs. cube corners.
'''
cube_dis_x3d_from_z = cube_z * (cube_x - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_dis_y3d_from_z = cube_z * (cube_y - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_dis_z = torch.cat((torch.stack((cube_dis_x3d_from_z, cube_dis_y3d_from_z, cube_z)).T, cube_dims), dim=1)
dis_z_corners_joint = util.get_cuboid_verts_faces(cube_dis_z, cube_pose)[0]
if self.chamfer_pose and self.disentangled_loss:
loss_joint = self.chamfer_loss(dis_z_corners_joint, gt_corners)
else:
loss_joint = self.l1_loss(dis_z_corners_joint, gt_corners).contiguous().view(n, -1).mean(dim=1)
valid_joint = loss_joint < np.inf
total_3D_loss_for_reporting += (loss_joint*self.loss_w_joint).detach()
# compute errors for tracking purposes
z_error = (cube_z - gt_z).detach().abs()
dims_error = (cube_dims - gt_dims).detach().abs()
xy_error = (cube_xy - gt_2d).detach().abs()
storage.put_scalar(prefix + 'z_error', z_error.mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'dims_error', dims_error.mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'xy_error', xy_error.mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'z_close', (z_error<0.20).float().mean().item(), smoothing_hint=False)
storage.put_scalar(prefix + 'total_3D_loss', self.loss_w_3d * self.safely_reduce_losses(total_3D_loss_for_reporting), smoothing_hint=False)
if self.inverse_z_weight:
'''
Weights all losses to prioritize close up boxes.
'''
gt_z = gt_boxes3D[:, 2]
inverse_z_w = 1/torch.log(gt_z.clip(E_CONSTANT))
loss_dims *= inverse_z_w
# scale based on log, but clip at e
                if cube_2d_deltas is not None:
loss_xy *= inverse_z_w
if loss_z is not None:
loss_z *= inverse_z_w
if loss_pose is not None:
loss_pose *= inverse_z_w
if self.loss_w_joint > 0:
loss_joint *= inverse_z_w
if self.use_confidence > 0:
uncert_sf = SQRT_2_CONSTANT * torch.exp(-cube_uncert)
loss_dims *= uncert_sf
                if cube_2d_deltas is not None:
                    loss_xy *= uncert_sf
                if loss_z is not None:
loss_z *= uncert_sf
if loss_pose is not None:
loss_pose *= uncert_sf
if self.loss_w_joint > 0:
loss_joint *= uncert_sf
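                # The scaling above is the usual learned (aleatoric-style) uncertainty
                # weighting for L1-type losses; the 'uncert' term added below penalizes
                # large cube_uncert so the network cannot ignore hard boxes by simply
                # inflating its predicted uncertainty.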
losses.update({prefix + 'uncert': self.use_confidence*self.safely_reduce_losses(cube_uncert.clone())})
storage.put_scalar(prefix + 'conf', torch.exp(-cube_uncert).mean().item(), smoothing_hint=False)
# store per batch loss stats temporarily
self.batch_losses = [batch_losses.mean().item() for batch_losses in total_3D_loss_for_reporting.split(num_boxes_per_image)]
if self.loss_w_dims > 0:
losses.update({
prefix + 'loss_dims': self.safely_reduce_losses(loss_dims) * self.loss_w_dims * self.loss_w_3d,
})
            if cube_2d_deltas is not None:
losses.update({
prefix + 'loss_xy': self.safely_reduce_losses(loss_xy) * self.loss_w_xy * self.loss_w_3d,
})
            if loss_z is not None:
losses.update({
prefix + 'loss_z': self.safely_reduce_losses(loss_z) * self.loss_w_z * self.loss_w_3d,
})
if loss_pose is not None:
losses.update({
prefix + 'loss_pose': self.safely_reduce_losses(loss_pose) * self.loss_w_pose * self.loss_w_3d,
})
if self.loss_w_joint > 0:
if valid_joint.any():
losses.update({prefix + 'loss_joint': self.safely_reduce_losses(loss_joint[valid_joint]) * self.loss_w_joint * self.loss_w_3d})
'''
Inference
'''
if len(cube_z.shape) == 0:
cube_z = cube_z.unsqueeze(0)
# inference
cube_x3d = cube_z * (cube_x - Ks_scaled_per_box[:, 0, 2])/Ks_scaled_per_box[:, 0, 0]
cube_y3d = cube_z * (cube_y - Ks_scaled_per_box[:, 1, 2])/Ks_scaled_per_box[:, 1, 1]
cube_3D = torch.cat((torch.stack((cube_x3d, cube_y3d, cube_z)).T, cube_dims, cube_xy*im_ratios_per_box.unsqueeze(1)), dim=1)
if self.use_confidence:
cube_conf = torch.exp(-cube_uncert)
cube_3D = torch.cat((cube_3D, cube_conf.unsqueeze(1)), dim=1)
        # convert the predictions to instances per image
cube_3D = cube_3D.split(num_boxes_per_image)
cube_pose = cube_pose.split(num_boxes_per_image)
box_classes = box_classes.split(num_boxes_per_image)
pred_instances = None
pred_instances = instances if not self.training else \
[Instances(image_size) for image_size in im_current_dims]
for cube_3D_i, cube_pose_i, instances_i, K, im_dim, im_scale_ratio, box_classes_i, pred_boxes_i in \
zip(cube_3D, cube_pose, pred_instances, Ks, im_current_dims, im_scales_ratio, box_classes, pred_boxes):
# merge scores if they already exist
if hasattr(instances_i, 'scores'):
instances_i.scores = (instances_i.scores * cube_3D_i[:, -1])**(1/2)
# assign scores if none are present
else:
instances_i.scores = cube_3D_i[:, -1]
# assign box classes if none exist
if not hasattr(instances_i, 'pred_classes'):
instances_i.pred_classes = box_classes_i
# assign predicted boxes if none exist
if not hasattr(instances_i, 'pred_boxes'):
instances_i.pred_boxes = pred_boxes_i
instances_i.pred_bbox3D = util.get_cuboid_verts_faces(cube_3D_i[:, :6], cube_pose_i)[0]
instances_i.pred_center_cam = cube_3D_i[:, :3]
instances_i.pred_center_2D = cube_3D_i[:, 6:8]
instances_i.pred_dimensions = cube_3D_i[:, 3:6]
instances_i.pred_pose = cube_pose_i
if self.training:
return pred_instances, losses
else:
return pred_instances
def _sample_proposals(
self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor, matched_ious=None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes, matched_ious=matched_ious
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
return sampled_idxs, gt_classes[sampled_idxs]
@torch.no_grad()
def label_and_sample_proposals(self, proposals: List[Instances], targets: List[Instances]) -> List[Instances]:
        # separate valid and ignore gts
targets_ign = [target[target.gt_classes < 0] for target in targets]
targets = [target[target.gt_classes >= 0] for target in targets]
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(targets, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
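        # Per image: match proposals to the valid GT boxes by IoU, then mark background
        # proposals that are largely covered by an "ignore" GT region (pairwise_ioa, i.e.
        # intersection over the proposal's own area, >= ignore_thresh) with label -1 so
        # they are sampled neither as positives nor as negatives.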
for proposals_per_image, targets_per_image, targets_ign_per_image in zip(proposals, targets, targets_ign):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, proposals_per_image.proposal_boxes)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
try:
if len(targets_ign_per_image) > 0:
# compute the quality matrix, only on subset of background
background_inds = (matched_labels == 0).nonzero().squeeze()
# determine the boxes inside ignore regions with sufficient threshold
if background_inds.numel() > 1:
match_quality_matrix_ign = pairwise_ioa(targets_ign_per_image.gt_boxes, proposals_per_image.proposal_boxes[background_inds])
matched_labels[background_inds[match_quality_matrix_ign.max(0)[0] >= self.ignore_thresh]] = -1
del match_quality_matrix_ign
except:
pass
gt_arange = torch.arange(match_quality_matrix.shape[1]).to(matched_idxs.device)
matched_ious = match_quality_matrix[matched_idxs, gt_arange]
sampled_idxs, gt_classes = self._sample_proposals(matched_idxs, matched_labels, targets_per_image.gt_classes, matched_ious=matched_ious)
# Set target attributes of the sampled proposals:
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
# We index all the attributes of targets that start with "gt_"
# and have not been added to proposals yet (="gt_classes").
# NOTE: here the indexing waste some compute, because heads
# like masks, keypoints, etc, will filter the proposals again,
# (by foreground/background, or number of keypoints in the image, etc)
# so we essentially index the data twice.
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def safely_reduce_losses(self, loss):
valid = (~(loss.isinf())) & (~(loss.isnan()))
if valid.any():
return loss[valid].mean()
else:
# no valid losses, simply zero out
return loss.mean()*0.0
| 41,015 | 42.634043 | 151 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/roi_heads/__init__.py
|
from .roi_heads import *
| 24 | 24 | 24 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/proposal_generator/rpn.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from typing import Dict, List, Tuple
import torch
from typing import List, Tuple, Union
import torch.nn.functional as F
from detectron2.config import configurable
from detectron2.utils.events import get_event_storage
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import Boxes, Instances, pairwise_iou, pairwise_ioa
from detectron2.utils.memory import retry_if_cuda_oom
from fvcore.nn import smooth_l1_loss
from detectron2.layers import cat
from detectron2.layers import nonzero_tuple
from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss
from detectron2.modeling.proposal_generator import RPN
from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY
@PROPOSAL_GENERATOR_REGISTRY.register()
class RPNWithIgnore(RPN):
@configurable
def __init__(
self,
*,
ignore_thresh: float = 0.5,
objectness_uncertainty: str = 'none',
**kwargs
):
super().__init__(**kwargs)
self.ignore_thresh = ignore_thresh
self.objectness_uncertainty = objectness_uncertainty
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["ignore_thresh"] = cfg.MODEL.RPN.IGNORE_THRESHOLD
ret["objectness_uncertainty"] = cfg.MODEL.RPN.OBJECTNESS_UNCERTAINTY
return ret
@torch.jit.unused
@torch.no_grad()
def label_and_sample_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
anchors = Boxes.cat(anchors)
# separate valid and ignore gts
gt_boxes_ign = [x.gt_boxes[x.gt_classes < 0] for x in gt_instances]
gt_boxes = [x.gt_boxes[x.gt_classes >= 0] for x in gt_instances]
del gt_instances
gt_labels = []
matched_gt_boxes = []
for gt_boxes_i, gt_boxes_ign_i in zip(gt_boxes, gt_boxes_ign):
"""
gt_boxes_i: ground-truth boxes for i-th image
gt_boxes_ign_i: ground-truth ignore boxes for i-th image
"""
match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
# Matching is memory-expensive and may result in CPU tensors. But the result is small
gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
gt_arange = torch.arange(match_quality_matrix.shape[1]).to(matched_idxs.device)
matched_ious = match_quality_matrix[matched_idxs, gt_arange]
best_ious_gt_vals, best_ious_gt_ind = match_quality_matrix.max(dim=1)
del match_quality_matrix
best_inds = torch.tensor(list(set(best_ious_gt_ind.tolist()) & set((gt_labels_i == 1).nonzero().squeeze(1).tolist())))
# A vector of labels (-1, 0, 1) for each anchor
# which denote (ignore, background, foreground)
gt_labels_i = self._subsample_labels(gt_labels_i, matched_ious=matched_ious)
            # override the best possible GT options, always selected for sampling.
# otherwise aggressive thresholds may produce HUGE amounts of low quality FG.
if best_inds.numel() > 0:
gt_labels_i[best_inds] = 1.0
if len(gt_boxes_i) == 0:
# These values won't be used anyway since the anchor is labeled as background
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
else:
# TODO wasted indexing computation for ignored boxes
matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
if len(gt_boxes_ign_i) > 0:
# compute the quality matrix, only on subset of background
background_inds = (gt_labels_i == 0).nonzero().squeeze()
if background_inds.numel() > 1:
match_quality_matrix_ign = retry_if_cuda_oom(pairwise_ioa)(gt_boxes_ign_i, anchors[background_inds])
# determine the boxes inside ignore regions with sufficient threshold
gt_labels_i[background_inds[match_quality_matrix_ign.max(0)[0] >= self.ignore_thresh]] = -1
del match_quality_matrix_ign
gt_labels.append(gt_labels_i) # N,AHW
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
def _subsample_labels(self, label, matched_ious=None):
"""
Randomly sample a subset of positive and negative examples, and overwrite
the label vector to the ignore value (-1) for all elements that are not
included in the sample.
Args:
            label (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
"""
pos_idx, neg_idx = subsample_labels(
label, self.batch_size_per_image, self.positive_fraction, 0, matched_ious=matched_ious
)
# Fill with the ignore label (-1), then set positive and negative labels
label.fill_(-1)
label.scatter_(0, pos_idx, 1)
label.scatter_(0, neg_idx, 0)
return label
@torch.jit.unused
def losses(
self,
anchors: List[Boxes],
pred_objectness_logits: List[torch.Tensor],
gt_labels: List[torch.Tensor],
pred_anchor_deltas: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
) -> Dict[str, torch.Tensor]:
"""
Return the losses from a set of RPN predictions and their associated ground-truth.
Args:
anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each
has shape (Hi*Wi*A, B), where B is box dimension (4 or 5).
pred_objectness_logits (list[Tensor]): A list of L elements.
Element i is a tensor of shape (N, Hi*Wi*A) representing
the predicted objectness logits for all anchors.
gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
(N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors
to proposals.
gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
Returns:
dict[loss name -> loss value]: A dict mapping from loss name to loss value.
Loss names are: `loss_rpn_cls` for objectness classification and
`loss_rpn_loc` for proposal localization.
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai))
# Log the number of positive/negative anchors per-image that's used in training
pos_mask = gt_labels == 1
num_pos_anchors = pos_mask.sum().item()
num_neg_anchors = (gt_labels == 0).sum().item()
storage = get_event_storage()
storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images)
storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images)
if not self.objectness_uncertainty.lower() in ['none']:
localization_loss, objectness_loss = _dense_box_regression_loss_with_uncertainty(
anchors,
self.box2box_transform,
pred_anchor_deltas,
pred_objectness_logits,
gt_boxes,
pos_mask,
box_reg_loss_type=self.box_reg_loss_type,
smooth_l1_beta=self.smooth_l1_beta,
uncertainty_type=self.objectness_uncertainty,
)
else:
localization_loss = _dense_box_regression_loss(
anchors,
self.box2box_transform,
pred_anchor_deltas,
gt_boxes,
pos_mask,
box_reg_loss_type=self.box_reg_loss_type,
smooth_l1_beta=self.smooth_l1_beta,
)
valid_mask = gt_labels >= 0
objectness_loss = F.binary_cross_entropy_with_logits(
cat(pred_objectness_logits, dim=1)[valid_mask],
gt_labels[valid_mask].to(torch.float32),
reduction="sum",
)
normalizer = self.batch_size_per_image * num_images
losses = {
"rpn/cls": objectness_loss / normalizer,
"rpn/loc": localization_loss / normalizer,
}
losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
return losses
def _dense_box_regression_loss_with_uncertainty(
anchors: List[Union[Boxes, torch.Tensor]],
box2box_transform: Box2BoxTransform,
pred_anchor_deltas: List[torch.Tensor],
pred_objectness_logits: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
fg_mask: torch.Tensor,
box_reg_loss_type="smooth_l1",
smooth_l1_beta=0.0,
uncertainty_type='centerness',
):
"""
Compute loss for dense multi-level box regression.
Loss is accumulated over ``fg_mask``.
Args:
anchors: #lvl anchor boxes, each is (HixWixA, 4)
pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)
gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))
fg_mask: the foreground boolean mask of shape (N, R) to compute loss on
box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou",
"diou", "ciou".
smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
"""
if isinstance(anchors[0], Boxes):
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
else:
anchors = cat(anchors)
n = len(gt_boxes)
boxes_fg = Boxes(anchors.unsqueeze(0).repeat([n, 1, 1])[fg_mask])
gt_boxes_fg = Boxes(torch.stack(gt_boxes)[fg_mask].detach())
objectness_targets_anchors = matched_pairwise_iou(boxes_fg, gt_boxes_fg).detach()
objectness_logits = torch.cat(pred_objectness_logits, dim=1)
# Numerically the same as (-(y*torch.log(p) + (1 - y)*torch.log(1 - p))).sum()
loss_box_conf = F.binary_cross_entropy_with_logits(
objectness_logits[fg_mask],
objectness_targets_anchors,
reduction='none'
)
loss_box_conf = (loss_box_conf * objectness_targets_anchors).sum()
# keep track of how scores look for FG / BG.
# ideally, FG slowly >>> BG scores as regression improves.
storage = get_event_storage()
storage.put_scalar("rpn/conf_pos_anchors", torch.sigmoid(objectness_logits[fg_mask]).mean().item())
storage.put_scalar("rpn/conf_neg_anchors", torch.sigmoid(objectness_logits[~fg_mask]).mean().item())
if box_reg_loss_type == "smooth_l1":
gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
loss_box_reg = smooth_l1_loss(
cat(pred_anchor_deltas, dim=1)[fg_mask],
gt_anchor_deltas[fg_mask],
beta=smooth_l1_beta,
reduction="none",
)
loss_box_reg = (loss_box_reg.sum(dim=1) * objectness_targets_anchors).sum()
else:
raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
return loss_box_reg, loss_box_conf
def subsample_labels(
labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int, matched_ious=None, eps=1e-4
):
"""
Return `num_samples` (or fewer, if not enough found)
random samples from `labels` which is a mixture of positives & negatives.
It will try to return as many positives as possible without
exceeding `positive_fraction * num_samples`, and then try to
fill the remaining slots with negatives.
Args:
labels (Tensor): (N, ) label vector with values:
* -1: ignore
* bg_label: background ("negative") class
* otherwise: one or more foreground ("positive") classes
num_samples (int): The total number of labels with value >= 0 to return.
Values that are not sampled will be filled with -1 (ignore).
positive_fraction (float): The number of subsampled labels with values > 0
is `min(num_positives, int(positive_fraction * num_samples))`. The number
of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
            In other words, if there are not enough positives, the sample is filled with
negatives. If there are also not enough negatives, then as many elements are
sampled as is possible.
bg_label (int): label index of background ("negative") class.
Returns:
pos_idx, neg_idx (Tensor):
1D vector of indices. The total length of both is `num_samples` or fewer.
"""
positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
negative = nonzero_tuple(labels == bg_label)[0]
num_pos = int(num_samples * positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = num_samples - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
#if positive_fraction == 1.0 and num_neg > 10:
# allow some negatives for statistics only.
#num_neg = 10
# randomly select positive and negative examples
if num_pos > 0 and matched_ious is not None:
perm1 = torch.multinomial(matched_ious[positive] + eps, num_pos)
else:
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
if num_neg > 0 and matched_ious is not None:
perm2 = torch.multinomial(matched_ious[negative] + eps, num_neg)
else:
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx = positive[perm1]
neg_idx = negative[perm2]
return pos_idx, neg_idx
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes that have the same number of boxes.
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
Args:
boxes1 (Boxes): bounding boxes, sized [N,4].
boxes2 (Boxes): same length as boxes1
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou
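# Usage sketch (illustrative only, not executed here):
#   b1 = Boxes(torch.tensor([[0., 0., 10., 10.]]))
#   b2 = Boxes(torch.tensor([[0., 0., 10., 20.]]))
#   matched_pairwise_iou(b1, b2)  # -> tensor([0.5]); only the diagonal is computed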
| 15,229 | 42.022599 | 141 |
py
|
omni3d
|
omni3d-main/cubercnn/modeling/proposal_generator/__init__.py
|
from .rpn import *
| 19 | 9 | 18 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/test.py
|
import _init_paths
import os
import argparse
from copy import deepcopy
from vqa.function.config import config, update_config
from vqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Get Test Result of VQA Network')
parser.add_argument('--cfg', type=str, help='path to answer net config yaml')
parser.add_argument('--ckpt', type=str, help='path to checkpoint of answer net')
parser.add_argument('--bs', type=int)
parser.add_argument('--gpus', type=int, nargs='+')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--result-path', type=str, help='path to store test result file.')
parser.add_argument('--result-name', type=str)
parser.add_argument('--split', default='test2015')
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.bs is not None:
config.TEST.BATCH_IMAGES = args.bs
if args.gpus is not None:
config.GPUS = ','.join([str(gpu) for gpu in args.gpus])
if args.split is not None:
config.DATASET.TEST_IMAGE_SET = args.split
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
return args, config
def main():
args, config = parse_args()
result_json_path = test_net(args, config,
ckpt_path=args.ckpt, save_path=args.result_path, save_name=args.result_name)
if __name__ == '__main__':
main()
| 1,525 | 32.173913 | 108 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/_init_paths.py
|
import os
import sys
this_dir = os.path.abspath(os.path.dirname(__file__))
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
root_path = os.path.join(this_dir, '../')
add_path(root_path)
| 224 | 15.071429 | 53 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/train_end2end.py
|
import _init_paths
import os
import argparse
import torch
import subprocess
from vqa.function.config import config, update_config
from vqa.function.train import train_net
from vqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Train Cognition Network')
parser.add_argument('--cfg', type=str, help='path to config file')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
parser.add_argument('--do-test', help='whether to generate csv result on test set',
default=False, action='store_true')
parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')
# easy test pretrain model
parser.add_argument('--partial-pretrain', type=str)
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
if args.partial_pretrain is not None:
config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain
if args.slurm:
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(29500)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
return args, config
def main():
args, config = parse_args()
rank, model = train_net(args, config)
if args.do_test and (rank is None or rank == 0):
test_net(args, config)
if __name__ == '__main__':
main()
| 2,191 | 33.793651 | 113 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/function/val.py
|
from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_batch]
datas = [batch[i] for i in range(len(batch)) if i != label_index_in_batch % len(batch)]
outputs = net(*datas)
outputs.update({'label': label})
metrics.update(outputs)
| 528 | 26.842105 | 95 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/function/test.py
|
import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from vqa.data.build import make_dataloader
from vqa.modules import *
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
print('test net...')
pprint.pprint(args)
pprint.pprint(config)
device_ids = [int(d) for d in config.GPUS.split(',')]
# os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if ckpt_path is None:
_, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
ckpt_path = '{}-best.model'.format(model_prefix)
print('Use best checkpoint {}...'.format(ckpt_path))
if save_path is None:
logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
split='test')
save_path = test_output_path
if not os.path.exists(save_path):
os.makedirs(save_path)
shutil.copy2(ckpt_path,
os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
# get network
model = eval(config.MODULE)(config)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
else:
torch.cuda.set_device(device_ids[0])
model = model.cuda()
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
smart_load_model_state_dict(model, checkpoint['state_dict'])
# loader
test_loader = make_dataloader(config, mode='test', distributed=False)
test_dataset = test_loader.dataset
test_database = test_dataset.database
# test
q_ids = []
answer_ids = []
model.eval()
cur_id = 0
for nbatch, batch in zip(trange(len(test_loader)), test_loader):
# for nbatch, batch in tqdm(enumerate(test_loader)):
bs = test_loader.batch_sampler.batch_size if test_loader.batch_sampler is not None else test_loader.batch_size
q_ids.extend([test_database[id]['question_id'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
batch = to_cuda(batch)
output = model(*batch)
answer_ids.extend(output['label_logits'].argmax(dim=1).detach().cpu().tolist())
cur_id += bs
result = [{'question_id': q_id, 'answer': test_dataset.answer_vocab[a_id]} for q_id, a_id in zip(q_ids, answer_ids)]
cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
result_json_path = os.path.join(save_path, '{}_vqa2_{}.json'.format(cfg_name if save_name is None else save_name,
config.DATASET.TEST_IMAGE_SET))
with open(result_json_path, 'w') as f:
json.dump(result, f)
print('result json saved to {}.'.format(result_json_path))
return result_json_path
| 3,359 | 39.481928 | 120 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/function/config.py
|
from easydict import EasyDict as edict
import yaml
_C = edict()
config = _C
# ------------------------------------------------------------------------------------- #
# Common options
# ------------------------------------------------------------------------------------- #
_C.RNG_SEED = -1
_C.OUTPUT_PATH = ''
_C.MODULE = ''
_C.GPUS = ''
_C.LOG_FREQUENT = 50
_C.VAL_FREQUENT = 1
_C.CHECKPOINT_FREQUENT = 1
_C.MODEL_PREFIX = ''
_C.NUM_WORKERS_PER_GPU = 4
_C.SCALES = ()
# ------------------------------------------------------------------------------------- #
# Common dataset options
# ------------------------------------------------------------------------------------- #
_C.DATASET = edict()
_C.DATASET.DATASET = ''
_C.DATASET.ANSWER_VOCAB_FILE = ''
_C.DATASET.ANSWER_VOCAB_SIZE = 3129
_C.DATASET.LABEL_INDEX_IN_BATCH = -1
_C.DATASET.APPEND_INDEX = False
_C.DATASET.TASK = 'Q2AR'
_C.DATASET.BASIC_ALIGN = False
_C.DATASET.DATASET_PATH = ''
_C.DATASET.ROOT_PATH = ''
_C.DATASET.TRAIN_IMAGE_SET = ''
_C.DATASET.VAL_IMAGE_SET = ''
_C.DATASET.TEST_IMAGE_SET = ''
_C.DATASET.TRAIN_ANNOTATION_FILE = ''
_C.DATASET.VAL_ANNOTATION_FILE = ''
_C.DATASET.TEST_ANNOTATION_FILE = ''
_C.DATASET.ONLY_USE_RELEVANT_DETS = True
_C.DATASET.ADD_IMAGE_AS_A_BOX = True
_C.DATASET.ZIP_MODE = False
_C.DATASET.CACHE_MODE = False
_C.DATASET.IGNORE_DB_CACHE = True
_C.DATASET.MASK_SIZE = 14
_C.DATASET.QA2R_NOQ = False
_C.DATASET.QA2R_AUG = False
_C.DATASET.BOXES = "36" # "36" or "10-100ada"
_C.DATASET.USE_IMDB = True
# ------------------------------------------------------------------------------------- #
# Common network options
# ------------------------------------------------------------------------------------- #
_C.NETWORK = edict()
_C.NETWORK.BLIND = False
_C.NETWORK.NO_GROUNDING = False
_C.NETWORK.PARTIAL_PRETRAIN = ""
_C.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES = []
_C.NETWORK.FOR_MASK_VL_MODELING_PRETRAIN = False
_C.NETWORK.NO_OBJ_ATTENTION = False
_C.NETWORK.IMAGE_FEAT_PRECOMPUTED = False
_C.NETWORK.IMAGE_NUM_LAYERS = 50
_C.NETWORK.IMAGE_C5_DILATED = False
_C.NETWORK.IMAGE_STRIDE_IN_1x1 = False
_C.NETWORK.PIXEL_MEANS = ()
_C.NETWORK.PIXEL_STDS = ()
_C.NETWORK.IMAGE_PRETRAINED = ''
_C.NETWORK.IMAGE_PRETRAINED_EPOCH = 0
_C.NETWORK.IMAGE_FROZEN_BACKBONE_STAGES = [1, 2]
_C.NETWORK.IMAGE_FROZEN_BN = True
_C.NETWORK.IMAGE_FINAL_DIM = 512
_C.NETWORK.IMAGE_SEMANTIC = False
_C.NETWORK.OUTPUT_CONV5 = False
_C.NETWORK.BERT_MODEL_NAME = 'bert-base-uncased'
_C.NETWORK.BERT_PRETRAINED = ''
_C.NETWORK.BERT_PRETRAINED_EPOCH = 0
_C.NETWORK.BERT_FROZEN = True
_C.NETWORK.BERT_ALIGN_QUESTION = True
_C.NETWORK.BERT_ALIGN_ANSWER = True
_C.NETWORK.BERT_USE_LAYER = -2
_C.NETWORK.BERT_WITH_NSP_LOSS = False
_C.NETWORK.BERT_WITH_MLM_LOSS = False
_C.NETWORK.ENABLE_CNN_REG_LOSS = True
_C.NETWORK.CNN_LOSS_WEIGHT = 1.0
_C.NETWORK.ANS_LOSS_WEIGHT = 1.0
_C.NETWORK.ANS_LOSS_TYPE = 'bce' # 'bce' or 'ce'
_C.NETWORK.REPLACE_OBJECT_CHANGE_LABEL = True
_C.NETWORK.VLBERT = edict()
# _C.NETWORK.VLBERT.vocab_size = None
_C.NETWORK.VLBERT.input_size = 1280
# 1: LN + [1x1 conv] 2: LN + [1x1 conv] + dropout 3: LN + [1x1 conv] + dropout + BertLayer
_C.NETWORK.VLBERT.input_transform_type = 1
_C.NETWORK.VLBERT.word_embedding_frozen = False
_C.NETWORK.VLBERT.obj_pos_id_relative = True
_C.NETWORK.VLBERT.hidden_size = 512
_C.NETWORK.VLBERT.visual_size = 512
_C.NETWORK.VLBERT.num_hidden_layers = 4
_C.NETWORK.VLBERT.num_attention_heads = 8
_C.NETWORK.VLBERT.intermediate_size = 2048
_C.NETWORK.VLBERT.hidden_act = "gelu"
_C.NETWORK.VLBERT.hidden_dropout_prob = 0.1
_C.NETWORK.VLBERT.attention_probs_dropout_prob = 0.1
_C.NETWORK.VLBERT.max_position_embeddings = 512
_C.NETWORK.VLBERT.type_vocab_size = 3
_C.NETWORK.VLBERT.vocab_size = 30522
_C.NETWORK.VLBERT.initializer_range = 0.02
_C.NETWORK.VLBERT.visual_scale_text_init = 0.0
_C.NETWORK.VLBERT.visual_scale_object_init = 0.0
_C.NETWORK.VLBERT.visual_ln = False
# 1: class embedding 2: class agnostic embedding 3: average of word embedding of text
_C.NETWORK.VLBERT.object_word_embed_mode = 2
_C.NETWORK.VLBERT.with_pooler = False
_C.NETWORK.VLBERT.position_padding_idx = -1
_C.NETWORK.CLASSIFIER_TYPE = "2fc" # 2fc or 1fc or mlm
_C.NETWORK.CLASSIFIER_PRETRAINED = False
_C.NETWORK.CLASSIFIER_HIDDEN_SIZE = 1024
_C.NETWORK.CLASSIFIER_DROPOUT = 0.1
_C.NETWORK.CLASSIFIER_SIGMOID = False
_C.NETWORK.CLASSIFIER_SIGMOID_LOSS_POSITIVE_WEIGHT = 1.0
# ------------------------------------------------------------------------------------- #
# Common training related options
# ------------------------------------------------------------------------------------- #
_C.TRAIN = edict()
_C.TRAIN.LR_MULT = []
_C.TRAIN.VISUAL_SCALE_TEXT_LR_MULT = 1.0
_C.TRAIN.VISUAL_SCALE_OBJECT_LR_MULT = 1.0
_C.TRAIN.VISUAL_SCALE_CLIP_GRAD_NORM = -1
_C.TRAIN.SHUFFLE = True
_C.TRAIN.FLIP_PROB = 0.5
_C.TRAIN.BATCH_IMAGES = 1
_C.TRAIN.ASPECT_GROUPING = True
_C.TRAIN.RESUME = False
_C.TRAIN.AUTO_RESUME = True
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 0
_C.TRAIN.OPTIMIZER = 'SGD'
_C.TRAIN.CLIP_GRAD_NORM = -1
_C.TRAIN.GRAD_ACCUMULATE_STEPS = 1
_C.TRAIN.LR = 0.1
_C.TRAIN.LR_SCHEDULE = 'step' # step/triangle/plateau
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = ()
_C.TRAIN.WARMUP = False
_C.TRAIN.WARMUP_METHOD = 'linear'
_C.TRAIN.WARMUP_FACTOR = 1.0 / 3
_C.TRAIN.WARMUP_STEPS = 1000
_C.TRAIN.WD = 0.0001
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.FP16 = False
_C.TRAIN.FP16_LOSS_SCALE = 128.0
_C.TRAIN.LOSS_LOGGERS = [('ans_loss', 'AnsLoss')]
# ------------------------------------------------------------------------------------- #
# Common validation related options
# ------------------------------------------------------------------------------------- #
_C.VAL = edict()
_C.VAL.SHUFFLE = False
_C.VAL.FLIP_PROB = 0
_C.VAL.BATCH_IMAGES = 1
# ------------------------------------------------------------------------------------- #
# Common testing related options
# ------------------------------------------------------------------------------------- #
_C.TEST = edict()
_C.TEST.SHUFFLE = False
_C.TEST.FLIP_PROB = 0
_C.TEST.TEST_EPOCH = 0
_C.TEST.BATCH_IMAGES = 1
def update_config(config_file):
with open(config_file) as f:
        exp_config = edict(yaml.safe_load(f))
for k, v in exp_config.items():
if k in config:
if isinstance(v, dict):
for vk, vv in v.items():
if vk in config[k]:
if vk == 'LR_STEP':
config[k][vk] = tuple(float(s) for s in vv.split(','))
elif vk == 'LOSS_LOGGERS':
config[k][vk] = [tuple(str(s) for s in vvi.split(',')) for vvi in vv]
elif vk == "VLBERT" and isinstance(vv, dict):
for vvk, vvv in vv.items():
if vvk in config[k][vk]:
config[k][vk][vvk] = vvv
else:
raise ValueError("key {}.{}.{} not in config.py".format(k, vk, vvk))
else:
config[k][vk] = vv
else:
raise ValueError("key {}.{} not in config.py".format(k, vk))
else:
if k == 'SCALES':
config[k] = (tuple(v))
else:
config[k] = v
else:
raise ValueError("key {} not in config.py".format(k))
| 7,553 | 36.211823 | 108 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/function/__init__.py
| 0 | 0 | 0 |
py
|
|
VLC-BERT
|
VLC-BERT-master/vqa/function/train.py
|
import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import vqa_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from vqa.data.build import make_dataloader, build_dataset, build_transforms
from vqa.modules import *
from vqa.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
# setup logger
logger, final_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if args.log_dir is None:
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# manually set random seed
if config.RNG_SEED > -1:
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
# cudnn
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int(os.environ.get('LOCAL_RANK') or 0)
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int(os.environ['MASTER_PORT'] or 23456)
world_size = int(os.environ['WORLD_SIZE'] or 1)
rank = int(os.environ['RANK'] or 0)
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if not config.TRAIN.FP16:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
if rank == 0:
summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if args.log_dir is not None:
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if not os.path.exists(tb_log_dir):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
train_loader, train_sampler = make_dataloader(config,
mode='train',
distributed=True,
num_replicas=world_size,
rank=rank,
expose_sampler=True)
val_loader = make_dataloader(config,
mode='val',
distributed=True,
num_replicas=world_size,
rank=rank)
batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
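        # Linear LR scaling: config.TRAIN.LR is treated as a per-sample rate and scaled by
        # the effective batch size (GPUs x images per GPU x grad-accumulation steps); e.g.
        # (illustrative numbers) LR=6.25e-5 with an effective batch of 256 gives 1.6e-2.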
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
            raise ValueError('Unsupported optimizer {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
else:
#os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
        assert num_gpus <= 1 or (not config.TRAIN.FP16), "fp16 is not supported with torch.nn.DataParallel. " \
"Please use amp.parallel.DistributedDataParallel instead."
total_gpus = num_gpus
rank = None
writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir is not None else None
# model
if num_gpus > 1:
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
# loader
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
            raise ValueError('Unsupported optimizer {}!'.format(config.TRAIN.OPTIMIZER))
# partial load pretrain state dict
if config.NETWORK.PARTIAL_PRETRAIN != "":
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if len(prefix_change) > 0:
pretrain_state_dict_parsed = {}
for k, v in pretrain_state_dict.items():
no_match = True
for pretrain_prefix, new_prefix in prefix_change:
if k.startswith(pretrain_prefix):
k = new_prefix + k[len(pretrain_prefix):]
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict)
# pretrained classifier
if config.NETWORK.CLASSIFIER_PRETRAINED:
print('Initializing classifier weight from pretrained word embeddings...')
answers_word_embed = []
for k, v in model.state_dict().items():
if 'word_embeddings.weight' in k:
word_embeddings = v.detach().clone()
break
for answer in train_loader.dataset.answer_vocab:
a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
a_word_embed = (torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0)).mean(dim=0)
answers_word_embed.append(a_word_embed)
answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
for name, module in model.named_modules():
if name.endswith('final_mlp'):
module[-1].weight.data = answers_word_embed_tensor.to(device=module[-1].weight.data.device)
# metrics
train_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
val_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(
vqa_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist,
num_replicas=world_size if args.dist else 1))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
# epoch end callbacks
epoch_end_callbacks = []
if (rank is None) or (rank == 0):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
host_metric_name='SoftAcc',
label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH)
    # record each param group's initial lr (needed by the lr schedulers below and when resuming)
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# resume/auto-resume
if rank is None or rank == 0:
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
# batch end callbacks
batch_size = len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
batches_per_epoch=len(train_loader),
epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
# setup lr step and lr scheduler
if config.TRAIN.LR_SCHEDULE == 'plateau':
print("Warning: not support resuming on plateau lr schedule!")
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=config.TRAIN.LR_FACTOR,
patience=1,
verbose=True,
threshold=1e-4,
threshold_mode='rel',
cooldown=2,
min_lr=0,
eps=1e-8)
elif config.TRAIN.LR_SCHEDULE == 'triangle':
lr_scheduler = WarmupLinearSchedule(optimizer,
config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
t_total=int(config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
elif config.TRAIN.LR_SCHEDULE == 'step':
lr_iters = [int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR,
warmup_factor=config.TRAIN.WARMUP_FACTOR,
warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
warmup_method=config.TRAIN.WARMUP_METHOD,
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
else:
raise ValueError("Not support lr schedule: {}.".format(config.TRAIN.LR_SCHEDULE))
# broadcast parameter and optimizer state from rank 0 before training start
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
# for v in optimizer.state_dict().values():
# distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
# apex: amp fp16 mixed-precision training
if config.TRAIN.FP16:
# model.apply(bn_fp16_half_eval)
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=False,
loss_scale=config.TRAIN.FP16_LOSS_SCALE,
min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics,
config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger,
rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks,
writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16,
clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
return rank, model
| 17,541 | 51.053412 | 147 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/modules/resnet_vlbert_for_vqa.py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class ResNetVLBERT(Module):
def __init__(self, config):
super(ResNetVLBERT, self).__init__(config)
self.enable_cnn_reg_loss = config.NETWORK.ENABLE_CNN_REG_LOSS
if not config.NETWORK.BLIND:
self.image_feature_extractor = FastRCNN(config,
average_pool=True,
final_dim=config.NETWORK.IMAGE_FINAL_DIM,
enable_cnn_reg_loss=self.enable_cnn_reg_loss)
if config.NETWORK.VLBERT.object_word_embed_mode == 1:
self.object_linguistic_embeddings = nn.Embedding(81, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 2:
self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 3:
self.object_linguistic_embeddings = None
else:
raise NotImplementedError
self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)
language_pretrained_model_path = None
if config.NETWORK.BERT_PRETRAINED != '':
language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
config.NETWORK.BERT_PRETRAINED_EPOCH)
elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
if os.path.isfile(weight_path):
language_pretrained_model_path = weight_path
self.language_pretrained_model_path = language_pretrained_model_path
if language_pretrained_model_path is None:
print("Warning: no pretrained language model found, training from scratch!!!")
self.vlbert = VisualLinguisticBert(config.NETWORK.VLBERT,
language_pretrained_model_path=language_pretrained_model_path)
# self.hm_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
# self.hi_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
dim = config.NETWORK.VLBERT.hidden_size
if config.NETWORK.CLASSIFIER_TYPE == "2fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
)
elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE)
)
elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
self.final_mlp = nn.Sequential(
transform,
nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
linear
)
else:
raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))
# init weights
self.init_weight()
self.fix_params()
def init_weight(self):
# self.hm_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hm_out.bias.data.zero_()
# self.hi_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hi_out.bias.data.zero_()
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0, std=0.02)
for m in self.final_mlp.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.constant_(m.bias, 0)
if self.config.NETWORK.CLASSIFIER_TYPE == 'mlm':
language_pretrained = torch.load(self.language_pretrained_model_path)
mlm_transform_state_dict = {}
pretrain_keys = []
for k, v in language_pretrained.items():
if k.startswith('cls.predictions.transform.'):
pretrain_keys.append(k)
k_ = k[len('cls.predictions.transform.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
mlm_transform_state_dict[k_] = v
print("loading pretrained classifier transform keys: {}.".format(pretrain_keys))
self.final_mlp[0].load_state_dict(mlm_transform_state_dict)
def train(self, mode=True):
super(ResNetVLBERT, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
def fix_params(self):
pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
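    # Shape sketch (illustrative, hypothetical sizes): with batch_size=2, L=4 tokens and
    # object_reps of shape [2, 10, 512], span_tags [2, 4] holds one box index per token
    # (masked entries are negative and get clamped to box 0); the result has shape
    # [2, 4, 512] with out[b, t] = object_reps[b, span_tags[b, t]].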
def prepare_text_from_qa(self, question, question_tags, question_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + answer_mask.sum(1)).max() + 3
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
a_end = q_end + 1 + answer_mask.sum(1, keepdim=True)
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.uint8, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
a_input_mask = (grid_j > q_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
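    # Layout sketch (illustrative): for a 5-token question and the single [MASK] "answer"
    # used by this VQA head, the packed sequence is
    #   [CLS] q1 q2 q3 q4 q5 [SEP] [MASK] [SEP] pad ...
    # with token_type_id 0 for the question segment, 1 for the answer segment (including its
    # trailing [SEP]), and ans_pos (= a_end - 1) pointing at the [MASK] position whose hidden
    # state is later fed to the classifier.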
def train_forward(self,
image,
boxes,
im_info,
question,
label,
):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
output_all_encoded_layers=False)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
# loss
ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)
outputs.update({'label_logits': logits,
'label': label,
'ans_loss': ans_loss})
loss = ans_loss.mean()
return outputs, loss
def inference_forward(self,
image,
boxes,
im_info,
question):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
output_all_encoded_layers=False)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
outputs.update({'label_logits': logits})
return outputs
| 16,341 | 47.064706 | 117 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/modules/__init__.py
|
from .resnet_vlbert_for_vqa import ResNetVLBERT
| 50 | 11.75 | 47 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/__init__.py
| 0 | 0 | 0 |
py
|
|
VLC-BERT
|
VLC-BERT-master/vqa/data/collate_batch.py
|
import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self, batch):
if not isinstance(batch, list):
batch = list(batch)
if batch[0][self.data_names.index('image')] is not None:
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
image_none = False
else:
image_none = True
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
for i, ibatch in enumerate(batch):
out = {}
if image_none:
out['image'] = None
else:
image = ibatch[self.data_names.index('image')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
question = ibatch[self.data_names.index('question')]
out['question'] = clip_pad_1d(question, max_question_length, pad=0)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for items in zip(*batch):
if items[0] is None:
out_tuple += (None,)
else:
out_tuple += (torch.stack(tuple(items), dim=0), )
return out_tuple
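# Padding sketch (illustrative, hypothetical sizes): for two samples with 5 and 8 boxes and
# questions of length 7 and 12, every sample is clipped/padded to 8 boxes (pad value -2,
# which the model later masks out via boxes[:, :, 0] > -1.5) and to 12 question tokens
# (pad id 0) before the fields are stacked along dim 0.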
| 2,035 | 35.357143 | 115 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/build.py
|
import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'vqa': VQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset not in catalogs"
return DATASET_CATALOGS[dataset_name](*args, **kwargs)
def make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size):
if aspect_grouping:
group_ids = dataset.group_ids
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False
)
return batch_sampler
def make_dataloader(cfg, dataset=None, mode='train', distributed=False, num_replicas=None, rank=None,
expose_sampler=False):
assert mode in ['train', 'val', 'test']
if mode == 'train':
ann_file = cfg.DATASET.TRAIN_ANNOTATION_FILE
image_set = cfg.DATASET.TRAIN_IMAGE_SET
aspect_grouping = cfg.TRAIN.ASPECT_GROUPING
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TRAIN.BATCH_IMAGES * num_gpu
shuffle = cfg.TRAIN.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
elif mode == 'val':
ann_file = cfg.DATASET.VAL_ANNOTATION_FILE
image_set = cfg.DATASET.VAL_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.VAL.BATCH_IMAGES * num_gpu
shuffle = cfg.VAL.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
else:
ann_file = cfg.DATASET.TEST_ANNOTATION_FILE
image_set = cfg.DATASET.TEST_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TEST.BATCH_IMAGES * num_gpu
shuffle = cfg.TEST.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
transform = build_transforms(cfg, mode)
if dataset is None:
dataset = build_dataset(dataset_name=cfg.DATASET.DATASET, ann_file=ann_file, image_set=image_set,
use_imdb=cfg.DATASET.USE_IMDB,
with_precomputed_visual_feat=cfg.NETWORK.IMAGE_FEAT_PRECOMPUTED,
boxes=cfg.DATASET.BOXES,
answer_vocab_file=cfg.DATASET.ANSWER_VOCAB_FILE,
root_path=cfg.DATASET.ROOT_PATH, data_path=cfg.DATASET.DATASET_PATH,
test_mode=(mode == 'test'), transform=transform,
zip_mode=cfg.DATASET.ZIP_MODE, cache_mode=cfg.DATASET.CACHE_MODE,
cache_db=True if (rank is None or rank == 0) else False,
ignore_db_cache=cfg.DATASET.IGNORE_DB_CACHE,
add_image_as_a_box=cfg.DATASET.ADD_IMAGE_AS_A_BOX,
aspect_grouping=aspect_grouping,
mask_size=(cfg.DATASET.MASK_SIZE, cfg.DATASET.MASK_SIZE),
pretrained_model_name=cfg.NETWORK.BERT_MODEL_NAME)
sampler = make_data_sampler(dataset, shuffle, distributed, num_replicas, rank)
batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size)
collator = BatchCollator(dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=False,
collate_fn=collator)
if expose_sampler:
return dataloader, sampler
return dataloader
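# Minimal usage sketch (hedged; `cfg` is a fully loaded experiment config whose dataset
# paths are assumed to exist):
#
# train_loader = make_dataloader(cfg, mode='train', distributed=False)
# for image, boxes, im_info, question, label in train_loader:
#     ...  # one collated mini-batch per iteration, field order given by dataset.data_names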
| 4,336 | 42.37 | 106 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/datasets/vqa.py
|
import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import pprint
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from pycocotools.coco import COCO
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
class VQA(Dataset):
def __init__(self, image_set, root_path, data_path, answer_vocab_file, use_imdb=True,
with_precomputed_visual_feat=False, boxes="36",
transform=None, test_mode=False,
zip_mode=False, cache_mode=False, cache_db=True, ignore_db_cache=True,
tokenizer=None, pretrained_model_name=None,
add_image_as_a_box=False, mask_size=(14, 14),
aspect_grouping=False, **kwargs):
"""
Visual Question Answering Dataset
:param image_set: image folder name
:param root_path: root path to cache database loaded from annotation file
        :param data_path: path to the VQA dataset
:param transform: transform
:param test_mode: test mode means no labels available
:param zip_mode: reading images and metadata in zip archive
        :param cache_mode: cache the whole dataset to RAM first, then __getitem__ reads from RAM
:param ignore_db_cache: ignore previous cached database, reload it from annotation file
:param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
:param add_image_as_a_box: add whole image as a box
:param mask_size: size of instance mask of each object
:param aspect_grouping: whether to group images via their aspect
:param kwargs:
"""
super(VQA, self).__init__()
assert not cache_mode, 'currently not support cache mode!'
categories = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat',
'trafficlight', 'firehydrant', 'stopsign', 'parkingmeter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sportsball', 'kite', 'baseballbat', 'baseballglove',
'skateboard', 'surfboard', 'tennisracket', 'bottle', 'wineglass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza', 'donut',
'cake', 'chair', 'couch', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tv', 'laptop', 'mouse',
'remote', 'keyboard', 'cellphone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
'clock', 'vase', 'scissors', 'teddybear', 'hairdrier', 'toothbrush']
vqa_question = {
"train2014": "vqa/v2_OpenEnded_mscoco_train2014_questions.json",
"valminusminival2014": "vqa/v2_OpenEnded_mscoco_valminusminival2014_questions.json",
"val2014": "vqa/v2_OpenEnded_mscoco_val2014_questions.json",
"minival2014": "vqa/v2_OpenEnded_mscoco_minival2014_questions.json",
"test-dev2015": "vqa/v2_OpenEnded_mscoco_test-dev2015_questions.json",
"test2015": "vqa/v2_OpenEnded_mscoco_test2015_questions.json",
}
vqa_annot = {
"train2014": "vqa/v2_mscoco_train2014_annotations.json",
"valminusminival2014": "vqa/v2_mscoco_valminusminival2014_annotations.json",
"val2014": "vqa/v2_mscoco_val2014_annotations.json",
"minival2014": "vqa/v2_mscoco_minival2014_annotations.json",
}
vqa_imdb = {
"train2014": "vqa/vqa_imdb/imdb_train2014.npy",
"val2014": "vqa/vqa_imdb/imdb_val2014.npy",
'test2015': "vqa/vqa_imdb/imdb_test2015.npy",
'minival2014': "vqa/vqa_imdb/imdb_minival2014.npy",
}
if boxes == "36":
precomputed_boxes = {
'train2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
"valminusminival2014": ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'val2014': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
"minival2014": ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
"test-dev2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome_36"),
"test2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome_36"),
}
elif boxes == "10-100ada":
precomputed_boxes = {
'train2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
"valminusminival2014": ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'val2014': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
"minival2014": ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
"test-dev2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome"),
"test2015": ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome"),
}
else:
raise ValueError("Not support boxes: {}!".format(boxes))
coco_dataset = {
"train2014": ("train2014", "annotations/instances_train2014.json"),
"valminusminival2014": ("val2014", "annotations/instances_val2014.json"),
"val2014": ("val2014", "annotations/instances_val2014.json"),
"minival2014": ("val2014", "annotations/instances_val2014.json"),
"test-dev2015": ("test2015", "annotations/image_info_test2015.json"),
"test2015": ("test2015", "annotations/image_info_test2015.json"),
}
        self.periodStrip = re.compile(r"(?!<=\d)(\.)(?!\d)")
        self.commaStrip = re.compile(r"(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
self.use_imdb = use_imdb
self.boxes = boxes
self.test_mode = test_mode
self.with_precomputed_visual_feat = with_precomputed_visual_feat
self.category_to_idx = {c: i for i, c in enumerate(categories)}
self.data_path = data_path
self.root_path = root_path
with open(answer_vocab_file, 'r', encoding='utf8') as f:
self.answer_vocab = [w.lower().strip().strip('\r').strip('\n').strip('\r') for w in f.readlines()]
self.answer_vocab = list(filter(lambda x: x != '', self.answer_vocab))
if not self.use_imdb:
self.answer_vocab = [self.processPunctuation(w) for w in self.answer_vocab]
self.image_sets = [iset.strip() for iset in image_set.split('+')]
self.ann_files = [os.path.join(data_path, vqa_annot[iset]) for iset in self.image_sets] \
if not self.test_mode else [None for iset in self.image_sets]
self.q_files = [os.path.join(data_path, vqa_question[iset]) for iset in self.image_sets]
self.imdb_files = [os.path.join(data_path, vqa_imdb[iset]) for iset in self.image_sets]
self.precomputed_box_files = [
os.path.join(data_path, precomputed_boxes[iset][0],
'{0}.zip@/{0}'.format(precomputed_boxes[iset][1])
if zip_mode else precomputed_boxes[iset][1])
for iset in self.image_sets]
self.box_bank = {}
self.coco_datasets = [(os.path.join(data_path,
coco_dataset[iset][0],
'COCO_{}_{{:012d}}.jpg'.format(coco_dataset[iset][0]))
if not zip_mode else
os.path.join(data_path,
coco_dataset[iset][0] + '.zip@/' + coco_dataset[iset][0],
'COCO_{}_{{:012d}}.jpg'.format(coco_dataset[iset][0])),
os.path.join(data_path, coco_dataset[iset][1]))
for iset in self.image_sets]
self.transform = transform
self.zip_mode = zip_mode
self.cache_mode = cache_mode
self.cache_db = cache_db
self.ignore_db_cache = ignore_db_cache
self.aspect_grouping = aspect_grouping
self.cache_dir = os.path.join(root_path, 'cache')
self.add_image_as_a_box = add_image_as_a_box
self.mask_size = mask_size
if not os.path.exists(self.cache_dir):
makedirsExist(self.cache_dir)
self.tokenizer = tokenizer if tokenizer is not None \
else BertTokenizer.from_pretrained(
'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
cache_dir=self.cache_dir)
if zip_mode:
self.zipreader = ZipReader()
self.database = self.load_annotations()
if self.aspect_grouping:
self.group_ids = self.group_aspect(self.database)
@property
def data_names(self):
if self.test_mode:
return ['image', 'boxes', 'im_info', 'question']
else:
return ['image', 'boxes', 'im_info', 'question', 'label']
def __getitem__(self, index):
idb = self.database[index]
# image, boxes, im_info
boxes_data = self._load_json(idb['box_fn'])
if self.with_precomputed_visual_feat:
image = None
w0, h0 = idb['width'], idb['height']
boxes_features = torch.as_tensor(
np.frombuffer(self.b64_decode(boxes_data['features']), dtype=np.float32).reshape((boxes_data['num_boxes'], -1))
)
else:
image = self._load_image(idb['image_fn'])
w0, h0 = image.size
boxes = torch.as_tensor(
np.frombuffer(self.b64_decode(boxes_data['boxes']), dtype=np.float32).reshape(
(boxes_data['num_boxes'], -1))
)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
if 'image_box_feature' in boxes_data:
image_box_feature = torch.as_tensor(
np.frombuffer(
self.b64_decode(boxes_data['image_box_feature']), dtype=np.float32
).reshape((1, -1))
)
else:
image_box_feature = boxes_features.mean(0, keepdim=True)
boxes_features = torch.cat((image_box_feature, boxes_features), dim=0)
im_info = torch.tensor([w0, h0, 1.0, 1.0])
flipped = False
if self.transform is not None:
image, boxes, _, im_info, flipped = self.transform(image, boxes, None, im_info, flipped)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h - 1)
# flip: 'left' -> 'right', 'right' -> 'left'
if self.use_imdb:
q_tokens = idb['question_tokens']
else:
q_tokens = self.tokenizer.tokenize(idb['question'])
if flipped:
q_tokens = self.flip_tokens(q_tokens, verbose=False)
if not self.test_mode:
answers = idb['answers']
if flipped:
answers_tokens = [a.split(' ') for a in answers]
answers_tokens = [self.flip_tokens(a_toks, verbose=False) for a_toks in answers_tokens]
answers = [' '.join(a_toks) for a_toks in answers_tokens]
label = self.get_soft_target(answers)
# question
if self.use_imdb:
q_str = ' '.join(q_tokens)
q_retokens = self.tokenizer.tokenize(q_str)
else:
q_retokens = q_tokens
q_ids = self.tokenizer.convert_tokens_to_ids(q_retokens)
# concat box feature to box
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=-1)
if self.test_mode:
return image, boxes, im_info, q_ids
else:
# print([(self.answer_vocab[i], p.item()) for i, p in enumerate(label) if p.item() != 0])
return image, boxes, im_info, q_ids, label
@staticmethod
def flip_tokens(tokens, verbose=True):
changed = False
tokens_new = [tok for tok in tokens]
for i, tok in enumerate(tokens):
if tok == 'left':
tokens_new[i] = 'right'
changed = True
elif tok == 'right':
tokens_new[i] = 'left'
changed = True
if verbose and changed:
logging.info('[Tokens Flip] {} -> {}'.format(tokens, tokens_new))
return tokens_new
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
def answer_to_ind(self, answer):
if answer in self.answer_vocab:
return self.answer_vocab.index(answer)
else:
return self.answer_vocab.index('<unk>')
def get_soft_target(self, answers):
soft_target = torch.zeros(len(self.answer_vocab), dtype=torch.float)
answer_indices = [self.answer_to_ind(answer) for answer in answers]
gt_answers = list(enumerate(answer_indices))
unique_answers = set(answer_indices)
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.index('<unk>'):
soft_target[answer] = avg_acc
return soft_target
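    # Worked example (illustrative): with the usual 10 human answers, an answer that appears
    # twice scores (2 * min(1, 1/3) + 8 * min(1, 2/3)) / 10 = 0.6, while one given by at least
    # 4 annotators averages to 1.0, matching the standard VQA soft-accuracy target.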
def processPunctuation(self, inText):
if inText == '<unk>':
return inText
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = self.periodStrip.sub("",
outText,
re.UNICODE)
return outText
def load_annotations(self):
tic = time.time()
database = []
if self.use_imdb:
db_cache_name = 'vqa2_imdb_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
else:
db_cache_name = 'vqa2_nonimdb_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
if self.with_precomputed_visual_feat:
db_cache_name += 'visualprecomp'
if self.zip_mode:
db_cache_name = db_cache_name + '_zipmode'
if self.test_mode:
db_cache_name = db_cache_name + '_testmode'
db_cache_root = os.path.join(self.root_path, 'cache')
db_cache_path = os.path.join(db_cache_root, '{}.pkl'.format(db_cache_name))
if os.path.exists(db_cache_path):
if not self.ignore_db_cache:
# reading cached database
print('cached database found in {}.'.format(db_cache_path))
with open(db_cache_path, 'rb') as f:
print('loading cached database from {}...'.format(db_cache_path))
tic = time.time()
database = cPickle.load(f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
else:
print('cached database ignored.')
        # cache ignored or not found; reload the database from the annotation file
print('loading database of split {}...'.format('+'.join(self.image_sets)))
tic = time.time()
if self.use_imdb:
for imdb_file, (coco_path, coco_annot), box_file \
in zip(self.imdb_files, self.coco_datasets, self.precomputed_box_files):
print("loading imdb: {}".format(imdb_file))
imdb = np.load(imdb_file, allow_pickle=True)
print("imdb info:")
pprint.pprint(imdb[0])
coco = COCO(coco_annot)
for item in imdb[1:]:
idb = {'image_id': item['image_id'],
'image_fn': coco_path.format(item['image_id']),
'width': coco.imgs[item['image_id']]['width'],
'height': coco.imgs[item['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(item['image_id'])),
'question_id': item['question_id'],
'question_tokens': item['question_tokens'],
'answers': item['answers'] if not self.test_mode else None,
}
database.append(idb)
else:
for ann_file, q_file, (coco_path, coco_annot), box_file \
in zip(self.ann_files, self.q_files, self.coco_datasets, self.precomputed_box_files):
qs = self._load_json(q_file)['questions']
anns = self._load_json(ann_file)['annotations'] if not self.test_mode else ([None] * len(qs))
coco = COCO(coco_annot)
for ann, q in zip(anns, qs):
idb = {'image_id': q['image_id'],
'image_fn': coco_path.format(q['image_id']),
'width': coco.imgs[q['image_id']]['width'],
'height': coco.imgs[q['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(q['image_id'])),
'question_id': q['question_id'],
'question': q['question'],
'answers': [a['answer'] for a in ann['answers']] if not self.test_mode else None,
'multiple_choice_answer': ann['multiple_choice_answer'] if not self.test_mode else None,
"question_type": ann['question_type'] if not self.test_mode else None,
"answer_type": ann['answer_type'] if not self.test_mode else None,
}
database.append(idb)
print('Done (t={:.2f}s)'.format(time.time() - tic))
# cache database via cPickle
if self.cache_db:
print('caching database to {}...'.format(db_cache_path))
tic = time.time()
if not os.path.exists(db_cache_root):
makedirsExist(db_cache_root)
with open(db_cache_path, 'wb') as f:
cPickle.dump(database, f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
        vert = ~horz  # logical not of the horizontal mask
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def load_precomputed_boxes(self, box_file):
if box_file in self.box_bank:
return self.box_bank[box_file]
else:
in_data = {}
with open(box_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in (['boxes', 'features'] if self.with_precomputed_visual_feat else ['boxes']):
item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
dtype=np.float32).reshape((item['num_boxes'], -1))
in_data[item['image_id']] = item
self.box_bank[box_file] = in_data
return in_data
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 21,774 | 45.527778 | 127 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/datasets/__init__.py
|
from .vqa import VQA
| 22 | 6.666667 | 20 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/samplers/grouped_batch_sampler.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
    It also tries to provide mini-batches that follow an ordering as close as
    possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
        # for example, if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
        # get an inverse mapping from sampled indices to the position where
# they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
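# Tiny worked example (illustrative, not part of the original file): with
# group_ids = [0, 1, 0, 1], batch_size = 2 and a base sampler that yields [2, 0, 3, 1],
# the indices split by group into [2, 0] and [3, 1]; batches are then ordered by the
# position of their first element in the sampler output, giving [[2, 0], [3, 1]].
# Typical construction mirrors build.py:
#
# batch_sampler = GroupedBatchSampler(
#     torch.utils.data.sampler.RandomSampler(dataset), dataset.group_ids, batch_size=2)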
| 4,846 | 40.42735 | 88 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/samplers/distributed.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
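# Usage sketch (illustrative, not part of the original file): every process builds the same
# sampler and calls set_epoch(epoch) once per epoch so the epoch-seeded shuffle matches
# across replicas while each rank reads a disjoint slice of the dataset:
#
# sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=True)
# loader = torch.utils.data.DataLoader(dataset, batch_size=8, sampler=sampler)
# for epoch in range(num_epochs):
#     sampler.set_epoch(epoch)
#     for batch in loader:
#         ...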
| 2,568 | 37.924242 | 86 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/samplers/__init__.py
|
from .distributed import DistributedSampler
from .grouped_batch_sampler import GroupedBatchSampler
| 100 | 24.25 | 54 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/transforms/__init__.py
|
from .transforms import Compose
from .transforms import Resize
from .transforms import RandomHorizontalFlip
from .transforms import ToTensor
from .transforms import Normalize
from .build import build_transforms
| 212 | 25.625 | 44 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/transforms/build.py
|
from . import transforms as T
def build_transforms(cfg, mode='train'):
assert mode in ['train', 'test', 'val']
min_size = cfg.SCALES[0]
max_size = cfg.SCALES[1]
assert min_size <= max_size
if mode == 'train':
flip_prob = cfg.TRAIN.FLIP_PROB
elif mode == 'test':
flip_prob = cfg.TEST.FLIP_PROB
else:
flip_prob = cfg.VAL.FLIP_PROB
to_bgr255 = True
normalize_transform = T.Normalize(
mean=cfg.NETWORK.PIXEL_MEANS, std=cfg.NETWORK.PIXEL_STDS, to_bgr255=to_bgr255
)
# transform = T.Compose(
# [
# T.Resize(min_size, max_size),
# T.RandomHorizontalFlip(flip_prob),
# T.ToTensor(),
# normalize_transform,
# T.FixPadding(min_size, max_size, pad=0)
# ]
# )
transform = T.Compose(
[
T.Resize(min_size, max_size),
T.RandomHorizontalFlip(flip_prob),
T.ToTensor(),
normalize_transform,
]
)
return transform
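# Usage sketch (illustrative; `cfg` is a loaded experiment config): the returned Compose
# operates on the 5-tuple convention used throughout the data pipeline,
#
# transform = build_transforms(cfg, mode='train')
# image, boxes, _, im_info, flipped = transform(image, boxes, None, im_info, False)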
| 1,034 | 23.069767 | 85 |
py
|
VLC-BERT
|
VLC-BERT-master/vqa/data/transforms/transforms.py
|
import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
image, boxes, masks, im_info, flipped = t(image, boxes, masks, im_info, flipped)
return image, boxes, masks, im_info, flipped
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(max_size * min_original_size / max_original_size)
if (w <= h and w == size) or (h <= w and h == size):
return (w, h)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (ow, oh)
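    # Worked example (illustrative): with min_size=600 and max_size=1000, a 1200x800 image
    # keeps size=600 since 1200/800 * 600 = 900 <= 1000, and get_size returns (900, 600);
    # a 2000x500 image would overflow max_size, so size shrinks to int(1000 * 500 / 2000) = 250
    # and get_size returns (1000, 250).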
def __call__(self, image, boxes, masks, im_info, flipped):
origin_size = im_info[:2]
size = self.get_size(origin_size)
if image is not None:
image = F.resize(image, (size[1], size[0]))
ratios = [size[0] * 1.0 / origin_size[0], size[1] * 1.0 / origin_size[1]]
if boxes is not None:
boxes[:, [0, 2]] *= ratios[0]
boxes[:, [1, 3]] *= ratios[1]
im_info[0], im_info[1] = size
im_info[2], im_info[3] = ratios
return image, boxes, masks, im_info, flipped
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, boxes, masks, im_info, flipped):
if random.random() < self.prob:
w, h = im_info[:2]
if image is not None:
image = F.hflip(image)
if boxes is not None:
boxes[:, [0, 2]] = w - 1 - boxes[:, [2, 0]]
if masks is not None:
masks = torch.as_tensor(masks.numpy()[:, :, ::-1].tolist())
flipped = not flipped
return image, boxes, masks, im_info, flipped
class ToTensor(object):
def __call__(self, image, boxes, masks, im_info, flipped):
return F.to_tensor(image) if image is not None else image, boxes, masks, im_info, flipped
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, boxes, masks, im_info, flipped
class FixPadding(object):
def __init__(self, min_size, max_size, pad=0):
self.min_size = min_size
self.max_size = max_size
self.pad = pad
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
# padding to fixed size for determinacy
c, h, w = image.shape
if h <= w:
h1 = self.min_size
w1 = self.max_size
else:
h1 = self.max_size
w1 = self.min_size
padded_image = image.new_zeros((c, h1, w1)).fill_(self.pad)
padded_image[:, :h, :w] = image
image = padded_image
return image, boxes, masks, im_info, flipped
| 4,104 | 30.821705 | 97 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/test.py
|
import _init_paths
import os
import argparse
from copy import deepcopy
from aokvqa.function.config import config, update_config
from aokvqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Get Test Result of OK-VQA Network')
parser.add_argument('--cfg', type=str, help='path to answer net config yaml')
parser.add_argument('--ckpt', type=str, help='path to checkpoint of answer net')
parser.add_argument('--bs', type=int)
parser.add_argument('--gpus', type=int, nargs='+')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--result-path', type=str, help='path to store test result file.')
parser.add_argument('--result-name', type=str)
parser.add_argument('--split', default='val2017')
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.bs is not None:
config.TEST.BATCH_IMAGES = args.bs
if args.gpus is not None:
config.GPUS = ','.join([str(gpu) for gpu in args.gpus])
if args.split is not None:
config.DATASET.TEST_IMAGE_SET = args.split
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
return args, config
def main():
args, config = parse_args()
    result_json_path, _ = test_net(args, config,
                                   ckpt_path=args.ckpt, save_path=args.result_path, save_name=args.result_name)
if __name__ == '__main__':
main()
| 1,533 | 32.347826 | 108 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/_init_paths.py
|
import os
import sys
this_dir = os.path.abspath(os.path.dirname(__file__))
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
root_path = os.path.join(this_dir, '../')
add_path(root_path)
| 224 | 15.071429 | 53 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/train_end2end.py
|
import _init_paths
import os
import argparse
import torch
import subprocess
from aokvqa.function.config import config, update_config
from aokvqa.function.train import train_net
from aokvqa.function.test import test_net
from external.PythonEvaluationTools.aokvqa_vqaEval import run_eval
def parse_args():
parser = argparse.ArgumentParser('Train Cognition Network')
parser.add_argument('--cfg', type=str, help='path to config file')
parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
parser.add_argument('--do-test', help='whether to generate csv result on test set',
default=True, action='store_true')
parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')
# easy test pretrain model
parser.add_argument('--partial-pretrain', type=str)
args = parser.parse_args()
if args.cfg is not None:
update_config(args.cfg)
if args.model_dir is not None:
config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
if args.partial_pretrain is not None:
config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain
if args.slurm:
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(29500)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
return args, config
def main():
args, config = parse_args()
rank, model = train_net(args, config)
if args.do_test and (rank is None or rank == 0):
res_path, save_path = test_net(args, config)
run_eval(res_path, split='val')
if __name__ == '__main__':
main()
| 2,328 | 34.830769 | 113 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/function/val.py
|
from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_batch]
datas = [batch[i] for i in range(len(batch)) if i != label_index_in_batch % len(batch)]
outputs = net(*datas)
outputs.update({'label': label})
metrics.update(outputs)
| 528 | 26.842105 | 95 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/function/test.py
|
import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from aokvqa.data.build import make_dataloader
from aokvqa.modules import *
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
print('test net...')
pprint.pprint(args)
pprint.pprint(config)
device_ids = [int(d) for d in config.GPUS.split(',')]
# os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if ckpt_path is None:
_, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
ckpt_path = '{}-latest.model'.format(model_prefix)
print('Use latest checkpoint {}...'.format(ckpt_path))
if save_path is None:
logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
split='test')
save_path = test_output_path
if not os.path.exists(save_path):
os.makedirs(save_path)
# shutil.copy2(ckpt_path,
# os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
# get network
model = eval(config.MODULE)(config)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
else:
torch.cuda.set_device(device_ids[0])
model = model.cuda()
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
smart_load_model_state_dict(model, checkpoint['state_dict'])
# loader
test_loader = make_dataloader(config, mode='test', distributed=False)
test_dataset = test_loader.dataset
test_database = test_dataset.database
# test
q_ids = []
answer_ids = []
attn_weights = []
model.eval()
cur_id = 0
for nbatch, batch in zip(trange(len(test_loader)), test_loader):
# for nbatch, batch in tqdm(enumerate(test_loader)):
bs = test_loader.batch_sampler.batch_size if test_loader.batch_sampler is not None else test_loader.batch_size
q_ids.extend([test_database[id]['question_id'] for id in range(cur_id, min(cur_id + bs, len(test_database)))])
batch = to_cuda(batch)
output = model(*batch)
answer_ids.extend(output['label_logits'].argmax(dim=1).detach().cpu().tolist())
attn_weights.extend(output['attn_weights'].detach().cpu().tolist())
cur_id += bs
result = [{'question_id': q_id, 'answer': test_dataset.answer_vocab[a_id], 'attn_weights': attn} for q_id, a_id, attn in zip(q_ids, answer_ids, attn_weights)]
cfg_name = os.path.splitext(os.path.basename(args.cfg))[0]
result_json_path = os.path.join(save_path, '{}_aokvqa_{}.json'.format(cfg_name if save_name is None else save_name,
config.DATASET.TEST_IMAGE_SET))
with open(result_json_path, 'w') as f:
json.dump(result, f)
print('result json saved to {}.'.format(result_json_path))
return result_json_path, save_path
| 3,526 | 40.494118 | 162 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/function/config.py
|
from easydict import EasyDict as edict
import yaml
_C = edict()
config = _C
# ------------------------------------------------------------------------------------- #
# Common options
# ------------------------------------------------------------------------------------- #
_C.RNG_SEED = -1
_C.OUTPUT_PATH = ''
_C.MODULE = ''
_C.GPUS = ''
_C.LOG_FREQUENT = 50
_C.VAL_FREQUENT = 1
_C.CHECKPOINT_FREQUENT = 1
_C.MODEL_PREFIX = ''
_C.NUM_WORKERS_PER_GPU = 4
_C.SCALES = ()
# ------------------------------------------------------------------------------------- #
# Common dataset options
# ------------------------------------------------------------------------------------- #
_C.DATASET = edict()
_C.DATASET.DATASET = ''
_C.DATASET.ANSWER_VOCAB_FILE = ''
_C.DATASET.ANSWER_VOCAB_SIZE = 3129
_C.DATASET.LABEL_INDEX_IN_BATCH = -1
_C.DATASET.APPEND_INDEX = False
_C.DATASET.TASK = 'Q2AR'
_C.DATASET.BASIC_ALIGN = False
_C.DATASET.DATASET_PATH = ''
_C.DATASET.ROOT_PATH = ''
_C.DATASET.TRAIN_IMAGE_SET = ''
_C.DATASET.VAL_IMAGE_SET = ''
_C.DATASET.TEST_IMAGE_SET = ''
_C.DATASET.TRAIN_ANNOTATION_FILE = ''
_C.DATASET.VAL_ANNOTATION_FILE = ''
_C.DATASET.TEST_ANNOTATION_FILE = ''
_C.DATASET.ONLY_USE_RELEVANT_DETS = True
_C.DATASET.ADD_IMAGE_AS_A_BOX = True
_C.DATASET.ZIP_MODE = False
_C.DATASET.CACHE_MODE = False
_C.DATASET.IGNORE_DB_CACHE = True
_C.DATASET.MASK_SIZE = 14
_C.DATASET.QA2R_NOQ = False
_C.DATASET.QA2R_AUG = False
_C.DATASET.BOXES = "36" # "36" or "10-100ada"
_C.DATASET.USE_IMDB = True
_C.DATASET.USE_SBERT = False
_C.DATASET.COMMONSENSE_EXP_NAME = ''
_C.DATASET.MAX_COMMONSENSE_LEN = 5
# ------------------------------------------------------------------------------------- #
# Common network options
# ------------------------------------------------------------------------------------- #
_C.NETWORK = edict()
_C.NETWORK.BLIND = False
_C.NETWORK.NO_GROUNDING = False
_C.NETWORK.PARTIAL_PRETRAIN = ""
_C.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES = []
_C.NETWORK.FOR_MASK_VL_MODELING_PRETRAIN = False
_C.NETWORK.NO_OBJ_ATTENTION = False
_C.NETWORK.IMAGE_FEAT_PRECOMPUTED = False
_C.NETWORK.IMAGE_NUM_LAYERS = 50
_C.NETWORK.IMAGE_C5_DILATED = False
_C.NETWORK.IMAGE_STRIDE_IN_1x1 = False
_C.NETWORK.PIXEL_MEANS = ()
_C.NETWORK.PIXEL_STDS = ()
_C.NETWORK.IMAGE_PRETRAINED = ''
_C.NETWORK.IMAGE_PRETRAINED_EPOCH = 0
_C.NETWORK.IMAGE_FROZEN_BACKBONE_STAGES = [1, 2]
_C.NETWORK.IMAGE_FROZEN_BN = True
_C.NETWORK.IMAGE_FINAL_DIM = 512
_C.NETWORK.IMAGE_SEMANTIC = False
_C.NETWORK.OUTPUT_CONV5 = False
_C.NETWORK.BERT_MODEL_NAME = 'bert-base-uncased'
_C.NETWORK.BERT_PRETRAINED = ''
_C.NETWORK.BERT_PRETRAINED_EPOCH = 0
_C.NETWORK.BERT_FROZEN = True
_C.NETWORK.BERT_ALIGN_QUESTION = True
_C.NETWORK.BERT_ALIGN_ANSWER = True
_C.NETWORK.BERT_USE_LAYER = -2
_C.NETWORK.BERT_WITH_NSP_LOSS = False
_C.NETWORK.BERT_WITH_MLM_LOSS = False
_C.NETWORK.ENABLE_CNN_REG_LOSS = True
_C.NETWORK.CNN_LOSS_WEIGHT = 1.0
_C.NETWORK.ANS_LOSS_WEIGHT = 1.0
_C.NETWORK.ANS_LOSS_TYPE = 'bce' # 'bce' or 'ce'
_C.NETWORK.REPLACE_OBJECT_CHANGE_LABEL = True
_C.NETWORK.WEAK_ATTN_LOSS = False
_C.NETWORK.VLBERT = edict()
# _C.NETWORK.VLBERT.vocab_size = None
_C.NETWORK.VLBERT.input_size = 1280
# 1: LN + [1x1 conv] 2: LN + [1x1 conv] + dropout 3: LN + [1x1 conv] + dropout + BertLayer
_C.NETWORK.VLBERT.input_transform_type = 1
_C.NETWORK.VLBERT.word_embedding_frozen = False
_C.NETWORK.VLBERT.obj_pos_id_relative = True
_C.NETWORK.VLBERT.hidden_size = 512
_C.NETWORK.VLBERT.visual_size = 512
_C.NETWORK.VLBERT.num_hidden_layers = 4
_C.NETWORK.VLBERT.num_attention_heads = 8
_C.NETWORK.VLBERT.intermediate_size = 2048
_C.NETWORK.VLBERT.hidden_act = "gelu"
_C.NETWORK.VLBERT.hidden_dropout_prob = 0.1
_C.NETWORK.VLBERT.attention_probs_dropout_prob = 0.1
_C.NETWORK.VLBERT.max_position_embeddings = 512
_C.NETWORK.VLBERT.type_vocab_size = 3
_C.NETWORK.VLBERT.vocab_size = 30522
_C.NETWORK.VLBERT.initializer_range = 0.02
_C.NETWORK.VLBERT.visual_scale_text_init = 0.0
_C.NETWORK.VLBERT.visual_scale_object_init = 0.0
_C.NETWORK.VLBERT.visual_ln = False
# 1: class embedding 2: class agnostic embedding 3: average of word embedding of text
_C.NETWORK.VLBERT.object_word_embed_mode = 2
_C.NETWORK.VLBERT.with_pooler = False
_C.NETWORK.VLBERT.position_padding_idx = -1
_C.NETWORK.VLBERT.commonsense_emb_type = ''
_C.NETWORK.CLASSIFIER_TYPE = "2fc" # 2fc or 1fc or mlm
_C.NETWORK.CLASSIFIER_PRETRAINED = False
_C.NETWORK.CLASSIFIER_HIDDEN_SIZE = 1024
_C.NETWORK.CLASSIFIER_DROPOUT = 0.1
_C.NETWORK.CLASSIFIER_SIGMOID = False
_C.NETWORK.CLASSIFIER_SIGMOID_LOSS_POSITIVE_WEIGHT = 1.0
# ------------------------------------------------------------------------------------- #
# Common training related options
# ------------------------------------------------------------------------------------- #
_C.TRAIN = edict()
_C.TRAIN.LR_MULT = []
_C.TRAIN.VISUAL_SCALE_TEXT_LR_MULT = 1.0
_C.TRAIN.VISUAL_SCALE_OBJECT_LR_MULT = 1.0
_C.TRAIN.VISUAL_SCALE_CLIP_GRAD_NORM = -1
_C.TRAIN.SHUFFLE = True
_C.TRAIN.FLIP_PROB = 0.5
_C.TRAIN.BATCH_IMAGES = 1
_C.TRAIN.ASPECT_GROUPING = True
_C.TRAIN.RESUME = False
_C.TRAIN.AUTO_RESUME = True
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 0
_C.TRAIN.OPTIMIZER = 'SGD'
_C.TRAIN.CLIP_GRAD_NORM = -1
_C.TRAIN.GRAD_ACCUMULATE_STEPS = 1
_C.TRAIN.LR = 0.1
_C.TRAIN.LR_SCHEDULE = 'step' # step/triangle/plateau
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = ()
_C.TRAIN.WARMUP = False
_C.TRAIN.WARMUP_METHOD = 'linear'
_C.TRAIN.WARMUP_FACTOR = 1.0 / 3
_C.TRAIN.WARMUP_STEPS = 1000
_C.TRAIN.WD = 0.0001
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.FP16 = False
_C.TRAIN.FP16_LOSS_SCALE = 128.0
_C.TRAIN.LOSS_LOGGERS = [('ans_loss', 'AnsLoss')]
# ------------------------------------------------------------------------------------- #
# Common validation related options
# ------------------------------------------------------------------------------------- #
_C.VAL = edict()
_C.VAL.SHUFFLE = False
_C.VAL.FLIP_PROB = 0
_C.VAL.BATCH_IMAGES = 1
# ------------------------------------------------------------------------------------- #
# Common testing related options
# ------------------------------------------------------------------------------------- #
_C.TEST = edict()
_C.TEST.SHUFFLE = False
_C.TEST.FLIP_PROB = 0
_C.TEST.TEST_EPOCH = 0
_C.TEST.BATCH_IMAGES = 1
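# update_config() below merges an experiment YAML file into the defaults above,
# validating every key against this file. An illustrative (not exhaustive) YAML
# snippet using keys defined here:
#
#   OUTPUT_PATH: ./output
#   GPUS: '0'
#   DATASET:
#     DATASET: aokvqa
#   TRAIN:
#     OPTIMIZER: AdamW
#     LR_STEP: '14,18'   # parsed into a tuple of floats below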
def update_config(config_file):
with open(config_file) as f:
exp_config = edict(yaml.safe_load(f))
for k, v in exp_config.items():
if k in config:
if isinstance(v, dict):
for vk, vv in v.items():
if vk in config[k]:
if vk == 'LR_STEP':
config[k][vk] = tuple(float(s) for s in vv.split(','))
elif vk == 'LOSS_LOGGERS':
config[k][vk] = [tuple(str(s) for s in vvi.split(',')) for vvi in vv]
elif vk == "VLBERT" and isinstance(vv, dict):
for vvk, vvv in vv.items():
if vvk in config[k][vk]:
config[k][vk][vvk] = vvv
else:
raise ValueError("key {}.{}.{} not in config.py".format(k, vk, vvk))
else:
config[k][vk] = vv
else:
raise ValueError("key {}.{} not in config.py".format(k, vk))
else:
if k == 'SCALES':
config[k] = (tuple(v))
else:
config[k] = v
else:
raise ValueError("key {} not in config.py".format(k))
| 7,737 | 36.201923 | 108 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/function/__init__.py
| 0 | 0 | 0 |
py
|
|
VLC-BERT
|
VLC-BERT-master/aokvqa/function/train.py
|
import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import vqa_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from aokvqa.data.build import make_dataloader, build_dataset, build_transforms
from aokvqa.modules import *
from aokvqa.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
# setup logger
logger, final_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if args.log_dir is None:
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# manually set random seed
if config.RNG_SEED > -1:
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
# cudnn
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int(os.environ.get('LOCAL_RANK') or 0)
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int(os.environ['MASTER_PORT'] or 23456)
world_size = int(os.environ['WORLD_SIZE'] or 1)
rank = int(os.environ['RANK'] or 0)
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if not config.TRAIN.FP16:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
if rank == 0:
summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if args.log_dir is not None:
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if not os.path.exists(tb_log_dir):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
train_loader, train_sampler = make_dataloader(config,
mode='train',
distributed=True,
num_replicas=world_size,
rank=rank,
expose_sampler=True)
val_loader = make_dataloader(config,
mode='val',
distributed=True,
num_replicas=world_size,
rank=rank)
batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
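        # Parameters whose names contain a key listed in TRAIN.LR_MULT are put
        # in their own group with base_lr scaled by the paired multiplier; all
        # remaining parameters fall into a final default group at base_lr.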
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
else:
#os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
assert num_gpus <= 1 or (not config.TRAIN.FP16), "Not support fp16 with torch.nn.DataParallel. " \
"Please use amp.parallel.DistributedDataParallel instead."
total_gpus = num_gpus
rank = None
writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir is not None else None
# model
if num_gpus > 1:
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
# loader
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
# partial load pretrain state dict
if config.NETWORK.PARTIAL_PRETRAIN != "":
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if len(prefix_change) > 0:
pretrain_state_dict_parsed = {}
for k, v in pretrain_state_dict.items():
no_match = True
for pretrain_prefix, new_prefix in prefix_change:
if k.startswith(pretrain_prefix):
k = new_prefix + k[len(pretrain_prefix):]
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict, vocab_size=config.NETWORK.VLBERT.type_vocab_size)
# pretrained classifier
if config.NETWORK.CLASSIFIER_PRETRAINED:
print('Initializing classifier weight from pretrained word embeddings...')
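        # Each output row of the final classifier is initialized with the mean
        # BERT word embedding of the tokens of the corresponding answer in the
        # answer vocabulary.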
answers_word_embed = []
for k, v in model.state_dict().items():
if 'word_embeddings.weight' in k:
word_embeddings = v.detach().clone()
break
for answer in train_loader.dataset.answer_vocab:
a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
a_word_embed = (torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0)).mean(dim=0)
answers_word_embed.append(a_word_embed)
answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
for name, module in model.named_modules():
if name.endswith('final_mlp'):
module[-1].weight.data = answers_word_embed_tensor.to(device=module[-1].weight.data.device)
# metrics
train_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
val_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist,
num_replicas=world_size if args.dist else 1)]
for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(
vqa_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist,
num_replicas=world_size if args.dist else 1))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
# epoch end callbacks
epoch_end_callbacks = []
if (rank is None) or (rank == 0):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
host_metric_name='SoftAcc',
label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH)
# optimizer initial lr before
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# resume/auto-resume
if rank is None or rank == 0:
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
# batch end callbacks
batch_size = len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
batches_per_epoch=len(train_loader),
epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
# setup lr step and lr scheduler
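    # 'plateau' reduces the LR when the validation metric stops improving,
    # 'triangle' does linear warmup followed by linear decay over all steps,
    # and 'step' decays by LR_FACTOR at the epochs listed in LR_STEP
    # (converted to iterations), with optional warmup.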
if config.TRAIN.LR_SCHEDULE == 'plateau':
print("Warning: not support resuming on plateau lr schedule!")
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=config.TRAIN.LR_FACTOR,
patience=1,
verbose=True,
threshold=1e-4,
threshold_mode='rel',
cooldown=2,
min_lr=0,
eps=1e-8)
elif config.TRAIN.LR_SCHEDULE == 'triangle':
lr_scheduler = WarmupLinearSchedule(optimizer,
config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
t_total=int(config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
elif config.TRAIN.LR_SCHEDULE == 'step':
lr_iters = [int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR,
warmup_factor=config.TRAIN.WARMUP_FACTOR,
warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
warmup_method=config.TRAIN.WARMUP_METHOD,
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
else:
raise ValueError("Not support lr schedule: {}.".format(config.TRAIN.LR_SCHEDULE))
# broadcast parameter and optimizer state from rank 0 before training start
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
# for v in optimizer.state_dict().values():
# distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
# apex: amp fp16 mixed-precision training
if config.TRAIN.FP16:
# model.apply(bn_fp16_half_eval)
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=False,
loss_scale=config.TRAIN.FP16_LOSS_SCALE,
min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics,
config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger,
rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks,
writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16,
clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
return rank, model
| 17,600 | 51.228487 | 147 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/modules/resnet_vlbert_for_aokvqa.py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_bert import VisualLinguisticBert
BERT_WEIGHTS_NAME = 'pytorch_model.bin'
class ResNetVLBERT(Module):
def __init__(self, config):
super(ResNetVLBERT, self).__init__(config)
self.enable_cnn_reg_loss = config.NETWORK.ENABLE_CNN_REG_LOSS
if not config.NETWORK.BLIND:
self.image_feature_extractor = FastRCNN(config,
average_pool=True,
final_dim=config.NETWORK.IMAGE_FINAL_DIM,
enable_cnn_reg_loss=self.enable_cnn_reg_loss)
if config.NETWORK.VLBERT.object_word_embed_mode == 1:
self.object_linguistic_embeddings = nn.Embedding(81, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 2:
self.object_linguistic_embeddings = nn.Embedding(1, config.NETWORK.VLBERT.hidden_size)
elif config.NETWORK.VLBERT.object_word_embed_mode == 3:
self.object_linguistic_embeddings = None
else:
raise NotImplementedError
self.image_feature_bn_eval = config.NETWORK.IMAGE_FROZEN_BN
self.use_expansions = config.DATASET.COMMONSENSE_EXP_NAME != ''
self.commonsense_exp_name = config.NETWORK.VLBERT.commonsense_emb_type
self.tokenizer = BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)
language_pretrained_model_path = None
if config.NETWORK.BERT_PRETRAINED != '':
language_pretrained_model_path = '{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED,
config.NETWORK.BERT_PRETRAINED_EPOCH)
elif os.path.isdir(config.NETWORK.BERT_MODEL_NAME):
weight_path = os.path.join(config.NETWORK.BERT_MODEL_NAME, BERT_WEIGHTS_NAME)
if os.path.isfile(weight_path):
language_pretrained_model_path = weight_path
self.language_pretrained_model_path = language_pretrained_model_path
if language_pretrained_model_path is None:
print("Warning: no pretrained language model found, training from scratch!!!")
self.vlbert = VisualLinguisticBert(config.NETWORK.VLBERT,
language_pretrained_model_path=language_pretrained_model_path)
# self.hm_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
# self.hi_out = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.NETWORK.VLBERT.hidden_size)
dim = config.NETWORK.VLBERT.hidden_size
if config.NETWORK.CLASSIFIER_TYPE == "2fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.NETWORK.CLASSIFIER_HIDDEN_SIZE),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(config.NETWORK.CLASSIFIER_HIDDEN_SIZE, config.DATASET.ANSWER_VOCAB_SIZE),
)
elif config.NETWORK.CLASSIFIER_TYPE == "1fc":
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
torch.nn.Linear(dim, config.DATASET.ANSWER_VOCAB_SIZE)
)
elif config.NETWORK.CLASSIFIER_TYPE == 'mlm':
transform = BertPredictionHeadTransform(config.NETWORK.VLBERT)
linear = nn.Linear(config.NETWORK.VLBERT.hidden_size, config.DATASET.ANSWER_VOCAB_SIZE)
self.final_mlp = nn.Sequential(
transform,
nn.Dropout(config.NETWORK.CLASSIFIER_DROPOUT, inplace=False),
linear
)
else:
raise ValueError("Not support classifier type: {}!".format(config.NETWORK.CLASSIFIER_TYPE))
# init weights
self.init_weight()
self.fix_params()
def init_weight(self):
# self.hm_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hm_out.bias.data.zero_()
# self.hi_out.weight.data.normal_(mean=0.0, std=0.02)
# self.hi_out.bias.data.zero_()
self.image_feature_extractor.init_weight()
if self.object_linguistic_embeddings is not None:
self.object_linguistic_embeddings.weight.data.normal_(mean=0.0, std=0.02)
for m in self.final_mlp.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.constant_(m.bias, 0)
if self.config.NETWORK.CLASSIFIER_TYPE == 'mlm':
language_pretrained = torch.load(self.language_pretrained_model_path)
mlm_transform_state_dict = {}
pretrain_keys = []
for k, v in language_pretrained.items():
if k.startswith('cls.predictions.transform.'):
pretrain_keys.append(k)
k_ = k[len('cls.predictions.transform.'):]
if 'gamma' in k_:
k_ = k_.replace('gamma', 'weight')
if 'beta' in k_:
k_ = k_.replace('beta', 'bias')
mlm_transform_state_dict[k_] = v
print("loading pretrained classifier transform keys: {}.".format(pretrain_keys))
self.final_mlp[0].load_state_dict(mlm_transform_state_dict)
def train(self, mode=True):
super(ResNetVLBERT, self).train(mode)
# turn some frozen layers to eval mode
if self.image_feature_bn_eval:
self.image_feature_extractor.bn_eval()
def fix_params(self):
pass
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def prepare_text_from_qa(self, question, question_tags, question_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + answer_mask.sum(1)).max() + 3
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
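        # Layout: [CLS] question [SEP] answer [SEP], with token type 0 for the
        # question span and 1 for the answer span. The returned position
        # (a_end - 1) points at the last answer token, here the single [MASK]
        # placeholder whose hidden state is later fed to the classifier.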
q_end = 1 + question_mask.sum(1, keepdim=True)
a_end = q_end + 1 + answer_mask.sum(1, keepdim=True)
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.bool, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
a_input_mask = (grid_j > q_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
def prepare_text_from_qea(self, question, question_tags, question_mask, expansions, expansions_tags, expansions_mask, answer, answer_tags, answer_mask):
batch_size, max_q_len = question.shape
_, max_e_len = expansions.shape
_, max_a_len = answer.shape
max_len = (question_mask.sum(1) + expansions_mask.sum(1) + answer_mask.sum(1)).max() + 4
cls_id, sep_id = self.tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]'])
q_end = 1 + question_mask.sum(1, keepdim=True)
e_end = q_end + 1 + expansions_mask.sum(1, keepdim=True)
a_end = e_end + 1 + answer_mask.sum(1, keepdim=True)
# Define a new input sequence
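        # Layout: [CLS] question [SEP] expansions [SEP] answer [SEP], with token
        # type ids 0 / 3 / 1 for the three spans; the commonsense expansions get
        # their own segment id so the model can tell them apart from the question.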
input_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
input_mask = torch.ones((batch_size, max_len), dtype=torch.bool, device=question.device)
input_type_ids = torch.zeros((batch_size, max_len), dtype=question.dtype, device=question.device)
text_tags = input_type_ids.new_zeros((batch_size, max_len))
grid_i, grid_j = torch.meshgrid(torch.arange(batch_size, device=question.device),
torch.arange(max_len, device=question.device))
input_mask[grid_j > a_end] = 0
input_type_ids[(grid_j > q_end) & (grid_j <= e_end)] = 3
input_type_ids[(grid_j > e_end) & (grid_j <= a_end)] = 1
q_input_mask = (grid_j > 0) & (grid_j < q_end)
e_input_mask = (grid_j > q_end) & (grid_j < e_end)
a_input_mask = (grid_j > e_end) & (grid_j < a_end)
input_ids[:, 0] = cls_id
input_ids[grid_j == q_end] = sep_id
input_ids[grid_j == e_end] = sep_id
input_ids[grid_j == a_end] = sep_id
input_ids[q_input_mask] = question[question_mask]
input_ids[e_input_mask] = expansions[expansions_mask]
input_ids[a_input_mask] = answer[answer_mask]
text_tags[q_input_mask] = question_tags[question_mask]
text_tags[e_input_mask] = expansions_tags[expansions_mask]
text_tags[a_input_mask] = answer_tags[answer_mask]
#print('Inputs: ', input_ids, input_type_ids, text_tags, input_mask)
return input_ids, input_type_ids, text_tags, input_mask, (a_end - 1).squeeze(1)
def train_forward(self,
image,
boxes,
im_info,
question,
expansions,
commonsense_emb,
label
):
###########################################
# visual feature extraction
images = image
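        # The collator pads box tensors with -2, so a first coordinate above
        # -1.5 marks a real box; padded boxes beyond the longest real sequence
        # in the batch are trimmed before feature extraction.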
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
expansions_ids = expansions
expansions_tags = expansions.new_zeros(expansions_ids.shape)
expansions_mask = (expansions > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
if self.use_expansions:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qea(question_ids,
question_tags,
question_mask,
expansions_ids,
expansions_tags,
expansions_mask,
answer_ids,
answer_tags,
answer_mask)
else:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc, attn_weights = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
commonsense_embeddings=commonsense_emb,
output_all_encoded_layers=False,
output_commonsense_attn_weights=True)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
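        # When WEAK_ATTN_LOSS is enabled, the dataset appends
        # MAX_COMMONSENSE_LEN + 1 attention targets to `label`; they are split
        # off here and supervised with an extra BCE loss on the head-averaged
        # commonsense attention weights.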
if self.config.NETWORK.WEAK_ATTN_LOSS:
            max_c_len = -(self.config.DATASET.MAX_COMMONSENSE_LEN + 1)
attn_label = label[:, max_c_len:]
label = label[:, :max_c_len]
attn_weights = torch.mean(attn_weights, dim=1)
# loss
ans_loss = F.binary_cross_entropy_with_logits(logits, label) * label.size(1)
if self.config.NETWORK.WEAK_ATTN_LOSS:
loss_mask = attn_label.sum(1) > 0
attn_weights = attn_weights[loss_mask, :]
attn_label = attn_label[loss_mask, :]
if attn_label.sum() > 0:
attn_loss = F.binary_cross_entropy_with_logits(attn_weights, attn_label) * attn_label.size(1)
else:
attn_loss = 0
ans_loss = ans_loss + attn_loss
outputs.update({'label_logits': logits,
'label': label,
'ans_loss': ans_loss})
loss = ans_loss.mean()
return outputs, loss
def inference_forward(self,
image,
boxes,
im_info,
question,
expansions,
commonsense_emb):
###########################################
# visual feature extraction
images = image
box_mask = (boxes[:, :, 0] > - 1.5)
max_len = int(box_mask.sum(1).max().item())
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
obj_reps = self.image_feature_extractor(images=images,
boxes=boxes,
box_mask=box_mask,
im_info=im_info,
classes=None,
segms=None)
question_ids = question
question_tags = question.new_zeros(question_ids.shape)
question_mask = (question > 0.5)
expansions_ids = expansions
expansions_tags = expansions.new_zeros(expansions_ids.shape)
expansions_mask = (expansions > 0.5)
answer_ids = question_ids.new_zeros((question_ids.shape[0], 1)).fill_(
self.tokenizer.convert_tokens_to_ids(['[MASK]'])[0])
answer_mask = question_mask.new_zeros(answer_ids.shape).fill_(1)
answer_tags = question_tags.new_zeros(answer_ids.shape)
############################################
# prepare text
if self.use_expansions:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qea(question_ids,
question_tags,
question_mask,
expansions_ids,
expansions_tags,
expansions_mask,
answer_ids,
answer_tags,
answer_mask)
else:
text_input_ids, text_token_type_ids, text_tags, text_mask, ans_pos = self.prepare_text_from_qa(question_ids,
question_tags,
question_mask,
answer_ids,
answer_tags,
answer_mask)
if self.config.NETWORK.NO_GROUNDING:
obj_rep_zeroed = obj_reps['obj_reps'].new_zeros(obj_reps['obj_reps'].shape)
text_tags.zero_()
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_rep_zeroed)
else:
text_visual_embeddings = self._collect_obj_reps(text_tags, obj_reps['obj_reps'])
assert self.config.NETWORK.VLBERT.object_word_embed_mode == 2
object_linguistic_embeddings = self.object_linguistic_embeddings(
boxes.new_zeros((boxes.shape[0], boxes.shape[1])).long()
)
object_vl_embeddings = torch.cat((obj_reps['obj_reps'], object_linguistic_embeddings), -1)
###########################################
# Visual Linguistic BERT
hidden_states, hc, attn_weights = self.vlbert(text_input_ids,
text_token_type_ids,
text_visual_embeddings,
text_mask,
object_vl_embeddings,
box_mask,
commonsense_embeddings=commonsense_emb,
output_all_encoded_layers=False,
output_commonsense_attn_weights=True)
_batch_inds = torch.arange(question.shape[0], device=question.device)
hm = hidden_states[_batch_inds, ans_pos]
# hm = F.tanh(self.hm_out(hidden_states[_batch_inds, ans_pos]))
# hi = F.tanh(self.hi_out(hidden_states[_batch_inds, ans_pos + 2]))
###########################################
outputs = {}
# classifier
# logits = self.final_mlp(hc * hm * hi)
# logits = self.final_mlp(hc)
logits = self.final_mlp(hm)
outputs.update({'label_logits': logits, 'attn_weights': attn_weights})
return outputs
| 22,529 | 50.674312 | 156 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/modules/__init__.py
|
from .resnet_vlbert_for_aokvqa import ResNetVLBERT
| 53 | 12.5 | 50 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/__init__.py
| 0 | 0 | 0 |
py
|
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/collate_batch.py
|
import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self, batch):
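        # Pad every field to the per-batch maximum: images to the largest
        # spatial size (pad 0), boxes to the largest box count (pad -2), and
        # question / expansion token ids to the longest sequence (pad 0);
        # remaining fields are converted to tensors and stacked.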
if not isinstance(batch, list):
batch = list(batch)
if batch[0][self.data_names.index('image')] is not None:
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
image_none = False
else:
image_none = True
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
max_expansions_length = max([len(data[self.data_names.index('expansions')]) for data in batch])
for i, ibatch in enumerate(batch):
out = {}
if image_none:
out['image'] = None
else:
image = ibatch[self.data_names.index('image')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
question = ibatch[self.data_names.index('question')]
out['question'] = clip_pad_1d(question, max_question_length, pad=0)
expansions = ibatch[self.data_names.index('expansions')]
out['expansions'] = clip_pad_1d(expansions, max_expansions_length, pad=0)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for items in zip(*batch):
if items[0] is None:
out_tuple += (None,)
else:
out_tuple += (torch.stack(tuple(items), dim=0), )
return out_tuple
| 2,295 | 37.266667 | 115 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/build.py
|
import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'aokvqa': AOKVQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset not in catalogs"
return DATASET_CATALOGS[dataset_name](*args, **kwargs)
def make_data_sampler(dataset, shuffle, distributed, num_replicas, rank):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size):
if aspect_grouping:
group_ids = dataset.group_ids
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False
)
return batch_sampler
def make_dataloader(cfg, dataset=None, mode='train', distributed=False, num_replicas=None, rank=None,
expose_sampler=False):
assert mode in ['train', 'val', 'test']
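    # Pick the annotation file, image set, batch size, shuffling and aspect
    # grouping for the requested split, then build the dataset, sampler,
    # batch sampler and collator around it.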
if mode == 'train':
ann_file = cfg.DATASET.TRAIN_ANNOTATION_FILE
image_set = cfg.DATASET.TRAIN_IMAGE_SET
aspect_grouping = cfg.TRAIN.ASPECT_GROUPING
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TRAIN.BATCH_IMAGES * num_gpu
shuffle = cfg.TRAIN.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
elif mode == 'val':
ann_file = cfg.DATASET.VAL_ANNOTATION_FILE
image_set = cfg.DATASET.VAL_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.VAL.BATCH_IMAGES * num_gpu
shuffle = cfg.VAL.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
else:
ann_file = cfg.DATASET.TEST_ANNOTATION_FILE
image_set = cfg.DATASET.TEST_IMAGE_SET
aspect_grouping = False
num_gpu = len(cfg.GPUS.split(','))
batch_size = cfg.TEST.BATCH_IMAGES * num_gpu
shuffle = cfg.TEST.SHUFFLE
num_workers = cfg.NUM_WORKERS_PER_GPU * num_gpu
transform = build_transforms(cfg, mode)
if dataset is None:
dataset = build_dataset(dataset_name=cfg.DATASET.DATASET, ann_file=ann_file, image_set=image_set,
use_imdb=cfg.DATASET.USE_IMDB,
with_precomputed_visual_feat=cfg.NETWORK.IMAGE_FEAT_PRECOMPUTED,
boxes=cfg.DATASET.BOXES,
answer_vocab_file=cfg.DATASET.ANSWER_VOCAB_FILE,
root_path=cfg.DATASET.ROOT_PATH, data_path=cfg.DATASET.DATASET_PATH,
test_mode=(mode == 'test'), transform=transform,
zip_mode=cfg.DATASET.ZIP_MODE, cache_mode=cfg.DATASET.CACHE_MODE,
cache_db=True if (rank is None or rank == 0) else False,
ignore_db_cache=cfg.DATASET.IGNORE_DB_CACHE,
add_image_as_a_box=cfg.DATASET.ADD_IMAGE_AS_A_BOX,
aspect_grouping=aspect_grouping,
mask_size=(cfg.DATASET.MASK_SIZE, cfg.DATASET.MASK_SIZE),
pretrained_model_name=cfg.NETWORK.BERT_MODEL_NAME,
use_sbert = cfg.DATASET.USE_SBERT,
commonsense_exp_name = cfg.DATASET.COMMONSENSE_EXP_NAME,
max_commonsense_len = cfg.DATASET.MAX_COMMONSENSE_LEN,
commonsense_emb_type = cfg.NETWORK.VLBERT.commonsense_emb_type,
learn_attn= cfg.NETWORK.WEAK_ATTN_LOSS)
sampler = make_data_sampler(dataset, shuffle, distributed, num_replicas, rank)
batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, batch_size)
collator = BatchCollator(dataset=dataset, append_ind=cfg.DATASET.APPEND_INDEX)
dataloader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=False,
collate_fn=collator)
if expose_sampler:
return dataloader, sampler
return dataloader
| 4,753 | 44.27619 | 106 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/datasets/aokvqa.py
|
import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import logging
import pickle5 as pickle
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
from pycocotools.coco import COCO
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
class AOKVQA(Dataset):
def __init__(self, image_set, root_path, data_path, answer_vocab_file, use_imdb=True,
with_precomputed_visual_feat=False, boxes="36",
transform=None, test_mode=False,
zip_mode=False, cache_mode=False, cache_db=True, ignore_db_cache=True,
tokenizer=None, pretrained_model_name=None,
add_image_as_a_box=False, mask_size=(14, 14),
aspect_grouping=False, use_sbert=False, commonsense_exp_name='', max_commonsense_len=5,
commonsense_emb_type='', learn_attn=False, **kwargs):
"""
Visual Question Answering Dataset
:param image_set: image folder name
:param root_path: root path to cache database loaded from annotation file
        :param data_path: path to the A-OKVQA / COCO dataset
:param transform: transform
:param test_mode: test mode means no labels available
:param zip_mode: reading images and metadata in zip archive
:param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
:param ignore_db_cache: ignore previous cached database, reload it from annotation file
:param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
:param add_image_as_a_box: add whole image as a box
:param mask_size: size of instance mask of each object
:param aspect_grouping: whether to group images via their aspect
:param kwargs:
"""
super(AOKVQA, self).__init__()
assert not cache_mode, 'currently not support cache mode!'
aokvqa_question = {
"train2017": "aokvqa/aokvqa_v1p0_train.json",
"val2017": "aokvqa/aokvqa_v1p0_val.json",
"test2017": "aokvqa/aokvqa_v1p0_test.json",
}
if boxes == "36":
precomputed_boxes = {
'train2017': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'val2017': ("vgbua_res101_precomputed", "trainval_resnet101_faster_rcnn_genome_36"),
'test2017': ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome_36"),
}
elif boxes == "10-100ada":
precomputed_boxes = {
'train2017': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'val2017': ("vgbua_res101_precomputed", "trainval2014_resnet101_faster_rcnn_genome"),
'test2017': ("vgbua_res101_precomputed", "test2015_resnet101_faster_rcnn_genome"),
}
else:
raise ValueError("Not support boxes: {}!".format(boxes))
coco_dataset = {
"train2017": ("train2017", "annotations/instances_train2017.json"),
"val2017": ("val2017", "annotations/instances_val2017.json"),
"test2017": ("test2017", "annotations/image_info_test2017.json"),
}
commonsense_path = "data/coco/aokvqa/commonsense/"
self.experiment_name = commonsense_exp_name
self.use_sbert = use_sbert
self.max_commonsense_len = max_commonsense_len
self.commonsense_emb_type = commonsense_emb_type
self.learn_attn = learn_attn
if self.experiment_name == 'semqo':
aokvqa_expansions = {
'train2017': commonsense_path+'expansions/semq.o_aokvqa_train.json',
'val2017': commonsense_path+'expansions/semq.o_aokvqa_val.json',
'test2017': commonsense_path+'expansions/semq.o_aokvqa_test.json',
}
self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
self.commaStrip = re.compile("(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
print("Loading OK-VQA dataset: ", image_set)
self.boxes = boxes
self.test_mode = test_mode
self.with_precomputed_visual_feat = with_precomputed_visual_feat
self.data_path = data_path
self.root_path = root_path
with open(answer_vocab_file, 'r', encoding='utf8') as f:
self.answer_vocab = [w.lower().strip().strip('\r').strip('\n').strip('\r') for w in f.readlines()]
self.answer_vocab = list(filter(lambda x: x != '', self.answer_vocab))
self.answer_vocab = [self.processPunctuation(w) for w in self.answer_vocab]
self.image_sets = [iset.strip() for iset in image_set.split('+')]
self.q_files = [os.path.join(data_path, aokvqa_question[iset]) for iset in self.image_sets]
self.expansion_files = [aokvqa_expansions[iset] for iset in self.image_sets] \
if (self.experiment_name != '') else [None for iset in self.image_sets]
self.precomputed_box_files = [
os.path.join(data_path, precomputed_boxes[iset][0],
'{0}.zip@/{0}'.format(precomputed_boxes[iset][1])
if zip_mode else precomputed_boxes[iset][1])
for iset in self.image_sets]
self.box_bank = {}
self.coco_datasets = [(os.path.join(data_path,
coco_dataset[iset][0],
'{{:012d}}.jpg'.format(coco_dataset[iset][0]))
if not zip_mode else
os.path.join(data_path,
coco_dataset[iset][0] + '.zip@/' + coco_dataset[iset][0],
'{{:012d}}.jpg'.format(coco_dataset[iset][0])),
os.path.join(data_path, coco_dataset[iset][1]))
for iset in self.image_sets]
self.transform = transform
self.zip_mode = zip_mode
self.cache_mode = cache_mode
self.cache_db = cache_db
self.ignore_db_cache = ignore_db_cache
self.aspect_grouping = aspect_grouping
self.cache_dir = os.path.join(root_path, 'cache')
self.add_image_as_a_box = add_image_as_a_box
self.mask_size = mask_size
if not os.path.exists(self.cache_dir):
makedirsExist(self.cache_dir)
self.tokenizer = tokenizer if tokenizer is not None \
else BertTokenizer.from_pretrained(
'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name,
cache_dir=self.cache_dir)
if zip_mode:
self.zipreader = ZipReader()
self.database = self.load_annotations()
if self.aspect_grouping:
self.group_ids = self.group_aspect(self.database)
self.attn_gt = None
if self.learn_attn and not self.test_mode:
self.attn_gt = self._load_json('data/coco/aokvqa/'+self.experiment_name+'_aokvqa_train_attn_annot_'+str(self.max_commonsense_len)+'.json')
@property
def data_names(self):
if self.test_mode:
return ['image', 'boxes', 'im_info', 'question', 'expansions', 'c_emb']
else:
return ['image', 'boxes', 'im_info', 'question', 'expansions', 'c_emb', 'label']
def __getitem__(self, index):
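        # Build one sample: image (or None when precomputed features are used),
        # padded boxes (optionally concatenated with their features), im_info,
        # question token ids, expansion token ids (or [MASK] placeholders when
        # cached SBERT embeddings are used), the commonsense embeddings, and
        # (for train/val) the soft answer target.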
idb = self.database[index]
# image, boxes, im_info
boxes_data = self._load_json(idb['box_fn'])
if self.with_precomputed_visual_feat:
image = None
w0, h0 = idb['width'], idb['height']
boxes_features = torch.tensor(
np.frombuffer(self.b64_decode(boxes_data['features']), dtype=np.float32).reshape((boxes_data['num_boxes'], -1))
)
else:
image = self._load_image(idb['image_fn'])
w0, h0 = image.size
boxes = torch.tensor(
np.frombuffer(self.b64_decode(boxes_data['boxes']), dtype=np.float32).reshape(
(boxes_data['num_boxes'], -1))
)
if self.add_image_as_a_box:
image_box = torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1]])
boxes = torch.cat((image_box, boxes), dim=0)
if self.with_precomputed_visual_feat:
if 'image_box_feature' in boxes_data:
image_box_feature = torch.as_tensor(
np.frombuffer(
self.b64_decode(boxes_data['image_box_feature']), dtype=np.float32
).reshape((1, -1))
)
else:
image_box_feature = boxes_features.mean(0, keepdim=True)
boxes_features = torch.cat((image_box_feature, boxes_features), dim=0)
im_info = torch.tensor([w0, h0, 1.0, 1.0])
flipped = False
if self.transform is not None:
image, boxes, _, im_info, flipped = self.transform(image, boxes, None, im_info, flipped)
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h - 1)
# flip: 'left' -> 'right', 'right' -> 'left'
q_tokens = self.tokenizer.tokenize(idb['question'])
if flipped:
q_tokens = self.flip_tokens(q_tokens, verbose=False)
if not self.test_mode:
answers = idb['answers']
if flipped:
answers_tokens = [a.split(' ') for a in answers]
answers_tokens = [self.flip_tokens(a_toks, verbose=False) for a_toks in answers_tokens]
answers = [' '.join(a_toks) for a_toks in answers_tokens]
label = self.get_soft_target(answers)
# question
q_retokens = q_tokens
q_ids = self.tokenizer.convert_tokens_to_ids(q_retokens)
# commonsense
exp_ids = []
commonsense_embeddings = torch.tensor([0])
if self.experiment_name != '':
            # If we use SBERT, add [MASK] placeholder tokens to exp_ids and load the precomputed embeddings into commonsense_embeddings
if self.use_sbert:
if self.commonsense_emb_type == 'fusion':
commonsense_embeddings = self.get_cached_expansion_emb(idb['image_fn'].split('/')[-1], idb['question_id'], custom_tag='_ques')
else:
commonsense_embeddings = self.get_cached_expansion_emb(idb['image_fn'].split('/')[-1], idb['question_id'])
# Now that we have commonsense embeddings, we add the [MASK] tokens that will be replaced by the commonsense embeddings in training code
if self.commonsense_emb_type == 'fusion':
m_tokens = ['[MASK]']
else:
m_tokens = ['[MASK]']*self.max_commonsense_len
m_ids = self.tokenizer.convert_tokens_to_ids(m_tokens)
exp_ids += m_ids
# If not SBERT, clean the picked expansions and add them to exp_ids
else:
                # We use picked expansions from the knowledge selection process
picked_exp = idb['picked_exp']
if isinstance(picked_exp, list):
picked_exp = picked_exp[0]
picked_exp = picked_exp.split('.')
picked_exp = [sentence.strip() for sentence in picked_exp]
picked_exp = [sentence+'.' for sentence in picked_exp if sentence != '']
if len(picked_exp) >= self.max_commonsense_len:
picked_exp = picked_exp[:self.max_commonsense_len]
else:
picked_exp = picked_exp + [''] * (self.max_commonsense_len - len(picked_exp))
picked_exp = ' '.join(picked_exp)
picked_exp_tokens = self.tokenizer.tokenize(picked_exp)
exp_ids += self.tokenizer.convert_tokens_to_ids(picked_exp_tokens)
# concat box feature to box
if self.with_precomputed_visual_feat:
boxes = torch.cat((boxes, boxes_features), dim=-1)
if self.attn_gt is not None:
if str(idb['image_id']) in self.attn_gt and idb['question_id'] in self.attn_gt[str(idb['image_id'])]:
attn_weight_label = torch.tensor(self.attn_gt[str(idb['image_id'])][idb['question_id']])
else:
attn_weight_label = torch.zeros(self.max_commonsense_len+1)
label = torch.cat((label, attn_weight_label), dim=0)
if self.test_mode:
return image, boxes, im_info, q_ids, exp_ids, commonsense_embeddings
else:
return image, boxes, im_info, q_ids, exp_ids, commonsense_embeddings, label
@staticmethod
def flip_tokens(tokens, verbose=True):
changed = False
tokens_new = [tok for tok in tokens]
for i, tok in enumerate(tokens):
if tok == 'left':
tokens_new[i] = 'right'
changed = True
elif tok == 'right':
tokens_new[i] = 'left'
changed = True
if verbose and changed:
logging.info('[Tokens Flip] {} -> {}'.format(tokens, tokens_new))
return tokens_new
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
def answer_to_ind(self, answer):
if answer in self.answer_vocab:
return self.answer_vocab.index(answer)
else:
return self.answer_vocab.index('<unk>')
def get_soft_target(self, answers):
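        # VQA-style soft targets: for each unique annotated answer, the score is
        # the average over annotators of min(1, matching other annotators / 3);
        # answers mapped to '<unk>' receive no credit.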
soft_target = torch.zeros(len(self.answer_vocab), dtype=torch.float)
answer_indices = [self.answer_to_ind(answer) for answer in answers]
gt_answers = list(enumerate(answer_indices))
unique_answers = set(answer_indices)
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.index('<unk>'):
soft_target[answer] = avg_acc
return soft_target
def processPunctuation(self, inText):
if inText == '<unk>':
return inText
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = self.periodStrip.sub("",
outText,
re.UNICODE)
return outText
def load_annotations(self):
tic = time.time()
database = []
db_cache_name = 'aokvqa_boxes{}_{}'.format(self.boxes, '+'.join(self.image_sets))
if self.with_precomputed_visual_feat:
db_cache_name += 'visualprecomp'
if self.zip_mode:
db_cache_name = db_cache_name + '_zipmode'
if self.test_mode:
db_cache_name = db_cache_name + '_testmode'
if self.experiment_name != '':
db_cache_name = db_cache_name + '_' + self.experiment_name
db_cache_root = os.path.join(self.root_path, 'cache')
db_cache_path = os.path.join(db_cache_root, '{}.pkl'.format(db_cache_name))
if os.path.exists(db_cache_path):
if not self.ignore_db_cache:
# reading cached database
print('cached database found in {}.'.format(db_cache_path))
with open(db_cache_path, 'rb') as f:
print('loading cached database from {}...'.format(db_cache_path))
tic = time.time()
database = cPickle.load(f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
else:
print('cached database ignored.')
# ignore or not find cached database, reload it from annotation file
print('loading database of split {}...'.format('+'.join(self.image_sets)))
tic = time.time()
for q_file, expansion_file, (coco_path, coco_annot), box_file \
in zip(self.q_files, self.expansion_files, self.coco_datasets, self.precomputed_box_files):
qs = self._load_json(q_file)
expansion_data = self._load_json(expansion_file)
coco = COCO(coco_annot)
for q in qs:
idb = {'image_id': q['image_id'],
'image_fn': coco_path.format(q['image_id']),
'width': coco.imgs[q['image_id']]['width'],
'height': coco.imgs[q['image_id']]['height'],
'box_fn': os.path.join(box_file, '{}.json'.format(q['image_id'])),
'question_id': q['question_id'],
'question': q['question'],
"picked_exp": expansion_data[str(coco_path.format(q['image_id']).split('/')[-1])][str(q['question_id'])] if (self.experiment_name != '') else None,
"rationales": q['rationales'] if self.experiment_name == 'rationales' else None,
'answers': q['direct_answers'] if not self.test_mode else None,
"question_type": "other" if not self.test_mode else None,
"answer_type": "other" if not self.test_mode else None,
}
database.append(idb)
print('Done (t={:.2f}s)'.format(time.time() - tic))
# cache database via cPickle
if self.cache_db:
print('caching database to {}...'.format(db_cache_path))
tic = time.time()
if not os.path.exists(db_cache_root):
makedirsExist(db_cache_root)
with open(db_cache_path, 'wb') as f:
cPickle.dump(database, f)
print('Done (t={:.2f}s)'.format(time.time() - tic))
return database
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
        vert = ~horz  # logical complement (bool tensor subtraction is unsupported in newer PyTorch)
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def load_precomputed_boxes(self, box_file):
if box_file in self.box_bank:
return self.box_bank[box_file]
else:
in_data = {}
with open(box_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in (['boxes', 'features'] if self.with_precomputed_visual_feat else ['boxes']):
item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
dtype=np.float32).reshape((item['num_boxes'], -1))
in_data[item['image_id']] = item
self.box_bank[box_file] = in_data
return in_data
def get_cached_expansion_emb(self, image_id, question_id, custom_tag=''):
commonsense_embeddings = None
for subset in self.image_sets:
savepath = 'data/coco/sbert/aokvqa/'+self.experiment_name+'/'+str(self.max_commonsense_len)+custom_tag+'/'+subset
image_id = str(image_id)
question_id = str(question_id)
if not os.path.exists(savepath+'/'+image_id+'.pkl'):
continue
with open(savepath+'/'+image_id+'.pkl', 'rb') as handle:
unserialized_data = pickle.load(handle)
commonsense_embeddings = torch.tensor(unserialized_data[question_id])
assert commonsense_embeddings is not None, 'No expansion embedding found at {}'.format(savepath+'/'+image_id+'.pkl')
return commonsense_embeddings
def __len__(self):
return len(self.database)
def _load_image(self, path):
if '.zip@' in path:
return self.zipreader.imread(path).convert('RGB')
else:
return Image.open(path).convert('RGB')
def _load_json(self, path):
if path is None:
return None
elif '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
| 21,774 | 42.812877 | 171 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/datasets/__init__.py
|
from .aokvqa import AOKVQA
| 28 | 8.666667 | 26 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/samplers/grouped_batch_sampler.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
It also tries to provide mini-batches that follow an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
# for example, if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
# get and inverse mapping from sampled indices and the position where
# they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
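# Usage sketch (added for illustration; not part of the original repository). It shows how
# GroupedBatchSampler is typically driven: a base Sampler supplies the ordering and the
# group ids (e.g. 0/1 aspect-ratio groups from a group_aspect-style helper) keep each
# batch inside a single group. All values below are illustrative.
if __name__ == "__main__":
    from torch.utils.data.sampler import SequentialSampler

    example_group_ids = [0, 1, 0, 0, 1, 1]              # e.g. 0 = landscape, 1 = portrait
    base_sampler = SequentialSampler(range(len(example_group_ids)))
    batch_sampler = GroupedBatchSampler(base_sampler, example_group_ids, batch_size=2)
    for batch in batch_sampler:
        print(batch)                                     # every batch draws indices from one group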
| 4,846 | 40.42735 | 88 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/samplers/distributed.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
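# Usage sketch (added for illustration; not part of the original repository). Passing
# num_replicas/rank explicitly avoids needing an initialized process group, and calling
# set_epoch before each epoch changes the deterministic shuffle. The toy dataset is a placeholder.
if __name__ == "__main__":
    toy_dataset = list(range(10))
    sampler = DistributedSampler(toy_dataset, num_replicas=2, rank=0, shuffle=True)
    for epoch in range(2):
        sampler.set_epoch(epoch)                 # re-seed the per-epoch shuffle
        print(epoch, list(iter(sampler)))        # this rank sees 5 of the 10 indices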
| 2,568 | 37.924242 | 86 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/samplers/__init__.py
|
from .distributed import DistributedSampler
from .grouped_batch_sampler import GroupedBatchSampler
| 100 | 24.25 | 54 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/transforms/__init__.py
|
from .transforms import Compose
from .transforms import Resize
from .transforms import RandomHorizontalFlip
from .transforms import ToTensor
from .transforms import Normalize
from .build import build_transforms
| 212 | 25.625 | 44 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/transforms/build.py
|
from . import transforms as T
def build_transforms(cfg, mode='train'):
assert mode in ['train', 'test', 'val']
min_size = cfg.SCALES[0]
max_size = cfg.SCALES[1]
assert min_size <= max_size
if mode == 'train':
flip_prob = cfg.TRAIN.FLIP_PROB
elif mode == 'test':
flip_prob = cfg.TEST.FLIP_PROB
else:
flip_prob = cfg.VAL.FLIP_PROB
to_bgr255 = True
normalize_transform = T.Normalize(
mean=cfg.NETWORK.PIXEL_MEANS, std=cfg.NETWORK.PIXEL_STDS, to_bgr255=to_bgr255
)
# transform = T.Compose(
# [
# T.Resize(min_size, max_size),
# T.RandomHorizontalFlip(flip_prob),
# T.ToTensor(),
# normalize_transform,
# T.FixPadding(min_size, max_size, pad=0)
# ]
# )
transform = T.Compose(
[
T.Resize(min_size, max_size),
T.RandomHorizontalFlip(flip_prob),
T.ToTensor(),
normalize_transform,
]
)
return transform
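# Usage sketch (added for illustration; not part of the original repository). build_transforms
# only touches a few config fields, so a small stand-in config built with SimpleNamespace is
# enough to exercise it here; the numeric values are illustrative, not the repository defaults.
if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        SCALES=(600, 1000),
        TRAIN=SimpleNamespace(FLIP_PROB=0.5),
        TEST=SimpleNamespace(FLIP_PROB=0.0),
        VAL=SimpleNamespace(FLIP_PROB=0.0),
        NETWORK=SimpleNamespace(PIXEL_MEANS=(102.98, 115.95, 122.77),
                                PIXEL_STDS=(1.0, 1.0, 1.0)),
    )
    transform = build_transforms(cfg, mode="train")
    print(transform)    # Compose(Resize, RandomHorizontalFlip, ToTensor, Normalize)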
| 1,034 | 23.069767 | 85 |
py
|
VLC-BERT
|
VLC-BERT-master/aokvqa/data/transforms/transforms.py
|
import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
image, boxes, masks, im_info, flipped = t(image, boxes, masks, im_info, flipped)
return image, boxes, masks, im_info, flipped
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(max_size * min_original_size / max_original_size)
if (w <= h and w == size) or (h <= w and h == size):
return (w, h)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (ow, oh)
def __call__(self, image, boxes, masks, im_info, flipped):
origin_size = im_info[:2]
size = self.get_size(origin_size)
if image is not None:
image = F.resize(image, (size[1], size[0]))
ratios = [size[0] * 1.0 / origin_size[0], size[1] * 1.0 / origin_size[1]]
if boxes is not None:
boxes[:, [0, 2]] *= ratios[0]
boxes[:, [1, 3]] *= ratios[1]
im_info[0], im_info[1] = size
im_info[2], im_info[3] = ratios
return image, boxes, masks, im_info, flipped
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, boxes, masks, im_info, flipped):
if random.random() < self.prob:
w, h = im_info[:2]
if image is not None:
image = F.hflip(image)
if boxes is not None:
boxes[:, [0, 2]] = w - 1 - boxes[:, [2, 0]]
if masks is not None:
masks = torch.as_tensor(masks.numpy()[:, :, ::-1].tolist())
flipped = not flipped
return image, boxes, masks, im_info, flipped
class ToTensor(object):
def __call__(self, image, boxes, masks, im_info, flipped):
return F.to_tensor(image) if image is not None else image, boxes, masks, im_info, flipped
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, boxes, masks, im_info, flipped
class FixPadding(object):
def __init__(self, min_size, max_size, pad=0):
self.min_size = min_size
self.max_size = max_size
self.pad = pad
def __call__(self, image, boxes, masks, im_info, flipped):
if image is not None:
# padding to fixed size for determinacy
c, h, w = image.shape
if h <= w:
h1 = self.min_size
w1 = self.max_size
else:
h1 = self.max_size
w1 = self.min_size
padded_image = image.new_zeros((c, h1, w1)).fill_(self.pad)
padded_image[:, :h, :w] = image
image = padded_image
return image, boxes, masks, im_info, flipped
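# Usage sketch (added for illustration; not part of the original repository). The transforms
# act on an (image, boxes, masks, im_info, flipped) tuple so that boxes and the recorded
# size/scale ratios stay in sync with the resized image; the sizes and box below are made up.
if __name__ == "__main__":
    from PIL import Image

    demo_image = Image.new("RGB", (640, 480))
    demo_boxes = torch.tensor([[10.0, 20.0, 200.0, 300.0]])
    demo_im_info = torch.tensor([640.0, 480.0, 1.0, 1.0])
    pipeline = Compose([
        Resize(600, 1000),
        RandomHorizontalFlip(prob=0.0),          # disabled here to keep the demo deterministic
        ToTensor(),
        Normalize(mean=(102.98, 115.95, 122.77), std=(1.0, 1.0, 1.0)),
    ])
    out_image, out_boxes, _, out_info, _ = pipeline(demo_image, demo_boxes, None, demo_im_info, False)
    print(out_image.shape, out_boxes, out_info)  # resized tensor, rescaled box, updated ratios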
| 4,104 | 30.821705 | 97 |
py
|
VLC-BERT
|
VLC-BERT-master/external/PythonEvaluationTools/aokvqa_vqaEval.py
|
import argparse
import json
import os
def load_aokvqa(aokvqa_dir, split, version='v1p0'):
#assert split in ['train', 'val', 'test', 'test_w_ans', 'val_pruned']
dataset = json.load(open(
os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
))
return dataset
def get_coco_path(split, image_id, coco_dir):
return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg")
def run_eval(resFile=None, split='test', save_path=None, multiple_choice=False, strict=True):
# Load data
dataset = load_aokvqa('data/coco/aokvqa', split=split)
# Load predictions (this only applies in the direct-answer setting)
if not multiple_choice:
predictions = json.load(open(resFile, 'r'))
preds = {}
for d in predictions:
preds[d['question_id']] = d['answer']
# for q in predictions.keys():
# if 'direct_answer' in predictions[q].keys():
# da_predictions[q] = predictions[q]['direct_answer']
if isinstance(dataset, list):
dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
if multiple_choice:
dataset = {k:v for k,v in dataset.items() if v['difficult_direct_answer'] is False}
if strict:
dataset_qids = set(dataset.keys())
preds_qids = set(preds.keys())
assert dataset_qids.issubset(preds_qids)
# dataset = q_id (str) : dataset element (dict)
# preds = q_id (str) : prediction (str)
acc = []
for q in dataset.keys():
if q not in preds.keys():
acc.append(0.0)
continue
pred = preds[q]
choices = dataset[q]['choices']
direct_answers = dataset[q]['direct_answers']
## Multiple Choice setting
if multiple_choice:
if strict:
assert pred in choices, 'Prediction must be a valid choice'
correct_choice_idx = dataset[q]['correct_choice_idx']
acc.append( float(pred == choices[correct_choice_idx]) )
## Direct Answer setting
else:
num_match = sum([pred == da for da in direct_answers])
vqa_acc = min(1.0, num_match / 3.0)
acc.append(vqa_acc)
acc = sum(acc) / len(acc) * 100
# print accuracies
print("\n")
print("Overall Accuracy is: %.02f\n" % (acc))
return acc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--resFile', type=str, help='Path to the json file with predictions')
parser.add_argument('--split', type=str, help='Split to evaluate on')
opt = parser.parse_args()
run_eval(opt.resFile, split=opt.split)
| 2,667 | 31.536585 | 93 |
py
|
VLC-BERT
|
VLC-BERT-master/external/PythonEvaluationTools/okvqa_vqaEval.py
|
# coding: utf-8
import argparse
import json
from external.PythonEvaluationTools.vqaEval import VQAEval
from external.PythonEvaluationTools.vqa_helper import VQA
def run_eval(resFile=None, save_path=None, pruned=False):
# set up file names and paths
taskType = 'OpenEnded'
dataType = 'mscoco'
dataSubType = 'val2014'
data_dir = 'data/coco/okvqa'
pruned_tag = '_pruned' if pruned else ''
annFile = "%s/%s_%s_annotations%s.json" % (data_dir, dataType, dataSubType, pruned_tag)
quesFile = "%s/%s_%s_%s_questions%s.json" % (data_dir, taskType, dataType, dataSubType, pruned_tag)
fileTypes = ['accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']
output_dir = save_path
[accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s/%s%s.json' % (output_dir, fileType, pruned_tag) for fileType in
fileTypes]
# create vqa object and vqaRes object
vqa = VQA(annFile, quesFile)
vqaRes = vqa.loadRes(resFile, quesFile)
# create vqaEval object by taking vqa and vqaRes
vqaEval = VQAEval(vqa, vqaRes, n=2) # n is precision of accuracy (number of places after decimal), default is 2
# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""
vqaEval.evaluate()
question_types = {
"eight": "Plants and Animals",
"nine": "Science and Technology",
"four": "Sports and Recreation",
"six": "Geography, History, Language and Culture",
"two": "Brands, Companies and Products",
"other": "Other",
"one": "Vehicles and Transportation",
"five": "Cooking and Food",
"ten": "Weather and Climate",
"seven": "People and Everyday life",
"three": "Objects, Material and Clothing"
}
# print accuracies
print("\n")
print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
print("Per Question Type Accuracy is the following:")
for quesType in vqaEval.accuracy['perQuestionType']:
print("%s : %.02f" % (question_types[quesType], vqaEval.accuracy['perQuestionType'][quesType]))
print("\n")
print("Per Answer Type Accuracy is the following:")
for ansType in vqaEval.accuracy['perAnswerType']:
print("%s : %.02f" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
print("\n")
#save evaluation results to ./Results folder
json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
return vqaEval.accuracy['overall']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--resFile', type=str, help='Path to the json file with predictions')
parser.add_argument('--savepath', type=str, help='Save path')
parser.add_argument('--pruned', action='store_true', help='Whether to use pruned annotations')
opt = parser.parse_args()
run_eval(opt.resFile, opt.savepath, opt.pruned)
| 3,266 | 39.8375 | 136 |
py
|
VLC-BERT
|
VLC-BERT-master/external/PythonEvaluationTools/vqaEval.py
|
# coding=utf-8
__author__='aagrawal'
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py).
import sys
import re
class VQAEval:
def __init__(self, vqa, vqaRes, n=2):
self.n = n
self.accuracy = {}
self.evalQA = {}
self.evalQuesType = {}
self.evalAnsType = {}
self.vqa = vqa
self.vqaRes = vqaRes
self.params = {'question_id': vqa.getQuesIds()}
self.contractions = {"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", \
"couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", \
"hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", \
"he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", \
"Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", \
"maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", \
"mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", \
"ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", \
"she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", \
"somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", \
"somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", \
"someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", \
"something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", \
"there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", \
"they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", \
"wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", \
"whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", \
"whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", \
"whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", \
"wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", \
"y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", \
"youll": "you'll", "youre": "you're", "youve": "you've"}
self.manualMap = { 'none': '0',
'zero': '0',
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10'
}
self.articles = ['a',
'an',
'the'
]
self.periodStrip = re.compile(r"(?!<=\d)(\.)(?!\d)")
self.commaStrip = re.compile(r"(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
def evaluate(self, quesIds=None):
if quesIds is None:
quesIds = [quesId for quesId in self.params['question_id']]
gts = {}
res = {}
for quesId in quesIds:
gts[quesId] = self.vqa.qa[quesId]
res[quesId] = self.vqaRes.qa[quesId]
# =================================================
# Compute accuracy
# =================================================
accQA = []
accQuesType = {}
accAnsType = {}
print("computing accuracy")
step = 0
for quesId in quesIds:
resAns = res[quesId]['answer']
resAns = resAns.replace('\n', ' ')
resAns = resAns.replace('\t', ' ')
resAns = resAns.strip()
resAns = self.processPunctuation(resAns)
resAns = self.processDigitArticle(resAns)
gtAcc = []
gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']]
if len(set(gtAnswers)) > 1:
for ansDic in gts[quesId]['answers']:
ansDic['answer'] = self.processPunctuation(ansDic['answer'])
for gtAnsDatum in gts[quesId]['answers']:
otherGTAns = [item for item in gts[quesId]['answers'] if item!=gtAnsDatum]
matchingAns = [item for item in otherGTAns if item['answer']==resAns]
acc = min(1, float(len(matchingAns))/3)
gtAcc.append(acc)
quesType = gts[quesId]['question_type']
ansType = gts[quesId]['answer_type']
avgGTAcc = float(sum(gtAcc))/len(gtAcc)
accQA.append(avgGTAcc)
if quesType not in accQuesType:
accQuesType[quesType] = []
accQuesType[quesType].append(avgGTAcc)
if ansType not in accAnsType:
accAnsType[ansType] = []
accAnsType[ansType].append(avgGTAcc)
self.setEvalQA(quesId, avgGTAcc)
self.setEvalQuesType(quesId, quesType, avgGTAcc)
self.setEvalAnsType(quesId, ansType, avgGTAcc)
if step%100 == 0:
self.updateProgress(step/float(len(quesIds)))
step = step + 1
self.setAccuracy(accQA, accQuesType, accAnsType)
print("Done computing accuracy")
def processPunctuation(self, inText):
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) is not None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = self.periodStrip.sub("", outText)  # note: re.UNICODE was previously passed as the count argument
return outText
def processDigitArticle(self, inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = self.manualMap.setdefault(word, word)
if word not in self.articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in self.contractions:
outText[wordId] = self.contractions[word]
outText = ' '.join(outText)
return outText
def setAccuracy(self, accQA, accQuesType, accAnsType):
self.accuracy['overall'] = round(100*float(sum(accQA))/len(accQA), self.n)
self.accuracy['perQuestionType'] = {quesType: round(100*float(sum(accQuesType[quesType]))/len(accQuesType[quesType]), self.n) for quesType in accQuesType}
self.accuracy['perAnswerType'] = {ansType: round(100*float(sum(accAnsType[ansType]))/len(accAnsType[ansType]), self.n) for ansType in accAnsType}
def setEvalQA(self, quesId, acc):
self.evalQA[quesId] = round(100*acc, self.n)
def setEvalQuesType(self, quesId, quesType, acc):
if quesType not in self.evalQuesType:
self.evalQuesType[quesType] = {}
self.evalQuesType[quesType][quesId] = round(100*acc, self.n)
def setEvalAnsType(self, quesId, ansType, acc):
if ansType not in self.evalAnsType:
self.evalAnsType[ansType] = {}
self.evalAnsType[ansType][quesId] = round(100*acc, self.n)
def updateProgress(self, progress):
barLength = 20
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rFinished Percent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), int(progress*100), status)
sys.stdout.write(text)
sys.stdout.flush()
| 8,197 | 43.075269 | 156 |
py
|
VLC-BERT
|
VLC-BERT-master/external/PythonEvaluationTools/vqa_helper.py
|
__author__ = 'aagrawal'
__version__ = '0.9'
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(COCO.function)"
import copy
import datetime
import json
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
if annotation_file is not None and question_file is not None:
print('loading VQA annotations and questions into memory...')
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
questions = json.load(open(question_file, 'r'))
print(datetime.datetime.utcnow() - time_t)
self.dataset = dataset
self.questions = questions
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']}
qa = {ann['question_id']: [] for ann in self.dataset['annotations']}
qqa = {ann['question_id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToQA[ann['image_id']] += [ann]
qa[ann['question_id']] = ann
for ques in self.questions['questions']:
qqa[ques['question_id']] = ques
print('index created!')
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('%s: %s' % (key, value))
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], [])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['question_id'] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(quesIds) == 0:
anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa], [])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['image_id'] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann['question_id']
print("Question: %s" % (self.qqa[quesId]['question']))
for ans in ann['answers']:
print("Answer %d: %s" % (ans['answer_id'], ans['answer']))
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset['info'] = copy.deepcopy(self.questions['info'])
res.dataset['task_type'] = copy.deepcopy(self.questions['task_type'])
res.dataset['data_type'] = copy.deepcopy(self.questions['data_type'])
res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype'])
res.dataset['license'] = copy.deepcopy(self.questions['license'])
print('Loading and preparing results... ')
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, 'results is not an array of objects'
annsQuesIds = [ann['question_id'] for ann in anns]
assert set(annsQuesIds) == set(self.getQuesIds()), \
'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is at least one question id that does not belong to the question ids in the annotation file.'
for ann in anns:
quesId = ann['question_id']
if res.dataset['task_type'] == 'Multiple Choice':
assert ann['answer'] in self.qqa[quesId][
'multiple_choices'], 'predicted answer is not one of the multiple choices'
qaAnn = self.qa[quesId]
ann['image_id'] = qaAnn['image_id']
ann['question_type'] = qaAnn['question_type']
ann['answer_type'] = qaAnn['answer_type']
print('DONE (t=%0.2fs)' % ((datetime.datetime.utcnow() - time_t).total_seconds()))
res.dataset['annotations'] = anns
res.createIndex()
return res
| 8,063 | 43.552486 | 242 |
py
|
VLC-BERT
|
VLC-BERT-master/external/PythonEvaluationTools/__init__.py
| 0 | 0 | 0 |
py
|
|
VLC-BERT
|
VLC-BERT-master/external/pytorch_pretrained_bert/optimization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
return 0.5 * (1.0 + math.cos(math.pi * x))  # x is a plain float, so use math.cos rather than torch.cos
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 - x
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
}
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
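# Usage sketch (added for illustration; not part of the original repository). BertAdam pairs
# the warmup_linear schedule with weight decay applied directly to the update (the "weight
# decay fix"); lr, warmup and t_total below are illustrative and would normally be derived
# from the total number of optimization steps.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    num_train_steps = 100
    optimizer = BertAdam(model.parameters(), lr=5e-5, warmup=0.1,
                         t_total=num_train_steps, weight_decay=0.01)
    for _ in range(3):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    print(optimizer.get_lr())    # per-parameter scheduled lr during warmup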
| 6,803 | 40.742331 | 116 |
py
|