repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (1 class: py)
---|---|---|---|---|---|---
AutoPruner | AutoPruner-master/ResNet50/50/compress_model/compress_model.py | import torch
from new_model import NetworkNew
import argparse
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--group_id', default=0, type=int, help='the id of compressed layer, starting from 0')
args = parser.parse_args()
print(args)
def main():
# 1. create compressed model
vgg16_new = NetworkNew(group_id=args.group_id)
    # 2. model setup
    vgg16_new = vgg16_new.cuda()
    vgg16_new = torch.nn.DataParallel(vgg16_new, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
new_model_param = vgg16_new.state_dict()
torch.save(new_model_param, '../checkpoint/model.pth')
print('Finished!')
if __name__ == '__main__':
main()
| 786 | 28.148148 | 106 | py |
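The script above saves the state_dict of a DataParallel-wrapped model, so every key carries a `module.` prefix. A minimal sketch (not part of the repo; the usage lines are hypothetical) of how such a checkpoint can be reloaded into a plain, unwrapped module:

```python
import torch

def strip_data_parallel_prefix(state_dict):
    """Return a copy of state_dict with DataParallel's 'module.' key prefix removed."""
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}

# Hypothetical usage:
# raw = torch.load('../checkpoint/model.pth', map_location='cpu')
# model = NetworkNew(group_id=0)            # unwrapped module
# model.load_state_dict(strip_data_parallel_prefix(raw))
```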
AutoPruner | AutoPruner-master/ResNet50/50/compress_model/evaluate_net.py | import torch
from new_model import NetworkNew_test
import argparse
import torch.backends.cudnn as cudnn
import os
import sys
import time
sys.path.append('../')
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
def evaluate():
# Phase 1: load model
model = NetworkNew_test('../checkpoint/model.pth')
    # Phase 1 (cont.): model setup
    model = model.cuda()
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
    # Phase 2: data loading
    print('\n[Phase 2] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 3: Validation
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
    for batch_idx, (input, target) in enumerate(dset_loaders['val']):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate()
| 3,966 | 31.516393 | 106 | py |
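The evaluation loop above uses pre-0.4 PyTorch idioms (`Variable(..., volatile=True)`, `.cuda(async=True)`, `loss.data[0]`), which no longer run on current releases. A minimal sketch of the same loop under PyTorch >= 1.0, assuming the `AverageMeter` and `accuracy` helpers defined above are in scope:

```python
import torch

def evaluate_modern(model, loader, criterion, device='cuda'):
    model.eval()
    losses, top1 = AverageMeter(), AverageMeter()
    with torch.no_grad():                                  # replaces volatile=True Variables
        for images, targets in loader:
            images = images.to(device, non_blocking=True)  # replaces .cuda(async=True)
            targets = targets.to(device, non_blocking=True)
            output = model(images)
            loss = criterion(output, targets)
            prec1, prec5 = accuracy(output, targets, topk=(1, 5))
            losses.update(loss.item(), images.size(0))     # replaces loss.data[0]
            top1.update(prec1.item(), images.size(0))
    return top1.avg
```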
AutoPruner | AutoPruner-master/ResNet50/30/fine_tune_compressed_model.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms
from src_code.lmdbdataset import lmdbDataset
from compress_model.new_model import NetworkNew_test
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=False, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
# model = models.vgg16(pretrained=True)
model = NetworkNew_test('checkpoint/model.pth')
print(model)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Data loading code from lmdb
if args.load_from_lmdb:
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True
)
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, int(args.epochs/3.0))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if is_best:
folder_path = 'checkpoint/fine_tune'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model.state_dict(), folder_path + '/model.pth')
print('best acc is %.3f' % best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
    """Sets the learning rate to the initial LR decayed by 10 every epoch_num epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 10,872 | 35.733108 | 106 | py |
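main() above calls `adjust_learning_rate(optimizer, epoch, int(args.epochs/3.0))`, so the learning rate drops by 10x twice over the run. A small illustration (not repo code) of the resulting schedule for the defaults `--lr 0.001 --epochs 30`:

```python
base_lr, epochs = 0.001, 30
epoch_num = int(epochs / 3.0)            # decay step used in main()
for e in (0, 9, 10, 19, 20, 29):
    print(e, base_lr * (0.1 ** (e // epoch_num)))
# epochs 0-9: 1e-3, epochs 10-19: 1e-4, epochs 20-29: 1e-5
```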
AutoPruner | AutoPruner-master/ResNet50/30/main.py | # ************************************************************
# Author : Bumsoo Kim, 2017
# Github : https://github.com/meliketoy/fine-tuning.pytorch
#
# Korea University, Data-Mining Lab
# Deep Convolutional Network Fine tuning Implementation
#
# Description : main.py
# The main code for training classification networks.
# ***********************************************************
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
import numpy as np
import shutil
import math
from torchvision import models
from src_code import Network_FT
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=8, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/resnet50-19c8e357.pth',
type=str, help='the path of fine tuned model')
parser.add_argument('--gpu_id', default='0,1,2,3', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--group_id', default=0, type=int, help='the id of compressed group, starting from 0')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--compression_rate', default=0.3, type=float, help='target fraction of channels kept (fraction of 1s in the binary mask)')
parser.add_argument('--channel_index_range', default=20, type=int, help='number of iterations over which channel indices are averaged before binarization')
parser.add_argument('--print-freq', '-p', default=20, type=int,
                    metavar='N', help='print frequency (default: 20)')
parser.add_argument('--alpha_range', default=100, type=int, help='interval (in iterations) at which the sigmoid scale factor alpha is advanced')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
print(args)
resnet_channel_number = [6, 8, 12, 4]
scale_factor_list = None
alpha_index = 0
threshold = 95 * np.ones(resnet_channel_number[args.group_id])
def main():
global args, best_prec1, scale_factor_list, resnet_channel_number
    # Phase 1: data loading
    print('\n[Phase 1] : Data Preparation')
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
if args.group_id == 0:
model_ft = models.resnet50(True).cuda()
model_ft = torch.nn.DataParallel(model_ft)
model_param = model_ft.state_dict()
torch.save(model_param, 'checkpoint/model.pth')
model_ft = Network_FT.NetworkNew(args.group_id).cuda()
model_ft = torch.nn.DataParallel(model_ft)
cudnn.benchmark = True
print("model setup success!")
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
tmp = np.linspace(1, 100, int(args.num_epochs * len(train_loader) / args.alpha_range))
scale_factor_list = np.ones([resnet_channel_number[args.group_id], len(tmp)])
for tmp_i in range(resnet_channel_number[args.group_id]):
scale_factor_list[tmp_i, :] = tmp.copy()
reg_lambda = 10.0 * np.ones(resnet_channel_number[args.group_id])
for epoch in range(args.start_epoch, args.num_epochs):
adjust_learning_rate(optimizer, epoch, int(args.num_epochs/2.0))
# train for one epoch
channel_index, reg_lambda = train(train_loader, model_ft, criterion, optimizer, epoch, reg_lambda)
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion, channel_index)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
folder_path = 'checkpoint/group_' + str(args.group_id)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
if args.group_id == 3:
tmp = channel_index[0].copy()
tmp[:] = 1.0
channel_index.append(tmp.copy())
channel_index.append(tmp.copy())
torch.save(channel_index, folder_path+'/channel_index.pth')
def train(train_loader, model, criterion, optimizer, epoch, reg_lambda):
global resnet_channel_number, scale_factor_list, alpha_index, threshold
gpu_num = torch.cuda.device_count()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
channel_index_list = list()
channel_index_binary = list()
end = time.time()
for i, (input, target) in enumerate(train_loader):
if i % args.alpha_range == 0:
if alpha_index == scale_factor_list.shape[1]:
alpha_index = alpha_index - 1
scale_factor = scale_factor_list[:, alpha_index]
alpha_index = alpha_index + 1
model.module.set_scale_factor(scale_factor)
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output, scale_vec = model(input_var)
loss = criterion(output, target_var)
for vec_i in range(len(scale_vec)):
loss = loss + float(reg_lambda[vec_i]) * (
scale_vec[vec_i].norm(1) / float(scale_vec[vec_i].size(0)) - args.compression_rate) ** 2
# compute channel index
channel_index_sublist = list()
for vec_i in range(len(scale_vec)):
tmp = scale_vec[vec_i].data.cpu().numpy().reshape(gpu_num, -1).mean(0)
channel_index_sublist.append(tmp.copy())
if i == 0:
print('first 5 values in layer {0}: [{1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}, {5:.6f}]'.format(int(vec_i),
tmp[0],
tmp[1],
tmp[2],
tmp[3],
tmp[4]))
channel_index_list.append(channel_index_sublist.copy())
if len(channel_index_list) == args.channel_index_range:
channel_index_binary = list()
for vec_i in range(len(scale_vec)):
tmp = list()
for tmp_i in range(len(channel_index_list)):
tmp_a = channel_index_list[tmp_i][vec_i]
tmp_a = (np.sign(tmp_a - 0.5) + 1) / 2.0 # to 0-1 binary
tmp.append(tmp_a)
tmp = np.array(tmp).sum(axis=0)
tmp = tmp / args.channel_index_range
tmp_value = channel_index_list[0][vec_i]
print(
'first 5 values in layer {0}: [{1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}, {5:.6f}]'.format(int(vec_i),
tmp_value[0],
tmp_value[1],
tmp_value[2],
tmp_value[3],
tmp_value[4]))
channel_index = (np.sign(tmp - 0.5) + 1) / 2.0 # to 0-1 binary
channel_index_binary.append(channel_index.copy())
binary_pruning_rate = 100.0 * np.sum(channel_index == 0) / len(channel_index)
if binary_pruning_rate >= threshold[vec_i]:
scale_factor_list[vec_i, :] = scale_factor_list[vec_i, :] + 1
threshold[vec_i] = threshold[vec_i] - 5
if threshold[vec_i] < 100 - 100 * args.compression_rate:
threshold[vec_i] = 100 - 100 * args.compression_rate
print('threshold in layer %d is %d' % (int(vec_i), int(threshold[vec_i])))
two_side_rate = (np.sum(tmp_value > 0.8) + np.sum(tmp_value < 0.2)) / len(tmp_value)
if two_side_rate < 0.9 and alpha_index >= 50:
scale_factor_list[vec_i, :] = scale_factor_list[vec_i, :] + 1
reg_lambda[vec_i] = 100.0 * np.abs(binary_pruning_rate/100.0 - 1 + args.compression_rate)
tmp[tmp == 0] = 1
channel_inconsistency = 100.0 * np.sum(tmp != 1) / len(tmp)
print(
"[{0}] pruning rate: {1:.4f}%, inconsistency: {2:.4f}%, reg_lambda: {3:.4f}, scale_factor: {4:.4f}, two_side: {5:.4f}".format(
int(vec_i), binary_pruning_rate, channel_inconsistency, reg_lambda[vec_i], scale_factor[vec_i],
two_side_rate))
sys.stdout.flush()
channel_index_list = list()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
top1=top1, top5=top5))
print('+--------------------------------------------------------------------------------------------------+')
sys.stdout.flush()
return channel_index_binary, reg_lambda
def validate(val_loader, model, criterion, channel_index):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output, _ = model(input_var, channel_index)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 14,760 | 42.160819 | 146 | py |
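The key AutoPruner-specific piece in train() above is the sparsity regularizer: each scale vector v (sigmoid outputs in [0, 1]) is penalized so that its mean moves toward the target compression rate r, with a per-layer weight reg_lambda that is itself adapted from the observed pruning rate. A standalone illustration of the penalty term (not repo code):

```python
import torch

def autopruner_penalty(scale_vecs, reg_lambda, r):
    """Sum of lambda_i * (mean(|v_i|) - r)^2 over all scale vectors, as in train()."""
    penalty = 0.0
    for lam, v in zip(reg_lambda, scale_vecs):
        penalty = penalty + float(lam) * (v.norm(1) / float(v.size(0)) - r) ** 2
    return penalty

v = torch.sigmoid(torch.randn(64))               # fake scale vector for one layer
print(autopruner_penalty([v], [10.0], r=0.3))    # scalar penalty tensor
```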
AutoPruner | AutoPruner-master/ResNet50/30/evaluate_network.py | import torch
import torch.backends.cudnn as cudnn
import os
import sys
import argparse
import time
from src_code.lmdbdataset import lmdbDataset
from src_code import Network_FT
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/resnet50-19c8e357.pth',
type=str, help='the path of fine tuned model')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
print(args)
# Phase 1: data loading
print('\n[Phase 1] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
model = Network_FT.NetworkNew(0).cuda()
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
model.module.set_scale_factor([2.0] * 6)  # six scale factors for group 0: two per bottleneck in layer1
cudnn.benchmark = True
# Phase 3: evaluation
def evaluate_net():
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
    for batch_idx, (input, target) in enumerate(dset_loaders['val']):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
        output, _ = model(input_var)  # Network_FT.forward returns (logits, scale_vectors)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate_net()
| 4,037 | 31.564516 | 106 | py |
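A tiny usage sketch (illustration only) of the `AverageMeter` defined above, assuming the class is in scope: `val` holds the most recent batch statistic while `avg` is the running sample-weighted mean.

```python
meter = AverageMeter()
meter.update(0.8, n=100)     # e.g. batch accuracy 0.8 over 100 samples
meter.update(0.6, n=50)
print(meter.val)             # 0.6  (last batch only)
print(meter.avg)             # (0.8*100 + 0.6*50) / 150 = 0.733...
```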
AutoPruner | AutoPruner-master/ResNet50/30/fine_tune_again.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms
from src_code.lmdbdataset import lmdbDataset
from compress_model.new_model import NetworkNew_test
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=12, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=False, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
# model = models.vgg16(pretrained=True)
model = NetworkNew_test('checkpoint/model.pth')
print(model)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Data loading code from lmdb
if args.load_from_lmdb:
        train_loader = torch.utils.data.DataLoader(
            lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=8,
            pin_memory=True
        )
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
folder_path = 'checkpoint/fine_again'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
best_prec1 = max(prec1, best_prec1)
torch.save(model.state_dict(), folder_path + '/model.pth')
print('best accuracy is %.3f' % best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 4 epochs"""
lr = args.lr * (0.1 ** (epoch // 4))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 10,814 | 35.785714 | 106 | py |
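A worked example (illustration only) of the `accuracy` helper defined above: it returns a one-element tensor per requested k, scaled to a percentage. Assuming the function is in scope:

```python
import torch

output = torch.tensor([[0.10, 0.70, 0.20],   # top-1 prediction: class 1 (correct)
                       [0.80, 0.15, 0.05]])  # top-1 prediction: class 0 (wrong, target is 2)
target = torch.tensor([1, 2])
prec1, prec2 = accuracy(output, target, topk=(1, 2))
print(prec1.item(), prec2.item())            # 50.0 50.0
```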
AutoPruner | AutoPruner-master/ResNet50/30/src_code/my_op_fc.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import numpy as np
class MyGAP_fc(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*4096 -> 1*4096
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.mean(input, dim=0, keepdim=True)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = input[0].clone()
for i in range(grad_input.shape[0]):
grad_input[i, :] = grad_output.data / grad_input.shape[0]
return Variable(grad_input)
class MyScale_fc(torch.autograd.Function):
'''
input: x: 64*4096, scale:4096 ==> x[:, i]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
input_data2 = input_data.clone()
for i in range(scale_vec.shape[0]):
input_data2[:, i] = input_data[:, i] * scale_vec[i]
return input_data2
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
grad_input = input_data.clone()
for i in range(scale_vec.shape[0]):
grad_input[:, i] = grad_output.data[:, i] * scale_vec[i]
grad_vec = scale_vec.clone()
for i in range(scale_vec.shape[0]):
grad_vec[i] = torch.sum(grad_output.data[:, i]*input_data[:, i])
return Variable(grad_input), Variable(grad_vec)
class MyCS_fc(nn.Module):
def __init__(self, channels_num):
super(MyCS_fc, self).__init__()
self.layer_type = 'MyCS_fc'
self.fc = nn.Linear(channels_num, channels_num)
self.sigmoid = nn.Sigmoid()
def forward(self, x, scale_factor):
x_scale = MyGAP_fc.apply(x) # apply my GAP: N*4096 => 1*4096
x_scale = self.fc(x_scale) # 1*4096
x_scale = torch.squeeze(x_scale) # 4096
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
index = (np.sign(x_scale.data.cpu().numpy() - 0.5) + 1) / 2.0
x_scale.data = torch.FloatTensor(index).cuda()
x = MyScale_fc.apply(x, x_scale)
return x, x_scale
if __name__ == '__main__':
in_ = (Variable(torch.randn(3, 4).double(), requires_grad=True),
Variable(torch.randn(4).double(), requires_grad=True))
res = gradcheck(MyScale_fc.apply, in_, eps=1e-6, atol=1e-4)
# in_ = (Variable(torch.randn(4, 64).double(), requires_grad=True),)
# res = gradcheck(MyGAP_fc.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 2,729 | 31.117647 | 76 | py |
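Worth noting (illustration, not repo code): the hand-written `MyScale_fc` above is numerically identical to plain broadcasting, for which autograd derives the same input and scale gradients automatically:

```python
import torch

x = torch.randn(3, 4, requires_grad=True)
s = torch.randn(4, requires_grad=True)
y = x * s                   # scales column i of x by s[i], like MyScale_fc.apply(x, s)
y.sum().backward()          # dy/dx broadcasts s over rows; dy/ds is the column sums of x
print(torch.allclose(x.grad, s.detach().expand_as(x)))   # True
print(torch.allclose(s.grad, x.detach().sum(dim=0)))     # True
```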
AutoPruner | AutoPruner-master/ResNet50/30/src_code/my_op.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import numpy as np
import math
class MyGAP(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*512*14*14 -> 1*512*14*14
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.mean(input, dim=0, keepdim=True)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = input[0].clone()
for i in range(grad_input.shape[0]):
grad_input[i, :, :, :] = grad_output.data / grad_input.shape[0]
return Variable(grad_input)
class MyScale(torch.autograd.Function):
'''
input: x: 64*512*7*7, scale:512 ==> x[:, i, :, :]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
input_data2 = input_data.clone()
for i in range(scale_vec.shape[0]):
input_data2[:, i, :, :] = input_data[:, i, :, :] * scale_vec[i]
return input_data2
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
grad_input = input_data.clone()
for i in range(scale_vec.shape[0]):
grad_input[:, i, :, :] = grad_output.data[:, i, :, :] * scale_vec[i]
grad_vec = scale_vec.clone()
for i in range(scale_vec.shape[0]):
grad_vec[i] = torch.sum(grad_output.data[:, i, :, :]*input_data[:, i, :, :])
return Variable(grad_input), Variable(grad_vec)
class MyCS(nn.Module):
def __init__(self, channels_num, activation_size=14, max_ks=2):
super(MyCS, self).__init__()
self.layer_type = 'MyCS'
self.conv = nn.Conv2d(channels_num, channels_num,
kernel_size=int(activation_size / max_ks), stride=1, padding=0)
self.map = nn.MaxPool2d(kernel_size=max_ks, stride=max_ks)
self.sigmoid = nn.Sigmoid()
n = int(activation_size / max_ks) * int(activation_size / max_ks) * channels_num
self.conv.weight.data.normal_(0, 10 * math.sqrt(2.0 / n))
def forward(self, x, scale_factor, channel_index=None):
x_scale = MyGAP.apply(x) # apply my GAP: N*512*14*14 => 1*512*14*14
x_scale = self.map(x_scale) # apply MAP: 1*512*14*14 => 1*512*7*7
x_scale = self.conv(x_scale) # 1*512*1*1
x_scale = torch.squeeze(x_scale) # 512
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
x_scale.data = torch.FloatTensor(channel_index).cuda()
x = MyScale.apply(x, x_scale)
return x, x_scale
if __name__ == '__main__':
# in_ = (Variable(torch.randn(1, 1, 3, 3).double(), requires_grad=True),
# Variable(torch.randn(1).double(), requires_grad=True))
# res = gradcheck(MyScale.apply, in_, eps=1e-6, atol=1e-4)
in_ = (Variable(torch.randn(2, 64, 3, 3).double(), requires_grad=True),)
res = gradcheck(MyGAP.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 3,182 | 33.225806 | 93 | py |
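The same equivalence holds for the 4-D `MyScale` above; a short sketch with channel-wise broadcasting:

```python
import torch

x = torch.randn(2, 5, 3, 3, requires_grad=True)
s = torch.randn(5, requires_grad=True)
y = x * s.view(1, -1, 1, 1)   # x[:, i, :, :] * s[i], as in MyScale.apply(x, s)
y.sum().backward()
print(torch.allclose(s.grad, x.detach().sum(dim=(0, 2, 3))))  # True
```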
AutoPruner | AutoPruner-master/ResNet50/30/src_code/Network_FT.py | import torch.nn as nn
import math
import torch
from . import my_op
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, number_list, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(number_list[1], number_list[0], kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(number_list[0])
self.conv2 = nn.Conv2d(number_list[3], number_list[2], kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(number_list[2])
self.conv3 = nn.Conv2d(number_list[5], number_list[4], kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(number_list[4])
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck_with_CS(nn.Module):
expansion = 4
def __init__(self, number_list, stride=1, downsample=None, ks=1, CS_id=0):
super(Bottleneck_with_CS, self).__init__()
self.conv1 = nn.Conv2d(number_list[1], number_list[0], kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(number_list[0])
self.conv2 = nn.Conv2d(number_list[3], number_list[2], kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(number_list[2])
self.conv3 = nn.Conv2d(number_list[5], number_list[4], kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(number_list[4])
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.CS_id = CS_id
self.channel_index = list()
if ks == 7:
mks = 1
else:
mks = 2
self.cs1 = my_op.MyCS(number_list[0], activation_size=ks * stride, max_ks=mks)
self.cs2 = my_op.MyCS(number_list[2], activation_size=ks, max_ks=mks)
self.vec1 = None
self.vec2 = None
self.scale_factor1 = 1.0
self.scale_factor2 = 1.0
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.training:
out, self.vec1 = self.cs1(out, self.scale_factor1)
else:
out, self.vec1 = self.cs1(out, self.scale_factor1, self.channel_index[2 * self.CS_id])
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.training:
out, self.vec2 = self.cs2(out, self.scale_factor2)
else:
out, self.vec2 = self.cs2(out, self.scale_factor2, self.channel_index[2 * self.CS_id + 1])
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, group_id, block, layers, num_classes=1000):
old_weight = torch.load('checkpoint/model.pth')
channel_number_list = analyse_number(old_weight)
self.kernel_size = int(56 / (2**group_id))
self.inplanes = 64
self.g_id = group_id
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(channel_number_list[0], 0, block, 64, layers[0])
self.layer2 = self._make_layer(channel_number_list[1], 1, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(channel_number_list[2], 2, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(channel_number_list[3], 3, block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# # m.weight.data.normal_(0, math.sqrt(2. / n))
# m.weight.data.normal_(0, math.sqrt(1.))
# # torch.nn.init.xavier_uniform(m.weight)
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
old_weight = torch.load('checkpoint/model.pth')
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in old_weight.items():
name = ''.join(list(k)[7:])
if name in my_keys:
my_weight[name] = v
self.load_state_dict(my_weight)
def _make_layer(self, number_list, group_id, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
if group_id == self.g_id:
layers.append(Bottleneck_with_CS(number_list[0], stride, downsample, ks=self.kernel_size, CS_id=0))
else:
layers.append(block(number_list[0], stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
if group_id == self.g_id:
if self.g_id == 3 and i == blocks-1:
layers.append(block(number_list[i]))
else:
layers.append(Bottleneck_with_CS(number_list[i], ks=self.kernel_size, CS_id=i))
else:
layers.append(block(number_list[i]))
return nn.Sequential(*layers)
def forward(self, x, channel_index=None):
if not self.training:
self.set_channel_index(channel_index)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x) # 128, 64, 56, 56
x = self.layer1(x) # 128, 64, 56, 56
x = self.layer2(x) # 128, 512, 28, 28
x = self.layer3(x) # 128, 1024, 14, 14
x = self.layer4(x) # 128, 2048, 7, 7
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
scale_vector = self.get_scale_vector()
return x, scale_vector
def set_channel_index(self, channel_index):
if self.g_id == 0:
self.layer1[0].channel_index = channel_index
self.layer1[1].channel_index = channel_index
self.layer1[2].channel_index = channel_index
elif self.g_id == 1:
self.layer2[0].channel_index = channel_index
self.layer2[1].channel_index = channel_index
self.layer2[2].channel_index = channel_index
self.layer2[3].channel_index = channel_index
elif self.g_id == 2:
self.layer3[0].channel_index = channel_index
self.layer3[1].channel_index = channel_index
self.layer3[2].channel_index = channel_index
self.layer3[3].channel_index = channel_index
self.layer3[4].channel_index = channel_index
self.layer3[5].channel_index = channel_index
else:
self.layer4[0].channel_index = channel_index
self.layer4[1].channel_index = channel_index
# self.layer4[2].channel_index = channel_index
def get_scale_vector(self):
vector_list = list()
if self.g_id == 0:
vector_list.append(self.layer1[0].vec1)
vector_list.append(self.layer1[0].vec2)
vector_list.append(self.layer1[1].vec1)
vector_list.append(self.layer1[1].vec2)
vector_list.append(self.layer1[2].vec1)
vector_list.append(self.layer1[2].vec2)
elif self.g_id == 1:
vector_list.append(self.layer2[0].vec1)
vector_list.append(self.layer2[0].vec2)
vector_list.append(self.layer2[1].vec1)
vector_list.append(self.layer2[1].vec2)
vector_list.append(self.layer2[2].vec1)
vector_list.append(self.layer2[2].vec2)
vector_list.append(self.layer2[3].vec1)
vector_list.append(self.layer2[3].vec2)
elif self.g_id == 2:
vector_list.append(self.layer3[0].vec1)
vector_list.append(self.layer3[0].vec2)
vector_list.append(self.layer3[1].vec1)
vector_list.append(self.layer3[1].vec2)
vector_list.append(self.layer3[2].vec1)
vector_list.append(self.layer3[2].vec2)
vector_list.append(self.layer3[3].vec1)
vector_list.append(self.layer3[3].vec2)
vector_list.append(self.layer3[4].vec1)
vector_list.append(self.layer3[4].vec2)
vector_list.append(self.layer3[5].vec1)
vector_list.append(self.layer3[5].vec2)
else:
vector_list.append(self.layer4[0].vec1)
vector_list.append(self.layer4[0].vec2)
vector_list.append(self.layer4[1].vec1)
vector_list.append(self.layer4[1].vec2)
# vector_list.append(self.layer4[2].vec1)
# vector_list.append(self.layer4[2].vec2)
return vector_list
def set_scale_factor(self, sf):
if self.g_id == 0:
self.layer1[0].scale_factor1 = sf[0]
self.layer1[0].scale_factor2 = sf[1]
self.layer1[1].scale_factor1 = sf[2]
self.layer1[1].scale_factor2 = sf[3]
self.layer1[2].scale_factor1 = sf[4]
self.layer1[2].scale_factor2 = sf[5]
elif self.g_id == 1:
self.layer2[0].scale_factor1 = sf[0]
self.layer2[0].scale_factor2 = sf[1]
self.layer2[1].scale_factor1 = sf[2]
self.layer2[1].scale_factor2 = sf[3]
self.layer2[2].scale_factor1 = sf[4]
self.layer2[2].scale_factor2 = sf[5]
self.layer2[3].scale_factor1 = sf[6]
self.layer2[3].scale_factor2 = sf[7]
elif self.g_id == 2:
self.layer3[0].scale_factor1 = sf[0]
self.layer3[0].scale_factor2 = sf[1]
self.layer3[1].scale_factor1 = sf[2]
self.layer3[1].scale_factor2 = sf[3]
self.layer3[2].scale_factor1 = sf[4]
self.layer3[2].scale_factor2 = sf[5]
self.layer3[3].scale_factor1 = sf[6]
self.layer3[3].scale_factor2 = sf[7]
self.layer3[4].scale_factor1 = sf[8]
self.layer3[4].scale_factor2 = sf[9]
self.layer3[5].scale_factor1 = sf[10]
self.layer3[5].scale_factor2 = sf[11]
else:
self.layer4[0].scale_factor1 = sf[0]
self.layer4[0].scale_factor2 = sf[1]
self.layer4[1].scale_factor1 = sf[2]
self.layer4[1].scale_factor2 = sf[3]
# self.layer4[2].scale_factor = sf
def analyse_number(weight):
number_list = list()
group_list = list()
layer_list = list()
old_name = '1.0.'
old_group = '1'
for k, v in weight.items():
        if 'layer' in k and 'conv' in k:
current_name = k.split('layer')[1].split('conv')[0]
current_group = current_name.split('.')[0]
if current_name != old_name:
old_name = current_name
group_list.append(layer_list.copy())
layer_list = list()
if current_group != old_group:
old_group = current_group
number_list.append(group_list.copy())
group_list = list()
layer_list.append(v.size()[0])
layer_list.append(v.size()[1])
group_list.append(layer_list.copy())
number_list.append(group_list.copy())
return number_list
def NetworkNew(group_id):
model = ResNet(group_id, Bottleneck, [3, 4, 6, 3])
return model
| 12,489 | 37.549383 | 111 | py |
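A toy illustration (not repo code) of what `analyse_number` above extracts from a state_dict: for every `layerG.B.convN` weight of shape `(out_channels, in_channels, kh, kw)` it records the `[out, in]` pair, nested per bottleneck block and per layer group. Assuming the function is in scope:

```python
import torch

w = {
    'layer1.0.conv1.weight': torch.zeros(64, 64, 1, 1),
    'layer1.0.conv2.weight': torch.zeros(64, 64, 3, 3),
    'layer1.0.conv3.weight': torch.zeros(256, 64, 1, 1),
    'layer2.0.conv1.weight': torch.zeros(128, 256, 1, 1),
}
print(analyse_number(w))
# [[[64, 64, 64, 64, 256, 64]], [[128, 256]]]
```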
AutoPruner | AutoPruner-master/ResNet50/30/src_code/lmdbdataset.py | import cv2
import numpy as np
import torchvision.transforms as transforms
import lmdb
import msgpack
from torch.utils.data import Dataset
from PIL import Image
class lmdbDataset(Dataset):
def __init__(self, location, is_train):
self.env = lmdb.open(location, subdir=False, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
self.txn = self.env.begin(write=False)
self.length = self.txn.stat()['entries']
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# train data augment
if is_train:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
# test data augment
else:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
'''
for key,data in self.txn.cursor():
now_data = msgpack.loads(data,raw=False)
data_img = now_data[0]
label = now_data[1]
now_arr = np.frombuffer(data_img[b'data'],dtype=np.uint8)
print(now_arr)
image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)
print(image_content.shape)
#print(type(_))
break
'''
def __len__(self):
return self.length - 1
def __getitem__(self, index):
new_index = str(index).encode()
data = self.txn.get(new_index)
now_data = msgpack.loads(data, raw=False)
data_img = now_data[0]
label = now_data[1]
now_arr = np.frombuffer(data_img[b'data'], dtype=np.uint8)
image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)
image_content = cv2.cvtColor(image_content, cv2.COLOR_BGR2RGB)
image_content = Image.fromarray(image_content)
image_content = self.transform(image_content)
return image_content, label
if __name__ == '__main__':
temp_dataset = lmdbDataset('indoor67.lmdb', True)
print(temp_dataset[0])
#print(i)
#assert temp_dataset[i][0] is not None | 2,431 | 34.246377 | 111 | py |
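A usage sketch for `lmdbDataset`, mirroring how the training scripts above construct their loaders; the LMDB path is hypothetical:

```python
import torch
from src_code.lmdbdataset import lmdbDataset

loader = torch.utils.data.DataLoader(
    lmdbDataset('/path/to/ILSVRC-val.lmdb', is_train=False),
    batch_size=100, num_workers=8, pin_memory=True)

for images, labels in loader:
    print(images.shape)   # torch.Size([100, 3, 224, 224])
    break
```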
AutoPruner | AutoPruner-master/ResNet50/30/compress_model/new_model.py | import torch.nn as nn
import torch
import numpy as np
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, number_list, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(number_list[1], number_list[0], kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(number_list[0])
self.conv2 = nn.Conv2d(number_list[3], number_list[2], kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(number_list[2])
self.conv3 = nn.Conv2d(number_list[5], number_list[4], kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(number_list[4])
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, group_id, block, layers, num_classes=1000):
folder_path = '../checkpoint/group_' + str(group_id)
old_weight = torch.load(folder_path+'/model.pth')
channel_index = torch.load(folder_path+'/channel_index.pth')
channel_number_list = analyse_number(old_weight)
for i in range(int(len(channel_index)/2)):
new_num = np.where(channel_index[2 * i] != 0)[0]
new_num_1 = int(new_num.shape[0])
new_num = np.where(channel_index[2 * i + 1] != 0)[0]
new_num_2 = int(new_num.shape[0])
channel_number_list[group_id][i][0] = new_num_1
channel_number_list[group_id][i][2] = new_num_2
channel_number_list[group_id][i][3] = new_num_1
channel_number_list[group_id][i][5] = new_num_2
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(channel_number_list[0], block, 64, layers[0])
self.layer2 = self._make_layer(channel_number_list[1], block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(channel_number_list[2], block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(channel_number_list[3], block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
my_weight = self.state_dict()
ci_count = 0
ci_1 = 0
ci_2 = 0
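        # Copy the old weights into the pruned model. Inside the chosen group,
        # conv1/bn1 keep only the rows selected by the first mask, conv2 keeps
        # rows from the second mask and input columns from the first, and conv3
        # keeps only the input columns selected by the second mask; all other
        # parameters (including the downsample branch) are copied unchanged.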
for k, v in my_weight.items():
name = 'module.' + k
if 'layer'+str(group_id+1) in name and 'downsample' not in name:
name_tmp = name.split('.')
if '1' in name_tmp[3]:
if 'conv' in name:
ci_1 = torch.cuda.LongTensor(np.where(channel_index[ci_count] != 0)[0])
ci_count += 1
my_weight[k] = old_weight[name][ci_1, :, :, :]
else:
my_weight[k] = old_weight[name][ci_1]
elif '2' in name_tmp[3]:
if 'conv' in name:
ci_2 = torch.cuda.LongTensor(np.where(channel_index[ci_count] != 0)[0])
ci_count += 1
my_weight[k] = old_weight[name][ci_2, :, :, :]
my_weight[k] = my_weight[k][:, ci_1, :, :]
else:
my_weight[k] = old_weight[name][ci_2]
elif '3' in name_tmp[3]:
if 'conv' in name:
my_weight[k] = old_weight[name][:, ci_2, :, :]
else:
my_weight[k] = old_weight[name]
else:
                    print('error: unexpected parameter name ' + name)
else:
my_weight[k] = old_weight[name]
self.load_state_dict(my_weight)
def _make_layer(self, number_list, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(number_list[0], stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(number_list[i]))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def analyse_number(weight):
    # Walk the state dict and record, for every conv layer inside the residual
    # groups, its (out_channels, in_channels) pair, grouped as
    # number_list[group][block] = [out1, in1, out2, in2, out3, in3].
    number_list = list()
    group_list = list()
    layer_list = list()
    old_name = '1.0.'
    old_group = '1'
for k, v in weight.items():
if 'layer' in k and'conv' in k and 'cs' not in k:
current_name = k.split('layer')[1].split('conv')[0]
current_group = current_name.split('.')[0]
if current_name != old_name:
old_name = current_name
group_list.append(layer_list.copy())
layer_list = list()
if current_group != old_group:
old_group = current_group
number_list.append(group_list.copy())
group_list = list()
layer_list.append(v.size()[0])
layer_list.append(v.size()[1])
group_list.append(layer_list.copy())
number_list.append(group_list.copy())
return number_list
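# Illustration (hypothetical values): for a stock ResNet-50 state dict,
# analyse_number(w)[0][0] would be [64, 64, 64, 64, 256, 64] -- the
# (out_channels, in_channels) pairs of conv1/conv2/conv3 in the first
# bottleneck of layer1.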
def NetworkNew(group_id):
model = ResNet(group_id, Bottleneck, [3, 4, 6, 3])
return model
class ResNet_test(nn.Module):
def __init__(self, model_path, block, layers, num_classes=1000):
old_weight = torch.load(model_path)
channel_number_list = analyse_number(old_weight)
self.inplanes = 64
super(ResNet_test, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(channel_number_list[0], block, 64, layers[0])
self.layer2 = self._make_layer(channel_number_list[1], block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(channel_number_list[2], block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(channel_number_list[3], block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
my_weight = self.state_dict()
for k, v in my_weight.items():
name = 'module.' + k
my_weight[k] = old_weight[name]
self.load_state_dict(my_weight)
def _make_layer(self, number_list, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(number_list[0], stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(number_list[i]))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def NetworkNew_test(model_path):
model = ResNet_test(model_path, Bottleneck, [3, 4, 6, 3])
return model
| 8,767 | 35.381743 | 95 | py |
AutoPruner | AutoPruner-master/ResNet50/30/compress_model/compress_model.py | import torch
from new_model import NetworkNew
import argparse
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--group_id', default=3, type=int, help='the id of the compressed residual group (stage), starting from 0')
args = parser.parse_args()
print(args)
def main():
    # Phase 1 : create the compressed model for the given residual group
    model_new = NetworkNew(group_id=args.group_id)
    # Phase 2 : model setup and export of the pruned weights
    model_new = model_new.cuda()
    model_new = torch.nn.DataParallel(model_new, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True
    new_model_param = model_new.state_dict()
    torch.save(new_model_param, '../checkpoint/model.pth')
print('Finished!')
if __name__ == '__main__':
main()
| 786 | 28.148148 | 106 | py |
AutoPruner | AutoPruner-master/ResNet50/30/compress_model/evaluate_net.py | import torch
from new_model import NetworkNew_test
import argparse
import torch.backends.cudnn as cudnn
import os
import sys
import time
sys.path.append('../')
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='4,5', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
def evaluate():
# Phase 1: load model
model = NetworkNew_test('../checkpoint/model.pth')
# Phase 2 : Model setup
model = model.cuda()
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# Phase 2 : Data Upload
    print('\n[Phase 2] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 3: Validation
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
    for batch_idx, (input, target) in enumerate(dset_loaders['val']):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate()
| 3,968 | 31.532787 | 106 | py |
AutoPruner | AutoPruner-master/vgg16/50/fine_tune_compressed_model.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms
from src_code.lmdbdataset import lmdbDataset
from compress_model.new_model import vgg16_test
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=False, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
# model = models.vgg16(pretrained=True)
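    # vgg16_test presumably rebuilds the pruned VGG-16 architecture from the
    # channel counts stored in checkpoint/model.pth (see compress_model/new_model.py)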
model = vgg16_test('checkpoint/model.pth')
print(model)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
# Data loading code from lmdb
if args.load_from_lmdb:
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True
)
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, int(args.epochs/3.0))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if is_best:
folder_path = 'checkpoint/fine_tune'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model.state_dict(), folder_path + '/model.pth')
print('best acc is %.3f' % best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 10,897 | 35.693603 | 106 | py |
AutoPruner | AutoPruner-master/vgg16/50/main.py | # ************************************************************
# Author : Bumsoo Kim, 2017
# Github : https://github.com/meliketoy/fine-tuning.pytorch
#
# Korea University, Data-Mining Lab
# Deep Convolutional Network Fine tuning Implementation
#
# Description : main.py
# The main code for training classification networks.
# ***********************************************************
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
import numpy as np
import shutil
import math
from src_code import Network_FT
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=3, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/vgg16-397923af.pth',
type=str, help='the path of fine tuned model')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--layer_id', default=11, type=int, help='the id of compressed layer, starting from 0')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--compression_rate', default=0.4, type=float, help='fraction of channels to keep (share of 1s in the channel mask)')
parser.add_argument('--channel_index_range', default=20, type=int, help='window size (in mini-batches) used to compute the binary channel index')
parser.add_argument('--alpha_range', default=100, type=int, help='number of mini-batches between scale-factor updates')
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 10)')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
scale_factor_list = list()
alpha_index = 0
threshold = 95
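# threshold appears to act as a cap on the binary pruning rate: whenever the
# measured rate reaches it, train() bumps the sigmoid scale factor by 0.1 and
# lowers the cap by 5, never below the target 100 * (1 - compression_rate).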
if args.layer_id == 12:
args.compression_rate = 0.4
print(args)
def main():
global args, best_prec1, scale_factor_list
# Phase 1 : Data Upload
    print('\n[Phase 1] : Data Preparation')
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=10,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
if args.layer_id == 0:
model_ft = Network_FT.Vgg16(args.ft_model_path).cuda()
model_ft = torch.nn.DataParallel(model_ft)
model_param = model_ft.state_dict()
torch.save(model_param, 'checkpoint/model.pth')
model_ft = Network_FT.NetworkNew(args.layer_id).cuda()
print(model_ft)
model_ft = torch.nn.DataParallel(model_ft)
cudnn.benchmark = True
print("model setup success!")
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
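    # The sigmoid scale factor is annealed linearly from 0.1 to 2; one step is
    # consumed every alpha_range mini-batches (see train()), so the channel
    # scores are pushed toward hard 0/1 values as fine-tuning proceeds.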
scale_factor_list = np.linspace(0.1, 2, int(args.num_epochs * len(train_loader) / args.alpha_range))
reg_lambda = 10.0
for epoch in range(args.start_epoch, args.num_epochs):
adjust_learning_rate(optimizer, epoch, 2)
# train for one epoch
channel_index, reg_lambda = train(train_loader, model_ft, criterion, optimizer, epoch, reg_lambda)
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion, channel_index)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
folder_path = 'checkpoint/layer_' + str(args.layer_id)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
torch.save(channel_index, folder_path+'/channel_index.pth')
def train(train_loader, model, criterion, optimizer, epoch, reg_lambda):
global scale_factor_list, alpha_index, threshold
gpu_num = torch.cuda.device_count()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
channel_index_list = list()
channel_index = 0
end = time.time()
for i, (input, target) in enumerate(train_loader):
if i % args.alpha_range == 0:
if alpha_index == len(scale_factor_list):
alpha_index = len(scale_factor_list) - 1
scale_factor = scale_factor_list[alpha_index]
alpha_index = alpha_index + 1
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output, scale_vec = model(input_var, scale_factor)
loss = criterion(output, target_var)
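        # AutoPruner sparsity regularizer: pull the mean of the sigmoid channel
        # scores toward the target compression_rate; reg_lambda is re-estimated
        # every channel_index_range batches from the remaining gap (see below).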
loss = loss + float(reg_lambda) * (
scale_vec.norm(1) / float(scale_vec.size(0)) - args.compression_rate) ** 2
        # compute channel index: DataParallel returns one score vector per GPU,
        # so reshape and average them into a single vector
        tmp = scale_vec.data.cpu().numpy().reshape(gpu_num, -1).mean(0)
channel_index_list.append(tmp.copy())
if i == 0:
print('first 5 values: [{0:.6f}, {1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}]'.format(tmp[0], tmp[1], tmp[2], tmp[3],
tmp[4]))
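        # Every channel_index_range batches: binarize the buffered score
        # vectors at 0.5, majority-vote a binary channel index, and report the
        # real/binary pruning rates plus how consistent the index was inside
        # the window.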
if len(channel_index_list) == args.channel_index_range:
channel_index_list = np.array(channel_index_list)
tmp_value = channel_index_list[args.channel_index_range-1, :]
tmp = tmp_value
two_side_rate = (np.sum(tmp_value > 0.8) + np.sum(tmp_value < 0.2)) / len(tmp_value)
print('first 5 values: [{0:.6f}, {1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}]'.format(tmp[0], tmp[1], tmp[2], tmp[3],
tmp[4]))
tmp2 = channel_index_list.sum(axis=0)
tmp2 = tmp2 / args.channel_index_range
for tmp_i in range(len(channel_index_list)):
channel_index_list[tmp_i] = (np.sign(channel_index_list[tmp_i] - 0.5) + 1) / 2.0
tmp = channel_index_list.sum(axis=0)
tmp = tmp / args.channel_index_range
channel_index = (np.sign(tmp - 0.5) + 1) / 2.0 # to 0-1 binary
real_pruning_rate = 100.0 * np.sum(tmp2 < 10**-6) / len(tmp2)
binary_pruning_rate = 100.0 * np.sum(channel_index < 10**-6) / len(channel_index)
if binary_pruning_rate >= threshold:
scale_factor_list = scale_factor_list + 0.1
scale_factor = scale_factor + 0.1
threshold = threshold - 5
if threshold < 100 - 100*args.compression_rate:
threshold = 100 - 100*args.compression_rate
print('threshold is %d' % int(threshold))
if two_side_rate < 0.9 and alpha_index >= 20:
scale_factor_list = scale_factor_list + 0.1
scale_factor = scale_factor + 0.1
            tmp[tmp == 0] = 1  # channels that were off in every batch are consistent too
            channel_inconsistency = 100.0 * np.sum(tmp != 1) / len(tmp)  # share of channels that flipped inside the window
print(
"pruning rate (real/binary): {0:.4f}%/{1:.4f}%, index inconsistency: {2:.4f}%, two_side_rate: {3:.3f}".format(
real_pruning_rate, binary_pruning_rate, channel_inconsistency, two_side_rate))
channel_index_list = list()
reg_lambda = 100.0 * np.abs(binary_pruning_rate/100.0 - 1 + args.compression_rate)
sys.stdout.flush()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'scale_factor: {3:.4f}\t'
'reg_lambda: {4:.4f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), scale_factor, reg_lambda, batch_time=batch_time,
top1=top1, top5=top5))
sys.stdout.flush()
return channel_index, reg_lambda
def validate(val_loader, model, criterion, channel_index):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output, _ = model(input_var, 1.0, channel_index)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 12,807 | 38.409231 | 126 | py |
AutoPruner | AutoPruner-master/vgg16/50/evaluate_network.py | import torch
import torch.backends.cudnn as cudnn
import os
import sys
import argparse
import time
from src_code.lmdbdataset import lmdbDataset
from src_code import Network_FT
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data_base', default='/data/zhangcl2/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='12', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/vgg16-397923af.pth',
type=str, help='the path of fine tuned model')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
print(args)
# Phase 1 : Data Upload
print('\n[Phase 1] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
# model = Network_FT.NetworkNew(0).cuda()
model = Network_FT.Vgg16(args.ft_model_path).cuda()
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# Phase 3: evaluation
def evaluate_net():
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
    for batch_idx, (input, target) in enumerate(dset_loaders['val']):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate_net()
| 4,055 | 31.709677 | 107 | py |
AutoPruner | AutoPruner-master/vgg16/50/mytest.py | # ************************************************************
# Author : Bumsoo Kim, 2017
# Github : https://github.com/meliketoy/fine-tuning.pytorch
#
# Korea University, Data-Mining Lab
# Deep Convolutional Network Fine tuning Implementation
#
# Description : mytest.py
# The main code for training classification networks.
# ***********************************************************
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
import numpy as np
import shutil
import math
from src_code import Network_FT
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=2, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--ft_model_path', default='/home/luojh2/.torch/models/vgg16-397923af.pth',
type=str, help='the path of fine tuned model')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--layer_id', default=7, type=int, help='the id of compressed layer, starting from 0')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--compression_rate', default=0.2, type=float, help='fraction of channels to keep (share of 1s in the channel mask)')
parser.add_argument('--channel_index_range', default=20, type=int, help='window size (in mini-batches) used to compute the binary channel index')
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 10)')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
print(args)
def main():
global args, best_prec1
# Phase 1 : Data Upload
    print('\n[Phase 1] : Data Preparation')
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=16,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=16,
pin_memory=True)
print('data_loader_success!')
# Phase 2 : Model setup
print('\n[Phase 2] : Model setup')
if args.layer_id == 0:
model_ft = Network_FT.Vgg16(args.ft_model_path).cuda()
model_ft = torch.nn.DataParallel(model_ft)
model_param = model_ft.state_dict()
torch.save(model_param, 'checkpoint/model.pth')
model_ft = Network_FT.NetworkNew(args.layer_id).cuda()
weight = torch.load('checkpoint/layer_7/model.pth')
model_ft = torch.nn.DataParallel(model_ft)
model_ft.load_state_dict(weight)
cudnn.benchmark = True
print("model setup success!")
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
scale_factor = 9.0
for epoch in range(args.start_epoch, args.num_epochs):
adjust_learning_rate(optimizer, epoch, 1)
# train for one epoch
channel_index, scale_factor = train(train_loader, model_ft, criterion, optimizer, epoch, scale_factor)
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion, channel_index)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
folder_path = 'checkpoint/layer_' + str(args.layer_id)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
torch.save(channel_index, folder_path+'/channel_index.pth')
def train(train_loader, model, criterion, optimizer, epoch, scale_factor):
gpu_num = torch.cuda.device_count()
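    # Geometric schedule: multiplying by scale_factor_mul every mini-batch
    # raises the initial scale factor by an overall factor of 100 across the
    # whole run (num_epochs * len(train_loader) steps).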
scale_factor_mul = math.pow(100, 1.0/(args.num_epochs*len(train_loader)))
reg_lambda = 100
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
channel_index_list = list()
channel_index = 0
end = time.time()
for i, (input, target) in enumerate(train_loader):
scale_factor = scale_factor * scale_factor_mul
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output, scale_vec = model(input_var, scale_factor)
loss = criterion(output, target_var)
loss = loss + float(reg_lambda) * (
scale_vec.norm(1) / float(scale_vec.size(0)) - args.compression_rate) ** 2
# compute channel index
tmp = scale_vec.data.cpu().numpy().reshape(gpu_num, -1).mean(0)
channel_index_list.append(tmp.copy())
if len(channel_index_list) == args.channel_index_range:
channel_index_list = np.array(channel_index_list)
tmp = channel_index_list[0, :]
print('first 5 values: [{0:.6f}, {1:.6f}, {2:.6f}, {3:.6f}, {4:.6f}]'.format(tmp[0], tmp[1], tmp[2], tmp[3],
tmp[4]))
tmp2 = channel_index_list.sum(axis=0)
tmp2 = tmp2 / args.channel_index_range
for tmp_i in range(len(channel_index_list)):
channel_index_list[tmp_i] = (np.sign(channel_index_list[tmp_i] - 0.5) + 1) / 2.0
tmp = channel_index_list.sum(axis=0)
tmp = tmp / args.channel_index_range
channel_index = (np.sign(tmp - 0.5) + 1) / 2.0 # to 0-1 binary
real_pruning_rate = 100.0 * np.sum(tmp2 < 10**-6) / len(tmp2)
binary_pruning_rate = 100.0 * np.sum(channel_index < 10**-6) / len(channel_index)
tmp[tmp == 0] = 1
channel_inconsistency = 100.0 * np.sum(tmp != 1) / len(tmp)
print("pruning rate (real/binary): {0:.4f}%/{1:.4f}%, index inconsistency: {2:.4f}%".format(
real_pruning_rate, binary_pruning_rate, channel_inconsistency))
channel_index_list = list()
reg_lambda = 100.0 * np.abs(binary_pruning_rate/100.0 - 1 + args.compression_rate)
sys.stdout.flush()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'scale_factor: {3:.4f}\t'
'reg_lambda: {4:.4f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), scale_factor, reg_lambda, batch_time=batch_time,
top1=top1, top5=top5))
sys.stdout.flush()
return channel_index, scale_factor
def validate(val_loader, model, criterion, channel_index):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output, _ = model(input_var, 1.0, channel_index)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 11,382 | 37.849829 | 120 | py |
AutoPruner | AutoPruner-master/vgg16/50/fine_tune_vgg16.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, models, transforms
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=2, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=True, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='4,5', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
model = models.vgg16(pretrained=True)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code from lmdb
if args.load_from_lmdb:
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
            'arch': 'vgg16',  # args defines no --arch option; args.arch would raise AttributeError
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best)
torch.save(model.state_dict(), 'checkpoint/model.pth')
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 11,358 | 35.760518 | 106 | py |
AutoPruner | AutoPruner-master/vgg16/50/fine_tune_GAP.py | import argparse
import os
import shutil
import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms
from src_code.lmdbdataset import lmdbDataset
from compress_model.new_model import vgg16_GAP
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
'''parser.add_argument('data', metavar='DIR',
help='path to dataset')
'''
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=24, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', default=False, type=bool,
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--load_from_lmdb', default=True, type=bool, help='load image data from lmdb or not')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = 0
print(args)
def main():
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
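    # vgg16_GAP presumably rebuilds the compressed VGG-16 with a global-average-
    # pooling head in place of the fully-connected classifier (defined in
    # compress_model/new_model.py).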
model = vgg16_GAP('checkpoint/model.pth')
print(model)
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
# Data loading code from lmdb
if args.load_from_lmdb:
train_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
num_workers=16,
pin_memory=True
)
print('train_loader_success!')
val_loader = torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=16,
pin_memory=True
)
else:
traindir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'train')
valdir = os.path.join('/opt/luojh/Dataset/ImageNet/images', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate and train
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, int(args.epochs/3.0))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if is_best:
folder_path = 'checkpoint/fine_tune_GAP'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model.state_dict(), folder_path + '/model.pth')
print('best acc is %.3f' % best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
target_var = torch.autograd.Variable(target).cuda()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
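# Illustrative usage (not part of the original script): AverageMeter keeps a
# sample-weighted running mean, e.g.
#   m = AverageMeter(); m.update(0.5, n=32); m.update(0.7, n=32)
#   m.avg  # -> 0.6, the mean over both mini-batches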
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
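# Worked example (assuming args.lr = 0.001 and epoch_num = 10): epochs 0-9 run at
# 1e-3, epochs 10-19 at 1e-4, epochs 20-29 at 1e-5, i.e. lr = 0.001 * 0.1 ** (epoch // 10).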
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
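# Toy example of the top-k computation (illustrative, not executed on import):
#   output = torch.Tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]]); target = torch.LongTensor([1, 1])
#   accuracy(output, target, topk=(1, 2))  # -> [50.0, 100.0]: sample 0 is right at
#   top-1, while sample 1 only has class 1 inside its top-2 predictions.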
if __name__ == '__main__':
main()
| 10,791 | 35.707483 | 106 | py |
AutoPruner | AutoPruner-master/vgg16/50/src_code/my_op_fc.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import numpy as np
class MyGAP_fc(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*4096 -> 1*4096
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.mean(input, dim=0, keepdim=True)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = input[0].clone()
for i in range(grad_input.shape[0]):
grad_input[i, :] = grad_output.data / grad_input.shape[0]
return Variable(grad_input)
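# Note: since forward() takes the mean over the batch dimension, backward()
# spreads the incoming gradient uniformly, so every one of the N samples
# receives grad_output / N.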
class MyScale_fc(torch.autograd.Function):
'''
input: x: 64*4096, scale:4096 ==> x[:, i]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
input_data2 = input_data.clone()
for i in range(scale_vec.shape[0]):
input_data2[:, i] = input_data[:, i] * scale_vec[i]
return input_data2
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
grad_input = input_data.clone()
for i in range(scale_vec.shape[0]):
grad_input[:, i] = grad_output.data[:, i] * scale_vec[i]
grad_vec = scale_vec.clone()
for i in range(scale_vec.shape[0]):
grad_vec[i] = torch.sum(grad_output.data[:, i]*input_data[:, i])
return Variable(grad_input), Variable(grad_vec)
class MyCS_fc(nn.Module):
def __init__(self, channels_num):
super(MyCS_fc, self).__init__()
self.layer_type = 'MyCS_fc'
self.fc = nn.Linear(channels_num, channels_num)
self.sigmoid = nn.Sigmoid()
def forward(self, x, scale_factor):
x_scale = MyGAP_fc.apply(x) # apply my GAP: N*4096 => 1*4096
x_scale = self.fc(x_scale) # 1*4096
x_scale = torch.squeeze(x_scale) # 4096
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
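            # at test time, binarize the sigmoid output at the 0.5 threshold:
            # (sign(s - 0.5) + 1) / 2 maps s > 0.5 to 1 and s < 0.5 to 0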
index = (np.sign(x_scale.data.cpu().numpy() - 0.5) + 1) / 2.0
x_scale.data = torch.FloatTensor(index).cuda()
x = MyScale_fc.apply(x, x_scale)
return x, x_scale
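# Minimal usage sketch (illustrative; scale_factor is the sigmoid slope that
# AutoPruner anneals during training):
#   cs = MyCS_fc(4096).cuda()
#   x_masked, mask = cs(x, scale_factor=1.0)  # x: N*4096, mask: 4096 soft scores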
if __name__ == '__main__':
in_ = (Variable(torch.randn(3, 4).double(), requires_grad=True),
Variable(torch.randn(4).double(), requires_grad=True))
res = gradcheck(MyScale_fc.apply, in_, eps=1e-6, atol=1e-4)
# in_ = (Variable(torch.randn(4, 64).double(), requires_grad=True),)
# res = gradcheck(MyGAP_fc.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 2,729 | 31.117647 | 76 | py |
AutoPruner | AutoPruner-master/vgg16/50/src_code/my_op.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import math
class MyGAP(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*512*14*14 -> 1*512*14*14
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
input = torch.mean(input, dim=0, keepdim=True)
return input
@staticmethod
def backward(ctx, grad_output):
input = ctx.saved_tensors
grad_input = input[0].clone()
for i in range(grad_input.shape[0]):
grad_input[i, :, :, :] = grad_output.data / grad_input.shape[0]
return Variable(grad_input)
class MyScale(torch.autograd.Function):
'''
input: x: 64*512*7*7, scale:512 ==> x[:, i, :, :]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
input_data2 = input_data.clone()
for i in range(scale_vec.shape[0]):
input_data2[:, i, :, :] = input_data[:, i, :, :] * scale_vec[i]
return input_data2
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
grad_input = input_data.clone()
for i in range(scale_vec.shape[0]):
grad_input[:, i, :, :] = grad_output.data[:, i, :, :] * scale_vec[i]
grad_vec = scale_vec.clone()
for i in range(scale_vec.shape[0]):
grad_vec[i] = torch.sum(grad_output.data[:, i, :, :]*input_data[:, i, :, :])
return Variable(grad_input), Variable(grad_vec)
class MyCS(nn.Module):
def __init__(self, channels_num, activation_size=14, max_ks=2):
super(MyCS, self).__init__()
self.layer_type = 'MyCS'
self.conv = nn.Conv2d(channels_num, channels_num,
kernel_size=int(activation_size / max_ks), stride=1, padding=0)
self.map = nn.MaxPool2d(kernel_size=max_ks, stride=max_ks)
self.sigmoid = nn.Sigmoid()
# self.conv.weight.data.normal_(0, 0.005)
n = int(activation_size / max_ks) * int(activation_size / max_ks) * channels_num
self.conv.weight.data.normal_(0, 10*math.sqrt(2.0/n))
# torch.nn.init.xavier_normal(self.conv.weight)
# torch.nn.init.constant(self.conv.bias, 0)
def forward(self, x, scale_factor, channel_index=None):
x_scale = MyGAP.apply(x) # apply my GAP: N*512*14*14 => 1*512*14*14
x_scale = self.map(x_scale) # apply MAP: 1*512*14*14 => 1*512*7*7
x_scale = self.conv(x_scale) # 1*512*1*1
x_scale = torch.squeeze(x_scale) # 512
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
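            # at test time the soft mask is replaced by the fixed binary
            # channel_index found during training, so channel_index must be
            # supplied whenever the module runs in eval mode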
x_scale.data = torch.FloatTensor(channel_index).cuda()
x = MyScale.apply(x, x_scale)
return x, x_scale
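# Minimal usage sketch (illustrative): for a conv4_x activation of shape N*512*28*28,
#   cs = MyCS(512, activation_size=28, max_ks=2).cuda()
#   x_masked, mask = cs(x, scale_factor=1.0)  # mask: 512 soft channel scores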
if __name__ == '__main__':
# in_ = (Variable(torch.randn(1, 1, 3, 3).double(), requires_grad=True),
# Variable(torch.randn(1).double(), requires_grad=True))
# res = gradcheck(MyScale.apply, in_, eps=1e-6, atol=1e-4)
in_ = (Variable(torch.randn(2, 64, 3, 3).double(), requires_grad=True),)
res = gradcheck(MyGAP.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 3,318 | 33.572917 | 93 | py |
AutoPruner | AutoPruner-master/vgg16/50/src_code/Network_FT.py | import torch
from . import my_op
from torch import nn
class NetworkNew(torch.nn.Module):
def __init__(self, layer_id=0):
torch.nn.Module.__init__(self)
model_weight = torch.load('checkpoint/model.pth')
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
self.feature_1 = nn.Sequential()
self.feature_2 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
ks_dict = {0: 224, 1: 224, 2: 112, 3: 112, 4: 56, 5: 56, 6: 56, 7: 28, 8: 28, 9: 28, 10: 14, 11: 14, 12: 14}
self.CS = my_op.MyCS(channel_length[layer_id+1], activation_size=ks_dict[layer_id], max_ks=2)
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9, 12]
# add feature_1 and feature_2 layers
for i in range(13):
if i < layer_id:
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
elif i == layer_id:
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_2.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
elif i > layer_id:
self.feature_2.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_2.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_2.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
# add classifier
self.classifier.add_module('fc6', nn.Linear(channel_length[13] * 7 * 7, channel_length[14]))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(channel_length[14], channel_length[15]))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(channel_length[15], channel_length[16]))
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in model_weight.items():
name = k.split('.')
name = 'feature_1.'+name[2]+'.'+name[3]
if name in my_keys:
my_weight[name] = v
name = k.split('.')
name = 'feature_2.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
name = k[7:]
if name in my_keys:
my_weight[name] = v
self.load_state_dict(my_weight)
def forward(self, x, scale_factor=1.0, channel_index=None):
x = self.feature_1(x)
x, scale_vector = self.CS(x, scale_factor, channel_index)
x = self.feature_2(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x, scale_vector
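# Usage sketch (illustrative): to prune conv3_1, i.e. layer_id=4, feature_1 runs
# up to that conv, the CS layer masks its output channels, and feature_2 finishes
# the forward pass:
#   net = NetworkNew(layer_id=4).cuda()
#   logits, mask = net(x, scale_factor=alpha)                   # training
#   logits, _ = net(x, scale_factor=alpha, channel_index=idx)   # eval, binary idx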
class Vgg16(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add feature layers
self.feature_1.add_module('conv1_1', nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu1_1', nn.ReLU(inplace=True))
self.feature_1.add_module('conv1_2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu1_2', nn.ReLU(inplace=True))
self.feature_1.add_module('pool1', nn.MaxPool2d(kernel_size=2, stride=2))
self.feature_1.add_module('conv2_1', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu2_1', nn.ReLU(inplace=True))
self.feature_1.add_module('conv2_2', nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu2_2', nn.ReLU(inplace=True))
self.feature_1.add_module('pool2', nn.MaxPool2d(kernel_size=2, stride=2))
self.feature_1.add_module('conv3_1', nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu3_1', nn.ReLU(inplace=True))
self.feature_1.add_module('conv3_2', nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu3_2', nn.ReLU(inplace=True))
self.feature_1.add_module('conv3_3', nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu3_3', nn.ReLU(inplace=True))
self.feature_1.add_module('pool3', nn.MaxPool2d(kernel_size=2, stride=2))
self.feature_1.add_module('conv4_1', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu4_1', nn.ReLU(inplace=True))
self.feature_1.add_module('conv4_2', nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu4_2', nn.ReLU(inplace=True))
self.feature_1.add_module('conv4_3', nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu4_3', nn.ReLU(inplace=True))
self.feature_1.add_module('pool4', nn.MaxPool2d(kernel_size=2, stride=2))
self.feature_1.add_module('conv5_1', nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu5_1', nn.ReLU(inplace=True))
self.feature_1.add_module('conv5_2', nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu5_2', nn.ReLU(inplace=True))
self.feature_1.add_module('conv5_3', nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
self.feature_1.add_module('relu5_3', nn.ReLU(inplace=True))
self.feature_1.add_module('pool5', nn.MaxPool2d(kernel_size=2, stride=2))
# add classifier
self.classifier.add_module('fc6', nn.Linear(512*7*7, 4096))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(4096, 4096))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(4096, 1000))
model_weight = torch.load(model_path)
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
count = 0
for k, v in model_weight.items():
my_weight[my_keys[count]] = v
count += 1
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
| 8,144 | 49.590062 | 119 | py |
AutoPruner | AutoPruner-master/vgg16/50/src_code/lmdbdataset.py | import cv2
import numpy as np
import torchvision.transforms as transforms
import lmdb
import msgpack
from torch.utils.data import Dataset
from PIL import Image
class lmdbDataset(Dataset):
def __init__(self, location, is_train):
self.env = lmdb.open(location, subdir=False, max_readers=1, readonly=True, lock=False, readahead=False,
meminit=False)
self.txn = self.env.begin(write=False)
self.length = self.txn.stat()['entries']
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# train data augment
if is_train:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
# test data augment
else:
self.transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
'''
for key,data in self.txn.cursor():
now_data = msgpack.loads(data,raw=False)
data_img = now_data[0]
label = now_data[1]
now_arr = np.frombuffer(data_img[b'data'],dtype=np.uint8)
print(now_arr)
image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)
print(image_content.shape)
#print(type(_))
break
'''
def __len__(self):
return self.length - 1
def __getitem__(self, index):
new_index = str(index).encode()
data = self.txn.get(new_index)
now_data = msgpack.loads(data, raw=False)
data_img = now_data[0]
label = now_data[1]
now_arr = np.frombuffer(data_img[b'data'], dtype=np.uint8)
image_content = cv2.imdecode(now_arr, cv2.IMREAD_COLOR)
image_content = cv2.cvtColor(image_content, cv2.COLOR_BGR2RGB)
image_content = Image.fromarray(image_content)
image_content = self.transform(image_content)
return image_content, label
if __name__ == '__main__':
temp_dataset = lmdbDataset('indoor67.lmdb', True)
    # quick smoke test: decode and print the first few samples
    for i in range(3):
        image_content, label = temp_dataset[i]
        print(i, image_content.size(), label)
    # assert image_content is not None | 2,431 | 34.246377 | 111 | py |
AutoPruner | AutoPruner-master/vgg16/50/compress_model/new_model.py | import torch
from torch import nn
import numpy as np
import os
import torch.nn.init as init
class vgg16_compressed(torch.nn.Module):
def __init__(self, layer_id=0, model_path=None):
torch.nn.Module.__init__(self)
model_weight = torch.load(model_path + 'model.pth')
channel_index = torch.load(model_path + 'channel_index.pth')
channel_index = np.where(channel_index != 0)[0]
new_num = int(channel_index.shape[0])
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
channel_length[layer_id + 1] = new_num
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9, 12]
# add feature_1 and feature_2 layers
for i in range(13):
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
# add classifier
self.classifier.add_module('fc6', nn.Linear(channel_length[13] * 7 * 7, channel_length[14]))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(channel_length[14], channel_length[15]))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(channel_length[15], channel_length[16]))
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
channel_index = torch.cuda.LongTensor(channel_index)
if layer_id < 12:
# conv1_1 to conv5_2
for k, v in model_weight.items():
name = k.split('.')
if name[2] == conv_names[layer_id]:
if name[3] == 'weight':
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index, :, :, :]
else:
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index]
elif name[2] == conv_names[layer_id + 1]:
if name[3] == 'weight':
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[:, channel_index, :, :]
else:
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v
else:
if name[1] in ['feature_1', 'feature_2']:
name = 'feature_1.' + name[2] + '.' + name[3]
else:
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
elif layer_id == 12:
# conv5_3
for k, v in model_weight.items():
name = k.split('.')
if name[2] == conv_names[layer_id]:
if name[3] == 'weight':
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index, :, :, :]
else:
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index]
elif name[2] == 'fc6':
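                    # fc6 consumes the flattened 512x7x7 output of conv5_3, so its
                    # weight is reshaped back to (4096, 512, 7, 7), the surviving
                    # channels are selected, and the result is flattened again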
if name[3] == 'weight':
name = 'classifier.' + name[2] + '.' + name[3]
tmp = v.view(4096, 512, 7, 7)
tmp = tmp[:, channel_index, :, :]
my_weight[name] = tmp.view(4096, -1)
else:
name = 'classifier.' + name[2] + '.' + name[3]
my_weight[name] = v
else:
if name[1] in ['feature_1', 'feature_2']:
name = 'feature_1.' + name[2] + '.' + name[3]
else:
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
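# Usage sketch: compress_model.py instantiates this as
#   vgg16_compressed(layer_id=args.layer_id, model_path='../checkpoint/layer_<id>/')
# where the folder holds the fine-tuned model.pth and the learned channel_index.pth.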
class vgg16_test(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
model_weight = torch.load(model_path)
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9, 12]
# add feature_1 and feature_2 layers
for i in range(13):
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
# add classifier
self.classifier.add_module('fc6', nn.Linear(channel_length[13] * 7 * 7, channel_length[14]))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(channel_length[14], channel_length[15]))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(channel_length[15], channel_length[16]))
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in model_weight.items():
name = k.split('.')
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
else:
                print('unexpected parameter name: ' + name)
                raise KeyError(name)
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class vgg16_GAP(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
model_weight = torch.load(model_path)
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9]
# add feature_1 and feature_2 layers
for i in range(13):
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
if i == 12:
self.feature_1.add_module(pool_names[i], nn.AvgPool2d(kernel_size=14, stride=1))
# add classifier
self.classifier.add_module('fc', nn.Linear(channel_length[13], channel_length[16]))
init.xavier_uniform(self.classifier.fc.weight, gain=np.sqrt(2.0))
init.constant(self.classifier.fc.bias, 0)
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in model_weight.items():
name = k.split('.')
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
else:
print(name)
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
if __name__ == '__main__':
model = vgg16_GAP('../checkpoint/fine_tune/model.pth')
print(model)
| 10,696 | 43.570833 | 119 | py |
AutoPruner | AutoPruner-master/vgg16/50/compress_model/compress_model.py | import torch
from new_model import vgg16_compressed
import argparse
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--layer_id', default=2, type=int, help='the id of compressed layer, starting from 0')
args = parser.parse_args()
print(args)
def main(model_path):
    # Phase 1 : create compressed model
vgg16_new = vgg16_compressed(layer_id=args.layer_id, model_path=model_path)
# Phase 2 : Model setup
vgg16_new = vgg16_new.cuda()
vgg16_new = torch.nn.DataParallel(vgg16_new.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
new_model_param = vgg16_new.state_dict()
torch.save(new_model_param, '../checkpoint/model.pth')
print('Finished!')
if __name__ == '__main__':
folder_path = '../checkpoint/layer_' + str(args.layer_id)+'/'
main(folder_path)
| 908 | 31.464286 | 106 | py |
AutoPruner | AutoPruner-master/vgg16/50/compress_model/evaluate_net.py | import torch
from new_model import vgg16_compressed, vgg16_test
import argparse
import torch.backends.cudnn as cudnn
import os
import sys
import time
sys.path.append('../')
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=500, type=int, help='batch size')
parser.add_argument('--layer_id', default=0, type=int, help='id of the compressed layer (used only by main())')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
def main(model_path):
    # Phase 1 : create compressed model
vgg16_new = vgg16_compressed(layer_id=args.layer_id, model_path=model_path)
# Phase 2 : Model setup
vgg16_new = vgg16_new.cuda()
vgg16_new = torch.nn.DataParallel(vgg16_new.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
new_model_param = vgg16_new.state_dict()
torch.save(new_model_param, model_path+'model.pth')
print('Finished!')
return vgg16_new
def evaluate():
# Phase 1: load model
model = vgg16_test('../checkpoint/model.pth')
    # Phase 1 : Model setup
model = model.cuda()
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# Phase 2 : Data Upload
    print('\n[Phase 2] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 3: Validation
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for batch_idx, (input, target) in enumerate(dset_loaders['val']): # dset_loaders['val']):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate()
| 4,451 | 31.977778 | 106 | py |
AutoPruner | AutoPruner-master/MobileNetv2/released_model/evaluate.py | import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
from torchvision import datasets, transforms
import mobilenetv2
from torchsummaryX import summary
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=64, type=int, help='batch size')
parser.add_argument('--num_epochs', default=0, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/mnt/ramdisk/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='2', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--ft_model_path', default='mobilenetv2-pruned.pth',
type=str, help='the path of fine tuned model')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
print(args)
def main():
global args, best_prec1
# Phase 1 : Model setup
    print('\n[Phase 1] : Model setup')
model = mobilenetv2.MobileNetV2(args.ft_model_path)
model.eval()
summary(model, torch.zeros((1, 3, 224, 224)))
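    # torchsummaryX prints per-layer output shapes, parameter counts and
    # multiply-adds, which makes it easy to verify the pruned model's size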
model_ft = torch.nn.DataParallel(model.cuda())
cudnn.benchmark = True
print("model setup success!")
# Phase 2 : Data Load
# Data pre-processing
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
# transforms.Resize(256),
# transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = args.data_base
print("| Preparing data...")
dsets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ['train', 'val']
}
val_loader = torch.utils.data.DataLoader(dsets['val'], batch_size=args.batch_size, shuffle=False, num_workers=8,
pin_memory=True)
print('data_loader_success!')
    # Phase 3: evaluate model
    print('\n[Phase 3] : Model evaluation')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
validate(val_loader, model_ft, criterion)
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
input, target = input.cuda(), target.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 5,806 | 32.761628 | 116 | py |
AutoPruner | AutoPruner-master/MobileNetv2/released_model/mobilenetv2.py | """
Creates a MobileNetV2 Model as defined in:
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. (2018).
MobileNetV2: Inverted Residuals and Linear Bottlenecks
arXiv preprint arXiv:1801.04381.
import from https://github.com/tonylins/pytorch-mobilenet-v2
"""
import torch.nn as nn
import math
import torch
import numpy as np
__all__ = ['mobilenetv2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
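# Worked examples: _make_divisible(24, 8) -> 24, while _make_divisible(19, 8) -> 24
# rather than 16, since rounding down to 16 would lose more than 10% of the width.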
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, channel_number, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.identity = stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
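            # consume the next pruned width: blocks are built in network order, so
            # popping from the front walks through channel_index_AP.txt row by row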
hidden_dim = channel_number.pop(0)
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.identity:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, model_path, num_classes=1000, width_mult=1.):
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# load channel index
        file_path = model_path.split('mobilenet')[0]  # the channel index file is assumed to sit next to the pruned model
        f = open(file_path + 'channel_index_AP.txt')
lines = f.readlines()
channel_number = []
for line in lines:
line = line.split(', ')
line = line[0:-1] # remove '\n'
tmp = []
for item in line:
tmp.append(int(float(item)))
channel_number.append(int(np.sum(tmp)))
f.close()
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = InvertedResidual
for t, c, n, s in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
for i in range(n):
layers.append(block(channel_number, input_channel, output_channel, s if i == 0 else 1, t))
input_channel = output_channel
# building last several layers
output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280
layers.append(conv_1x1_bn(input_channel, output_channel))
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(output_channel, num_classes)
self._initialize_weights(model_path)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self, model_path):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
# initialized from pretrained model
model_weight = torch.load(model_path)
my_weight = self.state_dict()
my_keys = list(my_weight)
for i, (k, v) in enumerate(model_weight.items()):
my_weight[my_keys[i]] = v
self.load_state_dict(my_weight)
| 5,774 | 32.77193 | 120 | py |
AutoPruner | AutoPruner-master/MobileNetv2/2_fine_tune/main.py | import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
from torchvision import datasets, transforms
from src_code import mobilenetv2
from torchsummaryX import summary
from math import cos, pi
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=4e-5, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=150, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/mnt/ramdisk/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default=False, type=bool,
                    help='resume training from checkpoint/model.pth')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
print(args)
def main():
global args, best_prec1
# Phase 1 : Model setup
print('\n[Phase 2] : Model setup')
model = mobilenetv2.MobileNetV2('../1_pruning/checkpoint/model.pth')
model.eval()
summary(model, torch.zeros((1, 3, 224, 224)))
model_ft = torch.nn.DataParallel(model.cuda())
cudnn.benchmark = True
print("model setup success!")
if args.resume:
weight = torch.load('checkpoint/model.pth')
model_ft.load_state_dict(weight)
# Phase 2 : Data Load
# Data pre-processing
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.2, 1.0)),
# transforms.Resize(256),
# transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = args.data_base
print("| Preparing data...")
dsets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ['train', 'val']
}
train_loader = torch.utils.data.DataLoader(dsets['train'], batch_size=args.batch_size, shuffle=True, num_workers=8,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(dsets['val'], batch_size=args.batch_size, shuffle=False, num_workers=8,
pin_memory=True)
print('data_loader_success!')
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
validate(val_loader, model_ft, criterion)
for epoch in range(args.start_epoch, args.num_epochs):
# adjust_learning_rate(optimizer, epoch, 10) # reduce lr every 3 epochs
# train for one epoch
time1 = time.time()
train(train_loader, model_ft, criterion, optimizer, epoch)
print('training one epoch takes {0:.3f} seconds.'.format(time.time()-time1))
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
print('best accuracy is {0:.3f}'.format(best_prec1))
folder_path = 'checkpoint'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
adjust_learning_rate(optimizer, epoch, i, len(train_loader))
data_time.update(time.time() - end)
input, target = input.cuda(), target.cuda()
# compute output
output = model(input)
# calculate loss
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
top1=top1, top5=top5, loss=losses))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
input, target = input.cuda(), target.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# step lr
# def adjust_learning_rate(optimizer, epoch, epoch_num):
# """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
# lr = args.lr * (0.1 ** (epoch // epoch_num))
# print('| Learning Rate = %f' % lr)
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# consine lr
def adjust_learning_rate(optimizer, epoch, iteration, num_iter):
warmup_epoch = 0
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = args.num_epochs * num_iter
lr = args.lr * (1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
if epoch < warmup_epoch:
lr = args.lr * current_iter / warmup_iter
if iteration == 0:
print('current learning rate:{0}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
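# Resulting schedule (warmup disabled): the learning rate starts at args.lr,
# passes args.lr / 2 halfway through training and decays smoothly to ~0 at the
# final iteration, following 0.5 * lr * (1 + cos(pi * t / T)).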
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 9,434 | 33.944444 | 119 | py |
AutoPruner | AutoPruner-master/MobileNetv2/2_fine_tune/src_code/mobilenetv2.py | """
Creates a MobileNetV2 Model as defined in:
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. (2018).
MobileNetV2: Inverted Residuals and Linear Bottlenecks
arXiv preprint arXiv:1801.04381.
import from https://github.com/tonylins/pytorch-mobilenet-v2
"""
import torch.nn as nn
import torch
import numpy as np
__all__ = ['mobilenetv2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, channel_number, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.identity = stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
hidden_dim = channel_number.pop(0)
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.identity:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, model_path, num_classes=1000, width_mult=1.):
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# load channel index
f = open('../1_pruning/checkpoint/channel_index.txt')
lines = f.readlines()
index_code = []
channel_number = []
for line in lines:
line = line.split(', ')
line = line[0:-1] # remove '\n'
tmp = []
for item in line:
tmp.append(int(float(item)))
index_code.append(tmp)
channel_number.append(int(np.sum(tmp)))
f.close()
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = InvertedResidual
for t, c, n, s in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
for i in range(n):
layers.append(block(channel_number, input_channel, output_channel, s if i == 0 else 1, t))
input_channel = output_channel
self.features = nn.Sequential(*layers)
# building last several layers
output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280
self.conv = conv_1x1_bn(input_channel, output_channel)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(output_channel, num_classes)
self._initialize_weights(model_path, index_code)
def forward(self, x):
x = self.features(x)
x = self.conv(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self, model_path, index_code):
model_weight = torch.load(model_path)
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
i = 0
for k, v in model_weight.items():
if 'AP' in k:
if 'AP.conv.bias' in k and i != 6:
index = index_code.pop(0)
ind_ = np.array(index).nonzero()[0]
continue
if 'num_batches_tracked' in k:
i = i + 1
continue
if i < 18 or i > 305:
                # not pruned: indices 18-305 cover (306 - 18) / 6 = 48 conv/bn groups,
                # i.e. 16 expand_ratio-6 blocks x 3 conv layers each
my_weight[my_keys[i]] = v
else:
# pruned blocks, 3 layers/block
if len(my_weight[my_keys[i]]) == len(ind_):
my_weight[my_keys[i]] = v[ind_] # the first and second layer, conv+bn, 6 layers
elif 'conv.6.weight' in my_keys[i]:
my_weight[my_keys[i]] = v[:, ind_, :, :] # remove the conv layer of third layer
else:
my_weight[my_keys[i]] = v # not change
i = i + 1
self.load_state_dict(my_weight)
def mobilenetv2(**kwargs):
"""
Constructs a MobileNet V2 model
"""
return MobileNetV2(**kwargs)
if __name__ == '__main__':
model = MobileNetV2('../../1_pruning/checkpoint/model.pth')
| 6,370 | 32.356021 | 120 | py |
AutoPruner | AutoPruner-master/MobileNetv2/2_fine_tune/src_code/Network_FT.py | import torch
from torch import nn
import numpy as np
class VGG16(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.ReLU = nn.ReLU(inplace=True)
# load channel index
f = open('../1_pruning/checkpoint/channel_index.txt')
lines = f.readlines()
index_code = []
channel_number = []
for line in lines:
line = line.split(', ')
line = line[0:-1] # remove '\n'
tmp = []
for item in line:
tmp.append(int(float(item)))
index_code.append(tmp)
channel_number.append(np.sum(tmp))
f.close()
# add feature layers
self.conv1_1 = nn.Conv2d(3, channel_number[0], kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(channel_number[0], channel_number[1], kernel_size=3, stride=1, padding=1)
self.conv2_1 = nn.Conv2d(channel_number[1], channel_number[2], kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(channel_number[2], channel_number[3], kernel_size=3, stride=1, padding=1)
self.conv3_1 = nn.Conv2d(channel_number[3], channel_number[4], kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(channel_number[4], channel_number[5], kernel_size=3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(channel_number[5], channel_number[6], kernel_size=3, stride=1, padding=1)
self.conv4_1 = nn.Conv2d(channel_number[6], channel_number[7], kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(channel_number[7], channel_number[8], kernel_size=3, stride=1, padding=1)
self.conv4_3 = nn.Conv2d(channel_number[8], channel_number[9], kernel_size=3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(channel_number[9], channel_number[10], kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(channel_number[10], channel_number[11], kernel_size=3, stride=1, padding=1)
self.conv5_3 = nn.Conv2d(channel_number[11], 512, kernel_size=3, stride=1, padding=1)
# add classifier
self.classifier = nn.Sequential()
self.classifier.add_module('fc6', nn.Linear(512*7*7, 4096))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(4096, 4096))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(4096, 1000))
model_weight = torch.load(model_path)
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
count = 0
i = 0
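        # ind_old tracks the surviving input channels of the previous conv layer;
        # the first conv keeps all three RGB inputs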
ind_old = [0, 1, 2]
for k, v in model_weight.items():
if 'AP' in k:
continue
if 'conv' in k:
if 'conv5_3' in k:
if 'weight' in k:
my_weight[my_keys[i]] = v[:, ind_old, :, :]
else:
my_weight[my_keys[i]] = v
else:
# conv layer
if 'weight' in k:
# weight
ind_ = np.array(index_code[count]).nonzero()[0]
v = v[:, ind_old, :, :]
my_weight[my_keys[i]] = v[ind_, :, :, :]
else:
# bias
my_weight[my_keys[i]] = v[ind_]
ind_old = ind_
count += 1
else:
# fc layer
my_weight[my_keys[i]] = v
i = i + 1
self.load_state_dict(my_weight)
def forward(self, x):
x = self.ReLU(self.conv1_1(x))
x = self.maxpool(self.ReLU(self.conv1_2(x)))
x = self.ReLU(self.conv2_1(x))
x = self.maxpool(self.ReLU(self.conv2_2(x)))
x = self.ReLU(self.conv3_1(x))
x = self.ReLU(self.conv3_2(x))
x = self.maxpool(self.ReLU(self.conv3_3(x)))
x = self.ReLU(self.conv4_1(x))
x = self.ReLU(self.conv4_2(x))
x = self.maxpool(self.ReLU(self.conv4_3(x)))
x = self.ReLU(self.conv5_1(x))
x = self.ReLU(self.conv5_2(x))
x = self.maxpool(self.ReLU(self.conv5_3(x)))
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
if __name__ == '__main__':
VGG16('/home/luojh2/model.pth')
| 4,591 | 38.247863 | 108 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/main.py | import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
import numpy as np
import shutil
from torchvision import datasets, transforms
from src_code import mobilenetv2
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=5, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/mnt/ramdisk/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--ft_model_path', default='/mnt/data3/luojh/project/6_CURL/Journal/pretrained_model/ImageNet/mobilenetv2_1.0-0c6065bc.pth',
type=str, help='the path of fine tuned model')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--compression_rate', default=0.64, type=float, help='the proportion of 1 in compressed model')
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--alpha_start', default=0.1, type=float, help='the initial value of alpha in AutoPruner layer')
parser.add_argument('--alpha_end', default=100, type=float, help='the initial value of alpha in AutoPruner layer')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
alpha = 0
alpha_step = 0
print(args)
# args.ft_model_path = '/home/luojh2/.torch/models/vgg16-397923af.pth'
def main():
global args, best_prec1, alpha, alpha_step
# Phase 1 : Model setup
    print('\n[Phase 1] : Model setup')
model = mobilenetv2.MobileNetV2(args.ft_model_path).cuda()
print(model)
model_ft = torch.nn.DataParallel(model)
cudnn.benchmark = True
print("model setup success!")
# Phase 2 : Data Load
# Data pre-processing
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = args.data_base
print("| Preparing data...")
dsets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ['train', 'val']
}
train_loader = torch.utils.data.DataLoader(dsets['train'], batch_size=args.batch_size, shuffle=True, num_workers=8,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(dsets['val'], batch_size=args.batch_size, shuffle=False, num_workers=8,
pin_memory=True)
print('data_loader_success!')
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
alpha_step = (args.alpha_end - args.alpha_start)/float(args.num_epochs * len(train_loader))
alpha = args.alpha_start
for epoch in range(args.start_epoch, args.num_epochs):
# adjust_learning_rate(optimizer, epoch, 3) # reduce lr every 3 epochs
# train for one epoch
time1 = time.time()
channel_index = train(train_loader, model_ft, criterion, optimizer, epoch)
print('training one epoch takes {0:.3f} seconds.'.format(time.time()-time1))
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion, channel_index)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
folder_path = 'checkpoint'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
fw = open(folder_path+'/channel_index.txt', 'w')
for item in channel_index:
for item_ in item:
fw.write('{0}, '.format(item_))
fw.write('\n')
fw.close()
def train(train_loader, model, criterion, optimizer, epoch):
global alpha_step, alpha
gpu_num = torch.cuda.device_count()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
        # anneal alpha and measure data loading time
        alpha = alpha + alpha_step
        data_time.update(time.time() - end)
input, target = input.cuda(), target.cuda()
# compute output
output, scale_vec = model(input, alpha)
# adjust alpha and reg_lambda
channel_index = []
first_value = []
binary_pruning_rate = []
index_code = []
two_side_rate = []
for item in scale_vec:
tmp = item.data.cpu().numpy().reshape(gpu_num, -1).mean(0)
index_code.append(tmp)
for item in index_code:
tmp = item
channel_index.append((np.sign(tmp - 0.5) + 1) / 2.0)
first_value.append(tmp[0])
binary_pruning_rate.append(np.sum(tmp < 0.1) / len(tmp)) # The proportion of 0
two_side_rate.append((np.sum(tmp > 0.9) + np.sum(tmp < 0.1)) / len(tmp))
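        # (np.sign(tmp - 0.5) + 1) / 2 above thresholds the sigmoid index code
        # at 0.5 into a hard 0/1 channel mask; binary_pruning_rate is the
        # fraction of channels pushed below 0.1, and two_side_rate measures
        # how saturated (close to 0 or 1) the code has become.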
if i % args.print_freq == 0:
print('The first value of each layer: {0}'.format(first_value))
print('The binary rate of each layer: {0}'.format(binary_pruning_rate))
print('The two side rate of each layer: {0}'.format(two_side_rate))
# check the binary rate in the last epoch
if epoch == args.num_epochs - 1:
if not all(my_item > 0.9 for my_item in two_side_rate):
alpha = alpha + 10*alpha_step
# calculate loss
loss1 = criterion(output, target)
loss2 = 0
for ind_, item in enumerate(scale_vec):
loss2 += (item.norm(1) / float(item.size(0)) - args.compression_rate) ** 2
loss = loss1 + 10.0*loss2
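        # loss2 sums (mean(|index code|) - compression_rate)^2 over the pruned
        # layers, steering each layer's average channel activation towards the
        # target sparsity; 10.0 is the fixed weight balancing it against the
        # classification loss.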
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'alpha: {3:.4f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, i, len(train_loader), alpha, batch_time=batch_time,
top1=top1, top5=top5, loss=losses))
sys.stdout.flush()
return channel_index
def validate(val_loader, model, criterion, channel_index):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
input, target = input.cuda(), target.cuda()
# compute output
output, _ = model(input, 1.0, channel_index)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
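# Shape sketch for accuracy(): with batch size B and maxk = 5, pred is a 5 x B
# index matrix; correct[:k] marks columns whose top-k predictions contain the
# target, so each returned value is a top-k precision percentage in [0, 100].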
if __name__ == "__main__":
main()
| 11,003 | 35.68 | 144 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/evaluate.py | import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import time
import os
import sys
import argparse
from torchvision import datasets, transforms
from src_code import mobilenetv2
from torchsummaryX import summary
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--num_epochs', default=0, type=int, help='number of training epochs')
parser.add_argument('--lr_decay_epoch', default=10, type=int, help='learning rate decay epoch')
parser.add_argument('--data_base', default='/mnt/ramdisk/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--ft_model_path', default='/mnt/data3/luojh/project/6_CURL/Journal/pretrained_model/ImageNet/mobilenetv2_1.0-0c6065bc.pth',
type=str, help='the path of fine tuned model')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
best_prec1 = -1
print(args)
def main():
global args, best_prec1
# Phase 1 : Model setup
    print('\n[Phase 1] : Model setup')
model = mobilenetv2.MobileNetV2(args.ft_model_path)
model.eval()
summary(model, torch.zeros((1, 3, 224, 224)))
model_ft = torch.nn.DataParallel(model.cuda())
cudnn.benchmark = True
print("model setup success!")
# Phase 2 : Data Load
# Data pre-processing
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
# transforms.Resize(256),
# transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = args.data_base
print("| Preparing data...")
dsets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ['train', 'val']
}
train_loader = torch.utils.data.DataLoader(dsets['train'], batch_size=args.batch_size, shuffle=True, num_workers=8,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(dsets['val'], batch_size=args.batch_size, shuffle=False, num_workers=8,
pin_memory=True)
print('data_loader_success!')
# Phase 3: fine_tune model
print('\n[Phase 3] : Model fine tune')
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
validate(val_loader, model_ft, criterion)
for epoch in range(args.start_epoch, args.num_epochs):
        adjust_learning_rate(optimizer, epoch, 10)  # reduce lr every 10 epochs
# train for one epoch
time1 = time.time()
train(train_loader, model_ft, criterion, optimizer, epoch)
print('training one epoch takes {0:.3f} seconds.'.format(time.time()-time1))
# evaluate on validation set
prec1 = validate(val_loader, model_ft, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
folder_path = 'checkpoint'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
torch.save(model_ft.state_dict(), folder_path+'/model.pth')
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
input, target = input.cuda(), target.cuda()
# compute output
output = model(input)
# calculate loss
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch[{0}]: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
top1=top1, top5=top5, loss=losses))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
input, target = input.cuda(), target.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, epoch_num):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // epoch_num))
print('| Learning Rate = %f' % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| 8,623 | 34.489712 | 144 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/src_code/mobilenetv2.py | """
Creates a MobileNetV2 Model as defined in:
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. (2018).
MobileNetV2: Inverted Residuals and Linear Bottlenecks
arXiv preprint arXiv:1801.04381.
import from https://github.com/tonylins/pytorch-mobilenet-v2
"""
import torch.nn as nn
import math
import torch
from . import my_op
__all__ = ['mobilenetv2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
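# Illustrative values: _make_divisible(24, 8) -> 24 and _make_divisible(33, 8)
# -> 32; the result is always a multiple of divisor and never falls more than
# 10% below the requested v.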
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, block_id, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.expand_ratio = expand_ratio
self.identity = stride == 1 and inp == oup
self.ReLU = nn.ReLU6(inplace=True)
if expand_ratio == 1:
self.conv1 = nn.Conv2d(inp, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn2 = nn.BatchNorm2d(oup)
else:
self.block_id = block_id
self.activation_size_list = [112, 56, 56, 28, 28, 28, 14, 14, 14, 14, 14, 14, 14, 7, 7, 7]
self.AP = my_op.APLayer(hidden_dim, hidden_dim, activation_size=self.activation_size_list[block_id], max_ks=2,
layer_id=block_id)
            # hidden dims per expanded block (for reference):
            # [96, 144, 144, 192, 192, 192, 384, 384, 384, 384, 576, 576, 576, 960, 960, 960]
self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)
self.bn2 = nn.BatchNorm2d(hidden_dim)
self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(oup)
self.index_code = [] # the generated index code
self.scale_factor = 0.1
self.channel_index = [] # binary index code for evaluation
def forward(self, x):
output = x
if self.expand_ratio == 1:
x = self.ReLU(self.bn1(self.conv1(x)))
x = self.bn2(self.conv2(x))
else:
x = self.ReLU(self.bn1(self.conv1(x)))
x_scale = self.AP(x, self.scale_factor, self.channel_index)
self.index_code = x_scale
x = my_op.MyScale.apply(x, x_scale)
x = self.ReLU(self.bn2(self.conv2(x)))
x = my_op.MyScale.apply(x, x_scale)
x = self.bn3(self.conv3(x))
if self.identity:
return x + output
else:
return x
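# Note on InvertedResidual above: in expanded blocks the AP index code scales
# the hidden channels both before and after the depthwise conv, so a channel
# driven to zero is removed consistently from the depthwise and the projection
# (conv3) inputs.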
class MobileNetV2(nn.Module):
def __init__(self, model_path, num_classes=1000, width_mult=1.):
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = InvertedResidual
block_id = -1
for t, c, n, s in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
for i in range(n):
layers.append(block(block_id, input_channel, output_channel, s if i == 0 else 1, t))
input_channel = output_channel
block_id += 1
self.features = nn.Sequential(*layers)
# building last several layers
output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280
self.conv = conv_1x1_bn(input_channel, output_channel)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(output_channel, num_classes)
self._initialize_weights(model_path)
def forward(self, x, scale_factor=1.0, channel_index=None):
self.set_scale_factor(scale_factor)
if not self.training:
self.set_channel_index(channel_index)
x = self.features(x)
x = self.conv(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
index_code = self.get_index_code()
return x, index_code
def set_scale_factor(self, scale_factor):
for item in self.features._modules:
if item == '0' or item == '1':
                continue  # skip the first two blocks
block = self.features._modules[item]
block.scale_factor = scale_factor
def set_channel_index(self, channel_index):
for item in self.features._modules:
if item == '0' or item == '1':
                continue  # skip the first two blocks
block = self.features._modules[item]
block.channel_index = channel_index
def get_index_code(self):
index_code = []
for item in self.features._modules:
if item == '0' or item == '1':
                continue  # skip the first two blocks
block = self.features._modules[item]
index_code.append(block.index_code)
return index_code
def _initialize_weights(self, model_path):
model_weight = torch.load(model_path)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
my_weight = self.state_dict()
my_keys = list(my_weight)
new_keys = []
for item in my_keys:
if 'AP' not in item:
new_keys.append(item)
for i, (k, v) in enumerate(model_weight.items()):
my_weight[new_keys[i]] = v
self.load_state_dict(my_weight)
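        # The pretrained checkpoint contains no AP layers, so its tensors are
        # copied in order onto this model's non-AP parameters via new_keys.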
def mobilenetv2(**kwargs):
"""
Constructs a MobileNet V2 model
"""
return MobileNetV2(**kwargs)
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
model_path = '/mnt/data3/luojh/project/6_CURL/Journal/pretrained_model/ImageNet/mobilenetv2_1.0-0c6065bc.pth'
model = MobileNetV2(model_path).cuda()
input = torch.zeros((1, 3, 224, 224)).cuda()
output = model(input)
| 7,872 | 34.949772 | 122 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/src_code/my_op.py | import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import gradcheck
import math
class MyGAP(torch.autograd.Function):
'''
Global Average Pooling with batchsize: N*512*14*14 -> 1*512*14*14
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.mean(dim=0, keepdim=True)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
batch_size, num_channels, h, w = input.size()
grad_input = grad_output.div(batch_size).expand(batch_size, num_channels, h, w)
return grad_input
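# Shape sketch for MyGAP above: forward averages an N*C*H*W batch into a single
# 1*C*H*W map, and backward hands each sample grad_output / N, matching the
# derivative of the batch mean.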
class MyScale(torch.autograd.Function):
'''
input: x: 64*512*7*7, scale:512 ==> x[:, i, :, :]*scale[i]
'''
@staticmethod
def forward(self, input_data, scale_vec):
self.save_for_backward(input_data, scale_vec)
batch_size, num_channels, h, w = input_data.size()
scale_vec = scale_vec.view([1, num_channels, 1, 1]).expand(input_data.size())
return input_data.mul(scale_vec)
@staticmethod
def backward(self, grad_output):
input_data, scale_vec = self.saved_tensors
batch_size, num_channels, h, w = input_data.size()
scale_vec = scale_vec.view([1, num_channels, 1, 1]).expand(input_data.size())
grad_input = grad_output.mul(scale_vec)
grad_vec = grad_output.mul(input_data).sum(-1).sum(-1).sum(0)
return grad_input, grad_vec
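def _myscale_demo():
    # Hedged usage sketch (not part of the original pipeline): MyScale.apply
    # multiplies channel i of an N*C*H*W tensor by scale[i].
    x = torch.randn(2, 4, 3, 3)
    scale = torch.rand(4)
    y = MyScale.apply(x, scale)
    assert torch.allclose(y[:, 1], x[:, 1] * scale[1])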
# AutoPruner layer
class APLayer(nn.Module):
def __init__(self, in_num, out_num, activation_size=14, max_ks=2, layer_id=0):
super(APLayer, self).__init__()
self.layer_type = 'APLayer'
self.id = layer_id
self.conv = nn.Conv2d(in_num, out_num,
kernel_size=int(activation_size / max_ks), stride=1, padding=0)
self.map = nn.MaxPool2d(kernel_size=max_ks, stride=max_ks)
self.sigmoid = nn.Sigmoid()
# n = int(activation_size / max_ks) * int(activation_size / max_ks) * channels_num
# self.conv.weight.data.normal_(0, 10 * math.sqrt(2.0 / n))
nn.init.kaiming_normal_(self.conv.weight, mode='fan_out', nonlinearity='relu')
def forward(self, x, scale_factor, channel_index=None):
x_scale = MyGAP.apply(x) # apply my GAP: N*512*14*14 => 1*512*14*14
x_scale = self.map(x_scale) # apply MAP: 1*512*14*14 => 1*512*7*7
x_scale = self.conv(x_scale) # 1*512*1*1
x_scale = torch.squeeze(x_scale) # 512
x_scale = x_scale * scale_factor # apply scale sigmoid
x_scale = self.sigmoid(x_scale)
if not self.training:
x_scale.data = torch.FloatTensor(channel_index[self.id]).cuda()
return x_scale
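# Binarization note for APLayer: as training anneals the scale factor from
# alpha_start (0.1) towards alpha_end (100), sigmoid(alpha * z) saturates
# towards {0, 1} (e.g. sigmoid(100 * 1.0) ~= 1.0), so the soft index code
# hardens into a channel mask by the final epochs.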
if __name__ == '__main__':
in_ = (Variable(torch.randn(1, 1, 3, 3).double(), requires_grad=True),
Variable(torch.randn(1).double(), requires_grad=True))
res = gradcheck(MyScale.apply, in_, eps=1e-6, atol=1e-4)
# in_ = (Variable(torch.randn(2, 64, 3, 3).double(), requires_grad=True),)
# res = gradcheck(MyGAP.apply, in_, eps=1e-6, atol=1e-4)
print(res)
| 3,128 | 34.965517 | 93 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/src_code/Network_FT.py | import torch
from . import my_op
from torch import nn
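# Legacy VGG16 pruning network kept from the original AutoPruner experiments.
# Note: it assumes an older APLayer interface that took a single channel count
# and returned (scaled_x, scale), which differs from the APLayer defined in
# this folder's my_op.py.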
class VGG16(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.ReLU = nn.ReLU(inplace=True)
# add feature layers
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
self.AP1_1 = my_op.APLayer(64, activation_size=224, max_ks=2, layer_id=0)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.AP1_2 = my_op.APLayer(64, activation_size=112, max_ks=2, layer_id=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
self.AP2_1 = my_op.APLayer(128, activation_size=112, max_ks=2, layer_id=2)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.AP2_2 = my_op.APLayer(128, activation_size=56, max_ks=2, layer_id=3)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
self.AP3_1 = my_op.APLayer(256, activation_size=56, max_ks=2, layer_id=4)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.AP3_2 = my_op.APLayer(256, activation_size=56, max_ks=2, layer_id=5)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.AP3_3 = my_op.APLayer(256, activation_size=28, max_ks=2, layer_id=6)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
self.AP4_1 = my_op.APLayer(512, activation_size=28, max_ks=2, layer_id=7)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.AP4_2 = my_op.APLayer(512, activation_size=28, max_ks=2, layer_id=8)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.AP4_3 = my_op.APLayer(512, activation_size=14, max_ks=2, layer_id=9)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.AP5_1 = my_op.APLayer(512, activation_size=14, max_ks=2, layer_id=10)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.AP5_2 = my_op.APLayer(512, activation_size=14, max_ks=2, layer_id=11)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
# add classifier
self.classifier = nn.Sequential()
self.classifier.add_module('fc6', nn.Linear(512*7*7, 4096))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(4096, 4096))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(4096, 1000))
model_weight = torch.load(model_path)
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
count = 0
for k, v in model_weight.items():
if 'AP' in my_keys[count]:
count = count + 2
my_weight[my_keys[count]] = v
count += 1
self.load_state_dict(my_weight)
def forward(self, x, scale_factor=1.0, channel_index=None):
x, s1 = self.AP1_1(self.ReLU(self.conv1_1(x)), scale_factor, channel_index)
x, s2 = self.AP1_2(self.maxpool(self.ReLU(self.conv1_2(x))), scale_factor, channel_index)
x, s3 = self.AP2_1(self.ReLU(self.conv2_1(x)), scale_factor, channel_index)
x, s4 = self.AP2_2(self.maxpool(self.ReLU(self.conv2_2(x))), scale_factor, channel_index)
x, s5 = self.AP3_1(self.ReLU(self.conv3_1(x)), scale_factor, channel_index)
x, s6 = self.AP3_2(self.ReLU(self.conv3_2(x)), scale_factor, channel_index)
x, s7 = self.AP3_3(self.maxpool(self.ReLU(self.conv3_3(x))), scale_factor, channel_index)
x, s8 = self.AP4_1(self.ReLU(self.conv4_1(x)), scale_factor, channel_index)
x, s9 = self.AP4_2(self.ReLU(self.conv4_2(x)), scale_factor, channel_index)
x, s10 = self.AP4_3(self.maxpool(self.ReLU(self.conv4_3(x))), scale_factor, channel_index)
x, s11 = self.AP5_1(self.ReLU(self.conv5_1(x)), scale_factor, channel_index)
x, s12 = self.AP5_2(self.ReLU(self.conv5_2(x)), scale_factor, channel_index)
x = self.maxpool(self.ReLU(self.conv5_3(x)))
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x, [s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12]
| 4,519 | 50.363636 | 98 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/compress_model/new_model.py | import torch
from torch import nn
import numpy as np
import os
import torch.nn.init as init
class vgg16_compressed(torch.nn.Module):
def __init__(self, layer_id=0, model_path=None):
torch.nn.Module.__init__(self)
model_weight = torch.load(model_path + 'model.pth')
channel_index = torch.load(model_path + 'channel_index.pth')
channel_index = np.where(channel_index != 0)[0]
new_num = int(channel_index.shape[0])
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
channel_length[layer_id + 1] = new_num
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9, 12]
# add feature_1 and feature_2 layers
for i in range(13):
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
# add classifier
self.classifier.add_module('fc6', nn.Linear(channel_length[13] * 7 * 7, channel_length[14]))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(channel_length[14], channel_length[15]))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(channel_length[15], channel_length[16]))
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
channel_index = torch.cuda.LongTensor(channel_index)
if layer_id < 12:
# conv1_1 to conv5_2
for k, v in model_weight.items():
name = k.split('.')
if name[2] == conv_names[layer_id]:
if name[3] == 'weight':
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index, :, :, :]
else:
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index]
elif name[2] == conv_names[layer_id + 1]:
if name[3] == 'weight':
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[:, channel_index, :, :]
else:
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v
else:
if name[1] in ['feature_1', 'feature_2']:
name = 'feature_1.' + name[2] + '.' + name[3]
else:
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
elif layer_id == 12:
# conv5_3
for k, v in model_weight.items():
name = k.split('.')
if name[2] == conv_names[layer_id]:
if name[3] == 'weight':
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index, :, :, :]
else:
name = 'feature_1.' + name[2] + '.' + name[3]
my_weight[name] = v[channel_index]
elif name[2] == 'fc6':
if name[3] == 'weight':
name = 'classifier.' + name[2] + '.' + name[3]
tmp = v.view(4096, 512, 7, 7)
tmp = tmp[:, channel_index, :, :]
my_weight[name] = tmp.view(4096, -1)
else:
name = 'classifier.' + name[2] + '.' + name[3]
my_weight[name] = v
else:
if name[1] in ['feature_1', 'feature_2']:
name = 'feature_1.' + name[2] + '.' + name[3]
else:
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class vgg16_test(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
model_weight = torch.load(model_path)
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9, 12]
# add feature_1 and feature_2 layers
for i in range(13):
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
# add classifier
self.classifier.add_module('fc6', nn.Linear(channel_length[13] * 7 * 7, channel_length[14]))
self.classifier.add_module('relu6', nn.ReLU(inplace=True))
self.classifier.add_module('dropout6', nn.Dropout())
self.classifier.add_module('fc7', nn.Linear(channel_length[14], channel_length[15]))
self.classifier.add_module('relu7', nn.ReLU(inplace=True))
self.classifier.add_module('dropout7', nn.Dropout())
self.classifier.add_module('fc8', nn.Linear(channel_length[15], channel_length[16]))
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in model_weight.items():
name = k.split('.')
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
else:
                print('error: unexpected key {}'.format(name))
                os._exit(1)
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class vgg16_GAP(torch.nn.Module):
def __init__(self, model_path):
torch.nn.Module.__init__(self)
model_weight = torch.load(model_path)
channel_length = list()
channel_length.append(3)
for k, v in model_weight.items():
if 'bias' in k:
channel_length.append(v.size()[0])
self.feature_1 = nn.Sequential()
self.classifier = nn.Sequential()
# add channel selection layers
conv_names = {0: 'conv1_1', 1: 'conv1_2', 2: 'conv2_1', 3: 'conv2_2', 4: 'conv3_1', 5: 'conv3_2', 6: 'conv3_3',
7: 'conv4_1', 8: 'conv4_2', 9: 'conv4_3', 10: 'conv5_1', 11: 'conv5_2', 12: 'conv5_3'}
relu_names = {0: 'relu1_1', 1: 'relu1_2', 2: 'relu2_1', 3: 'relu2_2', 4: 'relu3_1', 5: 'relu3_2', 6: 'relu3_3',
7: 'relu4_1', 8: 'relu4_2', 9: 'relu4_3', 10: 'relu5_1', 11: 'relu5_2', 12: 'relu5_3'}
pool_names = {1: 'pool1', 3: 'pool2', 6: 'pool3', 9: 'pool4', 12: 'pool5'}
pooling_layer_id = [1, 3, 6, 9]
# add feature_1 and feature_2 layers
for i in range(13):
self.feature_1.add_module(conv_names[i],
nn.Conv2d(channel_length[i], channel_length[i + 1], kernel_size=3, stride=1,
padding=1))
self.feature_1.add_module(relu_names[i], nn.ReLU(inplace=True))
if i in pooling_layer_id:
self.feature_1.add_module(pool_names[i], nn.MaxPool2d(kernel_size=2, stride=2))
if i == 12:
self.feature_1.add_module(pool_names[i], nn.AvgPool2d(kernel_size=14, stride=1))
# add classifier
self.classifier.add_module('fc', nn.Linear(channel_length[13], channel_length[16]))
        init.xavier_uniform_(self.classifier.fc.weight, gain=np.sqrt(2.0))
        init.constant_(self.classifier.fc.bias, 0)
# load pretrain model weights
my_weight = self.state_dict()
my_keys = list(my_weight.keys())
for k, v in model_weight.items():
name = k.split('.')
name = name[1] + '.' + name[2] + '.' + name[3]
if name in my_keys:
my_weight[name] = v
else:
print(name)
self.load_state_dict(my_weight)
def forward(self, x):
x = self.feature_1(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
if __name__ == '__main__':
model = vgg16_GAP('../checkpoint/fine_tune/model.pth')
print(model)
| 10,696 | 43.570833 | 119 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/compress_model/compress_model.py | import torch
from new_model import vgg16_compressed
import argparse
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--layer_id', default=2, type=int, help='the id of compressed layer, starting from 0')
args = parser.parse_args()
print(args)
def main(model_path):
# 1. create compressed model
vgg16_new = vgg16_compressed(layer_id=args.layer_id, model_path=model_path)
# Phase 2 : Model setup
vgg16_new = vgg16_new.cuda()
vgg16_new = torch.nn.DataParallel(vgg16_new.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
new_model_param = vgg16_new.state_dict()
torch.save(new_model_param, '../checkpoint/model.pth')
print('Finished!')
if __name__ == '__main__':
folder_path = '../checkpoint/layer_' + str(args.layer_id)+'/'
main(folder_path)
| 908 | 31.464286 | 106 | py |
AutoPruner | AutoPruner-master/MobileNetv2/1_pruning/compress_model/evaluate_net.py | import torch
from new_model import vgg16_compressed, vgg16_test
import argparse
import torch.backends.cudnn as cudnn
import os
import sys
import time
sys.path.append('../')
from src_code.lmdbdataset import lmdbDataset
parser = argparse.ArgumentParser(description='PyTorch Digital Mammography Training')
parser.add_argument('--batch_size', default=500, type=int, help='batch size')
parser.add_argument('--data_base', default='/data/zhangcl/ImageNet', type=str, help='the path of dataset')
parser.add_argument('--gpu_id', default='4,5,6,7', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--layer_id', default=2, type=int,
                    help='the id of compressed layer, starting from 0')  # needed by main(); mirrors compress_model.py
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
def main(model_path):
# 1. create compressed model
vgg16_new = vgg16_compressed(layer_id=args.layer_id, model_path=model_path)
# Phase 2 : Model setup
vgg16_new = vgg16_new.cuda()
vgg16_new = torch.nn.DataParallel(vgg16_new.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
new_model_param = vgg16_new.state_dict()
torch.save(new_model_param, model_path+'model.pth')
print('Finished!')
return vgg16_new
def evaluate():
# Phase 1: load model
model = vgg16_test('../checkpoint/model.pth')
# Phase 2 : Model setup
model = model.cuda()
model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# Phase 2 : Data Upload
    print('\n[Phase 2] : Data Preparation')
dset_loaders = {
'train': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-train.lmdb'), True),
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True),
'val': torch.utils.data.DataLoader(
lmdbDataset(os.path.join(args.data_base, 'ILSVRC-val.lmdb'), False),
batch_size=args.batch_size,
num_workers=8,
pin_memory=True)
}
print('data_loader_success!')
# Phase 3: Validation
print("\n[Phase 3 : Inference on val")
criterion = torch.nn.CrossEntropyLoss().cuda()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
    for batch_idx, (input, target) in enumerate(dset_loaders['val']):
        target = target.cuda(non_blocking=True)
        input_var = input
        target_var = target
        # compute output (inference only, so no autograd graph is needed)
        with torch.no_grad():
            output = model(input_var)
            loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(dset_loaders['val']), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
evaluate()
| 4,451 | 31.977778 | 106 | py |
SERT | SERT-master/hside_simu_test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
from hsi_setup import Engine, train_options, make_dataset
import time
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
target_transform = HSI2Tensor()
"""Test-Dev"""
test_dir = opt.test_dir
mat_dataset = MatDataFromFolder(
test_dir)
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt',
                       transform=lambda x: x[...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt', needsigma=False),
])
mat_dataset = TransformDataset(mat_dataset, mat_transform)
mat_loader = DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
        num_workers=1, pin_memory=not opt.no_cuda
)
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
engine.epoch = 0
    start_time = time.time()
    engine.test(mat_loader, test_dir)
    end_time = time.time()
    test_time = end_time - start_time
    print('cost-time: ', (test_time / len(mat_dataset)))
| 1,643 | 23.176471 | 72 | py |
SERT | SERT-master/hside_real.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
import datetime
import time
from hsi_setup import Engine, train_options, make_dataset
#os.environ["WANDB_MODE"] ='offline'
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
img_options={}
img_options['patch_size'] = 128
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
train_dir = '/train_real/'
train_dataset = DataLoaderTrain(train_dir,50,img_options=img_options,use2d=engine.get_net().use_2dconv)
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
print('==> Preparing data..')
"""Test-Dev"""
basefolder = '/test_real'
mat_datasets = DataLoaderVal(basefolder, 50, None,use2d=engine.get_net().use_2dconv)
mat_loader = DataLoader(
mat_datasets,
batch_size=1, shuffle=False,
        num_workers=1, pin_memory=not opt.no_cuda
)
base_lr = opt.lr
epoch_per_save = 20
adjust_learning_rate(engine.optimizer, opt.lr)
print('loading finished')
    # main fine-tuning loop on real noise: lr decays at epochs 200 and 400
engine.epoch = 0
while engine.epoch < 1000:
np.random.seed()
if engine.epoch == 200:
adjust_learning_rate(engine.optimizer, base_lr*0.5)
if engine.epoch == 400:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loader)
engine.validate(mat_loader, 'real')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
wandb.finish()
| 2,369 | 25.333333 | 113 | py |
SERT | SERT-master/hsi_setup.py | import torch
import torch.optim as optim
import models
import os
import argparse
from os.path import join
from utility import *
from utility.ssim import SSIMLoss,SAMLoss
from thop import profile
from torchstat import stat
import scipy.io as scio
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from torchvision import models as torchmodel
from torch import einsum
import torchvision.utils as vutil
import time
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
class MultipleLoss(nn.Module):
def __init__(self, losses, weight=None):
super(MultipleLoss, self).__init__()
self.losses = nn.ModuleList(losses)
self.weight = weight or [1/len(self.losses)] * len(self.losses)
def forward(self, predict, target):
total_loss = 0
for weight, loss in zip(self.weight, self.losses):
total_loss += loss(predict, target) * weight
return total_loss
def extra_repr(self):
return 'weight={}'.format(self.weight)
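# Usage sketch (matches the l2_ssim option below): MultipleLoss([nn.MSELoss(),
# SSIMLoss(data_range=1, channel=31)], weight=[1, 2.5e-3]) returns the weighted
# sum 1 * mse + 2.5e-3 * ssim_loss of the wrapped criteria.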
class L1Consist(nn.Module):
def __init__(self, losses, weight=None):
super(L1Consist, self).__init__()
self.loss1 = losses[0]
self.loss_cons = losses[1]
        self.weight = weight or [1 / len(losses)] * len(losses)
def forward(self, predict, target,inputs):
total_loss = 0
total_loss += self.loss1(predict, target) * self.weight[0]
total_loss += self.loss_cons( predict , target,inputs) * self.weight[1]
return total_loss
def extra_repr(self):
return 'weight={}'.format(self.weight)
def train_options(parser):
def _parse_str_args(args):
str_args = args.split(',')
parsed_args = []
for str_arg in str_args:
arg = int(str_arg)
if arg >= 0:
parsed_args.append(arg)
return parsed_args
parser.add_argument('--prefix', '-p', type=str, default='denoise',
help='prefix')
parser.add_argument('--arch', '-a', metavar='ARCH', required=True,
choices= model_names ,
help='model architecture: ' +
' | '.join(model_names))
parser.add_argument('--batchSize', '-b', type=int,
default=16, help='training batch size. default=16')
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate. default=1e-3.')
parser.add_argument('--wd', type=float, default=0,
help='weight decay. default=0')
parser.add_argument('--loss', type=str, default='l2',
help='which loss to choose.', choices=['l1', 'l2', 'smooth_l1', 'ssim', 'l2_ssim','l2_sam','cons','cons_l2'])
parser.add_argument('--testdir', type=str)
parser.add_argument('--sigma', type=int)
parser.add_argument('--init', type=str, default='kn',
help='which init scheme to choose.', choices=['kn', 'ku', 'xn', 'xu', 'edsr'])
parser.add_argument('--no-cuda', action='store_true', help='disable cuda?')
parser.add_argument('--no-log', action='store_true',
help='disable logger?')
parser.add_argument('--threads', type=int, default=1,
help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=2018,
help='random seed to use. default=2018')
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
parser.add_argument('--no-ropt', '-nro', action='store_true',
help='not resume optimizer')
parser.add_argument('--chop', action='store_true',
help='forward chop')
parser.add_argument('--resumePath', '-rp', type=str,
default=None, help='checkpoint to use.')
parser.add_argument('--test-dir', type=str,
default='/data/HSI_Data/icvl_noise/512_noniid', help='The path of test HSIs')
parser.add_argument('--dataroot', '-d', type=str,
default='/data/HSI_Data/ICVL64_31.db', help='data root')
parser.add_argument('--clip', type=float, default=1e6)
parser.add_argument('--gpu-ids', type=str, default='0', help='gpu ids')
####################
parser.add_argument('--update_lr', type=float, default=0.5e-4, help='learning rate of inner loop')
parser.add_argument('--meta_lr', type=float, default=0.5e-4, help='learning rate of outer loop')
parser.add_argument('--n_way', type=int, default=1, help='the number of ways')
parser.add_argument('--k_spt', type=int, default=2, help='the number of support set')
parser.add_argument('--k_qry', type=int, default=5, help='the number of query set')
parser.add_argument('--task_num', type=int, default=16, help='the number of tasks')
parser.add_argument('--update_step', type=int, default=5, help='update step of inner loop in training')
parser.add_argument('--update_step_test', type=int, default=10, help='update step of inner loop in testing')
opt = parser.parse_args()
opt.gpu_ids = _parse_str_args(opt.gpu_ids)
return opt
def make_dataset(opt, train_transform, target_transform, common_transform, batch_size=None, repeat=1):
dataset = LMDBDataset(opt.dataroot, repeat=repeat)
# dataset.length -= 1000
# dataset.length = size or dataset.length
"""Split patches dataset into training, validation parts"""
dataset = TransformDataset(dataset, common_transform)
train_dataset = ImageTransformDataset(dataset, train_transform, target_transform)
train_loader = DataLoader(train_dataset,
batch_size=batch_size or opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
return train_loader
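# Pipeline sketch for make_dataset: LMDBDataset yields clean HSI patches,
# common_transform applies the shared augmentation, and ImageTransformDataset
# then builds (degraded input, clean target) pairs through train_transform and
# target_transform before batching.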
def make_metadataset(opt, train_transform, target_transform, common_transform, batch_size=None, repeat=1):
dataset = LMDBDataset(opt.dataroot, repeat=repeat)
# dataset.length -= 1000
# dataset.length = size or dataset.length
"""Split patches dataset into training, validation parts"""
dataset = TransformDataset(dataset, common_transform)
train_dataset = MetaRandomDataset(dataset, opt.n_way, opt.k_spt, opt.k_qry, train_transform, target_transform)
train_loader = DataLoader(train_dataset,
batch_size=batch_size or opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
return train_loader
class Engine(object):
def __init__(self, opt):
self.prefix = opt.prefix
self.opt = opt
self.net = None
self.optimizer = None
self.criterion = None
self.basedir = None
self.iteration = None
self.epoch = None
self.best_psnr = None
self.best_loss = None
self.writer = None
self.__setup()
def __setup(self):
self.basedir = join('checkpoints', self.opt.arch)
if not os.path.exists(self.basedir):
os.makedirs(self.basedir)
self.best_psnr = 0
self.best_loss = 1e6
self.epoch = 0 # start from epoch 0 or last checkpoint epoch
self.iteration = 0
cuda = not self.opt.no_cuda
self.device = 'cuda' if cuda else 'cpu'
        print('Cuda Access: %d' % cuda)
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
torch.manual_seed(self.opt.seed)
if cuda:
torch.cuda.manual_seed(self.opt.seed)
"""Model"""
print("=> creating model '{}'".format(self.opt.arch))
self.net = models.__dict__[self.opt.arch]()
# initialize parameters
#print(self.net)
init_params(self.net, init_type=self.opt.init) # disable for default initialization
if len(self.opt.gpu_ids) > 1:
self.net = nn.DataParallel(self.net.cuda(), device_ids=self.opt.gpu_ids, output_device=self.opt.gpu_ids[0])
if self.opt.loss == 'l2':
self.criterion = nn.MSELoss()
if self.opt.loss == 'l1':
self.criterion = nn.L1Loss()
if self.opt.loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
if self.opt.loss == 'ssim':
self.criterion = SSIMLoss(data_range=1, channel=31)
if self.opt.loss == 'l2_ssim':
self.criterion = MultipleLoss([nn.MSELoss(), SSIMLoss(data_range=1, channel=31)], weight=[1, 2.5e-3])
if self.opt.loss == 'l2_sam':
self.criterion = MultipleLoss([nn.MSELoss(),SAMLoss()],weight=[1, 1e-3])
if self.opt.loss == 'cons':
self.criterion = L1Consist([nn.L1Loss(),ContrastLoss(ablation=False)],weight=[1, 1])
if self.opt.loss == 'cons_l2':
self.criterion = L1Consist([nn.MSELoss(),ContrastLoss(ablation=False)],weight=[1, 0.01])
print(self.criterion)
if cuda:
self.net.to(self.device)
print('cuda initialized')
self.criterion = self.criterion.to(self.device)
"""Logger Setup"""
log = not self.opt.no_log
if log:
self.writer = get_summary_writer(os.path.join(self.basedir, 'logs'), self.opt.prefix)
"""Optimization Setup"""
self.optimizer = optim.Adam(
self.net.parameters(), lr=self.opt.lr, weight_decay=self.opt.wd, amsgrad=False)
"""Resume previous model"""
if self.opt.resume:
# Load checkpoint.
self.load(self.opt.resumePath, not self.opt.no_ropt)
else:
print('==> Building model..')
# print(self.net)
total = sum([param.nelement() for param in self.net.parameters()])
print("Number of parameter: %.2fM" % (total/1e6))
# # stat(self.net, (31, 64, 64))
# from ptflops import get_model_complexity_info
# if self.get_net().use_2dconv == True:
# macs, params = get_model_complexity_info(self.net, (31, 512, 512),as_strings=True,
# print_per_layer_stat=False, verbose=False)
# else:
# macs, params = get_model_complexity_info(self.net, (1,31, 512, 512),as_strings=True,
# print_per_layer_stat=False, verbose=False)
# print('{:<30} {:<8}'.format('Computational complexity: ', macs))
# print('{:<30} {:<8}'.format('Number of parameters: ', params))
# # print(self.net.flops([64,64]))
# input_res= (31, 64, 64)
# batch = torch.ones(()).new_empty((1, *input_res),
# dtype=next(self.net.parameters()).dtype,
# device=next(self.net.parameters()).device)
# #print(input_res.shape)
# #from fvcore.nn import FlopCountAnalysis
# from flop_count.flop_count import FlopCountAnalysis
# flops = FlopCountAnalysis(self.net, batch)
# print(flops.total())
# from thop import profile
# batch = torch.randn(1,31, 512, 512)
# macs, params = profile(self.net, inputs=(batch.to('cuda'), ))
# print(macs,params)
# from torchstat import stat
# stat(self.net, (3, 256, 256))
# print(self.net.flops([64,64]))
def reset_params(self):
init_params(self.net, init_type=self.opt.init) # disable for default initialization
def forward(self, inputs):
if self.opt.chop:
output = self.forward_chop(inputs)
else:
output = self.net(inputs)
return output
def forward_chop(self, x, base=16):
n, c, b, h, w = x.size()
h_half, w_half = h // 2, w // 2
shave_h = np.ceil(h_half / base) * base - h_half
shave_w = np.ceil(w_half / base) * base - w_half
shave_h = shave_h if shave_h >= 10 else shave_h + base
shave_w = shave_w if shave_w >= 10 else shave_w + base
h_size, w_size = int(h_half + shave_h), int(w_half + shave_w)
inputs = [
x[..., 0:h_size, 0:w_size],
x[..., 0:h_size, (w - w_size):w],
x[..., (h - h_size):h, 0:w_size],
x[..., (h - h_size):h, (w - w_size):w]
]
outputs = [self.net(input_i) for input_i in inputs]
output = torch.zeros_like(x)
output_w = torch.zeros_like(x)
output[..., 0:h_half, 0:w_half] += outputs[0][..., 0:h_half, 0:w_half]
output_w[..., 0:h_half, 0:w_half] += 1
output[..., 0:h_half, w_half:w] += outputs[1][..., 0:h_half, (w_size - w + w_half):w_size]
output_w[..., 0:h_half, w_half:w] += 1
output[..., h_half:h, 0:w_half] += outputs[2][..., (h_size - h + h_half):h_size, 0:w_half]
output_w[..., h_half:h, 0:w_half] += 1
output[..., h_half:h, w_half:w] += outputs[3][..., (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
output_w[..., h_half:h, w_half:w] += 1
output /= output_w
return output
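    # forward_chop sketch: the input is split into four overlapping quadrants,
    # each padded up to a multiple of `base`; the network runs on every
    # quadrant separately and overlapping predictions are averaged back
    # together via the output_w counter, bounding peak GPU memory.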
def __step(self, train, inputs, targets,sigma=None):
if train:
self.optimizer.zero_grad()
loss_data = 0
total_norm = None
        if not train:
            self.net.eval()  # only switch to eval mode when not training
if self.get_net().bandwise:
O = []
            for band_idx, (i, t) in enumerate(zip(inputs.split(1, 1), targets.split(1, 1))):
o = self.net(i)
O.append(o)
loss = self.criterion(o, t)
if train:
loss.backward()
loss_data += loss.item()
outputs = torch.cat(O, dim=1)
else:
#noisy_sigma = torch.zeros
outputs = self.net(inputs)
# outputs = torch.clamp(outputs, 0, 1)
# loss = self.criterion(outputs, targets)
# if outputs.ndimension() == 5:
# loss = self.criterion(outputs[:,0,...], torch.clamp(targets[:,0,...], 0, 1))
# else:
# loss = self.criterion(outputs, torch.clamp(targets, 0, 1))
#print(outputs.shape,torch.squeeze(outputs).shape,targets.shape)
#loss = self.criterion(outputs[:,0,...], targets[:,0,...])
# if self.net.use_2dconv == True:
# loss = self.criterion(outputs[:,0,...], targets[:,0,...])
# else:
                loss = self.criterion(outputs, targets)  # memnet
if train:
loss.backward()
loss_data += loss.item()
if train:
total_norm = nn.utils.clip_grad_norm_(self.net.parameters(), self.opt.clip)
self.optimizer.step()
return outputs, loss_data, total_norm
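    # When the wrapped network sets `bandwise`, __step denoises the spectrum one
    # band at a time and backpropagates per band, so training memory stays at a
    # single-band footprint; otherwise the whole cube is processed in one pass.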
def load(self, resumePath=None, load_opt=True):
        print('==> Resuming from checkpoint %s..' % resumePath)
        assert os.path.isfile(resumePath), 'Error: checkpoint %s not found!' % resumePath
        checkpoint = torch.load(resumePath)
# if load_opt:
# self.optimizer.load_state_dict(checkpoint['optimizer'])
self.get_net().load_state_dict(checkpoint['net'])
def train(self, train_loader,val):
print('\nEpoch: %d' % self.epoch)
self.net.train()
train_loss = 0
train_psnr = 0
for batch_idx, (inputs, targets) in enumerate(train_loader):
if not self.opt.no_cuda:
inputs, targets = inputs.to(self.device), targets.to(self.device)
#print(inputs.shape,inputs.type)
outputs, loss_data, total_norm = self.__step(True, inputs, targets)
train_loss += loss_data
avg_loss = train_loss / (batch_idx+1)
psnr = np.mean(cal_bwpsnr(outputs, targets))
train_psnr += psnr
avg_psnr = train_psnr/ (batch_idx+1)
if not self.opt.no_log:
wandb.log({'train_psnr':avg_psnr},step=self.iteration)
wandb.log({'train_loss':loss_data},step=self.iteration)
wandb.log({'train_avg_loss':avg_loss},step=self.iteration)
self.writer.add_scalar(
join(self.prefix, 'train_psnr'), avg_psnr, self.iteration)
self.writer.add_scalar(
join(self.prefix, 'train_loss'), loss_data, self.iteration)
self.writer.add_scalar(
join(self.prefix, 'train_avg_loss'), avg_loss, self.iteration)
self.iteration += 1
            progress_bar(batch_idx, len(train_loader), 'AvgLoss: %.4e | Loss: %.4e | Norm: %.4e | Psnr: %.4e'
                         % (avg_loss, loss_data, total_norm, psnr))
self.epoch += 1
if not self.opt.no_log:
self.writer.add_scalar(
join(self.prefix, 'train_loss_epoch'), avg_loss, self.epoch)
def test(self, valid_loader, filen):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
if os.path.exists(filen):
filenames = [
fn
for fn in os.listdir(filen)
if fn.endswith('.mat')
]
print('[i] Eval dataset ...')
print(len(valid_loader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
if not self.opt.no_cuda:
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
#outputs = torch.clamp(self.net(inputs), 0, 1)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
h,w=inputs.shape[-2:]
band = inputs.shape[-3]
result = outputs.squeeze().cpu().detach().numpy()
img = targets.squeeze().cpu().detach().numpy()
for k in range(band):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= band*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
                # simplified per-band SSIM from global image statistics (no sliding window)
                ssim = []
                k1 = 0.01
                k2 = 0.03
for k in range(band):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(band):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/band)
ERGAS.append(ergas)
# inputs = inputs.squeeze().cpu().detach().numpy()
# result = inputs
# for band in range(31):
# img = result[band]*255#
# cv2.imwrite(os.path.join(save_path, filenames[batch_idx][:-4] +'_band_'+str(band)+'.jpg'),cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR))
# scio.savemat('/data/HSI_Data/Hyperspectral_Project/Urban_cvpr2023/'+self.opt.arch+'urban.mat',{'result':result})
# save_path = '/data/HSI_Data/Hyperspectral_Project/Urban_cvpr2023/imgs/'
# result = np.clip(result,0,1)
# for band in range(100,105):
# img = result[band]*255#
# cv2.imwrite(os.path.join(save_path, self.opt.arch +'_band_'+str(band)+'.jpg'),cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR))
# color_img = np.concatenate([result[0][np.newaxis,:],result[105][np.newaxis,:],result[207][np.newaxis,:]],0)
# color_img = color_img.transpose((1,2,0))*255
# print(color_img.shape)
# cv2.imwrite(os.path.join(save_path, self.opt.arch +'color.jpg'),cv2.cvtColor(color_img.astype(np.uint8),cv2.COLOR_RGB2BGR))
# result = img
# color_img = np.concatenate([result[9][np.newaxis,:],result[15][np.newaxis,:],result[28][np.newaxis,:]],0)
# color_img = color_img.transpose((1,2,0))*255
# print(color_img.shape)
# cv2.imwrite(os.path.join(save_path, filenames[batch_idx][:-4] +'color.png'),cv2.cvtColor(color_img.astype(np.uint8),cv2.COLOR_RGB2BGR))
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def test_patch(self, valid_loader, filen,patch_size=64):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
filenames = [
fn
for fn in os.listdir(filen)
if fn.endswith('.mat')
]
print('[i] Eval dataset ...')
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
_,channel, width, height = inputs.shape
                # assumes 512x512 inputs with 31 bands, i.e. 64 patches of 64x64
                input_patch = torch.zeros((64,31,64,64),dtype=torch.float)
                targets_patch = torch.zeros((64,31,64,64),dtype=torch.float)
num = 0
for i in range(width//patch_size):
for j in range(height//patch_size):
sub_image = inputs[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
input_patch[num] = sub_image
targets_patch[num] = targets[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
num += 1
if not self.opt.no_cuda:
inputs, targets = input_patch.to(self.device), targets_patch.to(self.device)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
result_patch = outputs.squeeze().cpu().detach().numpy()
img_patch = targets.squeeze().cpu().numpy()
result = np.zeros((31,512,512))
img = np.zeros((31,512,512))
h,w=result.shape[-2:]
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
result[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = result_patch[num]
img[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = img_patch[num]
num += 1
for k in range(31):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= 31*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(31):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(31):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/31)
ERGAS.append(ergas)
# scio.savemat('/data/HSI_Data/Hyperspectral_Project/Urban_result/Ours/'+filenames[batch_idx], {'result': result})
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def test_3dpatch(self, valid_loader, filen,patch_size=64,band_size=31,all_size=512):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
filenames = [
fn
for fn in os.listdir(filen)
if fn.endswith('.mat')
]
print('[i] Eval dataset ...')
blocks = (all_size//patch_size)*(all_size//patch_size)
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
_,_,channel, width, height = inputs.shape
input_patch = torch.zeros((blocks,band_size,patch_size,patch_size),dtype=torch.float)
targets_patch = torch.zeros((blocks,band_size,patch_size,patch_size),dtype=torch.float)
num = 0
for i in range(width//patch_size):
for j in range(height//patch_size):
sub_image = inputs[:,:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
input_patch[num] = sub_image
targets_patch[num] = targets[:,:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
num += 1
if not self.opt.no_cuda:
inputs, targets = input_patch.to(self.device), targets_patch.to(self.device)
inputs=inputs.unsqueeze(1)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
result_patch = outputs.squeeze().cpu().detach().numpy()
img_patch = targets.squeeze().cpu().numpy()
result = np.zeros((band_size,all_size,all_size))
img = np.zeros((band_size,all_size,all_size))
h,w=result.shape[-2:]
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
result[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = result_patch[num]
img[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = img_patch[num]
num += 1
for k in range(band_size):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= band_size*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(band_size):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(band_size):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/band_size)
ERGAS.append(ergas)
# scio.savemat('/data/HSI_Data/Hyperspectral_Project/Urban_result/Ours/'+filenames[batch_idx], {'result': result})
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def validate(self, valid_loader, name,patch_size=64):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
print('[i] Eval dataset {}...'.format(name))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
if ('cswin_unet' in self.opt.arch) or ('unfold' in self.opt.arch)or ('scalable' in self.opt.arch):
_,channel, width, height = inputs.shape
                    # assumes 512x512 inputs with 31 bands, i.e. 64 patches of 64x64
                    input_patch = torch.zeros((64,31,64,64),dtype=torch.float)
                    targets_patch = torch.zeros((64,31,64,64),dtype=torch.float)
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
sub_image = inputs[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
input_patch[num] = sub_image
targets_patch[num] = targets[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
num += 1
if not self.opt.no_cuda:
inputs, targets = input_patch.to(self.device), targets_patch.to(self.device)
else:
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
h,w=inputs.shape[-2:]
if ('cswin_unet' in self.opt.arch) or ('unfold' in self.opt.arch) or('scalable' in self.opt.arch):
result_patch = outputs.squeeze().cpu().detach().numpy()
img_patch = targets.squeeze().cpu().numpy()
result = np.zeros((31,512,512))
img = np.zeros((31,512,512))
h,w=result.shape[-2:]
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
result[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = result_patch[num]
img[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = img_patch[num]
num += 1
else:
# outputs = torch.clamp(outputs,0,1)
result = outputs.squeeze().cpu().detach().numpy()
img = targets.squeeze().cpu().numpy()
for k in range(31):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= 31*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(31):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(31):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/31)
ERGAS.append(ergas)
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
if not self.opt.no_log:
wandb.log({'val_loss_epoch':avg_loss,'val_psnr_epoch':avg_psnr,'val_sam_epoch':avg_sam,'epoch':self.epoch})
self.writer.add_scalar(
join(self.prefix, name, 'val_loss_epoch'), avg_loss, self.epoch)
self.writer.add_scalar(
join(self.prefix, name, 'val_psnr_epoch'), avg_psnr, self.epoch)
self.writer.add_scalar(
join(self.prefix, name, 'val_sam_epoch'), avg_sam, self.epoch)
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def save_checkpoint(self, model_out_path=None, **kwargs):
if not model_out_path:
model_out_path = join(self.basedir, self.prefix, "model_epoch_%d_%d.pth" % (
self.epoch, self.iteration))
state = {
'net': self.get_net().state_dict(),
'optimizer': self.optimizer.state_dict(),
'epoch': self.epoch,
'iteration': self.iteration,
}
state.update(kwargs)
if not os.path.isdir(join(self.basedir, self.prefix)):
os.makedirs(join(self.basedir, self.prefix))
torch.save(state, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
# saving result into disk
def test_develop(self, test_loader, savedir=None, verbose=True):
from scipy.io import savemat
from os.path import basename, exists
def torch2numpy(hsi):
            if self.get_net().use_2dconv:
R_hsi = hsi.data[0].cpu().numpy().transpose((1,2,0))
else:
R_hsi = hsi.data[0].cpu().numpy()[0,...].transpose((1,2,0))
return R_hsi
self.net.eval()
test_loss = 0
total_psnr = 0
dataset = test_loader.dataset.dataset
res_arr = np.zeros((len(test_loader), 3))
input_arr = np.zeros((len(test_loader), 3))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
if not self.opt.no_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs, loss_data, _ = self.__step(False, inputs, targets)
test_loss += loss_data
avg_loss = test_loss / (batch_idx+1)
res_arr[batch_idx, :] = MSIQA(outputs, targets)
input_arr[batch_idx, :] = MSIQA(inputs, targets)
"""Visualization"""
# Visualize3D(inputs.data[0].cpu().numpy())
# Visualize3D(outputs.data[0].cpu().numpy())
psnr = res_arr[batch_idx, 0]
ssim = res_arr[batch_idx, 1]
if verbose:
print(batch_idx, psnr, ssim)
if savedir:
filedir = join(savedir, basename(dataset.filenames[batch_idx]).split('.')[0])
outpath = join(filedir, '{}.mat'.format(self.opt.arch))
if not exists(filedir):
os.mkdir(filedir)
if not exists(outpath):
savemat(outpath, {'R_hsi': torch2numpy(outputs)})
return res_arr, input_arr
def test_real(self, test_loader, savedir=None):
"""Warning: this code is not compatible with bandwise flag"""
from scipy.io import savemat
from os.path import basename
self.net.eval()
dataset = test_loader.dataset.dataset
with torch.no_grad():
for batch_idx, inputs in enumerate(test_loader):
if not self.opt.no_cuda:
inputs = inputs.cuda()
outputs = self.forward(inputs)
"""Visualization"""
input_np = inputs[0].cpu().numpy()
output_np = outputs[0].cpu().numpy()
display = np.concatenate([input_np, output_np], axis=-1)
Visualize3D(display)
# Visualize3D(outputs[0].cpu().numpy())
# Visualize3D((outputs-inputs).data[0].cpu().numpy())
if savedir:
R_hsi = outputs.data[0].cpu().numpy()[0,...].transpose((1,2,0))
savepath = join(savedir, basename(dataset.filenames[batch_idx]).split('.')[0], self.opt.arch + '.mat')
savemat(savepath, {'R_hsi': R_hsi})
return outputs
def get_net(self):
if len(self.opt.gpu_ids) > 1:
return self.net.module
else:
return self.net
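    # get_net() unwraps nn.DataParallel so callers read custom attributes
    # (use_2dconv, bandwise) and save/load state_dicts from the raw module.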
| 41,464 | 41.835744 | 166 | py |
SERT | SERT-master/hside_simu.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
import datetime
import time
from hsi_setup import Engine, train_options, make_dataset
import wandb
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
        description='Hyperspectral Image Denoising (blind Gaussian noise)')
opt = train_options(parser)
print(opt)
data = datetime.datetime.now()
wandb.init(project="hsi-denoising", entity="name",name=opt.arch+opt.prefix+'-'+str(data.month)+'-'+str(data.day)+'-'+str(data.hour)+':'+str(data.minute),config=opt)
    wandb.config.update(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
target_transform = HSI2Tensor()
train_transform = Compose([
AddNoiseBlindv1(10,70),
HSI2Tensor()
])
icvl_64_31_dir ='/data/HSI_Data/ICVL64_31.db/'
icvl_64_31 = LMDBDataset(icvl_64_31_dir)
target_transform = HSI2Tensor()
train_dataset = ImageTransformDataset(icvl_64_31, train_transform,target_transform)
print('==> Preparing data..')
"""Test-Dev"""
basefolder = '/data/HSI_Data/icvl_val_gaussian/512_10_70'
mat_datasets = [MatDataFromFolder(
basefolder, size=5)]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
# from epoch 50 to 100
engine.epoch = 0
while engine.epoch < 100:
np.random.seed()
if engine.epoch == 50:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loaders[0])
engine.validate(mat_loaders[0], 'wdc')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
wandb.finish()
| 3,121 | 29.019231 | 170 | py |
SERT | SERT-master/hside_urban.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import datetime
from utility import *
from hsi_setup import Engine, train_options, make_dataset
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
target_transform = HSI2Tensor()
train_transform = Compose([
AddNoiseNoniid_v2(0,55),
HSI2Tensor()
])
db_path = '/data/HSI_Data/Hyperspectral_Project/apex_big.db'
if not os.path.exists(db_path):
db_path = '/home/limiaoyu/data/Urban/apex_big.db'
icvl_64_31 = LMDBDataset(db_path,repeat=10)
target_transform = HSI2Tensor()
train_dataset = ImageTransformDataset(icvl_64_31, train_transform,target_transform)
print('==> Preparing data..')
# icvl_64_31_TL = make_dataset(
# opt, train_transform,
# target_transform, common_transform, 64)
"""Test-Dev"""
basefolder = '/data/HSI_Data/Hyperspectral_Project/'
if not os.path.exists(db_path):
basefolder = '/home/limiaoyu/data/Urban/'
mat_datasets = [MatDataFromFolder(
basefolder, size=1,fns=['Urban_304_minmax.mat']) ]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
# from epoch 50 to 100
engine.epoch = 0
while engine.epoch < 100:
np.random.seed()
if engine.epoch == 11:
adjust_learning_rate(engine.optimizer, base_lr*0.5)
if engine.epoch == 45:
adjust_learning_rate(engine.optimizer, base_lr*0.5*0.5)
if engine.epoch == 80:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
# if engine.epoch == 120:
# adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loaders[0])
#engine.test(mat_loaders[0], basefolder)
engine.validate(mat_loaders[0], 'icvl-validate-mixture')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
| 3,580 | 29.87069 | 113 | py |
SERT | SERT-master/hside_simu_complex.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import datetime
from utility import *
from hsi_setup import Engine, train_options, make_dataset
import wandb
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
data = datetime.datetime.now()
wandb.init(project="hsi-denoising-complex", entity="miayili",name=opt.arch+opt.prefix+'-'+str(data.month)+'-'+str(data.day)+'-'+str(data.hour)+':'+str(data.minute),config=opt)
    wandb.config.update(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
target_transform = HSI2Tensor()
sigmas = [10, 30, 50, 70]
train_transform = Compose([
AddNoiseNoniid(sigmas),
SequentialSelect(
transforms=[
lambda x: x,
AddNoiseImpulse(),
AddNoiseStripe(),
AddNoiseDeadline()
]
),
HSI2Tensor()
])
#change to 10 for sms_10
icvl_64_31_dir = '/data/HSI_Data/ICVL64_31.db/'
if not os.path.exists(icvl_64_31_dir):
icvl_64_31_dir = '/home/limiaoyu/data/ICVL64_31.db/'
icvl_64_31 = LMDBDataset(icvl_64_31_dir)
target_transform = HSI2Tensor()
train_dataset = ImageTransformDataset(icvl_64_31, train_transform,target_transform)
print('==> Preparing data..')
# icvl_64_31_TL = make_dataset(
# opt, train_transform,
# target_transform, common_transform, 64)
"""Test-Dev"""
folder_mat = '/data/HSI_Data/icvl_noise_50/512_mix'
if not os.path.exists(folder_mat):
folder_mat = '/home/limiaoyu/data/icvl_val_gaussian/50_mix'
mat_datasets = [MatDataFromFolder(folder_mat, size=5)]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=8, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
# from epoch 50 to 100
engine.epoch = 0
while engine.epoch < 100:
np.random.seed()
#swin_ir_o 1e-4 60 o_resume from 40
#for 10
if engine.epoch == 50:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
        # deep_qrnn3d: switch to 1e-3 after 10 epochs
#for 10
# if engine.epoch == 45:
# adjust_learning_rate(engine.optimizer, base_lr*0.1)
# if engine.epoch == 45:
# adjust_learning_rate(engine.optimizer, base_lr*0.1*0.1)
# if engine.epoch == 70:
# adjust_learning_rate(engine.optimizer, base_lr*0.01)
# if engine.epoch == 120:
# adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loaders[0])
engine.validate(mat_loaders[0], 'icvl-validate-noniid')
#engine.validate(mat_loaders[1], 'icvl-validate-mixture')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
| 4,281 | 31.938462 | 181 | py |
SERT | SERT-master/hside_urban_test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
from hsi_setup import Engine, train_options, make_dataset
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
target_transform = HSI2Tensor()
"""Test-Dev"""
basefolder = '/data/HSI_Data/Hyperspectral_Project/'
mat_datasets = [MatDataFromFolder(
basefolder, size=1,fns=['Urban_304.mat']) ]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
engine.test(mat_loaders[0], basefolder)
| 1,535 | 23.774194 | 75 | py |
SERT | SERT-master/hside_real_test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
from hsi_setup import Engine, train_options, make_dataset
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
"""Test-Dev"""
basefolder = opt.testdir
mat_datasets = DataLoaderVal(basefolder, 50, None,use2d=engine.get_net().use_2dconv)
print(len(mat_datasets))
print('loading finished')
mat_loader = DataLoader(
mat_datasets,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda )
    start_time = time.time()
    engine.test(mat_loader, basefolder)
    end_time = time.time()
    test_time = end_time - start_time
    print('cost-time per image: ', test_time / len(mat_datasets))
| 1,136 | 23.191489 | 88 | py |
SERT | SERT-master/utility/ssim.py | import torch
import torch.nn.functional as F
def _fspecial_gauss_1d(size, sigma):
r"""Create 1-D gauss kernel
Args:
size (int): the size of gauss kernel
sigma (float): sigma of normal distribution
Returns:
torch.Tensor: 1D kernel
"""
coords = torch.arange(size).to(dtype=torch.float)
coords -= size//2
g = torch.exp(-(coords**2) / (2*sigma**2))
g /= g.sum()
return g.unsqueeze(0).unsqueeze(0)
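# The kernel above is L1-normalized (sums to 1). Because a 2-D Gaussian is
# separable, gaussian_filter below applies this 1-D kernel once per spatial
# axis instead of a full 2-D convolution; each 'valid' pass (padding=0)
# shrinks the filtered axis by win_size - 1 pixels.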
def gaussian_filter(input, win):
r""" Blur input with 1-D kernel
Args:
input (torch.Tensor): a batch of tensors to be blured
window (torch.Tensor): 1-D gauss kernel
Returns:
torch.Tensor: blured tensors
"""
N, C, H, W = input.shape
out = F.conv2d(input, win, stride=1, padding=0, groups=C)
# make it contiguous in y direction for memory efficiency
out = out.transpose(2, 3).contiguous()
out = F.conv2d(out, win, stride=1, padding=0, groups=C)
return out.transpose(2, 3).contiguous()
def _ssim(X, Y, win, data_range=255, size_average=True, full=False):
r""" Calculate ssim index for X and Y
Args:
X (torch.Tensor): images
Y (torch.Tensor): images
win (torch.Tensor): 1-D gauss kernel
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
full (bool, optional): return sc or not
Returns:
torch.Tensor: ssim results
"""
K1 = 0.01
K2 = 0.03
batch, channel, height, width = X.shape
compensation = 1.0
C1 = (K1 * data_range)**2
C2 = (K2 * data_range)**2
#####################################
# the 5 convs (blurs) can be combined
concat_input = torch.cat([X, Y, X*X, Y*Y, X*Y], dim=1)
concat_win = win.repeat(5, 1, 1, 1).to(X.device, dtype=X.dtype)
concat_out = gaussian_filter(concat_input, concat_win)
# unpack from conv output
mu1, mu2, sigma1_sq, sigma2_sq, sigma12 = (
concat_out[:, idx*channel:(idx+1)*channel, :, :] for idx in range(5))
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = compensation * (sigma1_sq - mu1_sq)
sigma2_sq = compensation * (sigma2_sq - mu2_sq)
sigma12 = compensation * (sigma12 - mu1_mu2)
##########################
# implementation from original repo
#_mu1 = F.conv2d( X, win, stride=1, padding=0, groups=channel)
#_mu2 = F.conv2d( Y, win, stride=1, padding=0, groups=channel)
#mu1_sq = mu1.pow(2)
#mu2_sq = mu2.pow(2)
#mu1_mu2 = mu1 * mu2
#sigma1_sq = compensation * ( F.conv2d( X*X, win, stride=1, padding=0, groups=channel) - mu1_sq )
#sigma2_sq = compensation * ( F.conv2d( Y*Y, win, stride=1, padding=0, groups=channel) - mu2_sq )
#sigma12 = compensation * ( F.conv2d( X*Y, win, stride=1, padding=0, groups=channel) - mu1_mu2 )
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
if size_average:
ssim_val = ssim_map.mean()
cs = cs_map.mean()
else:
ssim_val = ssim_map.mean(-1).mean(-1).mean(-1) # reduce along CHW
cs = cs_map.mean(-1).mean(-1).mean(-1)
if full:
return ssim_val, cs
else:
return ssim_val
def ssim(X, Y, win_size=11, win_sigma=1.5, win=None, data_range=255, size_average=True, full=False):
r""" interface of ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
full (bool, optional): return sc or not
Returns:
torch.Tensor: ssim results
"""
if len(X.shape) != 4:
raise ValueError('Input images must 4-d tensor.')
if not X.type() == Y.type():
raise ValueError('Input images must have the same dtype.')
if not X.shape == Y.shape:
raise ValueError('Input images must have the same dimensions.')
if not (win_size % 2 == 1):
raise ValueError('Window size must be odd.')
    if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat(X.shape[1], 1, 1, 1)
else:
win_size = win.shape[-1]
ssim_val, cs = _ssim(X, Y,
win=win,
data_range=data_range,
size_average=False,
full=True)
if size_average:
ssim_val = ssim_val.mean()
cs = cs.mean()
if full:
return ssim_val, cs
else:
return ssim_val
def ms_ssim(X, Y, win_size=11, win_sigma=1.5, win=None, data_range=255, size_average=True, full=False, weights=None):
r""" interface of ms-ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
full (bool, optional): return sc or not
weights (list, optional): weights for different levels
Returns:
torch.Tensor: ms-ssim results
"""
if len(X.shape) != 4:
raise ValueError('Input images must 4-d tensor.')
if not X.type() == Y.type():
raise ValueError('Input images must have the same dtype.')
if not X.shape == Y.shape:
raise ValueError('Input images must have the same dimensions.')
if not (win_size % 2 == 1):
raise ValueError('Window size must be odd.')
if weights is None:
weights = torch.FloatTensor(
[0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(X.device, dtype=X.dtype)
    if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat(X.shape[1], 1, 1, 1)
else:
win_size = win.shape[-1]
levels = weights.shape[0]
mcs = []
for _ in range(levels):
ssim_val, cs = _ssim(X, Y,
win=win,
data_range=data_range,
size_average=False,
full=True)
mcs.append(cs)
padding = (X.shape[2] % 2, X.shape[3] % 2)
X = F.avg_pool2d(X, kernel_size=2, padding=padding)
Y = F.avg_pool2d(Y, kernel_size=2, padding=padding)
mcs = torch.stack(mcs, dim=0) # mcs, (level, batch)
# weights, (level)
msssim_val = torch.prod((mcs[:-1] ** weights[:-1].unsqueeze(1))
* (ssim_val ** weights[-1]), dim=0) # (batch, )
if size_average:
msssim_val = msssim_val.mean()
return msssim_val
# Classes to re-use window
class SSIMLoss(torch.nn.Module):
def __init__(self, win_size=11, win_sigma=1.5, data_range=None, size_average=True, channel=3):
r""" class for ssim
Args:
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
channel (int, optional): input channels (default: 3)
"""
super(SSIMLoss, self).__init__()
self.win = _fspecial_gauss_1d(
win_size, win_sigma).repeat(channel, 1, 1, 1)
self.size_average = size_average
self.data_range = data_range
def forward(self, X, Y):
if X.ndimension() == 5:
X = X[:,0,...]
Y = Y[:,0,...]
return 1-ssim(X, Y, win=self.win, data_range=self.data_range, size_average=self.size_average)
class SAMLoss(torch.nn.Module):
def __init__(self, size_average = False):
super(SAMLoss, self).__init__()
def forward(self, img_base, img_out):
if img_base.ndimension() == 5:
img_base = img_base[:,0,...]
if img_out.ndimension() == 5:
img_out = img_out[:,0,...]
sum1 = torch.sum(img_base * img_out, 1)
sum2 = torch.sum(img_base * img_base, 1)
sum3 = torch.sum(img_out * img_out, 1)
t = (sum2 * sum3) ** 0.5
numlocal = torch.gt(t, 0)
num = torch.sum(numlocal)
t = sum1 / t
angle = torch.acos(t)
sumangle = torch.where(torch.isnan(angle), torch.full_like(angle, 0), angle).sum()
if num == 0:
averangle = sumangle
else:
averangle = sumangle / num
        SAM = averangle * 180 / 3.141592653589793  # radians -> degrees
return SAM
class MS_SSIM(torch.nn.Module):
def __init__(self, win_size=11, win_sigma=1.5, data_range=None, size_average=True, channel=3, weights=None):
r""" class for ms-ssim
Args:
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
channel (int, optional): input channels (default: 3)
weights (list, optional): weights for different levels
"""
super(MS_SSIM, self).__init__()
self.win = _fspecial_gauss_1d(
win_size, win_sigma).repeat(channel, 1, 1, 1)
self.size_average = size_average
self.data_range = data_range
self.weights = weights
def forward(self, X, Y):
return ms_ssim(X, Y, win=self.win, size_average=self.size_average, data_range=self.data_range, weights=self.weights)
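if __name__ == '__main__':
    # Sanity-check sketch: identical inputs should give SSIM ~ 1 and SSIMLoss ~ 0.
    x = torch.rand(2, 31, 64, 64)
    print(ssim(x, x, data_range=1.0))                    # ~ tensor(1.)
    print(SSIMLoss(data_range=1.0, channel=31)(x, x))    # ~ tensor(0.)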
| 10,533 | 34.829932 | 129 | py |
SERT | SERT-master/utility/lmdb_dataset.py | import torch.utils.data as data
import numpy as np
from PIL import Image
import os
import os.path
class LMDBDataset(data.Dataset):
def __init__(self, db_path, repeat=1):
import lmdb
self.db_path = db_path
self.env = lmdb.open(db_path, max_readers=1, readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = txn.stat()['entries']
self.length = int(self.length)
print(self.length)
self.repeat = repeat
with open(os.path.join(db_path, 'meta_info.txt')) as fin:
line = fin.readlines()[0]
size = line.split('(')[1].split(')')[0]
            h,w,c = [int(s) for s in size.split(',')]
        self.channels = c
        self.height = h
        self.width = w
def __getitem__(self, index):
index = index % (self.length)
env = self.env
with env.begin(write=False) as txn:
data = txn.get('{:08}'.format(index).encode('ascii'))
            flat_x = np.frombuffer(data, dtype=np.float32).copy()  # frombuffer view is read-only; copy keeps it writable
x = flat_x.reshape(self.channels, self.height, self.width)
return x
def __len__(self):
return self.length * self.repeat
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
if __name__ == '__main__':
dataset = LMDBDataset('/media/lmy/LMY/aaai/ICVL64_31.db')
print(len(dataset))
train_loader = data.DataLoader(dataset, batch_size=20, num_workers=4)
    print(next(iter(train_loader)).shape)
SERT | SERT-master/utility/load_tif.py | import numpy as np
import os
from torch.utils.data import Dataset
import torch
import torch.nn.functional as F
import random
import scipy.stats as stats
from torch.utils.data import DataLoader
from skimage import io
import cv2
####################i##############################################################################
class Augment_RGB_torch:
def __init__(self):
pass
def transform0(self, torch_tensor):
return torch_tensor
def transform1(self, torch_tensor):
torch_tensor = torch.rot90(torch_tensor, k=1, dims=[-1,-2])
return torch_tensor
def transform2(self, torch_tensor):
torch_tensor = torch.rot90(torch_tensor, k=2, dims=[-1,-2])
return torch_tensor
def transform3(self, torch_tensor):
torch_tensor = torch.rot90(torch_tensor, k=3, dims=[-1,-2])
return torch_tensor
def transform4(self, torch_tensor):
torch_tensor = torch_tensor.flip(-2)
return torch_tensor
def transform5(self, torch_tensor):
torch_tensor = (torch.rot90(torch_tensor, k=1, dims=[-1,-2])).flip(-2)
return torch_tensor
def transform6(self, torch_tensor):
torch_tensor = (torch.rot90(torch_tensor, k=2, dims=[-1,-2])).flip(-2)
return torch_tensor
def transform7(self, torch_tensor):
torch_tensor = (torch.rot90(torch_tensor, k=3, dims=[-1,-2])).flip(-2)
return torch_tensor
class MixUp_AUG:
def __init__(self):
self.dist = torch.distributions.beta.Beta(torch.tensor([1.2]), torch.tensor([1.2]))
def aug(self, rgb_gt, rgb_noisy):
bs = rgb_gt.size(0)
indices = torch.randperm(bs)
rgb_gt2 = rgb_gt[indices]
rgb_noisy2 = rgb_noisy[indices]
lam = self.dist.rsample((bs,1)).view(-1,1,1,1).cuda()
rgb_gt = lam * rgb_gt + (1-lam) * rgb_gt2
rgb_noisy = lam * rgb_noisy + (1-lam) * rgb_noisy2
return rgb_gt, rgb_noisy
augment = Augment_RGB_torch()
transforms_aug = [method for method in dir(augment) if callable(getattr(augment, method)) if not method.startswith('_')]
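# transforms_aug collects the names 'transform0' ... 'transform7' defined above;
# DataLoaderTrain.__getitem__ draws one at random (random.getrandbits(3) -> 0..7)
# and applies it to the clean/noisy pair via getattr(augment, name).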
def load_tif_img(filepath):
img = io.imread(filepath)
img = img.astype(np.float32)
#if type == 'gt':
img = img/4096.
return img
def is_tif_file(filename):
return any(filename.endswith(extension) for extension in [".tif"])
class DataLoaderTrain(Dataset):
def __init__(self, data_dir, ratio=50, img_options=None, target_transform=None,use2d=True,repeat=20):
super(DataLoaderTrain, self).__init__()
self.target_transform = target_transform
clean_files = sorted(os.listdir(os.path.join(data_dir, 'gt')))
noisy_files = sorted(os.listdir(os.path.join(data_dir, 'input{}'.format(ratio))))
self.clean_filenames = [os.path.join(data_dir, 'gt', x) for x in clean_files if is_tif_file(x)]
self.noisy_filenames = [os.path.join(data_dir, 'input{}'.format(ratio), x) for x in noisy_files if is_tif_file(x)]
self.clean = [torch.from_numpy(np.float32(load_tif_img(self.clean_filenames[index]))) for index in range(len(self.clean_filenames))]
self.noisy = [torch.from_numpy(np.float32(load_tif_img(self.noisy_filenames[index]))) for index in range(len(self.noisy_filenames))]
self.img_options=img_options
self.tar_size = len(self.clean_filenames) # get the size of target
self.ratio = ratio
self.use2d=use2d
self.repeat =repeat
def __len__(self):
return self.tar_size*self.repeat
def __getitem__(self, index):
tar_index = index % self.tar_size
clean = self.clean[tar_index]
noisy = self.noisy[tar_index]
clean_filename = os.path.split(self.clean_filenames[tar_index])[-1]
noisy_filename = os.path.split(self.noisy_filenames[tar_index])[-1]
clean = torch.clamp(clean, 0, 1)
noisy = torch.clamp(noisy, 0, 1)
#Crop Input and Target
ps = self.img_options['patch_size']
H = clean.shape[1]
W = clean.shape[2]
r = np.random.randint(0, H - ps)
c = np.random.randint(0, W - ps)
clean = clean[:, r:r + ps, c:c + ps]
noisy = noisy[:, r:r + ps, c:c + ps] * self.ratio
apply_trans = transforms_aug[random.getrandbits(3)]
clean = getattr(augment, apply_trans)(clean)
noisy = getattr(augment, apply_trans)(noisy)
if not self.use2d:
clean = clean[None,...]
noisy = noisy[None,...]
return noisy,clean#, clean_filename, noisy_filename
##################################################################################################
class DataLoaderVal(Dataset):
def __init__(self, data_dir, ratio=50, target_transform=None,use2d=True):
super(DataLoaderVal, self).__init__()
self.target_transform = target_transform
clean_files = sorted(os.listdir(os.path.join(data_dir, 'gt')))
noisy_files = sorted(os.listdir(os.path.join(data_dir, 'input{}'.format(ratio))))
self.clean_filenames = [os.path.join(data_dir, 'gt', x) for x in clean_files if is_tif_file(x)]
self.noisy_filenames = [os.path.join(data_dir, 'input{}'.format(ratio), x) for x in noisy_files if is_tif_file(x)]
self.clean = [torch.from_numpy(np.float32(load_tif_img(self.clean_filenames[index]))) for index in range(len(self.clean_filenames))]
self.noisy = [torch.from_numpy(np.float32(load_tif_img(self.noisy_filenames[index]))) for index in range(len(self.noisy_filenames))]
self.tar_size = len(self.clean_filenames)
self.ratio = ratio
self.use2d = use2d
def __len__(self):
return self.tar_size
def __getitem__(self, index):
tar_index = index % self.tar_size
clean = self.clean[tar_index]
noisy = self.noisy[tar_index]
clean_filename = os.path.split(self.clean_filenames[tar_index])[-1]
noisy_filename = os.path.split(self.noisy_filenames[tar_index])[-1]
ps = 512
r = clean.shape[1]//2-ps//2
c = clean.shape[2]//2-ps//2
clean = clean[:, r:r + ps, c:c + ps]
noisy = noisy[:, r:r + ps, c:c + ps] * self.ratio
if not self.use2d:
clean = clean[None,...]
noisy = noisy[None,...]
clean = torch.clamp(clean, 0, 1)
noisy = torch.clamp(noisy, 0, 1)
return noisy,clean#, clean_filename, noisy_filename
if __name__ == '__main__':
rgb_dir = '/media/lmy/LMY/aaai/real_dataset'
ratio = 50
train_dir = '/media/lmy/LMY/aaai/train_real/'
img_options ={}
img_options['patch_size'] = 128
#train_dataset = DataLoaderTrain(train_dir,50,img_options=img_options)
# train_loader = DataLoader(train_dataset,
# batch_size=1, shuffle=True,
# num_workers=1)
test_dir= '/media/lmy/LMY/aaai/test_real/'
dataset = DataLoaderVal(test_dir, ratio, None)
# print(len(dataset))
train_loader = DataLoader(dataset, batch_size=1, num_workers=1)
#print(iter(train_loader).next())
for batch_idx, (inputs, targets) in enumerate(train_loader):
print(batch_idx,inputs.shape)
band =20
inputs = inputs.numpy()
targets = targets.numpy()
cv2.imwrite('tnoisy_'+'_band'+str(band)+'.png',inputs[0,band]*255)
cv2.imwrite('tgt_'+'_band'+str(band)+'.png',targets[0,band]*255)
break
| 7,553 | 36.959799 | 140 | py |
SERT | SERT-master/utility/validation.py | import torch
import torchvision
import random
import cv2
import shutil
try:
from .util import *
except ImportError:
from util import *
from torchvision.transforms import Compose, ToPILImage, ToTensor, RandomHorizontalFlip, RandomChoice
from torch.utils.data import DataLoader, Dataset
from torchnet.dataset import TransformDataset, SplitDataset, TensorDataset, ResampleDataset
from PIL import Image
from skimage.util import random_noise
from scipy.ndimage import gaussian_filter
def show_validation_cadi():
all_datadir = '/data/HSI_Data/icvl201'
train_dir = '/data/HSI_Data/icvl_train_gaussian/'
test_dir = '/data/HSI_Data/icvl_validation_5'
all_fns = os.listdir(all_datadir)
test_fns = os.listdir(test_dir)
train_fns = os.listdir(train_dir)
rest_fns = []
for fn in all_fns:
if fn not in test_fns:
if fn not in train_fns:
rest_fns.append(fn)
print(rest_fns)
if __name__ == '__main__':
show_validation_cadi() | 994 | 26.638889 | 100 | py |
SERT | SERT-master/utility/helper.py | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import datetime
from tensorboardX import SummaryWriter
import socket
import wandb
def adjust_learning_rate(optimizer, lr):
print('Adjust Learning Rate => %.4e' %lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def display_learning_rate(optimizer):
lrs = []
for i, param_group in enumerate(optimizer.param_groups):
lr = param_group['lr']
print('learning rate of group %d: %.4e' % (i, lr))
lrs.append(lr)
return lrs
def adjust_opt_params(optimizer, param_dict):
print('Adjust Optimizer Parameters => %s' %param_dict)
for param_group in optimizer.param_groups:
for k, v in param_dict.items():
param_group[k] = v
def display_opt_params(optimizer, keys):
for i, param_group in enumerate(optimizer.param_groups):
for k in keys:
v = param_group[k]
print('%s of group %d: %.4e' % (k,i,v))
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.weight.requires_grad = False
m.bias.requires_grad = False
m.eval()
def get_summary_writer(log_dir, prefix=None):
# log_dir = './checkpoints/%s/logs'%(arch)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
if prefix is None:
log_dir = os.path.join(log_dir, datetime.datetime.now().strftime('%b%d_%H-%M-%S')+'_'+socket.gethostname())
else:
log_dir = os.path.join(log_dir, prefix+'_'+datetime.datetime.now().strftime('%b%d_%H-%M-%S')+'_'+socket.gethostname())
if not os.path.exists(log_dir):
os.mkdir(log_dir)
writer = SummaryWriter(log_dir)
return writer
def init_params(net, init_type='kn'):
print('use init scheme: %s' %init_type)
if init_type != 'edsr':
for m in net.modules():
if isinstance(m, (nn.Conv2d, nn.Conv3d)):
if init_type == 'kn':
init.kaiming_normal_(m.weight, mode='fan_out')
if init_type == 'ku':
init.kaiming_uniform_(m.weight, mode='fan_out')
if init_type == 'xn':
init.xavier_normal_(m.weight)
if init_type == 'xu':
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=1e-3)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
try:
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)
except ValueError:
    term_width = 80  # not attached to a terminal
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
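if __name__ == '__main__':
    # Sanity-check sketch: render a short progress bar, then pretty-print a duration.
    for step in range(5):
        progress_bar(step, 5, 'step %d' % step)
    print(format_time(3725.5))  # -> '1h2m'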
| 4,909 | 28.053254 | 126 | py |
SERT | SERT-master/utility/dataset.py | # There are functions for creating a train and validation iterator.
from os import mkdir
import torch
import torchvision
import random
import cv2
try:
from .util import *
except ImportError:
from util import *
from torchvision.transforms import Compose, ToPILImage, ToTensor, RandomHorizontalFlip, RandomChoice
from torch.utils.data import DataLoader, Dataset
from torchnet.dataset import TransformDataset, SplitDataset, TensorDataset, ResampleDataset
from PIL import Image
from skimage.util import random_noise
from scipy.ndimage import gaussian_filter
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
# Define Transforms
class RandomGeometricTransform(object):
def __call__(self, img):
"""
Args:
img (np.mdarray): Image to be geometric transformed.
Returns:
np.ndarray: Randomly geometric transformed image.
"""
if random.random() < 0.25:
return data_augmentation(img)
return img
class RandomCrop(object):
"""For HSI (c x h x w)"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img):
img = rand_crop(img, self.crop_size, self.crop_size)
return img
class SequentialSelect(object):
def __pos(self, n):
i = 0
while True:
# print(i)
yield i
i = (i + 1) % n
def __init__(self, transforms):
self.transforms = transforms
self.pos = LockedIterator(self.__pos(len(transforms)))
def __call__(self, img):
out = self.transforms[next(self.pos)](img)
return out
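# SequentialSelect cycles deterministically through its transforms across
# successive calls (0, 1, ..., n-1, 0, ...); the LockedIterator wrapper
# (imported from util) keeps the shared position counter consistent when
# several DataLoader workers draw from it concurrently.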
class AddNoise(object):
"""add gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, sigma):
self.sigma_ratio = sigma / 255.
def __call__(self, img):
noise = np.random.randn(*img.shape) * self.sigma_ratio
# print(img.sum(), noise.sum())
return img + noise
class AddNoiseBlind(object):
"""add blind gaussian noise to the given numpy array (B,H,W)"""
def __pos(self, n):
i = 0
while True:
yield i
i = (i + 1) % n
def __init__(self, sigmas):
self.sigmas = np.array(sigmas) / 255.
self.pos = LockedIterator(self.__pos(len(sigmas)))
def __call__(self, img):
sigma = self.sigmas[next(self.pos)]
noise = np.random.randn(*img.shape) * sigma
return img + noise, sigma
class AddNoiseBlindv1(object):
"""add blind gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, min_sigma, max_sigma):
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __call__(self, img):
sigma = np.random.uniform(self.min_sigma, self.max_sigma) / 255
noise = np.random.randn(*img.shape) * sigma
#print(img.shape)
out = img + noise
return out #, sigma
class AddNoiseBlindv2(object):
"""add blind gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, min_sigma, max_sigma):
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __call__(self, img):
sigma = np.random.uniform(self.min_sigma, self.max_sigma) / 255
noise = np.random.randn(*img.shape) * sigma
#print(img.shape)
out = img + noise
return out , sigma
class AddNoiseNoniid_v2(object):
"""add non-iid gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, min_sigma, max_sigma):
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __call__(self, img):
bwsigmas = np.reshape((np.random.rand( img.shape[0])*(self.max_sigma-self.min_sigma)+self.min_sigma), (-1,1,1))
noise = np.random.randn(*img.shape) * bwsigmas/255
return img + noise
class AddNoiseNoniid(object):
"""add non-iid gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, sigmas):
self.sigmas = np.array(sigmas) / 255.
def __call__(self, img):
bwsigmas = np.reshape(self.sigmas[np.random.randint(0, len(self.sigmas), img.shape[0])], (-1,1,1))
noise = np.random.randn(*img.shape) * bwsigmas
return img + noise
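# In AddNoiseNoniid the (-1, 1, 1) reshape yields one sigma per band that
# broadcasts over H and W, so every band receives Gaussian noise at its own
# randomly drawn level.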
class AddNoiseMixed(object):
"""add mixed noise to the given numpy array (B,H,W)
Args:
noise_bank: list of noise maker (e.g. AddNoiseImpulse)
num_bands: list of number of band which is corrupted by each item in noise_bank"""
def __init__(self, noise_bank, num_bands):
assert len(noise_bank) == len(num_bands)
self.noise_bank = noise_bank
self.num_bands = num_bands
def __call__(self, img):
B, H, W = img.shape
all_bands = np.random.permutation(range(B))
pos = 0
for noise_maker, num_band in zip(self.noise_bank, self.num_bands):
if 0 < num_band <= 1:
num_band = int(np.floor(num_band * B))
bands = all_bands[pos:pos+num_band]
pos += num_band
img = noise_maker(img, bands)
return img
class _AddNoiseImpulse(object):
"""add impulse noise to the given numpy array (B,H,W)"""
def __init__(self, amounts, s_vs_p=0.5):
self.amounts = np.array(amounts)
self.s_vs_p = s_vs_p
def __call__(self, img, bands):
# bands = np.random.permutation(range(img.shape[0]))[:self.num_band]
bwamounts = self.amounts[np.random.randint(0, len(self.amounts), len(bands))]
for i, amount in zip(bands,bwamounts):
self.add_noise(img[i,...], amount=amount, salt_vs_pepper=self.s_vs_p)
return img
def add_noise(self, image, amount, salt_vs_pepper):
# out = image.copy()
out = image
p = amount
q = salt_vs_pepper
flipped = np.random.choice([True, False], size=image.shape,
p=[p, 1 - p])
salted = np.random.choice([True, False], size=image.shape,
p=[q, 1 - q])
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = 0
return out
class _AddNoiseStripe(object):
"""add stripe noise to the given numpy array (B,H,W)"""
def __init__(self, min_amount, max_amount):
assert max_amount > min_amount
self.min_amount = min_amount
self.max_amount = max_amount
def __call__(self, img, bands):
B, H, W = img.shape
# bands = np.random.permutation(range(img.shape[0]))[:len(bands)]
num_stripe = np.random.randint(np.floor(self.min_amount*W), np.floor(self.max_amount*W), len(bands))
for i, n in zip(bands, num_stripe):
loc = np.random.permutation(range(W))
loc = loc[:n]
stripe = np.random.uniform(0,1, size=(len(loc),))*0.5-0.25
img[i, :, loc] -= np.reshape(stripe, (-1, 1))
return img
class _AddNoiseDeadline(object):
"""add deadline noise to the given numpy array (B,H,W)"""
def __init__(self, min_amount, max_amount):
assert max_amount > min_amount
self.min_amount = min_amount
self.max_amount = max_amount
def __call__(self, img, bands):
B, H, W = img.shape
# bands = np.random.permutation(range(img.shape[0]))[:len(bands)]
num_deadline = np.random.randint(np.ceil(self.min_amount*W), np.ceil(self.max_amount*W), len(bands))
for i, n in zip(bands, num_deadline):
loc = np.random.permutation(range(W))
loc = loc[:n]
img[i, :, loc] = 0
return img
class AddNoiseImpulse(AddNoiseMixed):
def __init__(self):
self.noise_bank = [_AddNoiseImpulse([0.1, 0.3, 0.5, 0.7])]
self.num_bands = [1/3]
class AddNoiseStripe(AddNoiseMixed):
def __init__(self):
self.noise_bank = [_AddNoiseStripe(0.05, 0.15)]
self.num_bands = [1/3]
class AddNoiseDeadline(AddNoiseMixed):
def __init__(self):
self.noise_bank = [_AddNoiseDeadline(0.05, 0.15)]
self.num_bands = [1/3]
class AddNoiseComplex(AddNoiseMixed):
def __init__(self):
self.noise_bank = [
_AddNoiseStripe(0.05, 0.15),
_AddNoiseDeadline(0.05, 0.15),
_AddNoiseImpulse([0.1, 0.3, 0.5, 0.7])
]
self.num_bands = [1/3, 1/3, 1/3]
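# --- hedged usage sketch (not part of the original repo) ---------------------
# Shows how the noise makers above are typically composed on a hyperspectral
# cube. The (31, 64, 64) shape (bands, H, W) and the [0, 1] value range are
# assumptions for illustration only.
def _demo_noise_pipeline():
    img = np.random.rand(31, 64, 64).astype(np.float32)
    noisy_iid = AddNoise(50)(img.copy())                     # fixed sigma = 50/255
    noisy_noniid = AddNoiseNoniid([10, 30, 50, 70])(img.copy())
    # the impulse/stripe/deadline makers mutate bands in place, so pass a copy
    noisy_complex = AddNoiseComplex()(img.copy())
    print(noisy_iid.shape, noisy_noniid.shape, noisy_complex.shape)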
class HSI2Tensor(object):
"""
Transform a numpy array with shape (C, H, W)
into torch 4D Tensor (1, C, H, W) or (C, H, W)
"""
def __init__(self, use_2dconv):
self.use_2dconv = use_2dconv
def __call__(self, hsi):
if self.use_2dconv:
img = torch.from_numpy(hsi)
else:
img = torch.from_numpy(hsi[None])
# for ch in range(hsi.shape[0]):
# hsi[ch, ...] = minmax_normalize(hsi[ch, ...])
# img = torch.from_numpy(hsi)
return img.float()
class LoadMatHSI(object):
    def __init__(self, input_key, gt_key, needsigma=False, transform=None, crop=False):
        self.gt_key = gt_key
        self.input_key = input_key
        self.transform = transform
        self.needsigma = needsigma
        self.crop = crop
def __call__(self, mat):
if self.transform:
input = self.transform(mat[self.input_key][:].transpose((2,0,1)))
gt = self.transform(mat[self.gt_key][:].transpose((2,0,1)))
else:
input = mat[self.input_key][:].transpose((2,0,1))
gt = mat[self.gt_key][:].transpose((2,0,1))
if self.needsigma:
sigma = mat['sigma']
sigma = torch.from_numpy(sigma).float()
# input = torch.from_numpy(input[None]).float()
input = torch.from_numpy(input).float()
# gt = torch.from_numpy(gt[None]).float() # for 3D net
gt = torch.from_numpy(gt).float()
        # fixed crop window used for the 64x64 test setting
        size = 64
        startx = 120
        starty = 110
if self.crop:
gt = gt[:,startx:startx+size,starty:starty+size]
input = input[:,startx:startx+size,starty:starty+size]
if self.needsigma:
return input, gt, sigma
return input, gt
class LoadMatKey(object):
def __init__(self, key):
self.key = key
def __call__(self, mat):
item = mat[self.key][:].transpose((2,0,1))
return item.astype(np.float32)
# Define Datasets
class DatasetFromFolder(Dataset):
"""Wrap data from image folder"""
def __init__(self, data_dir, suffix='png'):
super(DatasetFromFolder, self).__init__()
self.filenames = [
os.path.join(data_dir, fn)
for fn in os.listdir(data_dir)
if fn.endswith(suffix)
]
def __getitem__(self, index):
img = Image.open(self.filenames[index]).convert('L')
return img
def __len__(self):
return len(self.filenames)
class MatDataFromFolder(Dataset):
"""Wrap mat data from folder"""
def __init__(self, data_dir, load=loadmat, suffix='.mat', fns=None, size=None):
super(MatDataFromFolder, self).__init__()
if fns is not None:
self.filenames = [
os.path.join(data_dir, fn) for fn in fns
]
else:
self.filenames = [
os.path.join(data_dir, fn)
for fn in os.listdir(data_dir)
if fn.endswith(suffix)
]
# for i in range(10):
# print(self.filenames[i])
self.load = load
if size and size <= len(self.filenames):
self.filenames = self.filenames[:size]
# self.filenames = self.filenames[5:]
def __getitem__(self, index):
# print(self.filenames[index])
mat = self.load(self.filenames[index])
# print(self.filenames[index])
return mat
def __len__(self):
return len(self.filenames)
class DataLoaderVal_TIF(Dataset):
def __init__(self, data_dir, ratio=50, target_transform=None):
super(DataLoaderVal_TIF, self).__init__()
self.target_transform = target_transform
clean_files = sorted(os.listdir(os.path.join(data_dir, 'gt')))
noisy_files = sorted(os.listdir(os.path.join(data_dir, 'input{}'.format(ratio))))
self.clean_filenames = [os.path.join(data_dir, 'gt', x) for x in clean_files if is_tif_file(x)]
self.noisy_filenames = [os.path.join(data_dir, 'input{}'.format(ratio), x) for x in noisy_files if is_tif_file(x)]
self.clean = [torch.from_numpy(np.float32(load_tif_img(self.clean_filenames[index]))) for index in range(len(self.clean_filenames))]
self.noisy = [torch.from_numpy(np.float32(load_tif_img(self.noisy_filenames[index]))) for index in range(len(self.noisy_filenames))]
self.tar_size = len(self.clean_filenames)
self.ratio = ratio
def __len__(self):
return self.tar_size
def __getitem__(self, index):
tar_index = index % self.tar_size
clean = self.clean[tar_index]
noisy = self.noisy[tar_index]
        # print(clean.max(), noisy.max(), clean.shape)
clean_filename = os.path.split(self.clean_filenames[tar_index])[-1]
noisy_filename = os.path.split(self.noisy_filenames[tar_index])[-1]
ps = 512
r = clean.shape[1]//2-ps//2
c = clean.shape[2]//2-ps//2
clean = clean[:, r:r + ps, c:c + ps]
noisy = noisy[:, r:r + ps, c:c + ps] * self.ratio
return clean, noisy, clean_filename, noisy_filename
def get_train_valid_loader(dataset,
batch_size,
train_transform=None,
valid_transform=None,
valid_size=None,
shuffle=True,
verbose=False,
num_workers=1,
pin_memory=False):
"""
Utility function for loading and returning train and valid
multi-process iterators over any pytorch dataset. A sample
of the images can be optionally displayed.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- dataset: full dataset which contains training and validation data
- batch_size: how many samples per batch to load. (train, val)
- train_transform/valid_transform: callable function
applied to each sample of dataset. default: transforms.ToTensor().
- valid_size: should be a integer in the range [1, len(dataset)].
- shuffle: whether to shuffle the train/validation indices.
- verbose: display the verbose information of dataset.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be an integer in the range [1, %d]." %(len(dataset))
if not valid_size:
valid_size = int(0.1 * len(dataset))
if not isinstance(valid_size, int) or valid_size < 1 or valid_size > len(dataset):
raise TypeError(error_msg)
# define transform
    default_transform = lambda item: item  # identity mapping
train_transform = train_transform or default_transform
valid_transform = valid_transform or default_transform
# generate train/val datasets
partitions = {'Train': len(dataset)-valid_size, 'Valid':valid_size}
train_dataset = TransformDataset(
SplitDataset(dataset, partitions, initial_partition='Train'),
train_transform
)
valid_dataset = TransformDataset(
SplitDataset(dataset, partitions, initial_partition='Valid'),
valid_transform
)
train_loader = DataLoader(train_dataset,
batch_size=batch_size[0], shuffle=True,
num_workers=num_workers, pin_memory=pin_memory)
valid_loader = DataLoader(valid_dataset,
batch_size=batch_size[1], shuffle=False,
num_workers=num_workers, pin_memory=pin_memory)
return (train_loader, valid_loader)
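# --- hedged usage sketch (not part of the original repo) ---------------------
# Wires get_train_valid_loader to MatDataFromFolder defined below. The
# directory and the 'gt' mat key are hypothetical; LoadMatKey is defined
# earlier in this file, and TransformDataset/SplitDataset come from torchnet
# as used above.
def _demo_train_valid_loader():
    dataset = MatDataFromFolder('/path/to/mats', size=20)   # hypothetical dir
    train_loader, valid_loader = get_train_valid_loader(
        dataset,
        batch_size=(16, 1),                 # (train, val) batch sizes
        train_transform=LoadMatKey('gt'),
        valid_transform=LoadMatKey('gt'),
        valid_size=4)
    return train_loader, valid_loader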
def get_train_valid_dataset(dataset, valid_size=None):
error_msg = "[!] valid_size should be an integer in the range [1, %d]." %(len(dataset))
if not valid_size:
valid_size = int(0.1 * len(dataset))
if not isinstance(valid_size, int) or valid_size < 1 or valid_size > len(dataset):
raise TypeError(error_msg)
# generate train/val datasets
partitions = {'Train': len(dataset)-valid_size, 'Valid':valid_size}
train_dataset = SplitDataset(dataset, partitions, initial_partition='Train')
valid_dataset = SplitDataset(dataset, partitions, initial_partition='Valid')
return (train_dataset, valid_dataset)
class ImageTransformDataset(Dataset):
def __init__(self, dataset, transform, target_transform=None):
super(ImageTransformDataset, self).__init__()
self.dataset = dataset
self.transform = transform
self.target_transform = target_transform
self.length = len(self.dataset)
def __len__(self):
return self.length
def __getitem__(self, idx):
img = self.dataset[idx]
target = img.copy()
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
#sigma = torch.FloatTensor([50/255.0]).unsqueeze(1)
return img, target#,sigma
class MetaRandomDataset(Dataset):
def __init__(self, data, n_way, k_shot, k_query, transform, target_transform=None, min_sigma=10, max_sigma=70):
self.data = data
self.n_way = n_way # n-way
self.k_shot = k_shot # k-shot
self.k_query = k_query # for evaluation
self.setsz = self.n_way * self.k_shot # num of samples per set
self.querysz = self.n_way * self.k_query # number of samples per set for evaluation
self.transform = transform
self.target_transform = target_transform
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __getitem__(self, index):
support_x = []
support_y = []
query_x = []
query_y = []
# sigma = 0.1*np.random.rand()
sigma = np.random.uniform(self.min_sigma, self.max_sigma)
noisemaker = AddNoise(sigma)
img = self.data[index]
target = img.copy()
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
img = img.unsqueeze(dim=0)
GT = target.unsqueeze(dim=0)
for j in range(self.k_shot):
noisy_img = noisemaker(img)
support_x.append(noisy_img)
support_y.append(GT)
for j in range(self.k_query):
noisy_img = noisemaker(img)
query_x.append(noisy_img)
query_y.append(GT)
support_x = torch.cat(support_x, dim=0).float()
support_y = torch.cat(support_y, dim=0).float()
query_x = torch.cat(query_x, dim=0).float()
query_y = torch.cat(query_y, dim=0).float()
return [support_x, support_y, query_x, query_y, sigma/255]
def __len__(self):
return len(self.data)
def addNoise2ICVL():
#srcdir = '/data/HSI_Data/icvl_val_gaussian/gt/'
srcdir = '/media/lmy/LMY/cvpr2023/test_96_icvl/'
# noise_sig = [10,30,50,70]
# noisemodel = AddNoiseNoniid(noise_sig)
# dstdir ='/media/lmy/LMY/cvpr2023/test_noise_96_icvl/'+'512_mix'
# mkdir(dstdir)
c=0
#noisemodel = AddNoiseBlindv2(10,70)
# for filename in os.listdir(srcdir):
# c = c + 1
# print(c)
# filepath = os.path.join(srcdir, filename)
# mat = loadmat(filepath)
# srchsi = mat['data'].transpose(2,0,1)
# # inpaintinghsi, mask = inpaintingmodel(srchsi)
# noisyhsi = noisemodel(srchsi)
# # noisyhsi = stripemodel(noisyhsi)
# #noisyhsi = add_noniid_noise(srchsi)
# n_sigma = 0#/255
# savemat(os.path.join(dstdir, filename), {'gt': srchsi.transpose(
# 1, 2, 0),'sigma':n_sigma, 'input': noisyhsi.transpose(1, 2, 0)})
# stripemodel = AddNoiseImpulse()
# add_noniid_noise = Compose([
# AddNoiseNoniid(sigmas),
# AddNoiseComplex(),
# ])
#add_noniid_noise = AddNoiseNoniid(sigmas)
#srcimg = '/home/rainy/QRNN3D/data/toy.mat'
s_sigma = [10,30,50,70]
#s_sigma = [0]
for sigma in s_sigma:
#dstdir = '/data/HSI_Data/icvl_noise_50/512_mix'+'/'
dstdir = '/media/lmy/LMY/cvpr2023/test_noise_96_icvl/'+'512_'+str(sigma)
mkdir(dstdir)
noisemodel = AddNoise(sigma)
c = 0
#inpaintingmodel = AddInpaintingHole(0.05, 0.15,1/3)
for filename in os.listdir(srcdir):
c = c + 1
print(c)
filepath = os.path.join(srcdir, filename)
mat = loadmat(filepath)
srchsi = mat['data'].transpose(2,0,1)
# inpaintinghsi, mask = inpaintingmodel(srchsi)
noisyhsi = noisemodel(srchsi)
# noisyhsi = stripemodel(noisyhsi)
#noisyhsi = add_noniid_noise(srchsi)
n_sigma = sigma/255
savemat(os.path.join(dstdir, filename), {'gt': srchsi.transpose(
1, 2, 0),'sigma':n_sigma, 'input': noisyhsi.transpose(1, 2, 0)})
if __name__ == '__main__':
addNoise2ICVL()
| 21,829 | 33.928 | 140 | py |
SERT | SERT-master/utility/indexes.py | import numpy as np
import torch
from skimage.measure import compare_ssim, compare_psnr
from functools import partial
class Bandwise(object):
def __init__(self, index_fn):
self.index_fn = index_fn
def __call__(self, X, Y):
C = X.shape[-3]
bwindex = []
for ch in range(C):
x = torch.squeeze(X[...,ch,:,:].data).cpu().numpy()
y = torch.squeeze(Y[...,ch,:,:].data).cpu().numpy()
index = self.index_fn(x, y)
bwindex.append(index)
return bwindex
cal_bwssim = Bandwise(compare_ssim)
cal_bwpsnr = Bandwise(partial(compare_psnr, data_range=1))
def cal_sam(X, Y, eps=1e-8):
X = torch.squeeze(X.data).cpu().numpy()
Y = torch.squeeze(Y.data).cpu().numpy()
tmp = (np.sum(X*Y, axis=0) + eps) / (np.sqrt(np.sum(X**2, axis=0)) + eps) / (np.sqrt(np.sum(Y**2, axis=0)) + eps)
return np.mean(np.real(np.arccos(tmp)))
def MSIQA(X, Y):
psnr = np.mean(cal_bwpsnr(X, Y))
ssim = np.mean(cal_bwssim(X, Y))
sam = cal_sam(X, Y)
return psnr, ssim, sam
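# --- hedged usage sketch (not part of the original file) ---------------------
# MSIQA expects tensors shaped (..., C, H, W) with values in [0, 1]
# (data_range=1 above). Shapes here are assumptions for illustration.
def _demo_msiqa():
    gt = torch.rand(1, 31, 64, 64)
    noisy = (gt + 0.02 * torch.randn_like(gt)).clamp(0, 1)
    psnr, ssim, sam = MSIQA(noisy, gt)
    print('PSNR %.2f  SSIM %.4f  SAM %.4f' % (psnr, ssim, sam))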
| 1,066 | 27.078947 | 121 | py |
SERT | SERT-master/utility/util.py | import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import cv2
import h5py
import os
import random
import threading
from itertools import product
from scipy.io import loadmat, savemat
from functools import partial
from scipy.ndimage import zoom
from matplotlib.widgets import Slider
from PIL import Image
def Data2Volume(data, ksizes, strides):
"""
Construct Volumes from Original High Dimensional (D) Data
"""
dshape = data.shape
PatNum = lambda l, k, s: (np.floor( (l - k) / s ) + 1)
TotalPatNum = 1
for i in range(len(ksizes)):
TotalPatNum = TotalPatNum * PatNum(dshape[i], ksizes[i], strides[i])
    V = np.zeros([int(TotalPatNum)] + ksizes)  # create D+1 dimension volume
args = [range(kz) for kz in ksizes]
for s in product(*args):
s1 = (slice(None),) + s
s2 = tuple([slice(key, -ksizes[i]+key+1 or None, strides[i]) for i, key in enumerate(s)])
V[s1] = np.reshape(data[s2],-1)
return V
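# --- hedged usage sketch (not part of the original file) ---------------------
# Extracts all (31, 16, 16) patches from a (31, 64, 64) cube with spatial
# stride 8: (floor((64-16)/8)+1)^2 = 49 patches. Shapes are illustrative.
def _demo_data2volume():
    data = np.random.rand(31, 64, 64)
    V = Data2Volume(data, ksizes=[31, 16, 16], strides=[1, 8, 8])
    print(V.shape)  # (49, 31, 16, 16)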
def crop_center(img,cropx,cropy):
_,y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[:, starty:starty+cropy,startx:startx+cropx]
def rand_crop(img, cropx, cropy):
_,y,x = img.shape
x1 = random.randint(0, x - cropx)
y1 = random.randint(0, y - cropy)
return img[:, y1:y1+cropy, x1:x1+cropx]
def sequetial_process(*fns):
"""
    Integrate all processing functions
"""
def processor(data):
for f in fns:
data = f(data)
return data
return processor
def minmax_normalize(array):
amin = np.min(array)
amax = np.max(array)
return (array - amin) / (amax - amin)
def minmax_normalize_tensor(array):
    amin = array.min()
    amax = array.max()
return (array - amin) / (amax - amin)
def frame_diff(frames):
diff_frames = frames[1:, ...] - frames[:-1, ...]
return diff_frames
def visualize(filename, matkey, load=loadmat, preprocess=None):
"""
    Visualize a preprocessed hyperspectral image
"""
if not preprocess:
preprocess = lambda identity: identity
mat = load(filename)
data = preprocess(mat[matkey])
print(data.shape)
print(np.max(data), np.min(data))
data = np.squeeze(data[:,:,:])
Visualize3D(data)
# Visualize3D(np.squeeze(data[:,0,:,:]))
def Visualize3D(data, meta=None):
data = np.squeeze(data)
for ch in range(data.shape[0]):
data[ch, ...] = minmax_normalize(data[ch, ...])
print(np.max(data), np.min(data))
ax = plt.subplot(111)
plt.subplots_adjust(left=0.25, bottom=0.25)
frame = 0
# l = plt.imshow(data[frame,:,:])
l = plt.imshow(data[frame,:,:], cmap='gray') #shows 256x256 image, i.e. 0th frame
# plt.colorbar()
axcolor = 'lightgoldenrodyellow'
axframe = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
sframe = Slider(axframe, 'Frame', 0, data.shape[0]-1, valinit=0)
def update(val):
frame = int(np.around(sframe.val))
l.set_data(data[frame,:,:])
if meta is not None:
axframe.set_title(meta[frame])
sframe.on_changed(update)
plt.show()
def data_augmentation(image, mode=None):
"""
Args:
image: np.ndarray, shape: C X H X W
"""
axes = (-2, -1)
flipud = lambda x: x[:, ::-1, :]
if mode is None:
mode = random.randint(0, 7)
if mode == 0:
# original
image = image
elif mode == 1:
# flip up and down
image = flipud(image)
elif mode == 2:
# rotate counterwise 90 degree
image = np.rot90(image, axes=axes)
elif mode == 3:
# rotate 90 degree and flip up and down
image = np.rot90(image, axes=axes)
image = flipud(image)
elif mode == 4:
# rotate 180 degree
image = np.rot90(image, k=2, axes=axes)
elif mode == 5:
# rotate 180 degree and flip
image = np.rot90(image, k=2, axes=axes)
image = flipud(image)
elif mode == 6:
# rotate 270 degree
image = np.rot90(image, k=3, axes=axes)
elif mode == 7:
# rotate 270 degree and flip
image = np.rot90(image, k=3, axes=axes)
image = flipud(image)
# we apply spectrum reversal for training 3D CNN, e.g. QRNN3D.
# disable it when training 2D CNN, e.g. MemNet
if random.random() < 0.5:
image = image[::-1, :, :]
return np.ascontiguousarray(image)
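# --- hedged usage sketch (not part of the original file) ---------------------
# mode=None samples one of the 8 flip/rotate variants; note the extra random
# spectral reversal above (intended for 3D CNNs such as QRNN3D).
def _demo_augmentation():
    img = np.random.rand(31, 32, 32)
    aug = data_augmentation(img, mode=3)   # rot90 + vertical flip
    print(aug.shape)                       # (31, 32, 32)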
class LockedIterator(object):
def __init__(self, it):
self.lock = threading.Lock()
self.it = it.__iter__()
def __iter__(self): return self
def __next__(self):
self.lock.acquire()
try:
return next(self.it)
finally:
self.lock.release()
if __name__ == '__main__':
"""Code Usage Example"""
"""ICVL"""
# hsi_rot = partial(np.rot90, k=-1, axes=(1,2))
# crop = lambda img: img[:,-1024:, -1024:]
# zoom_512 = partial(zoom, zoom=[1, 0.5, 0.5])
# d2v = partial(Data2Volume, ksizes=[31,64,64], strides=[1,28,28])
# preprocess = sequetial_process(hsi_rot, crop, minmax_normalize, d2v)
# preprocess = sequetial_process(hsi_rot, crop, minmax_normalize)
# datadir = 'Data/ICVL/Training/'
# fns = os.listdir(datadir)
# mat = h5py.File(os.path.join(datadir, fns[1]))
# data = preprocess(mat['rad'])
# data = np.linalg.norm(data, ord=2, axis=(1,2))
"""Common"""
# print(data)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.plot(data)
# plt.show()
# preprocess = sequetial_process(hsi_rot, crop, minmax_normalize, frame_diff)
# visualize(os.path.join(datadir, fns[0]), 'rad', load=h5py.File, preprocess=preprocess)
# visualize('Data/BSD/TrainingPatches/imdb_40_128.mat', 'inputs', load=h5py.File, preprocess=None)
# preprocess = lambda x: np.transpose(x[4][0],(2,0,1))
# preprocess = lambda x: minmax_normalize(np.transpose(np.array(x,dtype=np.float),(2,0,1)))
# visualize('/media/kaixuan/DATA/Papers/Code/Data/PIRM18/sample/true_hr', 'hsi', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Data/PIRM18/sample/img_1', 'true_hr', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Matlab/ITSReg/code of ITSReg MSI denoising/data/real/new/Indian/Indian_pines.mat', 'hsi', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Matlab/ECCV2018/Result/Indian/Indian_pines/QRNN3D-f.mat', 'R_hsi', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Matlab/ECCV2018/Data/Pavia/PaviaU', 'input', load=loadmat, preprocess=preprocess)
pass | 6,743 | 28.709251 | 175 | py |
SERT | SERT-master/models/sert.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from pdb import set_trace as stx
import numbers
from einops import rearrange
import numpy as np
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
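# --- hedged sanity check (not part of the original file) ---------------------
# window_partition/window_reverse are exact inverses when H and W are
# multiples of the window size; the sizes below are illustrative.
def _demo_window_roundtrip():
    x = torch.randn(2, 16, 16, 4)                 # (B, H, W, C)
    windows = window_partition(x, 8)              # (2*2*2, 8, 8, 4)
    assert torch.equal(window_reverse(windows, 8, 16, 16), x)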
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def img2windows(img, H_sp, W_sp):
"""
img: B C H W
"""
B, C, H, W = img.shape
img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
img_perm = img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp* W_sp, C)
return img_perm
def windows2img(img_splits_hw, H_sp, W_sp, H, W):
"""
img_splits_hw: B' H W C
"""
B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp))
img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1)
img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return img
class LePEAttention(nn.Module):
def __init__(self, dim, resolution, idx, split_size=7, dim_out=None, num_heads=8, attn_drop=0., qk_scale=None):
super().__init__()
self.dim = dim
self.dim_out = dim_out or dim
self.resolution = resolution
self.split_size = split_size
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
if idx == 0:
H_sp, W_sp = self.resolution, self.split_size
elif idx == 1:
W_sp, H_sp = self.resolution, self.split_size
else:
print ("ERROR MODE", idx)
exit(0)
self.H_sp = H_sp
self.W_sp = W_sp
self.get_v = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1,groups=dim)
self.attn_drop = nn.Dropout(attn_drop)
def im2cswin(self, x):
B, N, C = x.shape
H = W = int(np.sqrt(N))
x = x.transpose(-2,-1).contiguous().view(B, C, H, W)
x = img2windows(x, self.H_sp, self.W_sp)
x = x.reshape(-1, self.H_sp* self.W_sp, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3).contiguous()
return x
def get_lepe(self, x, func):
B, N, C = x.shape
H = W = int(np.sqrt(N))
x = x.transpose(-2,-1).contiguous().view(B, C, H, W)
H_sp, W_sp = self.H_sp, self.W_sp
x = x.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
x = x.permute(0, 2, 4, 1, 3, 5).contiguous().reshape(-1, C, H_sp, W_sp) ### B', C, H', W'
lepe = func(x) ### B', C, H', W'
lepe = lepe.reshape(-1, self.num_heads, C // self.num_heads, H_sp * W_sp).permute(0, 1, 3, 2).contiguous()
x = x.reshape(-1, self.num_heads, C // self.num_heads, self.H_sp* self.W_sp).permute(0, 1, 3, 2).contiguous()
return x, lepe
def forward(self, qkv,mask=None):
"""
x: B L C
"""
q,k,v = qkv[0], qkv[1], qkv[2]
### Img2Window
H = W = self.resolution
B, L, C = q.shape
# assert L == H * W, "flatten img_tokens has wrong size"
q = self.im2cswin(q)
k = self.im2cswin(k)
v, lepe = self.get_lepe(v, self.get_v)
q = q * self.scale
#print(q.shape,k.shape)
attn = (q @ k.transpose(-2, -1)) # B head N C @ B head C N --> B head N N
attn = nn.functional.softmax(attn, dim=-1, dtype=attn.dtype)
attn = self.attn_drop(attn)
x = (attn @ v) + lepe
x = x.transpose(1, 2).reshape(-1, self.H_sp* self.W_sp, C) # B head N N @ B head N C
### Window2Img
x = windows2img(x, self.H_sp, self.W_sp, H, W).view(B, -1, C) # B H' W' C
return x
def flops(self,shape):
flops = 0
H, W = shape
#q, k, v = (B* H//H_sp * W//W_sp) heads H_sp*W_sp C//heads
flops += ( (H//self.H_sp) * (W//self.W_sp)) *self.num_heads* (self.H_sp*self.W_sp)*(self.dim//self.num_heads)*(self.H_sp*self.W_sp)
flops += ( (H//self.H_sp) * (W//self.W_sp)) *self.num_heads* (self.H_sp*self.W_sp)*(self.dim//self.num_heads)*(self.H_sp*self.W_sp)
return flops
class ChannelAttention(nn.Module):
"""Channel attention used in RCAN.
Args:
num_feat (int): Channel number of intermediate features.
squeeze_factor (int): Channel squeeze factor. Default: 16.
"""
def __init__(self, num_feat, squeeze_factor=16,memory_blocks=128):
super(ChannelAttention, self).__init__()
self.pool = nn.AdaptiveAvgPool1d(1)
self.subnet = nn.Sequential(
nn.Linear(num_feat, num_feat // squeeze_factor),
#nn.ReLU(inplace=True)
)
self.upnet= nn.Sequential(
nn.Linear(num_feat // squeeze_factor, num_feat),
#nn.Linear(num_feat, num_feat),
nn.Sigmoid())
self.mb = torch.nn.Parameter(torch.randn(num_feat // squeeze_factor, memory_blocks))
self.low_dim = num_feat // squeeze_factor
def forward(self, x):
b,n,c = x.shape
t = x.transpose(1,2)
y = self.pool(t).squeeze(-1)
low_rank_f = self.subnet(y).unsqueeze(2)
mbg = self.mb.unsqueeze(0).repeat(b, 1, 1)
        f1 = low_rank_f.transpose(1, 2) @ mbg
        f_dic_c = F.softmax(f1 * (int(self.low_dim) ** (-0.5)), dim=-1)  # similarity to memory blocks
        y1 = f_dic_c @ mbg.transpose(1, 2)
y2 = self.upnet(y1)
out = x*y2
return out
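# --- hedged shape sketch (not part of the original file) ----------------------
# The memory bank `mb` stores `memory_blocks` low-rank channel descriptors;
# the pooled channel vector queries it via softmax similarity before the
# sigmoid gating. Token count 64 and dim 96 below are illustrative.
def _demo_channel_attention():
    ca = ChannelAttention(num_feat=96, squeeze_factor=16, memory_blocks=128)
    x = torch.randn(2, 64, 96)         # (B, tokens, C)
    print(ca(x).shape)                 # torch.Size([2, 64, 96])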
class CAB(nn.Module):
def __init__(self, num_feat, compress_ratio=3, squeeze_factor=30,memory_blocks=128):
super(CAB, self).__init__()
self.num_feat = num_feat
self.cab = nn.Sequential(
nn.Linear(num_feat,num_feat // compress_ratio),
nn.GELU(),
nn.Linear(num_feat // compress_ratio, num_feat), ChannelAttention(num_feat, squeeze_factor, memory_blocks) )
def forward(self, x):
return self.cab(x)
def flops(self,shape):
flops = 0
H,W = shape
flops += self.num_feat*H*W
return flops
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=0, qk_scale=None, memory_blocks=128,down_rank=16,weight_factor=0.1,attn_drop=0., proj_drop=0.,split_size=1):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.weight_factor = weight_factor
self.attns = nn.ModuleList([
LePEAttention(
dim//2, resolution=self.window_size[0], idx = i,
split_size=split_size, num_heads=num_heads//2, dim_out=dim//2,
qk_scale=qk_scale, attn_drop=attn_drop)
for i in range(2)])
self.c_attns = CAB(dim,compress_ratio=4,squeeze_factor=down_rank,memory_blocks=memory_blocks) #
#self.c_attns_15 = CAB(dim,compress_ratio=4,squeeze_factor=15)
#self.c_attns = Subspace(dim)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, -1, 3, C).permute(2, 0, 1, 3)
x1 = self.attns[0](qkv[:,:,:,:C//2],mask)
x2 = self.attns[1](qkv[:,:,:,C//2:],mask)
attened_x = torch.cat([x1,x2], dim=2)
attened_x = rearrange(attened_x, 'b n (g d) -> b n ( d g)', g=4)
x3 = self.c_attns(x)
attn = attened_x + self.weight_factor*x3
x = self.proj(attn)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, shape):
# calculate flops for 1 window with token length of N
flops = 0
H,W = shape
# qkv = self.qkv(x)
flops += 2*self.attns[0].flops([H,W])
flops += self.c_attns.flops([H,W])
return flops
class SSMTDA(nn.Module):
r""" Transformer Block.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resulotion.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,split_size=1,drop_path=0.0,weight_factor=0.1,memory_blocks=128,down_rank=16,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., act_layer=nn.GELU):
super(SSMTDA,self).__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.weight_factor=weight_factor
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.attns = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,memory_blocks=memory_blocks,down_rank=down_rank,weight_factor=weight_factor,split_size=split_size,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.num_heads = num_heads
def forward(self, x):
B,C,H,W = x.shape
x = x.flatten(2).transpose(1, 2)
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
attn_windows = self.attns(x_windows)
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self,shape):
flops = 0
H,W = shape
nW = H * W / self.window_size / self.window_size
flops += nW *self.attns.flops([self.window_size,self.window_size])
return flops
class SMSBlock(nn.Module):
def __init__(self,
dim = 90,
window_size=8,
depth=6,
num_head=6,
mlp_ratio=2,
qkv_bias=True, qk_scale=None,
weight_factor=0.1,memory_blocks=128,down_rank=16,
drop_path=0.0,
split_size=1,
):
super(SMSBlock,self).__init__()
self.smsblock = nn.Sequential(*[SSMTDA(dim=dim,input_resolution=window_size, num_heads=num_head, memory_blocks=memory_blocks,window_size=window_size,shift_size=0 if i%2==0 else window_size//2,
weight_factor=weight_factor,down_rank=down_rank,
split_size = split_size,
mlp_ratio=mlp_ratio,
drop_path = drop_path[i],
qkv_bias=qkv_bias, qk_scale=qk_scale,)
for i in range(depth)])
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
def forward(self,x):
out = self.smsblock(x)
out = self.conv(out)+x
return out
def flops(self,shape):
flops = 0
for blk in self.smsblock:
flops += blk.flops(shape)
return flops
class SERT(nn.Module):
def __init__(self,
inp_channels=31,
dim = 90,
window_sizes=[8,8,8,8,8,8],
depths=[ 6,6,6,6,6,6],
num_heads=[ 6,6,6,6,6,6],
split_sizes=[1,1,1,1,1,1],
mlp_ratio=2,down_rank=16,memory_blocks = 256,
qkv_bias=True, qk_scale=None,
bias=False,
drop_path_rate=0.1,
weight_factor = 0.1,
):
super(SERT, self).__init__()
self.conv_first = nn.Conv2d(inp_channels, dim, 3, 1, 1)
self.num_layers = depths
self.layers = nn.ModuleList()
print(len(self.num_layers))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
for i_layer in range(len(self.num_layers)):
layer = SMSBlock(dim = dim,
window_size=window_sizes[i_layer],
depth=depths[i_layer],
num_head=num_heads[i_layer],
weight_factor = weight_factor,down_rank=down_rank,memory_blocks=memory_blocks,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
split_size=split_sizes[i_layer],
drop_path =dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])]
)
self.layers.append(layer)
self.output = nn.Conv2d(int(dim), dim, kernel_size=3, stride=1, padding=1, bias=bias)
self.conv_delasta = nn.Conv2d(dim,inp_channels, 3, 1, 1)
def forward(self, inp_img):
_,_,h_inp,w_inp = inp_img.shape
hb, wb = 16, 16
pad_h = (hb - h_inp % hb) % hb
pad_w = (wb - w_inp % wb) % wb
        inp_img = F.pad(inp_img, (0, pad_w, 0, pad_h), 'reflect')
f1 = self.conv_first(inp_img)
x=f1
for layer in self.layers:
x = layer(x)
x = self.output(x+f1) #+ inp_img
x = self.conv_delasta(x)+inp_img
x = x[:,:,:h_inp,:w_inp]
return x
def flops(self,shape):
flops = 0
for i, layer in enumerate(self.layers):
flops += layer.flops(shape)
return flops
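# --- hedged usage sketch (not part of the original file) ----------------------
# A deliberately tiny config so the forward pass is cheap; the paper-scale
# defaults above use six stages of depth 6. dim=96 (not the default 90) is
# chosen here because the g=4 channel-shuffle rearrange in WindowAttention
# needs dim divisible by 4.
def _demo_sert():
    net = SERT(inp_channels=31, dim=96, window_sizes=[8], depths=[2],
               num_heads=[6], split_sizes=[1], weight_factor=0.1)
    x = torch.randn(1, 31, 64, 64)
    print(net(x).shape)                # torch.Size([1, 31, 64, 64])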
| 17,738 | 34.620482 | 200 | py |
SERT | SERT-master/models/competing_methods/SST.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GSAttention(nn.Module):
"""global spectral attention (GSA)
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads
bias (bool): If True, add a learnable bias to projection
"""
def __init__(self, dim, num_heads, bias):
super(GSAttention, self).__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Conv2d(dim, dim*3, kernel_size=1, bias=bias)
self.project_out = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
def forward(self, x):
b,c,h,w = x.shape
qkv = self.qkv(x)
q,k,v = qkv.chunk(3, dim=1)
q = rearrange(q, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
k = rearrange(k, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
v = rearrange(v, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
out = (attn @ v)
out = rearrange(out, 'b head c (h w) -> b (head c) h w', head=self.num_heads, h=h, w=w)
out = self.project_out(out)
return out
def flops(self,patchresolution):
flops = 0
H, W,C = patchresolution
flops += H* C *W* C
flops += C *C*H*W
return flops
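# --- hedged shape sketch (not part of the original file) ----------------------
# GSAttention attends across channels (each head sees h*w-long channel
# descriptors), so its cost grows with C rather than with spatial size.
# dim=90 with 6 heads matches the defaults used below; the 16x16 spatial
# size is illustrative.
def _demo_gsa():
    attn = GSAttention(dim=90, num_heads=6, bias=False)
    x = torch.randn(1, 90, 16, 16)
    print(attn(x).shape)               # torch.Size([1, 90, 16, 16])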
class NLSA(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super(NLSA,self).__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
#define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SSMA(nn.Module):
r""" Transformer Block:Spatial-Spectral Multi-head self-Attention (SSMA)
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resulotion.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,drop_path=0.0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,act_layer=nn.GELU,bias=False):
super(SSMA,self).__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.attn = NLSA(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
if self.shift_size > 0:
attn_mask = self.calculate_mask(self.input_resolution)
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
self.num_heads = num_heads
self.spectral_attn = GSAttention(dim, num_heads, bias)
def calculate_mask(self, x_size):
# calculate attention mask for SW-MSA
H, W = x_size
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
def forward(self, x):
B,C,H,W = x.shape
x = x.flatten(2).transpose(1, 2)
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
        if self.input_resolution == [H, W]:  # non-local spatial attention
attn_windows = self.attn(x_windows, mask=self.attn_mask)
else:
attn_windows = self.attn(x_windows, mask=self.calculate_mask([H,W]).to(x.device))
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
x = x.transpose(1, 2).view(B, C, H, W)
x = self.spectral_attn(x) #global spectral attention
x = x.flatten(2).transpose(1, 2)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
class SMSBlock(nn.Module):
"""
residual spatial-spectral block (RSSB).
Args:
dim (int, optional): Embedding dim of features. Defaults to 90.
window_size (int, optional): window size of non-local spatial attention. Defaults to 8.
depth (int, optional): numbers of Transformer block at this layer. Defaults to 6.
num_head (int, optional):Number of attention heads. Defaults to 6.
mlp_ratio (int, optional): Ratio of mlp dim. Defaults to 2.
qkv_bias (bool, optional): Learnable bias to query, key, value. Defaults to True.
qk_scale (_type_, optional): The qk scale in non-local spatial attention. Defaults to None.
drop_path (float, optional): drop_rate. Defaults to 0.0.
bias (bool, optional): Defaults to False.
"""
def __init__(self,
dim = 90,
window_size=8,
depth=6,
num_head=6,
mlp_ratio=2,
qkv_bias=True, qk_scale=None,
drop_path=0.0,
bias = False):
super(SMSBlock,self).__init__()
self.smsblock = nn.Sequential(*[SSMA(dim=dim,input_resolution=[64,64], num_heads=num_head, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
drop_path = drop_path[i],
qkv_bias=qkv_bias, qk_scale=qk_scale,bias=bias )
for i in range(depth)])
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
def forward(self,x):
out = self.smsblock(x)
out = self.conv(out)+x
return out
class SST(nn.Module):
"""SST
Spatial-Spectral Transformer for Hyperspectral Image Denoising
Args:
inp_channels (int, optional): Input channels of HSI. Defaults to 31.
dim (int, optional): Embedding dimension. Defaults to 90.
window_size (int, optional): Window size of non-local spatial attention. Defaults to 8.
depths (list, optional): Number of Transformer block at different layers of network. Defaults to [ 6,6,6,6,6,6].
num_heads (list, optional): Number of attention heads in different layers. Defaults to [ 6,6,6,6,6,6].
mlp_ratio (int, optional): Ratio of mlp dim. Defaults to 2.
qkv_bias (bool, optional): Learnable bias to query, key, value. Defaults to True.
qk_scale (_type_, optional): The qk scale in non-local spatial attention. Defaults to None. If it is set to None, the embedding dimension is used to calculate the qk scale.
bias (bool, optional): Defaults to False.
drop_path_rate (float, optional): Stochastic depth rate of drop rate. Defaults to 0.1.
"""
def __init__(self,
inp_channels=31,
dim = 90,
window_size=8,
depths=[ 6,6,6,6,6,6],
num_heads=[ 6,6,6,6,6,6],
mlp_ratio=2,
qkv_bias=True, qk_scale=None,
bias = False,
drop_path_rate=0.1
):
super(SST, self).__init__()
        self.conv_first = nn.Conv2d(inp_channels, dim, 3, 1, 1)  # shallow feature extraction
self.num_layers = depths
self.layers = nn.ModuleList()
print(len(self.num_layers))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
for i_layer in range(len(self.num_layers)):
layer = SMSBlock(dim = dim,
window_size=window_size,
depth=depths[i_layer],
num_head=num_heads[i_layer],
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop_path =dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
bias = bias)
self.layers.append(layer)
self.output = nn.Conv2d(int(dim), dim, kernel_size=3, stride=1, padding=1, bias=bias)
self.conv_delasta = nn.Conv2d(dim,inp_channels, 3, 1, 1) #reconstruction from features
def forward(self, inp_img):
f1 = self.conv_first(inp_img)
x=f1
for layer in self.layers:
x = layer(x)
x = self.output(x+f1)
x = self.conv_delasta(x)+inp_img
return x
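# --- hedged usage sketch (not part of the original file) ----------------------
# Minimal single-stage config; unlike SERT, this forward does no internal
# padding, so H and W must already be multiples of the window size (8).
def _demo_sst():
    net = SST(inp_channels=31, dim=90, window_size=8, depths=[2], num_heads=[6])
    x = torch.randn(1, 31, 64, 64)
    print(net(x).shape)                # torch.Size([1, 31, 64, 64])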
| 16,392 | 39.376847 | 184 | py |
SERT | SERT-master/models/competing_methods/GRNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class conv_relu(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1, padding_mode='zeros', bias=True):
super(conv_relu, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, padding_mode=padding_mode)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.conv(x))
class GSM(nn.Module):
def __init__(self, in_ch):
super(GSM, self).__init__()
self.channel = in_ch
self.conv1 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv3 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv4 = nn.Conv2d(self.channel//2, self.channel, kernel_size=1, stride=1, padding=0)
def forward(self, x):
theta = self.conv1(x)
theta = torch.reshape(theta, (-1, theta.shape[1], theta.shape[2]*theta.shape[3]))
phi = self.conv2(x)
phi = torch.reshape(phi, (-1, phi.shape[1], phi.shape[2]*phi.shape[3]))
g = self.conv3(x)
g = torch.reshape(g, (-1, g.shape[1], g.shape[2]*g.shape[3]))
phi1 = torch.reshape(phi, (-1, phi.shape[1]*phi.shape[2]))
phi1 = F.softmax(phi1, dim=-1)
phi1 = torch.reshape(phi1, phi.shape)
g1 = torch.reshape(g, (-1, g.shape[1]*g.shape[2]))
g1 = F.softmax(g1, dim=-1)
g1 = torch.reshape(g1, g.shape)
phi1 = phi1.transpose(1,2)
y = torch.bmm(theta, phi1)
# print(theta.shape[1]*phi1.shape[1]*phi1.shape[2])
y = torch.bmm(y, g1)
#print(y.shape[1]*g1.shape[1]*g1.shape[2])
# y = torch.bmm(phi1, g1)
# y = torch.bmm(theta, y)
# y = torch.matmul(theta, y)
F_s = torch.reshape(y, (-1, self.channel//2, x.shape[2], x.shape[3]))
res_F = self.conv4(F_s)
return res_F+x
class GCM(nn.Module):
def __init__(self, in_ch):
super(GCM ,self).__init__()
self.channel = in_ch
self.conv1 = nn.Conv2d(self.channel, self.channel//4, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv3 = nn.Conv2d(self.channel//4, self.channel//4, kernel_size=1, stride=1, padding=0)
self.conv4 = nn.Conv2d(self.channel//2, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv5 = nn.Conv2d(self.channel//2, self.channel, kernel_size=1, stride=1, padding=0)
self.relu = nn.ReLU(inplace=True)
# self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
#x shape: [B, C, H, W]
x1 = self.conv1(x) # [B, C/4, H, W]
x1 = torch.reshape(x1, [x1.shape[0], x1.shape[1], -1]) # [B, C/4, H*W]
x2 = self.conv2(x) # [B, C/2, H, W]
x2 = torch.reshape(x2, [x2.shape[0], x2.shape[1], -1]) # [B, C/2, H*W]
x2 = x2.permute((0, 2, 1)) # [B, H*W, C/2]
v = torch.bmm(x1, x2)
# print(x1.shape[1]*x2.shape[1]*x2.shape[2])
# v = torch.matmul(x1, x2) # [B, C/4, C/2]
tmp = torch.reshape(v, (-1, v.shape[1]*v.shape[2]))
tmp = F.softmax(tmp, dim=-1)
v = torch.reshape(tmp, v.shape)
v = torch.unsqueeze(v, dim=3) # [B, C/4, C/2, 1]
n = self.conv3(v) # [B, C/4, C/2, 1]
n = v + n # [B, C/4, C/2, 1]
n = self.relu(n)
n = n.permute((0, 2, 1, 3)) # [B, C/2, C/4, 1]
n = self.conv4(n) # [B, C/2, C/4, 1]
z = torch.squeeze(n, dim=3) # [B, C/2, C/4]
y = torch.bmm(z, x1)
#print(z.shape[1]*x1.shape[1]*x1.shape[2])
# y = torch.matmul(z, x1) # [B, C/2, H*W]
y = torch.unsqueeze(y, dim=3) # [B, C/2, H*W, 1]
y = torch.reshape(y, (y.shape[0], y.shape[1], x.shape[2], x.shape[3])) # [B, C/2, H, W]
x_res = self.conv5(y) # [B, C, H, W]
return x + x_res
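# --- hedged shape sketch (not part of the original file) ----------------------
# GCM builds a C/4 x C/2 channel-relation map from two projections, refines
# it with 1x1 convs, and projects the result back onto the spatial features.
# The 64-channel, 32x32 input is illustrative.
def _demo_gcm():
    gcm = GCM(in_ch=64)
    x = torch.randn(1, 64, 32, 32)
    print(gcm(x).shape)                # torch.Size([1, 64, 32, 32])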
class DCM(nn.Module):
def __init__(self, channel, out_channel=None):
super(DCM, self).__init__()
if out_channel == None:
out_channel = channel
self.conv1 = conv_relu(channel, channel, kernel_size=3, stride=1, padding=1, padding_mode='replicate')
self.conv2 = conv_relu(channel, channel, kernel_size=3, stride=1, padding=1, padding_mode='replicate')
self.conv3 = conv_relu(channel, channel, kernel_size=3, stride=1, padding=1, padding_mode='replicate')
self.conv4 = nn.Conv2d(channel, out_channel, kernel_size=1, stride=1, padding=0, padding_mode='replicate')
def forward(self, x):
c1 = self.conv1(x)
tmp1 = c1 + x
c2 = self.conv2(tmp1)
tmp2 = tmp1 + c2
c3 = self.conv3(tmp2)
tmp3 = tmp2 + c3
c4 = self.conv4(tmp3)
return c4
class BlockEncoder(nn.Module):
def __init__(self, in_ch):
super(BlockEncoder, self).__init__()
self.DCM = DCM(in_ch)
self.GCM = GCM(in_ch)
def forward(self, x):
dcm_x = self.DCM(x)
gcm_x = self.GCM(dcm_x)
return x + gcm_x
class BlockDecoder(nn.Module):
def __init__(self, in_ch):
super(BlockDecoder, self).__init__()
self.GSM = GSM(in_ch)
self.DCM = DCM(in_ch)
def forward(self, x):
gsm_x = self.GSM(x)
dcm_x = self.DCM(gsm_x)
return x + dcm_x
class GRNet(nn.Module):
def __init__(self, in_ch=25):
super(GRNet, self).__init__()
n1 = 64
# filters = [n1, n1 * 2, n1 * 4, n1 * 8]
filters = [64, 64, 64, 64, 64]
self.down0 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
self.down1 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
self.down2 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
# self.Down4 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
# self.Down4 = nn.Conv2d()
self.conv0 = nn.Conv2d(in_ch, filters[0], kernel_size=3, stride=1, padding=1, padding_mode='replicate', bias=True)
self.conv1 = nn.Conv2d(filters[0], filters[0], kernel_size=3, stride=1, padding=1, padding_mode='replicate', bias=True)
self.encoder0 = BlockEncoder(filters[0])
self.encoder1 = BlockEncoder(filters[1])
self.encoder2 = BlockEncoder(filters[2])
self.middle = BlockEncoder(filters[3])
# self.Conv5 = BlockEncoder(filters[4])
# self.Up5 = nn.Conv2d(filters[4]*2, filters[3], kernel_size=3, stride=1, padding=1, bias=True)
self.up_conv2 = conv_relu(filters[2]*2, filters[2], kernel_size=1, padding=0, stride=1, bias=True)
self.decoder2 = BlockDecoder(filters[4])
# self.Up4 = nn.ConvTranspose2d(filters[3], filters[2], kernel_size=2, stride=2, padding=0, bias=True)
# self.Up4 = nn.Conv2d(filters[3]*2, filters[2], kernel_size=3, stride=1, padding=1, bias=True)
        self.up_conv1 = conv_relu(filters[1]*2, filters[1], kernel_size=1, padding=0, stride=1, bias=True)
self.decoder1 = BlockDecoder(filters[3])
# self.Up3 = nn.Conv2d(filters[2]*2, filters[1], kernel_size=3, stride=1, padding=1, bias=True)
self.up_conv0 = conv_relu(filters[0]*2, filters[0], kernel_size=1, padding=0, stride=1, bias=True)
self.decoder0 = BlockDecoder(filters[2])
# self.Up2 = nn.Conv2d(filters[1]*2, filters[0], kernel_size=3, stride=1, padding=1, bias=True)
# self.Up_conv2 = BlockDecoder(filters[1])
self.Conv = nn.Conv2d(filters[0], in_ch, kernel_size=3, padding=1, stride=1, padding_mode='replicate')
def forward(self, x):
basic = self.conv0(x)
basic1 = self.conv1(basic)
encode0 = self.encoder0(basic1)
down0 = self.down0(encode0)
encode1 = self.encoder1(down0)
down1 = self.down1(encode1)
encode2 = self.encoder2(down1)
down2 = self.down2(encode2)
media_end = self.middle(down2)
deblock2 = F.upsample_bilinear(media_end, scale_factor=2)
deblock2 = torch.cat((deblock2, encode2), dim=1)
deblock2 = self.up_conv2(deblock2)
deblock2 = self.decoder2(deblock2)
deblock1 = F.upsample_bilinear(deblock2, scale_factor=2)
deblock1 = torch.cat((deblock1, encode1), dim=1)
deblock1 = self.up_conv1(deblock1)
deblock1 = self.decoder1(deblock1)
deblock0 = F.upsample_bilinear(deblock1, scale_factor=2)
deblock0 = torch.cat((deblock0, encode0), dim=1)
deblock0 = self.up_conv0(deblock0)
deblock0 = self.decoder0(deblock0)
decoding_end = deblock0 + basic
res = self.Conv(decoding_end)
out = x + res
return out
class conv_block(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(negative_slope=0.01, inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(negative_slope=0.01, inplace=True))
self.conv_residual = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=True)
def forward(self, x):
x = self.conv(x) + self.conv_residual(x)
return x
class U_Net_GR(nn.Module):
"""
UNet - Basic Implementation
Paper : https://arxiv.org/abs/1505.04597
"""
def __init__(self, in_ch=34, out_ch=34):
super(U_Net_GR, self).__init__()
n1 = 64
filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
self.Down1 = nn.Conv2d(filters[0], filters[0], kernel_size=4, stride=2, padding=1, bias=True)
self.Down2 = nn.Conv2d(filters[1], filters[1], kernel_size=4, stride=2, padding=1, bias=True)
self.Down3 = nn.Conv2d(filters[2], filters[2], kernel_size=4, stride=2, padding=1, bias=True)
self.Down4 = nn.Conv2d(filters[3], filters[3], kernel_size=4, stride=2, padding=1, bias=True)
self.Conv1 = conv_block(in_ch, filters[0])
self.skip1 = nn.Conv2d(in_ch, filters[0], kernel_size=1, stride=1, padding=0)
self.Conv2 = conv_block(filters[0], filters[1])
self.skip2 = nn.Conv2d(filters[0], filters[1], kernel_size=1, stride=1, padding=0)
self.Conv3 = conv_block(filters[1], filters[2])
self.skip3 = nn.Conv2d(filters[1], filters[2], kernel_size=1, stride=1, padding=0)
self.Conv4 = conv_block(filters[2], filters[3])
self.skip4 = nn.Conv2d(filters[2], filters[3], kernel_size=1, stride=1, padding=0)
self.Conv5 = conv_block(filters[3], filters[4])
self.skip5 = nn.Conv2d(filters[3], filters[4], kernel_size=1, stride=1, padding=0)
self.Up_conv5 = conv_block(filters[4], filters[3])
self.skip_up5 = nn.Conv2d(filters[4], filters[3], kernel_size=1, stride=1, padding=0)
self.Up_conv4 = conv_block(filters[3], filters[2])
self.skip_up4 = nn.Conv2d(filters[3], filters[2], kernel_size=1, stride=1, padding=0)
self.Up_conv3 = conv_block(filters[2], filters[1])
self.skip_up3 = nn.Conv2d(filters[2], filters[1], kernel_size=1, stride=1, padding=0)
self.Up_conv2 = conv_block(filters[1], filters[0])
self.skip_up2 = nn.Conv2d(filters[1], filters[0], kernel_size=1, stride=1, padding=0)
# self.Conv1 = DCM(in_ch, filters[0])
# self.Conv2 = DCM(filters[0], filters[1])
# self.Conv3 = DCM(filters[1], filters[2])
# self.Conv4 = DCM(filters[2], filters[3])
# self.Conv5 = DCM(filters[3], filters[4])
# self.Up_conv5 = DCM(filters[4], filters[3])
# self.Up_conv4 = DCM(filters[3], filters[2])
# self.Up_conv3 = DCM(filters[2], filters[1])
# self.Up_conv2 = DCM(filters[1], filters[0])
self.GCM1 = GCM(filters[0])
self.GCM2 = GCM(filters[1])
self.GCM3 = GCM(filters[2])
self.GCM4 = GCM(filters[3])
self.GCM5 = GCM(filters[4])
self.Up5 = nn.ConvTranspose2d(filters[4], filters[3], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM5 = GSM(filters[4])
self.Up4 = nn.ConvTranspose2d(filters[3], filters[2], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM4 = GSM(filters[3])
self.Up3 = nn.ConvTranspose2d(filters[2], filters[1], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM3 = GSM(filters[2])
self.Up2 = nn.ConvTranspose2d(filters[1], filters[0], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM2 = GSM(filters[1])
self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
e1 = self.GCM1(self.Conv1(x)) + self.skip1(x)
e2 = self.Down1(e1)
e2 = self.GCM2(self.Conv2(e2)) + self.skip2(e2)
e3 = self.Down2(e2)
e3 = self.GCM3(self.Conv3(e3)) + self.skip3(e3)
e4 = self.Down3(e3)
e4 = self.GCM4(self.Conv4(e4)) + self.skip4(e4)
e5 = self.Down4(e4)
e5 = self.GCM5(self.Conv5(e5)) + self.skip5(e5)
d5 = self.Up5(e5)
d5 = torch.cat((e4, d5), dim=1)
d5 = self.Up_conv5(self.GSM5(d5)) + self.skip_up5(d5)
d4 = self.Up4(d5)
d4 = torch.cat((e3, d4), dim=1)
d4 = self.Up_conv4(self.GSM4(d4)) + self.skip_up4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((e2, d3), dim=1)
d3 = self.Up_conv3(self.GSM3(d3)) + self.skip_up3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((e1, d2), dim=1)
d2 = self.Up_conv2(self.GSM2(d2)) + self.skip_up2(d2)
out = self.Conv(d2)
#d1 = self.active(out)
return out+x
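
# Usage sketch (shapes assumed, not from the original authors): U_Net_GR
# predicts a residual and adds it back to the input, so the output shape
# equals the input shape. GCM and GSM are attention blocks defined elsewhere
# in this repo.
# net = U_Net_GR(in_ch=34, out_ch=34)
# y = net(torch.randn(1, 34, 64, 64))  # -> torch.Size([1, 34, 64, 64])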
| 14,247 | 38.359116 | 139 | py |
SERT | SERT-master/models/competing_methods/macnet/MACNet.py | from collections import namedtuple
from .ops.utils import est_noise,count
# from model.qrnn.combinations import *
from .non_local import NLBlockND,EfficientNL
from .combinations import *
Params = namedtuple('Params', ['in_channels', 'channels', 'num_half_layer','rs'])
from skimage.restoration import denoise_nl_means,estimate_sigma
class MACNet(nn.Module):
'''
    Tied LISTA with coupling
'''
    def __init__(self, in_channels=1, channels=16, num_half_layer=5):
super(MACNet, self).__init__()
self.rs = 2
self.net=REDC3DBNRES_NL(in_channels=in_channels,channels=channels,num_half_layer=num_half_layer)
def forward(self, I, writer=None, epoch=None, return_patches=False):
return self.pro_sub(I)
def pro_sub(self, I):
R = list()
Ek = list()
Rw = list()
I_iid = list()
sigma_est = 0
I_size = I.shape
for _I in I:
_I = _I.permute([1, 2, 0])
_, _, w, _Rw = count(_I) # count subspace
_I = torch.matmul(_I, torch.inverse(_Rw).sqrt()) # spectral iid
I_nlm = _I.cpu().numpy()
sigma_est = estimate_sigma(I_nlm, multichannel=True, average_sigmas=True)
I_nlm = denoise_nl_means(I_nlm, patch_size=7, patch_distance=9, h=0.08, multichannel=True,
fast_mode=True, sigma=sigma_est)
I_nlm = torch.FloatTensor(I_nlm).to(device=_I.device)
_R, _Ek, _, _ = count(I_nlm)
if self.rs:
_R = _R // 3
# _R = max(_R, torch.FloatTensor(3).to(I.device))
R.append(_R)
Ek.append(_Ek)
Rw.append(_Rw)
I_iid.append(_I)
dim = max(torch.stack(R).max(), 3)
Ek = torch.stack(Ek, dim=0)
I_iid = torch.stack(I_iid, dim=0)
Ek = Ek[:, :, 0:dim]
Rw = torch.stack(Rw, dim=0)
I_sub = torch.bmm(I_iid.view(I_size[0], -1, I_size[1]), Ek)
I_sub = I_sub.view(I_size[0], I_size[2], I_size[3], -1).permute([0, 3, 1, 2])
CNN_sub = self.net(I_sub.unsqueeze(1)).squeeze(1)
CNN_sub = CNN_sub.view(I_size[0], dim, -1)
output = torch.bmm(Rw.sqrt(), torch.bmm(Ek, CNN_sub))
output = output.view(I_size)
return output
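
# Usage sketch (shapes assumed): MACNet consumes a batch of hyperspectral
# images (N, bands, H, W). pro_sub() whitens each image spectrally, picks the
# signal-subspace rank from a coarse NLM-denoised copy (count/hysime), runs
# the 3-D CNN on the projected cube, and projects back to the full bands.
# model = MACNet(in_channels=1, channels=16, num_half_layer=5)
# out = model(torch.randn(2, 31, 64, 64))  # -> torch.Size([2, 31, 64, 64])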
class REDC3DBNRES_NL(torch.nn.Module):
"""Residual Encoder-Decoder Convolution 3D
Args:
downsample: downsample times, None denotes no downsample"""
def __init__(self, in_channels, channels, num_half_layer, downsample=None):
super(REDC3DBNRES_NL, self).__init__()
# Encoder
# assert downsample is None or 0 < downsample <= num_half_layer
interval = 2
self.feature_extractor = BNReLUConv3d(in_channels, channels)
self.encoder = nn.ModuleList()
for i in range(1, num_half_layer + 1):
if i % interval:
encoder_layer = BNReLUConv3d(channels, channels)
else:
encoder_layer = BNReLUConv3d(channels, 2 * channels, k=3, s=(1, 2, 2), p=1)
channels *= 2
self.encoder.append(encoder_layer)
# Decoder
self.decoder = nn.ModuleList()
for i in range(1, num_half_layer + 1):
if i % interval:
decoder_layer = BNReLUDeConv3d(channels, channels)
else:
decoder_layer = BNReLUUpsampleConv3d(channels, channels // 2)
channels //= 2
self.decoder.append(decoder_layer)
self.reconstructor = BNReLUDeConv3d(channels, in_channels)
# self.enl_1 = EfficientNL(in_channels=channels)
self.enl_2 = EfficientNL(in_channels=channels)
self.enl_3 = EfficientNL(in_channels=1,key_channels=1,value_channels=1,head_count=1)
def forward(self, x):
num_half_layer = len(self.encoder)
xs = [x]
out = self.feature_extractor(xs[0])
xs.append(out)
for i in range(num_half_layer - 1):
out = self.encoder[i](out)
xs.append(out)
out = self.encoder[-1](out)
# out = self.nl_1(out)
out = self.decoder[0](out)
for i in range(1, num_half_layer):
out = out + xs.pop()
out = self.decoder[i](out)
out = self.enl_2(out) + xs.pop()
out = self.reconstructor(out)
out = self.enl_3(out) + xs.pop()
return out
| 4,441 | 38.309735 | 104 | py |
SERT | SERT-master/models/competing_methods/macnet/combinations.py | import torch
import torch.nn as nn
from torch.nn import functional
from models.competing_methods.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
BatchNorm3d = SynchronizedBatchNorm3d
BatchNorm2d=SynchronizedBatchNorm2d
class BNReLUConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
class BNReLUConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUConv2d, self).__init__()
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))
class Conv3dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(Conv3dBNReLU, self).__init__()
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class Conv2dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(Conv2dBNReLU, self).__init__()
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class BNReLUDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUDeConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
class BNReLUDeConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUDeConv2d, self).__init__()
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
class DeConv3dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(DeConv3dBNReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class DeConv2dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
        super(DeConv2dBNReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class ReLUDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(ReLUDeConv3d, self).__init__()
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
class ReLUDeConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(ReLUDeConv2d, self).__init__()
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
class BNReLUUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False):
super(BNReLUUpsampleConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class BNReLUUpsampleConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), inplace=False):
super(BNReLUUpsampleConv2d, self).__init__()
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class UpsampleConv3dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False):
super(UpsampleConv3dBNReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv2dBNReLU(nn.Sequential):
    def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), inplace=False):
super(UpsampleConv2dBNReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class Conv3dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(Conv3dReLU, self).__init__()
        self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
        if bn:
            self.add_module('bn', BatchNorm3d(channels))
        self.add_module('relu', nn.ReLU(inplace=inplace))
class Conv2dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(Conv2dReLU, self).__init__()
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))
if bn:
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class DeConv3dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(DeConv3dReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
if bn:
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class DeConv2dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(DeConv2dReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
if bn:
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv3dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False,bn=False):
super(UpsampleConv3dReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
if bn:
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv2dReLU(nn.Sequential):
    def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), inplace=False):
super(UpsampleConv2dReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv3d(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, upsample=None):
super(UpsampleConv3d, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample, mode='trilinear', align_corners=True)
self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.conv3d(x_in)
return out
class UpsampleConv2d(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, upsample=None):
super(UpsampleConv2d, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample, mode='bilinear', align_corners=True)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.conv2d(x_in)
return out
class BasicConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=bias))
class BasicConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicConv2d, self).__init__()
if bn:
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=bias))
class BasicDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicDeConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=bias))
class BasicDeConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicDeConv2d, self).__init__()
if bn:
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=bias))
class BasicUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), bn=True):
super(BasicUpsampleConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class BasicUpsampleConv2d(nn.Sequential):
    def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), bn=True):
        super(BasicUpsampleConv2d, self).__init__()
        if bn:
            self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
| 11,593 | 48.33617 | 119 | py |
SERT | SERT-master/models/competing_methods/macnet/non_local.py | import torch
from torch import nn
from torch.nn import functional as F
class EfficientNL(nn.Module):
def __init__(self, in_channels, key_channels=None, head_count=None, value_channels=None):
super(EfficientNL, self).__init__()
self.in_channels = in_channels
self.key_channels = key_channels
self.head_count = head_count
self.value_channels = value_channels
        if self.key_channels is None:
            self.key_channels = self.in_channels // 2
        if self.value_channels is None:
            self.value_channels = self.in_channels // 2
        if self.head_count is None:
            self.head_count = 2
self.keys = nn.Conv3d( self.in_channels, self.key_channels, 1)
self.queries = nn.Conv3d( self.in_channels, self.key_channels, 1)
self.values = nn.Conv3d( self.in_channels, self.value_channels, 1)
self.reprojection = nn.Conv3d(self.value_channels, self.in_channels, 1)
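    # Note on forward() below: softmax is taken over key positions (dim=2) and
    # over query channels (dim=1) separately, and context = key @ value^T is
    # formed first, so memory/compute grow linearly with the number of
    # positions instead of quadratically as in a standard non-local block.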
def forward(self, input_):
n, _,c, h, w = input_.size()
keys = self.keys(input_).reshape((n, self.key_channels,-1))
queries = self.queries(input_).reshape(n, self.key_channels, -1)
values = self.values(input_).reshape((n, self.value_channels, -1))
head_key_channels = self.key_channels // self.head_count
head_value_channels = self.value_channels // self.head_count
attended_values = []
for i in range(self.head_count):
key = F.softmax(keys[
:,
i * head_key_channels: (i + 1) * head_key_channels,
:
], dim=2)
query = F.softmax(queries[
:,
i * head_key_channels: (i + 1) * head_key_channels,
:
], dim=1)
value = values[
:,
i * head_value_channels: (i + 1) * head_value_channels,
:
]
context = key @ value.transpose(1, 2)
attended_value = (
context.transpose(1, 2) @ query
).reshape(n, head_value_channels,c, h, w)
attended_values.append(attended_value)
aggregated_values = torch.cat(attended_values, dim=1)
reprojected_value = self.reprojection(aggregated_values)
attention = reprojected_value + input_
return attention
class NLBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, mode='embedded',
dimension=3, bn_layer=True, levels=None):
"""Implementation of Non-Local Block with 4 different pairwise functions
args:
in_channels: original channel size (1024 in the paper)
            inter_channels: channel size inside the block; if not specified, reduced to in_channels // 4 here (the paper halves it to 512)
mode: supports Gaussian, Embedded Gaussian, Dot Product, and Concatenation
dimension: can be 1 (temporal), 2 (spatial), 3 (spatiotemporal)
bn_layer: whether to add batch norm
"""
super(NLBlockND, self).__init__()
assert dimension in [1, 2, 3]
if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:
raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')
self.mode = mode
self.dimension = dimension
self.in_channels = in_channels
self.inter_channels = inter_channels
if levels is not None:
self.ssp=True
self.p = SpatialPyramidPooling(levels=[2*i+1 for i in range(0,levels)])
else:
self.ssp = False
        # the channel size is reduced inside the block (in_channels // 4 here)
if self.inter_channels is None:
self.inter_channels = in_channels // 4
if self.inter_channels == 0:
self.inter_channels = 1
# assign appropriate convolutional, max pool, and batch norm layers for different dimensions
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
# function g in the paper which goes through conv. with kernel size 1
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1)
# add BatchNorm layer after the last conv layer
if bn_layer:
self.W_z = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1),
bn(self.in_channels)
)
nn.init.constant_(self.W_z[1].weight, 0)
nn.init.constant_(self.W_z[1].bias, 0)
else:
self.W_z = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1)
nn.init.constant_(self.W_z.weight, 0)
nn.init.constant_(self.W_z.bias, 0)
# define theta and phi for all operations except gaussian
if self.mode == "embedded" or self.mode == "dot" or self.mode == "concatenate":
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1)
if self.mode == "concatenate":
self.W_f = nn.Sequential(
nn.Conv2d(in_channels=self.inter_channels * 2, out_channels=1, kernel_size=1),
nn.ReLU()
)
# print()
def forward(self, x):
"""
args
x: (N, C, T, H, W) for dimension=3; (N, C, H, W) for dimension 2; (N, C, T) for dimension 1
"""
batch_size,c,t,h,w = x.size()
# (N, C, THW)
g_x = self.g(x).view(batch_size, -1, h,w)
if self.ssp:
g_x = self.p(g_x)
g_x=g_x.view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
# print(self.mode)
if self.mode == "gaussian":
theta_x = x.view(batch_size, self.in_channels, -1)
phi_x = x.view(batch_size, self.in_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
f = torch.matmul(theta_x, phi_x)
elif self.mode == "embedded" or self.mode == "dot":
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
phi_x = self.phi(x).view(batch_size, -1, h,w)
if self.ssp:
phi_x=self.p(phi_x)
phi_x=phi_x.view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
f = torch.matmul(theta_x, phi_x)
elif self.mode == "concatenate":
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, 1, -1)
h = theta_x.size(2)
w = phi_x.size(3)
theta_x = theta_x.repeat(1, 1, 1, w)
phi_x = phi_x.repeat(1, 1, h, 1)
concat = torch.cat([theta_x, phi_x], dim=1)
f = self.W_f(concat)
f = f.view(f.size(0), f.size(2), f.size(3))
if self.mode == "gaussian" or self.mode == "embedded":
f_div_C = F.softmax(f, dim=-1)
elif self.mode == "dot" or self.mode == "concatenate":
N = f.size(-1) # number of position in x
f_div_C = f / N
# print(f_div_C.shape)
# print(g_x.shape)
y = torch.matmul(f_div_C, g_x)
# contiguous here just allocates contiguous chunk of memory
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W_z(y)
# residual connection
z = W_y + x
return z
if __name__ == '__main__':
import torch
# for bn_layer in [True, False]:
# img = torch.zeros(2, 3, 20)
# net = NLBlockND(in_channels=3, mode='concatenate', dimension=1, bn_layer=bn_layer)
# out = net(img)
# print(out.size())
#
# img = torch.zeros(2, 3, 20, 20)
# net = NLBlockND(in_channels=3, mode='concatenate', dimension=2, bn_layer=bn_layer)
# out = net(img)
# print(out.size())
img = torch.randn(1, 16, 31, 512, 512)
net = EfficientNL(in_channels=16)
out = net(img)
print(out.size())
| 8,755 | 40.107981 | 111 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/utils_blocks.py | import torch
import torch.nn.functional as F
from ops.im2col import Im2Col, Col2Im, Col2Cube,Cube2Col
def shape_pad_even(tensor_shape, patch,stride):
assert len(tensor_shape) == 4
b,c,h,w = tensor_shape
required_pad_h = stride - (h-patch) % stride
required_pad_w = stride - (w-patch) % stride
return required_pad_h,required_pad_w
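
# Worked example: for h = 65, patch = 8, stride = 4, (65 - 8) % 4 = 1, so
# required_pad_h = 4 - 1 = 3; the padded height 68 satisfies (68 - 8) % 4 == 0
# and the unfold grid tiles the image exactly.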
class block_module():
def __init__(self,block_size,block_stride, kernel_size, params):
super(block_module).__init__()
self.params = params
self.kernel_size = kernel_size
self.block_size = block_size
self.block_stride = block_stride
# self.channel_size = channel_size
def _make_blocks(self, image, return_padded=False):
'''
:param image: (1,C,H,W)
        :return: raw block (batch,C,block_size,block_size), tuple shape augmented image
'''
params = self.params
self.channel_size = image.shape[1]
if params['pad_block']:
pad = (self.block_size - 1,) * 4
elif params['pad_patch']:
pad = (self.kernel_size,)*4
elif params['no_pad']:
pad = (0,) * 4
elif params['custom_pad'] is not None:
pad = (params['custom_pad'],) * 4
else:
raise NotImplementedError
image_mirror_padded = F.pad(image, pad, mode='reflect')
pad_even = shape_pad_even(image_mirror_padded.shape, self.block_size, self.block_stride)
pad_h, pad_w = pad_even
if params['centered_pad']:
pad_ = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
else:
pad_ =(0, pad_w, 0, pad_h)
pad = tuple([x+y for x,y in zip(pad,pad_)])
self.pad = pad
        image_mirror_padded_even = F.pad(image, pad, mode='reflect')  # add half kernel since block edges are discarded
self.augmented_shape = image_mirror_padded_even.shape
if return_padded:
return image_mirror_padded
batch_blocks = Im2Col(image_mirror_padded_even,
kernel_size=self.block_size,
stride= self.block_stride,
padding=0)
batch_blocks = batch_blocks.permute(2, 0, 1)
batch_blocks = batch_blocks.view(-1, self.channel_size, self.block_size, self.block_size)
return batch_blocks
def _make_cubes(self, image, return_padded=False):
'''
:param image: (1,C,H,W)
        :return: raw blocks (batch, C, block_size, block_size), tuple shape augmented image (note: currently identical to _make_blocks; cube extraction is not wired in)
'''
params = self.params
self.channel_size = image.shape[1]
if params['pad_block']:
pad = (self.block_size - 1,) * 4
elif params['pad_patch']:
pad = (self.kernel_size,)*4
elif params['no_pad']:
pad = (0,) * 4
elif params['custom_pad'] is not None:
pad = (params['custom_pad'],) * 4
else:
raise NotImplementedError
image_mirror_padded = F.pad(image, pad, mode='reflect')
pad_even = shape_pad_even(image_mirror_padded.shape, self.block_size, self.block_stride)
pad_h, pad_w = pad_even
if params['centered_pad']:
pad_ = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
else:
pad_ =(0, pad_w, 0, pad_h)
pad = tuple([x+y for x,y in zip(pad,pad_)])
self.pad = pad
        image_mirror_padded_even = F.pad(image, pad, mode='reflect')  # add half kernel since block edges are discarded
self.augmented_shape = image_mirror_padded_even.shape
if return_padded:
return image_mirror_padded
batch_blocks = Im2Col(image_mirror_padded_even,
kernel_size=self.block_size,
stride= self.block_stride,
padding=0)
batch_blocks = batch_blocks.permute(2, 0, 1)
batch_blocks = batch_blocks.view(-1, self.channel_size, self.block_size, self.block_size)
return batch_blocks
def _agregate_blocks(self,batch_out_blocks):
'''
:param blocks: processed blocks
:return: image of averaged estimates
'''
h_pad, w_pad = self.augmented_shape[2:]
params = self.params
l = self.kernel_size // 2
device = batch_out_blocks.device
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
batch_out_blocks_flatten = batch_out_blocks.view(-1,self.channel_size * self.block_size**2).transpose(0,1).unsqueeze(0)
print(self.block_size)
# print(self.kernel_size)
if params['ponderate_out_blocks']:
if self.kernel_size%2==0:
mask = F.conv_transpose2d(torch.ones((1,1)+(self.block_size - 2 * l,)*2),
torch.ones((1,1)+(self.kernel_size+1,)*2))
else:
mask = F.conv_transpose2d(torch.ones((1, 1) + (self.block_size - 2 * l,) * 2),
torch.ones((1, 1) + (self.kernel_size,) * 2))
mask = mask.to(device=device)
print(batch_out_blocks.shape)
print(mask.shape)
batch_out_blocks *= mask
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
output_padded = Col2Im(batch_out_blocks_flatten,
output_size=(h_pad, w_pad),
kernel_size=self.block_size,
stride=self.block_stride,
padding=0,
avg=False)
batch_out_blocks_ones = torch.ones_like(batch_out_blocks) * mask
# batch_out_blocks_flatten_ones = batch_out_blocks_ones.flatten(2, 3).permute(1, 2, 0)
batch_out_blocks_flatten_ones = batch_out_blocks_ones.view(-1, self.channel_size * self.block_size ** 2).transpose(0,1).unsqueeze(0)
if params['avg']:
mask_ = Col2Im(batch_out_blocks_flatten_ones,
output_size=(h_pad, w_pad),
kernel_size=self.block_size,
stride=self.block_stride,
padding=0,
avg=False)
output_padded /= mask_
elif params['crop_out_blocks']:
kernel_ = self.block_size - 2 * l
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
output_padded = Col2Im(batch_out_blocks_flatten,
output_size=(h_pad - 2 * l, w_pad - 2 * l),
kernel_size=kernel_,
stride=self.block_size,
padding=0,
avg=params['avg'])
elif params['sum_blocks']:
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
output_padded = Col2Im(batch_out_blocks_flatten,
output_size=(h_pad, w_pad),
kernel_size=self.block_size,
stride=self.block_stride,
padding=0,
avg=params['avg'])
else:
raise NotImplementedError
pad = self.pad
output = output_padded[:, :, pad[2]:-pad[3], pad[0]:-pad[1]]
return output
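
# Usage sketch (keys taken from the branches above; `process` is a placeholder
# for any per-block model):
# params = {'pad_block': True, 'pad_patch': False, 'no_pad': False,
#           'custom_pad': None, 'centered_pad': False,
#           'ponderate_out_blocks': True, 'avg': True,
#           'crop_out_blocks': False, 'sum_blocks': False}
# bm = block_module(block_size=56, block_stride=48, kernel_size=7, params=params)
# blocks = bm._make_blocks(img)               # (n_blocks, C, 56, 56)
# out = bm._agregate_blocks(process(blocks))  # back to img's spatial size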
| 7,650 | 39.057592 | 144 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/utils.py | import torch
import torch.nn.functional as F
from random import randint
import argparse
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage.measure import compare_ssim, compare_psnr
from .gauss import fspecial_gauss
from scipy import signal
def kronecker(A, B):
return torch.einsum("ab,cd->acbd", A, B).view(A.size(0)*B.size(0), A.size(1)*B.size(1))
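# Example: kronecker(torch.eye(2), torch.ones(3, 3)) returns a (6, 6) tensor
# with two 3x3 blocks of ones on the diagonal, i.e. the Kronecker product.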
def gen_bayer_mask(h,w):
x = torch.zeros(1, 3, h, w)
x[:, 0, 1::2, 1::2] = 1 # r
x[:, 1, ::2, 1::2] = 1
x[:, 1, 1::2, ::2] = 1 # g
x[:, 2, ::2, ::2] = 1 # b
return x
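# Sketch: one-hot RGB mosaic mask on a 2x2 Bayer tiling, e.g.
# gen_bayer_mask(2, 2)[0].argmax(0) -> tensor([[2, 1], [1, 0]])
# (b at (0,0), g at (0,1) and (1,0), r at (1,1), matching the indices above).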
def togray(tensor):
b, c, h, w = tensor.shape
tensor = tensor.view(b, 3, -1, h, w)
tensor = tensor.sum(1)
return tensor
def torch_to_np(img_var):
return img_var.detach().cpu().numpy()
def plot_tensor(img, **kwargs):
inp_shape = tuple(img.shape)
print(inp_shape)
img_np = torch_to_np(img)
if inp_shape[1]==3:
img_np_ = img_np.transpose([1,2,0])
plt.imshow(img_np_)
elif inp_shape[1]==1:
img_np_ = np.squeeze(img_np)
plt.imshow(img_np_, **kwargs)
else:
# raise NotImplementedError
plt.imshow(img_np, **kwargs)
plt.axis('off')
def get_mask(A):
mask = A.clone().detach()
mask[A != 0] = 1
return mask.byte()
def sparsity(A):
return get_mask(A).sum().item()/A.numel()
def soft_threshold(x, lambd):
return nn.functional.relu(x - lambd,inplace=True) - nn.functional.relu(-x - lambd,inplace=True)
def nn_threshold(x, lambd):
return nn.functional.relu(x - lambd)
def fastSoftThrs(x, lmbda):
return x + 0.5 * (torch.abs(x-torch.abs(lmbda))-torch.abs(x+torch.abs(lmbda)))
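# Worked example: with lambd = 1.0,
# soft_threshold(torch.tensor([-2., -0.5, 0.5, 2.]), 1.0) -> tensor([-1., 0., 0., 1.])
# fastSoftThrs computes the same shrinkage with a single branch-free formula.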
def save_checkpoint(state,ckpt_path):
torch.save(state, ckpt_path)
def generate_key():
return '{}'.format(randint(0, 100000))
def show_mem():
mem = torch.cuda.memory_allocated() * 1e-6
max_mem = torch.cuda.max_memory_allocated() * 1e-6
return mem, max_mem
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def step_lr(optimizer, lr_decay):
lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr * lr_decay
def set_lr(optimizer, lr):
# lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr
def step_lr_als(optimizer, lr_decay):
lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr * lr_decay
optimizer.param_groups[1]['lr'] *= lr_decay
def get_lr(optimizer):
return optimizer.param_groups[0]['lr']
def gen_mask_windows(h, w):
'''
return mask for block window
:param h:
:param w:
:return: (h,w,h,w)
'''
mask = torch.zeros(2 * h, 2 * w, h, w)
for i in range(h):
for j in range(w):
mask[i:i + h, j:j + w, i, j] = 1
return mask[h // 2:-h // 2, w // 2:-w // 2, :, :]
def gen_linear_mask_windows(h, w, h_,w_):
'''
return mask for block window
:param h:
:param w:
:return: (h,w,h,w)
'''
x = torch.ones(1, 1, h - h_ + 1, w - w_ + 1)
k = torch.ones(1, 1, h_, w_)
kernel = F.conv_transpose2d(x, k)
kernel /= kernel.max()
mask = torch.zeros(2 * h, 2 * w, h, w)
for i in range(h):
for j in range(w):
mask[i:i + h, j:j + w, i, j] = kernel
return mask[h // 2:-h // 2, w // 2:-w // 2, :, :]
def gen_quadra_mask_windows(h, w, h_,w_):
'''
return mask for block window
:param h:
:param w:
:return: (h,w,h,w)
'''
x = torch.ones(1, 1, h - h_ + 1, w - w_ + 1)
k = torch.ones(1, 1, h_, w_)
kernel = F.conv_transpose2d(x, k) **2
kernel /= kernel.max()
mask = torch.zeros(2 * h, 2 * w, h, w)
for i in range(h):
for j in range(w):
mask[i:i + h, j:j + w, i, j] = kernel
return mask[h // 2:-h // 2, w // 2:-w // 2, :, :]
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar)
def Init_DCT(n, m):
""" Compute the Overcomplete Discrete Cosinus Transform. """
n=int(n)
m=int(m)
Dictionary = np.zeros((n,m))
for k in range(m):
V = np.cos(np.arange(0, n) * k * np.pi / m)
if k > 0:
V = V - np.mean(V)
Dictionary[:, k] = V / np.linalg.norm(V)
# Dictionary = np.kron(Dictionary, Dictionary)
# Dictionary = Dictionary.dot(np.diag(1 / np.sqrt(np.sum(Dictionary ** 2, axis=0))))
# idx = np.arange(0, n ** 2)
# idx = idx.reshape(n, n, order="F")
# idx = idx.reshape(n ** 2, order="C")
# Dictionary = Dictionary[idx, :]
Dictionary = torch.from_numpy(Dictionary).float()
return Dictionary
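# Example: D = Init_DCT(4, 8) builds an overcomplete 1-D DCT dictionary of
# shape (4, 8); each column is a unit-norm cosine atom, mean-removed for k > 0.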
def est_noise(y, noise_type='additive'):
"""
This function infers the noise in a
hyperspectral data set, by assuming that the
reflectance at a given band is well modelled
by a linear regression on the remaining bands.
Parameters:
y: `numpy array`
a HSI cube ((m*n) x p)
noise_type: `string [optional 'additive'|'poisson']`
Returns: `tuple numpy array, numpy array`
* the noise estimates for every pixel (N x p)
* the noise correlation matrix estimates (p x p)
Copyright:
Jose Nascimento ([email protected]) and Jose Bioucas-Dias ([email protected])
For any comments contact the authors
"""
# def est_additive_noise(r):
# small = 1e-6
# L, N = r.shape
# w=np.zeros((L,N), dtype=np.float)
# RR=np.dot(r,r.T)
# RRi = np.linalg.pinv(RR+small*np.eye(L))
# RRi = np.matrix(RRi)
# for i in range(L):
# XX = RRi - (RRi[:,i]*RRi[i,:]) / RRi[i,i]
# RRa = RR[:,i]
# RRa[i] = 0
# beta = np.dot(XX, RRa)
# beta[0,i]=0;
# w[i,:] = r[i,:] - np.dot(beta,r)
# Rw = np.diag(np.diag(np.dot(w,w.T) / N))
# return w, Rw
def est_additive_noise(r):
small = 1e-6
L, N = r.shape
w=torch.zeros((L,N), dtype=torch.float,device=r.device)
[email protected]
# print((small*torch.eye(L,device=r.device)).device)
temp=RR+small*torch.eye(L,device=r.device)
# print(temp.device)
RRi = torch.inverse(temp)
# RRi = np.matrix(RRi)
for i in range(L):
XX = RRi - (RRi[:,i].unsqueeze(1)*RRi[i,:].unsqueeze(0)) / RRi[i,i]
RRa = RR[:,i]
RRa[i] = 0
            beta = XX @ RRa
            beta[i] = 0
w[i,:] = r[i,:] - beta@r
Rw = torch.diag(torch.diag(([email protected]) / N))
return w, Rw
h, w, numBands = y.shape
y = torch.reshape(y, (w * h, numBands))
# y = np.reshape(y, (w * h, numBands))
y = y.T
L, N = y.shape
# verb = 'poisson'
if noise_type == 'poisson':
sqy = torch.sqrt(y * (y > 0))
u, Ru = est_additive_noise(sqy)
x = (sqy - u) ** 2
w = torch.sqrt(x) * u * 2
Rw = ([email protected]) / N
# additive
else:
w, Rw = est_additive_noise(y)
return w.T, Rw.T
# y = y.T
# L, N = y.shape
# #verb = 'poisson'
# if noise_type == 'poisson':
# sqy = np.sqrt(y * (y > 0))
# u, Ru = est_additive_noise(sqy)
# x = (sqy - u)**2
# w = np.sqrt(x)*u*2
# Rw = np.dot(w,w.T) / N
# # additive
# else:
# w, Rw = est_additive_noise(y)
# return w.T, Rw.T
def hysime(y, n, Rn):
"""
Hyperspectral signal subspace estimation
Parameters:
y: `numpy array`
hyperspectral data set (each row is a pixel)
with ((m*n) x p), where p is the number of bands
and (m*n) the number of pixels.
n: `numpy array`
((m*n) x p) matrix with the noise in each pixel.
Rn: `numpy array`
noise correlation matrix (p x p)
Returns: `tuple integer, numpy array`
* kf signal subspace dimension
* Ek matrix which columns are the eigenvectors that span
the signal subspace.
Copyright:
Jose Nascimento ([email protected]) & Jose Bioucas-Dias ([email protected])
For any comments contact the authors
"""
h, w, numBands = y.shape
y = torch.reshape(y, (w * h, numBands))
y=y.T
n=n.T
Rn=Rn.T
L, N = y.shape
Ln, Nn = n.shape
d1, d2 = Rn.shape
    x = y - n
Ry = [email protected] / N
Rx = [email protected]/ N
E, dx, V =torch.svd(Rx.cpu())
E=E.to(device=y.device)
# print(V)
Rn = Rn+torch.sum(torch.diag(Rx))/L/10**5 * torch.eye(L,device=y.device)
Py = torch.diag(E.T@(Ry@E))
Pn = torch.diag(E.T@(Rn@E))
cost_F = -Py + 2 * Pn
kf = torch.sum(cost_F < 0)
ind_asc = torch.argsort(cost_F)
Ek = E[:, ind_asc[0:kf]]
# h, w, numBands = y.shape
# y = np.reshape(y, (w * h, numBands))
# y = y.T
# n = n.T
# Rn = Rn.T
# L, N = y.shape
# Ln, Nn = n.shape
# d1, d2 = Rn.shape
#
# x = y - n;
#
# Ry = np.dot(y, y.T) / N
# Rx = np.dot(x, x.T) / N
# E, dx, V = np.linalg.svd(Rx)
#
# Rn = Rn + np.sum(np.diag(Rx)) / L / 10 ** 5 * np.eye(L)
# Py = np.diag(np.dot(E.T, np.dot(Ry, E)))
# Pn = np.diag(np.dot(E.T, np.dot(Rn, E)))
# cost_F = -Py + 2 * Pn
# kf = np.sum(cost_F < 0)
# ind_asc = np.argsort(cost_F)
# Ek = E[:, ind_asc[0:kf]]
return kf, E # Ek.T ?
def count(M):
w, Rw = est_noise(M)
kf, Ek = hysime(M, w, Rw)
return kf, Ek, w, Rw
def cal_sam(X, Y, eps=1e-8):
# X = torch.squeeze(X.data).cpu().numpy()
# Y = torch.squeeze(Y.data).cpu().numpy()
tmp = (np.sum(X*Y, axis=0) + eps) / ((np.sqrt(np.sum(X**2, axis=0)) + eps) * (np.sqrt(np.sum(Y**2, axis=0)) + eps)+eps)
return np.mean(np.real(np.arccos(tmp)))
def cal_psnr(im_true, im_test, eps=1e-8):
c,_,_=im_true.shape
bwindex = []
for i in range(c):
bwindex.append(compare_psnr(im_true[i,:,:], im_test[i,:,:]))
return np.mean(bwindex)
def ssim(img1, img2, cs_map=False):
"""Return the Structural Similarity Map corresponding to input images img1
and img2 (images are assumed to be uint8)
This function attempts to mimic precisely the functionality of ssim.m a
MATLAB provided by the author's of SSIM
https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
"""
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
size = 11
sigma = 1.5
window = fspecial_gauss(size, sigma)
K1 = 0.01
K2 = 0.03
L = 255 # bitdepth of image
C1 = (K1 * L) ** 2
C2 = (K2 * L) ** 2
mu1 = signal.fftconvolve(window, img1, mode='valid')
mu2 = signal.fftconvolve(window, img2, mode='valid')
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_sq = signal.fftconvolve(window, img1 * img1, mode='valid') - mu1_sq
sigma2_sq = signal.fftconvolve(window, img2 * img2, mode='valid') - mu2_sq
sigma12 = signal.fftconvolve(window, img1 * img2, mode='valid') - mu1_mu2
if cs_map:
return (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2)),
(2.0 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))
else:
return ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
def cal_ssim(im_true, im_test, eps=1e-8):
# print(im_true.shape)
# print(im_true.shape)
# print(im_test.shape)
# im_true=im_true.cpu().numpy()
# im_test = im_test.cpu().numpy()
c,_,_=im_true.shape
bwindex = []
for i in range(c):
bwindex.append(ssim(im_true[i,:,:]*255, im_test[i,:,:,]*255))
return np.mean(bwindex)
# def cal_ssim(im_true,im_test,eps=13-8):
# c,_,_=im_true.shape
# bwindex = []
# for i in range(c):
# bwindex.append(compare_ssim(im_true[i,:,:], im_test[i,:,:,]))
# return np.mean(bwindex)
# class Bandwise(object):
# def __init__(self, index_fn):
# self.index_fn = index_fn
#
# def __call__(self, X, Y):
# C = X.shape[-3]
# bwindex = []
# for ch in range(C):
# x = torch.squeeze(X[...,ch,:,:].data).cpu().numpy()
# y = torch.squeeze(Y[...,ch,:,:].data).cpu().numpy()
# index = self.index_fn(x, y)
# bwindex.append(index)
# return bwindex
def MSIQA(X, Y):
# print(X.shape)
# print(Y.shape)
psnr = cal_psnr(X, Y)
ssim = cal_ssim(X, Y)
sam = cal_sam(X, Y)
return psnr, ssim, sam
if __name__ == '__main__':
hsi = torch.rand(200,200, 198)
w, Rw=est_noise(hsi)
kf, E= hysime(hsi, w, Rw)
print(kf)
| 13,403 | 27.887931 | 123 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/utils_plot.py | import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from torchvision.utils import make_grid
from ops.im2col import *
from ops.utils import get_mask
def plot_tensor(img, **kwargs):
inp_shape = tuple(img.shape)
print(inp_shape)
img_np = torch_to_np(img)
if inp_shape[1]==3:
img_np_ = img_np.transpose([1,2,0])
plt.imshow(img_np_)
elif inp_shape[1]==1:
img_np_ = np.squeeze(img_np)
plt.imshow(img_np_, **kwargs)
else:
# raise NotImplementedError
plt.imshow(img_np, **kwargs)
plt.axis('off')
def hist_tensor(img,**kwargs):
inp_shape = tuple(img.shape)
print(inp_shape)
img_np = torch_to_np(img)
return plt.hist(img_np.flatten(),**kwargs)
def np_to_torch(img_np):
'''Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
'''
return torch.from_numpy(img_np)[None, :]
def torch_to_np(img_var):
'''Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.detach().cpu().numpy()[0]
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar)
def show_dict(m,a=None, norm_grid=False, sort_freq=True, norm=True):
n_elem,_,s = m.shape
s_ = int(math.sqrt(s))
m=m.view(n_elem,1,s_,s_)
if norm:
m = normalize_patches(m)
if sort_freq:
if a is None:
raise ValueError("provide code array to sort dicts by usage frequency")
idx = sort_patches(a)
m = m[idx]
grid = make_grid(m, normalize=norm_grid, padding=2,nrow=int(math.sqrt(n_elem)))
return grid
def whiten_col(tx,eps=1e-4):
shape = tx.shape
tx = tx.squeeze()
D = torch.mm(tx, tx.t()) / len(tx)
diag, v = torch.symeig(D, eigenvectors=True)
diag[diag < eps] = 1
diag = diag ** 0.5
diag = 1 / diag
S = torch.diag(diag)
out = v @ S @ v.t() @ tx
out = out.view(shape)
return out
def normalize_patches(D):
p=3.5
M=D.max()
m=D.min()
if m>=0:
me = 0
else:
me = D.mean()
sig = torch.sqrt(((D-me)**2).mean())
D=torch.min(torch.max(D, -p*sig),p*sig)
M=D.max()
m=D.min()
D = (D-m)/(M-m)
return D
def sort_patches(a):
code = get_mask(a).float()
code_freq = code.mean([0, 2, 3]).flatten()
_, idx = code_freq.sort(descending=True)
return idx
| 2,976 | 22.626984 | 83 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/im2col.py | from torch.nn import functional as F
import torch
from torch.nn.modules.utils import _pair
import math
def Im2Col(input_tensor, kernel_size, stride, padding,dilation=1,tensorized=False,):
batch = input_tensor.shape[0]
out = F.unfold(input_tensor, kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
if tensorized:
lh,lw = im2col_shape(input_tensor.shape[1:],kernel_size=kernel_size,stride=stride,padding=padding,dilation=dilation)[-2:]
out = out.view(batch,-1,lh,lw)
return out
def Cube2Col(input_tensor, kernel_size, stride, padding,dilation=1,tensorized=False,):
input_sz=input_tensor.shape
if input_sz[1]<kernel_size:
input_tensor = F.pad(input_tensor, (0, 0, 0, 0, kernel_size-input_sz[1], 0), 'constant', 0)
# input_tensor=F.pad(input_tensor,(0,1), mode='replicate')
input_sz=input_tensor.shape
_t=input_sz[1]-kernel_size+1
out=torch.zeros(input_sz[0],kernel_size**3,input_sz[1]-kernel_size+1,input_sz[2]-kernel_size+1,input_sz[3]-kernel_size+1)
for i in range(_t):
ind1=i
ind2=i+kernel_size
out[:,:,i,:,:]=Im2Col(input_tensor[:,ind1:ind2,:,:], kernel_size, stride, padding, dilation, tensorized)
return out
def Col2Cube(input_tensor,output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
batch = input_tensor.shape[0]
_t = output_size[0] - kernel_size + 1
out = torch.zeros([batch,output_size[0],output_size[1],output_size[2]]).to(input_tensor.device)
me=torch.zeros_like(out).to(input_tensor.device)
for i in range(_t):
ind1 = i
ind2 = i + kernel_size
if input_tensorized:
temp_tensor = input_tensor[:,:,i,:,:].flatten(2,3)
out[:,ind1:ind2,:,:] += F.fold(temp_tensor, output_size=output_size[1:], kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
me[:,ind1:ind2,:,:] += F.fold(torch.ones_like(temp_tensor), output_size=output_size[1:], kernel_size=kernel_size,
padding=padding, stride=stride, dilation=dilation)
if avg:
# me[me==0]=1 # !!!!!!!
out = out / me
# me_ = F.conv_transpose2d(torch.ones_like(input_tensor),torch.ones(1,1,kernel_size,kernel_size))
return out
def Col2Im(input_tensor,output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
batch = input_tensor.shape[0]
if input_tensorized:
input_tensor = input_tensor.flatten(2,3)
out = F.fold(input_tensor, output_size=output_size, kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
if avg:
me = F.fold(torch.ones_like(input_tensor), output_size=output_size, kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
# me[me==0]=1 # !!!!!!!
out = out / me
# me_ = F.conv_transpose2d(torch.ones_like(input_tensor),torch.ones(1,1,kernel_size,kernel_size))
return out
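
# Round-trip sketch: when every output pixel is covered by at least one patch,
# Col2Im(Im2Col(x, k, s, p), x.shape[-2:], k, s, p, avg=True) averages the
# overlapping patch contributions and recovers x exactly.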
class Col2Im_(torch.nn.Module):
def __init__(self,input_shape, output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
super(Col2Im_,self).__init__()
xshape = tuple(input_shape)
if input_tensorized:
xshape = xshape[0:2]+(xshape[2]*xshape[3],)
if avg:
me = F.fold(torch.ones(xshape), output_size=output_size, kernel_size=kernel_size,
padding=padding, stride=stride, dilation=dilation)
me[me == 0] = 1
self.me = me
def forward(self, input_tensor,output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
if input_tensorized:
input_tensor = input_tensor.flatten(2, 3)
out = F.fold(input_tensor, output_size=output_size, kernel_size=kernel_size, padding=padding, stride=stride,
dilation=dilation)
if avg:
out /= self.me
return out
# def im2col_shape(size, kernel_size, stride, padding):
# ksize_h, ksize_w = _pair(kernel_size)
# stride_h, stride_w = _pair(stride)
# pad_h, pad_w = _pair(padding)
# n_input_plane, height, width = size
# height_col = (height + 2 * pad_h - ksize_h) // stride_h + 1
# width_col = (width + 2 * pad_w - ksize_w) // stride_w + 1
# return n_input_plane, ksize_h, ksize_w, height_col, width_col
def im2col_shape(size, kernel_size, stride, padding, dilation):
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
dil_h, dil_w = _pair(dilation)
pad_h, pad_w = _pair(padding)
n_input_plane, height, width = size
height_col = (height + 2 * pad_h - dil_h * (ksize_h-1)-1) / stride_h + 1
width_col = (width + 2 * pad_w - dil_w * (ksize_w-1)-1) / stride_w + 1
return n_input_plane, ksize_h, ksize_w, math.floor(height_col), math.floor(width_col)
def col2im_shape(size, kernel_size, stride, padding, input_size=None):
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
n_input_plane, ksize_h, ksize_w, height_col, width_col = size
if input_size is not None:
height, width = input_size
else:
height = (height_col - 1) * stride_h - 2 * pad_h + ksize_h
width = (width_col - 1) * stride_w - 2 * pad_w + ksize_w
return n_input_plane, height, width | 5,405 | 42.248 | 159 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
    Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| 3,226 | 32.968421 | 115 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import numpy as np
from torch.autograd import Variable
def as_numpy(v):
if isinstance(v, Variable):
v = v.data
return v.cpu().numpy()
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
npa, npb = as_numpy(a), as_numpy(b)
self.assertTrue(
np.allclose(npa, npb, atol=atol),
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
)
| 835 | 26.866667 | 157 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
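# e.g. with activations reshaped to (N, C, L): _sum_ft gives per-channel sums
# of shape (C,), and _unsqueeze_ft maps (C,) -> (1, C, 1) so the statistics
# broadcast back over the (N, C, L) input.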
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device
    using only the statistics available on that device, which accelerates the
    computation and is easy to implement, but makes the statistics inaccurate.
    Instead, in this synchronized version, the statistics are computed over
    all training samples distributed on multiple devices.
    Note that, in the one-GPU or CPU-only case, this module behaves exactly
    the same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device
    using only the statistics available on that device, which accelerates the
    computation and is easy to implement, but makes the statistics inaccurate.
    Instead, in this synchronized version, the statistics are computed over
    all training samples distributed on multiple devices.
    Note that, in the one-GPU or CPU-only case, this module behaves exactly
    the same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device
    using only the statistics available on that device, which accelerates the
    computation and is easy to implement, but makes the statistics inaccurate.
    Instead, in this synchronized version, the statistics are computed over
    all training samples distributed on multiple devices.
    Note that, in the one-GPU or CPU-only case, this module behaves exactly
    the same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
| 12,973 | 40.056962 | 116 | py |
SERT | SERT-master/models/competing_methods/qrnn/combinations.py | import torch
import torch.nn as nn
from torch.nn import functional
from models.competing_methods.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
BatchNorm3d = SynchronizedBatchNorm3d
class BNReLUConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
class BNReLUDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUDeConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
class BNReLUUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False):
super(BNReLUUpsampleConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class UpsampleConv3d(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, upsample=None):
super(UpsampleConv3d, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample, mode='trilinear', align_corners=True)
self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
#print(x.shape,self.upsample)
# x_in = torch.zeros((x.shape[0],x.shape[1],x.shape[2]*self.upsample[0],x.shape[3]*self.upsample[1],x.shape[4]*self.upsample[2])).cuda()
out = self.conv3d(x_in)
return out
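# Added usage sketch: upsample-then-convolve as an alternative to
# ConvTranspose3d, per the checkerboard-artifact reference above. The shapes
# below are illustrative assumptions.
def _demo_upsample_conv3d():
    x = torch.randn(2, 16, 31, 32, 32)                 # (N, C, D, H, W)
    up = UpsampleConv3d(16, 8, kernel_size=3, stride=1, padding=1,
                        upsample=(1, 2, 2))            # doubles H and W only
    y = up(x)
    assert y.shape == (2, 8, 31, 64, 64)
    return y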
class BasicConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=bias))
class BasicDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicDeConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=bias))
class BasicUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), bn=True):
super(BasicUpsampleConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
| 3,464 | 42.3125 | 143 | py |
SERT | SERT-master/models/competing_methods/qrnn/resnet.py | import torch
import torch.nn as nn
import numpy as np
import os
if __name__ == '__main__':
from qrnn3d import *
else:
from .qrnn3d import *
class ResQRNN3D(nn.Module):
def __init__(self, in_channels, channels, n_resblocks):
super(ResQRNN3D, self).__init__()
bn = True
act = 'tanh'
# define head module
m_head = [BiQRNNConv3D(in_channels, channels, bn=bn, act=act)]
# define body module
m_body = [
ResBlock(
QRNNConv3D, channels, bn=bn, act=act
) for i in range(n_resblocks)
]
# define tail module
m_tail = [
BiQRNNConv3D(channels, in_channels, bn=bn, act='none')
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
return x
class ResBlock(nn.Module):
def __init__(
self, block, channels, **kwargs):
super(ResBlock, self).__init__()
self.layer1 = block(channels, channels, **kwargs)
self.layer2 = block(channels, channels, **kwargs)
def forward(self, x, reverse=False):
res = self.layer1(x, reverse)
        res = self.layer2(res, not reverse)
res += x
return res
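# Added sanity-check sketch: a tiny ResQRNN3D forward pass. Sizes are
# illustrative assumptions (1 input channel, 8 spectral bands, 16x16 spatial).
def _demo_res_qrnn3d():
    net = ResQRNN3D(in_channels=1, channels=4, n_resblocks=2)
    x = torch.randn(1, 1, 8, 16, 16)                   # (N, C, bands, H, W)
    y = net(x)
    assert y.shape == x.shape                          # shape-preserving residual net
    return y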
| 1,415 | 23 | 70 | py |
SERT | SERT-master/models/competing_methods/qrnn/utils.py | import torch
import torch.nn as nn
class QRNNREDC3D(nn.Module):
def __init__(self, in_channels, channels, num_half_layer, sample_idx,
BiQRNNConv3D=None, BiQRNNDeConv3D=None,
QRNN3DEncoder=None, QRNN3DDecoder=None, is_2d=False, has_ad=True, bn=True, act='tanh', plain=False):
super(QRNNREDC3D, self).__init__()
assert sample_idx is None or isinstance(sample_idx, list)
self.enable_ad = has_ad
if sample_idx is None: sample_idx = []
if is_2d:
self.feature_extractor = BiQRNNConv3D(in_channels, channels, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
self.feature_extractor = BiQRNNConv3D(in_channels, channels, bn=bn, act=act)
self.encoder = QRNN3DEncoder(channels, num_half_layer, sample_idx, is_2d=is_2d, has_ad=has_ad, bn=bn, act=act, plain=plain)
self.decoder = QRNN3DDecoder(channels*(2**len(sample_idx)), num_half_layer, sample_idx, is_2d=is_2d, has_ad=has_ad, bn=bn, act=act, plain=plain)
if act == 'relu':
act = 'none'
if is_2d:
self.reconstructor = BiQRNNDeConv3D(channels, in_channels, bias=True, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
self.reconstructor = BiQRNNDeConv3D(channels, in_channels, bias=True, bn=bn, act=act)
def forward(self, x):
#x = x.unsqueeze(0)
xs = [x]
out = self.feature_extractor(xs[0])
xs.append(out)
if self.enable_ad:
out, reverse = self.encoder(out, xs, reverse=False)
out = self.decoder(out, xs, reverse=(reverse))
else:
out = self.encoder(out, xs)
out = self.decoder(out, xs)
out = out + xs.pop()
out = self.reconstructor(out)
out = out + xs.pop()
return out
class QRNN3DEncoder(nn.Module):
def __init__(self, channels, num_half_layer, sample_idx, QRNNConv3D=None,
is_2d=False, has_ad=True, bn=True, act='tanh', plain=False):
super(QRNN3DEncoder, self).__init__()
# Encoder
self.layers = nn.ModuleList()
self.enable_ad = has_ad
for i in range(num_half_layer):
if i not in sample_idx:
if is_2d:
encoder_layer = QRNNConv3D(channels, channels, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
encoder_layer = QRNNConv3D(channels, channels, bn=bn, act=act)
else:
if is_2d:
encoder_layer = QRNNConv3D(channels, 2*channels, k=(1,3,3), s=(1,2,2), p=(0,1,1), bn=bn, act=act)
else:
if not plain:
encoder_layer = QRNNConv3D(channels, 2*channels, k=3, s=(1,2,2), p=1, bn=bn, act=act)
else:
encoder_layer = QRNNConv3D(channels, 2*channels, k=3, s=(1,1,1), p=1, bn=bn, act=act)
channels *= 2
self.layers.append(encoder_layer)
def forward(self, x, xs, reverse=False):
if not self.enable_ad:
num_half_layer = len(self.layers)
for i in range(num_half_layer-1):
x = self.layers[i](x)
xs.append(x)
x = self.layers[-1](x)
return x
else:
num_half_layer = len(self.layers)
for i in range(num_half_layer-1):
x = self.layers[i](x, reverse=reverse)
reverse = not reverse
xs.append(x)
x = self.layers[-1](x, reverse=reverse)
reverse = not reverse
return x, reverse
class QRNN3DDecoder(nn.Module):
def __init__(self, channels, num_half_layer, sample_idx, QRNNDeConv3D=None, QRNNUpsampleConv3d=None,
is_2d=False, has_ad=True, bn=True, act='tanh', plain=False):
super(QRNN3DDecoder, self).__init__()
# Decoder
self.layers = nn.ModuleList()
self.enable_ad = has_ad
for i in reversed(range(num_half_layer)):
if i not in sample_idx:
if is_2d:
decoder_layer = QRNNDeConv3D(channels, channels, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
decoder_layer = QRNNDeConv3D(channels, channels, bn=bn, act=act)
else:
if is_2d:
decoder_layer = QRNNUpsampleConv3d(channels, channels//2, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
if not plain:
decoder_layer = QRNNUpsampleConv3d(channels, channels//2, bn=bn, act=act)
else:
decoder_layer = QRNNDeConv3D(channels, channels//2, bn=bn, act=act)
channels //= 2
self.layers.append(decoder_layer)
def forward(self, x, xs, reverse=False):
if not self.enable_ad:
num_half_layer = len(self.layers)
x = self.layers[0](x)
for i in range(1, num_half_layer):
x = x + xs.pop()
x = self.layers[i](x)
return x
else:
num_half_layer = len(self.layers)
x = self.layers[0](x, reverse=reverse)
reverse = not reverse
for i in range(1, num_half_layer):
x = x + xs.pop()
x = self.layers[i](x, reverse=reverse)
reverse = not reverse
return x
| 5,623 | 39.753623 | 152 | py |
SERT | SERT-master/models/competing_methods/qrnn/qrnn3d.py | import torch
import torch.nn as nn
import torch.nn.functional as FF
import numpy as np
from functools import partial
if __name__ == '__main__':
from combinations import *
from utils import *
else:
from .combinations import *
from .utils import *
"""F pooling"""
class QRNN3DLayer(nn.Module):
def __init__(self, in_channels, hidden_channels, conv_layer, act='tanh'):
super(QRNN3DLayer, self).__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
# quasi_conv_layer
self.conv = conv_layer
self.act = act
def _conv_step(self, inputs):
gates = self.conv(inputs)
Z, F = gates.split(split_size=self.hidden_channels, dim=1)
if self.act == 'tanh':
return Z.tanh(), F.sigmoid()
elif self.act == 'relu':
return Z.relu(), F.sigmoid()
elif self.act == 'none':
            return Z, F.sigmoid()
else:
raise NotImplementedError
def _rnn_step(self, z, f, h):
# uses 'f pooling' at each time step
h_ = (1 - f) * z if h is None else f * h + (1 - f) * z
return h_
def forward(self, inputs, reverse=False):
h = None
Z, F = self._conv_step(inputs)
h_time = []
if not reverse:
for time, (z, f) in enumerate(zip(Z.split(1, 2), F.split(1, 2))): # split along timestep
h = self._rnn_step(z, f, h)
h_time.append(h)
else:
for time, (z, f) in enumerate((zip(
reversed(Z.split(1, 2)), reversed(F.split(1, 2))
))): # split along timestep
h = self._rnn_step(z, f, h)
h_time.insert(0, h)
# return concatenated hidden states
return torch.cat(h_time, dim=2)
def extra_repr(self):
return 'act={}'.format(self.act)
class BiQRNN3DLayer(QRNN3DLayer):
def _conv_step(self, inputs):
gates = self.conv(inputs)
Z, F1, F2 = gates.split(split_size=self.hidden_channels, dim=1)
if self.act == 'tanh':
return Z.tanh(), F1.sigmoid(), F2.sigmoid()
elif self.act == 'relu':
return Z.relu(), F1.sigmoid(), F2.sigmoid()
elif self.act == 'none':
return Z, F1.sigmoid(), F2.sigmoid()
else:
raise NotImplementedError
def forward(self, inputs, fname=None):
h = None
Z, F1, F2 = self._conv_step(inputs)
hsl = [] ; hsr = []
zs = Z.split(1, 2)
for time, (z, f) in enumerate(zip(zs, F1.split(1, 2))): # split along timestep
h = self._rnn_step(z, f, h)
hsl.append(h)
h = None
for time, (z, f) in enumerate((zip(
reversed(zs), reversed(F2.split(1, 2))
))): # split along timestep
h = self._rnn_step(z, f, h)
hsr.insert(0, h)
# return concatenated hidden states
hsl = torch.cat(hsl, dim=2)
hsr = torch.cat(hsr, dim=2)
if fname is not None:
stats_dict = {'z':Z, 'fl':F1, 'fr':F2, 'hsl':hsl, 'hsr':hsr}
torch.save(stats_dict, fname)
return hsl + hsr
class BiQRNNConv3D(BiQRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bn=True, act='tanh'):
super(BiQRNNConv3D, self).__init__(
in_channels, hidden_channels, BasicConv3d(in_channels, hidden_channels*3, k, s, p, bn=bn), act=act)
class BiQRNNDeConv3D(BiQRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bias=False, bn=True, act='tanh'):
super(BiQRNNDeConv3D, self).__init__(
in_channels, hidden_channels, BasicDeConv3d(in_channels, hidden_channels*3, k, s, p, bias=bias, bn=bn), act=act)
class QRNNConv3D(QRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bn=True, act='tanh'):
super(QRNNConv3D, self).__init__(
in_channels, hidden_channels, BasicConv3d(in_channels, hidden_channels*2, k, s, p, bn=bn), act=act)
class QRNNDeConv3D(QRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bn=True, act='tanh'):
super(QRNNDeConv3D, self).__init__(
in_channels, hidden_channels, BasicDeConv3d(in_channels, hidden_channels*2, k, s, p, bn=bn), act=act)
class QRNNUpsampleConv3d(QRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, upsample=(1,2,2), bn=True, act='tanh'):
super(QRNNUpsampleConv3d, self).__init__(
in_channels, hidden_channels, BasicUpsampleConv3d(in_channels, hidden_channels*2, k, s, p, upsample, bn=bn), act=act)
QRNN3DEncoder = partial(
QRNN3DEncoder,
QRNNConv3D=QRNNConv3D)
QRNN3DDecoder = partial(
QRNN3DDecoder,
QRNNDeConv3D=QRNNDeConv3D,
QRNNUpsampleConv3d=QRNNUpsampleConv3d)
QRNNREDC3D = partial(
QRNNREDC3D,
BiQRNNConv3D=BiQRNNConv3D,
BiQRNNDeConv3D=BiQRNNDeConv3D,
QRNN3DEncoder=QRNN3DEncoder,
QRNN3DDecoder=QRNN3DDecoder
)
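# Added usage sketch for the bound QRNNREDC3D encoder-decoder. The
# hyper-parameters below are illustrative assumptions, not a trained
# configuration; H and W must be divisible by 2**len(sample_idx).
def _demo_qrnnredc3d():
    net = QRNNREDC3D(1, 16, 5, [1, 3])                 # downsample at layers 1 and 3
    x = torch.randn(1, 1, 8, 32, 32)                   # (N, C, bands, H, W)
    y = net(x)
    assert y.shape == x.shape
    return y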
| 5,125 | 32.503268 | 129 | py |
SERT | SERT-master/models/competing_methods/qrnn/redc3d.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
if __name__ == '__main__':
from combinations import *
else:
from .combinations import *
class REDC3D(torch.nn.Module):
"""Residual Encoder-Decoder Convolution 3D
Args:
        downsample: number of downsampling stages; None disables downsampling"""
def __init__(self, in_channels, channels, num_half_layer, downsample=None):
super(REDC3D, self).__init__()
# Encoder
assert downsample is None or 0 < downsample <= num_half_layer
interval = num_half_layer // downsample if downsample else num_half_layer+1
self.feature_extractor = BNReLUConv3d(in_channels, channels)
self.encoder = nn.ModuleList()
for i in range(1, num_half_layer+1):
if i % interval:
encoder_layer = BNReLUConv3d(channels, channels)
else:
encoder_layer = BNReLUConv3d(channels, 2*channels, k=3, s=(1,2,2), p=1)
channels *= 2
self.encoder.append(encoder_layer)
# Decoder
self.decoder = nn.ModuleList()
for i in range(1,num_half_layer+1):
if i % interval:
decoder_layer = BNReLUDeConv3d(channels, channels)
else:
decoder_layer = BNReLUUpsampleConv3d(channels, channels//2)
channels //= 2
self.decoder.append(decoder_layer)
self.reconstructor = BNReLUDeConv3d(channels, in_channels)
def forward(self, x):
num_half_layer = len(self.encoder)
xs = [x]
out = self.feature_extractor(xs[0])
xs.append(out)
for i in range(num_half_layer-1):
out = self.encoder[i](out)
xs.append(out)
out = self.encoder[-1](out)
out = self.decoder[0](out)
for i in range(1, num_half_layer):
out = out + xs.pop()
out = self.decoder[i](out)
out = out + xs.pop()
out = self.reconstructor(out)
out = out + xs.pop()
return out
| 2,115 | 34.864407 | 87 | py |
SERT | SERT-master/models/competing_methods/T3SC/multilayer.py | import logging
import torch
import torch.nn as nn
from models.competing_methods.T3SC import layers
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class MultilayerModel(nn.Module):
def __init__(
self,
channels,
layers,
ssl=0,
n_ssl=0,
ckpt=None,
):
super().__init__()
self.channels = channels
self.layers_params = layers
self.ssl = ssl
self.n_ssl = n_ssl
logger.debug(f"ssl : {self.ssl}, n_ssl : {self.n_ssl}")
self.init_layers()
self.normalized_dict = False
logger.info(f"Using SSL : {self.ssl}")
self.ckpt = ckpt
if self.ckpt is not None:
logger.info(f"Loading ckpt {self.ckpt!r}")
d = torch.load(self.ckpt)
self.load_state_dict(d["state_dict"])
def init_layers(self):
list_layers = []
in_channels = self.channels
for i in range(len(self.layers_params)):
logger.debug(f"Initializing layer {i}")
name = self.layers_params[f"l{i}"]["name"]
params = self.layers_params[f"l{i}"]["params"]
layer_cls = layers.__dict__[name]
layer = layer_cls(
in_channels=in_channels,
**params,
)
in_channels = layer.code_size
list_layers.append(layer)
self.layers = nn.ModuleList(list_layers)
def forward(
self, x, mode=None, img_id=None, sigmas=None, ssl_idx=None, **kwargs
):
assert mode in ["encode", "decode", None], f"Mode {mode!r} unknown"
x = x.float().clone()
if mode in ["encode", None]:
x = self.encode(x, img_id, sigmas=sigmas, ssl_idx=ssl_idx)
if mode in ["decode", None]:
x = self.decode(x, img_id)
return x
def encode(self, x, img_id, sigmas, ssl_idx):
for layer in self.layers:
x = layer(
x,
mode="encode",
img_id=img_id,
sigmas=sigmas,
ssl_idx=ssl_idx,
)
return x
def decode(self, x, img_id):
for layer in self.layers[::-1]:
x = layer(x, mode="decode", img_id=img_id)
return x
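# Added usage sketch (assumption: layer configs are plain nested dicts keyed
# "l0", "l1", ..., exactly as indexed in `init_layers`; the original project
# builds them from a config system that is not shown here, and assumes the
# layers package exports LowRankSCLayer).
def _demo_multilayer_model():
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    cfg = {
        "l0": {
            "name": "LowRankSCLayer",
            "params": dict(code_size=32, patch_side=3, stride=1, K=2, rank=3,
                           patch_centering=False, lbda_init=0.1,
                           lbda_mode="SC"),
        },
    }
    model = MultilayerModel(channels=8, layers=cfg).to(dev)
    x = torch.randn(1, 8, 16, 16, device=dev)          # (N, bands, H, W)
    y = model(x)                                       # encode then decode
    assert y.shape == x.shape
    return y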
| 2,291 | 25.964706 | 76 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/lowrank_sc_layer.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import math
import logging
from models.competing_methods.T3SC.layers.encoding_layer import EncodingLayer
from models.competing_methods.T3SC.layers.soft_thresholding import SoftThresholding
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LowRankSCLayer(EncodingLayer):
def __init__(
self,
patch_side,
stride,
K,
rank,
patch_centering,
lbda_init,
lbda_mode,
beta=0,
ssl=0,
**kwargs,
):
super().__init__(**kwargs)
assert self.in_channels is not None
assert self.code_size is not None
self.patch_side = patch_side
self.stride = stride
self.K = K
self.rank = rank
self.patch_centering = patch_centering
self.lbda_init = lbda_init
self.lbda_mode = lbda_mode
self.patch_size = self.in_channels * self.patch_side ** 2
self.spat_dim = self.patch_side ** 2
self.spec_dim = self.in_channels
self.beta = beta
self.ssl = ssl
# first is spectral, second is spatial
self.init_weights(
[
(self.code_size, self.spec_dim, self.rank),
(self.code_size, self.rank, self.spat_dim),
]
)
self.thresholds = SoftThresholding(
mode=self.lbda_mode,
lbda_init=self.lbda_init,
code_size=self.code_size,
K=self.K,
)
if self.patch_centering and self.patch_side == 1:
raise ValueError(
"Patch centering and 1x1 kernel will result in null patches"
)
        if self.patch_centering:
            ones = torch.ones(
                self.in_channels, 1, self.patch_side, self.patch_side
            )
            self.ker_mean = (ones / self.patch_side ** 2).to(device)
        # ker_divider is needed by _decode for overlap normalization regardless
        # of patch centering, so it is created unconditionally
        self.ker_divider = torch.ones(
            1, 1, self.patch_side, self.patch_side
        ).to(device)
        self.divider = None
if self.beta:
self.beta_estimator = nn.Sequential(
# layer1
nn.Conv2d(
in_channels=1, out_channels=64, kernel_size=5, stride=2
),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
# layer2
nn.Conv2d(
in_channels=64, out_channels=128, kernel_size=3, stride=2
),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
# layer3
nn.Conv2d(
in_channels=128, out_channels=1, kernel_size=3, stride=1
),
nn.Sigmoid(),
)
def init_weights(self, shape):
for w in ["C", "D", "W"]:
setattr(self, w, self.init_param(shape))
def init_param(self, shape):
def init_tensor(shape):
tensor = torch.empty(*shape)
torch.nn.init.kaiming_uniform_(tensor, a=math.sqrt(5))
return tensor
if isinstance(shape, list):
return torch.nn.ParameterList([self.init_param(s) for s in shape])
return torch.nn.Parameter(init_tensor(shape))
def _encode(self, x, sigmas=None, ssl_idx=None, **kwargs):
self.shape_in = x.shape
bs, c, h, w = self.shape_in
if self.beta:
block = min(56, h)
c_w = (w - block) // 2
c_h = (h - block) // 2
to_estimate = x[:, :, c_h : c_h + block, c_w : c_w + block].view(
bs * c, 1, block, block
)
beta = 1 - self.beta_estimator(to_estimate)
# (bs * c, 1)
beta = beta.view(bs, c, 1, 1)
else:
beta = torch.ones((bs, c, 1, 1), device=x.device)
if self.ssl:
# discard error on bands we want to predict
with torch.no_grad():
mask = torch.ones_like(beta)
mask[:, ssl_idx.long()] = 0.0
beta = beta * mask
if self.beta or self.ssl:
# applying beta before or after centering is equivalent
x = x * beta
CT = (self.C[0] @ self.C[1]).view(
self.code_size,
self.in_channels,
self.patch_side,
self.patch_side,
)
if self.patch_centering:
A = F.conv2d(x, CT - CT.mean(dim=[2, 3], keepdim=True))
self.means = F.conv2d(x, self.ker_mean, groups=self.in_channels)
else:
A = F.conv2d(x, CT)
alpha = self.thresholds(A, 0)
D = (self.D[0] @ self.D[1]).view(
self.code_size,
self.in_channels,
self.patch_side,
self.patch_side,
)
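        # K-1 unrolled ISTA-style updates: alpha <- soft_threshold(A + alpha - C^T (D alpha)),
        # where beta re-weights the reconstruction before the analysis convolution.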
for k in range(1, self.K):
D_alpha = F.conv_transpose2d(alpha, D)
D_alpha = D_alpha * beta
alpha = self.thresholds(A + alpha - F.conv2d(D_alpha, CT), k)
return alpha
def _decode(self, alpha, **kwargs):
W = ((self.W[0]) @ self.W[1]).view(
self.code_size,
self.in_channels,
self.patch_side,
self.patch_side,
)
x = F.conv_transpose2d(alpha, W)
if self.patch_centering:
x += F.conv_transpose2d(
self.means,
self.ker_mean * self.patch_side ** 2,
groups=self.in_channels,
)
if self.divider is None or self.divider.shape[-2:] != (x.shape[-2:]):
ones = torch.ones(
1, 1, alpha.shape[2], alpha.shape[3], device=alpha.device
).to(alpha.device)
self.divider = F.conv_transpose2d(ones, self.ker_divider)
x = x / self.divider
return x
| 5,915 | 29.65285 | 83 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/soft_thresholding.py | import torch
import torch.nn as nn
import torch.nn.functional as F
MODES = ["SG", "SC", "MG", "MC"]
class SoftThresholding(nn.Module):
def __init__(self, mode, lbda_init, code_size=None, K=None):
super().__init__()
assert mode in MODES, f"Mode {mode!r} not recognized"
self.mode = mode
if self.mode[1] == "C":
# 1 lambda per channel
lbda_shape = (1, code_size, 1, 1)
else:
# 1 lambda for all channels
lbda_shape = (1, 1, 1, 1)
if self.mode[0] == "M":
# 1 set of lambdas per unfolding
self.lbda = nn.ParameterList(
[
nn.Parameter(lbda_init * torch.ones(*lbda_shape))
for _ in range(K)
]
)
else:
# 1 set of lambdas for all unfoldings
self.lbda = nn.Parameter(lbda_init * torch.ones(*lbda_shape))
def forward(self, x, k=None):
if self.mode[0] == "M":
return self._forward(x, self.lbda[k])
else:
return self._forward(x, self.lbda)
def _forward(self, x, lbda):
return F.relu(x - lbda) - F.relu(-x - lbda)
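# Added worked example: the expression above is the elementwise soft-
# thresholding operator S_l(x) = sign(x) * max(|x| - l, 0).
def _demo_soft_thresholding():
    st = SoftThresholding(mode="SG", lbda_init=0.5)    # one shared lambda
    x = torch.randn(2, 4, 8, 8)
    ref = torch.sign(x) * torch.clamp(x.abs() - 0.5, min=0.0)
    assert torch.allclose(st(x), ref, atol=1e-6)
    return st(x)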
| 1,204 | 27.690476 | 73 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/encoding_layer.py | import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class EncodingLayer(nn.Module):
def __init__(
self,
in_channels=None,
code_size=None,
input_centering=False,
**kwargs,
):
super().__init__()
self.in_channels = in_channels
self.code_size = code_size
self.input_centering = input_centering
def forward(self, x, mode=None, **kwargs):
assert mode in ["encode", "decode", None], f"Mode {mode!r} unknown"
if mode in ["encode", None]:
x = self.encode(x, **kwargs)
if mode in ["decode", None]:
x = self.decode(x, **kwargs)
return x
def encode(self, x, **kwargs):
if self.input_centering:
self.input_means = x.mean(dim=[2, 3], keepdim=True)
x -= self.input_means
x = self._encode(x, **kwargs)
return x
def decode(self, x, **kwargs):
x = self._decode(x, **kwargs)
if self.input_centering:
x += self.input_means
return x
def _encode(self, x, **kwargs):
raise NotImplementedError
def _decode(self, x, **kwargs):
raise NotImplementedError
| 1,251 | 22.622642 | 75 | py |
CamDiff | CamDiff-main/inpainting_diff.py | from diffusers import StableDiffusionInpaintPipeline
import torch
import os
# from einops import repeat
import numpy as np
import time
import argparse
from PIL import Image
import random
# from efficientnet_classification import EfficientnetPipeline
from clip_classification import ClipPipeline
WIDTH = 512
HEIGHT = 512
RATIO = 0.0625
RATIO_MIN = 0.0625
RATIO_MAX = 0.25
LENGTH_RATIO_MIN = 1/5
LENGTH_RATIO_MAX = 5
MASK_RATIO = 0.75
SHRINK = np.sqrt(MASK_RATIO)
PROB = 0.4
MASK_WIDTH = 128
MASK_HEIGHT = 128
def make_mask(mask, image):
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32)/255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
image = np.array(image.convert("RGB"))
image = image.transpose(2,0,1)
# increase mask to box
coord = np.where(mask == 1)
xmin = min(coord[0])
xmax = max(coord[0])
ymin = min(coord[1])
ymax = max(coord[1])
new_image, new_mask, mask_ratio, coord, flag = choose_area(xmin, xmax, ymin, ymax, image)
if flag == 1:
new_image = Image.fromarray(new_image.astype(np.uint8).transpose(1, 2, 0))
mask_image = Image.fromarray(new_mask.astype(np.uint8)*255).convert("RGB")
else:
mask_image = 0
return new_image, mask_image, mask_ratio, coord, flag
def choose_area(xmin, xmax, ymin, ymax, image):
A = np.array([[0, 0], [xmin, ymin]])
B = np.array([[0, ymin], [xmin, ymax]])
C = np.array([[0, ymax], [xmin, WIDTH]])
D = np.array([[xmin, 0], [xmax, ymin]])
E = np.array([[xmin, ymax], [xmax, WIDTH]])
F = np.array([[xmax, 0], [HEIGHT, ymin]])
G = np.array([[xmax, ymin], [HEIGHT, ymax]])
H = np.array([[xmax, ymax], [HEIGHT, WIDTH]])
candidates = [A, B, C, D, E, F, G, H]
random.shuffle(candidates)
flag = 0
for i in candidates:
mask_ratio = (i[1, 0] - i[0, 0]) * (i[1, 1] - i[0, 1]) / (WIDTH * HEIGHT)
        if mask_ratio > RATIO_MIN:  # skip regions whose mask ratio is too small
# Mask is a square, because DM's input size is 512 x 512
if ((i[1, 0] - i[0, 0]) < (i[1, 1] - i[0, 1])):
i[1, 1] = i[0, 1] + (i[1, 0] - i[0, 0])
else:
i[1, 0] = i[0, 0] + (i[1, 1] - i[0, 1])
            if mask_ratio > RATIO_MAX:  # shrink regions whose mask ratio is too big
shrink = np.sqrt(RATIO_MAX / mask_ratio)
x_mid = int((i[1, 0] + i[0, 0]) / 2)
y_mid = int((i[1, 1] + i[0, 1]) / 2)
dx = int((i[1, 0] - i[0, 0]) * shrink)
dy = int((i[1, 1] - i[0, 1]) * shrink)
d = min(dx, dy)
i[0, 0] = int(x_mid - dx / 2)
i[1, 0] = int(x_mid + dx / 2)
i[0, 1] = int(y_mid - dy / 2)
i[1, 1] = int(y_mid + dy / 2)
# new_mask[i[0, 0]:i[1, 0], i[0, 1]:i[1, 1]] = 1
new_image = image[:, i[0, 0]:i[1, 0], i[0, 1]:i[1, 1]]
flag += 1
break
if flag == 1:
new_mask = np.zeros((new_image.shape[1], new_image.shape[2]))
x_mid_mask = int(new_image.shape[1] / 2)
y_mid_mask = int(new_image.shape[2] / 2)
dx_half_mask = int(new_image.shape[1] * SHRINK / 2)
dy_half_mask = int(new_image.shape[2] * SHRINK / 2)
new_mask[(x_mid_mask-dx_half_mask) : (x_mid_mask+dx_half_mask), (y_mid_mask-dy_half_mask):(y_mid_mask+dy_half_mask)] = 1
mask_ratio = (i[1, 0] - i[0, 0]) * (i[1, 1] - i[0, 1]) / (WIDTH * HEIGHT) * MASK_RATIO
else:
new_mask = 0
new_image = 0
mask_ratio = 0
i = 0
return new_image, new_mask, mask_ratio, i, flag
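# Worked example for the shrink logic above (illustrative numbers): a 512x256
# candidate region has mask_ratio = 0.5; it is first squared to 256x256, and
# since 0.5 > RATIO_MAX = 0.25 its sides are scaled by sqrt(0.25 / 0.5) ~ 0.71.
# The final inpainting mask is a centered square covering MASK_RATIO = 75% of
# the chosen crop's area.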
def crop_object(image, mask):
image = np.array(image.convert("RGB"))
image = image.transpose(2,0,1)
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32)/255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# get box info
coord = np.where(mask == 1)
xmin = min(coord[0])
xmax = max(coord[0])
ymin = min(coord[1])
ymax = max(coord[1])
# dimension = RGB image
mask = mask[None]
mask_image = image * (mask > 0.5)
mask_image = Image.fromarray(mask_image[:, xmin:xmax, ymin:ymax].transpose(1, 2, 0))
## Save mask
# mask_image = image * (mask < 0.5)
# mask_image = Image.fromarray(mask_image.transpose(1, 2, 0))
return mask_image
def num_bad_img(images, mask_image, prompt, org_w , org_h, coord, org_image):
del_idx = []
left_images = []
for idx, image in enumerate(images):
test_object = crop_object(image, mask_image)
label, prob = classifier.forward(test_object)
# avoid many types of fish
if "Fish" in label or "fish" in label:
label = "Fish"
if "Frogmouth" in label:
label = "Bird"
# insert the sampled image into the original image
image = image.resize((org_w, org_h))
image = np.array(image.convert("RGB"))
image = image.transpose(2,0,1)
new_image = org_image.copy()
new_image = np.array(new_image.convert("RGB"))
new_image = new_image.transpose(2,0,1)
new_image[:, coord[0, 0]:coord[1, 0], coord[0,1]:coord[1,1]] = image
new_image = Image.fromarray(new_image.transpose(1, 2, 0))
# new_image.save("./image.jpg")
# breakpoint()
if label not in prompt or prob < PROB:
del_idx.append(idx)
else:
left_images.append(new_image)
return len(del_idx), left_images
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--indir",
type=str,
default="./Dataset",
nargs="?",
help="dir containing image-mask pairs (`example.png` and `example_mask.png`)",
)
parser.add_argument(
"--outdir",
type=str,
default="./result",
nargs="?",
help="dir to write results to",
)
parser.add_argument(
"--steps",
type=int,
default=50,
help="number of ddim sampling steps",
)
parser.add_argument(
"-d",
"--device",
default="cuda",
help="computation device to use",
choices=["cpu", "cuda"]
)
opt = parser.parse_args()
data_root = os.path.join(opt.indir, "Imgs")
mask_root = os.path.join(opt.indir, "GT")
images = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
masks = [os.path.join(mask_root, os.path.splitext(os.path.split(file_path)[-1])[0] + '.png') for file_path in images]
print(f"Found {len(masks)} inputs.")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting",
revision="fp16",
torch_dtype=torch.float16,
).to(opt.device)
print("Pretrained model is loaded")
classifier = ClipPipeline(data_root, opt.device)
print("-------------Begin inpainting-------------")
start = time.time()
os.makedirs(opt.outdir, exist_ok=True)
for image_path, mask_path in zip(images, masks):
print(f"Image file: {image_path}")
# breakpoint()
outpath = os.path.join(opt.outdir, os.path.split(image_path)[1])
if len(os.path.split(outpath)[1].split("-")) == 1:
# camo, chameleon, nc4k
prompt = "a " + random.choice(classifier.labels)
else:
prompt = "a " + os.path.split(outpath)[1].split("-")[-2]
print("Prompt: " + prompt)
# avoid many types of fish
if "Fish" in prompt or "fish" in prompt:
prompt = "a Fish"
if "Frogmouth" in prompt:
prompt = "a Bird"
#image and mask_image should be PIL images.
#The mask structure is white for inpainting and black for keeping as is
image = Image.open(image_path)
mask = Image.open(mask_path)
image = image.resize((WIDTH, HEIGHT))
mask= mask.resize((WIDTH, HEIGHT))
print(f"resized to ({WIDTH}, {HEIGHT})")
        # A higher guidance scale encourages the pipeline to generate images
        # closely linked to the text `prompt`, usually at the expense of image quality.
num_samples = 1
guidance_scale= 7.5
seed = 0
for i in range(num_samples):
if len(os.path.split(outpath)[1].split("-")) == 1:
# camo, chameleon, nc4k
prompt = "a " + random.choice(classifier.labels)
seed = random.randint(seed + 1, seed + 10)
# mask position is randomly generated
new_image, mask_image, mask_ratio, coord, flag = make_mask(mask, image)
print(f"mask ratio is {mask_ratio}")
if flag == 0:
print("Remask")
continue
org_w , org_h = mask_image.size
new_image = new_image.resize((WIDTH, HEIGHT))
mask_image= mask_image.resize((WIDTH, HEIGHT))
generator = torch.Generator(device="cuda").manual_seed(seed) # change the seed to get different results
images = pipe(prompt=prompt,
image=new_image,
mask_image=mask_image,
guidance_scale=guidance_scale,
generator=generator,
num_images_per_prompt=1,
).images
num_resamples, images = num_bad_img(images, mask_image, prompt, org_w , org_h, coord, image)
        # guard against an endless resampling loop
        count = 0
        while len(images) < 1 and count < 10:
print(f"Resample {num_resamples} images")
new_image, mask_image, mask_ratio, coord, flag = make_mask(mask, image)
print(f"mask ratio is {mask_ratio}")
if flag == 0:
print("Remask")
continue
org_w , org_h = mask_image.size
new_image = new_image.resize((WIDTH, HEIGHT))
mask_image= mask_image.resize((WIDTH, HEIGHT))
generator = torch.Generator(device="cuda").manual_seed(random.randint(seed + 1, seed + 10))
resample_images = pipe(prompt=prompt,
image=new_image,
mask_image=mask_image,
guidance_scale=guidance_scale,
generator=generator,
num_images_per_prompt=num_resamples,
).images
num_resamples, left_images = num_bad_img(resample_images, mask_image, prompt, org_w , org_h, coord, image)
for img in left_images:
images.append(img)
count += 1
if num_resamples != 1:
subpath = os.path.join(os.path.splitext(outpath)[0] + "-" + str(i) + os.path.splitext(outpath)[1])
images[0].save(subpath)
end = time.time()
print(f"Total time: {end - start}")
| 11,126 | 35.009709 | 128 | py |
CamDiff | CamDiff-main/clip_classification.py | import os
import clip
import torch
import numpy as np
def get_label_list(input_dir):
images = [os.path.join(input_dir, file_path) for file_path in os.listdir(input_dir)]
label_list = []
for image in images:
if len(os.path.split(image)[1].split("-")) == 1:
continue
else:
label = os.path.split(image)[1].split("-")[-2]
if label not in label_list:
label_list.append(label)
return label_list
class ClipPipeline():
def __init__(self, input_dir, device) -> None:
self.device = device
self.model, self.preprocess = clip.load("ViT-B/32", device=device)
self.labels = get_label_list(input_dir)
# self.labels = ["Fish", "Rabbit", "Butterfly", "Bird", "Cat", "Dog", "Duck", "Bee", "Owl", "Frog"]
def forward(self, image):
img = self.preprocess(image).unsqueeze(0).to(self.device)
# labels = get_label_list(input_dir)
txt = clip.tokenize(self.labels).to(self.device)
with torch.no_grad():
image_features = self.model.encode_image(img)
text_features = self.model.encode_text(txt)
logits_per_image, logits_per_text = self.model(img, txt)
probs = logits_per_image.softmax(dim=-1).cpu().numpy()
idx = np.argmax(probs)
print(f"Predicted label {self.labels[idx]} has the probality of {probs[0][idx]*100}%")
label = self.labels[idx]
prob = probs[0][idx]
return label, prob
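# Added usage sketch: zero-shot classification of a cropped object. The paths
# below are illustrative assumptions; labels are parsed from "<...>-<label>-<id>"
# file names in the image folder.
def _demo_clip_pipeline():
    from PIL import Image
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    clf = ClipPipeline("./Dataset/Imgs", dev)
    label, prob = clf.forward(Image.open("example.jpg"))
    return label, prob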
| 1,529 | 32.26087 | 107 | py |
EBM-HEP | EBM-HEP-main/mcmc.py | import torch
def energy_wrapper(nenergy):
'''
Wrapper to facilitate flexible energy function sign
'''
energy = - nenergy
return energy
# Partially based on code from Yilun Du, Improved Contrastive Divergence Training of Energy Based Models.
# https://github.com/yilundu/improved_contrastive_divergence
def hamiltonian(x, v, model):
energy = 0.5 * torch.pow(v, 2).sum(dim=1) + energy_wrapper(model.forward(x).squeeze())
return energy
def leapfrog_step(x, v, model, step_size, num_steps, sample=False, mh=True):
x0 = x
v0 = v
x.requires_grad_(requires_grad=True)
energy = energy_wrapper(model.forward(x))
x_grad = torch.autograd.grad([energy.sum()], [x])[0]
v = v - 0.5 * step_size * x_grad
x_negs = []
for i in range(num_steps):
x.requires_grad_(requires_grad=True)
energy = energy_wrapper(model.forward(x))
if i == num_steps - 1:
x_grad = torch.autograd.grad([energy.sum()], [x], create_graph=True)[0]
v = v - step_size * x_grad
x = x + step_size * v
v = v.detach()
else:
x_grad = torch.autograd.grad([energy.sum()], [x])[0]
v = v - step_size * x_grad
x = x + step_size * v
x = x.detach()
v = v.detach()
if sample:
x_negs.append(x)
if i % 10 == 0:
print(i, hamiltonian(torch.sigmoid(x), v, model).mean(), torch.abs(v).mean(), torch.abs(x_grad).mean())
if mh:
accept = MH_accept(model, x0, x)
x = accept * x + (1 - accept) * x0
v = accept * v + (1 - accept) * v0
x_grad = accept * x_grad
if sample:
return x, torch.stack(x_negs, dim=0), v, x_grad
else:
return x, v, x_grad
def gen_hmc_samples(model, x_neg, num_steps, step_size, sample=False, mh=True):
v = 0.001 * torch.randn_like(x_neg)
if sample:
x_neg, x_negs, v, x_grad = leapfrog_step(x_neg, v, model, step_size, num_steps, sample=sample, mh=mh)
return x_neg, x_negs, x_grad, v
else:
x_neg, v, x_grad = leapfrog_step(x_neg, v, model, step_size, num_steps, sample=sample, mh=mh)
return x_neg, x_grad, v
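# Added usage sketch: sampling from a toy model whose `forward` returns the
# negative energy, matching the sign convention of `energy_wrapper` above.
# The quadratic model is an illustrative assumption; the sampler then targets
# a standard Gaussian.
class _ToyNegEnergy(torch.nn.Module):
    def forward(self, x):
        return -0.5 * (x ** 2).sum(dim=1, keepdim=True)   # -E(x) with E = ||x||^2 / 2
def _demo_hmc():
    x0 = torch.randn(16, 2)
    x, x_grad, v = gen_hmc_samples(_ToyNegEnergy(), x0, num_steps=20,
                                   step_size=0.1, mh=True)
    return x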
####
def MH_accept(model, x0, x1):
'''
Add a Metropolis-Hastings step after HMC to move around the energy landscape
'''
energy0 = energy_wrapper(model.forward(x0))
energy1 = energy_wrapper(model.forward(x1))
likelihood_ratio = torch.exp(-energy1 + energy0)
u = torch.rand_like(likelihood_ratio)
accept = ((u - likelihood_ratio) < 0).float()
return accept | 2,631 | 31.493827 | 115 | py |
EBM-HEP | EBM-HEP-main/ebm_models.py |
import copy
import math
import torch
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.data import Dataset
import torch.optim as optim
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class MLPJet(nn.Module):
def __init__(self, input_dim=80, **kwargs):
super().__init__()
self.mlp = nn.Sequential(
spectral_norm(nn.Linear(input_dim, 512)),
nn.ReLU(),
spectral_norm(nn.Linear(512, 512)),
nn.ReLU(),
spectral_norm(nn.Linear(512, 128)),
nn.ReLU(),
spectral_norm(nn.Linear(128, 64)),
nn.ReLU(),
spectral_norm(nn.Linear(64, 1))
)
def forward(self, x):
x = self.mlp(x)
return x
class Embedder(nn.Module):
def __init__(self, d_in, d_model):
super().__init__()
self.embed = nn.Linear(d_in, d_model)
def forward(self, x):
return self.embed(x)
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
#mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // num_heads
self.h = num_heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out_linear = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
batch_size = q.size(0)
k = self.k_linear(k).view(batch_size, -1, self.h, self.d_k)
q = self.q_linear(q).view(batch_size, -1, self.h, self.d_k)
v = self.v_linear(v).view(batch_size, -1, self.h, self.d_k)
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
v_out = attention(q, k, v, self.d_k, mask, self.dropout)
v_out = v_out.transpose(1,2).contiguous().view(batch_size, -1, self.d_model)
output = self.out_linear(v_out)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=1024, dropout = 0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.act = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.act(self.linear_1(x))
x = self.dropout(x)
x = self.linear_2(x)
return x
class EncoderLayer(nn.Module):
def __init__(self, d_model, num_heads, dff, dropout=0.1):
super().__init__()
self.norm_1 = nn.LayerNorm(d_model)
self.norm_2 = nn.LayerNorm(d_model)
self.attn = MultiHeadAttention(num_heads, d_model)
self.ff = FeedForward(d_model, dff, dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x0 = x
#x = self.norm_1(x)
x = self.attn(x,x,x,mask)
x = x0 + self.dropout_1(x)
x0 = x
#x = self.norm_2(x)
x = self.ff(x)
x = x0 + self.dropout_2(x)
return x
class Encoder(nn.Module):
def __init__(self, num_layers, d_model, num_heads, dff, dropout):
super().__init__()
self.num_layers = num_layers
self.embedding = Embedder(3, d_model)
self.layers = nn.ModuleList([EncoderLayer(d_model, num_heads, dff, dropout) for _ in range(num_layers)])
self.norm = nn.LayerNorm(d_model)
def forward(self, x, mask):
x = self.embedding(x)
for i in range(self.num_layers):
x = self.layers[i](x, mask)
#x = self.norm(x)
return x
class Transformer(nn.Module):
def __init__(self, num_layers=3, d_model=128, num_heads=8, dff=256, rate=0.1, n_output=1):
super().__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff, rate)
self.mlp = nn.Sequential(
nn.Linear(d_model, 500),
Swish(),
nn.Linear(500, 500),
Swish(),
nn.Linear(500, n_output)
)
def _create_padding_mask(self, seq):
seq = torch.sum(seq, 2)
        seq = ~torch.eq(seq, 0)  # True for real constituents, so zero-padded slots get masked in `attention`
seq = torch.unsqueeze(seq, 1)
seq = torch.unsqueeze(seq, 1)
return seq # (batch_size, 1, 1, seq_len)
def forward(self, x, mask=None):
x = x.view(x.shape[0], -1, 3)
if mask is None:
mask = self._create_padding_mask(x)
e_outputs = self.encoder(x, mask)
e_outputs = torch.sum(e_outputs, 1)
output = self.mlp(e_outputs)
return output
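# Added usage sketch: the transformer scores a jet given as a flat
# (log pT, eta, phi) sequence; zero-padded constituents are screened out by
# `_create_padding_mask`. Sizes below are illustrative assumptions.
def _demo_transformer():
    net = Transformer(num_layers=2, d_model=32, num_heads=4, dff=64)
    jets = torch.randn(5, 20 * 3)                      # 5 jets, 20 constituents x 3 features
    scores = net(jets)                                 # one energy-style scalar per jet
    assert scores.shape == (5, 1)
    return scores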
| 6,173 | 29.564356 | 112 | py |
EBM-HEP | EBM-HEP-main/utils.py |
import os
from pathlib import Path
import random
import copy
import h5py
import numpy as np
from numpy import inf
import torch
import torch.nn.functional as F
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar
import uproot_methods
def jet_from_ptetaphi(X, scaled=False):
from sklearn.preprocessing import RobustScaler
def load_attn_train(n_train=None, input_dim=160, scale=False):
f = h5py.File(os.environ["VAE_DIR"] +"qcd_preprocessed.h5", "r")
qcd_train = f["constituents" if "constituents" in f.keys() else "table"]
if n_train:
qcd_train = qcd_train[:n_train, :input_dim]
else:
qcd_train = qcd_train[:, :input_dim]
X = qcd_train
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
v = {}
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
if scale:
scaler = RobustScaler().fit(X)
X = scaler.transform(X)
else:
scaler = None
return X, scaler
if scaled:
input_dim = X.shape[1] // 3 * 4
_, scaler = load_attn_train(n_train=10000, input_dim=input_dim, scale=True)
X = scaler.inverse_transform(X)
X = np.reshape(X, (len(X), -1, 3))
log_pt = X[:,:,0]
eta = X[:,:,1]
phi = X[:,:,2]
pt = np.exp(log_pt)
m = np.zeros_like(pt)
p4 = uproot_methods.TLorentzVectorArray.from_ptetaphim(pt, eta, phi, m)
e = p4.energy
px = p4.x
py = p4.y
pz = p4.z
e = e.reshape(len(e), -1, 1)
px = px.reshape(len(px), -1, 1)
py = py.reshape(len(py), -1, 1)
pz = pz.reshape(len(pz), -1, 1)
X = np.concatenate((e, px, py, pz), -1)
X = X.reshape(len(X), -1)
return X
def jet_e(jet):
E_j = 0.0
Px_j = 0.0
Py_j = 0.0
Pz_j = 0.0
jet = np.reshape(jet, (-1, 4))
E_j, _, _, _ = np.sum(jet, axis=0)
return E_j
def jet_mass(jet):
E_j=0
Px_j=0
Py_j=0
Pz_j=0
jet = np.reshape(jet, (-1, 4))
E_j, Px_j, Py_j, Pz_j = np.sum(jet, axis=0)
if E_j**2 > (Px_j**2 + Py_j**2 + Pz_j**2):
m = np.sqrt(E_j**2 - (Px_j**2 + Py_j**2 + Pz_j**2))
else:
m = 0
return m
def jet_pt(jet):
Px_j=0
Py_j=0
jet = np.reshape(jet, (-1, 4))
n_consti = len(jet)
for i in range(n_consti):
Px_j += jet[i, 1]
Py_j += jet[i ,2]
pt = np.sqrt(Px_j**2 + Py_j**2)
return pt
def jet_girth(jet):  # to be modified: relies on a LorentzVector helper that is not imported in this module
jet = copy.deepcopy(jet)
eta_j=jet["eta"] # just using pseudo-rapidity here
phi_j=jet["phi"]
pt_j=jet["pt"]
m_j=jet["mass"]
j=LorentzVector()
j.set_pt_eta_phi_m(pt_j, eta_j, phi_j, m_j)
rap_j = j.Rapidity() # jet rapidity here
constituents = jet["content"][jet["tree"][:, 0] == -1]
g = 0
for i in range(len(constituents)):
v = LorentzVector(constituents[i])
e=v.E()
pz=v.Pz()
pt=v.Pt()
eta = 0.5 * (np.log(e + pz) - np.log(e - pz)) # using rapidity here
phi=v.phi()
delta_eta=eta-rap_j
delta_phi=phi-phi_j
if (delta_phi)>np.pi:
delta_phi -= 2*np.pi
elif (delta_phi)<-np.pi:
delta_phi += 2*np.pi
dr=np.sqrt(delta_eta**2 + delta_phi**2)
g += pt * dr
g /= pt_j
return g
def plot_jet_image(jets, ax, cmap="Blues"):
'''
Inputs: [n, l]
n: number of jets
l: four-vectors of jet constituents
Four-vectors: (E, Px, Py, Pz)
Outputs: average jet images on (eta, phi) plane
'''
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
#plt.rcParams["figure.figsize"] = (6,6)
a=[]
for i in range(len(jets)):
constituents=jets[i].reshape(-1,4)
jet=constituents.sum(axis=0)
#v=LorentzVector(jet[1], jet[2], jet[3], jet[0])
#pt_j=v.Pt()
pt_j=np.sqrt(jet[1]**2+jet[2]**2)
for c in constituents:
if c[0]<1e-10:
continue
eta=0.5*np.log((c[0]+c[3])/(c[0]-c[3]))
phi=np.arctan2(c[2], c[1])
pt=np.sqrt(c[1]**2+c[2]**2)
#v=LorentzVector(c[1], c[2], c[3], c[0])
#a.append(np.array([v.eta(), v.phi(), v.Pt()/pt_j]))
a.append(np.array([eta, phi, pt/pt_j]))
a=np.vstack(a)
ax.hist2d(a[:, 0], a[:, 1], range=[(-1.0, 1.0), (-1.0,1.0)],
weights=a[:, 2],
bins=50, cmap=cmap, norm=LogNorm())
ax.set_xlabel(r"$\eta$")
ax.set_ylabel(r"$\phi$")
def calc_js_div(real, gen, plot_range=[200, 1000]):
prob_gen = torch.histc(gen, bins=50, min=plot_range[0], max=plot_range[1])
prob_gen = prob_gen / prob_gen.sum()
prob_real = torch.histc(real, bins=50, min=plot_range[0], max=plot_range[1])
prob_real = prob_real / prob_real.sum()
prob_mean = (prob_real + prob_gen) / 2.0
js_div = (F.kl_div(torch.log(prob_mean), prob_real) + F.kl_div(torch.log(prob_mean), prob_gen)) / 2.0
return js_div
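# Added worked example: JS divergence between two histogrammed observables
# (e.g., generated vs. real jet pT) over `plot_range`; the value approaches 0
# when the two distributions match.
def _demo_js_div():
    real = torch.normal(600.0, 50.0, (10000,))
    gen = torch.normal(620.0, 50.0, (10000,))
    return calc_js_div(real, gen, plot_range=[200, 1000])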
class LitProgressBar(TQDMProgressBar):
def on_train_epoch_start(self, trainer, pl_module):
if trainer.current_epoch:
print()
super().on_train_epoch_start(trainer, pl_module)
def get_metrics(self, trainer, pl_module, **kwargs):
# don't show the version number
items = super().get_metrics(trainer, pl_module)
items.pop("v_num", None)
return items
class PeriodicCheckpoint(ModelCheckpoint):
def __init__(self, interval, **kwargs):
super().__init__()
self.interval = interval
def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):
if pl_module.global_step % self.interval == 0:
assert self.dirpath is not None
#current = Path(self.dirpath) / f"{pl_module.global_step // self.interval}-{pl_module.global_step}.ckpt"
current = Path(self.dirpath) / f"e{pl_module.global_step // self.interval}.ckpt"
prev = Path(self.dirpath) / f"{pl_module.global_step - self.interval}.ckpt"
trainer.save_checkpoint(current)
#prev.unlink() | 6,933 | 27.652893 | 116 | py |
EBM-HEP | EBM-HEP-main/ebm_preamble.py | #__all__ = ['utils', 'load_data', 'ebm_models']
import os
import json
import math
import numpy as np
from math import inf
import h5py
import random
import copy
import time, argparse
import timeit
import datetime
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import torch
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.data import Dataset
import torch.optim as optim
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import seaborn as sns
import uproot_methods
from utils import jet_e, jet_pt, jet_mass, jet_from_ptetaphi, plot_jet_image
from utils import calc_js_div
from utils import LitProgressBar, PeriodicCheckpoint
from load_data import *
from ebm_models import Transformer, MLPJet
from mcmc import gen_hmc_samples
CHECKPOINT_PATH = "./tmp"
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
| 1,444 | 27.333333 | 93 | py |
EBM-HEP | EBM-HEP-main/ebm_jet_attn.py | #!/usr/bin/env python
from ebm_preamble import *
FLAGS = {
'max_len': 10000,
'new_sample_rate': 0.05,
'singlestep': False, # for KL improved training, only back-prop through the last LD step
'MH': True, # Metropolis-Hastings step for HMC
'val_steps': 128,
'scaled': False # Input feature scaling
}
def random_sample(n_sample, n_consti):
if FLAGS['scaled']:
rand_logpt = torch.normal(0.0, 1.0, (n_sample, n_consti, 1))
rand_eta = torch.normal(0.0, 1.0, (n_sample, n_consti, 1))
rand_phi = torch.normal(0.0, 1.0, (n_sample, n_consti, 1))
else:
rand_logpt = torch.normal(2.0, 1.0, (n_sample, n_consti, 1))
rand_eta = torch.normal(0.0, 0.1, (n_sample, n_consti, 1))
rand_phi = torch.normal(0.0, 0.2, (n_sample, n_consti, 1))
rand_jets = torch.cat([rand_logpt, rand_eta, rand_phi], dim=-1)
rand_jets = rand_jets.view(n_sample, n_consti*3)
return rand_jets
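# Note (added): these per-feature priors (log-pT ~ N(2, 1), eta ~ N(0, 0.1),
# phi ~ N(0, 0.2) in the unscaled case) initialize the replay buffer and fresh
# MCMC chains; e.g. random_sample(4, 30) returns a (4, 90) tensor of jets.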
class Sampler:
def __init__(self, model, jet_shape, sample_size, max_len=FLAGS['max_len'], kl=False, hmc=False, epsilon=0.005, return_grad=False):
super().__init__()
self.model = model
self.jet_shape = jet_shape
self.sample_size = sample_size
self.max_len = max_len
self.kl = kl
self.hmc = hmc
self.epsilon = epsilon
self.return_grad = return_grad
self.examples = [random_sample(1, jet_shape[0] // 3) for _ in range(sample_size)]
def sample_new_exmps(self, steps=60, step_size=10):
n_new = np.random.binomial(self.sample_size, FLAGS['new_sample_rate'])
n_consti = self.jet_shape[0] // 3
rand_jets = random_sample(n_new, n_consti)
old_jets = torch.cat(random.choices(self.examples, k=self.sample_size-n_new), dim=0)
inp_jets = torch.cat([rand_jets, old_jets], dim=0).detach().to(device)
if self.hmc:
inp_jets, x_grad, v = Sampler.generate_samples(self.model, inp_jets, steps=steps, step_size=step_size, hmc=True)
self.examples = list(inp_jets.to(torch.device("cpu")).chunk(self.sample_size, dim=0)) + self.examples
self.examples = self.examples[:self.max_len]
return inp_jets, x_grad, v
else:
inp_jets, inp_jets_kl, grad_norm = Sampler.generate_samples(self.model, inp_jets, steps=steps, step_size=step_size, kl=self.kl, epsilon=self.epsilon, return_grad=self.return_grad)
self.examples = list(inp_jets.to(torch.device("cpu")).chunk(self.sample_size, dim=0)) + self.examples
self.examples = self.examples[:self.max_len]
return inp_jets, inp_jets_kl, grad_norm
@staticmethod
def generate_samples(model, inp_jets, steps=60, step_size=10, return_jet_per_step=False, return_grad=False, kl=False, hmc=False, epsilon=0.005):
if hmc:
if return_jet_per_step:
im_neg, im_samples, x_grad, v = gen_hmc_samples(model, inp_jets, steps, step_size, sample=True, mh=FLAGS['MH'])
return im_samples, v
else:
im_neg, x_grad, v = gen_hmc_samples(model, inp_jets, steps, step_size, sample=False, mh=FLAGS['MH'])
return im_neg, x_grad, v
else:
is_training = model.training
model.eval()
for p in model.parameters():
p.requires_grad = False
had_gradients_enabled = torch.is_grad_enabled()
torch.set_grad_enabled(True)
inp_jets.requires_grad = True
noise = torch.randn(inp_jets.shape, device=inp_jets.device)
grad_norm = 0.0
jets_per_step = []
            for i in range(steps):
                if i == steps - 1:
                    inp_jets_orig = inp_jets
                # Langevin dynamics: inject Gaussian noise, then take a
                # gradient step downhill in energy (the net outputs -E).
                noise.normal_(0, epsilon)
                inp_jets.data.add_(noise.data)
out_jets = - model.forward(inp_jets.float())
if kl and not FLAGS['singlestep']:
x_grad = torch.autograd.grad([out_jets.sum()], [inp_jets], create_graph=True)[0]
else:
x_grad = torch.autograd.grad([out_jets.sum()], [inp_jets])[0]
inp_jets = inp_jets - step_size * x_grad
grad_norm += x_grad.norm(dim=1)
if return_jet_per_step:
jets_per_step.append(inp_jets.clone().detach())
if i == steps - 1:
if kl:
inp_jets_kl = inp_jets_orig
energy = - model.forward(inp_jets_kl)
x_grad = torch.autograd.grad([energy.sum()], [inp_jets_kl], create_graph=True)[0]
inp_jets_kl = inp_jets_kl - step_size * x_grad
else:
inp_jets_kl = torch.zeros_like(inp_jets)
inp_jets = inp_jets.detach()
for p in model.parameters():
p.requires_grad = True
model.train(is_training)
torch.set_grad_enabled(had_gradients_enabled)
if return_grad:
grad_norm = grad_norm / steps
else:
grad_norm = 0.0
if return_jet_per_step:
return torch.stack(jets_per_step, dim=0), grad_norm
else:
return inp_jets, inp_jets_kl, grad_norm
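# Minimal sketch of the Langevin update performed in generate_samples, shown
# on a toy quadratic energy E(x) = ||x||^2 / 2 (a hypothetical stand-in for
# the Transformer); illustration only, never called by the training code.
def _demo_langevin_step(x, step_size=1.0, epsilon=0.005):
    # add noise first, then step along the negative energy gradient,
    # mirroring the order used in Sampler.generate_samples above
    x = (x + epsilon * torch.randn_like(x)).detach().requires_grad_(True)
    energy = 0.5 * (x ** 2).sum()
    x_grad, = torch.autograd.grad(energy, x)
    return (x - step_size * x_grad).detach()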
class DeepEnergyModel(pl.LightningModule):
def __init__(self, jet_shape, batch_size, steps=60, step_size=10, kl=False, repel=False, hmc=False, epsilon=0.005, alpha=0.1, lr=1e-4, beta1=0.0, **net_args):
super().__init__()
self.save_hyperparameters()
self.jet_shape = jet_shape
self.batch_size = batch_size
self.hmc = hmc
self.epsilon = epsilon
self.net = Transformer(**net_args)
self.sampler = Sampler(self.net, jet_shape=jet_shape, sample_size=batch_size, kl=kl, hmc=hmc, epsilon=epsilon, return_grad=True)
def forward(self, x):
z = self.net(x)
return z
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=self.hparams.lr, betas=(self.hparams.beta1, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.98)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
self.train()
real_jets = batch
small_noise = torch.randn_like(real_jets) * self.epsilon
real_jets = real_jets + small_noise
if self.hparams.hmc:
fake_jets, x_grad, v = self.sampler.sample_new_exmps(steps=self.hparams.steps, step_size=self.hparams.step_size)
else:
fake_jets, fake_jets_kl, v = self.sampler.sample_new_exmps(steps=self.hparams.steps, step_size=self.hparams.step_size)
inp_jets = torch.cat([real_jets, fake_jets], dim=0)
real_out, fake_out = self.net(inp_jets.float()).chunk(2, dim=0)
        # Contrastive divergence: raise the energy of model samples and lower
        # that of real jets, with an L2 regularizer on the raw outputs.
        reg_loss = self.hparams.alpha * (real_out ** 2 + fake_out ** 2).mean()
        cdiv_loss = fake_out.mean() - real_out.mean()
loss = reg_loss + cdiv_loss
if self.hparams.hmc:
v_flat = v.view(v.size(0), -1)
x_grad_flat = x_grad.view(x_grad.size(0), -1)
dot_product = F.normalize(v_flat, dim=1) * F.normalize(x_grad_flat, dim=1)
loss_hmc = torch.abs(dot_product.sum(dim=1)).mean()
loss = loss + 0.1 * loss_hmc
v = v.norm(dim=1)
else:
loss_hmc = torch.zeros(1)
if self.hparams.kl:
self.net.requires_grad_(False)
loss_kl = - self.net.forward(fake_jets_kl)
self.net.requires_grad_(True)
loss = loss + loss_kl.mean()
if self.hparams.repel:
bs = fake_jets_kl.size(0)
fake_jets_flat = fake_jets_kl.view(bs, -1)
if len(self.sampler.examples) > 1000:
compare_batch = torch.cat(random.choices(self.sampler.examples, k=100), dim=0)
compare_batch = torch.Tensor(compare_batch).cuda(0)
compare_flat = compare_batch.view(100, -1)
dist_matrix = torch.norm(fake_jets_flat[:, None, :] - compare_flat[None, :, :], p=2, dim=-1)
loss_repel = torch.log(dist_matrix.min(dim=1)[0]).mean()
loss = loss - 0.3 * loss_repel
else:
loss_repel = torch.zeros(1)
else:
loss_repel = torch.zeros(1)
else:
loss_kl = torch.zeros(1)
loss_repel = torch.zeros(1)
self.log('loss', loss)
self.log('loss_reg', reg_loss)
self.log('loss_cd', cdiv_loss, prog_bar=True)
self.log('loss_kl', loss_kl.mean(), prog_bar=True)
self.log('loss_repel', loss_repel)
self.log('loss_hmc', loss_hmc.mean(), prog_bar=True)
self.log('nenergy_real', real_out.mean())
self.log('nenergy_sample', fake_out.mean())
self.log('train_average_v', v.mean())
return loss
def validation_step(self, batch, batch_idx):
self.eval()
jets, labels = batch
batch_size = len(labels)
qcd = jets[labels==0]
signal = jets[labels==1]
jets = torch.cat([qcd, signal], dim=0)
qcd_out, signal_out = self.net(jets.float()).chunk(2, dim=0)
cdiv_top = signal_out.mean() - qcd_out.mean()
y_pred = np.concatenate((-qcd_out.cpu(), -signal_out.cpu()))
y_true = np.concatenate((np.zeros_like(qcd_out.cpu()), np.ones_like(signal_out.cpu())))
auc = roc_auc_score(y_true, y_pred)
n_consti = self.jet_shape[0] // 3
random_jets = random_sample(batch_size, n_consti).to(device)
random_out = self.net(random_jets.float())
cdiv_random = random_out.mean() - qcd_out.mean()
self.log('val_cd_top', cdiv_top, prog_bar=True)
self.log('val_cd_random', cdiv_random, prog_bar=True)
self.log('val_nenergy_top', signal_out.mean())
self.log('val_nenergy_qcd', qcd_out.mean())
self.log('val_auc_top', auc, prog_bar=True)
self.log('hp_metric', auc)
init_samples = random_sample(batch_size, n_consti).to(self.device)
torch.set_grad_enabled(True)
if self.hparams.hmc:
gen_samples, x_grad, v = self.sampler.generate_samples(self.net, init_samples, steps=FLAGS['val_steps'], step_size=self.hparams.step_size, hmc=True)
else:
gen_samples, _, _ = self.sampler.generate_samples(self.net, init_samples, steps=FLAGS['val_steps'], step_size=self.hparams.step_size, kl=False, hmc=False) # turn off KL for saving memory and faster generation
torch.set_grad_enabled(False)
gen_out = self.net(gen_samples)
cdiv_gen = gen_out.mean() - qcd_out.mean()
self.log('val_cd_gen', cdiv_gen, prog_bar=True)
gen_samples = jet_from_ptetaphi(gen_samples.cpu(), scaled=FLAGS['scaled'])
qcd = jet_from_ptetaphi(qcd.cpu(), scaled=FLAGS['scaled'])
gen_pts = list(map(jet_pt, gen_samples))
gen_pts = torch.tensor(gen_pts)
real_pts = list(map(jet_pt, qcd))
real_pts = torch.tensor(real_pts)
js_pt = calc_js_div(real_pts, gen_pts, plot_range=[200, 1000])
self.log('val_JS_pt', js_pt, prog_bar=True)
gen_ms = list(map(jet_mass, gen_samples))
gen_ms = torch.tensor(gen_ms)
real_ms = list(map(jet_mass, qcd))
real_ms = torch.tensor(real_ms)
js_m = calc_js_div(real_ms, gen_ms, plot_range=[0, 500])
self.log('val_JS_m', js_m, prog_bar=True)
self.log('val_JS_avg', (js_pt + js_m)/2.0, prog_bar=True)
def get_progress_bar_dict(self):
tqdm_dict = super().get_progress_bar_dict()
tqdm_dict.pop("v_num", None)
return tqdm_dict
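# Illustrative-only sketch of the contrastive-divergence objective used in
# training_step, with hypothetical scalar outputs (the net returns negative
# energies, so real jets should end up scoring higher than samples):
def _demo_cdiv_loss(alpha=0.1):
    real_out = torch.tensor([1.2, 0.9])   # hypothetical -E(real jets)
    fake_out = torch.tensor([-0.3, 0.1])  # hypothetical -E(sampled jets)
    reg_loss = alpha * (real_out ** 2 + fake_out ** 2).mean()
    cdiv_loss = fake_out.mean() - real_out.mean()
    return reg_loss + cdiv_loss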
def train_model(train_loader, val_loader, model_name, epochs, **kwargs):
default_path = os.path.join(CHECKPOINT_PATH, "attn", datetime.datetime.now().strftime("%m%d-%H%M%S")+"_"+model_name)
tb_logger = TensorBoardLogger(default_path, name=None, version=None)
trainer = pl.Trainer(logger=tb_logger,
gpus=-1 if str(device).startswith("cuda") else 0, # set gpus=-1 to use all available gpus
#accelerator="ddp",
max_epochs=epochs,
gradient_clip_val=0.1,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="min", monitor='val_JS_avg'),
PeriodicCheckpoint(interval=len(train_loader), save_weights_only=True),
LitProgressBar(),
LearningRateMonitor("epoch")
])
model = DeepEnergyModel(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = DeepEnergyModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
return model
def eval_ood(model, train_loader, test_loader):
model.to(device)
model.eval()
with torch.no_grad():
train_energy = []
test_energy = []
for train_imgs in train_loader:
train_imgs = train_imgs.to(model.device)
train_energy.append(model.net(train_imgs.float()))
for test_imgs in test_loader:
test_imgs = test_imgs.to(model.device)
test_energy.append(model.net(test_imgs.float()))
        train_energy = torch.concat(train_energy).cpu()
        test_energy = torch.concat(test_energy).cpu()
y_true = np.concatenate((np.zeros_like(train_energy), np.ones_like(test_energy)))
y_pred = np.concatenate((-train_energy, -test_energy))
auc = roc_auc_score(y_true, y_pred)
print(f"Test AUC: {auc:4.3f}")
def main():
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_dim', type=int, default=160)
parser.add_argument('--input_scaler', action='store_true')
# MCMC
parser.add_argument('--steps', type=int, default=128)
parser.add_argument('--step_size', type=float, default=1.0)
parser.add_argument('--epsilon', type=float, default=0.005)
parser.add_argument('--kl', action='store_true')
parser.add_argument('--repel', action='store_true')
parser.add_argument('--hmc', action='store_true')
# Training
parser.add_argument('--n_train', type=int, default=50000)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--topref', action='store_true')
# Saving models
parser.add_argument('--mode', default="train")
parser.add_argument('--model_name', default=None)
parser.add_argument('--tag', default=None)
args = parser.parse_args()
e_func = "attnv3"
train_set, scaler = load_attn_train(n_train=args.n_train, input_dim=args.input_dim, scale=args.input_scaler, topref=args.topref)
val_X, val_y = load_attn_val(scaler, n_val=10000, input_dim=args.input_dim, scale=args.input_scaler, topref=args.topref)
train_loader = data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=2, pin_memory=True)
val_loader = data.DataLoader([[val_X[i], val_y[i]] for i in range(len(val_X))], batch_size=args.batch_size, shuffle=False, drop_last=True, num_workers=2, pin_memory=True)
test_fn = os.environ['VAE_DIR'] + 'h3_m174_h80_01_preprocessed.h5'
test_set = load_attn_test(scaler, test_fn, input_dim=args.input_dim, scale=args.input_scaler)
test_loader = data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=2)
if args.mode == "train":
if args.model_name is None:
model_path = 'models/{}_n{}k_d{}_stp{}_ss{}_eps{}_bs{}_e{}_l{}'.format(e_func, int(args.n_train / 1000.), args.input_dim, args.steps, args.step_size, args.epsilon, args.batch_size, args.epochs, args.lr)
model_path += "_kl" if args.kl else ""
model_path += "_hmc" if args.hmc else ""
model_path += "_scale" if args.input_scaler else ""
model_path += "_{}".format(args.tag) if args.tag else ""
else:
model_path = "models/" + args.model_name
model = train_model(train_loader,
val_loader,
os.path.basename(model_path),
epochs=args.epochs,
jet_shape=(args.input_dim // 4 * 3,),
batch_size=train_loader.batch_size,
lr=args.lr,
beta1=0.0,
steps=args.steps,
step_size=args.step_size,
num_layers=8,
d_model=128,
num_heads=16,
dff=1024,
rate=0.1,
kl=args.kl,
repel=args.repel,
hmc=args.hmc,
epsilon=args.epsilon
)
torch.save(model.state_dict(), model_path)
eval_ood(model, train_loader, test_loader)
elif args.mode == "test":
        model = DeepEnergyModel(jet_shape=(args.input_dim // 4 * 3,),
batch_size=train_loader.batch_size,
lr=args.lr,
beta1=0.0,
step_size=args.step_size
)
        model.load_state_dict(torch.load('models/' + args.model_name))
eval_ood(model, train_loader, test_loader)
return model
if __name__ == "__main__":
main()
| 18,407 | 40.647059 | 221 | py |
EBM-HEP | EBM-HEP-main/load_data.py |
import os
import numpy as np
import h5py
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import torch
import torch.nn.functional as F
import uproot_methods
from utils import jet_e, jet_pt, jet_mass
from math import inf
def load_attn_train(n_train=None, input_dim=160, scale=False, topref=False):
if topref:
f = h5py.File(os.environ['TOPREF_DIR']+'train_preprocessed.h5', "r")
X_train = np.array(f['table'])
y_train = np.array(f['labels'])
qcd_train = X_train[y_train==0]
else:
f = h5py.File(os.environ["VAE_DIR"] +"qcd_preprocessed.h5", "r")
qcd_train = f["constituents" if "constituents" in f.keys() else "table"]
if n_train:
qcd_train = qcd_train[:n_train, :input_dim]
else:
qcd_train = qcd_train[:, :input_dim]
X = qcd_train
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
if scale:
scaler = RobustScaler().fit(X)
X = scaler.transform(X)
else:
scaler = None
return X, scaler
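# Hedged sketch of the per-constituent feature map applied above: each
# (E, px, py, pz) four-vector is converted to (log pt, eta, phi); log E is
# also computed but only (log pt, eta, phi) enter the final features.
# Toy numbers, illustration only.
def _demo_feature_map():
    e, px, py, pz = 50.0, 30.0, 40.0, 3.0
    pt = np.sqrt(px ** 2 + py ** 2)      # transverse momentum: 50.0
    eta = np.arcsinh(pz / pt)            # pseudorapidity
    phi = np.arctan2(py, px)             # azimuthal angle
    return np.log(pt), eta, phi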
def load_attn_val(scaler, n_val=10000, input_dim=160, scale=False, pt_scaling=False, pt_refine=True, m_window=False, topref=False):
'''
    construct the validation set for OOD detection.
    unlike the training data, the validation set carries sample labels.
TODO: readjust n_val to match the final number of events
'''
if topref:
f = h5py.File(os.environ['TOPREF_DIR']+'val_preprocessed.h5', "r")
val_X = np.array(f['table'])
val_y = np.array(f['labels'])
val_X = val_X[-n_val:, :input_dim]
val_y = val_y[-n_val:]
else:
f1 = h5py.File(os.environ["VAE_DIR"] +"qcd_preprocessed.h5", "r")
qcd_val = f1["constituents" if "constituents" in f1.keys() else "table"]
qcd_val = np.array(qcd_val)
if pt_refine:
pts = []
for j in qcd_val:
pts.append(jet_pt(j))
pts = np.array(pts)
qcd_val = qcd_val[(pts>550) & (pts<=650)]
qcd_val = qcd_val[-n_val:, :input_dim]
f = h5py.File(os.environ["VAE_DIR"] +"top_preprocessed.h5", 'r')
for key in ['table', 'constituents', 'jet1']:
if key in f.keys():
w_test=f[key]
if key == "jet1":
labels=f["labels"]
labels=np.array(labels)
w_test = np.array(w_test)
if pt_refine:
pts = []
for j in w_test:
pts.append(jet_pt(j))
pts = np.array(pts)
w_test = w_test[(pts>550) & (pts<=650)]
if m_window:
ms=[]
for j in w_test:
ms.append(jet_mass(j))
ms=np.array(ms)
w_test=w_test[(ms>150)&(ms<=200)]
if pt_scaling:
for i in range(len(w_test)):
pt=jet_pt(w_test[i])
w_test[i]=w_test[i]/pt
w_test = w_test[-n_val:, :input_dim]
val_X = np.concatenate((qcd_val, w_test))
val_y = np.concatenate((np.zeros(len(qcd_val)), np.ones(len(w_test))))
val_X, val_y = shuffle(val_X, val_y)
f1.close()
X = val_X
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
    if scale:
        val_X = scaler.transform(X)
    else:
        val_X = X
f.close()
return val_X, val_y
def load_attn_test(scaler, fn, input_dim=160, n_test=10000, scale=False, pt_scaling=False, pt_refine=True, m_window=False):
f = h5py.File(fn, 'r')
for key in ['table', 'constituents', 'jet1']:
if key in f.keys():
w_test=f[key]
if key == "jet1":
labels=f["labels"]
labels=np.array(labels)
w_test = np.array(w_test)
if pt_refine:
pts=[]
for j in w_test:
pts.append(jet_pt(j))
pts=np.array(pts)
w_test=w_test[(pts>550)&(pts<=650)]
if m_window:
ms=[]
for j in w_test:
ms.append(jet_mass(j))
ms=np.array(ms)
w_test=w_test[(ms>150)&(ms<=200)]
w_test = w_test[:n_test,:input_dim]
if pt_scaling:
for i in range(len(w_test)):
pt=jet_pt(w_test[i])
w_test[i]=w_test[i]/pt
X = w_test
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
if scale:
X = scaler.transform(X)
f.close()
return X
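# Hedged sketch of the pt_refine selection used by the loaders above: keep
# only jets whose transverse momentum falls in the (550, 650] GeV window.
# _pt_window is a hypothetical helper, not used elsewhere in this module.
def _pt_window(jets, lo=550.0, hi=650.0):
    pts = np.array([jet_pt(j) for j in jets])
    return jets[(pts > lo) & (pts <= hi)]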
def load_clf_train(n_train=None, input_dim=80, ova=None):
'''
ova: 1 - QCD/others; 2 - W/others; 3 - Top/others
'''
def load_data(n_train_pclass=350000, input_dim=160, ova=None):
from sklearn.utils import shuffle
f = h5py.File(os.environ["CLFAD_DIR"] + 'qcd_pt600_preprocessed.h5', 'r')
qcd = np.array(f['constituents'])
f.close()
f = h5py.File(os.environ["CLFAD_DIR"] + 'w_pt600_preprocessed.h5', 'r')
w = np.array(f['constituents'])
f.close()
f = h5py.File(os.environ["CLFAD_DIR"] + 'top_pt600_preprocessed.h5', 'r')
top = np.array(f['constituents'])
f.close()
X = np.concatenate((qcd[:n_train_pclass, :input_dim], w[:n_train_pclass, :input_dim], top[:n_train_pclass, :input_dim]), axis=0)
#m = np.concatenate((qcd_obs[:n_train_pclass], w_obs[:n_train_pclass], top_obs[:n_train_pclass]))
if ova:
y = np.concatenate(((1 - (ova == 1))*np.ones(n_train_pclass), (1 - (ova == 2))*np.ones(n_train_pclass), (1 - (ova == 3))*np.ones(n_train_pclass)))
else:
labels_2 = np.empty(n_train_pclass)
labels_2.fill(2)
y = np.concatenate((np.zeros(n_train_pclass),np.ones(n_train_pclass), labels_2))
X, y = shuffle(X, y)
#y = F.one_hot(torch.tensor(y).to(torch.int64), num_classes=3) # commented out due to torch.nn.CrossEntropyLoss()
return X, y
X, y = load_data(n_train // 3, input_dim, ova)
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
return X, y | 8,832 | 28.055921 | 158 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex2_tpr_proposed.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 1.5
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
    # Binarize the sigmoid outputs into the observed segmentation mask
    # (0 = background, 1 = object).
    binary_vec = []
    for each_e in output:
        if each_e <= 0.5:
            binary_vec.append(0)
        else:
            binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
if eta is None:
return None
    # Decompose the data as x = u + z * v along the test direction eta, then
    # run parametric programming over z and keep the region where the network
    # reproduces the observed segmentation.
    u, v = util.compute_u_v(x_obs, eta, d * d)
    list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
    z_interval = util.construct_z(binary_vec, list_zk, list_results)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval(z_interval, eta, etaTx, cov, 0)
return pivot
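# Hedged sketch of the test-statistic direction eta built above (assuming the
# same two-group mean difference as the naive test): weight 1/n_a on pixels
# predicted background and -1/n_b on pixels predicted object, so that
# eta^T x = m_a - m_b. _demo_eta is illustrative only, not called here.
def _demo_eta(binary_vec):
    binary_vec = np.asarray(binary_vec)
    n_b = binary_vec.sum()
    n_a = len(binary_vec) - n_b
    if n_a == 0 or n_b == 0:
        return None  # degenerate mask, matching the eta-is-None guard above
    return np.where(binary_vec == 0, 1.0 / n_a, -1.0 / n_b).reshape(-1, 1)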
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 2,136 | 19.548077 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex4_count_no_interval.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
    z_interval = util.construct_z(binary_vec, list_zk, list_results)
    # Return (#intervals encountered by parametric programming,
    #         #intervals kept in the truncation region).
    return len(list_zk), len(z_interval)
# start_time = time.time()
#
# en, tn = run()
# print(en, tn)
#
# print("--- %s seconds ---" % (time.time() - start_time))
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_truncation_interval = []
local_list_encounted_interval = []
for i in range(iter_each_thread):
en, tn = run()
local_list_truncation_interval.append(tn)
local_list_encounted_interval.append(en)
total_list_tn = COMM.gather(local_list_truncation_interval, root=0)
total_list_en = COMM.gather(local_list_encounted_interval, root=0)
if COMM.rank == 0:
total_list_tn = [_i for temp in total_list_tn for _i in temp]
total_list_en = [_i for temp in total_list_en for _i in temp]
print(total_list_tn)
print()
print(total_list_en)
print("--- %s seconds ---" % (time.time() - start_time)) | 2,198 | 20.144231 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex3_len_interval_proposed_oc.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
global_list_ineq = []
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
model = load_model('./model/test_' + str(d) + '.h5')
# model.summary()
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
x = X_vec
    eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)
    if eta is None:
        return None
    u, v = util.compute_u_v(x, eta, d * d)
    # Intersect every linear selection constraint a^T x + b <= 0 along the
    # line x = u + z * v to obtain one truncation interval [Vminus, Vplus].
    Vminus = np.NINF
    Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
        if a_scalar == 0:
            if b_scalar > 0:
                print('Error B')
elif a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
else:
Vminus = max(Vminus, -b_scalar / a_scalar)
return Vplus - Vminus
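# Hedged worked example of the interval update in the loop above: substituting
# x = u + z * v into a^T x + b <= 0 gives (a^T v) z + (a^T u + b) <= 0, i.e.
# an upper bound on z when a^T v > 0 and a lower bound when a^T v < 0.
# Toy scalars only, not used by the experiment.
def _demo_truncation_update():
    a_scalar, b_scalar = 2.0, -6.0   # hypothetical a^T v and a^T u + b
    v_plus = -b_scalar / a_scalar    # z <= 3.0 since a_scalar > 0
    return v_plus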
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_length = []
for i in range(iter_each_thread):
length = run()
if length is not None:
local_list_length.append(length)
total_list_length = COMM.gather(local_list_length, root=0)
if COMM.rank == 0:
total_list_length = [_i for temp in total_list_length for _i in temp]
print(total_list_length)
print("--- %s seconds ---" % (time.time() - start_time)) | 3,789 | 23.294872 | 108 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/training.py | import numpy as np
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D, UpSampling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
import gen_data
def run(d):
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 1
X_train, Y_train = gen_data.generate(5000, IMG_WIDTH, mu_1, mu_2)
print(X_train.shape, Y_train.shape)
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
c1 = Conv2D(4, (3, 3), padding='same')(inputs)
p1 = MaxPooling2D((2, 2))(c1)
u2 = UpSampling2D(size=(2, 2))(p1)
c2 = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(u2)
outputs = c2
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(patience=15, verbose=1)
checkpointer = ModelCheckpoint('./model/test_' + str(d) + '.h5', verbose=1, save_best_only=True)
results = model.fit(X_train, Y_train, validation_split=0.1, epochs=20,
callbacks=[earlystopper, checkpointer])
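# Hedged shape check for the tiny encoder-decoder above (illustrative; it
# needs the checkpoint written by run(d)): an input of shape (d, d, 1) maps to
# per-pixel segmentation probabilities of the same spatial size.
def _demo_output_shape(d=8):
    model = load_model('./model/test_' + str(d) + '.h5')
    x = np.zeros((1, d, d, 1))
    assert model.predict(x, verbose=0).shape == (1, d, d, 1)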
if __name__ == '__main__':
list_d = [4, 8, 16, 32]
for d in list_d:
run(d) | 1,416 | 27.34 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex2_tpr_proposed_oc.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
global_list_ineq = []
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
model = load_model('./model/test_' + str(d) + '.h5')
# model.summary()
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
x = X_vec
eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x, eta, d * d)
Vminus = np.NINF
Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
        if a_scalar == 0:
            if b_scalar > 0:
                print('Error B')
elif a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
else:
Vminus = max(Vminus, -b_scalar / a_scalar)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval([[Vminus, Vplus]], eta, etaTx, cov, 0)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 200
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 4,137 | 23.05814 | 108 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex1_fpr_proposed_oc.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 0
global_list_ineq = []
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
model = load_model('./model/test_' + str(d) + '.h5')
# model.summary()
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
x = X_vec
eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x, eta, d * d)
Vminus = np.NINF
Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
        if a_scalar == 0:
            if b_scalar > 0:
                print('Error B')
elif a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
else:
Vminus = max(Vminus, -b_scalar / a_scalar)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval([[Vminus, Vplus]], eta, etaTx, cov, 0)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 4,163 | 22.931034 | 108 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex1_fpr_proposed.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 0
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval(z_interval, eta, etaTx, cov, 0)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 2,160 | 19.386792 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex1_fpr_naive.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
mu_1 = 0
mu_2 = 0
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=0)
output = output.flatten()
X_vec = X_test.flatten()
    # Split pixels by the predicted mask and accumulate counts and sums for
    # the two groups (a = background, b = object).
    m_a = 0
    m_b = 0
    n_a = 0
    n_b = 0
    for i in range(len(output)):
        if output[i] <= 0.5:
            n_a = n_a + 1
            m_a = m_a + X_vec[i]
        else:
            n_b = n_b + 1
            m_b = m_b + X_vec[i]
if (n_a == 0) or (n_b == 0):
return None
m_a = m_a / n_a
m_b = m_b / n_b
test_statistic = m_a - m_b
pivot = util.compute_naive_p(test_statistic, n_a, n_b, 1)
return pivot
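# Hedged sketch of the naive two-sided z-test presumably implemented by
# util.compute_naive_p (assuming unit-variance pixels, so the difference of
# group means has variance 1/n_a + 1/n_b); scipy is an extra assumption here.
def _demo_naive_p(test_statistic, n_a, n_b):
    from scipy.stats import norm
    z = test_statistic / np.sqrt(1.0 / n_a + 1.0 / n_b)
    return 2 * norm.sf(abs(z))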
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 200
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 1,834 | 17.72449 | 71 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex3_len_interval_proposed.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
    eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
    if eta is None:
        return None
    u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
length = 0
for interval in z_interval:
length = length + (interval[1] - interval[0])
# print(length)
return length
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_length = []
for i in range(iter_each_thread):
length = run()
if length is not None:
local_list_length.append(length)
total_list_length = COMM.gather(local_list_length, root=0)
if COMM.rank == 0:
total_list_length = [_i for temp in total_list_length for _i in temp]
print(total_list_length)
print("--- %s seconds ---" % (time.time() - start_time)) | 1,907 | 19.516129 | 100 | py |
UNITER | UNITER-master/train_nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for NLVR2
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, DetectFeatLmdb, TxtTokLmdb,
Nlvr2PairedDataset, Nlvr2PairedEvalDataset,
Nlvr2TripletDataset, Nlvr2TripletEvalDataset,
nlvr2_paired_collate, nlvr2_paired_eval_collate,
nlvr2_triplet_collate, nlvr2_triplet_eval_collate,
PrefetchLoader)
from model.nlvr2 import (UniterForNlvr2Paired, UniterForNlvr2Triplet,
UniterForNlvr2PairedAttn)
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, BUCKET_SIZE
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
img_db = DetectFeatLmdb(img_path, opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
dset = dset_cls(txt_db, img_db, opts.use_img_type)
sampler = TokenBucketSampler(dset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return PrefetchLoader(loader)
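# Hedged sketch of token-bucket batching (illustrative toy, not the actual
# TokenBucketSampler): examples are grouped so the padded size, i.e. the
# longest length in the bucket times its count, stays within the token budget.
def _demo_token_bucket(lens, batch_tokens):
    batches, cur, cur_max = [], [], 0
    for i, n_tok in enumerate(lens):
        if cur and max(cur_max, n_tok) * (len(cur) + 1) > batch_tokens:
            batches.append(cur)
            cur, cur_max = [], 0
        cur.append(i)
        cur_max = max(cur_max, n_tok)
    if cur:
        batches.append(cur)
    return batches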
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_db}")
if 'paired' in opts.model:
DatasetCls = Nlvr2PairedDataset
EvalDatasetCls = Nlvr2PairedEvalDataset
collate_fn = nlvr2_paired_collate
eval_collate_fn = nlvr2_paired_eval_collate
if opts.model == 'paired':
ModelCls = UniterForNlvr2Paired
elif opts.model == 'paired-attn':
ModelCls = UniterForNlvr2PairedAttn
else:
raise ValueError('unrecognized model type')
elif opts.model == 'triplet':
DatasetCls = Nlvr2TripletDataset
EvalDatasetCls = Nlvr2TripletEvalDataset
ModelCls = UniterForNlvr2Triplet
collate_fn = nlvr2_triplet_collate
eval_collate_fn = nlvr2_triplet_eval_collate
else:
raise ValueError('unrecognized model type')
# data loaders
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
DatasetCls, collate_fn, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
EvalDatasetCls, eval_collate_fn, opts)
test_dataloader = create_dataloader(opts.test_img_db, opts.test_txt_db,
opts.val_batch_size, False,
EvalDatasetCls, eval_collate_fn, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = ModelCls.from_pretrained(opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM)
model.init_type_embedding()
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
os.makedirs(join(opts.output_dir, 'results')) # store val predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
targets = batch['targets']
n_examples += targets.size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'Step {global_step}: '
f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
if global_step % opts.valid_steps == 0:
for split, loader in [('val', val_dataloader),
('test', test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
log, results = validate(model, loader, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
TB_LOGGER.log_scaler_dict(log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"Step {global_step}: finished {n_epoch} epochs")
if opts.num_train_steps % opts.valid_steps != 0:
for split, loader in [('val', val_dataloader),
('test', test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
log, results = validate(model, loader, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
TB_LOGGER.log_scaler_dict(log)
model_saver.save(model, global_step)
@torch.no_grad()
def validate(model, val_loader, split):
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = []
for i, batch in enumerate(val_loader):
qids = batch['qids']
targets = batch['targets']
del batch['targets']
del batch['qids']
scores = model(batch, compute_loss=False)
loss = F.cross_entropy(scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += (scores.max(dim=-1, keepdim=False)[1] == targets
).sum().item()
answers = ['True' if i == 1 else 'False'
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
results.extend(zip(qids, answers))
n_ex += len(qids)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/{split}_loss': val_loss,
f'valid/{split}_acc': val_acc,
f'valid/{split}_ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_db",
default=None, type=str,
help="The input validation images.")
parser.add_argument("--test_txt_db",
default=None, type=str,
help="The input test corpus. (LMDB)")
parser.add_argument("--test_img_db",
default=None, type=str,
help="The input test images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument("--model", default='paired',
choices=['paired', 'triplet', 'paired-attn'],
help="choose from 2 model architecture")
parser.add_argument('--use_img_type', action='store_true',
help="expand the type embedding for 2 image types")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+', type=float,
help="beta for adam optimizer")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 17,550 | 41.703163 | 79 | py |
UNITER | UNITER-master/pretrain.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER pre-training
"""
import argparse
from collections import defaultdict
import json
import math
import os
from os.path import exists, join
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, TokenBucketSamplerForItm,
MetaLoader, PrefetchLoader,
TxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
MlmDataset, MrfrDataset, MrcDataset,
mlm_collate, mrfr_collate, mrc_collate,
ItmDataset, itm_collate, itm_ot_collate)
from model.pretrain import UniterForPretraining
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, IMG_LABEL_DIM, BUCKET_SIZE
def build_dataloader(dataset, collate_fn, is_train, opts):
if is_train:
batch_size = opts.train_batch_size
else:
batch_size = opts.val_batch_size
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return loader
def build_dataloader_itm(dataset, collate_fn, is_train, opts):
if is_train:
batch_size = opts.train_batch_size
else:
batch_size = opts.val_batch_size
sampler = TokenBucketSamplerForItm(
dataset, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return loader
def build_mlm_dataset(txt_db, img_db, is_train, opts):
if is_train:
collate_fn = mlm_collate
datasets = [MlmDataset(t, i) for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
collate_fn = mlm_collate
dataset = MlmDataset(txt_db, img_db)
return dataset, collate_fn
def build_mrfr_dataset(txt_db, img_db, is_train, opts):
if is_train:
datasets = [MrfrDataset(opts.mrm_prob, t, i)
for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrfrDataset(opts.mrm_prob, txt_db, img_db)
return dataset, mrfr_collate
def build_mrc_dataset(txt_db, img_db, is_train, opts):
if is_train:
datasets = [MrcDataset(opts.mrm_prob, t, i)
for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrcDataset(opts.mrm_prob, txt_db, img_db)
return dataset, mrc_collate
def build_itm_dataset(txt_db, img_db, is_train, opts):
if is_train:
datasets = [ItmDataset(t, i, opts.itm_neg_prob)
for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = ItmDataset(txt_db, img_db, opts.itm_neg_prob)
collate_fn = itm_ot_collate if opts.itm_ot_lambda > 0 else itm_collate
return dataset, collate_fn
def create_dataloaders(datasets, is_train, opts, all_img_dbs=None):
if all_img_dbs is None:
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
dataloaders = {}
for dset in datasets:
if is_train:
assert len(dset['db']) == len(dset['img'])
assert len(dset['tasks']) == len(dset['mix_ratio'])
img_db = [all_img_dbs[path] for path in dset['img']]
else:
assert len(dset['db']) == len(dset['img']) == 1
img_db = all_img_dbs[dset['img'][0]]
for i, t in enumerate(dset['tasks']):
task = f'{t}_{dset["name"]}'
if is_train:
LOGGER.info(f"Loading {task} train dataset "
f"{dset['db']}, {[img.img_dir for img in img_db]}")
txt_db = [TxtTokLmdb(path, opts.max_txt_len)
for path in dset['db']]
else:
LOGGER.info(f"Loading {task} validation dataset, "
f"{dset['db']}, {img_db.img_dir}")
txt_db = TxtTokLmdb(dset['db'][0], -1)
if task.startswith('mlm'):
dataset = build_mlm_dataset(txt_db, img_db, is_train, opts)
elif task.startswith('mrfr'):
dataset = build_mrfr_dataset(txt_db, img_db, is_train, opts)
elif task.startswith('mrc'):
dataset = build_mrc_dataset(txt_db, img_db, is_train, opts)
elif task.startswith('itm'):
dataset = build_itm_dataset(txt_db, img_db, is_train, opts)
else:
raise ValueError(f'Undefined task {task}')
LOGGER.info(f"{len(dataset[0])*hvd.size()} samples loaded")
if task.startswith('itm'):
# itm handles distributed training in dset not sampler
loader = build_dataloader_itm(*dataset, is_train, opts)
else:
loader = build_dataloader(*dataset, is_train, opts)
if is_train:
ratio = dset['mix_ratio'][i]
dataloaders[task] = (loader, ratio)
else:
dataloaders[task] = PrefetchLoader(loader)
return dataloaders, all_img_dbs
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
all_dbs = [db for datasets in [opts.train_datasets, opts.val_datasets]
for dset in datasets for db in dset['db']]
tokenizer = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(tokenizer == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
# build data loaders
train_dataloaders, all_img_dbs = create_dataloaders(
opts.train_datasets, True, opts)
val_dataloaders, _ = create_dataloaders(
opts.val_datasets, False, opts, all_img_dbs)
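    # MetaLoader cycles over the per-task loaders, sampling a task for each
    # step according to its mix_ratio, so every batch comes from one task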
meta_loader = MetaLoader(train_dataloaders,
accum_steps=opts.gradient_accumulation_steps,
distributed=n_gpu > 1)
meta_loader = PrefetchLoader(meta_loader)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForPretraining.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, img_label_dim=IMG_LABEL_DIM)
model.to(device)
model.train()
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
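    # one amp loss-scaler per pretraining task (loss_id in amp.scale_loss
    # below picks the scaler matching the current task)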
task2scaler = {t: i for i, t in enumerate(train_dataloaders.keys())}
model, optimizer = amp.initialize(model, optimizer,
num_losses=len(task2scaler),
enabled=opts.fp16, opt_level='O2')
global_step = 0
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
# to compute training statistics
task2loss = {task: RunningMeter(f'loss/{task}')
for task in train_dataloaders.keys()}
# ITM w/ OT
if opts.itm_ot_lambda > 0:
for task in train_dataloaders.keys():
if task.startswith('itm'):
task2loss[f'{task}_xe'] = RunningMeter(f'loss/{task}_xe')
task2loss[f'{task}_ot'] = RunningMeter(f'loss/{task}_ot')
task2loss[f'{task}_ot_pos'] = RunningMeter(
f'loss/{task}_ot_pos')
task2loss[f'{task}_ot_neg'] = RunningMeter(
f'loss/{task}_ot_neg')
n_examples = defaultdict(int)
n_in_units = defaultdict(int)
n_loss_units = defaultdict(int)
grad_norm = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
for step, (name, batch) in enumerate(meta_loader):
# forward pass
n_examples[name] += batch['input_ids'].size(0)
n_in_units[name] += (batch['attn_masks'] == 1).sum().item()
task = name.split('_')[0]
loss = model(batch, task=task, compute_loss=True)
if task.startswith('itm'):
# OT
itm_loss, ot_loss = loss
n_loss_units[name] += itm_loss.size(0)
itm_loss = itm_loss.mean()
if ot_loss is not None:
ot_pos, ot_neg = ot_loss
ot_loss = (ot_pos.sum() - ot_neg.sum()
) / (ot_pos.size(0) + ot_neg.size(0))
                # NOTE: beware of empty tensors (mean of an empty tensor
                # yields NaN, hence the isnan checks below)
ot_pos = ot_pos.mean().item()
if not math.isnan(ot_pos):
task2loss[f'{name}_ot_pos'](ot_pos)
ot_neg = ot_neg.mean().item()
if not math.isnan(ot_neg):
task2loss[f'{name}_ot_neg'](ot_neg)
loss = itm_loss + opts.itm_ot_lambda * ot_loss
task2loss[f'{name}_xe'](itm_loss.item())
task2loss[f'{name}_ot'](ot_loss.item())
else:
loss = itm_loss
else:
n_loss_units[name] += loss.size(0)
loss = loss.mean() # loss is not normalized in model
# backward pass
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale,
loss_id=task2scaler[name]) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
task2loss[name](loss.item())
# optimizer update and logging
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.log_scaler_dict({ll.name: ll.val
for ll in task2loss.values()
if ll.val is not None})
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'==============Step {global_step}===============')
for t in train_dataloaders.keys():
assert all(tt == t for tt in all_gather_list(t))
tot_ex = sum(all_gather_list(n_examples[t]))
ex_per_sec = int(tot_ex / (time()-start))
tot_in = sum(all_gather_list(n_in_units[t]))
in_per_sec = int(tot_in / (time()-start))
tot_l = sum(all_gather_list(n_loss_units[t]))
l_per_sec = int(tot_l / (time()-start))
LOGGER.info(f'{t}: {tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar(f'perf/{t}_ex_per_s', ex_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_in_per_s', in_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_loss_per_s', l_per_sec,
global_step)
LOGGER.info('===============================================')
if global_step % opts.valid_steps == 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step % opts.valid_steps != 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
def validate(model, val_dataloaders):
model.eval()
for task, loader in val_dataloaders.items():
LOGGER.info(f"validate on {task} task")
if task.startswith('mlm'):
val_log = validate_mlm(model, loader)
elif task.startswith('mrfr'):
val_log = validate_mrfr(model, loader)
elif task.startswith('mrc'):
val_log = validate_mrc(model, loader, task)
elif task.startswith('itm'):
val_log = validate_itm(model, loader)
else:
raise ValueError(f'Undefined task {task}')
val_log = {f'{task}_{k}': v for k, v in val_log.items()}
TB_LOGGER.log_scaler_dict(
{f'valid_{task}/{k}': v for k, v in val_log.items()})
model.train()
@torch.no_grad()
def validate_mlm(model, val_loader):
LOGGER.info("start running MLM validation...")
val_loss = 0
n_correct = 0
n_word = 0
st = time()
for i, batch in enumerate(val_loader):
scores = model(batch, task='mlm', compute_loss=False)
labels = batch['txt_labels']
labels = labels[labels != -1]
loss = F.cross_entropy(scores, labels, reduction='sum')
val_loss += loss.item()
n_correct += (scores.max(dim=-1)[1] == labels).sum().item()
n_word += labels.numel()
val_loss = sum(all_gather_list(val_loss))
n_correct = sum(all_gather_list(n_correct))
n_word = sum(all_gather_list(n_word))
tot_time = time()-st
val_loss /= n_word
acc = n_correct / n_word
val_log = {'loss': val_loss,
'acc': acc,
'tok_per_s': n_word/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"acc: {acc*100:.2f}")
return val_log
def accuracy_count(out, labels):
outputs = out.max(dim=-1)[1]
mask = labels != -1
n_correct = (outputs == labels).masked_select(mask).sum().item()
return n_correct
@torch.no_grad()
def validate_mrfr(model, val_loader):
LOGGER.info("start running MRFR validation...")
val_loss = 0
n_feat = 0
st = time()
for i, batch in enumerate(val_loader):
loss = model(batch, task='mrfr', compute_loss=True)
val_loss += loss.sum().item() / IMG_DIM
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_log = {'loss': val_loss,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"loss: {val_loss:.2f}")
return val_log
@torch.no_grad()
def validate_mrc(model, val_loader, task):
LOGGER.info("start running MRC validation...")
val_loss = 0
n_feat = 0
st = time()
tot_score = 0
for i, batch in enumerate(val_loader):
prediction_soft_label = model(
batch, task=task, compute_loss=False)
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
label_targets = batch['label_targets']
loss = F.kl_div(
prediction_soft_label, label_targets, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label, label_targets)
else:
# background class should not be the target
cls_label_targets = label_targets[:, 1:].max(dim=-1)[1] + 1
loss = F.cross_entropy(
prediction_soft_label, cls_label_targets,
ignore_index=0, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label[:, 1:], label_targets[:, 1:])
val_loss += loss.item()
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_acc = tot_score / n_feat
val_log = {'loss': val_loss,
'acc': val_acc,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log
def compute_accuracy_for_soft_targets(out, labels):
outputs = out.max(dim=-1)[1]
labels = labels.max(dim=-1)[1] # argmax
n_correct = (outputs == labels).sum().item()
return n_correct
@torch.no_grad()
def validate_itm(model, val_loader):
LOGGER.info("start running ITM validation...")
val_loss = 0
tot_ot_loss = 0
tot_ot_pos = 0
tot_ot_neg = 0
tot_score = 0
n_ex = 0
st = time()
for i, batch in enumerate(val_loader):
scores, ot_loss = model(batch, task='itm', compute_loss=False)
if ot_loss is not None:
if isinstance(ot_loss, tuple):
ot_pos, ot_neg = ot_loss
ot_pos = ot_pos.sum().item()
ot_neg = ot_neg.sum().item()
tot_ot_pos += ot_pos
tot_ot_neg += ot_neg
tot_ot_loss += ot_pos - ot_neg
else:
tot_ot_loss += ot_loss.sum().item()
targets = batch['targets']
loss = F.cross_entropy(scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += (scores.max(dim=-1)[1] == targets).sum().item()
n_ex += len(targets)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {'valid/loss': val_loss,
'valid/acc': val_acc,
'valid/ex_per_s': n_ex/tot_time}
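    # NOTE: ot_loss here is the value left over from the last val batch;
    # it only serves to detect whether OT was enabled at all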
if ot_loss is not None:
tot_ot_loss = sum(all_gather_list(tot_ot_loss))
tot_ot_pos = sum(all_gather_list(tot_ot_pos))
tot_ot_neg = sum(all_gather_list(tot_ot_neg))
val_log['valid/ot_loss'] = tot_ot_loss / n_ex
val_log['valid/ot_pos'] = tot_ot_pos / n_ex
val_log['valid/ot_neg'] = tot_ot_neg / n_ex
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
# NOTE: train tasks and val tasks cannot take command line arguments
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config", type=str,
help="path to model structure config json")
parser.add_argument("--checkpoint", default=None, type=str,
help="path to model checkpoint (*.pt)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
parser.add_argument('--mrm_prob', default=0.15, type=float,
help='probability to mask in MRM training')
parser.add_argument('--itm_neg_prob', default=0.5, type=float,
                        help='probability to make negative examples '
                             'in ITM training')
parser.add_argument('--itm_ot_lambda', default=0.0, type=float,
help='weight of OT (optimal transport) loss (WRA)')
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adamw',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=10000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', required=True, help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 25,780 | 39.094868 | 79 | py |
UNITER | UNITER-master/train_itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for Image-Text Retrieval
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, ConcatDataset
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, TxtTokLmdb, ImageLmdbGroup,
ItmRankDataset, itm_rank_collate,
ItmValDataset, itm_val_collate,
ItmEvalDataset, itm_eval_collate)
from model.itm import UniterForImageTextRetrieval
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM
from utils.itm_eval import evaluate
def build_dataloader(dataset, collate_fn, is_train, opts):
batch_size = opts.train_batch_size if is_train else 1
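    # at eval time each dataset item already bundles one text query with a
    # pool of candidate images (inf_minibatch_size), so batch_size is 1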
dataloader = DataLoader(dataset, batch_size=batch_size,
shuffle=is_train, drop_last=is_train,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
if hvd.rank() == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
# store ITM predictions
os.makedirs(join(opts.output_dir, 'results_val'))
os.makedirs(join(opts.output_dir, 'results_test'))
os.makedirs(join(opts.output_dir, 'results_train'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
# check multiple DBs
assert len(opts.train_txt_dbs) == len(opts.train_img_dbs), \
"train txt_db and img_db have different length"
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets.append(ItmRankDataset(txt_db, img_db,
opts.negative_size))
train_dataset = ConcatDataset(train_datasets)
# val
LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, -1)
val_dataset = ItmValDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
val_dataloader = build_dataloader(val_dataset, itm_val_collate,
False, opts)
# eval
LOGGER.info(f"Loading val, test Dataset for full evaluation: "
f"{opts.val_txt_db}, {opts.val_img_db}"
f"{opts.test_txt_db}, {opts.test_img_db}")
eval_dataset_val = ItmEvalDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
eval_loader_val = build_dataloader(eval_dataset_val, itm_eval_collate,
False, opts)
test_img_db = all_img_dbs[opts.test_img_db]
test_txt_db = TxtTokLmdb(opts.test_txt_db, -1)
eval_dataset_test = ItmEvalDataset(test_txt_db, test_img_db,
opts.inf_minibatch_size)
eval_loader_test = build_dataloader(eval_dataset_test, itm_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForImageTextRetrieval.from_pretrained(
opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM, margin=opts.margin)
model.init_output() # pretrain ITM head is different from ranking head
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
LOGGER.info(f"***** Running training on {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset) * hvd.size())
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
train_dataloader = build_dataloader(
train_dataset, itm_rank_collate, True, opts)
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'------------Step {global_step}-------------')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info(f'-------------------------------------------')
if global_step % opts.valid_steps == 0:
if opts.full_val:
LOGGER.info(
f"========================== Step {global_step} "
f"==========================")
val_log = evaluate(model, eval_loader_val)
TB_LOGGER.log_scaler_dict(
{f"valid/{k}": v for k, v in val_log.items()})
LOGGER.info(f"image retrieval R1: "
f"{val_log['img_r1']*100:.2f},\n"
f"image retrieval R5: "
f"{val_log['img_r5']*100:.2f},\n"
f"image retrieval R10: "
f"{val_log['img_r10']*100:.2f}\n"
f"text retrieval R1: "
f"{val_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: "
f"{val_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: "
f"{val_log['txt_r10']*100:.2f}")
LOGGER.info("================================="
"=================================")
else:
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
pbar.close()
if opts.num_train_steps % opts.valid_steps != 0:
# final validation
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
# evaluation
for split, loader in [('val', eval_loader_val),
('test', eval_loader_test)]:
eval_log = evaluate(model, loader)
TB_LOGGER.log_scaler_dict({f"eval/{split}_{k}": v
for k, v in eval_log.items()})
if hvd.rank() != 0:
continue
LOGGER.info(
f"========================= {split} ===========================\n"
f"image retrieval R1: {eval_log['img_r1']*100:.2f},\n"
f"image retrieval R5: {eval_log['img_r5']*100:.2f},\n"
f"image retrieval R10: {eval_log['img_r10']*100:.2f}\n"
f"text retrieval R1: {eval_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: {eval_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: {eval_log['txt_r10']*100:.2f}")
LOGGER.info("=========================================================")
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
pbar = tqdm(total=len(val_loader))
else:
pbar = NoOp()
LOGGER.info("start running Image Retrieval validation ...")
model.eval()
n_ex = 0
st = time()
recall_at_1, recall_at_5, recall_at_10 = 0, 0, 0
for batch in val_loader:
scores = model(batch, compute_loss=False)
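        # each batch scores one text query against its image pool; the
        # ground-truth image is assumed to sit at index 0 (by dataset
        # construction), so recall@k checks whether index 0 is in the top-k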
_, indices = scores.squeeze(1).topk(10, dim=0)
rank = (indices == 0).nonzero()
if rank.numel():
rank = rank.item()
if rank < 1:
recall_at_1 += 1
if rank < 5:
recall_at_5 += 1
if rank < 10:
recall_at_10 += 1
n_ex += 1
pbar.update(1)
n_ex = sum(all_gather_list(n_ex))
recall_at_1 = sum(all_gather_list(recall_at_1)) / n_ex
recall_at_5 = sum(all_gather_list(recall_at_5)) / n_ex
recall_at_10 = sum(all_gather_list(recall_at_10)) / n_ex
tot_time = time()-st
val_log = {'valid/ex_per_s': n_ex/tot_time,
'valid/recall_1': recall_at_1,
'valid/recall_5': recall_at_5,
'valid/recall_10': recall_at_10}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"recall_1: {recall_at_1*100:.2f}, "
f"recall_5: {recall_at_5*100:.2f}, "
f"recall_10: {recall_at_10*100:.2f}")
pbar.close()
return val_log
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained MLM")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory where the model "
"checkpoints will be written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=128, type=int,
help="Total batch size for training. "
"(batch by examples)")
parser.add_argument("--negative_size", default=1, type=int,
help="Number of negative samples per positive sample")
parser.add_argument("--inf_minibatch_size", default=400, type=int,
help="batch size for running inference. "
"(used for validation, and evaluation)")
parser.add_argument("--margin", default=0.2, type=float,
help="margin of ranking loss")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=0.25, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--full_val', action='store_true',
help="Always run full evaluation during training")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 17,930 | 42.627737 | 79 | py |
UNITER | UNITER-master/prepro.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
preprocess NLVR annotations into LMDB
"""
import argparse
import json
import pickle
import os
from os.path import exists
from cytoolz import curry
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from data.data import open_lmdb
@curry
def bert_tokenize(tokenizer, text):
ids = []
for word in text.strip().split():
ws = tokenizer.tokenize(word)
if not ws:
# some special char
continue
ids.extend(tokenizer.convert_tokens_to_ids(ws))
return ids
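# bert_tokenize is curried (cytoolz), so the tokenizer can be bound once and
# reused, e.g. (hypothetical snippet):
#   tokenize = bert_tokenize(BertTokenizer.from_pretrained('bert-base-cased'))
#   ids = tokenize('two dogs playing fetch')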
def process_nlvr2(jsonl, db, tokenizer, missing=None):
id2len = {}
txt2img = {} # not sure if useful
for line in tqdm(jsonl, desc='processing NLVR2'):
example = json.loads(line)
id_ = example['identifier']
img_id = '-'.join(id_.split('-')[:-1])
img_fname = (f'nlvr2_{img_id}-img0.npz', f'nlvr2_{img_id}-img1.npz')
if missing and (img_fname[0] in missing or img_fname[1] in missing):
continue
input_ids = tokenizer(example['sentence'])
if 'label' in example:
target = 1 if example['label'] == 'True' else 0
else:
target = None
txt2img[id_] = img_fname
id2len[id_] = len(input_ids)
example['input_ids'] = input_ids
example['img_fname'] = img_fname
example['target'] = target
db[id_] = example
return id2len, txt2img
def process_referring_expressions(refs, instances, iid_to_ann_ids,
db, tokenizer, split):
"""
Inputs:
- refs: [ref_id, ann_id, image_id, split, sent_ids, sentences]
- instances: {images, annotations, categories}
- iid_to_ann_ids: image_id -> ann_ids ordered by extracted butd features
Return:
- id2len : sent_id -> tokenized question length
- images : [{id, file_name, ann_ids, height, width} ]
- annotations: [{id, area, bbox, image_id, category_id, iscrowd}]
- categories : [{id, name, supercategory}]
"""
# images within split
image_set = set([ref['image_id'] for ref in refs if ref['split'] == split])
images = []
for img in instances['images']:
if img['id'] in image_set:
images.append({
'id': img['id'], 'file_name': img['file_name'],
'ann_ids': iid_to_ann_ids[str(img['id'])],
'height': img['height'], 'width': img['width']})
# Images = {img['id']: img for img in images}
# anns within split
annotations = []
for ann in instances['annotations']:
if ann['image_id'] in image_set:
annotations.append({
'id': ann['id'], 'area': ann['area'], 'bbox': ann['bbox'],
'image_id': ann['image_id'],
'category_id': ann['category_id'],
'iscrowd': ann['iscrowd']
})
Anns = {ann['id']: ann for ann in annotations}
# category info
categories = instances['categories']
# refs within split
refs = [ref for ref in refs if ref['split'] == split]
print(f"Processing {len(refs)} annotations...")
id2len = {}
for ref in tqdm(refs, desc='processing referring expressions'):
ref_id = ref['ref_id']
ann_id = ref['ann_id']
image_id = ref['image_id']
img_fname = f"visual_grounding_coco_gt_{int(image_id):012}.npz"
for sent in ref['sentences']:
sent_id = sent['sent_id']
input_ids = tokenizer(sent['sent'])
id2len[str(sent_id)] = len(input_ids)
db[str(sent_id)] = {
'sent_id': sent_id, 'sent': sent['sent'],
'ref_id': ref_id, 'ann_id': ann_id,
'image_id': image_id, 'bbox': Anns[ann_id]['bbox'],
'input_ids': input_ids,
'img_fname': img_fname
}
return id2len, images, annotations, categories, refs
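# Minimal usage sketch (hypothetical file names; inputs follow the RefCOCO
# layout described in the docstring above):
#   refs = pickle.load(open('refs.p', 'rb'))
#   instances = json.load(open('instances.json'))
#   iid_to_ann_ids = json.load(open('iid_to_ann_ids.json'))['iid_to_ann_ids']
#   id2len, images, anns, cats, refs = process_referring_expressions(
#       refs, instances, iid_to_ann_ids, db, tokenizer, 'val')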
def main(opts):
if not exists(opts.output):
os.makedirs(opts.output)
else:
raise ValueError('Found existing DB. Please explicitly remove '
'for re-processing')
meta = vars(opts)
meta['tokenizer'] = opts.toker
toker = BertTokenizer.from_pretrained(
opts.toker, do_lower_case='uncased' in opts.toker)
tokenizer = bert_tokenize(toker)
meta['UNK'] = toker.convert_tokens_to_ids(['[UNK]'])[0]
meta['CLS'] = toker.convert_tokens_to_ids(['[CLS]'])[0]
meta['SEP'] = toker.convert_tokens_to_ids(['[SEP]'])[0]
meta['MASK'] = toker.convert_tokens_to_ids(['[MASK]'])[0]
meta['v_range'] = (toker.convert_tokens_to_ids('!')[0],
len(toker.vocab))
with open(f'{opts.output}/meta.json', 'w') as f:
json.dump(vars(opts), f, indent=4)
open_db = curry(open_lmdb, opts.output, readonly=False)
output_field_name = ['id2len', 'txt2img']
with open_db() as db:
if opts.task == 'nlvr':
with open(opts.annotations[0]) as ann:
if opts.missing_imgs is not None:
missing_imgs = set(json.load(open(opts.missing_imgs)))
else:
missing_imgs = None
jsons = process_nlvr2(
ann, db, tokenizer, missing_imgs)
elif opts.task == 're':
data = pickle.load(open(opts.annotations[0], 'rb'))
instances = json.load(open(opts.annotations[1], 'r'))
iid_to_ann_ids = json.load(
open(opts.annotations[2], 'r'))['iid_to_ann_ids']
# dirs/refcoco_testA_bert-base-cased.db -> testA
img_split = opts.output.split('/')[-1].split('.')[0].split('_')[1]
jsons = process_referring_expressions(
data, instances, iid_to_ann_ids,
db, tokenizer, img_split)
output_field_name = [
'id2len', 'images', 'annotations',
'categories', 'refs']
for dump, name in zip(jsons, output_field_name):
with open(f'{opts.output}/{name}.json', 'w') as f:
json.dump(dump, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--annotations', required=True, nargs='+',
help='annotation JSON')
parser.add_argument('--missing_imgs',
help='some training image features are corrupted')
parser.add_argument('--output', required=True,
help='output dir of DB')
parser.add_argument('--task', required=True, default='nlvr',
choices=['nlvr', 're'])
parser.add_argument('--toker', default='bert-base-cased',
help='which BERT tokenizer to used')
args = parser.parse_args()
if args.task == 'nlvr':
assert len(args.annotations) == 1
elif args.task == 're':
assert len(args.annotations) == 3
main(args)
| 6,939 | 36.923497 | 79 | py |
UNITER | UNITER-master/inf_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of VCR for submission
"""
import argparse
import json
import os
from os.path import exists
import pandas as pd
from time import time
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
import numpy as np
from torch.utils.data.distributed import DistributedSampler
from data import (PrefetchLoader,
DetectFeatLmdb, VcrTxtTokLmdb, VcrEvalDataset,
vcr_eval_collate)
from model.vcr import UniterForVisualCommonsenseReasoning
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import NoOp, Struct
from utils.const import IMG_DIM
from tqdm import tqdm
NUM_SPECIAL_TOKENS = 81
def load_img_feat(dir_list, opts):
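    # dir_list may bundle two feature dbs separated by ';': the path
    # containing "gt" holds ground-truth-box features, the other holds
    # detected-box features (cf. the --img_db default below)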
dir_ = dir_list.split(";")
assert len(dir_) <= 2, "More than two img_dirs found"
img_db_gt, img_db = None, None
gt_db_path, db_path = "", ""
for d in dir_:
if "gt" in d:
gt_db_path = d
else:
db_path = d
if gt_db_path != "":
img_db_gt = DetectFeatLmdb(
gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
if db_path != "":
img_db = DetectFeatLmdb(
db_path, opts.conf_th,
opts.max_bb, opts.min_bb, opts.num_bb,
opts.compressed_db)
return img_db, img_db_gt
def save_for_submission(pred_file):
with open(os.path.join(pred_file), "r") as f:
data = json.load(f)
probs_grp = []
ids_grp = []
ordered_data = sorted(data.items(),
key=lambda item: int(item[0].split("-")[1]))
for annot_id, scores in ordered_data:
ids_grp.append(annot_id)
probs_grp.append(np.array(scores).reshape(1, 5, 4))
# Double check the IDs are in the same order for everything
# assert [x == ids_grp[0] for x in ids_grp]
probs_grp = np.stack(probs_grp, 1)
# essentially probs_grp is a [num_ex, 5, 4] array of probabilities.
# The 5 'groups' are
# [answer, rationale_conditioned_on_a0, rationale_conditioned_on_a1,
# rationale_conditioned_on_a2, rationale_conditioned_on_a3].
# We will flatten this to a CSV file so it's easy to submit.
group_names = ['answer'] + [f'rationale_conditioned_on_a{i}'
for i in range(4)]
probs_df = pd.DataFrame(data=probs_grp.reshape((-1, 20)),
columns=[f'{group_name}_{i}'
for group_name in group_names for i in range(4)])
probs_df['annot_id'] = ids_grp
probs_df = probs_df.set_index('annot_id', drop=True)
return probs_df
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if rank != 0:
LOGGER.disabled = True
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = Struct(json.load(open(hps_file)))
assert opts.split in opts.img_db and opts.split in opts.txt_db
# load DBs and image dirs
eval_img_db, eval_img_db_gt = load_img_feat(opts.img_db, model_opts)
eval_txt_db = VcrTxtTokLmdb(opts.txt_db, -1)
eval_dataset = VcrEvalDataset(
"test", eval_txt_db, img_db=eval_img_db,
img_db_gt=eval_img_db_gt)
# Prepare model
model = UniterForVisualCommonsenseReasoning.from_pretrained(
f'{opts.output_dir}/log/model.json', state_dict={},
img_dim=IMG_DIM)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
state_dict = checkpoint.get('model_state', checkpoint)
matched_state_dict = {}
unexpected_keys = set()
missing_keys = set()
for name, param in model.named_parameters():
missing_keys.add(name)
for key, data in state_dict.items():
if key in missing_keys:
matched_state_dict[key] = data
missing_keys.remove(key)
else:
unexpected_keys.add(key)
LOGGER.info(f"Unexpected_keys: {list(unexpected_keys)}")
LOGGER.info(f"Missing_keys: {list(missing_keys)}")
model.load_state_dict(matched_state_dict, strict=False)
model.to(device)
if opts.fp16:
model = amp.initialize(model, enabled=True, opt_level='O2')
eval_dataloader = DataLoader(eval_dataset,
batch_size=opts.batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
shuffle=False,
collate_fn=vcr_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
_, results = evaluate(model, eval_dataloader)
result_dir = f'{opts.output_dir}/results_{opts.split}'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
all_results = {}
for id2res in all_gather_list(results):
all_results.update(id2res)
if hvd.rank() == 0:
with open(f'{result_dir}/'
f'results_{opts.checkpoint}_all.json', 'w') as f:
json.dump(all_results, f)
probs_df = save_for_submission(
f'{result_dir}/results_{opts.checkpoint}_all.json')
probs_df.to_csv(f'{result_dir}/results_{opts.checkpoint}_all.csv')
@torch.no_grad()
def evaluate(model, eval_loader):
model.eval()
LOGGER.info("start running evaluation ...")
if hvd.rank() == 0:
val_pbar = tqdm(total=len(eval_loader))
else:
val_pbar = NoOp()
val_qa_loss, val_qar_loss = 0, 0
tot_qa_score, tot_qar_score, tot_score = 0, 0, 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(eval_loader):
qids = batch['qids']
qa_targets, qar_targets = batch['qa_targets'], batch['qar_targets']
scores = model(batch, compute_loss=False)
scores = scores.view(len(qids), -1)
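        # one row per question: 4 answer scores, then (when present) 4
        # rationale scores conditioned on each of the 4 answers (4 + 16)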
if torch.max(qa_targets) > -1:
vcr_qa_loss = F.cross_entropy(
scores[:, :4], qa_targets.squeeze(-1), reduction="sum")
if scores.shape[1] > 8:
qar_scores = []
for batch_id in range(scores.shape[0]):
answer_ind = qa_targets[batch_id].item()
qar_index = [4+answer_ind*4+i
for i in range(4)]
qar_scores.append(scores[batch_id, qar_index])
qar_scores = torch.stack(qar_scores, dim=0)
else:
qar_scores = scores[:, 4:]
vcr_qar_loss = F.cross_entropy(
qar_scores, qar_targets.squeeze(-1), reduction="sum")
val_qa_loss += vcr_qa_loss.item()
val_qar_loss += vcr_qar_loss.item()
curr_qa_score, curr_qar_score, curr_score = compute_accuracies(
scores[:, :4], qa_targets, qar_scores, qar_targets)
tot_qar_score += curr_qar_score
tot_qa_score += curr_qa_score
tot_score += curr_score
for qid, score in zip(qids, scores):
results[qid] = score.cpu().tolist()
n_ex += len(qids)
val_pbar.update(1)
val_qa_loss = sum(all_gather_list(val_qa_loss))
val_qar_loss = sum(all_gather_list(val_qar_loss))
tot_qa_score = sum(all_gather_list(tot_qa_score))
tot_qar_score = sum(all_gather_list(tot_qar_score))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_qa_loss /= n_ex
val_qar_loss /= n_ex
val_qa_acc = tot_qa_score / n_ex
val_qar_acc = tot_qar_score / n_ex
val_acc = tot_score / n_ex
val_log = {'valid/ex_per_s': n_ex/tot_time,
'valid/vcr_qa_loss': val_qa_loss,
'valid/vcr_qar_loss': val_qar_loss,
'valid/acc_qa': val_qa_acc,
'valid/acc_qar': val_qar_acc,
'valid/acc': val_acc}
model.train()
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds, "
f"score_qa: {val_qa_acc*100:.2f} "
f"score_qar: {val_qar_acc*100:.2f} "
f"score: {val_acc*100:.2f} ")
return val_log, results
def compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):
outputs_qa = out_qa.max(dim=-1)[1]
outputs_qar = out_qar.max(dim=-1)[1]
matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()
matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()
matched_joined = matched_qa & matched_qar
n_correct_qa = matched_qa.sum().item()
n_correct_qar = matched_qar.sum().item()
n_correct_joined = matched_joined.sum().item()
return n_correct_qa, n_correct_qar, n_correct_joined
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
default="/txt/vcr_val.db/", type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db",
default="/img/vcr_gt_val/;/img/vcr_val/", type=str,
help="The input train images.")
parser.add_argument("--split",
default="val", type=str,
help="The input split")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="can be the path to binary or int number (step)")
parser.add_argument("--batch_size",
default=10, type=int,
help="number of examples in a batch")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory of the training command")
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
main(args)
| 10,802 | 36.905263 | 78 | py |
UNITER | UNITER-master/train_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VCR
"""
import argparse
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, PrefetchLoader, DetectFeatLmdb,
VcrTxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
VcrDataset, VcrEvalDataset,
vcr_collate, vcr_eval_collate,)
from model.vcr import UniterForVisualCommonsenseReasoning
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import BUCKET_SIZE, IMG_DIM
NUM_SPECIAL_TOKENS = 81
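# number of extra word embeddings appended to the vocabulary for VCR
# (consumed by model.init_word_embedding below)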
def build_dataloader(dataset, collate_fn, is_train, opts):
batch_size = (opts.train_batch_size if is_train
else opts.val_batch_size)
if is_train:
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
dataloader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
else:
dataloader = DataLoader(dataset, batch_size=batch_size,
num_workers=opts.n_workers, shuffle=False,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def build_optimizer(model, opts):
""" vqa linear may get larger learning rate """
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = [(n, p) for n, p in model.named_parameters()
if 'vcr_output' not in n]
param_top = [(n, p) for n, p in model.named_parameters()
if 'vcr_output' in n]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
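    # NOTE: group order matters -- the training loop multiplies the lr of
    # groups 0/1 (the vcr_output head) by opts.lr_mul and leaves 2/3 at base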
    # Adam-family optimizers only (adam / adamax / adamw)
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
def load_img_feat(db_list, all_img_dbs, opts):
db_ = db_list.split(";")
assert len(db_) <= 2, "More than two img_dbs found"
gt_db_path, db_path = "", ""
for d in db_:
if "gt" in d:
gt_db_path = d
else:
db_path = d
if gt_db_path != "":
img_db_gt = DetectFeatLmdb(
gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
all_img_dbs.path2imgdb[gt_db_path] = img_db_gt
else:
img_db_gt = None
img_db = all_img_dbs[db_path] if db_path != "" else None
all_img_dbs.path2imgdb[db_path] = img_db
return img_db, img_db_gt
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db, img_db_gt = load_img_feat(img_path, all_img_dbs, opts)
qa_txt_db = VcrTxtTokLmdb(txt_path, opts.max_txt_len, task="qa")
qar_txt_db = VcrTxtTokLmdb(txt_path, opts.max_txt_len, task="qar")
train_datasets.append(
VcrDataset(qa_txt_db, img_db_gt=img_db_gt, img_db=img_db))
train_datasets.append(
VcrDataset(qar_txt_db, img_db_gt=img_db_gt, img_db=img_db))
train_dataset = ConcatDatasetWithLens(train_datasets)
train_dataloader = build_dataloader(train_dataset, vcr_collate, True, opts)
# val
LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db, val_img_db_gt = load_img_feat(
opts.val_img_db, all_img_dbs, opts)
val_txt_db = VcrTxtTokLmdb(opts.val_txt_db, -1)
val_dataset = VcrEvalDataset(
"val", val_txt_db, img_db=val_img_db, img_db_gt=val_img_db_gt)
val_final_dataset = VcrEvalDataset(
"test", val_txt_db, img_db=val_img_db, img_db_gt=val_img_db_gt)
val_dataloader = build_dataloader(val_dataset, vcr_eval_collate,
False, opts)
val_final_dataloader = build_dataloader(
val_final_dataset, vcr_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint and opts.checkpoint_from == "pretrain":
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = opts.train_txt_dbs + [opts.val_txt_db]
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(toker == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
model = UniterForVisualCommonsenseReasoning.from_pretrained(
opts.model_config, checkpoint, img_dim=IMG_DIM)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
if opts.checkpoint_from == "vcr_pretrain":
checkpoint = torch.load(opts.checkpoint)
state_dict = checkpoint.get('model_state', checkpoint)
matched_state_dict = {}
unexpected_keys = set()
missing_keys = set()
for name, param in model.named_parameters():
missing_keys.add(name)
for key, data in state_dict.items():
if key in missing_keys:
matched_state_dict[key] = data
missing_keys.remove(key)
else:
unexpected_keys.add(key)
print("Unexpected_keys:", list(unexpected_keys))
print("Missing_keys:", list(missing_keys))
model.load_state_dict(matched_state_dict, strict=False)
del checkpoint
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
        os.makedirs(join(opts.output_dir, 'results'))  # store VCR predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset) * hvd.size())
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for i, param_group in enumerate(optimizer.param_groups):
if i == 0 or i == 1:
param_group['lr'] = lr_this_step * opts.lr_mul
elif i == 2 or i == 3:
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info('===========================================')
if global_step % opts.valid_steps == 0:
val_log, results = validate(
model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
if global_step % opts.valid_steps != 0:
val_log, results = validate(
model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
val_log, results = validate(model, val_final_dataloader)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_final_qa_qar_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
def compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):
outputs_qa = out_qa.max(dim=-1)[1]
outputs_qar = out_qar.max(dim=-1)[1]
matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()
matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()
matched_joined = matched_qa & matched_qar
n_correct_qa = matched_qa.sum().item()
n_correct_qar = matched_qar.sum().item()
n_correct_joined = matched_joined.sum().item()
return n_correct_qa, n_correct_qar, n_correct_joined
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
val_pbar = tqdm(total=len(val_loader))
else:
val_pbar = NoOp()
LOGGER.info("start running validation...")
model.eval()
val_qa_loss, val_qar_loss = 0, 0
tot_qa_score, tot_qar_score, tot_score = 0, 0, 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
scores = model(batch, compute_loss=False)
qa_targets = batch['qa_targets']
qar_targets = batch['qar_targets']
qids = batch['qids']
scores = scores.view(len(qids), -1)
vcr_qa_loss = F.cross_entropy(
scores[:, :4], qa_targets.squeeze(-1), reduction="sum")
if scores.shape[1] > 8:
qar_scores = []
for batch_id in range(scores.shape[0]):
answer_ind = qa_targets[batch_id].item()
qar_index = [4+answer_ind*4+i
for i in range(4)]
qar_scores.append(scores[batch_id, qar_index])
qar_scores = torch.stack(qar_scores, dim=0)
else:
qar_scores = scores[:, 4:]
vcr_qar_loss = F.cross_entropy(
qar_scores, qar_targets.squeeze(-1), reduction="sum")
val_qa_loss += vcr_qa_loss.item()
val_qar_loss += vcr_qar_loss.item()
curr_qa_score, curr_qar_score, curr_score = compute_accuracies(
scores[:, :4], qa_targets, qar_scores, qar_targets)
tot_qar_score += curr_qar_score
tot_qa_score += curr_qa_score
tot_score += curr_score
for qid, score in zip(qids, scores):
results[qid] = score.cpu().tolist()
n_ex += len(qids)
val_pbar.update(1)
val_qa_loss = sum(all_gather_list(val_qa_loss))
val_qar_loss = sum(all_gather_list(val_qar_loss))
tot_qa_score = sum(all_gather_list(tot_qa_score))
tot_qar_score = sum(all_gather_list(tot_qar_score))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_qa_loss /= n_ex
val_qar_loss /= n_ex
val_qa_acc = tot_qa_score / n_ex
val_qar_acc = tot_qar_score / n_ex
val_acc = tot_score / n_ex
val_log = {'valid/vcr_qa_loss': val_qa_loss,
'valid/vcr_qar_loss': val_qar_loss,
'valid/acc_qa': val_qa_acc,
'valid/acc_qar': val_qar_acc,
'valid/acc': val_acc,
'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score_qa: {val_qa_acc*100:.2f} "
f"score_qar: {val_qar_acc*100:.2f} "
f"score: {val_acc*100:.2f} ")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument("--checkpoint_from",
default='pretrain', type=str,
choices=['pretrain', 'vcr_pretrain'],
help="which setting is checkpoint from")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--lr_mul", default=10.0, type=float,
help="multiplier for top layer lr")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 20,770 | 41.131846 | 79 | py |