# File: noise2noise-master/download_kodak.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import sys
import argparse
from urllib.request import urlretrieve
examples='''examples:
python %(prog)s --output-dir=./tmp
'''
def main():
parser = argparse.ArgumentParser(
description='Download the Kodak dataset .PNG image files.',
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("--output-dir", help="Directory where to save the Kodak dataset .PNGs")
args = parser.parse_args()
if args.output_dir is None:
        print ('Must specify output directory for the Kodak images with --output-dir')
sys.exit(1)
os.makedirs(args.output_dir, exist_ok=True)
for i in range(1, 25):
imgname = 'kodim%02d.png' % i
url = "http://r0k.us/graphics/kodak/kodak/" + imgname
print ('Downloading', url)
urlretrieve(url, os.path.join(args.output_dir, imgname))
print ('Kodak validation set successfully downloaded.')
if __name__ == "__main__":
main()
# File: noise2noise-master/config.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import dnnlib
import argparse
import sys
import dnnlib.submission.submit as submit
import validation
# Submit config
# ------------------------------------------------------------------------------------------
submit_config = dnnlib.SubmitConfig()
submit_config.run_dir_root = 'results'
submit_config.run_dir_ignore += ['datasets', 'results']
desc = "autoencoder"
# Tensorflow config
# ------------------------------------------------------------------------------------------
tf_config = dnnlib.EasyDict()
tf_config["graph_options.place_pruned_graph"] = True
# Network config
# ------------------------------------------------------------------------------------------
net_config = dnnlib.EasyDict(func_name="network.autoencoder")
# Optimizer config
# ------------------------------------------------------------------------------------------
optimizer_config = dnnlib.EasyDict(beta1=0.9, beta2=0.99, epsilon=1e-8)
# Noise augmentation config
gaussian_noise_config = dnnlib.EasyDict(
func_name='train.AugmentGaussian',
train_stddev_rng_range=(0.0, 50.0),
validation_stddev=25.0
)
poisson_noise_config = dnnlib.EasyDict(
func_name='train.AugmentPoisson',
lam_max=50.0
)
# ------------------------------------------------------------------------------------------
# Preconfigured validation sets
datasets = {
'kodak': dnnlib.EasyDict(dataset_dir='datasets/kodak'),
'bsd300': dnnlib.EasyDict(dataset_dir='datasets/bsd300'),
'set14': dnnlib.EasyDict(dataset_dir='datasets/set14')
}
default_validation_config = datasets['kodak']
corruption_types = {
'gaussian': gaussian_noise_config,
'poisson': poisson_noise_config
}
# Train config
# ------------------------------------------------------------------------------------------
train_config = dnnlib.EasyDict(
iteration_count=300000,
eval_interval=1000,
minibatch_size=4,
run_func_name="train.train",
learning_rate=0.0003,
ramp_down_perc=0.3,
noise=gaussian_noise_config,
# noise=poisson_noise_config,
noise2noise=True,
train_tfrecords='datasets/imagenet_val_raw.tfrecords',
validation_config=default_validation_config
)
# Validation run config
# ------------------------------------------------------------------------------------------
validate_config = dnnlib.EasyDict(
run_func_name="validation.validate",
dataset=default_validation_config,
network_snapshot=None,
noise=gaussian_noise_config
)
# ------------------------------------------------------------------------------------------
def error(*print_args):
print (*print_args)
sys.exit(1)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
# ------------------------------------------------------------------------------------------
examples='''examples:
# Train a network using the BSD300 dataset:
python %(prog)s train --train-tfrecords=datasets/bsd300.tfrecords
# Run a set of images through a pre-trained network:
python %(prog)s validate --network-snapshot=results/network_final.pickle --dataset-dir=datasets/kodak
'''
if __name__ == "__main__":
def train(args):
if args:
n2n = args.noise2noise if 'noise2noise' in args else True
train_config.noise2noise = n2n
if 'long_train' in args and args.long_train:
train_config.iteration_count = 500000
train_config.eval_interval = 5000
train_config.ramp_down_perc = 0.5
else:
print ('running with defaults in train_config')
noise = 'gaussian'
if 'noise' in args:
if args.noise not in corruption_types:
error('Unknown noise type', args.noise)
else:
noise = args.noise
train_config.noise = corruption_types[noise]
if train_config.noise2noise:
submit_config.run_desc += "-n2n"
else:
submit_config.run_desc += "-n2c"
if 'train_tfrecords' in args and args.train_tfrecords is not None:
train_config.train_tfrecords = submit.get_path_from_template(args.train_tfrecords)
print (train_config)
dnnlib.submission.submit.submit_run(submit_config, **train_config)
def validate(args):
if submit_config.submit_target != dnnlib.SubmitTarget.LOCAL:
            print ('Command line overrides are currently supported only in local runs for the validate subcommand')
sys.exit(1)
if args.dataset_dir is None:
error('Must select dataset with --dataset-dir')
else:
validate_config.dataset = {
'dataset_dir': args.dataset_dir
}
if args.noise not in corruption_types:
error('Unknown noise type', args.noise)
validate_config.noise = corruption_types[args.noise]
if args.network_snapshot is None:
error('Must specify trained network filename with --network-snapshot')
validate_config.network_snapshot = args.network_snapshot
dnnlib.submission.submit.submit_run(submit_config, **validate_config)
def infer_image(args):
if submit_config.submit_target != dnnlib.SubmitTarget.LOCAL:
            print ('Command line overrides are currently supported only in local runs for the infer-image subcommand')
sys.exit(1)
if args.image is None:
error('Must specify image file with --image')
if args.out is None:
error('Must specify output image file with --out')
if args.network_snapshot is None:
error('Must specify trained network filename with --network-snapshot')
# Note: there's no dnnlib.submission.submit_run here. This is for quick interactive
# testing, not for long-running training or validation runs.
validation.infer_image(args.network_snapshot, args.image, args.out)
# Train by default
parser = argparse.ArgumentParser(
description='Train a network or run a set of images through a trained network.',
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--desc', default='', help='Append desc to the run descriptor string')
parser.add_argument('--run-dir-root', help='Working dir for a training or a validation run. Will contain training and validation results.')
subparsers = parser.add_subparsers(help='Sub-commands', dest='command')
parser_train = subparsers.add_parser('train', help='Train a network')
parser_train.add_argument('--noise2noise', nargs='?', type=str2bool, const=True, default=True, help='Noise2noise (--noise2noise=true) or noise2clean (--noise2noise=false). Default is noise2noise=true.')
parser_train.add_argument('--noise', default='gaussian', help='Type of noise corruption (one of: gaussian, poisson)')
    parser_train.add_argument('--long-train', nargs='?', type=str2bool, const=True, default=False, help='Train for a very long time (500k iterations, i.e. 500k*minibatch images)')
parser_train.add_argument('--train-tfrecords', help='Filename of the training set tfrecords file')
parser_train.set_defaults(func=train)
parser_validate = subparsers.add_parser('validate', help='Run a set of images through the network')
parser_validate.add_argument('--dataset-dir', help='Load all images from a directory (*.png, *.jpg/jpeg, *.bmp)')
parser_validate.add_argument('--network-snapshot', help='Trained network pickle')
parser_validate.add_argument('--noise', default='gaussian', help='Type of noise corruption (one of: gaussian, poisson)')
parser_validate.set_defaults(func=validate)
parser_infer_image = subparsers.add_parser('infer-image', help='Run one image through the network without adding any noise')
parser_infer_image.add_argument('--image', help='Image filename')
parser_infer_image.add_argument('--out', help='Output filename')
parser_infer_image.add_argument('--network-snapshot', help='Trained network pickle')
parser_infer_image.set_defaults(func=infer_image)
args = parser.parse_args()
submit_config.run_desc = desc + args.desc
if args.run_dir_root is not None:
submit_config.run_dir_root = args.run_dir_root
if args.command is not None:
args.func(args)
else:
# Train if no subcommand was given
train(args)
# File: noise2noise-master/util.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import numpy as np
import pickle
import PIL.Image
import dnnlib.submission.submit as submit
# save_pkl, load_pkl are used by the mri code to save datasets
def save_pkl(obj, filename):
with open(filename, 'wb') as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
def load_pkl(filename):
with open(filename, 'rb') as file:
return pickle.load(file)
# save_snapshot, load_snapshot are used to save/restore trained networks
def save_snapshot(submit_config, net, fname_postfix):
dump_fname = os.path.join(submit_config.run_dir, "network_%s.pickle" % fname_postfix)
with open(dump_fname, "wb") as f:
pickle.dump(net, f)
def load_snapshot(fname):
fname = os.path.join(submit.get_path_from_template(fname))
with open(fname, "rb") as f:
return pickle.load(f)
def save_image(submit_config, img_t, filename):
t = img_t.transpose([1, 2, 0]) # [RGB, H, W] -> [H, W, RGB]
if t.dtype in [np.float32, np.float64]:
t = clip_to_uint8(t)
else:
assert t.dtype == np.uint8
PIL.Image.fromarray(t, 'RGB').save(os.path.join(submit_config.run_dir, filename))
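# clip_to_uint8 assumes the network operates on images normalized to roughly
# [-0.5, 0.5]: the mapping below is (x + 0.5) * 255, with an extra +0.5 added
# before the clip so the cast to uint8 rounds to nearest instead of truncating.
# Illustrative check (not part of the original file):
#   clip_to_uint8(np.array([-0.5, 0.0, 0.5]))  # -> [0, 128, 255]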
def clip_to_uint8(arr):
return np.clip((arr + 0.5) * 255.0 + 0.5, 0, 255).astype(np.uint8)
def crop_np(img, x, y, w, h):
return img[:, y:h, x:w]
# Run an image through the network (applying reflect padding when needed,
# then cropping back to the original dimensions).
def infer_image(net, img):
w = img.shape[2]
h = img.shape[1]
pw, ph = (w+31)//32*32-w, (h+31)//32*32-h
padded_img = img
if pw!=0 or ph!=0:
padded_img = np.pad(img, ((0,0),(0,ph),(0,pw)), 'reflect')
inferred = net.run(np.expand_dims(padded_img, axis=0), width=w+pw, height=h+ph)
return clip_to_uint8(crop_np(inferred[0], 0, 0, w, h))
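# The rounding up to multiples of 32 presumably matches the number of 2x
# downsampling stages in the autoencoder (2**5 = 32); reflect padding avoids
# the border artifacts that zero padding would introduce.
# Example (illustrative): a 768x512 image needs no padding, while a 500x333
# image is padded to 512x352 for inference and cropped back afterwards.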
# File: noise2noise-master/dataset_tool_tf.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import glob
import os
import sys
import argparse
import tensorflow as tf
import PIL.Image
import numpy as np
from collections import defaultdict
size_stats = defaultdict(int)
format_stats = defaultdict(int)
def load_image(fname):
global format_stats, size_stats
im = PIL.Image.open(fname)
format_stats[im.mode] += 1
if (im.width < 256 or im.height < 256):
size_stats['< 256x256'] += 1
else:
size_stats['>= 256x256'] += 1
arr = np.array(im.convert('RGB'), dtype=np.uint8)
assert len(arr.shape) == 3
return arr.transpose([2, 0, 1])
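# Note: load_image returns images in CHW (channels-first) order; the same
# shape tuple is written to the tfrecords 'shape' feature below.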
def shape_feature(v):
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
examples='''examples:
python %(prog)s --input-dir=./kodak --out=imagenet_val_raw.tfrecords
'''
def main():
parser = argparse.ArgumentParser(
description='Convert a set of image files into a TensorFlow tfrecords training set.',
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("--input-dir", help="Directory containing ImageNet images")
parser.add_argument("--out", help="Filename of the output tfrecords file")
args = parser.parse_args()
if args.input_dir is None:
print ('Must specify input file directory with --input-dir')
sys.exit(1)
if args.out is None:
print ('Must specify output filename with --out')
sys.exit(1)
print ('Loading image list from %s' % args.input_dir)
images = sorted(glob.glob(os.path.join(args.input_dir, '*.JPEG')))
images += sorted(glob.glob(os.path.join(args.input_dir, '*.jpg')))
images += sorted(glob.glob(os.path.join(args.input_dir, '*.png')))
np.random.RandomState(0x1234f00d).shuffle(images)
#----------------------------------------------------------
    outdir = os.path.dirname(args.out)
    if outdir:  # guard: args.out may be a bare filename with no directory part
        os.makedirs(outdir, exist_ok=True)
writer = tf.python_io.TFRecordWriter(args.out)
for (idx, imgname) in enumerate(images):
print (idx, imgname)
image = load_image(imgname)
feature = {
'shape': shape_feature(image.shape),
            'data': bytes_feature(tf.compat.as_bytes(image.tobytes()))  # tobytes() replaces the deprecated tostring()
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
print ('Dataset statistics:')
print (' Formats:')
for key in format_stats:
print (' %s: %d images' % (key, format_stats[key]))
print (' width,height buckets:')
for key in size_stats:
print (' %s: %d images' % (key, size_stats[key]))
writer.close()
if __name__ == "__main__":
main()
# File: noise2noise-master/dataset_tool_mri.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
# See README.md in this directory for instructions on how to use this script.
import re
import argparse
import glob
import os
import PIL.Image
import numpy as np
import sys
import util
import nibabel as nib
OUT_RESOLUTION = 256
# Select z-slices from [25,124]
slice_min = 25
slice_max = 125
# Select train and validation subsets from IXI-T1 (these two lists shouldn't overlap)
train_basenames=['IXI002-Guys-0828', 'IXI012-HH-1211', 'IXI013-HH-1212', 'IXI014-HH-1236', 'IXI015-HH-1258', 'IXI016-Guys-0697', 'IXI017-Guys-0698', 'IXI019-Guys-0702', 'IXI020-Guys-0700', 'IXI021-Guys-0703', 'IXI022-Guys-0701', 'IXI023-Guys-0699', 'IXI024-Guys-0705', 'IXI025-Guys-0852', 'IXI026-Guys-0696', 'IXI027-Guys-0710', 'IXI028-Guys-1038', 'IXI029-Guys-0829', 'IXI030-Guys-0708', 'IXI031-Guys-0797', 'IXI033-HH-1259', 'IXI034-HH-1260', 'IXI035-IOP-0873', 'IXI036-Guys-0736', 'IXI037-Guys-0704', 'IXI038-Guys-0729', 'IXI039-HH-1261', 'IXI040-Guys-0724', 'IXI041-Guys-0706', 'IXI042-Guys-0725', 'IXI043-Guys-0714', 'IXI044-Guys-0712', 'IXI045-Guys-0713', 'IXI046-Guys-0824', 'IXI048-HH-1326', 'IXI049-HH-1358', 'IXI050-Guys-0711', 'IXI051-HH-1328', 'IXI052-HH-1343', 'IXI053-Guys-0727', 'IXI054-Guys-0707', 'IXI055-Guys-0730', 'IXI056-HH-1327', 'IXI057-HH-1342', 'IXI058-Guys-0726', 'IXI059-HH-1284', 'IXI060-Guys-0709', 'IXI061-Guys-0715', 'IXI062-Guys-0740', 'IXI063-Guys-0742']
valid_basenames=['IXI064-Guys-0743', 'IXI065-Guys-0744', 'IXI066-Guys-0731', 'IXI067-HH-1356', 'IXI068-Guys-0756', 'IXI069-Guys-0769', 'IXI070-Guys-0767', 'IXI071-Guys-0770', 'IXI072-HH-2324', 'IXI073-Guys-0755']
def fftshift2d(x, ifft=False):
assert (len(x.shape) == 2) and all([(s % 2 == 1) for s in x.shape])
s0 = (x.shape[0] // 2) + (0 if ifft else 1)
s1 = (x.shape[1] // 2) + (0 if ifft else 1)
x = np.concatenate([x[s0:, :], x[:s0, :]], axis=0)
x = np.concatenate([x[:, s1:], x[:, :s1]], axis=1)
return x
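# Illustrative behavior (not part of the original file): for the odd-sized
# inputs enforced by the assert above, the two modes are exact inverses:
#   x = np.arange(25).reshape(5, 5)
#   np.array_equal(fftshift2d(fftshift2d(x), ifft=True), x)  # -> True
# Cropping to odd dimensions (see preprocess_mri below) makes the spectrum
# center unambiguous.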
def preprocess_mri(input_files,
output_file):
all_files = sorted(input_files)
num_images = len(all_files)
print('Input images: %d' % num_images)
assert num_images > 0
resolution = np.asarray(PIL.Image.open(all_files[0]), dtype=np.uint8).shape
assert len(resolution) == 2 # Expect monochromatic images
print('Image resolution: %s' % str(resolution))
crop_size = tuple([((r - 1) | 1) for r in resolution])
crop_slice = np.s_[:crop_size[0], :crop_size[1]]
print('Crop size: %s' % str(crop_size))
img_primal = np.zeros((num_images,) + resolution, dtype=np.uint8)
img_spectrum = np.zeros((num_images,) + crop_size, dtype=np.complex64)
print('Processing input files..')
for i, fn in enumerate(all_files):
if i % 100 == 0:
print('%d / %d ..' % (i, num_images))
img = np.asarray(PIL.Image.open(fn), dtype=np.uint8)
img_primal[i] = img
img = img.astype(np.float32) / 255.0 - 0.5
img = img[crop_slice]
spec = np.fft.fft2(img).astype(np.complex64)
spec = fftshift2d(spec)
img_spectrum[i] = spec
print('Saving: %s' % output_file)
util.save_pkl((img_primal, img_spectrum), output_file)
def genpng(args):
if args.outdir is None:
print ('Must specify output directory with --outdir')
sys.exit(1)
if args.ixi_dir is None:
print ('Must specify input IXI-T1 directory with --ixi-dir')
sys.exit(1)
mri_directory = args.ixi_dir
out_directory = args.outdir
os.makedirs(out_directory, exist_ok=True)
nii_files = glob.glob(os.path.join(mri_directory, "*.nii.gz"))
for nii_file in nii_files:
print('Processing', nii_file)
nii_img = nib.load(nii_file)
name = os.path.basename(nii_file).split(".")[0]
print("name", name)
hborder = (np.asarray([OUT_RESOLUTION, OUT_RESOLUTION]) - nii_img.shape[0:2]) // 2
print("Img: ", nii_img.shape, " border: ", hborder)
# Normalize image to [0,1]
        img = nii_img.get_fdata().astype(np.float32)  # get_fdata() replaces the deprecated get_data()
img = img / np.max(img)
print('Max value', np.max(img))
        # Slice along the z dimension
for s in range(slice_min, slice_max):
            slice_img = img[:, :, s]  # renamed to avoid shadowing the built-in 'slice'
            # Pad to output resolution by inserting zeros
            output = np.zeros([OUT_RESOLUTION, OUT_RESOLUTION])
            output[hborder[0] : hborder[0] + nii_img.shape[0], hborder[1] : hborder[1] + nii_img.shape[1]] = slice_img
output = np.minimum(output, 1.0)
output = np.maximum(output, 0.0)
output = output * 255
            # Save to png, skipping empty (all-black) slices
            if np.max(output) > 1.0:
outname = os.path.join(out_directory, "%s_%03d.png" % (name, s))
PIL.Image.fromarray(output).convert('L').save(outname)
def make_slice_name(basename, slice_idx):
return basename + ('-T1_%03d.png' % slice_idx)
def genpkl(args):
if args.png_dir is None:
        print ('Must specify PNG directory with --png-dir')
sys.exit(1)
if args.pkl_dir is None:
        print ('Must specify PKL output directory with --pkl-dir')
sys.exit(1)
input_train_files = []
input_valid_files = []
for base in train_basenames:
for sidx in range(slice_min, slice_max):
input_train_files.append(os.path.join(args.png_dir, make_slice_name(base, sidx)))
for base in valid_basenames:
for sidx in range(slice_min, slice_max):
input_valid_files.append(os.path.join(args.png_dir, make_slice_name(base, sidx)))
print ('Num train samples', len(input_train_files))
print ('Num valid samples', len(input_valid_files))
preprocess_mri(input_files=input_train_files, output_file=os.path.join(args.pkl_dir, 'ixi_train.pkl'))
preprocess_mri(input_files=input_valid_files, output_file=os.path.join(args.pkl_dir, 'ixi_valid.pkl'))
def extract_basenames(lst):
s = set()
    name_re = re.compile(r'^(.*)-T1_[0-9]+\.png')
for fname in lst:
m = name_re.match(os.path.basename(fname))
if m:
s.add(m[1])
return sorted(list(s))
examples='''examples:
# Convert the IXI-T1 dataset into a set of PNG image files:
python %(prog)s genpng --ixi-dir=~/Downloads/IXI-T1 --outdir=datasets/ixi-png
# Convert the PNG image files into a Python pickle for use in training:
python %(prog)s genpkl --png-dir=datasets/ixi-png --pkl-dir=datasets
'''
def main():
parser = argparse.ArgumentParser(
description='Convert the IXI-T1 dataset into a format suitable for network training',
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
subparsers = parser.add_subparsers(help='Sub-commands')
parser_genpng = subparsers.add_parser('genpng', help='IXI nifti to PNG converter (intermediate step)')
parser_genpng.add_argument('--ixi-dir', help='Directory pointing to unpacked IXI-T1.tar')
parser_genpng.add_argument('--outdir', help='Directory where to save .PNG files')
parser_genpng.set_defaults(func=genpng)
parser_genpkl = subparsers.add_parser('genpkl', help='PNG to PKL converter (used in training)')
parser_genpkl.add_argument('--png-dir', help='Directory containing .PNGs saved by with the genpng command')
parser_genpkl.add_argument('--pkl-dir', help='Where to save the .pkl files for train and valid sets')
parser_genpkl.set_defaults(func=genpkl)
args = parser.parse_args()
if 'func' not in args:
print ('No command given. Try --help.')
sys.exit(1)
args.func(args)
if __name__ == "__main__":
main()
# File: noise2noise-master/train.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import tensorflow as tf
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
import dnnlib.tflib.tfutil as tfutil
import dnnlib.util as util
import config
from util import save_image, save_snapshot
from validation import ValidationSet
from dataset import create_dataset
class AugmentGaussian:
def __init__(self, validation_stddev, train_stddev_rng_range):
self.validation_stddev = validation_stddev
self.train_stddev_range = train_stddev_rng_range
def add_train_noise_tf(self, x):
(minval,maxval) = self.train_stddev_range
shape = tf.shape(x)
rng_stddev = tf.random_uniform(shape=[1, 1, 1], minval=minval/255.0, maxval=maxval/255.0)
return x + tf.random_normal(shape) * rng_stddev
def add_validation_noise_np(self, x):
return x + np.random.normal(size=x.shape)*(self.validation_stddev/255.0)
class AugmentPoisson:
def __init__(self, lam_max):
self.lam_max = lam_max
def add_train_noise_tf(self, x):
chi_rng = tf.random_uniform(shape=[1, 1, 1], minval=0.001, maxval=self.lam_max)
return tf.random_poisson(chi_rng*(x+0.5), shape=[])/chi_rng - 0.5
def add_validation_noise_np(self, x):
chi = 30.0
return np.random.poisson(chi*(x+0.5))/chi - 0.5
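# Both augmenters assume inputs normalized to [-0.5, 0.5]. The Poisson variant
# shifts to [0, 1], scales by a random event count chi (up to lam_max), draws
# Poisson samples, and rescales, so smaller chi means stronger relative noise;
# validation uses a fixed chi of 30.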
def compute_ramped_down_lrate(i, iteration_count, ramp_down_perc, learning_rate):
ramp_down_start_iter = iteration_count * (1 - ramp_down_perc)
if i >= ramp_down_start_iter:
t = ((i - ramp_down_start_iter) / ramp_down_perc) / iteration_count
smooth = (0.5+np.cos(t * np.pi)/2)**2
return learning_rate * smooth
return learning_rate
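# Illustrative schedule (assuming the defaults in config.py: iteration_count=300k,
# ramp_down_perc=0.3, learning_rate=3e-4): the rate stays at 3e-4 until
# iteration 210k, then follows the cosine-squared ramp, passing ~0.75e-4 at
# i=255k (t=0.5) and reaching 0 at i=300k.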
def train(
submit_config: dnnlib.SubmitConfig,
iteration_count: int,
eval_interval: int,
minibatch_size: int,
learning_rate: float,
ramp_down_perc: float,
noise: dict,
validation_config: dict,
train_tfrecords: str,
noise2noise: bool):
noise_augmenter = dnnlib.util.call_func_by_name(**noise)
validation_set = ValidationSet(submit_config)
validation_set.load(**validation_config)
# Create a run context (hides low level details, exposes simple API to manage the run)
ctx = dnnlib.RunContext(submit_config, config)
# Initialize TensorFlow graph and session using good default settings
tfutil.init_tf(config.tf_config)
dataset_iter = create_dataset(train_tfrecords, minibatch_size, noise_augmenter.add_train_noise_tf)
# Construct the network using the Network helper class and a function defined in config.net_config
with tf.device("/gpu:0"):
net = tflib.Network(**config.net_config)
# Optionally print layer information
net.print_layers()
print('Building TensorFlow graph...')
with tf.name_scope('Inputs'), tf.device("/cpu:0"):
lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
noisy_input, noisy_target, clean_target = dataset_iter.get_next()
noisy_input_split = tf.split(noisy_input, submit_config.num_gpus)
noisy_target_split = tf.split(noisy_target, submit_config.num_gpus)
clean_target_split = tf.split(clean_target, submit_config.num_gpus)
    # Define the loss function using the Optimizer helper class; it takes care of multi-GPU splitting
opt = tflib.Optimizer(learning_rate=lrate_in, **config.optimizer_config)
for gpu in range(submit_config.num_gpus):
with tf.device("/gpu:%d" % gpu):
net_gpu = net if gpu == 0 else net.clone()
denoised = net_gpu.get_output_for(noisy_input_split[gpu])
if noise2noise:
meansq_error = tf.reduce_mean(tf.square(noisy_target_split[gpu] - denoised))
else:
meansq_error = tf.reduce_mean(tf.square(clean_target_split[gpu] - denoised))
# Create an autosummary that will average over all GPUs
with tf.control_dependencies([autosummary("Loss", meansq_error)]):
opt.register_gradients(meansq_error, net_gpu.trainables)
train_step = opt.apply_updates()
# Create a log file for Tensorboard
summary_log = tf.summary.FileWriter(submit_config.run_dir)
summary_log.add_graph(tf.get_default_graph())
print('Training...')
time_maintenance = ctx.get_time_since_last_update()
ctx.update(loss='run %d' % submit_config.run_id, cur_epoch=0, max_epoch=iteration_count)
# The actual training loop
for i in range(iteration_count):
# Whether to stop the training or not should be asked from the context
if ctx.should_stop():
break
# Dump training status
if i % eval_interval == 0:
time_train = ctx.get_time_since_last_update()
time_total = ctx.get_time_since_start()
            # Draw a minibatch of noisy inputs and clean targets by evaluating the dataset tensors
[source_mb, target_mb] = tfutil.run([noisy_input, clean_target])
denoised = net.run(source_mb)
save_image(submit_config, denoised[0], "img_{0}_y_pred.png".format(i))
save_image(submit_config, target_mb[0], "img_{0}_y.png".format(i))
save_image(submit_config, source_mb[0], "img_{0}_x_aug.png".format(i))
validation_set.evaluate(net, i, noise_augmenter.add_validation_noise_np)
print('iter %-10d time %-12s sec/eval %-7.1f sec/iter %-7.2f maintenance %-6.1f' % (
autosummary('Timing/iter', i),
dnnlib.util.format_time(autosummary('Timing/total_sec', time_total)),
autosummary('Timing/sec_per_eval', time_train),
autosummary('Timing/sec_per_iter', time_train / eval_interval),
autosummary('Timing/maintenance_sec', time_maintenance)))
dnnlib.tflib.autosummary.save_summaries(summary_log, i)
ctx.update(loss='run %d' % submit_config.run_id, cur_epoch=i, max_epoch=iteration_count)
time_maintenance = ctx.get_last_update_interval() - time_train
lrate = compute_ramped_down_lrate(i, iteration_count, ramp_down_perc, learning_rate)
tfutil.run([train_step], {lrate_in: lrate})
print("Elapsed time: {0}".format(util.format_time(ctx.get_time_since_start())))
save_snapshot(submit_config, net, 'final')
# Summary log and context should be closed at the end
summary_log.close()
ctx.close()
# File: noise2noise-master/dnnlib/util.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous utility classes and functions."""
import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
# Util classes
# ------------------------------------------------------------------------------------------
class EasyDict(dict):
"""Convenience class that behaves like a dict but allows access with the attribute syntax."""
def __getattr__(self, name: str) -> Any:
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name: str, value: Any) -> None:
self[name] = value
def __delattr__(self, name: str) -> None:
del self[name]
class Logger(object):
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
self.file = None
if file_name is not None:
self.file = open(file_name, file_mode)
self.should_flush = should_flush
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __enter__(self) -> "Logger":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def write(self, text: str) -> None:
"""Write text to stdout (and a file) and optionally flush."""
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
return
if self.file is not None:
self.file.write(text)
self.stdout.write(text)
if self.should_flush:
self.flush()
def flush(self) -> None:
"""Flush written text to both stdout and a file, if open."""
if self.file is not None:
self.file.flush()
self.stdout.flush()
def close(self) -> None:
"""Flush, close possible files, and remove stdout/stderr mirroring."""
self.flush()
# if using multiple loggers, prevent closing in wrong order
if sys.stdout is self:
sys.stdout = self.stdout
if sys.stderr is self:
sys.stderr = self.stderr
if self.file is not None:
self.file.close()
# Small util functions
# ------------------------------------------------------------------------------------------
def format_time(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
else:
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
def ask_yes_no(question: str) -> bool:
"""Ask the user the question until the user inputs a valid answer."""
while True:
try:
print("{0} [y/n]".format(question))
return strtobool(input().lower())
except ValueError:
pass
def tuple_product(t: Tuple) -> Any:
"""Calculate the product of the tuple elements."""
result = 1
for v in t:
result *= v
return result
_str_to_ctype = {
"uint8": ctypes.c_ubyte,
"uint16": ctypes.c_uint16,
"uint32": ctypes.c_uint32,
"uint64": ctypes.c_uint64,
"int8": ctypes.c_byte,
"int16": ctypes.c_int16,
"int32": ctypes.c_int32,
"int64": ctypes.c_int64,
"float32": ctypes.c_float,
"float64": ctypes.c_double
}
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
"""Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
type_str = None
if isinstance(type_obj, str):
type_str = type_obj
elif hasattr(type_obj, "__name__"):
type_str = type_obj.__name__
elif hasattr(type_obj, "name"):
type_str = type_obj.name
else:
raise RuntimeError("Cannot infer type name from input")
assert type_str in _str_to_ctype.keys()
my_dtype = np.dtype(type_str)
my_ctype = _str_to_ctype[type_str]
assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
return my_dtype, my_ctype
def is_pickleable(obj: Any) -> bool:
try:
with io.BytesIO() as stream:
pickle.dump(obj, stream)
return True
    except Exception:
return False
# Functionality to import modules/objects by name, and call functions by name
# ------------------------------------------------------------------------------------------
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
"""Searches for the underlying module behind the name to some python object.
Returns the module and the object name (original name with module part removed)."""
    # allow convenience shorthands (substituted only when they are actual
    # prefixes, so embedded "np."/"tf." substrings are left untouched)
    if obj_name.startswith("np."):
        obj_name = "numpy." + obj_name[len("np."):]
    if obj_name.startswith("tf."):
        obj_name = "tensorflow." + obj_name[len("tf."):]
parts = obj_name.split(".")
for i in range(len(parts), 0, -1):
try:
module_name = ".".join(parts[:i])
module = importlib.import_module(module_name)
obj_name = ".".join(parts[i:])
return module, obj_name
except ImportError:
pass
raise ImportError(obj_name)
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
"""Traverses the object name and returns the last (rightmost) python object."""
obj = module
for part in obj_name.split("."):
obj = getattr(obj, part)
return obj
def get_obj_by_name(name: str) -> Any:
"""Finds the python object with the given name."""
module, obj_name = get_module_from_obj_name(name)
return get_obj_from_module(module, obj_name)
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
"""Finds the python object with the given name and calls it as a function."""
assert func_name is not None
func_obj = get_obj_by_name(func_name)
assert callable(func_obj)
return func_obj(*args, **kwargs)
def get_module_dir_by_obj_name(obj_name: str) -> str:
"""Get the directory path of the module containing the given object name."""
module, _ = get_module_from_obj_name(obj_name)
return os.path.dirname(inspect.getfile(module))
# File system helpers
# ------------------------------------------------------------------------------------------
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
"""List all files recursively in a given directory while ignoring given file and directory names.
Returns list of tuples containing both absolute and relative paths."""
assert os.path.isdir(dir_path)
base_name = os.path.basename(os.path.normpath(dir_path))
if ignores is None:
ignores = []
result = []
for root, dirs, files in os.walk(dir_path, topdown=True):
for ignore_ in ignores:
dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
# dirs need to be edited in-place
for d in dirs_to_remove:
dirs.remove(d)
files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
absolute_paths = [os.path.join(root, f) for f in files]
relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
if add_base_to_relative:
relative_paths = [os.path.join(base_name, p) for p in relative_paths]
assert len(absolute_paths) == len(relative_paths)
result += zip(absolute_paths, relative_paths)
return result
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
"""Takes in a list of tuples of (src, dst) paths and copies files.
Will create all necessary directories."""
for file in files:
target_dir_name = os.path.dirname(file[1])
# will create all intermediate-level directories
if not os.path.exists(target_dir_name):
os.makedirs(target_dir_name)
shutil.copyfile(file[0], file[1])
# File: noise2noise-master/dnnlib/__init__.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import submission
from .submission.run_context import RunContext
from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .util import EasyDict
submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
# File: noise2noise-master/dnnlib/tflib/tfutil.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous helper utils for Tensorflow."""
import numpy as np
import tensorflow as tf
from typing import Any, Iterable, List, Union
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
"""A type that represents a valid Tensorflow expression."""
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
"""A type that can be converted to a valid Tensorflow expression."""
def run(*args, **kwargs) -> Any:
"""Run the specified ops in the default session."""
assert_tf_initialized()
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x: Any) -> bool:
"""Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
"""Convert a Tensorflow shape to a list of ints."""
return [dim.value for dim in shape]
def flatten(x: TfExpressionEx) -> TfExpression:
"""Shortcut function for flattening a tensor."""
with tf.name_scope("Flatten"):
return tf.reshape(x, [-1])
def log2(x: TfExpressionEx) -> TfExpression:
"""Logarithm in base 2."""
with tf.name_scope("Log2"):
return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x: TfExpressionEx) -> TfExpression:
"""Exponent in base 2."""
with tf.name_scope("Exp2"):
return tf.exp(x * np.float32(np.log(2.0)))
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
"""Linear interpolation."""
with tf.name_scope("Lerp"):
return a + (b - a) * t
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
"""Linear interpolation with clip."""
with tf.name_scope("LerpClip"):
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope: str) -> tf.name_scope:
"""Forcefully enter the specified name scope, ignoring any surrounding scopes."""
return tf.name_scope(scope + "/")
def init_tf(config_dict: dict = None) -> None:
"""Initialize TensorFlow session using good default settings."""
if tf.get_default_session() is None:
tf.set_random_seed(np.random.randint(1 << 31))
create_session(config_dict, force_as_default=True)
def assert_tf_initialized():
"""Check that TensorFlow session has been initialized."""
if tf.get_default_session() is None:
raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.tfutil.init_tf().")
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
"""Create tf.Session based on config dict."""
config = tf.ConfigProto()
if config_dict is not None:
for key, value in config_dict.items():
fields = key.split(".")
obj = config
for field in fields[:-1]:
obj = getattr(obj, field)
setattr(obj, fields[-1], value)
session = tf.Session(config=config)
if force_as_default:
# pylint: disable=protected-access
session._default_session = session.as_default()
session._default_session.enforce_nesting = False
session._default_session.__enter__() # pylint: disable=no-member
return session
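# Example config dict (illustrative; the first key mirrors config.py in this
# repository): dotted keys address nested fields of tf.ConfigProto, so
#   create_session({"graph_options.place_pruned_graph": True,
#                   "gpu_options.allow_growth": True})
# sets config.graph_options.place_pruned_graph and config.gpu_options.allow_growth.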
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
"""Initialize all tf.Variables that have not already been initialized.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tf.variables_initializer(tf.report_uninitialized_variables()).run()
"""
assert_tf_initialized()
if target_vars is None:
target_vars = tf.global_variables()
test_vars = []
test_ops = []
with tf.control_dependencies(None): # ignore surrounding control_dependencies
for var in target_vars:
assert is_tf_expression(var)
try:
tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
except KeyError:
# Op does not exist => variable may be uninitialized.
test_vars.append(var)
with absolute_name_scope(var.name.split(":")[0]):
test_ops.append(tf.is_variable_initialized(var))
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
run([var.initializer for var in init_vars])
def set_vars(var_to_value_dict: dict) -> None:
"""Set the values of given tf.Variables.
Equivalent to the following, but more efficient and does not bloat the tf graph:
    tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
"""
assert_tf_initialized()
ops = []
feed_dict = {}
for var, value in var_to_value_dict.items():
assert is_tf_expression(var)
try:
setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op
except KeyError:
with absolute_name_scope(var.name.split(":")[0]):
with tf.control_dependencies(None): # ignore surrounding control_dependencies
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter
ops.append(setter)
feed_dict[setter.op.inputs[1]] = value
run(ops, feed_dict)
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
"""Create tf.Variable with large initial value without bloating the tf graph."""
assert_tf_initialized()
assert isinstance(initial_value, np.ndarray)
zeros = tf.zeros(initial_value.shape, initial_value.dtype)
var = tf.Variable(zeros, *args, **kwargs)
set_vars({var: initial_value})
return var
# File: noise2noise-master/dnnlib/tflib/autosummary.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for adding automatically tracked values to Tensorboard.
Autosummary creates an identity op that internally keeps track of the input
values and automatically shows up in TensorBoard. The reported value
represents an average over input components. The average is accumulated
constantly over time and flushed when save_summaries() is called.
Notes:
- The output tensor must be used as an input for something else in the
graph. Otherwise, the autosummary op will not get executed, and the average
value will not get accumulated.
- It is perfectly fine to include autosummaries with the same name in
several places throughout the graph, even if they are executed concurrently.
- It is ok to also pass in a python scalar or numpy array. In this case, it
is added to the average immediately.
"""
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2
from . import tfutil
from .tfutil import TfExpression
from .tfutil import TfExpressionEx
_dtype = tf.float64
_vars = OrderedDict() # name => [var, ...]
_immediate = OrderedDict() # name => update_op, update_value
_finalized = False
_merge_op = None
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
"""Internal helper for creating autosummary accumulators."""
assert not _finalized
name_id = name.replace("/", "_")
v = tf.cast(value_expr, _dtype)
if v.shape.is_fully_defined():
size = np.prod(tfutil.shape_to_list(v.shape))
size_expr = tf.constant(size, dtype=_dtype)
else:
size = None
size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))
if size == 1:
if v.shape.ndims != 0:
v = tf.reshape(v, [])
v = [size_expr, v, tf.square(v)]
else:
v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]
update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
if name in _vars:
_vars[name].append(var)
else:
_vars[name] = [var]
return update_op
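# The accumulator var holds [count, sum(x), sum(x**2)]; finalize_autosummaries()
# divides by the count and recovers mean = m1 and std = sqrt(m2 - m1**2), the
# streaming-moments identity Var[x] = E[x^2] - E[x]^2. Non-finite inputs are
# dropped by the tf.cond above rather than poisoning the running average.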
def autosummary(name: str, value: TfExpressionEx) -> TfExpressionEx:
"""Create a new autosummary."""
tfutil.assert_tf_initialized()
name_id = name.replace("/", "_")
if tfutil.is_tf_expression(value):
with tf.name_scope("summary_" + name_id), tf.device(value.device):
update_op = _create_var(name, value)
with tf.control_dependencies([update_op]):
return tf.identity(value)
else: # python scalar or numpy array
if name not in _immediate:
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
update_value = tf.placeholder(_dtype)
update_op = _create_var(name, update_value)
_immediate[name] = update_op, update_value
update_op, update_value = _immediate[name]
tfutil.run(update_op, {update_value: value})
return value
def finalize_autosummaries() -> None:
"""Create the necessary ops to include autosummaries in TensorBoard report.
Note: This should be done only once per graph.
"""
global _finalized
tfutil.assert_tf_initialized()
if _finalized:
return None
_finalized = True
tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
# Create summary ops.
with tf.device(None), tf.control_dependencies(None):
for name, vars_list in _vars.items():
name_id = name.replace("/", "_")
with tfutil.absolute_name_scope("Autosummary/" + name_id):
moments = tf.add_n(vars_list)
moments /= moments[0]
with tf.control_dependencies([moments]): # read before resetting
reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
mean = moments[1]
std = tf.sqrt(moments[2] - tf.square(moments[1]))
tf.summary.scalar(name, mean)
tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)
# Group by category and chart name.
cat_dict = OrderedDict()
for series_name in sorted(_vars.keys()):
p = series_name.split("/")
cat = p[0] if len(p) >= 2 else ""
chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
if cat not in cat_dict:
cat_dict[cat] = OrderedDict()
if chart not in cat_dict[cat]:
cat_dict[cat][chart] = []
cat_dict[cat][chart].append(series_name)
# Setup custom_scalar layout.
categories = []
for cat_name, chart_dict in cat_dict.items():
charts = []
for chart_name, series_names in chart_dict.items():
series = []
for series_name in series_names:
series.append(layout_pb2.MarginChartContent.Series(
value=series_name,
lower="xCustomScalars/" + series_name + "/margin_lo",
upper="xCustomScalars/" + series_name + "/margin_hi"))
margin = layout_pb2.MarginChartContent(series=series)
charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
categories.append(layout_pb2.Category(title=cat_name, chart=charts))
layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
return layout
def save_summaries(file_writer, global_step=None):
"""Call FileWriter.add_summary() with all summaries in the default graph,
automatically finalizing and merging them on the first call.
"""
global _merge_op
tfutil.assert_tf_initialized()
if _merge_op is None:
layout = finalize_autosummaries()
if layout is not None:
file_writer.add_summary(layout)
with tf.device(None), tf.control_dependencies(None):
_merge_op = tf.summary.merge_all()
file_writer.add_summary(_merge_op.eval(), global_step)
# File: noise2noise-master/dnnlib/tflib/network.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for managing networks."""
import types
import inspect
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import List, Tuple, Union
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_import_modules = []  # Temporary modules created during pickle import.
def import_handler(handler_func):
"""Function decorator for declaring custom import handlers."""
_import_handlers.append(handler_func)
return handler_func
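# Illustrative handler (hypothetical, not part of the original file): a handler
# receives the pickled state dict from Network.__setstate__ and must return it,
# optionally migrating legacy fields first:
#
#   @import_handler
#   def _upgrade_legacy_state(state):
#       state.setdefault("version", 2)
#       return state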
class Network:
"""Generic network abstraction.
Acts as a convenience wrapper for a parameterized network construction
function, providing several utility methods and convenient access to
the inputs/outputs/weights.
Network objects can be safely pickled and unpickled for long-term
archival purposes. The pickling works reliably as long as the underlying
network construction function is defined in a standalone Python module
that has no side effects or application-specific imports.
Args:
name: Network name. Used to select TensorFlow name and variable scopes.
func_name: Fully qualified name of the underlying network construction function.
static_kwargs: Keyword arguments to be passed in to the network construction function.
Attributes:
name: User-specified name, defaults to build func name if None.
scope: Unique TF graph scope, derived from the user-specified name.
static_kwargs: Arguments passed to the user-supplied build func.
num_inputs: Number of input tensors.
num_outputs: Number of output tensors.
input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
input_shape: Short-hand for input_shapes[0].
output_shape: Short-hand for output_shapes[0].
input_templates: Input placeholders in the template graph.
output_templates: Output tensors in the template graph.
input_names: Name string for each input.
output_names: Name string for each output.
vars: All variables (local_name => var).
trainables: Trainable variables (local_name => var).
"""
def __init__(self, name: str = None, func_name: str = None, **static_kwargs):
tfutil.assert_tf_initialized()
assert isinstance(name, str) or name is None
assert isinstance(func_name, str) # must not be None
assert util.is_pickleable(static_kwargs)
self._init_fields()
self.name = name
self.static_kwargs = util.EasyDict(static_kwargs)
# Init build func.
module, self._build_func_name = util.get_module_from_obj_name(func_name)
self._build_module_src = inspect.getsource(module)
self._build_func = util.get_obj_from_module(module, self._build_func_name)
# Init graph.
self._init_graph()
self.reset_vars()
def _init_fields(self) -> None:
self.name = None
self.scope = None
self.static_kwargs = util.EasyDict()
self.num_inputs = 0
self.num_outputs = 0
self.input_shapes = [[]]
self.output_shapes = [[]]
self.input_shape = []
self.output_shape = []
self.input_templates = []
self.output_templates = []
self.input_names = []
self.output_names = []
self.vars = OrderedDict()
self.trainables = OrderedDict()
self._build_func = None # User-supplied build function that constructs the network.
self._build_func_name = None # Name of the build function.
self._build_module_src = None # Full source code of the module containing the build function.
self._run_cache = dict() # Cached graph data for Network.run().
def _init_graph(self) -> None:
# Collect inputs.
self.input_names = []
for param in inspect.signature(self._build_func).parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
self.input_names.append(param.name)
self.num_inputs = len(self.input_names)
assert self.num_inputs >= 1
# Choose name and scope.
if self.name is None:
self.name = self._build_func_name
self.scope = tf.get_default_graph().unique_name(self.name.replace("/", "_"), mark_as_used=False)
# Build template graph.
with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
assert tf.get_variable_scope().name == self.scope
with tfutil.absolute_name_scope(self.scope): # ignore surrounding name_scope
with tf.control_dependencies(None): # ignore surrounding control_dependencies
self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
assert callable(self._build_func)
out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
# Collect outputs.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
self.num_outputs = len(self.output_templates)
assert self.num_outputs >= 1
assert all(tfutil.is_tf_expression(t) for t in self.output_templates)
# Check that input and output shapes are defined.
if any(t.shape.ndims is None for t in self.input_templates):
raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
if any(t.shape.ndims is None for t in self.output_templates):
raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
# Populate remaining fields.
self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates]
self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates]
self.input_shape = self.input_shapes[0]
self.output_shape = self.output_shapes[0]
self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
self.vars = OrderedDict([(self.get_var_local_name(var), var) for var in tf.global_variables(self.scope + "/")])
self.trainables = OrderedDict([(self.get_var_local_name(var), var) for var in tf.trainable_variables(self.scope + "/")])
def reset_vars(self) -> None:
"""Run initializers for all variables defined by this network."""
tfutil.run([var.initializer for var in self.vars.values()])
def reset_trainables(self) -> None:
"""Run initializers for all trainable variables defined by this network."""
tfutil.run([var.initializer for var in self.trainables.values()])
def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
"""Get TensorFlow expression(s) for the output(s) of this network, given the inputs."""
assert len(in_expr) == self.num_inputs
assert not all(expr is None for expr in in_expr)
all_kwargs = dict(self.static_kwargs)
all_kwargs.update(dynamic_kwargs)
with tf.variable_scope(self.scope, reuse=True):
assert tf.get_variable_scope().name == self.scope
valid_inputs = [expr for expr in in_expr if expr is not None]
final_inputs = []
for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
if expr is not None:
expr = tf.identity(expr, name=name)
else:
expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
final_inputs.append(expr)
assert callable(self._build_func)
out_expr = self._build_func(*final_inputs, **all_kwargs)
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
if return_as_list:
out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
return out_expr
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
"""Get the local name of a given variable, excluding any surrounding name scopes."""
assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
assert global_name.startswith(self.scope + "/")
local_name = global_name[len(self.scope) + 1:]
local_name = local_name.split(":")[0]
return local_name
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
"""Find variable by local or global name."""
assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
"""Get the value of a given variable as NumPy array.
Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible."""
return self.find_var(var_or_local_name).eval()
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
"""Set the value of a given variable based on the given NumPy array.
Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible."""
tfutil.set_vars({self.find_var(var_or_local_name): new_value})
def __getstate__(self) -> dict:
"""Pickle export."""
return {
"version": 2,
"name": self.name,
"static_kwargs": dict(self.static_kwargs),
"build_module_src": self._build_module_src,
"build_func_name": self._build_func_name,
"variables": list(zip(self.vars.keys(), tfutil.run(list(self.vars.values()))))}
def __setstate__(self, state: dict) -> None:
"""Pickle import."""
tfutil.assert_tf_initialized()
self._init_fields()
# Execute custom import handlers.
for handler in _import_handlers:
state = handler(state)
# Set basic fields.
assert state["version"] == 2
self.name = state["name"]
self.static_kwargs = util.EasyDict(state["static_kwargs"])
self._build_module_src = state["build_module_src"]
self._build_func_name = state["build_func_name"]
# Parse imported module.
module = types.ModuleType("_tfutil_network_import_module_%d" % len(_import_modules))
exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used
self._build_func = util.get_obj_from_module(module, self._build_func_name)
_import_modules.append(module) # avoid gc
# Init graph.
self._init_graph()
self.reset_vars()
tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
def clone(self, name: str = None) -> "Network":
"""Create a clone of this network with its own copy of the variables."""
# pylint: disable=protected-access
net = object.__new__(Network)
net._init_fields()
net.name = name if name is not None else self.name
net.static_kwargs = util.EasyDict(self.static_kwargs)
net._build_module_src = self._build_module_src
net._build_func_name = self._build_func_name
net._build_func = self._build_func
net._init_graph()
net.copy_vars_from(self)
return net
def copy_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network."""
names = [name for name in self.vars.keys() if name in src_net.vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_trainables_from(self, src_net: "Network") -> None:
"""Copy the values of all trainable variables from the given network."""
names = [name for name in self.trainables.keys() if name in src_net.trainables]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
"""Create new network with the given parameters, and copy all variables from this network."""
if new_name is None:
new_name = self.name
static_kwargs = dict(self.static_kwargs)
static_kwargs.update(new_static_kwargs)
net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
net.copy_vars_from(self)
return net
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
"""Construct a TensorFlow op that updates the variables of this network
to be slightly closer to those of the given network."""
with tfutil.absolute_name_scope(self.scope):
with tf.name_scope("MovingAvg"):
ops = []
for name, var in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
ops.append(var.assign(new_value))
return tf.group(*ops)
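# Hedged NumPy sketch (added for illustration; np is this module's existing
# numpy import) of the update built above: tfutil.lerp(src, var, beta) =
# src + (var - src) * beta, so each MovingAvg step keeps a fraction beta of
# the averaged weights and absorbs (1 - beta) of the source network's weights.
def _ema_step(avg_w: np.ndarray, src_w: np.ndarray, beta: float = 0.99) -> np.ndarray:
    return src_w + (avg_w - src_w) * beta  # == beta * avg_w + (1 - beta) * src_w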
def run(self,
*in_arrays: Tuple[Union[np.ndarray, None], ...],
return_as_list: bool = False,
print_progress: bool = False,
minibatch_size: int = None,
num_gpus: int = 1,
assume_frozen: bool = False,
out_mul: float = 1.0,
out_add: float = 0.0,
out_shrink: int = 1,
out_dtype: np.dtype = None,
**dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
"""Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
Args:
return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
print_progress: Print progress to the console? Useful for very large input arrays.
minibatch_size: Maximum minibatch size to use, None = disable batching.
num_gpus: Number of GPUs to use.
assume_frozen: Improve multi-GPU perf by assuming that trainables are not going to change.
out_mul: Multiplicative constant to apply to the output(s).
out_add: Additive constant to apply to the output(s).
out_shrink: Shrink the spatial dimensions of the output(s) by the given factor.
out_dtype: Convert the output to the specified data type.
dynamic_kwargs: Additional keyword arguments to pass into the network construction function.
"""
assert len(in_arrays) == self.num_inputs
assert not all(arr is None for arr in in_arrays)
num_items = in_arrays[0].shape[0]
if minibatch_size is None:
minibatch_size = num_items
key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])
# Build graph.
if key not in self._run_cache:
with tfutil.absolute_name_scope(self.scope + "/Run"), tf.control_dependencies(None):
with tf.device("/cpu:0"):
in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
out_split = []
for gpu in range(num_gpus):
with tf.device("/gpu:%d" % gpu):
net = self.clone() if assume_frozen else self
out_expr = net.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
if out_mul != 1.0:
out_expr = [x * out_mul for x in out_expr]
if out_add != 0.0:
out_expr = [x + out_add for x in out_expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in out_expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
out_expr = [tf.round(x) for x in out_expr]
out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
out_split.append(out_expr)
with tf.device("/cpu:0"):
out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
self._run_cache[key] = in_expr, out_expr
# Run minibatches.
in_expr, out_expr = self._run_cache[key]
out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
for mb_begin in range(0, num_items, minibatch_size):
if print_progress:
print("\r%d / %d" % (mb_begin, num_items), end="")
mb_end = min(mb_begin + minibatch_size, num_items)
mb_num = mb_end - mb_begin
mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
for dst, src in zip(out_arrays, mb_out):
dst[mb_begin: mb_end] = src
# Done.
if print_progress:
print("\r%d / %d" % (num_items, num_items))
if not return_as_list:
out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
return out_arrays
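# Hedged usage sketch (added; `net` is assumed to be an already-built,
# single-input Network living in an initialized TF session):
def _denoise_in_minibatches(net, noisy_images):
    # noisy_images: float32 NCHW ndarray. run() builds (and caches) the feed
    # graph, slices the array into chunks of 8, and concatenates the results.
    return net.run(noisy_images, minibatch_size=8, print_progress=True)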
def list_ops(self) -> List[TfExpression]:
    """List all TensorFlow ops created within this network's scope."""
    prefix = self.scope + '/'
    return [op for op in tf.get_default_graph().get_operations() if op.name.startswith(prefix)]
def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
"""Returns a list of (name, output_expr, trainable_vars) tuples corresponding to
individual layers of the network. Mainly intended to be used for reporting."""
layers = []
def recurse(scope, parent_ops, level):
prefix = scope + "/"
ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)]
# Ignore specific patterns.
if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat"]):
return
# Does not contain leaf nodes => expand immediate children.
if level == 0 or all("/" in op.name[len(prefix):] for op in ops):
visited = set()
for op in ops:
suffix = op.name[len(prefix):]
if "/" in suffix:
suffix = suffix[:suffix.index("/")]
if suffix not in visited:
recurse(prefix + suffix, ops, level + 1)
visited.add(suffix)
return
# Filter out irrelevant ops within variable name scopes.
layer_vars = [op for op in ops if op.type.startswith("Variable")]
for var in layer_vars:
prefix = var.name + "/"
ops = [op for op in ops if not op.name.startswith(prefix)]
# Dig up the details for this layer.
layer_name = scope[len(self.scope) + 1:]
layer_output = ops[-1].outputs[0]
layer_trainables = [op.outputs[0] for op in layer_vars if self.get_var_local_name(op.name) in self.trainables]
layers.append((layer_name, layer_output, layer_trainables))
recurse(self.scope, self.list_ops(), 0)
return layers
def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
"""Print a summary table of the network structure."""
if title is None:
title = self.name
print()
print("%-28s%-12s%-24s%-24s" % (title, "Params", "OutputShape", "WeightShape"))
print("%-28s%-12s%-24s%-24s" % (("---",) * 4))
total_params = 0
for layer_name, layer_output, layer_trainables in self.list_layers():
weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables)
total_params += num_params
if hide_layers_with_no_params and num_params == 0:
continue
print("%-28s%-12s%-24s%-24s" % (
layer_name,
num_params if num_params else "-",
layer_output.shape,
weights[0].shape if len(weights) == 1 else "-"))
print("%-28s%-12s%-24s%-24s" % (("---",) * 4))
print("%-28s%-12s%-24s%-24s" % ("Total", total_params, "", ""))
print()
def setup_weight_histograms(self, title: str = None) -> None:
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
if title is None:
title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for local_name, var in self.trainables.items():
if "/" in local_name:
p = local_name.split("/")
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
else:
name = title + "_toplevel/" + local_name
tf.summary.histogram(name, var)
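# Hedged sketch (appended for illustration): because __getstate__/__setstate__
# above pickle the build function's source code together with all variable
# values, a Network survives a plain pickle round-trip; unpickling re-execs
# the stored source and restores the variables (TF must be initialized first).
import pickle
def _save_network(net: Network, path: str) -> None:
    with open(path, "wb") as f:
        pickle.dump(net, f)
def _load_network(path: str) -> Network:
    with open(path, "rb") as f:
        return pickle.load(f)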
| 22,715 | 45.644764 | 151 | py |
noise2noise | noise2noise-master/dnnlib/tflib/__init__.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import autosummary
from . import network
from . import optimizer
from . import tfutil
from .network import Network
from .optimizer import Optimizer
| 500 | 30.3125 | 76 | py |
noise2noise | noise2noise-master/dnnlib/tflib/optimizer.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper wrapper for a Tensorflow optimizer."""
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import List, Union
from . import autosummary
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
class Optimizer:
"""A Wrapper for tf.train.Optimizer.
Automatically takes care of:
- Gradient averaging for multi-GPU training.
- Dynamic loss scaling and typecasts for FP16 training.
- Ignoring corrupted gradients that contain NaNs/Infs.
- Reporting statistics.
- Well-chosen default settings.
"""
def __init__(self,
name: str = "Train",
tf_optimizer: str = "tf.train.AdamOptimizer",
learning_rate: TfExpressionEx = 0.001,
use_loss_scaling: bool = False,
loss_scaling_init: float = 64.0,
loss_scaling_inc: float = 0.0005,
loss_scaling_dec: float = 1.0,
**kwargs):
# Init fields.
self.name = name
self.learning_rate = tf.convert_to_tensor(learning_rate)
self.id = self.name.replace("/", ".")
self.scope = tf.get_default_graph().unique_name(self.id)
self.optimizer_class = util.get_obj_by_name(tf_optimizer)
self.optimizer_kwargs = dict(kwargs)
self.use_loss_scaling = use_loss_scaling
self.loss_scaling_init = loss_scaling_init
self.loss_scaling_inc = loss_scaling_inc
self.loss_scaling_dec = loss_scaling_dec
self._grad_shapes = None # [shape, ...]
self._dev_opt = OrderedDict() # device => optimizer
self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...]
self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor)
self._updates_applied = False
def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
"""Register the gradients of the given loss function with respect to the given variables.
Intended to be called once per GPU."""
assert not self._updates_applied
# Validate arguments.
if isinstance(trainable_vars, dict):
trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars
assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
if self._grad_shapes is None:
self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars]
assert len(trainable_vars) == len(self._grad_shapes)
assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes))
dev = loss.device
assert all(var.device == dev for var in trainable_vars)
# Register device and compute gradients.
with tf.name_scope(self.id + "_grad"), tf.device(dev):
if dev not in self._dev_opt:
opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt)
assert callable(self.optimizer_class)
self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
self._dev_grads[dev] = []
loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage
grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros
self._dev_grads[dev].append(grads)
def apply_updates(self) -> tf.Operation:
"""Construct training op to update the registered variables based on their gradients."""
tfutil.assert_tf_initialized()
assert not self._updates_applied
self._updates_applied = True
devices = list(self._dev_grads.keys())
total_grads = sum(len(grads) for grads in self._dev_grads.values())
assert len(devices) >= 1 and total_grads >= 1
ops = []
with tfutil.absolute_name_scope(self.scope):
# Cast gradients to FP32 and calculate partial sum within each device.
dev_grads = OrderedDict() # device => [(grad, var), ...]
for dev_idx, dev in enumerate(devices):
with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev):
sums = []
for gv in zip(*self._dev_grads[dev]):
assert all(v is gv[0][1] for g, v in gv)
g = [tf.cast(g, tf.float32) for g, v in gv]
g = g[0] if len(g) == 1 else tf.add_n(g)
sums.append((g, gv[0][1]))
dev_grads[dev] = sums
# Sum gradients across devices.
if len(devices) > 1:
with tf.name_scope("SumAcrossGPUs"), tf.device(None):
for var_idx, grad_shape in enumerate(self._grad_shapes):
g = [dev_grads[dev][var_idx][0] for dev in devices]
if np.prod(grad_shape): # nccl does not support zero-sized tensors
g = tf.contrib.nccl.all_sum(g)
for dev, gg in zip(devices, g):
dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
# Apply updates separately on each device.
for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev):
# Scale gradients as needed.
if self.use_loss_scaling or total_grads > 1:
with tf.name_scope("Scale"):
coef = tf.constant(np.float32(1.0 / total_grads), name="coef")
coef = self.undo_loss_scaling(coef)
grads = [(g * coef, v) for g, v in grads]
# Check for overflows.
with tf.name_scope("CheckOverflow"):
grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
# Update weights and adjust loss scaling.
with tf.name_scope("UpdateWeights"):
# pylint: disable=cell-var-from-loop
opt = self._dev_opt[dev]
ls_var = self.get_loss_scaling_var(dev)
if not self.use_loss_scaling:
ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
else:
ops.append(tf.cond(grad_ok,
lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
# Report statistics on the last device.
if dev == devices[-1]:
with tf.name_scope("Statistics"):
ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1)))
if self.use_loss_scaling:
ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var))
# Initialize variables and group everything into a single op.
self.reset_optimizer_state()
tfutil.init_uninitialized_vars(list(self._dev_ls_var.values()))
return tf.group(*ops, name="TrainingOp")
def reset_optimizer_state(self) -> None:
"""Reset internal state of the underlying optimizer."""
tfutil.assert_tf_initialized()
tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
"""Get or create variable representing log2 of the current dynamic loss scaling factor."""
if not self.use_loss_scaling:
return None
if device not in self._dev_ls_var:
with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None):
self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var")
return self._dev_ls_var[device]
def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
"""Apply dynamic loss scaling for the given expression."""
assert tfutil.is_tf_expression(value)
if not self.use_loss_scaling:
return value
return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
"""Undo the effect of dynamic loss scaling for the given expression."""
assert tfutil.is_tf_expression(value)
if not self.use_loss_scaling:
return value
return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type
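# Hedged usage sketch (appended; not part of the original file). The calling
# pattern implied by the docstrings above: register one loss per device, then
# build a single grouped update op.
def _build_train_op(opt: Optimizer, per_gpu_losses, trainables):
    # per_gpu_losses: one scalar loss tensor per GPU, each placed on its
    # device; gradients are averaged across devices and NaN/Inf updates are
    # skipped automatically by apply_updates().
    for loss in per_gpu_losses:
        opt.register_gradients(loss, trainables)
    return opt.apply_updates()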
| 9,828 | 46.028708 | 164 | py |
noise2noise | noise2noise-master/dnnlib/submission/run_context.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helpers for managing the run/training loop."""
import datetime
import json
import os
import pprint
import signal
import time
import types
from typing import Any
from . import submit
class RunContext(object):
"""Helper class for managing the run/training loop.
The context will hide the implementation details of a basic run/training loop.
It sets things up properly, tells whether the run should be stopped, and then cleans up.
The user should call update() periodically and use should_stop() to determine whether the run should be stopped.
Args:
submit_config: The SubmitConfig that is used for the current run.
config_module: The whole config module that is used for the current run.
max_epoch: Optional cached value for the max_epoch variable used in update.
"""
def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None):
self.submit_config = submit_config
self.should_stop_flag = False
self.has_closed = False
self.start_time = time.time()
self.last_update_time = time.time()
self.last_update_interval = 0.0
self.max_epoch = max_epoch
# pretty-print all the relevant content of the config module to a text file
if config_module is not None:
with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f:
filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))}
pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False)
# write out details about the run to a text file
self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")}
with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f:
pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
# overrides whatever previous function was set as signal handler for these signals
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
def __enter__(self) -> "RunContext":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def _signal_handler(self, signum: int, frame: int) -> None:
del signum, frame # unused
self.should_stop_flag = True
print("RunContext: Interrupt signal received!")
def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None:
"""Do general housekeeping and keep the state of the context up-to-date.
Should be called often enough but not in a tight loop."""
assert not self.has_closed
self.last_update_interval = time.time() - self.last_update_time
self.last_update_time = time.time()
if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")):
self.should_stop_flag = True
max_epoch_val = self.max_epoch if max_epoch is None else max_epoch  # effective value; the cached max_epoch can be overridden per call
def should_stop(self) -> bool:
"""Tell whether a stopping condition has been triggered one way or another."""
return self.should_stop_flag
def get_time_since_start(self) -> float:
"""How much time has passed since the creation of the context."""
return time.time() - self.start_time
def get_time_since_last_update(self) -> float:
"""How much time has passed since the last call to update."""
return time.time() - self.last_update_time
def get_last_update_interval(self) -> float:
"""How much time passed between the previous two calls to update."""
return self.last_update_interval
def close(self) -> None:
"""Close the context and clean up.
Should only be called once."""
if not self.has_closed:
# update the run.txt with stopping time
self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ")
with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f:
pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
self.has_closed = True
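# Hedged usage sketch (appended): RunContext is meant to wrap the training
# loop as a context manager; SIGINT/SIGTERM or an "abort.txt" file in the run
# dir flips should_stop(), and close() stamps the stop time into run.txt.
def _toy_training_loop(submit_config, num_epochs=100):
    with RunContext(submit_config, max_epoch=num_epochs) as ctx:
        for epoch in range(num_epochs):
            if ctx.should_stop():
                break
            # ... one epoch of real work would go here ...
            ctx.update(loss=0.0, cur_epoch=epoch, max_epoch=num_epochs)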
| 4,766 | 42.336364 | 213 | py |
noise2noise | noise2noise-master/dnnlib/submission/__init__.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import run_context
from . import submit
| 390 | 38.1 | 76 | py |
noise2noise | noise2noise-master/dnnlib/submission/submit.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Submit a function to be run either locally or in a computing cluster."""
import copy
import io
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import time
import traceback
import typeguard
import zipfile
from enum import Enum
from .. import util
from ..util import EasyDict
class SubmitTarget(Enum):
"""The target where the function should be run.
LOCAL: Run it locally.
"""
LOCAL = 1
class PathType(Enum):
"""Determines in which format should a path be formatted.
WINDOWS: Format with Windows style.
LINUX: Format with Linux/Posix style.
AUTO: Use current OS type to select either WINDOWS or LINUX.
"""
WINDOWS = 1
LINUX = 2
AUTO = 3
_user_name_override = None
class SubmitConfig(util.EasyDict):
"""Strongly typed config dict needed to submit runs.
Attributes:
run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template.
run_desc: Description of the run. Will be used in the run dir and task name.
run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir.
run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir.
submit_target: Submit target enum value. Used to select where the run is actually launched.
num_gpus: Number of GPUs used/requested for the run.
print_info: Whether to print debug information when submitting.
ask_confirmation: Whether to ask a confirmation before submitting.
use_typeguard: Whether to use the typeguard module for run-time type checking (slow!).
run_id: Automatically populated value during submit.
run_name: Automatically populated value during submit.
run_dir: Automatically populated value during submit.
run_func_name: Automatically populated value during submit.
run_func_kwargs: Automatically populated value during submit.
user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value.
task_name: Automatically populated value during submit.
host_name: Automatically populated value during submit.
"""
def __init__(self):
super().__init__()
# run (set these)
self.run_dir_root = "" # should always be passed through get_path_from_template
self.run_desc = ""
self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode"]
self.run_dir_extra_files = None
# submit (set these)
self.submit_target = SubmitTarget.LOCAL
self.num_gpus = 1
self.print_info = False
self.ask_confirmation = False
self.use_typeguard = False
# (automatically populated)
self.run_id = None
self.run_name = None
self.run_dir = None
self.run_func_name = None
self.run_func_kwargs = None
self.user_name = None
self.task_name = None
self.host_name = "localhost"
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
"""Replace tags in the given path template and return either Windows or Linux formatted path."""
# automatically select path type depending on running OS
if path_type == PathType.AUTO:
if platform.system() == "Windows":
path_type = PathType.WINDOWS
elif platform.system() == "Linux":
path_type = PathType.LINUX
else:
raise RuntimeError("Unknown platform")
path_template = path_template.replace("<USERNAME>", get_user_name())
# return correctly formatted path
if path_type == PathType.WINDOWS:
return str(pathlib.PureWindowsPath(path_template))
elif path_type == PathType.LINUX:
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError("Unknown platform")
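# Hedged illustration (added): only the "<USERNAME>" tag is substituted in
# this stripped-down version; the same template then renders per path type,
# e.g. (assuming get_user_name() returns "jane"):
#   get_path_from_template("results/<USERNAME>/run1", PathType.LINUX)
#     -> "results/jane/run1"
#   get_path_from_template("results/<USERNAME>/run1", PathType.WINDOWS)
#     -> "results\\jane\\run1"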
def get_template_from_path(path: str) -> str:
"""Convert a normal path back to its template representation."""
# normalize to forward slashes; this stripped-down version has no tags left to restore
path = path.replace("\\", "/")
return path
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
"""Convert a normal path to template and the convert it back to a normal path with given path type."""
path_template = get_template_from_path(path)
path = get_path_from_template(path_template, path_type)
return path
def set_user_name_override(name: str) -> None:
"""Set the global username override value."""
global _user_name_override
_user_name_override = name
def get_user_name():
"""Get the current user name."""
if _user_name_override is not None:
return _user_name_override
elif platform.system() == "Windows":
return os.getlogin()
elif platform.system() == "Linux":
try:
import pwd # pylint: disable=import-error
return pwd.getpwuid(os.geteuid()).pw_name # pylint: disable=no-member
except:
return "unknown"
else:
raise RuntimeError("Unknown platform")
def _create_run_dir_local(submit_config: SubmitConfig) -> str:
"""Create a new run dir with increasing ID number at the start."""
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
print("Creating the run dir root: {}".format(run_dir_root))
os.makedirs(run_dir_root)
submit_config.run_id = _get_next_run_id_local(run_dir_root)
submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if os.path.exists(run_dir):
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
print("Creating the run dir: {}".format(run_dir))
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
"""Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. Assumes IDs are numbers at the start of the directory names."""
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
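# Hedged worked example (added): with existing run dirs "00000-foo" and
# "00003-bar" plus a stray "notes" directory, the regex matches "00000" and
# "00003" only, so run_id = max(0 + 1, 3 + 1) = 4, which the caller formats
# as "00004-<run_desc>".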
def _populate_run_dir(run_dir: str, submit_config: SubmitConfig) -> None:
"""Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable."""
print("Copying files to the run dir")
files = []
run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
assert '.' in submit_config.run_func_name
for _idx in range(submit_config.run_func_name.count('.') - 1):
run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False)
dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True)
if submit_config.run_dir_extra_files is not None:
files += submit_config.run_dir_extra_files
files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
files += [(os.path.join(dnnlib_module_dir_path, "submission", "_internal", "run.py"), os.path.join(run_dir, "run.py"))]
util.copy_files_and_create_dirs(files)
pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False)
def run_wrapper(submit_config: SubmitConfig) -> None:
"""Wrap the actual run function call for handling logging, exceptions, typing, etc."""
is_local = submit_config.submit_target == SubmitTarget.LOCAL
checker = None
if submit_config.use_typeguard:
checker = typeguard.TypeChecker("dnnlib")
checker.start()
# when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
if is_local:
logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True)
else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
logger = util.Logger(file_name=None, should_flush=True)
import dnnlib
dnnlib.submit_config = submit_config
try:
print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
util.call_func_by_name(func_name=submit_config.run_func_name, submit_config=submit_config, **submit_config.run_func_kwargs)
print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, "log.txt")
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
finally:
open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
dnnlib.submit_config = None
logger.close()
if checker is not None:
checker.stop()
def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None:
"""Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place."""
submit_config = copy.copy(submit_config)
if submit_config.user_name is None:
submit_config.user_name = get_user_name()
submit_config.run_func_name = run_func_name
submit_config.run_func_kwargs = run_func_kwargs
assert submit_config.submit_target == SubmitTarget.LOCAL
if submit_config.submit_target in {SubmitTarget.LOCAL}:
run_dir = _create_run_dir_local(submit_config)
submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
submit_config.run_dir = run_dir
_populate_run_dir(run_dir, submit_config)
if submit_config.print_info:
print("\nSubmit config:\n")
pprint.pprint(submit_config, indent=4, width=200, compact=False)
print()
if submit_config.ask_confirmation:
if not util.ask_yes_no("Continue submitting the job?"):
return
run_wrapper(submit_config)
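# Hedged usage sketch (appended; mirrors how a config script would drive this
# module -- the function name and kwargs below are hypothetical):
def _submit_example():
    sc = SubmitConfig()
    sc.run_dir_root = "results"          # local results root (assumption)
    sc.run_desc = "autoencoder-demo"     # becomes part of the run dir name
    # run_func_name must be an importable "module.function" path; its kwargs
    # are pickled into the run dir and replayed by run_wrapper().
    submit_run(sc, "train.train", epochs=100)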
| 11,443 | 37.531987 | 175 | py |
noise2noise | noise2noise-master/dnnlib/submission/_internal/run.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for launching run functions in computing clusters.
During the submit process, this file is copied to the appropriate run dir.
When the job is launched in the cluster, this module is the first thing that
is run inside the docker container.
"""
import os
import pickle
import sys
# PYTHONPATH should have been set so that the run_dir/src is in it
import dnnlib
if len(sys.argv) < 4:
raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!")
run_dir = str(sys.argv[1])
task_name = str(sys.argv[2])
host_name = str(sys.argv[3])
submit_config_path = os.path.join(run_dir, "submit_config.pkl")
# SubmitConfig should have been pickled to the run dir
if not os.path.exists(submit_config_path):
raise RuntimeError("SubmitConfig pickle file does not exist!")
submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb"))
dnnlib.submission.submit.set_user_name_override(submit_config.user_name)
submit_config.task_name = task_name
submit_config.host_name = host_name
dnnlib.submission.submit.run_wrapper(submit_config)
| 1,436 | 33.214286 | 94 | py |
SERT | SERT-master/hside_simu_test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
from hsi_setup import Engine, train_options, make_dataset
import time
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.net.use_2dconv)
target_transform = HSI2Tensor()
"""Test-Dev"""
test_dir = opt.test_dir
mat_dataset = MatDataFromFolder(
test_dir)
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt',
transform=lambda x: x[...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt', needsigma=False),
])
mat_dataset = TransformDataset(mat_dataset, mat_transform)
mat_loader = DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=not opt.no_cuda  # pin host memory only when CUDA is enabled
)
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
engine.epoch = 0
start_time = time.time()
engine.test(mat_loader, test_dir)
end_time = time.time()
test_time = end_time - start_time
print('cost-time: ', test_time / len(mat_dataset))  # average per-image inference time
| 1,643 | 23.176471 | 72 | py |
SERT | SERT-master/hside_real.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
import datetime
import time
from hsi_setup import Engine, train_options, make_dataset
#os.environ["WANDB_MODE"] ='offline'
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Real noise)')
opt = train_options(parser)
print(opt)
img_options={}
img_options['patch_size'] = 128
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
train_dir = '/train_real/'
train_dataset = DataLoaderTrain(train_dir,50,img_options=img_options,use2d=engine.get_net().use_2dconv)
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
print('==> Preparing data..')
"""Test-Dev"""
basefolder = '/test_real'
mat_datasets = DataLoaderVal(basefolder, 50, None,use2d=engine.get_net().use_2dconv)
mat_loader = DataLoader(
mat_datasets,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=not opt.no_cuda  # pin host memory only when CUDA is enabled
)
base_lr = opt.lr
epoch_per_save = 20
adjust_learning_rate(engine.optimizer, opt.lr)
print('loading finished')
# train from scratch; the learning rate is decayed at epochs 200 and 400
engine.epoch = 0
while engine.epoch < 1000:
np.random.seed()
if engine.epoch == 200:
adjust_learning_rate(engine.optimizer, base_lr*0.5)
if engine.epoch == 400:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loader)
engine.validate(mat_loader, 'real')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
wandb.finish()
| 2,369 | 25.333333 | 113 | py |
SERT | SERT-master/hsi_setup.py | import torch
import torch.optim as optim
import models
import os
import argparse
from os.path import join
from utility import *
from utility.ssim import SSIMLoss,SAMLoss
from thop import profile
from torchstat import stat
import scipy.io as scio
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from torchvision import models as torchmodel
from torch import einsum
import torchvision.utils as vutil
import time
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
class MultipleLoss(nn.Module):
def __init__(self, losses, weight=None):
super(MultipleLoss, self).__init__()
self.losses = nn.ModuleList(losses)
self.weight = weight or [1/len(self.losses)] * len(self.losses)
def forward(self, predict, target):
total_loss = 0
for weight, loss in zip(self.weight, self.losses):
total_loss += loss(predict, target) * weight
return total_loss
def extra_repr(self):
return 'weight={}'.format(self.weight)
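# Hedged example (added): the 'l2_ssim' and 'l2_sam' options handled in
# Engine.__setup below build exactly this kind of weighted sum. A minimal
# sketch with two plain criteria:
def _example_multiple_loss(predict, target):
    criterion = MultipleLoss([nn.MSELoss(), nn.L1Loss()], weight=[1.0, 0.1])
    return criterion(predict, target)  # == 1.0 * mse + 0.1 * l1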
class L1Consist(nn.Module):
def __init__(self, losses, weight=None):
super(L1Consist, self).__init__()
self.loss1 = losses[0]
self.loss_cons = losses[1]
self.weight = weight or [1 / len(losses)] * len(losses)  # default to equal weights over the given losses
def forward(self, predict, target,inputs):
total_loss = 0
total_loss += self.loss1(predict, target) * self.weight[0]
total_loss += self.loss_cons( predict , target,inputs) * self.weight[1]
return total_loss
def extra_repr(self):
return 'weight={}'.format(self.weight)
def train_options(parser):
def _parse_str_args(args):
str_args = args.split(',')
parsed_args = []
for str_arg in str_args:
arg = int(str_arg)
if arg >= 0:
parsed_args.append(arg)
return parsed_args
parser.add_argument('--prefix', '-p', type=str, default='denoise',
help='prefix')
parser.add_argument('--arch', '-a', metavar='ARCH', required=True,
choices= model_names ,
help='model architecture: ' +
' | '.join(model_names))
parser.add_argument('--batchSize', '-b', type=int,
default=16, help='training batch size. default=16')
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate. default=1e-3.')
parser.add_argument('--wd', type=float, default=0,
help='weight decay. default=0')
parser.add_argument('--loss', type=str, default='l2',
help='which loss to choose.', choices=['l1', 'l2', 'smooth_l1', 'ssim', 'l2_ssim','l2_sam','cons','cons_l2'])
parser.add_argument('--testdir', type=str)
parser.add_argument('--sigma', type=int)
parser.add_argument('--init', type=str, default='kn',
help='which init scheme to choose.', choices=['kn', 'ku', 'xn', 'xu', 'edsr'])
parser.add_argument('--no-cuda', action='store_true', help='disable cuda?')
parser.add_argument('--no-log', action='store_true',
help='disable logger?')
parser.add_argument('--threads', type=int, default=1,
help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=2018,
help='random seed to use. default=2018')
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
parser.add_argument('--no-ropt', '-nro', action='store_true',
help='not resume optimizer')
parser.add_argument('--chop', action='store_true',
help='forward chop')
parser.add_argument('--resumePath', '-rp', type=str,
default=None, help='checkpoint to use.')
parser.add_argument('--test-dir', type=str,
default='/data/HSI_Data/icvl_noise/512_noniid', help='The path of test HSIs')
parser.add_argument('--dataroot', '-d', type=str,
default='/data/HSI_Data/ICVL64_31.db', help='data root')
parser.add_argument('--clip', type=float, default=1e6)
parser.add_argument('--gpu-ids', type=str, default='0', help='gpu ids')
####################
parser.add_argument('--update_lr', type=float, default=0.5e-4, help='learning rate of inner loop')
parser.add_argument('--meta_lr', type=float, default=0.5e-4, help='learning rate of outer loop')
parser.add_argument('--n_way', type=int, default=1, help='the number of ways')
parser.add_argument('--k_spt', type=int, default=2, help='the number of support set')
parser.add_argument('--k_qry', type=int, default=5, help='the number of query set')
parser.add_argument('--task_num', type=int, default=16, help='the number of tasks')
parser.add_argument('--update_step', type=int, default=5, help='update step of inner loop in training')
parser.add_argument('--update_step_test', type=int, default=10, help='update step of inner loop in testing')
opt = parser.parse_args()
opt.gpu_ids = _parse_str_args(opt.gpu_ids)
return opt
def make_dataset(opt, train_transform, target_transform, common_transform, batch_size=None, repeat=1):
dataset = LMDBDataset(opt.dataroot, repeat=repeat)
# dataset.length -= 1000
# dataset.length = size or dataset.length
"""Split patches dataset into training, validation parts"""
dataset = TransformDataset(dataset, common_transform)
train_dataset = ImageTransformDataset(dataset, train_transform, target_transform)
train_loader = DataLoader(train_dataset,
batch_size=batch_size or opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
return train_loader
def make_metadataset(opt, train_transform, target_transform, common_transform, batch_size=None, repeat=1):
dataset = LMDBDataset(opt.dataroot, repeat=repeat)
# dataset.length -= 1000
# dataset.length = size or dataset.length
"""Split patches dataset into training, validation parts"""
dataset = TransformDataset(dataset, common_transform)
train_dataset = MetaRandomDataset(dataset, opt.n_way, opt.k_spt, opt.k_qry, train_transform, target_transform)
train_loader = DataLoader(train_dataset,
batch_size=batch_size or opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
return train_loader
class Engine(object):
def __init__(self, opt):
self.prefix = opt.prefix
self.opt = opt
self.net = None
self.optimizer = None
self.criterion = None
self.basedir = None
self.iteration = None
self.epoch = None
self.best_psnr = None
self.best_loss = None
self.writer = None
self.__setup()
def __setup(self):
self.basedir = join('checkpoints', self.opt.arch)
if not os.path.exists(self.basedir):
os.makedirs(self.basedir)
self.best_psnr = 0
self.best_loss = 1e6
self.epoch = 0 # start from epoch 0 or last checkpoint epoch
self.iteration = 0
cuda = not self.opt.no_cuda
self.device = 'cuda' if cuda else 'cpu'
print('CUDA access: %d' % cuda)
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run with --no-cuda")
torch.manual_seed(self.opt.seed)
if cuda:
torch.cuda.manual_seed(self.opt.seed)
"""Model"""
print("=> creating model '{}'".format(self.opt.arch))
self.net = models.__dict__[self.opt.arch]()
# initialize parameters
#print(self.net)
init_params(self.net, init_type=self.opt.init) # disable for default initialization
if len(self.opt.gpu_ids) > 1:
self.net = nn.DataParallel(self.net.cuda(), device_ids=self.opt.gpu_ids, output_device=self.opt.gpu_ids[0])
if self.opt.loss == 'l2':
self.criterion = nn.MSELoss()
if self.opt.loss == 'l1':
self.criterion = nn.L1Loss()
if self.opt.loss == 'smooth_l1':
self.criterion = nn.SmoothL1Loss()
if self.opt.loss == 'ssim':
self.criterion = SSIMLoss(data_range=1, channel=31)
if self.opt.loss == 'l2_ssim':
self.criterion = MultipleLoss([nn.MSELoss(), SSIMLoss(data_range=1, channel=31)], weight=[1, 2.5e-3])
if self.opt.loss == 'l2_sam':
self.criterion = MultipleLoss([nn.MSELoss(),SAMLoss()],weight=[1, 1e-3])
if self.opt.loss == 'cons':
self.criterion = L1Consist([nn.L1Loss(),ContrastLoss(ablation=False)],weight=[1, 1])
if self.opt.loss == 'cons_l2':
self.criterion = L1Consist([nn.MSELoss(),ContrastLoss(ablation=False)],weight=[1, 0.01])
print(self.criterion)
if cuda:
self.net.to(self.device)
print('cuda initialized')
self.criterion = self.criterion.to(self.device)
"""Logger Setup"""
log = not self.opt.no_log
if log:
self.writer = get_summary_writer(os.path.join(self.basedir, 'logs'), self.opt.prefix)
"""Optimization Setup"""
self.optimizer = optim.Adam(
self.net.parameters(), lr=self.opt.lr, weight_decay=self.opt.wd, amsgrad=False)
"""Resume previous model"""
if self.opt.resume:
# Load checkpoint.
self.load(self.opt.resumePath, not self.opt.no_ropt)
else:
print('==> Building model..')
# print(self.net)
total = sum([param.nelement() for param in self.net.parameters()])
print("Number of parameter: %.2fM" % (total/1e6))
# # stat(self.net, (31, 64, 64))
# from ptflops import get_model_complexity_info
# if self.get_net().use_2dconv == True:
# macs, params = get_model_complexity_info(self.net, (31, 512, 512),as_strings=True,
# print_per_layer_stat=False, verbose=False)
# else:
# macs, params = get_model_complexity_info(self.net, (1,31, 512, 512),as_strings=True,
# print_per_layer_stat=False, verbose=False)
# print('{:<30} {:<8}'.format('Computational complexity: ', macs))
# print('{:<30} {:<8}'.format('Number of parameters: ', params))
# # print(self.net.flops([64,64]))
# input_res= (31, 64, 64)
# batch = torch.ones(()).new_empty((1, *input_res),
# dtype=next(self.net.parameters()).dtype,
# device=next(self.net.parameters()).device)
# #print(input_res.shape)
# #from fvcore.nn import FlopCountAnalysis
# from flop_count.flop_count import FlopCountAnalysis
# flops = FlopCountAnalysis(self.net, batch)
# print(flops.total())
# from thop import profile
# batch = torch.randn(1,31, 512, 512)
# macs, params = profile(self.net, inputs=(batch.to('cuda'), ))
# print(macs,params)
# from torchstat import stat
# stat(self.net, (3, 256, 256))
# print(self.net.flops([64,64]))
def reset_params(self):
init_params(self.net, init_type=self.opt.init) # disable for default initialization
def forward(self, inputs):
if self.opt.chop:
output = self.forward_chop(inputs)
else:
output = self.net(inputs)
return output
def forward_chop(self, x, base=16):
n, c, b, h, w = x.size()
h_half, w_half = h // 2, w // 2
shave_h = np.ceil(h_half / base) * base - h_half
shave_w = np.ceil(w_half / base) * base - w_half
shave_h = shave_h if shave_h >= 10 else shave_h + base
shave_w = shave_w if shave_w >= 10 else shave_w + base
h_size, w_size = int(h_half + shave_h), int(w_half + shave_w)
inputs = [
x[..., 0:h_size, 0:w_size],
x[..., 0:h_size, (w - w_size):w],
x[..., (h - h_size):h, 0:w_size],
x[..., (h - h_size):h, (w - w_size):w]
]
outputs = [self.net(input_i) for input_i in inputs]
output = torch.zeros_like(x)
output_w = torch.zeros_like(x)
output[..., 0:h_half, 0:w_half] += outputs[0][..., 0:h_half, 0:w_half]
output_w[..., 0:h_half, 0:w_half] += 1
output[..., 0:h_half, w_half:w] += outputs[1][..., 0:h_half, (w_size - w + w_half):w_size]
output_w[..., 0:h_half, w_half:w] += 1
output[..., h_half:h, 0:w_half] += outputs[2][..., (h_size - h + h_half):h_size, 0:w_half]
output_w[..., h_half:h, 0:w_half] += 1
output[..., h_half:h, w_half:w] += outputs[3][..., (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
output_w[..., h_half:h, w_half:w] += 1
output /= output_w
return output
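# Hedged worked example (added) of the overlap arithmetic above: for
# h = w = 500 and base = 16, h_half = 250 and the initial shave is
# ceil(250 / 16) * 16 - 250 = 6; since 6 < 10 it grows to 6 + 16 = 22, so each
# of the four crops is 272 x 272 and the overlapping strips are averaged via
# the output_w counter.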
def __step(self, train, inputs, targets,sigma=None):
if train:
self.optimizer.zero_grad()
loss_data = 0
total_norm = None
if not train:
    self.net.eval()  # only switch to eval mode for validation/testing
if self.get_net().bandwise:
O = []
for i, t in zip(inputs.split(1, 1), targets.split(1, 1)):  # band-by-band forward
o = self.net(i)
O.append(o)
loss = self.criterion(o, t)
if train:
loss.backward()
loss_data += loss.item()
outputs = torch.cat(O, dim=1)
else:
#noisy_sigma = torch.zeros
outputs = self.net(inputs)
# outputs = torch.clamp(outputs, 0, 1)
# loss = self.criterion(outputs, targets)
# if outputs.ndimension() == 5:
# loss = self.criterion(outputs[:,0,...], torch.clamp(targets[:,0,...], 0, 1))
# else:
# loss = self.criterion(outputs, torch.clamp(targets, 0, 1))
#print(outputs.shape,torch.squeeze(outputs).shape,targets.shape)
#loss = self.criterion(outputs[:,0,...], targets[:,0,...])
# if self.net.use_2dconv == True:
# loss = self.criterion(outputs[:,0,...], targets[:,0,...])
# else:
loss = self.criterion(outputs[...], targets) #memnet
if train:
loss.backward()
loss_data += loss.item()
if train:
total_norm = nn.utils.clip_grad_norm_(self.net.parameters(), self.opt.clip)
self.optimizer.step()
return outputs, loss_data, total_norm
def load(self, resumePath=None, load_opt=True):
print('==> Resuming from checkpoint %s..' % resumePath)
assert os.path.isdir('checkpoints'), 'Error: no checkpoint directory found!'
checkpoint = torch.load(resumePath )
# if load_opt:
# self.optimizer.load_state_dict(checkpoint['optimizer'])
self.get_net().load_state_dict(checkpoint['net'])
def train(self, train_loader,val):
print('\nEpoch: %d' % self.epoch)
self.net.train()
train_loss = 0
train_psnr = 0
for batch_idx, (inputs, targets) in enumerate(train_loader):
if not self.opt.no_cuda:
inputs, targets = inputs.to(self.device), targets.to(self.device)
#print(inputs.shape,inputs.type)
outputs, loss_data, total_norm = self.__step(True, inputs, targets)
train_loss += loss_data
avg_loss = train_loss / (batch_idx+1)
psnr = np.mean(cal_bwpsnr(outputs, targets))
train_psnr += psnr
avg_psnr = train_psnr/ (batch_idx+1)
if not self.opt.no_log:
wandb.log({'train_psnr':avg_psnr},step=self.iteration)
wandb.log({'train_loss':loss_data},step=self.iteration)
wandb.log({'train_avg_loss':avg_loss},step=self.iteration)
self.writer.add_scalar(
join(self.prefix, 'train_psnr'), avg_psnr, self.iteration)
self.writer.add_scalar(
join(self.prefix, 'train_loss'), loss_data, self.iteration)
self.writer.add_scalar(
join(self.prefix, 'train_avg_loss'), avg_loss, self.iteration)
self.iteration += 1
progress_bar(batch_idx, len(train_loader), 'AvgLoss: %.4e | Loss: %.4e | Norm: %.4e | Psnr: %.4e'
             % (avg_loss, loss_data, total_norm, psnr))
self.epoch += 1
if not self.opt.no_log:
self.writer.add_scalar(
join(self.prefix, 'train_loss_epoch'), avg_loss, self.epoch)
def test(self, valid_loader, filen):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
if os.path.exists(filen):
filenames = [
fn
for fn in os.listdir(filen)
if fn.endswith('.mat')
]
print('[i] Eval dataset ...')
print(len(valid_loader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
if not self.opt.no_cuda:
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
#outputs = torch.clamp(self.net(inputs), 0, 1)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
h,w=inputs.shape[-2:]
band = inputs.shape[-3]
result = outputs.squeeze().cpu().detach().numpy()
img = targets.squeeze().cpu().detach().numpy()
for k in range(band):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= band*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(band):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(band):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/band)
ERGAS.append(ergas)
# inputs = inputs.squeeze().cpu().detach().numpy()
# result = inputs
# for band in range(31):
# img = result[band]*255#
# cv2.imwrite(os.path.join(save_path, filenames[batch_idx][:-4] +'_band_'+str(band)+'.jpg'),cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR))
# scio.savemat('/data/HSI_Data/Hyperspectral_Project/Urban_cvpr2023/'+self.opt.arch+'urban.mat',{'result':result})
# save_path = '/data/HSI_Data/Hyperspectral_Project/Urban_cvpr2023/imgs/'
# result = np.clip(result,0,1)
# for band in range(100,105):
# img = result[band]*255#
# cv2.imwrite(os.path.join(save_path, self.opt.arch +'_band_'+str(band)+'.jpg'),cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR))
# color_img = np.concatenate([result[0][np.newaxis,:],result[105][np.newaxis,:],result[207][np.newaxis,:]],0)
# color_img = color_img.transpose((1,2,0))*255
# print(color_img.shape)
# cv2.imwrite(os.path.join(save_path, self.opt.arch +'color.jpg'),cv2.cvtColor(color_img.astype(np.uint8),cv2.COLOR_RGB2BGR))
# result = img
# color_img = np.concatenate([result[9][np.newaxis,:],result[15][np.newaxis,:],result[28][np.newaxis,:]],0)
# color_img = color_img.transpose((1,2,0))*255
# print(color_img.shape)
# cv2.imwrite(os.path.join(save_path, filenames[batch_idx][:-4] +'color.png'),cv2.cvtColor(color_img.astype(np.uint8),cv2.COLOR_RGB2BGR))
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
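# Hedged NumPy sketch (added) of the spectral angle mapper computed above:
# SAM = mean over pixels of arccos(<x, y> / (||x|| * ||y||)), in degrees.
def _sam_degrees(result, img):
    # result, img: (bands, h, w) arrays; np.spacing(1) guards the division
    cos = (np.sum(result * img, 0) + np.spacing(1)) \
        / (np.sqrt(np.sum(result ** 2, 0) + np.spacing(1))) \
        / (np.sqrt(np.sum(img ** 2, 0) + np.spacing(1)))
    return np.mean(np.arccos(np.clip(cos, -1.0, 1.0))) * 180 / np.pi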
def test_patch(self, valid_loader, filen,patch_size=64):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
filenames = [
fn
for fn in os.listdir(filen)
if fn.endswith('.mat')
]
print('[i] Eval dataset ...')
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
_,channel, width, height = inputs.shape
# assumes 512x512 inputs with 31 bands split into 64 non-overlapping 64x64 patches
input_patch = torch.zeros((64, 31, 64, 64), dtype=torch.float)
targets_patch = torch.zeros((64, 31, 64, 64), dtype=torch.float)
num = 0
for i in range(width//patch_size):
for j in range(height//patch_size):
sub_image = inputs[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
input_patch[num] = sub_image
targets_patch[num] = targets[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
num += 1
if not self.opt.no_cuda:
inputs, targets = input_patch.to(self.device), targets_patch.to(self.device)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
result_patch = outputs.squeeze().cpu().detach().numpy()
img_patch = targets.squeeze().cpu().numpy()
result = np.zeros((31,512,512))
img = np.zeros((31,512,512))
h,w=result.shape[-2:]
num=0
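# Stitch the denoised patches back in the same row-major order used when splitting.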
for i in range(width//patch_size):
for j in range(height//patch_size):
result[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = result_patch[num]
img[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = img_patch[num]
num += 1
for k in range(31):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= 31*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(31):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(31):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/31)
ERGAS.append(ergas)
# scio.savemat('/data/HSI_Data/Hyperspectral_Project/Urban_result/Ours/'+filenames[batch_idx], {'result': result})
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def test_3dpatch(self, valid_loader, filen,patch_size=64,band_size=31,all_size=512):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
filenames = [
fn
for fn in os.listdir(filen)
if fn.endswith('.mat')
]
print('[i] Eval dataset ...')
blocks = (all_size//patch_size)*(all_size//patch_size)
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
_,_,channel, width, height = inputs.shape
input_patch = torch.zeros((blocks,band_size,patch_size,patch_size),dtype=torch.float)
targets_patch = torch.zeros((blocks,band_size,patch_size,patch_size),dtype=torch.float)
num = 0
for i in range(width//patch_size):
for j in range(height//patch_size):
sub_image = inputs[:,:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
input_patch[num] = sub_image
targets_patch[num] = targets[:,:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
num += 1
if not self.opt.no_cuda:
inputs, targets = input_patch.to(self.device), targets_patch.to(self.device)
inputs=inputs.unsqueeze(1)
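# the unsqueeze above gives 3-D conv nets their expected (N, 1, C, H, W) layout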
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
result_patch = outputs.squeeze().cpu().detach().numpy()
img_patch = targets.squeeze().cpu().numpy()
result = np.zeros((band_size,all_size,all_size))
img = np.zeros((band_size,all_size,all_size))
h,w=result.shape[-2:]
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
result[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = result_patch[num]
img[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = img_patch[num]
num += 1
for k in range(band_size):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= band_size*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(band_size):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(band_size):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/band_size)
ERGAS.append(ergas)
# scio.savemat('/data/HSI_Data/Hyperspectral_Project/Urban_result/Ours/'+filenames[batch_idx], {'result': result})
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def validate(self, valid_loader, name,patch_size=64):
self.net.eval()
validate_loss = 0
total_psnr = 0
total_sam = 0
RMSE = []
SSIM = []
SAM = []
ERGAS = []
PSNR = []
print('[i] Eval dataset {}...'.format(name))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valid_loader):
if ('cswin_unet' in self.opt.arch) or ('unfold' in self.opt.arch) or ('scalable' in self.opt.arch):
_,channel, width, height = inputs.shape
input_patch = torch.zeros((64,31,64,64),dtype=torch.float)
targets_patch = torch.zeros((64,31,64,64),dtype=torch.float)
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
sub_image = inputs[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
input_patch[num] = sub_image
targets_patch[num] = targets[:,:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]
num += 1
if not self.opt.no_cuda:
inputs, targets = input_patch.to(self.device), targets_patch.to(self.device)
else:
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs, loss_data, _ = self.__step(False, inputs, targets)
psnr = np.mean(cal_bwpsnr(outputs, targets))
sam = cal_sam(outputs, targets)
validate_loss += loss_data
total_sam += sam
avg_loss = validate_loss / (batch_idx+1)
avg_sam = total_sam / (batch_idx+1)
total_psnr += psnr
avg_psnr = total_psnr / (batch_idx+1)
progress_bar(batch_idx, len(valid_loader), 'Loss: %.4e | PSNR: %.4f | AVGPSNR: %.4f '
% (avg_loss, psnr, avg_psnr))
psnr = []
h,w=inputs.shape[-2:]
if ('cswin_unet' in self.opt.arch) or ('unfold' in self.opt.arch) or ('scalable' in self.opt.arch):
result_patch = outputs.squeeze().cpu().detach().numpy()
img_patch = targets.squeeze().cpu().numpy()
result = np.zeros((31,512,512))
img = np.zeros((31,512,512))
h,w=result.shape[-2:]
num=0
for i in range(width//patch_size):
for j in range(height//patch_size):
result[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = result_patch[num]
img[:, i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size] = img_patch[num]
num += 1
else:
# outputs = torch.clamp(outputs,0,1)
result = outputs.squeeze().cpu().detach().numpy()
img = targets.squeeze().cpu().numpy()
for k in range(31):
psnr.append(10*np.log10((h*w)/sum(sum((result[k]-img[k])**2))))
PSNR.append(sum(psnr)/len(psnr))
mse = sum(sum(sum((result-img)**2)))
mse /= 31*h*w
mse *= 255*255
rmse = np.sqrt(mse)
RMSE.append(rmse)
ssim = []
k1 = 0.01
k2 = 0.03
for k in range(31):
ssim.append((2*np.mean(result[k])*np.mean(img[k])+k1**2) \
*(2*np.cov(result[k].reshape(h*w), img[k].reshape(h*w))[0,1]+k2**2) \
/(np.mean(result[k])**2+np.mean(img[k])**2+k1**2) \
/(np.var(result[k])+np.var(img[k])+k2**2))
SSIM.append(sum(ssim)/len(ssim))
temp = (np.sum(result*img, 0) + np.spacing(1)) \
/(np.sqrt(np.sum(result**2, 0) + np.spacing(1))) \
/(np.sqrt(np.sum(img**2, 0) + np.spacing(1)))
#print(np.arccos(temp)*180/np.pi)
sam = np.mean(np.arccos(temp))*180/np.pi
SAM.append(sam)
ergas = 0.
for k in range(31):
ergas += np.mean((img[k]-result[k])**2)/np.mean(img[k])**2
ergas = 100*np.sqrt(ergas/31)
ERGAS.append(ergas)
print(sum(PSNR)/len(PSNR), sum(RMSE)/len(RMSE), sum(SSIM)/len(SSIM), sum(SAM)/len(SAM), sum(ERGAS)/len(ERGAS))
if not self.opt.no_log:
wandb.log({'val_loss_epoch':avg_loss,'val_psnr_epoch':avg_psnr,'val_sam_epoch':avg_sam,'epoch':self.epoch})
self.writer.add_scalar(
join(self.prefix, name, 'val_loss_epoch'), avg_loss, self.epoch)
self.writer.add_scalar(
join(self.prefix, name, 'val_psnr_epoch'), avg_psnr, self.epoch)
self.writer.add_scalar(
join(self.prefix, name, 'val_sam_epoch'), avg_sam, self.epoch)
print(avg_psnr, avg_loss,avg_sam)
return avg_psnr, avg_loss,avg_sam
def save_checkpoint(self, model_out_path=None, **kwargs):
if not model_out_path:
model_out_path = join(self.basedir, self.prefix, "model_epoch_%d_%d.pth" % (
self.epoch, self.iteration))
state = {
'net': self.get_net().state_dict(),
'optimizer': self.optimizer.state_dict(),
'epoch': self.epoch,
'iteration': self.iteration,
}
state.update(kwargs)
if not os.path.isdir(join(self.basedir, self.prefix)):
os.makedirs(join(self.basedir, self.prefix))
torch.save(state, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
# saving result into disk
def test_develop(self, test_loader, savedir=None, verbose=True):
from scipy.io import savemat
from os.path import basename, exists
def torch2numpy(hsi):
if self.get_net().use_2dconv:  # get_net() unwraps DataParallel when multiple GPUs are used
R_hsi = hsi.data[0].cpu().numpy().transpose((1,2,0))
else:
R_hsi = hsi.data[0].cpu().numpy()[0,...].transpose((1,2,0))
return R_hsi
self.net.eval()
test_loss = 0
total_psnr = 0
dataset = test_loader.dataset.dataset
res_arr = np.zeros((len(test_loader), 3))
input_arr = np.zeros((len(test_loader), 3))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
if not self.opt.no_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs, loss_data, _ = self.__step(False, inputs, targets)
test_loss += loss_data
avg_loss = test_loss / (batch_idx+1)
res_arr[batch_idx, :] = MSIQA(outputs, targets)
input_arr[batch_idx, :] = MSIQA(inputs, targets)
"""Visualization"""
# Visualize3D(inputs.data[0].cpu().numpy())
# Visualize3D(outputs.data[0].cpu().numpy())
psnr = res_arr[batch_idx, 0]
ssim = res_arr[batch_idx, 1]
if verbose:
print(batch_idx, psnr, ssim)
if savedir:
filedir = join(savedir, basename(dataset.filenames[batch_idx]).split('.')[0])
outpath = join(filedir, '{}.mat'.format(self.opt.arch))
if not exists(filedir):
os.mkdir(filedir)
if not exists(outpath):
savemat(outpath, {'R_hsi': torch2numpy(outputs)})
return res_arr, input_arr
def test_real(self, test_loader, savedir=None):
"""Warning: this code is not compatible with bandwise flag"""
from scipy.io import savemat
from os.path import basename
self.net.eval()
dataset = test_loader.dataset.dataset
with torch.no_grad():
for batch_idx, inputs in enumerate(test_loader):
if not self.opt.no_cuda:
inputs = inputs.cuda()
outputs = self.forward(inputs)
"""Visualization"""
input_np = inputs[0].cpu().numpy()
output_np = outputs[0].cpu().numpy()
display = np.concatenate([input_np, output_np], axis=-1)
Visualize3D(display)
# Visualize3D(outputs[0].cpu().numpy())
# Visualize3D((outputs-inputs).data[0].cpu().numpy())
if savedir:
R_hsi = outputs.data[0].cpu().numpy()[0,...].transpose((1,2,0))
savepath = join(savedir, basename(dataset.filenames[batch_idx]).split('.')[0], self.opt.arch + '.mat')
savemat(savepath, {'R_hsi': R_hsi})
return outputs
def get_net(self):
if len(self.opt.gpu_ids) > 1:
return self.net.module
else:
return self.net
| 41,464 | 41.835744 | 166 | py |
SERT | SERT-master/hside_simu.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
import datetime
import time
from hsi_setup import Engine, train_options, make_dataset
import wandb
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
data = datetime.datetime.now()
wandb.init(project="hsi-denoising", entity="name",name=opt.arch+opt.prefix+'-'+str(data.month)+'-'+str(data.day)+'-'+str(data.hour)+':'+str(data.minute),config=opt)
wandb.config.update(parser)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.get_net().use_2dconv)  # get_net() unwraps DataParallel
target_transform = HSI2Tensor()
train_transform = Compose([
AddNoiseBlindv1(10,70),
HSI2Tensor()
])
icvl_64_31_dir ='/data/HSI_Data/ICVL64_31.db/'
icvl_64_31 = LMDBDataset(icvl_64_31_dir)
target_transform = HSI2Tensor()
train_dataset = ImageTransformDataset(icvl_64_31, train_transform,target_transform)
print('==> Preparing data..')
"""Test-Dev"""
basefolder = '/data/HSI_Data/icvl_val_gaussian/512_10_70'
mat_datasets = [MatDataFromFolder(
basefolder, size=5)]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
# from epoch 50 to 100
engine.epoch = 0
while engine.epoch < 100:
np.random.seed()
if engine.epoch == 50:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loaders[0])
engine.validate(mat_loaders[0], 'wdc')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
wandb.finish()
| 3,121 | 29.019231 | 170 | py |
SERT | SERT-master/hside_urban.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import datetime
from utility import *
from hsi_setup import Engine, train_options, make_dataset
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.get_net().use_2dconv)
target_transform = HSI2Tensor()
train_transform = Compose([
AddNoiseNoniid_v2(0,55),
HSI2Tensor()
])
db_path = '/data/HSI_Data/Hyperspectral_Project/apex_big.db'
if not os.path.exists(db_path):
db_path = '/home/limiaoyu/data/Urban/apex_big.db'
icvl_64_31 = LMDBDataset(db_path,repeat=10)
target_transform = HSI2Tensor()
train_dataset = ImageTransformDataset(icvl_64_31, train_transform,target_transform)
print('==> Preparing data..')
# icvl_64_31_TL = make_dataset(
# opt, train_transform,
# target_transform, common_transform, 64)
"""Test-Dev"""
basefolder = '/data/HSI_Data/Hyperspectral_Project/'
if not os.path.exists(db_path):
basefolder = '/home/limiaoyu/data/Urban/'
mat_datasets = [MatDataFromFolder(
basefolder, size=1,fns=['Urban_304_minmax.mat']) ]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=opt.threads, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
# from epoch 50 to 100
engine.epoch = 0
while engine.epoch < 100:
np.random.seed()
if engine.epoch == 11:
adjust_learning_rate(engine.optimizer, base_lr*0.5)
if engine.epoch == 45:
adjust_learning_rate(engine.optimizer, base_lr*0.5*0.5)
if engine.epoch == 80:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
# if engine.epoch == 120:
# adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loaders[0])
#engine.test(mat_loaders[0], basefolder)
engine.validate(mat_loaders[0], 'icvl-validate-mixture')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
| 3,580 | 29.87069 | 113 | py |
SERT | SERT-master/hside_simu_complex.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import datetime
from utility import *
from hsi_setup import Engine, train_options, make_dataset
import wandb
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
data = datetime.datetime.now()
wandb.init(project="hsi-denoising-complex", entity="miayili",name=opt.arch+opt.prefix+'-'+str(data.month)+'-'+str(data.day)+'-'+str(data.hour)+':'+str(data.minute),config=opt)
wandb.config.update(parser)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.get_net().use_2dconv)
target_transform = HSI2Tensor()
sigmas = [10, 30, 50, 70]
train_transform = Compose([
AddNoiseNoniid(sigmas),
SequentialSelect(
transforms=[
lambda x: x,
AddNoiseImpulse(),
AddNoiseStripe(),
AddNoiseDeadline()
]
),
HSI2Tensor()
])
#change to 10 for sms_10
icvl_64_31_dir = '/data/HSI_Data/ICVL64_31.db/'
if not os.path.exists(icvl_64_31_dir):
icvl_64_31_dir = '/home/limiaoyu/data/ICVL64_31.db/'
icvl_64_31 = LMDBDataset(icvl_64_31_dir)
target_transform = HSI2Tensor()
train_dataset = ImageTransformDataset(icvl_64_31, train_transform,target_transform)
print('==> Preparing data..')
# icvl_64_31_TL = make_dataset(
# opt, train_transform,
# target_transform, common_transform, 64)
"""Test-Dev"""
folder_mat = '/data/HSI_Data/icvl_noise_50/512_mix'
if not os.path.exists(folder_mat):
folder_mat = '/home/limiaoyu/data/icvl_val_gaussian/50_mix'
mat_datasets = [MatDataFromFolder(folder_mat, size=5)]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='gt', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
train_loader = DataLoader(train_dataset,
batch_size=opt.batchSize, shuffle=True,
num_workers=8, pin_memory=not opt.no_cuda, worker_init_fn=worker_init_fn)
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
base_lr = opt.lr
epoch_per_save = 5
adjust_learning_rate(engine.optimizer, opt.lr)
# from epoch 50 to 100
engine.epoch = 0
while engine.epoch < 100:
np.random.seed()
#swin_ir_o 1e-4 60 o_resume from 40
#for 10
if engine.epoch == 50:
adjust_learning_rate(engine.optimizer, base_lr*0.1)
# deep_qrnn3d: switch to 1e-3 after 10 epochs
#for 10
# if engine.epoch == 45:
# adjust_learning_rate(engine.optimizer, base_lr*0.1)
# if engine.epoch == 45:
# adjust_learning_rate(engine.optimizer, base_lr*0.1*0.1)
# if engine.epoch == 70:
# adjust_learning_rate(engine.optimizer, base_lr*0.01)
# if engine.epoch == 120:
# adjust_learning_rate(engine.optimizer, base_lr*0.1)
engine.train(train_loader,mat_loaders[0])
engine.validate(mat_loaders[0], 'icvl-validate-noniid')
#engine.validate(mat_loaders[1], 'icvl-validate-mixture')
display_learning_rate(engine.optimizer)
print('Latest Result Saving...')
model_latest_path = os.path.join(engine.basedir, engine.prefix, 'model_latest.pth')
engine.save_checkpoint(
model_out_path=model_latest_path
)
display_learning_rate(engine.optimizer)
if engine.epoch % epoch_per_save == 0:
engine.save_checkpoint()
| 4,281 | 31.938462 | 181 | py |
SERT | SERT-master/hside_urban_test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
from hsi_setup import Engine, train_options, make_dataset
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.get_net().use_2dconv)
target_transform = HSI2Tensor()
"""Test-Dev"""
basefolder = '/data/HSI_Data/Hyperspectral_Project/'
mat_datasets = [MatDataFromFolder(
basefolder, size=1,fns=['Urban_304.mat']) ]
if not engine.get_net().use_2dconv:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input',
transform=lambda x:x[ ...][None], needsigma=False),
])
else:
mat_transform = Compose([
LoadMatHSI(input_key='input', gt_key='input', needsigma=False),
])
mat_datasets = [TransformDataset(mat_dataset, mat_transform)
for mat_dataset in mat_datasets]
mat_loaders = [DataLoader(
mat_dataset,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda
) for mat_dataset in mat_datasets]
engine.test(mat_loaders[0], basefolder)
| 1,535 | 23.774194 | 75 | py |
SERT | SERT-master/hside_real_test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utility import *
from hsi_setup import Engine, train_options, make_dataset
if __name__ == '__main__':
"""Training settings"""
parser = argparse.ArgumentParser(
description='Hyperspectral Image Denoising (Complex noise)')
opt = train_options(parser)
print(opt)
"""Setup Engine"""
engine = Engine(opt)
"""Dataset Setting"""
HSI2Tensor = partial(HSI2Tensor, use_2dconv=engine.get_net().use_2dconv)
"""Test-Dev"""
basefolder = opt.testdir
mat_datasets = DataLoaderVal(basefolder, 50, None,use2d=engine.get_net().use_2dconv)
print(len(mat_datasets))
print('loading finished')
mat_loader = DataLoader(
mat_datasets,
batch_size=1, shuffle=False,
num_workers=1, pin_memory=opt.no_cuda )
start_time = time.time()
engine.test(mat_loader, basefolder)
end_time = time.time()
test_time = end_time - start_time
print('cost-time: ', (test_time / 15))  # per-scene average; 15 is the assumed size of the test set
| 1,136 | 23.191489 | 88 | py |
SERT | SERT-master/utility/ssim.py | import torch
import torch.nn.functional as F
def _fspecial_gauss_1d(size, sigma):
r"""Create 1-D gauss kernel
Args:
size (int): the size of gauss kernel
sigma (float): sigma of normal distribution
Returns:
torch.Tensor: 1D kernel
"""
coords = torch.arange(size).to(dtype=torch.float)
coords -= size//2
g = torch.exp(-(coords**2) / (2*sigma**2))
g /= g.sum()
return g.unsqueeze(0).unsqueeze(0)
def gaussian_filter(input, win):
r""" Blur input with 1-D kernel
Args:
input (torch.Tensor): a batch of tensors to be blured
window (torch.Tensor): 1-D gauss kernel
Returns:
torch.Tensor: blured tensors
"""
N, C, H, W = input.shape
out = F.conv2d(input, win, stride=1, padding=0, groups=C)
# make it contiguous in y direction for memory efficiency
out = out.transpose(2, 3).contiguous()
out = F.conv2d(out, win, stride=1, padding=0, groups=C)
return out.transpose(2, 3).contiguous()
def _ssim(X, Y, win, data_range=255, size_average=True, full=False):
r""" Calculate ssim index for X and Y
Args:
X (torch.Tensor): images
Y (torch.Tensor): images
win (torch.Tensor): 1-D gauss kernel
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
full (bool, optional): return sc or not
Returns:
torch.Tensor: ssim results
"""
K1 = 0.01
K2 = 0.03
batch, channel, height, width = X.shape
compensation = 1.0
C1 = (K1 * data_range)**2
C2 = (K2 * data_range)**2
#####################################
# the 5 convs (blurs) can be combined
concat_input = torch.cat([X, Y, X*X, Y*Y, X*Y], dim=1)
concat_win = win.repeat(5, 1, 1, 1).to(X.device, dtype=X.dtype)
concat_out = gaussian_filter(concat_input, concat_win)
# unpack from conv output
mu1, mu2, sigma1_sq, sigma2_sq, sigma12 = (
concat_out[:, idx*channel:(idx+1)*channel, :, :] for idx in range(5))
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = compensation * (sigma1_sq - mu1_sq)
sigma2_sq = compensation * (sigma2_sq - mu2_sq)
sigma12 = compensation * (sigma12 - mu1_mu2)
##########################
# implementation from original repo
#_mu1 = F.conv2d( X, win, stride=1, padding=0, groups=channel)
#_mu2 = F.conv2d( Y, win, stride=1, padding=0, groups=channel)
#mu1_sq = mu1.pow(2)
#mu2_sq = mu2.pow(2)
#mu1_mu2 = mu1 * mu2
#sigma1_sq = compensation * ( F.conv2d( X*X, win, stride=1, padding=0, groups=channel) - mu1_sq )
#sigma2_sq = compensation * ( F.conv2d( Y*Y, win, stride=1, padding=0, groups=channel) - mu2_sq )
#sigma12 = compensation * ( F.conv2d( X*Y, win, stride=1, padding=0, groups=channel) - mu1_mu2 )
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
if size_average:
ssim_val = ssim_map.mean()
cs = cs_map.mean()
else:
ssim_val = ssim_map.mean(-1).mean(-1).mean(-1) # reduce along CHW
cs = cs_map.mean(-1).mean(-1).mean(-1)
if full:
return ssim_val, cs
else:
return ssim_val
def ssim(X, Y, win_size=11, win_sigma=1.5, win=None, data_range=255, size_average=True, full=False):
r""" interface of ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
full (bool, optional): return sc or not
Returns:
torch.Tensor: ssim results
"""
if len(X.shape) != 4:
raise ValueError('Input images must be 4-d tensors.')
if not X.type() == Y.type():
raise ValueError('Input images must have the same dtype.')
if not X.shape == Y.shape:
raise ValueError('Input images must have the same dimensions.')
if not (win_size % 2 == 1):
raise ValueError('Window size must be odd.')
win_sigma = win_sigma
if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat(X.shape[1], 1, 1, 1)
else:
win_size = win.shape[-1]
ssim_val, cs = _ssim(X, Y,
win=win,
data_range=data_range,
size_average=False,
full=True)
if size_average:
ssim_val = ssim_val.mean()
cs = cs.mean()
if full:
return ssim_val, cs
else:
return ssim_val
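# Example (a minimal sketch): identical 31-band inputs in [0, 1] give ssim ~= 1.
#   x = torch.rand(2, 31, 64, 64)
#   ssim(x, x, data_range=1.0)   # -> tensor close to 1.0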
def ms_ssim(X, Y, win_size=11, win_sigma=1.5, win=None, data_range=255, size_average=True, full=False, weights=None):
r""" interface of ms-ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
full (bool, optional): return sc or not
weights (list, optional): weights for different levels
Returns:
torch.Tensor: ms-ssim results
"""
if len(X.shape) != 4:
raise ValueError('Input images must be 4-d tensors.')
if not X.type() == Y.type():
raise ValueError('Input images must have the same dtype.')
if not X.shape == Y.shape:
raise ValueError('Input images must have the same dimensions.')
if not (win_size % 2 == 1):
raise ValueError('Window size must be odd.')
if weights is None:
weights = torch.FloatTensor(
[0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(X.device, dtype=X.dtype)
win_sigma = win_sigma
if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat(X.shape[1], 1, 1, 1)
else:
win_size = win.shape[-1]
levels = weights.shape[0]
mcs = []
for _ in range(levels):
ssim_val, cs = _ssim(X, Y,
win=win,
data_range=data_range,
size_average=False,
full=True)
mcs.append(cs)
padding = (X.shape[2] % 2, X.shape[3] % 2)
X = F.avg_pool2d(X, kernel_size=2, padding=padding)
Y = F.avg_pool2d(Y, kernel_size=2, padding=padding)
mcs = torch.stack(mcs, dim=0) # mcs, (level, batch)
# weights, (level)
msssim_val = torch.prod((mcs[:-1] ** weights[:-1].unsqueeze(1))
* (ssim_val ** weights[-1]), dim=0) # (batch, )
if size_average:
msssim_val = msssim_val.mean()
return msssim_val
# Classes to re-use window
class SSIMLoss(torch.nn.Module):
def __init__(self, win_size=11, win_sigma=1.5, data_range=None, size_average=True, channel=3):
r""" class for ssim
Args:
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
channel (int, optional): input channels (default: 3)
"""
super(SSIMLoss, self).__init__()
self.win = _fspecial_gauss_1d(
win_size, win_sigma).repeat(channel, 1, 1, 1)
self.size_average = size_average
self.data_range = data_range
def forward(self, X, Y):
if X.ndimension() == 5:
X = X[:,0,...]
Y = Y[:,0,...]
return 1-ssim(X, Y, win=self.win, data_range=self.data_range, size_average=self.size_average)
class SAMLoss(torch.nn.Module):
def __init__(self, size_average = False):
super(SAMLoss, self).__init__()
def forward(self, img_base, img_out):
if img_base.ndimension() == 5:
img_base = img_base[:,0,...]
if img_out.ndimension() == 5:
img_out = img_out[:,0,...]
sum1 = torch.sum(img_base * img_out, 1)
sum2 = torch.sum(img_base * img_base, 1)
sum3 = torch.sum(img_out * img_out, 1)
t = (sum2 * sum3) ** 0.5
numlocal = torch.gt(t, 0)
num = torch.sum(numlocal)
t = sum1 / t
angle = torch.acos(t)
sumangle = torch.where(torch.isnan(angle), torch.full_like(angle, 0), angle).sum()
if num == 0:
averangle = sumangle
else:
averangle = sumangle / num
SAM = averangle * 180 / 3.14159265  # pi (the original constant had transposed digits)
return SAM
class MS_SSIM(torch.nn.Module):
def __init__(self, win_size=11, win_sigma=1.5, data_range=None, size_average=True, channel=3, weights=None):
r""" class for ms-ssim
Args:
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
channel (int, optional): input channels (default: 3)
weights (list, optional): weights for different levels
"""
super(MS_SSIM, self).__init__()
self.win = _fspecial_gauss_1d(
win_size, win_sigma).repeat(channel, 1, 1, 1)
self.size_average = size_average
self.data_range = data_range
self.weights = weights
def forward(self, X, Y):
return ms_ssim(X, Y, win=self.win, size_average=self.size_average, data_range=self.data_range, weights=self.weights)
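# Quick self-check (a minimal sketch, not part of the library):
#   loss = SSIMLoss(data_range=1.0, channel=31)
#   x = torch.rand(2, 31, 64, 64)
#   loss(x, x)   # -> ~0, since SSIM(x, x) ~= 1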
| 10,533 | 34.829932 | 129 | py |
SERT | SERT-master/utility/lmdb_dataset.py | import torch.utils.data as data
import numpy as np
from PIL import Image
import os
import os.path
class LMDBDataset(data.Dataset):
def __init__(self, db_path, repeat=1):
import lmdb
self.db_path = db_path
self.env = lmdb.open(db_path, max_readers=1, readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = txn.stat()['entries']
self.length = int(self.length)
print(self.length)
self.repeat = repeat
with open(os.path.join(db_path, 'meta_info.txt')) as fin:
line = fin.readlines()[0]
size = line.split('(')[1].split(')')[0]
h,w,c =[ int(s) for s in size.split(',')]
self.channels = c
self.width = w
self.height = h
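# meta_info.txt is written by utility/lmdb_data.py with one "(h,w,c)" entry
# per record describing the stored patch shape.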
def __getitem__(self, index):
index = index % (self.length)
env = self.env
with env.begin(write=False) as txn:
data = txn.get('{:08}'.format(index).encode('ascii'))
flat_x = np.frombuffer(data, dtype=np.float32).copy()  # np.fromstring is removed in recent NumPy; .copy() keeps the array writable
x = flat_x.reshape(self.channels, self.height, self.width)
return x
def __len__(self):
return self.length * self.repeat
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
if __name__ == '__main__':
dataset = LMDBDataset('/media/lmy/LMY/aaai/ICVL64_31.db')
print(len(dataset))
train_loader = data.DataLoader(dataset, batch_size=20, num_workers=4)
print(next(iter(train_loader)).shape)  # Python 3 iterator protocol
SERT | SERT-master/utility/load_tif.py | import numpy as np
import os
from torch.utils.data import Dataset
import torch
import torch.nn.functional as F
import random
import scipy.stats as stats
from torch.utils.data import DataLoader
from skimage import io
import cv2
####################i##############################################################################
class Augment_RGB_torch:
def __init__(self):
pass
def transform0(self, torch_tensor):
return torch_tensor
def transform1(self, torch_tensor):
torch_tensor = torch.rot90(torch_tensor, k=1, dims=[-1,-2])
return torch_tensor
def transform2(self, torch_tensor):
torch_tensor = torch.rot90(torch_tensor, k=2, dims=[-1,-2])
return torch_tensor
def transform3(self, torch_tensor):
torch_tensor = torch.rot90(torch_tensor, k=3, dims=[-1,-2])
return torch_tensor
def transform4(self, torch_tensor):
torch_tensor = torch_tensor.flip(-2)
return torch_tensor
def transform5(self, torch_tensor):
torch_tensor = (torch.rot90(torch_tensor, k=1, dims=[-1,-2])).flip(-2)
return torch_tensor
def transform6(self, torch_tensor):
torch_tensor = (torch.rot90(torch_tensor, k=2, dims=[-1,-2])).flip(-2)
return torch_tensor
def transform7(self, torch_tensor):
torch_tensor = (torch.rot90(torch_tensor, k=3, dims=[-1,-2])).flip(-2)
return torch_tensor
class MixUp_AUG:
def __init__(self):
self.dist = torch.distributions.beta.Beta(torch.tensor([1.2]), torch.tensor([1.2]))
def aug(self, rgb_gt, rgb_noisy):
bs = rgb_gt.size(0)
indices = torch.randperm(bs)
rgb_gt2 = rgb_gt[indices]
rgb_noisy2 = rgb_noisy[indices]
lam = self.dist.rsample((bs,1)).view(-1,1,1,1).cuda()
rgb_gt = lam * rgb_gt + (1-lam) * rgb_gt2
rgb_noisy = lam * rgb_noisy + (1-lam) * rgb_noisy2
return rgb_gt, rgb_noisy
augment = Augment_RGB_torch()
transforms_aug = [method for method in dir(augment) if callable(getattr(augment, method)) if not method.startswith('_')]
def load_tif_img(filepath):
img = io.imread(filepath)
img = img.astype(np.float32)
#if type == 'gt':
img = img/4096.
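# 4096 = 2^12, i.e. presumably 12-bit sensor data normalized to [0, 1]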
return img
def is_tif_file(filename):
return any(filename.endswith(extension) for extension in [".tif"])
class DataLoaderTrain(Dataset):
def __init__(self, data_dir, ratio=50, img_options=None, target_transform=None,use2d=True,repeat=20):
super(DataLoaderTrain, self).__init__()
self.target_transform = target_transform
clean_files = sorted(os.listdir(os.path.join(data_dir, 'gt')))
noisy_files = sorted(os.listdir(os.path.join(data_dir, 'input{}'.format(ratio))))
self.clean_filenames = [os.path.join(data_dir, 'gt', x) for x in clean_files if is_tif_file(x)]
self.noisy_filenames = [os.path.join(data_dir, 'input{}'.format(ratio), x) for x in noisy_files if is_tif_file(x)]
self.clean = [torch.from_numpy(np.float32(load_tif_img(self.clean_filenames[index]))) for index in range(len(self.clean_filenames))]
self.noisy = [torch.from_numpy(np.float32(load_tif_img(self.noisy_filenames[index]))) for index in range(len(self.noisy_filenames))]
self.img_options=img_options
self.tar_size = len(self.clean_filenames) # get the size of target
self.ratio = ratio
self.use2d=use2d
self.repeat =repeat
def __len__(self):
return self.tar_size*self.repeat
def __getitem__(self, index):
tar_index = index % self.tar_size
clean = self.clean[tar_index]
noisy = self.noisy[tar_index]
clean_filename = os.path.split(self.clean_filenames[tar_index])[-1]
noisy_filename = os.path.split(self.noisy_filenames[tar_index])[-1]
clean = torch.clamp(clean, 0, 1)
noisy = torch.clamp(noisy, 0, 1)
#Crop Input and Target
ps = self.img_options['patch_size']
H = clean.shape[1]
W = clean.shape[2]
r = np.random.randint(0, H - ps)
c = np.random.randint(0, W - ps)
clean = clean[:, r:r + ps, c:c + ps]
noisy = noisy[:, r:r + ps, c:c + ps] * self.ratio
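# The short-exposure input is amplified by the exposure ratio so it matches the
# brightness of the long-exposure ground truth; values may exceed 1 here, while
# the validation loader clamps after scaling.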
apply_trans = transforms_aug[random.getrandbits(3)]
clean = getattr(augment, apply_trans)(clean)
noisy = getattr(augment, apply_trans)(noisy)
if not self.use2d:
clean = clean[None,...]
noisy = noisy[None,...]
return noisy,clean#, clean_filename, noisy_filename
##################################################################################################
class DataLoaderVal(Dataset):
def __init__(self, data_dir, ratio=50, target_transform=None,use2d=True):
super(DataLoaderVal, self).__init__()
self.target_transform = target_transform
clean_files = sorted(os.listdir(os.path.join(data_dir, 'gt')))
noisy_files = sorted(os.listdir(os.path.join(data_dir, 'input{}'.format(ratio))))
self.clean_filenames = [os.path.join(data_dir, 'gt', x) for x in clean_files if is_tif_file(x)]
self.noisy_filenames = [os.path.join(data_dir, 'input{}'.format(ratio), x) for x in noisy_files if is_tif_file(x)]
self.clean = [torch.from_numpy(np.float32(load_tif_img(self.clean_filenames[index]))) for index in range(len(self.clean_filenames))]
self.noisy = [torch.from_numpy(np.float32(load_tif_img(self.noisy_filenames[index]))) for index in range(len(self.noisy_filenames))]
self.tar_size = len(self.clean_filenames)
self.ratio = ratio
self.use2d = use2d
def __len__(self):
return self.tar_size
def __getitem__(self, index):
tar_index = index % self.tar_size
clean = self.clean[tar_index]
noisy = self.noisy[tar_index]
clean_filename = os.path.split(self.clean_filenames[tar_index])[-1]
noisy_filename = os.path.split(self.noisy_filenames[tar_index])[-1]
ps = 512
r = clean.shape[1]//2-ps//2
c = clean.shape[2]//2-ps//2
clean = clean[:, r:r + ps, c:c + ps]
noisy = noisy[:, r:r + ps, c:c + ps] * self.ratio
if not self.use2d:
clean = clean[None,...]
noisy = noisy[None,...]
clean = torch.clamp(clean, 0, 1)
noisy = torch.clamp(noisy, 0, 1)
return noisy,clean#, clean_filename, noisy_filename
if __name__ == '__main__':
rgb_dir = '/media/lmy/LMY/aaai/real_dataset'
ratio = 50
train_dir = '/media/lmy/LMY/aaai/train_real/'
img_options ={}
img_options['patch_size'] = 128
#train_dataset = DataLoaderTrain(train_dir,50,img_options=img_options)
# train_loader = DataLoader(train_dataset,
# batch_size=1, shuffle=True,
# num_workers=1)
test_dir= '/media/lmy/LMY/aaai/test_real/'
dataset = DataLoaderVal(test_dir, ratio, None)
# print(len(dataset))
train_loader = DataLoader(dataset, batch_size=1, num_workers=1)
#print(iter(train_loader).next())
for batch_idx, (inputs, targets) in enumerate(train_loader):
print(batch_idx,inputs.shape)
band =20
inputs = inputs.numpy()
targets = targets.numpy()
cv2.imwrite('tnoisy_'+'_band'+str(band)+'.png',inputs[0,band]*255)
cv2.imwrite('tgt_'+'_band'+str(band)+'.png',targets[0,band]*255)
break
| 7,553 | 36.959799 | 140 | py |
SERT | SERT-master/utility/validation.py | import torch
import torchvision
import random
import cv2
import shutil
try:
from .util import *
except:
from util import *
from torchvision.transforms import Compose, ToPILImage, ToTensor, RandomHorizontalFlip, RandomChoice
from torch.utils.data import DataLoader, Dataset
from torchnet.dataset import TransformDataset, SplitDataset, TensorDataset, ResampleDataset
from PIL import Image
from skimage.util import random_noise
from scipy.ndimage import gaussian_filter  # 'scipy.ndimage.filters' is deprecated
def show_validation_cadi():
all_datadir = '/data/HSI_Data/icvl201'
train_dir = '/data/HSI_Data/icvl_train_gaussian/'
test_dir = '/data/HSI_Data/icvl_validation_5'
all_fns = os.listdir(all_datadir)
test_fns = os.listdir(test_dir)
train_fns = os.listdir(train_dir)
rest_fns = []
for fn in all_fns:
if fn not in test_fns:
if fn not in train_fns:
rest_fns.append(fn)
print(rest_fns)
if __name__ == '__main__':
show_validation_cadi() | 994 | 26.638889 | 100 | py |
SERT | SERT-master/utility/lmdb_data.py | """Create lmdb dataset"""
from util import *
import lmdb
import scipy.io as scio
def create_lmdb_train(
datadir, fns, name, matkey,
crop_sizes, scales, ksizes, strides,
load=h5py.File, augment=True,
seed=2017):
"""
Create Augmented Dataset
"""
def preprocess(data):
new_data = []
data = minmax_normalize(data)
# data = np.rot90(data, k=2, axes=(1,2)) # ICVL
#data = minmax_normalize(data.transpose((2,0,1))) # for Remote Sensing
# Visualize3D(data)
if crop_sizes is not None:
data = crop_center(data, crop_sizes[0], crop_sizes[1])
for i in range(len(scales)):
if scales[i] != 1:
temp = zoom(data, zoom=(1, scales[i], scales[i]))
else:
temp = data
temp = Data2Volume(temp, ksizes=ksizes, strides=list(strides[i]))
new_data.append(temp)
new_data = np.concatenate(new_data, axis=0)
if augment:
for i in range(new_data.shape[0]):
new_data[i,...] = data_augmentation(new_data[i, ...])
return new_data.astype(np.float32)
np.random.seed(seed)
scales = list(scales)
ksizes = list(ksizes)
assert len(scales) == len(strides)
# calculate the shape of dataset
data = load(datadir + fns[0])[matkey]
data = preprocess(data)
N = data.shape[0]
print(data.shape)
map_size = data.nbytes * len(fns) * 1.2
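# LMDB requires map_size fixed up front; 1.2 leaves ~20% headroom over the raw tensor bytes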
print('map size (GB):', map_size / 1024 / 1024 / 1024)
#import ipdb; ipdb.set_trace()
print(name+'.db')
if os.path.exists(name+'.db'):
raise Exception('database already exist!')
env = lmdb.open(name+'.db', map_size=map_size, writemap=True)
txt_file = open(os.path.join(name+'.db', 'meta_info.txt'), 'w')
with env.begin(write=True) as txn:
# txn is a Transaction object
k = 0
for i, fn in enumerate(fns):
try:
X = load(datadir + fn)[matkey]
except:
print('loading', datadir+fn, 'fail')
continue
X = preprocess(X)
N = X.shape[0]
for j in range(N):
c,h,w = X.shape[1:]
data_byte = X[j].tobytes()
str_id = '{:08}'.format(k)
k += 1
txt_file.write(f'{str_id} ({h},{w},{c})\n')
txn.put(str_id.encode('ascii'), data_byte)
print('load mat (%d/%d): %s' %(i,len(fns),fn))
print('done')
def create_icvl64_31():
print('create icvl_31...')
datadir = '/data/HSI_Data/icvl_train/' # your own data address
fns = os.listdir(datadir)
fns = [fn.split('.')[0]+'.mat' for fn in fns]
create_lmdb_train(
datadir, fns, '/media/lmy/LMY/aaai/ICVL64_31', 'rad', # your own dataset address
crop_sizes=(1024, 1024),
scales=(1, 0.5, 0.25),
ksizes=(31, 64, 64),
strides=[(31, 64, 64), (31, 32, 32), (31, 32, 32)],
load=h5py.File, augment=True,
)
def createDCmall():
print('create wdc...')
datadir = '/data/HSI_Data/Hyperspectral_Project/WDC/train/'
fns = os.listdir(datadir)
fns = [fn.split('.')[0]+'.mat' for fn in fns]
create_lmdb_train(
datadir, fns, '/data/HSI_Data/Hyperspectral_Project/WDC/wdc', 'data', # your own dataset address
crop_sizes=None,
scales=(1, 0.5, 0.25),
ksizes=(191, 64, 64),
strides=[(191, 16, 16), (191, 8, 8), (191, 8, 8)],
load=scio.loadmat, augment=True,
)
def createApex():
print('create apex...')
datadir = '/data/HSI_Data/Hyperspectral_Project/apex_crop/'
fns = os.listdir(datadir)
create_lmdb_train(
datadir, fns, '/data/HSI_Data/Hyperspectral_Project/apex', 'data', # your own dataset address
crop_sizes=None,
scales=(1, 0.5, 0.5, 0.25),
ksizes=(210, 64, 64),
strides=[(210, 64, 64),(210, 32, 32), (210, 32, 32), (210, 16, 16)],
load=scio.loadmat, augment=True,
)
if __name__ == '__main__':
#createApex()
createDCmall()
create_icvl64_31()
pass
| 4,284 | 32.217054 | 105 | py |
SERT | SERT-master/utility/helper.py | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import datetime
from tensorboardX import SummaryWriter
import socket
import wandb
def adjust_learning_rate(optimizer, lr):
print('Adjust Learning Rate => %.4e' %lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def display_learning_rate(optimizer):
lrs = []
for i, param_group in enumerate(optimizer.param_groups):
lr = param_group['lr']
print('learning rate of group %d: %.4e' % (i, lr))
lrs.append(lr)
return lrs
def adjust_opt_params(optimizer, param_dict):
print('Adjust Optimizer Parameters => %s' %param_dict)
for param_group in optimizer.param_groups:
for k, v in param_dict.items():
param_group[k] = v
def display_opt_params(optimizer, keys):
for i, param_group in enumerate(optimizer.param_groups):
for k in keys:
v = param_group[k]
print('%s of group %d: %.4e' % (k,i,v))
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.weight.requires_grad = False
m.bias.requires_grad = False
m.eval()
def get_summary_writer(log_dir, prefix=None):
# log_dir = './checkpoints/%s/logs'%(arch)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
if prefix is None:
log_dir = os.path.join(log_dir, datetime.datetime.now().strftime('%b%d_%H-%M-%S')+'_'+socket.gethostname())
else:
log_dir = os.path.join(log_dir, prefix+'_'+datetime.datetime.now().strftime('%b%d_%H-%M-%S')+'_'+socket.gethostname())
if not os.path.exists(log_dir):
os.mkdir(log_dir)
writer = SummaryWriter(log_dir)
return writer
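# Example (a minimal sketch; the log path is illustrative):
#   writer = get_summary_writer('./checkpoints/logs', prefix='sert')
#   writer.add_scalar('train/loss', 0.123, global_step=0)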
def init_params(net, init_type='kn'):
print('use init scheme: %s' %init_type)
if init_type != 'edsr':
for m in net.modules():
if isinstance(m, (nn.Conv2d, nn.Conv3d)):
if init_type == 'kn':
init.kaiming_normal_(m.weight, mode='fan_out')
if init_type == 'ku':
init.kaiming_uniform_(m.weight, mode='fan_out')
if init_type == 'xn':
init.xavier_normal_(m.weight)
if init_type == 'xu':
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=1e-3)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
try:
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)
except ValueError:
    term_width = 80  # fallback when stdout is not attached to a terminal
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
| 4,909 | 28.053254 | 126 | py |
SERT | SERT-master/utility/dataset.py | # There are functions for creating a train and validation iterator.
from os import mkdir
import torch
import torchvision
import random
import cv2
try:
from .util import *
except:
from util import *
from torchvision.transforms import Compose, ToPILImage, ToTensor, RandomHorizontalFlip, RandomChoice
from torch.utils.data import DataLoader, Dataset
from torchnet.dataset import TransformDataset, SplitDataset, TensorDataset, ResampleDataset
from PIL import Image
from skimage.util import random_noise
from scipy.ndimage import gaussian_filter  # 'scipy.ndimage.filters' is deprecated
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
# Define Transforms
class RandomGeometricTransform(object):
def __call__(self, img):
"""
Args:
img (np.mdarray): Image to be geometric transformed.
Returns:
np.ndarray: Randomly geometric transformed image.
"""
if random.random() < 0.25:
return data_augmentation(img)
return img
class RandomCrop(object):
"""For HSI (c x h x w)"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img):
img = rand_crop(img, self.crop_size, self.crop_size)
return img
class SequentialSelect(object):
def __pos(self, n):
i = 0
while True:
# print(i)
yield i
i = (i + 1) % n
def __init__(self, transforms):
self.transforms = transforms
self.pos = LockedIterator(self.__pos(len(transforms)))
def __call__(self, img):
out = self.transforms[next(self.pos)](img)
return out
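# SequentialSelect cycles deterministically through its transforms across
# calls; LockedIterator guards the shared position counter.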
class AddNoise(object):
"""add gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, sigma):
self.sigma_ratio = sigma / 255.
def __call__(self, img):
noise = np.random.randn(*img.shape) * self.sigma_ratio
# print(img.sum(), noise.sum())
return img + noise
class AddNoiseBlind(object):
"""add blind gaussian noise to the given numpy array (B,H,W)"""
def __pos(self, n):
i = 0
while True:
yield i
i = (i + 1) % n
def __init__(self, sigmas):
self.sigmas = np.array(sigmas) / 255.
self.pos = LockedIterator(self.__pos(len(sigmas)))
def __call__(self, img):
sigma = self.sigmas[next(self.pos)]
noise = np.random.randn(*img.shape) * sigma
return img + noise, sigma
class AddNoiseBlindv1(object):
"""add blind gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, min_sigma, max_sigma):
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __call__(self, img):
sigma = np.random.uniform(self.min_sigma, self.max_sigma) / 255
noise = np.random.randn(*img.shape) * sigma
#print(img.shape)
out = img + noise
return out #, sigma
class AddNoiseBlindv2(object):
"""add blind gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, min_sigma, max_sigma):
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __call__(self, img):
sigma = np.random.uniform(self.min_sigma, self.max_sigma) / 255
noise = np.random.randn(*img.shape) * sigma
#print(img.shape)
out = img + noise
return out , sigma
class AddNoiseNoniid_v2(object):
"""add non-iid gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, min_sigma, max_sigma):
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __call__(self, img):
bwsigmas = np.reshape((np.random.rand( img.shape[0])*(self.max_sigma-self.min_sigma)+self.min_sigma), (-1,1,1))
noise = np.random.randn(*img.shape) * bwsigmas/255
return img + noise
class AddNoiseNoniid(object):
"""add non-iid gaussian noise to the given numpy array (B,H,W)"""
def __init__(self, sigmas):
self.sigmas = np.array(sigmas) / 255.
def __call__(self, img):
bwsigmas = np.reshape(self.sigmas[np.random.randint(0, len(self.sigmas), img.shape[0])], (-1,1,1))
noise = np.random.randn(*img.shape) * bwsigmas
return img + noise
class AddNoiseMixed(object):
"""add mixed noise to the given numpy array (B,H,W)
Args:
noise_bank: list of noise maker (e.g. AddNoiseImpulse)
num_bands: list of number of band which is corrupted by each item in noise_bank"""
def __init__(self, noise_bank, num_bands):
assert len(noise_bank) == len(num_bands)
self.noise_bank = noise_bank
self.num_bands = num_bands
def __call__(self, img):
B, H, W = img.shape
all_bands = np.random.permutation(range(B))
pos = 0
for noise_maker, num_band in zip(self.noise_bank, self.num_bands):
if 0 < num_band <= 1:
num_band = int(np.floor(num_band * B))
bands = all_bands[pos:pos+num_band]
pos += num_band
img = noise_maker(img, bands)
return img
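# Each noise maker corrupts a disjoint random subset of bands; entries of
# num_bands in (0, 1] are interpreted as fractions of the total band count.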
class _AddNoiseImpulse(object):
"""add impulse noise to the given numpy array (B,H,W)"""
def __init__(self, amounts, s_vs_p=0.5):
self.amounts = np.array(amounts)
self.s_vs_p = s_vs_p
def __call__(self, img, bands):
# bands = np.random.permutation(range(img.shape[0]))[:self.num_band]
bwamounts = self.amounts[np.random.randint(0, len(self.amounts), len(bands))]
for i, amount in zip(bands,bwamounts):
self.add_noise(img[i,...], amount=amount, salt_vs_pepper=self.s_vs_p)
return img
def add_noise(self, image, amount, salt_vs_pepper):
# out = image.copy()
out = image
p = amount
q = salt_vs_pepper
flipped = np.random.choice([True, False], size=image.shape,
p=[p, 1 - p])
salted = np.random.choice([True, False], size=image.shape,
p=[q, 1 - q])
peppered = ~salted
out[flipped & salted] = 1
out[flipped & peppered] = 0
return out
class _AddNoiseStripe(object):
"""add stripe noise to the given numpy array (B,H,W)"""
def __init__(self, min_amount, max_amount):
assert max_amount > min_amount
self.min_amount = min_amount
self.max_amount = max_amount
def __call__(self, img, bands):
B, H, W = img.shape
# bands = np.random.permutation(range(img.shape[0]))[:len(bands)]
num_stripe = np.random.randint(np.floor(self.min_amount*W), np.floor(self.max_amount*W), len(bands))
for i, n in zip(bands, num_stripe):
loc = np.random.permutation(range(W))
loc = loc[:n]
stripe = np.random.uniform(0,1, size=(len(loc),))*0.5-0.25
img[i, :, loc] -= np.reshape(stripe, (-1, 1))
return img
class _AddNoiseDeadline(object):
"""add deadline noise to the given numpy array (B,H,W)"""
def __init__(self, min_amount, max_amount):
assert max_amount > min_amount
self.min_amount = min_amount
self.max_amount = max_amount
def __call__(self, img, bands):
B, H, W = img.shape
# bands = np.random.permutation(range(img.shape[0]))[:len(bands)]
num_deadline = np.random.randint(np.ceil(self.min_amount*W), np.ceil(self.max_amount*W), len(bands))
for i, n in zip(bands, num_deadline):
loc = np.random.permutation(range(W))
loc = loc[:n]
img[i, :, loc] = 0
return img
class AddNoiseImpulse(AddNoiseMixed):
def __init__(self):
self.noise_bank = [_AddNoiseImpulse([0.1, 0.3, 0.5, 0.7])]
self.num_bands = [1/3]
class AddNoiseStripe(AddNoiseMixed):
def __init__(self):
self.noise_bank = [_AddNoiseStripe(0.05, 0.15)]
self.num_bands = [1/3]
class AddNoiseDeadline(AddNoiseMixed):
def __init__(self):
self.noise_bank = [_AddNoiseDeadline(0.05, 0.15)]
self.num_bands = [1/3]
class AddNoiseComplex(AddNoiseMixed):
def __init__(self):
self.noise_bank = [
_AddNoiseStripe(0.05, 0.15),
_AddNoiseDeadline(0.05, 0.15),
_AddNoiseImpulse([0.1, 0.3, 0.5, 0.7])
]
self.num_bands = [1/3, 1/3, 1/3]
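# i.e. one third of the bands gets stripes, one third deadlines, one third impulse noise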
class HSI2Tensor(object):
"""
Transform a numpy array with shape (C, H, W)
into torch 4D Tensor (1, C, H, W) or (C, H, W)
"""
def __init__(self, use_2dconv):
self.use_2dconv = use_2dconv
def __call__(self, hsi):
if self.use_2dconv:
img = torch.from_numpy(hsi)
else:
img = torch.from_numpy(hsi[None])
# for ch in range(hsi.shape[0]):
# hsi[ch, ...] = minmax_normalize(hsi[ch, ...])
# img = torch.from_numpy(hsi)
return img.float()
class LoadMatHSI(object):
def __init__(self, input_key, gt_key, needsigma=False, transform=None,crop=False):
self.gt_key = gt_key
self.input_key = input_key
self.transform = transform
self.needsigma = needsigma
self.crop = crop
def __call__(self, mat):
if self.transform:
input = self.transform(mat[self.input_key][:].transpose((2,0,1)))
gt = self.transform(mat[self.gt_key][:].transpose((2,0,1)))
else:
input = mat[self.input_key][:].transpose((2,0,1))
gt = mat[self.gt_key][:].transpose((2,0,1))
if self.needsigma:
sigma = mat['sigma']
sigma = torch.from_numpy(sigma).float()
# input = torch.from_numpy(input[None]).float()
input = torch.from_numpy(input).float()
# gt = torch.from_numpy(gt[None]).float() # for 3D net
gt = torch.from_numpy(gt).float()
size = 64
startx = 120
starty = 110
if self.crop:
gt = gt[:,startx:startx+size,starty:starty+size]
input = input[:,startx:startx+size,starty:starty+size]
if self.needsigma:
return input, gt, sigma
return input, gt
class LoadMatKey(object):
def __init__(self, key):
self.key = key
def __call__(self, mat):
item = mat[self.key][:].transpose((2,0,1))
return item.astype(np.float32)
# Define Datasets
class DatasetFromFolder(Dataset):
"""Wrap data from image folder"""
def __init__(self, data_dir, suffix='png'):
super(DatasetFromFolder, self).__init__()
self.filenames = [
os.path.join(data_dir, fn)
for fn in os.listdir(data_dir)
if fn.endswith(suffix)
]
def __getitem__(self, index):
img = Image.open(self.filenames[index]).convert('L')
return img
def __len__(self):
return len(self.filenames)
class MatDataFromFolder(Dataset):
"""Wrap mat data from folder"""
def __init__(self, data_dir, load=loadmat, suffix='.mat', fns=None, size=None):
super(MatDataFromFolder, self).__init__()
if fns is not None:
self.filenames = [
os.path.join(data_dir, fn) for fn in fns
]
else:
self.filenames = [
os.path.join(data_dir, fn)
for fn in os.listdir(data_dir)
if fn.endswith(suffix)
]
# for i in range(10):
# print(self.filenames[i])
self.load = load
if size and size <= len(self.filenames):
self.filenames = self.filenames[:size]
# self.filenames = self.filenames[5:]
def __getitem__(self, index):
# print(self.filenames[index])
mat = self.load(self.filenames[index])
# print(self.filenames[index])
return mat
def __len__(self):
return len(self.filenames)
class DataLoaderVal_TIF(Dataset):
def __init__(self, data_dir, ratio=50, target_transform=None):
super(DataLoaderVal_TIF, self).__init__()
self.target_transform = target_transform
clean_files = sorted(os.listdir(os.path.join(data_dir, 'gt')))
noisy_files = sorted(os.listdir(os.path.join(data_dir, 'input{}'.format(ratio))))
self.clean_filenames = [os.path.join(data_dir, 'gt', x) for x in clean_files if is_tif_file(x)]
self.noisy_filenames = [os.path.join(data_dir, 'input{}'.format(ratio), x) for x in noisy_files if is_tif_file(x)]
self.clean = [torch.from_numpy(np.float32(load_tif_img(self.clean_filenames[index]))) for index in range(len(self.clean_filenames))]
self.noisy = [torch.from_numpy(np.float32(load_tif_img(self.noisy_filenames[index]))) for index in range(len(self.noisy_filenames))]
self.tar_size = len(self.clean_filenames)
self.ratio = ratio
def __len__(self):
return self.tar_size
def __getitem__(self, index):
tar_index = index % self.tar_size
clean = self.clean[tar_index]
noisy = self.noisy[tar_index]
print(clean.max(),noisy.max(),clean.shape)
clean_filename = os.path.split(self.clean_filenames[tar_index])[-1]
noisy_filename = os.path.split(self.noisy_filenames[tar_index])[-1]
ps = 512
r = clean.shape[1]//2-ps//2
c = clean.shape[2]//2-ps//2
clean = clean[:, r:r + ps, c:c + ps]
noisy = noisy[:, r:r + ps, c:c + ps] * self.ratio
return clean, noisy, clean_filename, noisy_filename
def get_train_valid_loader(dataset,
batch_size,
train_transform=None,
valid_transform=None,
valid_size=None,
shuffle=True,
verbose=False,
num_workers=1,
pin_memory=False):
"""
Utility function for loading and returning train and valid
multi-process iterators over any pytorch dataset. A sample
of the images can be optionally displayed.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- dataset: full dataset which contains training and validation data
- batch_size: how many samples per batch to load. (train, val)
- train_transform/valid_transform: callable function
applied to each sample of dataset. default: transforms.ToTensor().
    - valid_size: should be an integer in the range [1, len(dataset)].
- shuffle: whether to shuffle the train/validation indices.
- verbose: display the verbose information of dataset.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be an integer in the range [1, %d]." %(len(dataset))
if not valid_size:
valid_size = int(0.1 * len(dataset))
if not isinstance(valid_size, int) or valid_size < 1 or valid_size > len(dataset):
raise TypeError(error_msg)
# define transform
    default_transform = lambda item: item  # identity mapping
train_transform = train_transform or default_transform
valid_transform = valid_transform or default_transform
# generate train/val datasets
partitions = {'Train': len(dataset)-valid_size, 'Valid':valid_size}
train_dataset = TransformDataset(
SplitDataset(dataset, partitions, initial_partition='Train'),
train_transform
)
valid_dataset = TransformDataset(
SplitDataset(dataset, partitions, initial_partition='Valid'),
valid_transform
)
train_loader = DataLoader(train_dataset,
batch_size=batch_size[0], shuffle=True,
num_workers=num_workers, pin_memory=pin_memory)
valid_loader = DataLoader(valid_dataset,
batch_size=batch_size[1], shuffle=False,
num_workers=num_workers, pin_memory=pin_memory)
return (train_loader, valid_loader)
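# Usage sketch (hedged): `full_dataset` is any indexable dataset; batch_size
# is a (train, valid) pair, and 10% of the samples go to 'Valid' by default.
def _demo_split_loaders(full_dataset):
    train_loader, valid_loader = get_train_valid_loader(
        full_dataset, batch_size=(16, 1), num_workers=1, pin_memory=True)
    return train_loader, valid_loader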
def get_train_valid_dataset(dataset, valid_size=None):
error_msg = "[!] valid_size should be an integer in the range [1, %d]." %(len(dataset))
if not valid_size:
valid_size = int(0.1 * len(dataset))
if not isinstance(valid_size, int) or valid_size < 1 or valid_size > len(dataset):
raise TypeError(error_msg)
# generate train/val datasets
partitions = {'Train': len(dataset)-valid_size, 'Valid':valid_size}
train_dataset = SplitDataset(dataset, partitions, initial_partition='Train')
valid_dataset = SplitDataset(dataset, partitions, initial_partition='Valid')
return (train_dataset, valid_dataset)
class ImageTransformDataset(Dataset):
def __init__(self, dataset, transform, target_transform=None):
super(ImageTransformDataset, self).__init__()
self.dataset = dataset
self.transform = transform
self.target_transform = target_transform
self.length = len(self.dataset)
def __len__(self):
return self.length
def __getitem__(self, idx):
img = self.dataset[idx]
target = img.copy()
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
#sigma = torch.FloatTensor([50/255.0]).unsqueeze(1)
return img, target#,sigma
class MetaRandomDataset(Dataset):
def __init__(self, data, n_way, k_shot, k_query, transform, target_transform=None, min_sigma=10, max_sigma=70):
self.data = data
self.n_way = n_way # n-way
self.k_shot = k_shot # k-shot
self.k_query = k_query # for evaluation
self.setsz = self.n_way * self.k_shot # num of samples per set
self.querysz = self.n_way * self.k_query # number of samples per set for evaluation
self.transform = transform
self.target_transform = target_transform
self.min_sigma = min_sigma
self.max_sigma = max_sigma
def __getitem__(self, index):
support_x = []
support_y = []
query_x = []
query_y = []
# sigma = 0.1*np.random.rand()
sigma = np.random.uniform(self.min_sigma, self.max_sigma)
noisemaker = AddNoise(sigma)
img = self.data[index]
target = img.copy()
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
img = img.unsqueeze(dim=0)
GT = target.unsqueeze(dim=0)
for j in range(self.k_shot):
noisy_img = noisemaker(img)
support_x.append(noisy_img)
support_y.append(GT)
for j in range(self.k_query):
noisy_img = noisemaker(img)
query_x.append(noisy_img)
query_y.append(GT)
support_x = torch.cat(support_x, dim=0).float()
support_y = torch.cat(support_y, dim=0).float()
query_x = torch.cat(query_x, dim=0).float()
query_y = torch.cat(query_y, dim=0).float()
return [support_x, support_y, query_x, query_y, sigma/255]
def __len__(self):
return len(self.data)
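# Episode-shape sketch (hedged): relies on AddNoise defined earlier in this
# file; the data entry is a hypothetical (31, 32, 32) cube.
def _demo_meta_episode():
    data = [np.random.rand(31, 32, 32).astype(np.float32)]
    to_tensor = lambda a: torch.from_numpy(a).float()
    ds = MetaRandomDataset(data, n_way=1, k_shot=2, k_query=3,
                           transform=to_tensor, target_transform=to_tensor)
    sx, sy, qx, qy, sigma = ds[0]
    # sx/sy: (2, 31, 32, 32); qx/qy: (3, 31, 32, 32); sigma in [10/255, 70/255]
    return sx, sy, qx, qy, sigma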
def addNoise2ICVL():
#srcdir = '/data/HSI_Data/icvl_val_gaussian/gt/'
srcdir = '/media/lmy/LMY/cvpr2023/test_96_icvl/'
# noise_sig = [10,30,50,70]
# noisemodel = AddNoiseNoniid(noise_sig)
# dstdir ='/media/lmy/LMY/cvpr2023/test_noise_96_icvl/'+'512_mix'
# mkdir(dstdir)
c=0
#noisemodel = AddNoiseBlindv2(10,70)
# for filename in os.listdir(srcdir):
# c = c + 1
# print(c)
# filepath = os.path.join(srcdir, filename)
# mat = loadmat(filepath)
# srchsi = mat['data'].transpose(2,0,1)
# # inpaintinghsi, mask = inpaintingmodel(srchsi)
# noisyhsi = noisemodel(srchsi)
# # noisyhsi = stripemodel(noisyhsi)
# #noisyhsi = add_noniid_noise(srchsi)
# n_sigma = 0#/255
# savemat(os.path.join(dstdir, filename), {'gt': srchsi.transpose(
# 1, 2, 0),'sigma':n_sigma, 'input': noisyhsi.transpose(1, 2, 0)})
# stripemodel = AddNoiseImpulse()
# add_noniid_noise = Compose([
# AddNoiseNoniid(sigmas),
# AddNoiseComplex(),
# ])
#add_noniid_noise = AddNoiseNoniid(sigmas)
#srcimg = '/home/rainy/QRNN3D/data/toy.mat'
s_sigma = [10,30,50,70]
#s_sigma = [0]
for sigma in s_sigma:
#dstdir = '/data/HSI_Data/icvl_noise_50/512_mix'+'/'
dstdir = '/media/lmy/LMY/cvpr2023/test_noise_96_icvl/'+'512_'+str(sigma)
mkdir(dstdir)
noisemodel = AddNoise(sigma)
c = 0
#inpaintingmodel = AddInpaintingHole(0.05, 0.15,1/3)
for filename in os.listdir(srcdir):
c = c + 1
print(c)
filepath = os.path.join(srcdir, filename)
mat = loadmat(filepath)
srchsi = mat['data'].transpose(2,0,1)
# inpaintinghsi, mask = inpaintingmodel(srchsi)
noisyhsi = noisemodel(srchsi)
# noisyhsi = stripemodel(noisyhsi)
#noisyhsi = add_noniid_noise(srchsi)
n_sigma = sigma/255
savemat(os.path.join(dstdir, filename), {'gt': srchsi.transpose(
1, 2, 0),'sigma':n_sigma, 'input': noisyhsi.transpose(1, 2, 0)})
if __name__ == '__main__':
addNoise2ICVL()
| 21,829 | 33.928 | 140 | py |
SERT | SERT-master/utility/indexes.py | import numpy as np
import torch
# requires scikit-image < 0.18; newer versions moved these to skimage.metrics
# as structural_similarity / peak_signal_noise_ratio
from skimage.measure import compare_ssim, compare_psnr
from functools import partial
class Bandwise(object):
def __init__(self, index_fn):
self.index_fn = index_fn
def __call__(self, X, Y):
C = X.shape[-3]
bwindex = []
for ch in range(C):
x = torch.squeeze(X[...,ch,:,:].data).cpu().numpy()
y = torch.squeeze(Y[...,ch,:,:].data).cpu().numpy()
index = self.index_fn(x, y)
bwindex.append(index)
return bwindex
cal_bwssim = Bandwise(compare_ssim)
cal_bwpsnr = Bandwise(partial(compare_psnr, data_range=1))
def cal_sam(X, Y, eps=1e-8):
X = torch.squeeze(X.data).cpu().numpy()
Y = torch.squeeze(Y.data).cpu().numpy()
tmp = (np.sum(X*Y, axis=0) + eps) / (np.sqrt(np.sum(X**2, axis=0)) + eps) / (np.sqrt(np.sum(Y**2, axis=0)) + eps)
return np.mean(np.real(np.arccos(tmp)))
def MSIQA(X, Y):
psnr = np.mean(cal_bwpsnr(X, Y))
ssim = np.mean(cal_bwssim(X, Y))
sam = cal_sam(X, Y)
return psnr, ssim, sam
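# Usage sketch: X is the restored HSI and Y the reference, both scaled to
# [0, 1]; PSNR/SSIM are averaged over bands, SAM is in radians.
def _demo_msiqa():
    X = torch.rand(1, 31, 64, 64)
    Y = torch.rand(1, 31, 64, 64)
    psnr, ssim, sam = MSIQA(X, Y)
    return psnr, ssim, sam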
| 1,066 | 27.078947 | 121 | py |
SERT | SERT-master/utility/mat_data.py | """generate testing mat dataset"""
import os
import numpy as np
import h5py
from os.path import join, exists
from scipy.io import loadmat, savemat
from util import crop_center, Visualize3D, minmax_normalize
from PIL import Image
def create_mat_dataset(datadir, fnames, newdir, matkey, func=None, load=h5py.File):
if not exists(newdir):
os.mkdir(newdir)
for i, fn in enumerate(fnames):
print('generate data(%d/%d)' %(i+1, len(fnames)))
filepath = join(datadir, fn)
try:
mat = load(filepath)
            data = func(mat[matkey][...]) if func is not None else mat[matkey][...]
data_hwc = data.transpose((2,1,0))
savemat(join(newdir, fn), {'data': data_hwc})
try:
Image.fromarray(np.array(data_hwc*255,np.uint8)[:,:,20]).save('/data/HSI_Data/icvl_test_512_png/{}.png'.format(os.path.splitext(fn)[0]))
except Exception as e:
print(e)
        except Exception:
            print('open error for {}'.format(fn))
            continue
def create_icvl_sr():
basedir = '/data/HSI_Data/'
datadir = join(basedir, '/data/HSI_Data/icvl201/')
newdir = join(basedir, '/media/lmy/LMY/cvpr2023/test_96/')
#fnames = os.listdir(datadir)
f = open("test_list.txt")
fnames = f.readlines()
for i in range(len(fnames)):
fnames[i] = fnames[i].split('\n')[0]
f.close()
print(fnames)
def func(data):
data = np.rot90(data, k=-1, axes=(1,2))
data = crop_center(data, 512, 512)
data = minmax_normalize(data)
return data
create_mat_dataset(datadir, fnames, newdir, 'rad', func=func)
def generate_icvl_png():
basedir = '/data/HSI_Data/laisdata'
#/data/HSI_Data/laisdata/sig10_size256
datadir = join(basedir, 'sig10_size256')
newdir = join(basedir, 'size256_png')
fnames = os.listdir(datadir)
# def func(data):
# data = np.rot90(data, k=-1, axes=(1,2))
# data = minmax_normalize(data)
# return data
if not exists(newdir):
os.mkdir(newdir)
for i, fn in enumerate(fnames):
print('generate data(%d/%d)' %(i+1, len(fnames)))
filepath = join(datadir, fn)
try:
#mat = h5py.File(filepath)
mat = loadmat(filepath)
#data = func(mat['rad'][...])
data = mat['gt']
# data = mat['data']
#data_hwc = data.transpose((2,1,0))
data_hwc = data
Image.fromarray(np.array(data_hwc*255,np.uint8)[:,:,20]).save(os.path.join(newdir, '{}.png'.format(os.path.splitext(fn)[0])))
        except Exception:
            print('open error for {}'.format(fn))
            continue
def copydata():
basedir = '/data/HSI_Data/laisdata'
datadir = join(basedir, 'sig10')
newdir = join(basedir, 'gt')
fnames = os.listdir(datadir)
for i, fn in enumerate(fnames):
print('generate data(%d/%d)' %(i+1, len(fnames)))
filepath = join(datadir, fn)
mat = loadmat(filepath)
data = mat['gt']
savemat(join(newdir, fn), {'data': data})
if __name__ == '__main__':
create_icvl_sr()
#generate_icvl_png()
#copydata()
pass
| 3,241 | 28.743119 | 152 | py |
SERT | SERT-master/utility/util.py | import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import cv2
import h5py
import os
import random
import threading
from itertools import product
from scipy.io import loadmat, savemat
from functools import partial
from scipy.ndimage import zoom
from matplotlib.widgets import Slider
from PIL import Image
def Data2Volume(data, ksizes, strides):
"""
Construct Volumes from Original High Dimensional (D) Data
"""
dshape = data.shape
PatNum = lambda l, k, s: (np.floor( (l - k) / s ) + 1)
TotalPatNum = 1
for i in range(len(ksizes)):
TotalPatNum = TotalPatNum * PatNum(dshape[i], ksizes[i], strides[i])
V = np.zeros([int(TotalPatNum)]+ksizes); # create D+1 dimension volume
args = [range(kz) for kz in ksizes]
for s in product(*args):
s1 = (slice(None),) + s
s2 = tuple([slice(key, -ksizes[i]+key+1 or None, strides[i]) for i, key in enumerate(s)])
V[s1] = np.reshape(data[s2],-1)
return V
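# Worked example (illustrative): with data of shape (31, 64, 64),
# ksizes=[31, 8, 8] and strides=[1, 4, 4], each axis yields
# floor((l - k) / s) + 1 windows, i.e. 1 x 15 x 15 = 225 patches,
# so V has shape (225, 31, 8, 8).
def _demo_data2volume():
    data = np.random.rand(31, 64, 64)
    V = Data2Volume(data, ksizes=[31, 8, 8], strides=[1, 4, 4])
    return V  # (225, 31, 8, 8)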
def crop_center(img,cropx,cropy):
_,y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[:, starty:starty+cropy,startx:startx+cropx]
def rand_crop(img, cropx, cropy):
_,y,x = img.shape
x1 = random.randint(0, x - cropx)
y1 = random.randint(0, y - cropy)
return img[:, y1:y1+cropy, x1:x1+cropx]
def sequetial_process(*fns):
"""
    Integrate all process functions
"""
def processor(data):
for f in fns:
data = f(data)
return data
return processor
def minmax_normalize(array):
amin = np.min(array)
amax = np.max(array)
return (array - amin) / (amax - amin)
def minmax_normalize_tensor(array):
    amin = array.min()
    amax = array.max()
return (array - amin) / (amax - amin)
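# Quick check of the normalization above (illustrative):
def _demo_minmax():
    x = np.array([2.0, 4.0, 6.0])
    return minmax_normalize(x)  # array([0. , 0.5, 1. ])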
def frame_diff(frames):
diff_frames = frames[1:, ...] - frames[:-1, ...]
return diff_frames
def visualize(filename, matkey, load=loadmat, preprocess=None):
"""
    Visualize a preprocessed hyperspectral image
"""
if not preprocess:
preprocess = lambda identity: identity
mat = load(filename)
data = preprocess(mat[matkey])
print(data.shape)
print(np.max(data), np.min(data))
data = np.squeeze(data[:,:,:])
Visualize3D(data)
# Visualize3D(np.squeeze(data[:,0,:,:]))
def Visualize3D(data, meta=None):
data = np.squeeze(data)
for ch in range(data.shape[0]):
data[ch, ...] = minmax_normalize(data[ch, ...])
print(np.max(data), np.min(data))
ax = plt.subplot(111)
plt.subplots_adjust(left=0.25, bottom=0.25)
frame = 0
# l = plt.imshow(data[frame,:,:])
l = plt.imshow(data[frame,:,:], cmap='gray') #shows 256x256 image, i.e. 0th frame
# plt.colorbar()
axcolor = 'lightgoldenrodyellow'
axframe = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
sframe = Slider(axframe, 'Frame', 0, data.shape[0]-1, valinit=0)
def update(val):
frame = int(np.around(sframe.val))
l.set_data(data[frame,:,:])
if meta is not None:
axframe.set_title(meta[frame])
sframe.on_changed(update)
plt.show()
def data_augmentation(image, mode=None):
"""
Args:
image: np.ndarray, shape: C X H X W
"""
axes = (-2, -1)
flipud = lambda x: x[:, ::-1, :]
if mode is None:
mode = random.randint(0, 7)
if mode == 0:
# original
image = image
elif mode == 1:
# flip up and down
image = flipud(image)
elif mode == 2:
# rotate counterwise 90 degree
image = np.rot90(image, axes=axes)
elif mode == 3:
# rotate 90 degree and flip up and down
image = np.rot90(image, axes=axes)
image = flipud(image)
elif mode == 4:
# rotate 180 degree
image = np.rot90(image, k=2, axes=axes)
elif mode == 5:
# rotate 180 degree and flip
image = np.rot90(image, k=2, axes=axes)
image = flipud(image)
elif mode == 6:
# rotate 270 degree
image = np.rot90(image, k=3, axes=axes)
elif mode == 7:
# rotate 270 degree and flip
image = np.rot90(image, k=3, axes=axes)
image = flipud(image)
# we apply spectrum reversal for training 3D CNN, e.g. QRNN3D.
# disable it when training 2D CNN, e.g. MemNet
if random.random() < 0.5:
image = image[::-1, :, :]
return np.ascontiguousarray(image)
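# Illustrative call: mode=2 rotates each band by 90 degrees; a random
# spectral flip may additionally reverse the band order (see above).
def _demo_augment():
    img = np.random.rand(31, 64, 64).astype(np.float32)
    return data_augmentation(img, mode=2)  # still (31, 64, 64)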
class LockedIterator(object):
def __init__(self, it):
self.lock = threading.Lock()
self.it = it.__iter__()
def __iter__(self): return self
def __next__(self):
self.lock.acquire()
try:
return next(self.it)
finally:
self.lock.release()
if __name__ == '__main__':
"""Code Usage Example"""
"""ICVL"""
# hsi_rot = partial(np.rot90, k=-1, axes=(1,2))
# crop = lambda img: img[:,-1024:, -1024:]
# zoom_512 = partial(zoom, zoom=[1, 0.5, 0.5])
# d2v = partial(Data2Volume, ksizes=[31,64,64], strides=[1,28,28])
# preprocess = sequetial_process(hsi_rot, crop, minmax_normalize, d2v)
# preprocess = sequetial_process(hsi_rot, crop, minmax_normalize)
# datadir = 'Data/ICVL/Training/'
# fns = os.listdir(datadir)
# mat = h5py.File(os.path.join(datadir, fns[1]))
# data = preprocess(mat['rad'])
# data = np.linalg.norm(data, ord=2, axis=(1,2))
"""Common"""
# print(data)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.plot(data)
# plt.show()
# preprocess = sequetial_process(hsi_rot, crop, minmax_normalize, frame_diff)
# visualize(os.path.join(datadir, fns[0]), 'rad', load=h5py.File, preprocess=preprocess)
# visualize('Data/BSD/TrainingPatches/imdb_40_128.mat', 'inputs', load=h5py.File, preprocess=None)
# preprocess = lambda x: np.transpose(x[4][0],(2,0,1))
# preprocess = lambda x: minmax_normalize(np.transpose(np.array(x,dtype=np.float),(2,0,1)))
# visualize('/media/kaixuan/DATA/Papers/Code/Data/PIRM18/sample/true_hr', 'hsi', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Data/PIRM18/sample/img_1', 'true_hr', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Matlab/ITSReg/code of ITSReg MSI denoising/data/real/new/Indian/Indian_pines.mat', 'hsi', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Matlab/ECCV2018/Result/Indian/Indian_pines/QRNN3D-f.mat', 'R_hsi', load=loadmat, preprocess=preprocess)
# visualize('/media/kaixuan/DATA/Papers/Code/Matlab/ECCV2018/Data/Pavia/PaviaU', 'input', load=loadmat, preprocess=preprocess)
pass | 6,743 | 28.709251 | 175 | py |
SERT | SERT-master/utility/__init__.py | from .dataset import *
from .util import *
from .helper import *
from .lmdb_dataset import LMDBDataset
from .indexes import *
from .load_tif import * | 149 | 24 | 37 | py |
SERT | SERT-master/models/__init__.py | from .sert import SERT
from .competing_methods import *
def sert_base():
net = SERT(inp_channels=31,dim = 96, window_sizes=[16,32,32] , depths=[ 6,6,6], num_heads=[ 6,6,6],split_sizes=[1,2,4],mlp_ratio=2,weight_factor=0.1,memory_blocks=128,down_rank=8) #16,32,32
net.use_2dconv = True
net.bandwise = False
return net
def sert_tiny():
net = SERT(inp_channels=31,dim = 96, window_sizes=[16,32] , depths=[ 4,4], num_heads=[ 6,6],split_sizes=[2,4],mlp_ratio=2,weight_factor=0.1,memory_blocks=128,down_rank=8) #16,32,32
net.use_2dconv = True
net.bandwise = False
return net
def sert_small():
net = SERT(inp_channels=31,dim = 96, window_sizes=[16,32,32] , depths=[ 4,4,4], num_heads=[ 6,6,6],split_sizes=[1,2,4],mlp_ratio=2,weight_factor=0.1,memory_blocks=128,down_rank=8) #16,32,32
net.use_2dconv = True
net.bandwise = False
return net
def sert_urban():
net = SERT(inp_channels=210,dim = 96*2, window_sizes=[8,16,16] , depths=[ 6,6,6], num_heads=[ 6,6,6],split_sizes=[2,4,4],mlp_ratio=2,down_rank=8,memory_blocks=128)
net.use_2dconv = True
net.bandwise = False
return net
def sert_real():
net = SERT(inp_channels=34,dim = 96, window_sizes=[16,32,32] , depths=[6,6,6],down_rank=8, num_heads=[ 6,6,6],split_sizes=[1,2,4],mlp_ratio=2,memory_blocks=64)
net.use_2dconv = True
net.bandwise = False
return net
def qrnn3d():
net = QRNNREDC3D(1, 16, 5, [1, 3], has_ad=True)
net.use_2dconv = False
net.bandwise = False
return net
def grn_net():
net = U_Net_GR(in_ch=31,out_ch=31)
net.use_2dconv = True
net.bandwise = False
return net
def grn_net_real():
net = U_Net_GR(in_ch=34,out_ch=34)
net.use_2dconv = True
net.bandwise = False
return net
def grn_net_urban():
net = U_Net_GR(in_ch=210,out_ch=210)
net.use_2dconv = True
net.bandwise = False
return net
def t3sc():
from omegaconf import OmegaConf
cfg = OmegaConf.load('models/competing_methods/T3SC/layers/t3sc.yaml')
net = MultilayerModel(**cfg.params)
net.use_2dconv = True
net.bandwise = False
return net
def t3sc_real():
from omegaconf import OmegaConf
cfg = OmegaConf.load('models/competing_methods/T3SC/layers/t3sc_real.yaml')
net = MultilayerModel(**cfg.params)
net.use_2dconv = True
net.bandwise = False
return net
def t3sc_urban():
from omegaconf import OmegaConf
cfg = OmegaConf.load('models/competing_methods/T3SC/layers/t3sc_urban.yaml')
net = MultilayerModel(**cfg.params)
net.use_2dconv = True
net.bandwise = False
return net
def macnet():
net = MACNet(in_channels=1,channels=16,num_half_layer=5)
net.use_2dconv = True
net.bandwise = False
return net
def sst():
net = SST(inp_channels=31,dim = 90,
window_size=8,
depths=[ 6,6,6,6,6,6],
num_heads=[ 6,6,6,6,6,6],mlp_ratio=2)
net.use_2dconv = True
net.bandwise = False
return net
def sst_real():
net = SST(inp_channels=34,depths=[6,6,6])
net.use_2dconv = True
net.bandwise = False
return net
def sst_urban():
net = SST(inp_channels=210,dim = 210,
window_size=8,
depths=[ 6,6,6,6,6,6],
num_heads=[ 6,6,6,6,6,6],mlp_ratio=2)
net.use_2dconv = True
net.bandwise = False
return net | 3,539 | 29.782609 | 220 | py |
SERT | SERT-master/models/sert.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from pdb import set_trace as stx
import numbers
from einops import rearrange
import numpy as np
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
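# Round-trip sanity check (illustrative): window_reverse inverts
# window_partition as long as window_size divides H and W.
def _demo_window_roundtrip():
    x = torch.arange(2 * 16 * 16 * 4, dtype=torch.float32).view(2, 16, 16, 4)
    wins = window_partition(x, 8)        # (2 * 2 * 2, 8, 8, 4)
    y = window_reverse(wins, 8, 16, 16)  # back to (2, 16, 16, 4)
    assert torch.equal(x, y)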
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def img2windows(img, H_sp, W_sp):
"""
img: B C H W
"""
B, C, H, W = img.shape
img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
img_perm = img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp* W_sp, C)
return img_perm
def windows2img(img_splits_hw, H_sp, W_sp, H, W):
"""
img_splits_hw: B' H W C
"""
B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp))
img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1)
img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return img
class LePEAttention(nn.Module):
def __init__(self, dim, resolution, idx, split_size=7, dim_out=None, num_heads=8, attn_drop=0., qk_scale=None):
super().__init__()
self.dim = dim
self.dim_out = dim_out or dim
self.resolution = resolution
self.split_size = split_size
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
if idx == 0:
H_sp, W_sp = self.resolution, self.split_size
elif idx == 1:
W_sp, H_sp = self.resolution, self.split_size
else:
print ("ERROR MODE", idx)
exit(0)
self.H_sp = H_sp
self.W_sp = W_sp
self.get_v = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1,groups=dim)
self.attn_drop = nn.Dropout(attn_drop)
def im2cswin(self, x):
B, N, C = x.shape
H = W = int(np.sqrt(N))
x = x.transpose(-2,-1).contiguous().view(B, C, H, W)
x = img2windows(x, self.H_sp, self.W_sp)
x = x.reshape(-1, self.H_sp* self.W_sp, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3).contiguous()
return x
def get_lepe(self, x, func):
B, N, C = x.shape
H = W = int(np.sqrt(N))
x = x.transpose(-2,-1).contiguous().view(B, C, H, W)
H_sp, W_sp = self.H_sp, self.W_sp
x = x.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
x = x.permute(0, 2, 4, 1, 3, 5).contiguous().reshape(-1, C, H_sp, W_sp) ### B', C, H', W'
lepe = func(x) ### B', C, H', W'
lepe = lepe.reshape(-1, self.num_heads, C // self.num_heads, H_sp * W_sp).permute(0, 1, 3, 2).contiguous()
x = x.reshape(-1, self.num_heads, C // self.num_heads, self.H_sp* self.W_sp).permute(0, 1, 3, 2).contiguous()
return x, lepe
def forward(self, qkv,mask=None):
"""
x: B L C
"""
q,k,v = qkv[0], qkv[1], qkv[2]
### Img2Window
H = W = self.resolution
B, L, C = q.shape
# assert L == H * W, "flatten img_tokens has wrong size"
q = self.im2cswin(q)
k = self.im2cswin(k)
v, lepe = self.get_lepe(v, self.get_v)
q = q * self.scale
#print(q.shape,k.shape)
attn = (q @ k.transpose(-2, -1)) # B head N C @ B head C N --> B head N N
attn = nn.functional.softmax(attn, dim=-1, dtype=attn.dtype)
attn = self.attn_drop(attn)
x = (attn @ v) + lepe
x = x.transpose(1, 2).reshape(-1, self.H_sp* self.W_sp, C) # B head N N @ B head N C
### Window2Img
x = windows2img(x, self.H_sp, self.W_sp, H, W).view(B, -1, C) # B H' W' C
return x
def flops(self,shape):
flops = 0
H, W = shape
#q, k, v = (B* H//H_sp * W//W_sp) heads H_sp*W_sp C//heads
flops += ( (H//self.H_sp) * (W//self.W_sp)) *self.num_heads* (self.H_sp*self.W_sp)*(self.dim//self.num_heads)*(self.H_sp*self.W_sp)
flops += ( (H//self.H_sp) * (W//self.W_sp)) *self.num_heads* (self.H_sp*self.W_sp)*(self.dim//self.num_heads)*(self.H_sp*self.W_sp)
return flops
class ChannelAttention(nn.Module):
"""Channel attention used in RCAN.
Args:
num_feat (int): Channel number of intermediate features.
squeeze_factor (int): Channel squeeze factor. Default: 16.
"""
def __init__(self, num_feat, squeeze_factor=16,memory_blocks=128):
super(ChannelAttention, self).__init__()
self.pool = nn.AdaptiveAvgPool1d(1)
self.subnet = nn.Sequential(
nn.Linear(num_feat, num_feat // squeeze_factor),
#nn.ReLU(inplace=True)
)
self.upnet= nn.Sequential(
nn.Linear(num_feat // squeeze_factor, num_feat),
#nn.Linear(num_feat, num_feat),
nn.Sigmoid())
self.mb = torch.nn.Parameter(torch.randn(num_feat // squeeze_factor, memory_blocks))
self.low_dim = num_feat // squeeze_factor
def forward(self, x):
b,n,c = x.shape
t = x.transpose(1,2)
y = self.pool(t).squeeze(-1)
low_rank_f = self.subnet(y).unsqueeze(2)
mbg = self.mb.unsqueeze(0).repeat(b, 1, 1)
f1 = (low_rank_f.transpose(1,2) ) @mbg
f_dic_c = F.softmax(f1 * (int(self.low_dim) ** (-0.5)), dim=-1) # get the similarity information
y1 = [email protected](1,2)
y2 = self.upnet(y1)
out = x*y2
return out
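# Shape walkthrough (illustrative): for x of shape (B, N, C) the pooled
# channel descriptor is projected down to C // squeeze_factor, matched
# against the learned memory bank (low_dim x memory_blocks) via softmax
# similarity, re-expanded to C, and used to rescale x channel-wise.
def _demo_channel_attention():
    ca = ChannelAttention(num_feat=96, squeeze_factor=16, memory_blocks=128)
    x = torch.rand(2, 64, 96)
    return ca(x)  # (2, 64, 96)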
class CAB(nn.Module):
def __init__(self, num_feat, compress_ratio=3, squeeze_factor=30,memory_blocks=128):
super(CAB, self).__init__()
self.num_feat = num_feat
self.cab = nn.Sequential(
nn.Linear(num_feat,num_feat // compress_ratio),
nn.GELU(),
nn.Linear(num_feat // compress_ratio, num_feat), ChannelAttention(num_feat, squeeze_factor, memory_blocks) )
def forward(self, x):
return self.cab(x)
def flops(self,shape):
flops = 0
H,W = shape
flops += self.num_feat*H*W
return flops
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=0, qk_scale=None, memory_blocks=128,down_rank=16,weight_factor=0.1,attn_drop=0., proj_drop=0.,split_size=1):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.weight_factor = weight_factor
self.attns = nn.ModuleList([
LePEAttention(
dim//2, resolution=self.window_size[0], idx = i,
split_size=split_size, num_heads=num_heads//2, dim_out=dim//2,
qk_scale=qk_scale, attn_drop=attn_drop)
for i in range(2)])
self.c_attns = CAB(dim,compress_ratio=4,squeeze_factor=down_rank,memory_blocks=memory_blocks) #
#self.c_attns_15 = CAB(dim,compress_ratio=4,squeeze_factor=15)
#self.c_attns = Subspace(dim)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, -1, 3, C).permute(2, 0, 1, 3)
x1 = self.attns[0](qkv[:,:,:,:C//2],mask)
x2 = self.attns[1](qkv[:,:,:,C//2:],mask)
attened_x = torch.cat([x1,x2], dim=2)
attened_x = rearrange(attened_x, 'b n (g d) -> b n ( d g)', g=4)
x3 = self.c_attns(x)
attn = attened_x + self.weight_factor*x3
x = self.proj(attn)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, shape):
# calculate flops for 1 window with token length of N
flops = 0
H,W = shape
# qkv = self.qkv(x)
flops += 2*self.attns[0].flops([H,W])
flops += self.c_attns.flops([H,W])
return flops
class SSMTDA(nn.Module):
r""" Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,split_size=1,drop_path=0.0,weight_factor=0.1,memory_blocks=128,down_rank=16,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., act_layer=nn.GELU):
super(SSMTDA,self).__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.weight_factor=weight_factor
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.attns = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,memory_blocks=memory_blocks,down_rank=down_rank,weight_factor=weight_factor,split_size=split_size,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.num_heads = num_heads
def forward(self, x):
B,C,H,W = x.shape
x = x.flatten(2).transpose(1, 2)
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
attn_windows = self.attns(x_windows)
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self,shape):
flops = 0
H,W = shape
nW = H * W / self.window_size / self.window_size
flops += nW *self.attns.flops([self.window_size,self.window_size])
return flops
class SMSBlock(nn.Module):
def __init__(self,
dim = 90,
window_size=8,
depth=6,
num_head=6,
mlp_ratio=2,
qkv_bias=True, qk_scale=None,
weight_factor=0.1,memory_blocks=128,down_rank=16,
drop_path=0.0,
split_size=1,
):
super(SMSBlock,self).__init__()
self.smsblock = nn.Sequential(*[SSMTDA(dim=dim,input_resolution=window_size, num_heads=num_head, memory_blocks=memory_blocks,window_size=window_size,shift_size=0 if i%2==0 else window_size//2,
weight_factor=weight_factor,down_rank=down_rank,
split_size = split_size,
mlp_ratio=mlp_ratio,
drop_path = drop_path[i],
qkv_bias=qkv_bias, qk_scale=qk_scale,)
for i in range(depth)])
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
def forward(self,x):
out = self.smsblock(x)
out = self.conv(out)+x
return out
def flops(self,shape):
flops = 0
for blk in self.smsblock:
flops += blk.flops(shape)
return flops
class SERT(nn.Module):
def __init__(self,
inp_channels=31,
dim = 90,
window_sizes=[8,8,8,8,8,8],
depths=[ 6,6,6,6,6,6],
num_heads=[ 6,6,6,6,6,6],
split_sizes=[1,1,1,1,1,1],
mlp_ratio=2,down_rank=16,memory_blocks = 256,
qkv_bias=True, qk_scale=None,
bias=False,
drop_path_rate=0.1,
weight_factor = 0.1,
):
super(SERT, self).__init__()
self.conv_first = nn.Conv2d(inp_channels, dim, 3, 1, 1)
self.num_layers = depths
self.layers = nn.ModuleList()
print(len(self.num_layers))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
for i_layer in range(len(self.num_layers)):
layer = SMSBlock(dim = dim,
window_size=window_sizes[i_layer],
depth=depths[i_layer],
num_head=num_heads[i_layer],
weight_factor = weight_factor,down_rank=down_rank,memory_blocks=memory_blocks,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
split_size=split_sizes[i_layer],
drop_path =dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])]
)
self.layers.append(layer)
self.output = nn.Conv2d(int(dim), dim, kernel_size=3, stride=1, padding=1, bias=bias)
self.conv_delasta = nn.Conv2d(dim,inp_channels, 3, 1, 1)
def forward(self, inp_img):
_,_,h_inp,w_inp = inp_img.shape
hb, wb = 16, 16
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        # F.pad pads the last dim first: (left, right, top, bottom)
        inp_img = F.pad(inp_img, (0, pad_w, 0, pad_h), 'reflect')
f1 = self.conv_first(inp_img)
x=f1
for layer in self.layers:
x = layer(x)
x = self.output(x+f1) #+ inp_img
x = self.conv_delasta(x)+inp_img
x = x[:,:,:h_inp,:w_inp]
return x
def flops(self,shape):
flops = 0
for i, layer in enumerate(self.layers):
flops += layer.flops(shape)
return flops
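# Hedged smoke test (not in the original file): mirrors the 'sert_base'
# configuration defined in models/__init__.py. The forward pass pads H and W
# to multiples of 16, so a 64x64 input goes through unpadded.
def _demo_sert_forward():
    net = SERT(inp_channels=31, dim=96, window_sizes=[16, 32, 32],
               depths=[6, 6, 6], num_heads=[6, 6, 6], split_sizes=[1, 2, 4],
               mlp_ratio=2, weight_factor=0.1, memory_blocks=128, down_rank=8)
    x = torch.rand(1, 31, 64, 64)
    with torch.no_grad():
        y = net(x)
    assert y.shape == x.shape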
| 17,738 | 34.620482 | 200 | py |
SERT | SERT-master/models/competing_methods/SST.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GSAttention(nn.Module):
"""global spectral attention (GSA)
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads
bias (bool): If True, add a learnable bias to projection
"""
def __init__(self, dim, num_heads, bias):
super(GSAttention, self).__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Conv2d(dim, dim*3, kernel_size=1, bias=bias)
self.project_out = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
def forward(self, x):
b,c,h,w = x.shape
qkv = self.qkv(x)
q,k,v = qkv.chunk(3, dim=1)
q = rearrange(q, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
k = rearrange(k, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
v = rearrange(v, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
out = (attn @ v)
out = rearrange(out, 'b head c (h w) -> b (head c) h w', head=self.num_heads, h=h, w=w)
out = self.project_out(out)
return out
def flops(self,patchresolution):
flops = 0
H, W,C = patchresolution
flops += H* C *W* C
flops += C *C*H*W
return flops
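# Illustrative check: GSA attends across channels, so the attention matrix is
# (C/head) x (C/head) rather than (HW) x (HW), keeping cost linear in H*W.
def _demo_gsattention():
    attn = GSAttention(dim=31, num_heads=1, bias=False)
    x = torch.rand(2, 31, 16, 16)
    return attn(x)  # (2, 31, 16, 16)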
class NLSA(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super(NLSA,self).__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
#define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SSMA(nn.Module):
r""" Transformer Block:Spatial-Spectral Multi-head self-Attention (SSMA)
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,drop_path=0.0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,act_layer=nn.GELU,bias=False):
super(SSMA,self).__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.attn = NLSA(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
if self.shift_size > 0:
attn_mask = self.calculate_mask(self.input_resolution)
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
self.num_heads = num_heads
self.spectral_attn = GSAttention(dim, num_heads, bias)
def calculate_mask(self, x_size):
# calculate attention mask for SW-MSA
H, W = x_size
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
def forward(self, x):
B,C,H,W = x.shape
x = x.flatten(2).transpose(1, 2)
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
        if self.input_resolution == [H,W]: # non-local spatial attention
attn_windows = self.attn(x_windows, mask=self.attn_mask)
else:
attn_windows = self.attn(x_windows, mask=self.calculate_mask([H,W]).to(x.device))
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
x = x.transpose(1, 2).view(B, C, H, W)
x = self.spectral_attn(x) #global spectral attention
x = x.flatten(2).transpose(1, 2)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
class SMSBlock(nn.Module):
"""
residual spatial-spectral block (RSSB).
Args:
dim (int, optional): Embedding dim of features. Defaults to 90.
window_size (int, optional): window size of non-local spatial attention. Defaults to 8.
depth (int, optional): numbers of Transformer block at this layer. Defaults to 6.
num_head (int, optional):Number of attention heads. Defaults to 6.
mlp_ratio (int, optional): Ratio of mlp dim. Defaults to 2.
qkv_bias (bool, optional): Learnable bias to query, key, value. Defaults to True.
qk_scale (_type_, optional): The qk scale in non-local spatial attention. Defaults to None.
drop_path (float, optional): drop_rate. Defaults to 0.0.
bias (bool, optional): Defaults to False.
"""
def __init__(self,
dim = 90,
window_size=8,
depth=6,
num_head=6,
mlp_ratio=2,
qkv_bias=True, qk_scale=None,
drop_path=0.0,
bias = False):
super(SMSBlock,self).__init__()
self.smsblock = nn.Sequential(*[SSMA(dim=dim,input_resolution=[64,64], num_heads=num_head, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
drop_path = drop_path[i],
qkv_bias=qkv_bias, qk_scale=qk_scale,bias=bias )
for i in range(depth)])
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
def forward(self,x):
out = self.smsblock(x)
out = self.conv(out)+x
return out
class SST(nn.Module):
"""SST
Spatial-Spectral Transformer for Hyperspectral Image Denoising
Args:
inp_channels (int, optional): Input channels of HSI. Defaults to 31.
dim (int, optional): Embedding dimension. Defaults to 90.
window_size (int, optional): Window size of non-local spatial attention. Defaults to 8.
depths (list, optional): Number of Transformer block at different layers of network. Defaults to [ 6,6,6,6,6,6].
num_heads (list, optional): Number of attention heads in different layers. Defaults to [ 6,6,6,6,6,6].
mlp_ratio (int, optional): Ratio of mlp dim. Defaults to 2.
qkv_bias (bool, optional): Learnable bias to query, key, value. Defaults to True.
qk_scale (_type_, optional): The qk scale in non-local spatial attention. Defaults to None. If it is set to None, the embedding dimension is used to calculate the qk scale.
bias (bool, optional): Defaults to False.
drop_path_rate (float, optional): Stochastic depth rate of drop rate. Defaults to 0.1.
"""
def __init__(self,
inp_channels=31,
dim = 90,
window_size=8,
depths=[ 6,6,6,6,6,6],
num_heads=[ 6,6,6,6,6,6],
mlp_ratio=2,
qkv_bias=True, qk_scale=None,
bias = False,
drop_path_rate=0.1
):
super(SST, self).__init__()
self.conv_first = nn.Conv2d(inp_channels, dim, 3, 1, 1) #shallow featrure extraction
self.num_layers = depths
self.layers = nn.ModuleList()
print(len(self.num_layers))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
for i_layer in range(len(self.num_layers)):
layer = SMSBlock(dim = dim,
window_size=window_size,
depth=depths[i_layer],
num_head=num_heads[i_layer],
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop_path =dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
bias = bias)
self.layers.append(layer)
self.output = nn.Conv2d(int(dim), dim, kernel_size=3, stride=1, padding=1, bias=bias)
self.conv_delasta = nn.Conv2d(dim,inp_channels, 3, 1, 1) #reconstruction from features
def forward(self, inp_img):
f1 = self.conv_first(inp_img)
x=f1
for layer in self.layers:
x = layer(x)
x = self.output(x+f1)
x = self.conv_delasta(x)+inp_img
return x
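# Smoke test (illustrative): SST does no input padding, so H and W must be
# multiples of window_size (8); the configuration mirrors the defaults above.
def _demo_sst_forward():
    net = SST(inp_channels=31)
    x = torch.rand(1, 31, 64, 64)
    with torch.no_grad():
        y = net(x)
    assert y.shape == x.shape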
| 16,392 | 39.376847 | 184 | py |
SERT | SERT-master/models/competing_methods/__init__.py | from .GRNet import U_Net_GR
from .qrnn import QRNNREDC3D
from .T3SC.multilayer import MultilayerModel
from .macnet import MACNet
from .SST import SST | 149 | 29 | 44 | py |
SERT | SERT-master/models/competing_methods/GRNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class conv_relu(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1, padding_mode='zeros', bias=True):
super(conv_relu, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, padding_mode=padding_mode)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.conv(x))
class GSM(nn.Module):
def __init__(self, in_ch):
super(GSM, self).__init__()
self.channel = in_ch
self.conv1 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv3 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv4 = nn.Conv2d(self.channel//2, self.channel, kernel_size=1, stride=1, padding=0)
def forward(self, x):
theta = self.conv1(x)
theta = torch.reshape(theta, (-1, theta.shape[1], theta.shape[2]*theta.shape[3]))
phi = self.conv2(x)
phi = torch.reshape(phi, (-1, phi.shape[1], phi.shape[2]*phi.shape[3]))
g = self.conv3(x)
g = torch.reshape(g, (-1, g.shape[1], g.shape[2]*g.shape[3]))
phi1 = torch.reshape(phi, (-1, phi.shape[1]*phi.shape[2]))
phi1 = F.softmax(phi1, dim=-1)
phi1 = torch.reshape(phi1, phi.shape)
g1 = torch.reshape(g, (-1, g.shape[1]*g.shape[2]))
g1 = F.softmax(g1, dim=-1)
g1 = torch.reshape(g1, g.shape)
phi1 = phi1.transpose(1,2)
y = torch.bmm(theta, phi1)
# print(theta.shape[1]*phi1.shape[1]*phi1.shape[2])
y = torch.bmm(y, g1)
#print(y.shape[1]*g1.shape[1]*g1.shape[2])
# y = torch.bmm(phi1, g1)
# y = torch.bmm(theta, y)
# y = torch.matmul(theta, y)
F_s = torch.reshape(y, (-1, self.channel//2, x.shape[2], x.shape[3]))
res_F = self.conv4(F_s)
return res_F+x
class GCM(nn.Module):
def __init__(self, in_ch):
super(GCM ,self).__init__()
self.channel = in_ch
self.conv1 = nn.Conv2d(self.channel, self.channel//4, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(self.channel, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv3 = nn.Conv2d(self.channel//4, self.channel//4, kernel_size=1, stride=1, padding=0)
self.conv4 = nn.Conv2d(self.channel//2, self.channel//2, kernel_size=1, stride=1, padding=0)
self.conv5 = nn.Conv2d(self.channel//2, self.channel, kernel_size=1, stride=1, padding=0)
self.relu = nn.ReLU(inplace=True)
# self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
#x shape: [B, C, H, W]
x1 = self.conv1(x) # [B, C/4, H, W]
x1 = torch.reshape(x1, [x1.shape[0], x1.shape[1], -1]) # [B, C/4, H*W]
x2 = self.conv2(x) # [B, C/2, H, W]
x2 = torch.reshape(x2, [x2.shape[0], x2.shape[1], -1]) # [B, C/2, H*W]
x2 = x2.permute((0, 2, 1)) # [B, H*W, C/2]
v = torch.bmm(x1, x2)
# print(x1.shape[1]*x2.shape[1]*x2.shape[2])
# v = torch.matmul(x1, x2) # [B, C/4, C/2]
tmp = torch.reshape(v, (-1, v.shape[1]*v.shape[2]))
tmp = F.softmax(tmp, dim=-1)
v = torch.reshape(tmp, v.shape)
v = torch.unsqueeze(v, dim=3) # [B, C/4, C/2, 1]
n = self.conv3(v) # [B, C/4, C/2, 1]
n = v + n # [B, C/4, C/2, 1]
n = self.relu(n)
n = n.permute((0, 2, 1, 3)) # [B, C/2, C/4, 1]
n = self.conv4(n) # [B, C/2, C/4, 1]
z = torch.squeeze(n, dim=3) # [B, C/2, C/4]
y = torch.bmm(z, x1)
#print(z.shape[1]*x1.shape[1]*x1.shape[2])
# y = torch.matmul(z, x1) # [B, C/2, H*W]
y = torch.unsqueeze(y, dim=3) # [B, C/2, H*W, 1]
y = torch.reshape(y, (y.shape[0], y.shape[1], x.shape[2], x.shape[3])) # [B, C/2, H, W]
x_res = self.conv5(y) # [B, C, H, W]
return x + x_res
class DCM(nn.Module):
def __init__(self, channel, out_channel=None):
super(DCM, self).__init__()
if out_channel == None:
out_channel = channel
self.conv1 = conv_relu(channel, channel, kernel_size=3, stride=1, padding=1, padding_mode='replicate')
self.conv2 = conv_relu(channel, channel, kernel_size=3, stride=1, padding=1, padding_mode='replicate')
self.conv3 = conv_relu(channel, channel, kernel_size=3, stride=1, padding=1, padding_mode='replicate')
self.conv4 = nn.Conv2d(channel, out_channel, kernel_size=1, stride=1, padding=0, padding_mode='replicate')
def forward(self, x):
c1 = self.conv1(x)
tmp1 = c1 + x
c2 = self.conv2(tmp1)
tmp2 = tmp1 + c2
c3 = self.conv3(tmp2)
tmp3 = tmp2 + c3
c4 = self.conv4(tmp3)
return c4
class BlockEncoder(nn.Module):
def __init__(self, in_ch):
super(BlockEncoder, self).__init__()
self.DCM = DCM(in_ch)
self.GCM = GCM(in_ch)
def forward(self, x):
dcm_x = self.DCM(x)
gcm_x = self.GCM(dcm_x)
return x + gcm_x
class BlockDecoder(nn.Module):
def __init__(self, in_ch):
super(BlockDecoder, self).__init__()
self.GSM = GSM(in_ch)
self.DCM = DCM(in_ch)
def forward(self, x):
gsm_x = self.GSM(x)
dcm_x = self.DCM(gsm_x)
return x + dcm_x
class GRNet(nn.Module):
def __init__(self, in_ch=25):
super(GRNet, self).__init__()
n1 = 64
# filters = [n1, n1 * 2, n1 * 4, n1 * 8]
filters = [64, 64, 64, 64, 64]
self.down0 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
self.down1 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
self.down2 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
# self.Down4 = conv_relu(filters[0], filters[0], kernel_size=3, padding=1, stride=2, bias=True, padding_mode='replicate')
# self.Down4 = nn.Conv2d()
self.conv0 = nn.Conv2d(in_ch, filters[0], kernel_size=3, stride=1, padding=1, padding_mode='replicate', bias=True)
self.conv1 = nn.Conv2d(filters[0], filters[0], kernel_size=3, stride=1, padding=1, padding_mode='replicate', bias=True)
self.encoder0 = BlockEncoder(filters[0])
self.encoder1 = BlockEncoder(filters[1])
self.encoder2 = BlockEncoder(filters[2])
self.middle = BlockEncoder(filters[3])
# self.Conv5 = BlockEncoder(filters[4])
# self.Up5 = nn.Conv2d(filters[4]*2, filters[3], kernel_size=3, stride=1, padding=1, bias=True)
self.up_conv2 = conv_relu(filters[2]*2, filters[2], kernel_size=1, padding=0, stride=1, bias=True)
self.decoder2 = BlockDecoder(filters[4])
# self.Up4 = nn.ConvTranspose2d(filters[3], filters[2], kernel_size=2, stride=2, padding=0, bias=True)
# self.Up4 = nn.Conv2d(filters[3]*2, filters[2], kernel_size=3, stride=1, padding=1, bias=True)
        self.up_conv1 = conv_relu(filters[1]*2, filters[1], kernel_size=1, padding=0, stride=1, bias=True)
self.decoder1 = BlockDecoder(filters[3])
# self.Up3 = nn.Conv2d(filters[2]*2, filters[1], kernel_size=3, stride=1, padding=1, bias=True)
self.up_conv0 = conv_relu(filters[0]*2, filters[0], kernel_size=1, padding=0, stride=1, bias=True)
self.decoder0 = BlockDecoder(filters[2])
# self.Up2 = nn.Conv2d(filters[1]*2, filters[0], kernel_size=3, stride=1, padding=1, bias=True)
# self.Up_conv2 = BlockDecoder(filters[1])
self.Conv = nn.Conv2d(filters[0], in_ch, kernel_size=3, padding=1, stride=1, padding_mode='replicate')
def forward(self, x):
basic = self.conv0(x)
basic1 = self.conv1(basic)
encode0 = self.encoder0(basic1)
down0 = self.down0(encode0)
encode1 = self.encoder1(down0)
down1 = self.down1(encode1)
encode2 = self.encoder2(down1)
down2 = self.down2(encode2)
media_end = self.middle(down2)
deblock2 = F.upsample_bilinear(media_end, scale_factor=2)
deblock2 = torch.cat((deblock2, encode2), dim=1)
deblock2 = self.up_conv2(deblock2)
deblock2 = self.decoder2(deblock2)
deblock1 = F.upsample_bilinear(deblock2, scale_factor=2)
deblock1 = torch.cat((deblock1, encode1), dim=1)
deblock1 = self.up_conv1(deblock1)
deblock1 = self.decoder1(deblock1)
deblock0 = F.upsample_bilinear(deblock1, scale_factor=2)
deblock0 = torch.cat((deblock0, encode0), dim=1)
deblock0 = self.up_conv0(deblock0)
deblock0 = self.decoder0(deblock0)
decoding_end = deblock0 + basic
res = self.Conv(decoding_end)
out = x + res
return out
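# Usage sketch (hypothetical input sizes): GRNet applies three stride-2
# downsampling convs and three x2 bilinear upsamples, so H and W should be
# divisible by 8 for the skip connections to line up.
#   net = GRNet(in_ch=25)
#   x = torch.randn(1, 25, 64, 64)
#   net(x).shape  # -> torch.Size([1, 25, 64, 64]), residual added to the input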
class conv_block(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(negative_slope=0.01, inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(negative_slope=0.01, inplace=True))
self.conv_residual = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=True)
def forward(self, x):
x = self.conv(x) + self.conv_residual(x)
return x
class U_Net_GR(nn.Module):
"""
UNet - Basic Implementation
Paper : https://arxiv.org/abs/1505.04597
"""
def __init__(self, in_ch=34, out_ch=34):
super(U_Net_GR, self).__init__()
n1 = 64
filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
self.Down1 = nn.Conv2d(filters[0], filters[0], kernel_size=4, stride=2, padding=1, bias=True)
self.Down2 = nn.Conv2d(filters[1], filters[1], kernel_size=4, stride=2, padding=1, bias=True)
self.Down3 = nn.Conv2d(filters[2], filters[2], kernel_size=4, stride=2, padding=1, bias=True)
self.Down4 = nn.Conv2d(filters[3], filters[3], kernel_size=4, stride=2, padding=1, bias=True)
self.Conv1 = conv_block(in_ch, filters[0])
self.skip1 = nn.Conv2d(in_ch, filters[0], kernel_size=1, stride=1, padding=0)
self.Conv2 = conv_block(filters[0], filters[1])
self.skip2 = nn.Conv2d(filters[0], filters[1], kernel_size=1, stride=1, padding=0)
self.Conv3 = conv_block(filters[1], filters[2])
self.skip3 = nn.Conv2d(filters[1], filters[2], kernel_size=1, stride=1, padding=0)
self.Conv4 = conv_block(filters[2], filters[3])
self.skip4 = nn.Conv2d(filters[2], filters[3], kernel_size=1, stride=1, padding=0)
self.Conv5 = conv_block(filters[3], filters[4])
self.skip5 = nn.Conv2d(filters[3], filters[4], kernel_size=1, stride=1, padding=0)
self.Up_conv5 = conv_block(filters[4], filters[3])
self.skip_up5 = nn.Conv2d(filters[4], filters[3], kernel_size=1, stride=1, padding=0)
self.Up_conv4 = conv_block(filters[3], filters[2])
self.skip_up4 = nn.Conv2d(filters[3], filters[2], kernel_size=1, stride=1, padding=0)
self.Up_conv3 = conv_block(filters[2], filters[1])
self.skip_up3 = nn.Conv2d(filters[2], filters[1], kernel_size=1, stride=1, padding=0)
self.Up_conv2 = conv_block(filters[1], filters[0])
self.skip_up2 = nn.Conv2d(filters[1], filters[0], kernel_size=1, stride=1, padding=0)
# self.Conv1 = DCM(in_ch, filters[0])
# self.Conv2 = DCM(filters[0], filters[1])
# self.Conv3 = DCM(filters[1], filters[2])
# self.Conv4 = DCM(filters[2], filters[3])
# self.Conv5 = DCM(filters[3], filters[4])
# self.Up_conv5 = DCM(filters[4], filters[3])
# self.Up_conv4 = DCM(filters[3], filters[2])
# self.Up_conv3 = DCM(filters[2], filters[1])
# self.Up_conv2 = DCM(filters[1], filters[0])
self.GCM1 = GCM(filters[0])
self.GCM2 = GCM(filters[1])
self.GCM3 = GCM(filters[2])
self.GCM4 = GCM(filters[3])
self.GCM5 = GCM(filters[4])
self.Up5 = nn.ConvTranspose2d(filters[4], filters[3], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM5 = GSM(filters[4])
self.Up4 = nn.ConvTranspose2d(filters[3], filters[2], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM4 = GSM(filters[3])
self.Up3 = nn.ConvTranspose2d(filters[2], filters[1], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM3 = GSM(filters[2])
self.Up2 = nn.ConvTranspose2d(filters[1], filters[0], kernel_size=2, stride=2, padding=0, bias=True)
self.GSM2 = GSM(filters[1])
self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
e1 = self.GCM1(self.Conv1(x)) + self.skip1(x)
e2 = self.Down1(e1)
e2 = self.GCM2(self.Conv2(e2)) + self.skip2(e2)
e3 = self.Down2(e2)
e3 = self.GCM3(self.Conv3(e3)) + self.skip3(e3)
e4 = self.Down3(e3)
e4 = self.GCM4(self.Conv4(e4)) + self.skip4(e4)
e5 = self.Down4(e4)
e5 = self.GCM5(self.Conv5(e5)) + self.skip5(e5)
d5 = self.Up5(e5)
d5 = torch.cat((e4, d5), dim=1)
d5 = self.Up_conv5(self.GSM5(d5)) + self.skip_up5(d5)
d4 = self.Up4(d5)
d4 = torch.cat((e3, d4), dim=1)
d4 = self.Up_conv4(self.GSM4(d4)) + self.skip_up4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((e2, d3), dim=1)
d3 = self.Up_conv3(self.GSM3(d3)) + self.skip_up3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((e1, d2), dim=1)
d2 = self.Up_conv2(self.GSM2(d2)) + self.skip_up2(d2)
out = self.Conv(d2)
#d1 = self.active(out)
return out+x
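# Usage sketch (hypothetical sizes): four stride-2 downsamplings (kernel 4,
# padding 1) halve H and W exactly, and four ConvTranspose2d stages double them
# back, so H and W should be divisible by 16.
#   net = U_Net_GR(in_ch=34, out_ch=34)
#   x = torch.randn(1, 34, 64, 64)
#   net(x).shape  # -> torch.Size([1, 34, 64, 64]), via the final out + x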
| 14,247 | 38.359116 | 139 | py |
SERT | SERT-master/models/competing_methods/macnet/MACNet.py | from collections import namedtuple
from .ops.utils import est_noise,count
# from model.qrnn.combinations import *
from .non_local import NLBlockND,EfficientNL
from .combinations import *
Params = namedtuple('Params', ['in_channels', 'channels', 'num_half_layer','rs'])
from skimage.restoration import denoise_nl_means,estimate_sigma
class MACNet(nn.Module):
'''
    Tied LISTA with coupling
'''
def __init__(self, in_channels=1,channels =16, num_half_layer =5 ):
super(MACNet, self).__init__()
self.rs = 2
self.net=REDC3DBNRES_NL(in_channels=in_channels,channels=channels,num_half_layer=num_half_layer)
def forward(self, I, writer=None, epoch=None, return_patches=False):
return self.pro_sub(I)
def pro_sub(self, I):
R = list()
Ek = list()
Rw = list()
I_iid = list()
sigma_est = 0
I_size = I.shape
for _I in I:
_I = _I.permute([1, 2, 0])
_, _, w, _Rw = count(_I) # count subspace
_I = torch.matmul(_I, torch.inverse(_Rw).sqrt()) # spectral iid
I_nlm = _I.cpu().numpy()
sigma_est = estimate_sigma(I_nlm, multichannel=True, average_sigmas=True)
I_nlm = denoise_nl_means(I_nlm, patch_size=7, patch_distance=9, h=0.08, multichannel=True,
fast_mode=True, sigma=sigma_est)
I_nlm = torch.FloatTensor(I_nlm).to(device=_I.device)
_R, _Ek, _, _ = count(I_nlm)
if self.rs:
_R = _R // 3
# _R = max(_R, torch.FloatTensor(3).to(I.device))
R.append(_R)
Ek.append(_Ek)
Rw.append(_Rw)
I_iid.append(_I)
dim = max(torch.stack(R).max(), 3)
Ek = torch.stack(Ek, dim=0)
I_iid = torch.stack(I_iid, dim=0)
Ek = Ek[:, :, 0:dim]
Rw = torch.stack(Rw, dim=0)
I_sub = torch.bmm(I_iid.view(I_size[0], -1, I_size[1]), Ek)
I_sub = I_sub.view(I_size[0], I_size[2], I_size[3], -1).permute([0, 3, 1, 2])
CNN_sub = self.net(I_sub.unsqueeze(1)).squeeze(1)
CNN_sub = CNN_sub.view(I_size[0], dim, -1)
output = torch.bmm(Rw.sqrt(), torch.bmm(Ek, CNN_sub))
output = output.view(I_size)
return output
class REDC3DBNRES_NL(torch.nn.Module):
"""Residual Encoder-Decoder Convolution 3D
Args:
        downsample: number of downsampling steps; None denotes no downsampling"""
def __init__(self, in_channels, channels, num_half_layer, downsample=None):
super(REDC3DBNRES_NL, self).__init__()
# Encoder
# assert downsample is None or 0 < downsample <= num_half_layer
interval = 2
self.feature_extractor = BNReLUConv3d(in_channels, channels)
self.encoder = nn.ModuleList()
for i in range(1, num_half_layer + 1):
if i % interval:
encoder_layer = BNReLUConv3d(channels, channels)
else:
encoder_layer = BNReLUConv3d(channels, 2 * channels, k=3, s=(1, 2, 2), p=1)
channels *= 2
self.encoder.append(encoder_layer)
# Decoder
self.decoder = nn.ModuleList()
for i in range(1, num_half_layer + 1):
if i % interval:
decoder_layer = BNReLUDeConv3d(channels, channels)
else:
decoder_layer = BNReLUUpsampleConv3d(channels, channels // 2)
channels //= 2
self.decoder.append(decoder_layer)
self.reconstructor = BNReLUDeConv3d(channels, in_channels)
# self.enl_1 = EfficientNL(in_channels=channels)
self.enl_2 = EfficientNL(in_channels=channels)
self.enl_3 = EfficientNL(in_channels=1,key_channels=1,value_channels=1,head_count=1)
def forward(self, x):
num_half_layer = len(self.encoder)
xs = [x]
out = self.feature_extractor(xs[0])
xs.append(out)
for i in range(num_half_layer - 1):
out = self.encoder[i](out)
xs.append(out)
out = self.encoder[-1](out)
# out = self.nl_1(out)
out = self.decoder[0](out)
for i in range(1, num_half_layer):
out = out + xs.pop()
out = self.decoder[i](out)
out = self.enl_2(out) + xs.pop()
out = self.reconstructor(out)
out = self.enl_3(out) + xs.pop()
return out
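# Usage sketch (illustrative only): MACNet takes a hyperspectral batch
# [B, bands, H, W], projects each cube onto an estimated signal subspace
# (est_noise/hysime via `count`), denoises the projection with REDC3DBNRES_NL,
# and maps the result back. The two stride-(1, 2, 2) encoder layers mean H and
# W should be divisible by 4.
#   net = MACNet(in_channels=1, channels=16, num_half_layer=5)
#   I = torch.rand(1, 31, 64, 64)
#   net(I).shape  # -> torch.Size([1, 31, 64, 64])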
| 4,441 | 38.309735 | 104 | py |
SERT | SERT-master/models/competing_methods/macnet/combinations.py | import torch
import torch.nn as nn
from torch.nn import functional
from models.competing_methods.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
BatchNorm3d = SynchronizedBatchNorm3d
BatchNorm2d=SynchronizedBatchNorm2d
class BNReLUConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
class BNReLUConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUConv2d, self).__init__()
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))
class Conv3dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(Conv3dBNReLU, self).__init__()
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class Conv2dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(Conv2dBNReLU, self).__init__()
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class BNReLUDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUDeConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
class BNReLUDeConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUDeConv2d, self).__init__()
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
class DeConv3dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(DeConv3dBNReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class DeConv2dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
        super(DeConv2dBNReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class ReLUDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(ReLUDeConv3d, self).__init__()
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
class ReLUDeConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(ReLUDeConv2d, self).__init__()
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
class BNReLUUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False):
super(BNReLUUpsampleConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class BNReLUUpsampleConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), inplace=False):
super(BNReLUUpsampleConv2d, self).__init__()
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class UpsampleConv3dBNReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False):
super(UpsampleConv3dBNReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv2dBNReLU(nn.Sequential):
    def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), inplace=False):
super(UpsampleConv2dBNReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class Conv3dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(Conv3dReLU, self).__init__()
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
        if bn:
            self.add_module('bn', BatchNorm3d(channels))
        self.add_module('relu', nn.ReLU(inplace=inplace))
class Conv2dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(Conv2dReLU, self).__init__()
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=False))
if bn:
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class DeConv3dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(DeConv3dReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
if bn:
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class DeConv2dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False,bn=False):
super(DeConv2dReLU, self).__init__()
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=False))
if bn:
self.add_module('bn', BatchNorm2d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv3dReLU(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False,bn=False):
super(UpsampleConv3dReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
if bn:
self.add_module('bn', BatchNorm3d(channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv2dReLU(nn.Sequential):
    def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), inplace=False):
super(UpsampleConv2dReLU, self).__init__()
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
self.add_module('relu', nn.ReLU(inplace=inplace))
class UpsampleConv3d(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, upsample=None):
super(UpsampleConv3d, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample, mode='trilinear', align_corners=True)
self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.conv3d(x_in)
return out
class UpsampleConv2d(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, upsample=None):
super(UpsampleConv2d, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample, mode='bilinear', align_corners=True)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.conv2d(x_in)
return out
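# Usage sketch: upsample-then-conv avoids the checkerboard artifacts that
# ConvTranspose2d can introduce (see the distill.pub reference above).
#   up = UpsampleConv2d(16, 8, kernel_size=3, stride=1, padding=1, upsample=(2, 2))
#   x = torch.randn(1, 16, 20, 20)
#   up(x).shape  # -> torch.Size([1, 8, 40, 40])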
class BasicConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=bias))
class BasicConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicConv2d, self).__init__()
if bn:
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('conv', nn.Conv2d(in_channels, channels, k, s, p, bias=bias))
class BasicDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicDeConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=bias))
class BasicDeConv2d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicDeConv2d, self).__init__()
if bn:
self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('deconv', nn.ConvTranspose2d(in_channels, channels, k, s, p, bias=bias))
class BasicUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), bn=True):
super(BasicUpsampleConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class BasicUpsampleConv2d(nn.Sequential):
    def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(2,2), bn=True):
        super(BasicUpsampleConv2d, self).__init__()
        if bn:
            self.add_module('bn', BatchNorm2d(in_channels))
self.add_module('upsample_conv', UpsampleConv2d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
| 11,593 | 48.33617 | 119 | py |
SERT | SERT-master/models/competing_methods/macnet/non_local.py | import torch
from torch import nn
from torch.nn import functional as F
class EfficientNL(nn.Module):
def __init__(self, in_channels, key_channels=None, head_count=None, value_channels=None):
super(EfficientNL, self).__init__()
self.in_channels = in_channels
self.key_channels = key_channels
self.head_count = head_count
self.value_channels = value_channels
        if self.key_channels is None:
            self.key_channels = self.in_channels // 2
        if self.value_channels is None:
            self.value_channels = self.in_channels // 2
        if self.head_count is None:
            self.head_count = 2
self.keys = nn.Conv3d( self.in_channels, self.key_channels, 1)
self.queries = nn.Conv3d( self.in_channels, self.key_channels, 1)
self.values = nn.Conv3d( self.in_channels, self.value_channels, 1)
self.reprojection = nn.Conv3d(self.value_channels, self.in_channels, 1)
def forward(self, input_):
n, _,c, h, w = input_.size()
keys = self.keys(input_).reshape((n, self.key_channels,-1))
queries = self.queries(input_).reshape(n, self.key_channels, -1)
values = self.values(input_).reshape((n, self.value_channels, -1))
head_key_channels = self.key_channels // self.head_count
head_value_channels = self.value_channels // self.head_count
attended_values = []
for i in range(self.head_count):
key = F.softmax(keys[
:,
i * head_key_channels: (i + 1) * head_key_channels,
:
], dim=2)
query = F.softmax(queries[
:,
i * head_key_channels: (i + 1) * head_key_channels,
:
], dim=1)
value = values[
:,
i * head_value_channels: (i + 1) * head_value_channels,
:
]
context = key @ value.transpose(1, 2)
attended_value = (
context.transpose(1, 2) @ query
).reshape(n, head_value_channels,c, h, w)
attended_values.append(attended_value)
aggregated_values = torch.cat(attended_values, dim=1)
reprojected_value = self.reprojection(aggregated_values)
attention = reprojected_value + input_
return attention
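# Shape sketch (illustrative): EfficientNL expects 5-D input (N, C, D, H, W).
# Because each head contracts keys against values first (context = key @
# value^T, a small d x d matrix), the cost is linear in the number of
# positions instead of quadratic as in standard non-local attention.
#   nl = EfficientNL(in_channels=16)
#   x = torch.randn(1, 16, 4, 8, 8)
#   nl(x).shape  # -> torch.Size([1, 16, 4, 8, 8])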
class NLBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, mode='embedded',
dimension=3, bn_layer=True, levels=None):
"""Implementation of Non-Local Block with 4 different pairwise functions
args:
in_channels: original channel size (1024 in the paper)
            inter_channels: channel size inside the block; if not specified, reduced to half (512 in the paper)
mode: supports Gaussian, Embedded Gaussian, Dot Product, and Concatenation
dimension: can be 1 (temporal), 2 (spatial), 3 (spatiotemporal)
bn_layer: whether to add batch norm
"""
super(NLBlockND, self).__init__()
assert dimension in [1, 2, 3]
if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:
raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')
self.mode = mode
self.dimension = dimension
self.in_channels = in_channels
self.inter_channels = inter_channels
if levels is not None:
self.ssp=True
            # NOTE: SpatialPyramidPooling is not defined or imported in this file;
            # it must be supplied by the surrounding package when `levels` is set.
            self.p = SpatialPyramidPooling(levels=[2*i+1 for i in range(0,levels)])
else:
self.ssp = False
# the channel size is reduced to half inside the block
if self.inter_channels is None:
self.inter_channels = in_channels // 4
if self.inter_channels == 0:
self.inter_channels = 1
# assign appropriate convolutional, max pool, and batch norm layers for different dimensions
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
# function g in the paper which goes through conv. with kernel size 1
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1)
# add BatchNorm layer after the last conv layer
if bn_layer:
self.W_z = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1),
bn(self.in_channels)
)
nn.init.constant_(self.W_z[1].weight, 0)
nn.init.constant_(self.W_z[1].bias, 0)
else:
self.W_z = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1)
nn.init.constant_(self.W_z.weight, 0)
nn.init.constant_(self.W_z.bias, 0)
# define theta and phi for all operations except gaussian
if self.mode == "embedded" or self.mode == "dot" or self.mode == "concatenate":
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1)
if self.mode == "concatenate":
self.W_f = nn.Sequential(
nn.Conv2d(in_channels=self.inter_channels * 2, out_channels=1, kernel_size=1),
nn.ReLU()
)
# print()
def forward(self, x):
"""
args
x: (N, C, T, H, W) for dimension=3; (N, C, H, W) for dimension 2; (N, C, T) for dimension 1
"""
batch_size,c,t,h,w = x.size()
# (N, C, THW)
g_x = self.g(x).view(batch_size, -1, h,w)
if self.ssp:
g_x = self.p(g_x)
g_x=g_x.view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
# print(self.mode)
if self.mode == "gaussian":
theta_x = x.view(batch_size, self.in_channels, -1)
phi_x = x.view(batch_size, self.in_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
f = torch.matmul(theta_x, phi_x)
elif self.mode == "embedded" or self.mode == "dot":
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
phi_x = self.phi(x).view(batch_size, -1, h,w)
if self.ssp:
phi_x=self.p(phi_x)
phi_x=phi_x.view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
f = torch.matmul(theta_x, phi_x)
elif self.mode == "concatenate":
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, 1, -1)
h = theta_x.size(2)
w = phi_x.size(3)
theta_x = theta_x.repeat(1, 1, 1, w)
phi_x = phi_x.repeat(1, 1, h, 1)
concat = torch.cat([theta_x, phi_x], dim=1)
f = self.W_f(concat)
f = f.view(f.size(0), f.size(2), f.size(3))
if self.mode == "gaussian" or self.mode == "embedded":
f_div_C = F.softmax(f, dim=-1)
elif self.mode == "dot" or self.mode == "concatenate":
            N = f.size(-1)  # number of positions in x
f_div_C = f / N
# print(f_div_C.shape)
# print(g_x.shape)
y = torch.matmul(f_div_C, g_x)
        # .contiguous() ensures the tensor occupies a contiguous chunk of memory before view()
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W_z(y)
# residual connection
z = W_y + x
return z
if __name__ == '__main__':
import torch
# for bn_layer in [True, False]:
# img = torch.zeros(2, 3, 20)
# net = NLBlockND(in_channels=3, mode='concatenate', dimension=1, bn_layer=bn_layer)
# out = net(img)
# print(out.size())
#
# img = torch.zeros(2, 3, 20, 20)
# net = NLBlockND(in_channels=3, mode='concatenate', dimension=2, bn_layer=bn_layer)
# out = net(img)
# print(out.size())
img = torch.randn(1, 16, 31, 512, 512)
net = EfficientNL(in_channels=16)
out = net(img)
print(out.size())
| 8,755 | 40.107981 | 111 | py |
SERT | SERT-master/models/competing_methods/macnet/__init__.py | from .MACNet import MACNet | 26 | 26 | 26 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/gauss.py | #!/usr/bin/env python
"""Module providing functionality surrounding gaussian function.
"""
SVN_REVISION = '$LastChangedRevision: 16541 $'
import sys
import numpy
def gaussian2(size, sigma):
"""Returns a normalized circularly symmetric 2D gauss kernel array
f(x,y) = A.e^{-(x^2/2*sigma^2 + y^2/2*sigma^2)} where
A = 1/(2*pi*sigma^2)
    as defined by Wolfram MathWorld
http://mathworld.wolfram.com/GaussianFunction.html
"""
A = 1 / (2.0 * numpy.pi * sigma ** 2)
x, y = numpy.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
g = A * numpy.exp(-((x ** 2 / (2.0 * sigma ** 2)) + (y ** 2 / (2.0 * sigma ** 2))))
return g
def fspecial_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x, y = numpy.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
g = numpy.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))
return g / g.sum()
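# Worked example: the kernel is normalized, so its entries sum to 1 and it can
# be used directly as an averaging window.
#   k = fspecial_gauss(11, 1.5)
#   k.shape         # (11, 11)
#   float(k.sum())  # ~1.0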
def main():
"""Show simple use cases for functionality provided by this module."""
from mpl_toolkits.mplot3d.axes3d import Axes3D
import pylab
argv = sys.argv
if len(argv) != 3:
        print('usage: python -m pim.sp.gauss size sigma', file=sys.stderr)
sys.exit(2)
size = int(argv[1])
sigma = float(argv[2])
x, y = numpy.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
fig = pylab.figure()
fig.suptitle('Some 2-D Gauss Functions')
ax = fig.add_subplot(2, 1, 1, projection='3d')
ax.plot_surface(x, y, fspecial_gauss(size, sigma), rstride=1, cstride=1,
linewidth=0, antialiased=False, cmap=pylab.jet())
ax = fig.add_subplot(2, 1, 2, projection='3d')
ax.plot_surface(x, y, gaussian2(size, sigma), rstride=1, cstride=1,
linewidth=0, antialiased=False, cmap=pylab.jet())
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main())
# {"mode": "full", "isActive": false} | 1,954 | 31.583333 | 87 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/utils_blocks.py | import torch
import torch.nn.functional as F
from ops.im2col import Im2Col, Col2Im, Col2Cube,Cube2Col
def shape_pad_even(tensor_shape, patch,stride):
assert len(tensor_shape) == 4
b,c,h,w = tensor_shape
required_pad_h = stride - (h-patch) % stride
required_pad_w = stride - (w-patch) % stride
return required_pad_h,required_pad_w
class block_module():
def __init__(self,block_size,block_stride, kernel_size, params):
super(block_module).__init__()
self.params = params
self.kernel_size = kernel_size
self.block_size = block_size
self.block_stride = block_stride
# self.channel_size = channel_size
def _make_blocks(self, image, return_padded=False):
'''
:param image: (1,C,H,W)
        :return: raw block (batch, C, block_size, block_size), tuple of the augmented image shape
'''
params = self.params
self.channel_size = image.shape[1]
if params['pad_block']:
pad = (self.block_size - 1,) * 4
elif params['pad_patch']:
pad = (self.kernel_size,)*4
elif params['no_pad']:
pad = (0,) * 4
elif params['custom_pad'] is not None:
pad = (params['custom_pad'],) * 4
else:
raise NotImplementedError
image_mirror_padded = F.pad(image, pad, mode='reflect')
pad_even = shape_pad_even(image_mirror_padded.shape, self.block_size, self.block_stride)
pad_h, pad_w = pad_even
if params['centered_pad']:
pad_ = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
else:
pad_ =(0, pad_w, 0, pad_h)
pad = tuple([x+y for x,y in zip(pad,pad_)])
self.pad = pad
        image_mirror_padded_even = F.pad(image, pad, mode='reflect')  # add half kernel since block edges are unreliable
self.augmented_shape = image_mirror_padded_even.shape
if return_padded:
return image_mirror_padded
batch_blocks = Im2Col(image_mirror_padded_even,
kernel_size=self.block_size,
stride= self.block_stride,
padding=0)
batch_blocks = batch_blocks.permute(2, 0, 1)
batch_blocks = batch_blocks.view(-1, self.channel_size, self.block_size, self.block_size)
return batch_blocks
def _make_cubes(self, image, return_padded=False):
'''
:param image: (1,C,H,W)
        :return: raw block (batch_spa, batch_spec, block_size, block_size, block_size), tuple of the augmented image shape
'''
params = self.params
self.channel_size = image.shape[1]
if params['pad_block']:
pad = (self.block_size - 1,) * 4
elif params['pad_patch']:
pad = (self.kernel_size,)*4
elif params['no_pad']:
pad = (0,) * 4
elif params['custom_pad'] is not None:
pad = (params['custom_pad'],) * 4
else:
raise NotImplementedError
image_mirror_padded = F.pad(image, pad, mode='reflect')
pad_even = shape_pad_even(image_mirror_padded.shape, self.block_size, self.block_stride)
pad_h, pad_w = pad_even
if params['centered_pad']:
pad_ = (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
else:
pad_ =(0, pad_w, 0, pad_h)
pad = tuple([x+y for x,y in zip(pad,pad_)])
self.pad = pad
        image_mirror_padded_even = F.pad(image, pad, mode='reflect')  # add half kernel since block edges are unreliable
self.augmented_shape = image_mirror_padded_even.shape
if return_padded:
return image_mirror_padded
batch_blocks = Im2Col(image_mirror_padded_even,
kernel_size=self.block_size,
stride= self.block_stride,
padding=0)
batch_blocks = batch_blocks.permute(2, 0, 1)
batch_blocks = batch_blocks.view(-1, self.channel_size, self.block_size, self.block_size)
return batch_blocks
def _agregate_blocks(self,batch_out_blocks):
'''
:param blocks: processed blocks
:return: image of averaged estimates
'''
h_pad, w_pad = self.augmented_shape[2:]
params = self.params
l = self.kernel_size // 2
device = batch_out_blocks.device
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
batch_out_blocks_flatten = batch_out_blocks.view(-1,self.channel_size * self.block_size**2).transpose(0,1).unsqueeze(0)
        # print(self.block_size)
        # print(self.kernel_size)
if params['ponderate_out_blocks']:
if self.kernel_size%2==0:
mask = F.conv_transpose2d(torch.ones((1,1)+(self.block_size - 2 * l,)*2),
torch.ones((1,1)+(self.kernel_size+1,)*2))
else:
mask = F.conv_transpose2d(torch.ones((1, 1) + (self.block_size - 2 * l,) * 2),
torch.ones((1, 1) + (self.kernel_size,) * 2))
mask = mask.to(device=device)
            # print(batch_out_blocks.shape)
            # print(mask.shape)
batch_out_blocks *= mask
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
output_padded = Col2Im(batch_out_blocks_flatten,
output_size=(h_pad, w_pad),
kernel_size=self.block_size,
stride=self.block_stride,
padding=0,
avg=False)
batch_out_blocks_ones = torch.ones_like(batch_out_blocks) * mask
# batch_out_blocks_flatten_ones = batch_out_blocks_ones.flatten(2, 3).permute(1, 2, 0)
batch_out_blocks_flatten_ones = batch_out_blocks_ones.view(-1, self.channel_size * self.block_size ** 2).transpose(0,1).unsqueeze(0)
if params['avg']:
mask_ = Col2Im(batch_out_blocks_flatten_ones,
output_size=(h_pad, w_pad),
kernel_size=self.block_size,
stride=self.block_stride,
padding=0,
avg=False)
output_padded /= mask_
elif params['crop_out_blocks']:
kernel_ = self.block_size - 2 * l
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
output_padded = Col2Im(batch_out_blocks_flatten,
output_size=(h_pad - 2 * l, w_pad - 2 * l),
kernel_size=kernel_,
stride=self.block_size,
padding=0,
avg=params['avg'])
elif params['sum_blocks']:
# batch_out_blocks_flatten = batch_out_blocks.flatten(2, 3).permute(1, 2, 0)
output_padded = Col2Im(batch_out_blocks_flatten,
output_size=(h_pad, w_pad),
kernel_size=self.block_size,
stride=self.block_stride,
padding=0,
avg=params['avg'])
else:
raise NotImplementedError
pad = self.pad
output = output_padded[:, :, pad[2]:-pad[3], pad[0]:-pad[1]]
return output
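# Usage sketch (hypothetical `params` dict; the keys mirror the branches above).
# _make_blocks must run before _agregate_blocks, since it records self.pad and
# the padded shape used for the overlap-add reconstruction.
#   params = {'pad_block': True, 'pad_patch': False, 'no_pad': False,
#             'custom_pad': None, 'centered_pad': False,
#             'ponderate_out_blocks': False, 'crop_out_blocks': False,
#             'sum_blocks': True, 'avg': True}
#   bm = block_module(block_size=32, block_stride=16, kernel_size=5, params=params)
#   blocks = bm._make_blocks(img)         # img: (1, C, H, W)
#   recon = bm._agregate_blocks(blocks)   # averaged estimate, same size as img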
| 7,650 | 39.057592 | 144 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/utils.py | import torch
import torch.functional as F
from random import randint
import argparse
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage.measure import compare_ssim, compare_psnr
from .gauss import fspecial_gauss
from scipy import signal
def kronecker(A, B):
return torch.einsum("ab,cd->acbd", A, B).view(A.size(0)*B.size(0), A.size(1)*B.size(1))
def gen_bayer_mask(h,w):
x = torch.zeros(1, 3, h, w)
x[:, 0, 1::2, 1::2] = 1 # r
x[:, 1, ::2, 1::2] = 1
x[:, 1, 1::2, ::2] = 1 # g
x[:, 2, ::2, ::2] = 1 # b
return x
def togray(tensor):
b, c, h, w = tensor.shape
tensor = tensor.view(b, 3, -1, h, w)
tensor = tensor.sum(1)
return tensor
def torch_to_np(img_var):
return img_var.detach().cpu().numpy()
def plot_tensor(img, **kwargs):
inp_shape = tuple(img.shape)
print(inp_shape)
img_np = torch_to_np(img)
if inp_shape[1]==3:
img_np_ = img_np.transpose([1,2,0])
plt.imshow(img_np_)
elif inp_shape[1]==1:
img_np_ = np.squeeze(img_np)
plt.imshow(img_np_, **kwargs)
else:
# raise NotImplementedError
plt.imshow(img_np, **kwargs)
plt.axis('off')
def get_mask(A):
mask = A.clone().detach()
mask[A != 0] = 1
return mask.byte()
def sparsity(A):
return get_mask(A).sum().item()/A.numel()
def soft_threshold(x, lambd):
return nn.functional.relu(x - lambd,inplace=True) - nn.functional.relu(-x - lambd,inplace=True)
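# Worked example: soft_threshold computes sign(x) * max(|x| - lambd, 0):
#   soft_threshold(torch.tensor([-2.0, -0.5, 0.5, 2.0]), 1.0)
#   # -> tensor([-1., 0., 0., 1.])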
def nn_threshold(x, lambd):
return nn.functional.relu(x - lambd)
def fastSoftThrs(x, lmbda):
return x + 0.5 * (torch.abs(x-torch.abs(lmbda))-torch.abs(x+torch.abs(lmbda)))
def save_checkpoint(state,ckpt_path):
torch.save(state, ckpt_path)
def generate_key():
return '{}'.format(randint(0, 100000))
def show_mem():
mem = torch.cuda.memory_allocated() * 1e-6
max_mem = torch.cuda.max_memory_allocated() * 1e-6
return mem, max_mem
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def step_lr(optimizer, lr_decay):
lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr * lr_decay
def set_lr(optimizer, lr):
# lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr
def step_lr_als(optimizer, lr_decay):
lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr * lr_decay
optimizer.param_groups[1]['lr'] *= lr_decay
def get_lr(optimizer):
return optimizer.param_groups[0]['lr']
def gen_mask_windows(h, w):
'''
return mask for block window
:param h:
:param w:
:return: (h,w,h,w)
'''
mask = torch.zeros(2 * h, 2 * w, h, w)
for i in range(h):
for j in range(w):
mask[i:i + h, j:j + w, i, j] = 1
return mask[h // 2:-h // 2, w // 2:-w // 2, :, :]
def gen_linear_mask_windows(h, w, h_,w_):
'''
return mask for block window
:param h:
:param w:
:return: (h,w,h,w)
'''
x = torch.ones(1, 1, h - h_ + 1, w - w_ + 1)
k = torch.ones(1, 1, h_, w_)
kernel = F.conv_transpose2d(x, k)
kernel /= kernel.max()
mask = torch.zeros(2 * h, 2 * w, h, w)
for i in range(h):
for j in range(w):
mask[i:i + h, j:j + w, i, j] = kernel
return mask[h // 2:-h // 2, w // 2:-w // 2, :, :]
def gen_quadra_mask_windows(h, w, h_,w_):
'''
return mask for block window
:param h:
:param w:
:return: (h,w,h,w)
'''
x = torch.ones(1, 1, h - h_ + 1, w - w_ + 1)
k = torch.ones(1, 1, h_, w_)
kernel = F.conv_transpose2d(x, k) **2
kernel /= kernel.max()
mask = torch.zeros(2 * h, 2 * w, h, w)
for i in range(h):
for j in range(w):
mask[i:i + h, j:j + w, i, j] = kernel
return mask[h // 2:-h // 2, w // 2:-w // 2, :, :]
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar)
def Init_DCT(n, m):
""" Compute the Overcomplete Discrete Cosinus Transform. """
n=int(n)
m=int(m)
Dictionary = np.zeros((n,m))
for k in range(m):
V = np.cos(np.arange(0, n) * k * np.pi / m)
if k > 0:
V = V - np.mean(V)
Dictionary[:, k] = V / np.linalg.norm(V)
# Dictionary = np.kron(Dictionary, Dictionary)
# Dictionary = Dictionary.dot(np.diag(1 / np.sqrt(np.sum(Dictionary ** 2, axis=0))))
# idx = np.arange(0, n ** 2)
# idx = idx.reshape(n, n, order="F")
# idx = idx.reshape(n ** 2, order="C")
# Dictionary = Dictionary[idx, :]
Dictionary = torch.from_numpy(Dictionary).float()
return Dictionary
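# Worked example: each column (atom) of the DCT dictionary is unit-norm by
# construction, and m > n gives an overcomplete dictionary.
#   D = Init_DCT(8, 16)
#   D.shape        # torch.Size([8, 16])
#   D.norm(dim=0)  # all ones, up to float error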
def est_noise(y, noise_type='additive'):
"""
This function infers the noise in a
hyperspectral data set, by assuming that the
reflectance at a given band is well modelled
by a linear regression on the remaining bands.
Parameters:
y: `numpy array`
a HSI cube ((m*n) x p)
noise_type: `string [optional 'additive'|'poisson']`
Returns: `tuple numpy array, numpy array`
* the noise estimates for every pixel (N x p)
* the noise correlation matrix estimates (p x p)
Copyright:
Jose Nascimento ([email protected]) and Jose Bioucas-Dias ([email protected])
For any comments contact the authors
"""
# def est_additive_noise(r):
# small = 1e-6
# L, N = r.shape
# w=np.zeros((L,N), dtype=np.float)
# RR=np.dot(r,r.T)
# RRi = np.linalg.pinv(RR+small*np.eye(L))
# RRi = np.matrix(RRi)
# for i in range(L):
# XX = RRi - (RRi[:,i]*RRi[i,:]) / RRi[i,i]
# RRa = RR[:,i]
# RRa[i] = 0
# beta = np.dot(XX, RRa)
# beta[0,i]=0;
# w[i,:] = r[i,:] - np.dot(beta,r)
# Rw = np.diag(np.diag(np.dot(w,w.T) / N))
# return w, Rw
def est_additive_noise(r):
small = 1e-6
L, N = r.shape
w=torch.zeros((L,N), dtype=torch.float,device=r.device)
[email protected]
# print((small*torch.eye(L,device=r.device)).device)
temp=RR+small*torch.eye(L,device=r.device)
# print(temp.device)
RRi = torch.inverse(temp)
# RRi = np.matrix(RRi)
for i in range(L):
XX = RRi - (RRi[:,i].unsqueeze(1)*RRi[i,:].unsqueeze(0)) / RRi[i,i]
RRa = RR[:,i]
RRa[i] = 0
beta =XX@RRa
            beta[i] = 0
w[i,:] = r[i,:] - beta@r
Rw = torch.diag(torch.diag(([email protected]) / N))
return w, Rw
h, w, numBands = y.shape
y = torch.reshape(y, (w * h, numBands))
# y = np.reshape(y, (w * h, numBands))
y = y.T
L, N = y.shape
# verb = 'poisson'
if noise_type == 'poisson':
sqy = torch.sqrt(y * (y > 0))
u, Ru = est_additive_noise(sqy)
x = (sqy - u) ** 2
w = torch.sqrt(x) * u * 2
Rw = ([email protected]) / N
# additive
else:
w, Rw = est_additive_noise(y)
return w.T, Rw.T
# y = y.T
# L, N = y.shape
# #verb = 'poisson'
# if noise_type == 'poisson':
# sqy = np.sqrt(y * (y > 0))
# u, Ru = est_additive_noise(sqy)
# x = (sqy - u)**2
# w = np.sqrt(x)*u*2
# Rw = np.dot(w,w.T) / N
# # additive
# else:
# w, Rw = est_additive_noise(y)
# return w.T, Rw.T
def hysime(y, n, Rn):
"""
Hyperspectral signal subspace estimation
Parameters:
y: `numpy array`
hyperspectral data set (each row is a pixel)
with ((m*n) x p), where p is the number of bands
and (m*n) the number of pixels.
n: `numpy array`
((m*n) x p) matrix with the noise in each pixel.
Rn: `numpy array`
noise correlation matrix (p x p)
Returns: `tuple integer, numpy array`
* kf signal subspace dimension
* Ek matrix which columns are the eigenvectors that span
the signal subspace.
Copyright:
Jose Nascimento ([email protected]) & Jose Bioucas-Dias ([email protected])
For any comments contact the authors
"""
h, w, numBands = y.shape
y = torch.reshape(y, (w * h, numBands))
y=y.T
n=n.T
Rn=Rn.T
L, N = y.shape
Ln, Nn = n.shape
d1, d2 = Rn.shape
    x = y - n
Ry = [email protected] / N
Rx = [email protected]/ N
E, dx, V =torch.svd(Rx.cpu())
E=E.to(device=y.device)
# print(V)
Rn = Rn+torch.sum(torch.diag(Rx))/L/10**5 * torch.eye(L,device=y.device)
Py = torch.diag(E.T@(Ry@E))
Pn = torch.diag(E.T@(Rn@E))
cost_F = -Py + 2 * Pn
kf = torch.sum(cost_F < 0)
ind_asc = torch.argsort(cost_F)
Ek = E[:, ind_asc[0:kf]]
# h, w, numBands = y.shape
# y = np.reshape(y, (w * h, numBands))
# y = y.T
# n = n.T
# Rn = Rn.T
# L, N = y.shape
# Ln, Nn = n.shape
# d1, d2 = Rn.shape
#
# x = y - n;
#
# Ry = np.dot(y, y.T) / N
# Rx = np.dot(x, x.T) / N
# E, dx, V = np.linalg.svd(Rx)
#
# Rn = Rn + np.sum(np.diag(Rx)) / L / 10 ** 5 * np.eye(L)
# Py = np.diag(np.dot(E.T, np.dot(Ry, E)))
# Pn = np.diag(np.dot(E.T, np.dot(Rn, E)))
# cost_F = -Py + 2 * Pn
# kf = np.sum(cost_F < 0)
# ind_asc = np.argsort(cost_F)
# Ek = E[:, ind_asc[0:kf]]
    return kf, E  # full eigenvector matrix; callers slice the leading kf columns themselves
def count(M):
w, Rw = est_noise(M)
kf, Ek = hysime(M, w, Rw)
return kf, Ek, w, Rw
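# Usage sketch (random data, so the estimated dimension is not meaningful):
# `count` chains est_noise and hysime on an (H, W, bands) cube, returning the
# signal-subspace dimension, its basis, and the noise estimates.
#   hsi = torch.rand(32, 32, 31)
#   kf, Ek, w, Rw = count(hsi)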
def cal_sam(X, Y, eps=1e-8):
# X = torch.squeeze(X.data).cpu().numpy()
# Y = torch.squeeze(Y.data).cpu().numpy()
tmp = (np.sum(X*Y, axis=0) + eps) / ((np.sqrt(np.sum(X**2, axis=0)) + eps) * (np.sqrt(np.sum(Y**2, axis=0)) + eps)+eps)
return np.mean(np.real(np.arccos(tmp)))
def cal_psnr(im_true,im_test,eps=1e-8):
c,_,_=im_true.shape
bwindex = []
for i in range(c):
bwindex.append(compare_psnr(im_true[i,:,:], im_test[i,:,:]))
return np.mean(bwindex)
def ssim(img1, img2, cs_map=False):
"""Return the Structural Similarity Map corresponding to input images img1
and img2 (images are assumed to be uint8)
    This function attempts to mimic precisely the functionality of ssim.m, the
    MATLAB implementation provided by the authors of SSIM:
    https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
"""
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
size = 11
sigma = 1.5
window = fspecial_gauss(size, sigma)
K1 = 0.01
K2 = 0.03
L = 255 # bitdepth of image
C1 = (K1 * L) ** 2
C2 = (K2 * L) ** 2
mu1 = signal.fftconvolve(window, img1, mode='valid')
mu2 = signal.fftconvolve(window, img2, mode='valid')
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_sq = signal.fftconvolve(window, img1 * img1, mode='valid') - mu1_sq
sigma2_sq = signal.fftconvolve(window, img2 * img2, mode='valid') - mu2_sq
sigma12 = signal.fftconvolve(window, img1 * img2, mode='valid') - mu1_mu2
if cs_map:
return (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2)),
(2.0 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))
else:
return ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
def cal_ssim(im_true,im_test,eps=1e-8):
# print(im_true.shape)
# print(im_true.shape)
# print(im_test.shape)
# im_true=im_true.cpu().numpy()
# im_test = im_test.cpu().numpy()
c,_,_=im_true.shape
bwindex = []
for i in range(c):
bwindex.append(ssim(im_true[i,:,:]*255, im_test[i,:,:,]*255))
return np.mean(bwindex)
# def cal_ssim(im_true,im_test,eps=13-8):
# c,_,_=im_true.shape
# bwindex = []
# for i in range(c):
# bwindex.append(compare_ssim(im_true[i,:,:], im_test[i,:,:,]))
# return np.mean(bwindex)
# class Bandwise(object):
# def __init__(self, index_fn):
# self.index_fn = index_fn
#
# def __call__(self, X, Y):
# C = X.shape[-3]
# bwindex = []
# for ch in range(C):
# x = torch.squeeze(X[...,ch,:,:].data).cpu().numpy()
# y = torch.squeeze(Y[...,ch,:,:].data).cpu().numpy()
# index = self.index_fn(x, y)
# bwindex.append(index)
# return bwindex
def MSIQA(X, Y):
# print(X.shape)
# print(Y.shape)
psnr = cal_psnr(X, Y)
ssim = cal_ssim(X, Y)
sam = cal_sam(X, Y)
return psnr, ssim, sam
if __name__ == '__main__':
hsi = torch.rand(200,200, 198)
w, Rw=est_noise(hsi)
kf, E= hysime(hsi, w, Rw)
print(kf)
| 13,403 | 27.887931 | 123 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/utils_plot.py | import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from torchvision.utils import make_grid
from ops.im2col import *
from ops.utils import get_mask
def plot_tensor(img, **kwargs):
inp_shape = tuple(img.shape)
print(inp_shape)
img_np = torch_to_np(img)
if inp_shape[1]==3:
img_np_ = img_np.transpose([1,2,0])
plt.imshow(img_np_)
elif inp_shape[1]==1:
img_np_ = np.squeeze(img_np)
plt.imshow(img_np_, **kwargs)
else:
# raise NotImplementedError
plt.imshow(img_np, **kwargs)
plt.axis('off')
def hist_tensor(img,**kwargs):
inp_shape = tuple(img.shape)
print(inp_shape)
img_np = torch_to_np(img)
return plt.hist(img_np.flatten(),**kwargs)
def np_to_torch(img_np):
'''Converts image in numpy.array to torch.Tensor.
    From C x W x H [0..1] to 1 x C x W x H [0..1]
'''
return torch.from_numpy(img_np)[None, :]
def torch_to_np(img_var):
'''Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.detach().cpu().numpy()[0]
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar)
def show_dict(m,a=None, norm_grid=False, sort_freq=True, norm=True):
n_elem,_,s = m.shape
s_ = int(math.sqrt(s))
m=m.view(n_elem,1,s_,s_)
if norm:
m = normalize_patches(m)
if sort_freq:
if a is None:
raise ValueError("provide code array to sort dicts by usage frequency")
idx = sort_patches(a)
m = m[idx]
grid = make_grid(m, normalize=norm_grid, padding=2,nrow=int(math.sqrt(n_elem)))
return grid
def whiten_col(tx,eps=1e-4):
shape = tx.shape
tx = tx.squeeze()
D = torch.mm(tx, tx.t()) / len(tx)
diag, v = torch.symeig(D, eigenvectors=True)
diag[diag < eps] = 1
diag = diag ** 0.5
diag = 1 / diag
S = torch.diag(diag)
out = v @ S @ v.t() @ tx
out = out.view(shape)
return out
def normalize_patches(D):
p=3.5
M=D.max()
m=D.min()
if m>=0:
me = 0
else:
me = D.mean()
sig = torch.sqrt(((D-me)**2).mean())
D=torch.min(torch.max(D, -p*sig),p*sig)
M=D.max()
m=D.min()
D = (D-m)/(M-m)
return D
def sort_patches(a):
code = get_mask(a).float()
code_freq = code.mean([0, 2, 3]).flatten()
_, idx = code_freq.sort(descending=True)
return idx
| 2,976 | 22.626984 | 83 | py |
SERT | SERT-master/models/competing_methods/macnet/ops/im2col.py | from torch.nn import functional as F
import torch
from torch.nn.modules.utils import _pair
import math
def Im2Col(input_tensor, kernel_size, stride, padding,dilation=1,tensorized=False,):
batch = input_tensor.shape[0]
out = F.unfold(input_tensor, kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
if tensorized:
lh,lw = im2col_shape(input_tensor.shape[1:],kernel_size=kernel_size,stride=stride,padding=padding,dilation=dilation)[-2:]
out = out.view(batch,-1,lh,lw)
return out
def Cube2Col(input_tensor, kernel_size, stride, padding,dilation=1,tensorized=False,):
input_sz=input_tensor.shape
if input_sz[1]<kernel_size:
input_tensor = F.pad(input_tensor, (0, 0, 0, 0, kernel_size-input_sz[1], 0), 'constant', 0)
# input_tensor=F.pad(input_tensor,(0,1), mode='replicate')
input_sz=input_tensor.shape
_t=input_sz[1]-kernel_size+1
    out=torch.zeros(input_sz[0],kernel_size**3,input_sz[1]-kernel_size+1,input_sz[2]-kernel_size+1,input_sz[3]-kernel_size+1,device=input_tensor.device)
for i in range(_t):
ind1=i
ind2=i+kernel_size
out[:,:,i,:,:]=Im2Col(input_tensor[:,ind1:ind2,:,:], kernel_size, stride, padding, dilation, tensorized)
return out
def Col2Cube(input_tensor,output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
batch = input_tensor.shape[0]
_t = output_size[0] - kernel_size + 1
out = torch.zeros([batch,output_size[0],output_size[1],output_size[2]]).to(input_tensor.device)
me=torch.zeros_like(out).to(input_tensor.device)
for i in range(_t):
ind1 = i
ind2 = i + kernel_size
if input_tensorized:
temp_tensor = input_tensor[:,:,i,:,:].flatten(2,3)
out[:,ind1:ind2,:,:] += F.fold(temp_tensor, output_size=output_size[1:], kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
me[:,ind1:ind2,:,:] += F.fold(torch.ones_like(temp_tensor), output_size=output_size[1:], kernel_size=kernel_size,
padding=padding, stride=stride, dilation=dilation)
if avg:
# me[me==0]=1 # !!!!!!!
out = out / me
# me_ = F.conv_transpose2d(torch.ones_like(input_tensor),torch.ones(1,1,kernel_size,kernel_size))
return out
def Col2Im(input_tensor,output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
batch = input_tensor.shape[0]
if input_tensorized:
input_tensor = input_tensor.flatten(2,3)
out = F.fold(input_tensor, output_size=output_size, kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
if avg:
me = F.fold(torch.ones_like(input_tensor), output_size=output_size, kernel_size=kernel_size, padding=padding, stride=stride,dilation=dilation)
# me[me==0]=1 # !!!!!!!
out = out / me
# me_ = F.conv_transpose2d(torch.ones_like(input_tensor),torch.ones(1,1,kernel_size,kernel_size))
return out
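# Round-trip sketch: unfolding with Im2Col and folding back with avg=True
# reconstructs the input, because overlapping patch values average back to the
# original pixels.
#   x = torch.randn(1, 3, 8, 8)
#   cols = Im2Col(x, kernel_size=4, stride=2, padding=0)  # (1, 3*4*4, 9)
#   y = Col2Im(cols, output_size=(8, 8), kernel_size=4, stride=2, padding=0, avg=True)
#   torch.allclose(x, y)  # True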
class Col2Im_(torch.nn.Module):
def __init__(self,input_shape, output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
super(Col2Im_,self).__init__()
xshape = tuple(input_shape)
if input_tensorized:
xshape = xshape[0:2]+(xshape[2]*xshape[3],)
if avg:
me = F.fold(torch.ones(xshape), output_size=output_size, kernel_size=kernel_size,
padding=padding, stride=stride, dilation=dilation)
me[me == 0] = 1
self.me = me
def forward(self, input_tensor,output_size, kernel_size, stride, padding, dilation=1, avg=False,input_tensorized=False):
if input_tensorized:
input_tensor = input_tensor.flatten(2, 3)
out = F.fold(input_tensor, output_size=output_size, kernel_size=kernel_size, padding=padding, stride=stride,
dilation=dilation)
if avg:
out /= self.me
return out
# def im2col_shape(size, kernel_size, stride, padding):
# ksize_h, ksize_w = _pair(kernel_size)
# stride_h, stride_w = _pair(stride)
# pad_h, pad_w = _pair(padding)
# n_input_plane, height, width = size
# height_col = (height + 2 * pad_h - ksize_h) // stride_h + 1
# width_col = (width + 2 * pad_w - ksize_w) // stride_w + 1
# return n_input_plane, ksize_h, ksize_w, height_col, width_col
def im2col_shape(size, kernel_size, stride, padding, dilation):
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
dil_h, dil_w = _pair(dilation)
pad_h, pad_w = _pair(padding)
n_input_plane, height, width = size
height_col = (height + 2 * pad_h - dil_h * (ksize_h-1)-1) / stride_h + 1
width_col = (width + 2 * pad_w - dil_w * (ksize_w-1)-1) / stride_w + 1
return n_input_plane, ksize_h, ksize_w, math.floor(height_col), math.floor(width_col)
def col2im_shape(size, kernel_size, stride, padding, input_size=None):
ksize_h, ksize_w = _pair(kernel_size)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
n_input_plane, ksize_h, ksize_w, height_col, width_col = size
if input_size is not None:
height, width = input_size
else:
height = (height_col - 1) * stride_h - 2 * pad_h + ksize_h
width = (width_col - 1) * stride_w - 2 * pad_w + ksize_w
return n_input_plane, height, width | 5,405 | 42.248 | 159 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
    Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
    We guarantee that the callback on the master copy (the first copy) will be called ahead of the callbacks
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| 3,226 | 32.968421 | 115 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import numpy as np
from torch.autograd import Variable
def as_numpy(v):
if isinstance(v, Variable):
v = v.data
return v.cpu().numpy()
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
npa, npb = as_numpy(a), as_numpy(b)
self.assertTrue(
            np.allclose(npa, npb, atol=atol, rtol=rtol),
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
)
| 835 | 26.866667 | 157 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
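# Shape sketch (added comment) for an input reshaped to (B, C, L):
# >>> _sum_ft(torch.randn(8, 16, 100)).shape
# torch.Size([16])
# >>> _unsqueeze_ft(torch.randn(16)).shape
# torch.Size([1, 16, 1])
# i.e. statistics are reduced to one value per channel, then made broadcastable.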
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
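        # Return the inverse standard deviation so `forward` can normalize with
        # a single multiply; clamping the biased variance at `eps` serves the
        # same numerical-stability purpose as the usual (var + eps) term.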
return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    the statistics only on that device, which accelerates the computation and
    is also easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
    as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    the statistics only on that device, which accelerates the computation and
    is also easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
    as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    the statistics only on that device, which accelerates the computation and
    is also easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
    as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
| 12,973 | 40.056962 | 116 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/comm.py | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
def put(self, result):
with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
self._result = result
self._cond.notify()
def get(self):
with self._lock:
if self._result is None:
self._cond.wait()
res = self._result
self._result = None
return res
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
    - During replication, as data parallel triggers a callback on each module, all slave devices should
    call `register_slave(id)` and obtain a `SlavePipe` to communicate with the master.
    - During the forward pass, the master device invokes `run_master`; all messages from the slave devices are collected
    and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine the message passed
    back to each slave device.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def register_slave(self, identifier):
"""
        Register a slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        The messages are first collected from each device (including the master device), and then
        a callback is invoked to compute the message to be sent back to each device
        (including the master device).
Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
@property
def nr_slaves(self):
return len(self._registry)
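# Minimal illustrative sketch (added; not part of the original file): drive a
# SyncMaster with a single slave thread. The master callback receives a list of
# (identifier, message) pairs and must return one (identifier, result) per pair.
def _demo_sync_master():
    def master_callback(intermediates):
        total = sum(msg for _, msg in intermediates)
        return [(i, total) for i, _ in intermediates]

    master = SyncMaster(master_callback)
    pipe = master.register_slave(1)

    out = {}
    worker = threading.Thread(target=lambda: out.setdefault('slave', pipe.run_slave(2)))
    worker.start()
    out['master'] = master.run_master(1)  # blocks until the slave reports in
    worker.join()
    assert out['master'] == out['slave'] == 3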
| 4,278 | 31.416667 | 117 | py |
SERT | SERT-master/models/competing_methods/sync_batchnorm/__init__.py | # -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .replicate import DataParallelWithCallback, patch_replication_callback
| 449 | 33.615385 | 96 | py |
SERT | SERT-master/models/competing_methods/qrnn/combinations.py | import torch
import torch.nn as nn
from torch.nn import functional
from models.competing_methods.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
BatchNorm3d = SynchronizedBatchNorm3d
class BNReLUConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=False))
class BNReLUDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, inplace=False):
super(BNReLUDeConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=False))
class BNReLUUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), inplace=False):
super(BNReLUUpsampleConv3d, self).__init__()
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('relu', nn.ReLU(inplace=inplace))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
class UpsampleConv3d(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, upsample=None):
super(UpsampleConv3d, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample, mode='trilinear', align_corners=True)
self.conv3d = torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
#print(x.shape,self.upsample)
# x_in = torch.zeros((x.shape[0],x.shape[1],x.shape[2]*self.upsample[0],x.shape[3]*self.upsample[1],x.shape[4]*self.upsample[2])).cuda()
out = self.conv3d(x_in)
return out
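# Shape sketch (added comment): with upsample=(1, 2, 2) only the two spatial
# axes are interpolated before the convolution, e.g.
# >>> layer = UpsampleConv3d(16, 16, kernel_size=3, stride=1, padding=1,
# ...                        upsample=(1, 2, 2))
# >>> layer(torch.randn(2, 16, 31, 8, 8)).shape
# torch.Size([2, 16, 31, 16, 16])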
class BasicConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('conv', nn.Conv3d(in_channels, channels, k, s, p, bias=bias))
class BasicDeConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, bias=False, bn=True):
super(BasicDeConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('deconv', nn.ConvTranspose3d(in_channels, channels, k, s, p, bias=bias))
class BasicUpsampleConv3d(nn.Sequential):
def __init__(self, in_channels, channels, k=3, s=1, p=1, upsample=(1,2,2), bn=True):
super(BasicUpsampleConv3d, self).__init__()
if bn:
self.add_module('bn', BatchNorm3d(in_channels))
self.add_module('upsample_conv', UpsampleConv3d(in_channels, channels, k, s, p, bias=False, upsample=upsample))
| 3,464 | 42.3125 | 143 | py |
SERT | SERT-master/models/competing_methods/qrnn/resnet.py | import torch
import torch.nn as nn
import numpy as np
import os
if __name__ == '__main__':
from qrnn3d import *
else:
from .qrnn3d import *
class ResQRNN3D(nn.Module):
def __init__(self, in_channels, channels, n_resblocks):
super(ResQRNN3D, self).__init__()
bn = True
act = 'tanh'
# define head module
m_head = [BiQRNNConv3D(in_channels, channels, bn=bn, act=act)]
# define body module
m_body = [
ResBlock(
QRNNConv3D, channels, bn=bn, act=act
) for i in range(n_resblocks)
]
# define tail module
m_tail = [
BiQRNNConv3D(channels, in_channels, bn=bn, act='none')
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
return x
class ResBlock(nn.Module):
def __init__(
self, block, channels, **kwargs):
super(ResBlock, self).__init__()
self.layer1 = block(channels, channels, **kwargs)
self.layer2 = block(channels, channels, **kwargs)
def forward(self, x, reverse=False):
res = self.layer1(x, reverse)
res = self.layer2(x, not reverse)
res += x
return res
| 1,415 | 23 | 70 | py |
SERT | SERT-master/models/competing_methods/qrnn/utils.py | import torch
import torch.nn as nn
class QRNNREDC3D(nn.Module):
def __init__(self, in_channels, channels, num_half_layer, sample_idx,
BiQRNNConv3D=None, BiQRNNDeConv3D=None,
QRNN3DEncoder=None, QRNN3DDecoder=None, is_2d=False, has_ad=True, bn=True, act='tanh', plain=False):
super(QRNNREDC3D, self).__init__()
assert sample_idx is None or isinstance(sample_idx, list)
self.enable_ad = has_ad
if sample_idx is None: sample_idx = []
if is_2d:
self.feature_extractor = BiQRNNConv3D(in_channels, channels, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
self.feature_extractor = BiQRNNConv3D(in_channels, channels, bn=bn, act=act)
self.encoder = QRNN3DEncoder(channels, num_half_layer, sample_idx, is_2d=is_2d, has_ad=has_ad, bn=bn, act=act, plain=plain)
self.decoder = QRNN3DDecoder(channels*(2**len(sample_idx)), num_half_layer, sample_idx, is_2d=is_2d, has_ad=has_ad, bn=bn, act=act, plain=plain)
if act == 'relu':
act = 'none'
if is_2d:
self.reconstructor = BiQRNNDeConv3D(channels, in_channels, bias=True, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
self.reconstructor = BiQRNNDeConv3D(channels, in_channels, bias=True, bn=bn, act=act)
def forward(self, x):
#x = x.unsqueeze(0)
xs = [x]
out = self.feature_extractor(xs[0])
xs.append(out)
if self.enable_ad:
out, reverse = self.encoder(out, xs, reverse=False)
out = self.decoder(out, xs, reverse=(reverse))
else:
out = self.encoder(out, xs)
out = self.decoder(out, xs)
out = out + xs.pop()
out = self.reconstructor(out)
out = out + xs.pop()
return out
class QRNN3DEncoder(nn.Module):
def __init__(self, channels, num_half_layer, sample_idx, QRNNConv3D=None,
is_2d=False, has_ad=True, bn=True, act='tanh', plain=False):
super(QRNN3DEncoder, self).__init__()
# Encoder
self.layers = nn.ModuleList()
self.enable_ad = has_ad
for i in range(num_half_layer):
if i not in sample_idx:
if is_2d:
encoder_layer = QRNNConv3D(channels, channels, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
encoder_layer = QRNNConv3D(channels, channels, bn=bn, act=act)
else:
if is_2d:
encoder_layer = QRNNConv3D(channels, 2*channels, k=(1,3,3), s=(1,2,2), p=(0,1,1), bn=bn, act=act)
else:
if not plain:
encoder_layer = QRNNConv3D(channels, 2*channels, k=3, s=(1,2,2), p=1, bn=bn, act=act)
else:
encoder_layer = QRNNConv3D(channels, 2*channels, k=3, s=(1,1,1), p=1, bn=bn, act=act)
channels *= 2
self.layers.append(encoder_layer)
def forward(self, x, xs, reverse=False):
if not self.enable_ad:
num_half_layer = len(self.layers)
for i in range(num_half_layer-1):
x = self.layers[i](x)
xs.append(x)
x = self.layers[-1](x)
return x
else:
num_half_layer = len(self.layers)
for i in range(num_half_layer-1):
x = self.layers[i](x, reverse=reverse)
reverse = not reverse
xs.append(x)
x = self.layers[-1](x, reverse=reverse)
reverse = not reverse
return x, reverse
class QRNN3DDecoder(nn.Module):
def __init__(self, channels, num_half_layer, sample_idx, QRNNDeConv3D=None, QRNNUpsampleConv3d=None,
is_2d=False, has_ad=True, bn=True, act='tanh', plain=False):
super(QRNN3DDecoder, self).__init__()
# Decoder
self.layers = nn.ModuleList()
self.enable_ad = has_ad
for i in reversed(range(num_half_layer)):
if i not in sample_idx:
if is_2d:
decoder_layer = QRNNDeConv3D(channels, channels, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
decoder_layer = QRNNDeConv3D(channels, channels, bn=bn, act=act)
else:
if is_2d:
decoder_layer = QRNNUpsampleConv3d(channels, channels//2, k=(1,3,3), s=1, p=(0,1,1), bn=bn, act=act)
else:
if not plain:
decoder_layer = QRNNUpsampleConv3d(channels, channels//2, bn=bn, act=act)
else:
decoder_layer = QRNNDeConv3D(channels, channels//2, bn=bn, act=act)
channels //= 2
self.layers.append(decoder_layer)
def forward(self, x, xs, reverse=False):
if not self.enable_ad:
num_half_layer = len(self.layers)
x = self.layers[0](x)
for i in range(1, num_half_layer):
x = x + xs.pop()
x = self.layers[i](x)
return x
else:
num_half_layer = len(self.layers)
x = self.layers[0](x, reverse=reverse)
reverse = not reverse
for i in range(1, num_half_layer):
x = x + xs.pop()
x = self.layers[i](x, reverse=reverse)
reverse = not reverse
return x
| 5,623 | 39.753623 | 152 | py |
SERT | SERT-master/models/competing_methods/qrnn/qrnn3d.py | import torch
import torch.nn as nn
import torch.nn.functional as FF
import numpy as np
from functools import partial
if __name__ == '__main__':
from combinations import *
from utils import *
else:
from .combinations import *
from .utils import *
"""F pooling"""
class QRNN3DLayer(nn.Module):
def __init__(self, in_channels, hidden_channels, conv_layer, act='tanh'):
super(QRNN3DLayer, self).__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
# quasi_conv_layer
self.conv = conv_layer
self.act = act
def _conv_step(self, inputs):
gates = self.conv(inputs)
Z, F = gates.split(split_size=self.hidden_channels, dim=1)
if self.act == 'tanh':
return Z.tanh(), F.sigmoid()
elif self.act == 'relu':
return Z.relu(), F.sigmoid()
elif self.act == 'none':
            return Z, F.sigmoid()
else:
raise NotImplementedError
def _rnn_step(self, z, f, h):
# uses 'f pooling' at each time step
h_ = (1 - f) * z if h is None else f * h + (1 - f) * z
return h_
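    # Closed form of the recurrence ("f-pooling" from the QRNN of Bradbury et
    # al.): h_t = f_t * h_{t-1} + (1 - f_t) * z_t, i.e. the forget gate f_t
    # convexly interpolates between the carried state and the new candidate.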
def forward(self, inputs, reverse=False):
h = None
Z, F = self._conv_step(inputs)
h_time = []
if not reverse:
for time, (z, f) in enumerate(zip(Z.split(1, 2), F.split(1, 2))): # split along timestep
h = self._rnn_step(z, f, h)
h_time.append(h)
else:
for time, (z, f) in enumerate((zip(
reversed(Z.split(1, 2)), reversed(F.split(1, 2))
))): # split along timestep
h = self._rnn_step(z, f, h)
h_time.insert(0, h)
# return concatenated hidden states
return torch.cat(h_time, dim=2)
def extra_repr(self):
return 'act={}'.format(self.act)
class BiQRNN3DLayer(QRNN3DLayer):
def _conv_step(self, inputs):
gates = self.conv(inputs)
Z, F1, F2 = gates.split(split_size=self.hidden_channels, dim=1)
if self.act == 'tanh':
return Z.tanh(), F1.sigmoid(), F2.sigmoid()
elif self.act == 'relu':
return Z.relu(), F1.sigmoid(), F2.sigmoid()
elif self.act == 'none':
return Z, F1.sigmoid(), F2.sigmoid()
else:
raise NotImplementedError
def forward(self, inputs, fname=None):
h = None
Z, F1, F2 = self._conv_step(inputs)
hsl = [] ; hsr = []
zs = Z.split(1, 2)
for time, (z, f) in enumerate(zip(zs, F1.split(1, 2))): # split along timestep
h = self._rnn_step(z, f, h)
hsl.append(h)
h = None
for time, (z, f) in enumerate((zip(
reversed(zs), reversed(F2.split(1, 2))
))): # split along timestep
h = self._rnn_step(z, f, h)
hsr.insert(0, h)
# return concatenated hidden states
hsl = torch.cat(hsl, dim=2)
hsr = torch.cat(hsr, dim=2)
if fname is not None:
stats_dict = {'z':Z, 'fl':F1, 'fr':F2, 'hsl':hsl, 'hsr':hsr}
torch.save(stats_dict, fname)
return hsl + hsr
class BiQRNNConv3D(BiQRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bn=True, act='tanh'):
super(BiQRNNConv3D, self).__init__(
in_channels, hidden_channels, BasicConv3d(in_channels, hidden_channels*3, k, s, p, bn=bn), act=act)
class BiQRNNDeConv3D(BiQRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bias=False, bn=True, act='tanh'):
super(BiQRNNDeConv3D, self).__init__(
in_channels, hidden_channels, BasicDeConv3d(in_channels, hidden_channels*3, k, s, p, bias=bias, bn=bn), act=act)
class QRNNConv3D(QRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bn=True, act='tanh'):
super(QRNNConv3D, self).__init__(
in_channels, hidden_channels, BasicConv3d(in_channels, hidden_channels*2, k, s, p, bn=bn), act=act)
class QRNNDeConv3D(QRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, bn=True, act='tanh'):
super(QRNNDeConv3D, self).__init__(
in_channels, hidden_channels, BasicDeConv3d(in_channels, hidden_channels*2, k, s, p, bn=bn), act=act)
class QRNNUpsampleConv3d(QRNN3DLayer):
def __init__(self, in_channels, hidden_channels, k=3, s=1, p=1, upsample=(1,2,2), bn=True, act='tanh'):
super(QRNNUpsampleConv3d, self).__init__(
in_channels, hidden_channels, BasicUpsampleConv3d(in_channels, hidden_channels*2, k, s, p, upsample, bn=bn), act=act)
QRNN3DEncoder = partial(
QRNN3DEncoder,
QRNNConv3D=QRNNConv3D)
QRNN3DDecoder = partial(
QRNN3DDecoder,
QRNNDeConv3D=QRNNDeConv3D,
QRNNUpsampleConv3d=QRNNUpsampleConv3d)
QRNNREDC3D = partial(
QRNNREDC3D,
BiQRNNConv3D=BiQRNNConv3D,
BiQRNNDeConv3D=BiQRNNDeConv3D,
QRNN3DEncoder=QRNN3DEncoder,
QRNN3DDecoder=QRNN3DDecoder
)
| 5,125 | 32.503268 | 129 | py |
SERT | SERT-master/models/competing_methods/qrnn/redc3d.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
if __name__ == '__main__':
from combinations import *
else:
from .combinations import *
class REDC3D(torch.nn.Module):
"""Residual Encoder-Decoder Convolution 3D
Args:
downsample: downsample times, None denotes no downsample"""
def __init__(self, in_channels, channels, num_half_layer, downsample=None):
super(REDC3D, self).__init__()
# Encoder
assert downsample is None or 0 < downsample <= num_half_layer
interval = num_half_layer // downsample if downsample else num_half_layer+1
self.feature_extractor = BNReLUConv3d(in_channels, channels)
self.encoder = nn.ModuleList()
for i in range(1, num_half_layer+1):
if i % interval:
encoder_layer = BNReLUConv3d(channels, channels)
else:
encoder_layer = BNReLUConv3d(channels, 2*channels, k=3, s=(1,2,2), p=1)
channels *= 2
self.encoder.append(encoder_layer)
# Decoder
self.decoder = nn.ModuleList()
for i in range(1,num_half_layer+1):
if i % interval:
decoder_layer = BNReLUDeConv3d(channels, channels)
else:
decoder_layer = BNReLUUpsampleConv3d(channels, channels//2)
channels //= 2
self.decoder.append(decoder_layer)
self.reconstructor = BNReLUDeConv3d(channels, in_channels)
def forward(self, x):
num_half_layer = len(self.encoder)
xs = [x]
out = self.feature_extractor(xs[0])
xs.append(out)
for i in range(num_half_layer-1):
out = self.encoder[i](out)
xs.append(out)
out = self.encoder[-1](out)
out = self.decoder[0](out)
for i in range(1, num_half_layer):
out = out + xs.pop()
out = self.decoder[i](out)
out = out + xs.pop()
out = self.reconstructor(out)
out = out + xs.pop()
return out
| 2,115 | 34.864407 | 87 | py |
SERT | SERT-master/models/competing_methods/qrnn/__init__.py | from .qrnn3d import QRNNREDC3D
from .redc3d import REDC3D
from .resnet import ResQRNN3D
| 88 | 21.25 | 30 | py |
SERT | SERT-master/models/competing_methods/T3SC/multilayer.py | import logging
import torch
import torch.nn as nn
from models.competing_methods.T3SC import layers
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class MultilayerModel(nn.Module):
def __init__(
self,
channels,
layers,
ssl=0,
n_ssl=0,
ckpt=None,
):
super().__init__()
self.channels = channels
self.layers_params = layers
self.ssl = ssl
self.n_ssl = n_ssl
logger.debug(f"ssl : {self.ssl}, n_ssl : {self.n_ssl}")
self.init_layers()
self.normalized_dict = False
logger.info(f"Using SSL : {self.ssl}")
self.ckpt = ckpt
if self.ckpt is not None:
logger.info(f"Loading ckpt {self.ckpt!r}")
d = torch.load(self.ckpt)
self.load_state_dict(d["state_dict"])
def init_layers(self):
list_layers = []
in_channels = self.channels
for i in range(len(self.layers_params)):
logger.debug(f"Initializing layer {i}")
name = self.layers_params[f"l{i}"]["name"]
params = self.layers_params[f"l{i}"]["params"]
layer_cls = layers.__dict__[name]
layer = layer_cls(
in_channels=in_channels,
**params,
)
in_channels = layer.code_size
list_layers.append(layer)
self.layers = nn.ModuleList(list_layers)
def forward(
self, x, mode=None, img_id=None, sigmas=None, ssl_idx=None, **kwargs
):
assert mode in ["encode", "decode", None], f"Mode {mode!r} unknown"
x = x.float().clone()
if mode in ["encode", None]:
x = self.encode(x, img_id, sigmas=sigmas, ssl_idx=ssl_idx)
if mode in ["decode", None]:
x = self.decode(x, img_id)
return x
def encode(self, x, img_id, sigmas, ssl_idx):
for layer in self.layers:
x = layer(
x,
mode="encode",
img_id=img_id,
sigmas=sigmas,
ssl_idx=ssl_idx,
)
return x
def decode(self, x, img_id):
for layer in self.layers[::-1]:
x = layer(x, mode="decode", img_id=img_id)
return x
| 2,291 | 25.964706 | 76 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/lowrank_sc_layer.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import math
import logging
from models.competing_methods.T3SC.layers.encoding_layer import EncodingLayer
from models.competing_methods.T3SC.layers.soft_thresholding import SoftThresholding
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LowRankSCLayer(EncodingLayer):
def __init__(
self,
patch_side,
stride,
K,
rank,
patch_centering,
lbda_init,
lbda_mode,
beta=0,
ssl=0,
**kwargs,
):
super().__init__(**kwargs)
assert self.in_channels is not None
assert self.code_size is not None
self.patch_side = patch_side
self.stride = stride
self.K = K
self.rank = rank
self.patch_centering = patch_centering
self.lbda_init = lbda_init
self.lbda_mode = lbda_mode
self.patch_size = self.in_channels * self.patch_side ** 2
self.spat_dim = self.patch_side ** 2
self.spec_dim = self.in_channels
self.beta = beta
self.ssl = ssl
# first is spectral, second is spatial
self.init_weights(
[
(self.code_size, self.spec_dim, self.rank),
(self.code_size, self.rank, self.spat_dim),
]
)
self.thresholds = SoftThresholding(
mode=self.lbda_mode,
lbda_init=self.lbda_init,
code_size=self.code_size,
K=self.K,
)
if self.patch_centering and self.patch_side == 1:
raise ValueError(
"Patch centering and 1x1 kernel will result in null patches"
)
if self.patch_centering:
ones = torch.ones(
self.in_channels, 1, self.patch_side, self.patch_side
)
self.ker_mean = (ones / self.patch_side ** 2).to(device)
self.ker_divider = torch.ones(
1, 1, self.patch_side, self.patch_side
).to(device)
self.divider = None
if self.beta:
self.beta_estimator = nn.Sequential(
# layer1
nn.Conv2d(
in_channels=1, out_channels=64, kernel_size=5, stride=2
),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
# layer2
nn.Conv2d(
in_channels=64, out_channels=128, kernel_size=3, stride=2
),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
# layer3
nn.Conv2d(
in_channels=128, out_channels=1, kernel_size=3, stride=1
),
nn.Sigmoid(),
)
def init_weights(self, shape):
for w in ["C", "D", "W"]:
setattr(self, w, self.init_param(shape))
def init_param(self, shape):
def init_tensor(shape):
tensor = torch.empty(*shape)
torch.nn.init.kaiming_uniform_(tensor, a=math.sqrt(5))
return tensor
if isinstance(shape, list):
return torch.nn.ParameterList([self.init_param(s) for s in shape])
return torch.nn.Parameter(init_tensor(shape))
def _encode(self, x, sigmas=None, ssl_idx=None, **kwargs):
self.shape_in = x.shape
bs, c, h, w = self.shape_in
if self.beta:
block = min(56, h)
c_w = (w - block) // 2
c_h = (h - block) // 2
to_estimate = x[:, :, c_h : c_h + block, c_w : c_w + block].view(
bs * c, 1, block, block
)
beta = 1 - self.beta_estimator(to_estimate)
# (bs * c, 1)
beta = beta.view(bs, c, 1, 1)
else:
beta = torch.ones((bs, c, 1, 1), device=x.device)
if self.ssl:
# discard error on bands we want to predict
with torch.no_grad():
mask = torch.ones_like(beta)
mask[:, ssl_idx.long()] = 0.0
beta = beta * mask
if self.beta or self.ssl:
# applying beta before or after centering is equivalent
x = x * beta
CT = (self.C[0] @ self.C[1]).view(
self.code_size,
self.in_channels,
self.patch_side,
self.patch_side,
)
if self.patch_centering:
A = F.conv2d(x, CT - CT.mean(dim=[2, 3], keepdim=True))
self.means = F.conv2d(x, self.ker_mean, groups=self.in_channels)
else:
A = F.conv2d(x, CT)
alpha = self.thresholds(A, 0)
D = (self.D[0] @ self.D[1]).view(
self.code_size,
self.in_channels,
self.patch_side,
self.patch_side,
)
for k in range(1, self.K):
D_alpha = F.conv_transpose2d(alpha, D)
D_alpha = D_alpha * beta
alpha = self.thresholds(A + alpha - F.conv2d(D_alpha, CT), k)
return alpha
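    # The K-step loop above is an unrolled ISTA iteration with a low-rank
    # (spectral x spatial) dictionary:
    #   alpha^k = S_k( C^T x + alpha^{k-1} - C^T D alpha^{k-1} ),
    # where S_k is the learned soft-thresholding step (the per-band beta
    # scaling is omitted from this summary).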
def _decode(self, alpha, **kwargs):
W = ((self.W[0]) @ self.W[1]).view(
self.code_size,
self.in_channels,
self.patch_side,
self.patch_side,
)
x = F.conv_transpose2d(alpha, W)
if self.patch_centering:
x += F.conv_transpose2d(
self.means,
self.ker_mean * self.patch_side ** 2,
groups=self.in_channels,
)
if self.divider is None or self.divider.shape[-2:] != (x.shape[-2:]):
ones = torch.ones(
1, 1, alpha.shape[2], alpha.shape[3], device=alpha.device
).to(alpha.device)
self.divider = F.conv_transpose2d(ones, self.ker_divider)
x = x / self.divider
return x
| 5,915 | 29.65285 | 83 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/soft_thresholding.py | import torch
import torch.nn as nn
import torch.nn.functional as F
MODES = ["SG", "SC", "MG", "MC"]
class SoftThresholding(nn.Module):
def __init__(self, mode, lbda_init, code_size=None, K=None):
super().__init__()
assert mode in MODES, f"Mode {mode!r} not recognized"
self.mode = mode
if self.mode[1] == "C":
# 1 lambda per channel
lbda_shape = (1, code_size, 1, 1)
else:
# 1 lambda for all channels
lbda_shape = (1, 1, 1, 1)
if self.mode[0] == "M":
# 1 set of lambdas per unfolding
self.lbda = nn.ParameterList(
[
nn.Parameter(lbda_init * torch.ones(*lbda_shape))
for _ in range(K)
]
)
else:
# 1 set of lambdas for all unfoldings
self.lbda = nn.Parameter(lbda_init * torch.ones(*lbda_shape))
def forward(self, x, k=None):
if self.mode[0] == "M":
return self._forward(x, self.lbda[k])
else:
return self._forward(x, self.lbda)
def _forward(self, x, lbda):
return F.relu(x - lbda) - F.relu(-x - lbda)
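# Note (added comment): relu(x - lbda) - relu(-x - lbda) equals the
# soft-thresholding operator S_lbda(x) = sign(x) * max(|x| - lbda, 0), the
# proximal operator of the l1 penalty; e.g. with lbda = 1, the inputs
# (-2.0, 0.5, 3.0) map to (-1.0, 0.0, 2.0).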
| 1,204 | 27.690476 | 73 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/encoding_layer.py | import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class EncodingLayer(nn.Module):
def __init__(
self,
in_channels=None,
code_size=None,
input_centering=False,
**kwargs,
):
super().__init__()
self.in_channels = in_channels
self.code_size = code_size
self.input_centering = input_centering
def forward(self, x, mode=None, **kwargs):
assert mode in ["encode", "decode", None], f"Mode {mode!r} unknown"
if mode in ["encode", None]:
x = self.encode(x, **kwargs)
if mode in ["decode", None]:
x = self.decode(x, **kwargs)
return x
def encode(self, x, **kwargs):
if self.input_centering:
self.input_means = x.mean(dim=[2, 3], keepdim=True)
x -= self.input_means
x = self._encode(x, **kwargs)
return x
def decode(self, x, **kwargs):
x = self._decode(x, **kwargs)
if self.input_centering:
x += self.input_means
return x
def _encode(self, x, **kwargs):
raise NotImplementedError
def _decode(self, x, **kwargs):
raise NotImplementedError
| 1,251 | 22.622642 | 75 | py |
SERT | SERT-master/models/competing_methods/T3SC/layers/__init__.py | from .lowrank_sc_layer import LowRankSCLayer
from .encoding_layer import EncodingLayer
from .soft_thresholding import SoftThresholding
| 135 | 33 | 47 | py |
CamDiff | CamDiff-main/inpainting_diff.py | from diffusers import StableDiffusionInpaintPipeline
import torch
import os
# from einops import repeat
import numpy as np
import time
import argparse
from PIL import Image
import random
# from efficientnet_classification import EfficientnetPipeline
from clip_classification import ClipPipeline
WIDTH = 512
HEIGHT = 512
RATIO = 0.0625
RATIO_MIN = 0.0625
RATIO_MAX = 0.25
LENGTH_RATIO_MIN = 1/5
LENGTH_RATIO_MAX = 5
MASK_RATIO = 0.75
SHRINK = np.sqrt(MASK_RATIO)
PROB = 0.4
MASK_WIDTH = 128
MASK_HEIGHT = 128
def make_mask(mask, image):
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32)/255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
image = np.array(image.convert("RGB"))
image = image.transpose(2,0,1)
# increase mask to box
coord = np.where(mask == 1)
xmin = min(coord[0])
xmax = max(coord[0])
ymin = min(coord[1])
ymax = max(coord[1])
new_image, new_mask, mask_ratio, coord, flag = choose_area(xmin, xmax, ymin, ymax, image)
if flag == 1:
new_image = Image.fromarray(new_image.astype(np.uint8).transpose(1, 2, 0))
mask_image = Image.fromarray(new_mask.astype(np.uint8)*255).convert("RGB")
else:
mask_image = 0
return new_image, mask_image, mask_ratio, coord, flag
def choose_area(xmin, xmax, ymin, ymax, image):
A = np.array([[0, 0], [xmin, ymin]])
B = np.array([[0, ymin], [xmin, ymax]])
C = np.array([[0, ymax], [xmin, WIDTH]])
D = np.array([[xmin, 0], [xmax, ymin]])
E = np.array([[xmin, ymax], [xmax, WIDTH]])
F = np.array([[xmax, 0], [HEIGHT, ymin]])
G = np.array([[xmax, ymin], [HEIGHT, ymax]])
H = np.array([[xmax, ymax], [HEIGHT, WIDTH]])
candidates = [A, B, C, D, E, F, G, H]
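    # The eight candidate boxes tile the complement of the object's bounding
    # box: four corners (A, C, F, H) and four edge strips (B, D, E, G), so a
    # sampled mask never overlaps the original camouflaged object.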
random.shuffle(candidates)
flag = 0
for i in candidates:
mask_ratio = (i[1, 0] - i[0, 0]) * (i[1, 1] - i[0, 1]) / (WIDTH * HEIGHT)
        if mask_ratio > RATIO_MIN: # skip candidates whose area is too small (or zero)
# Mask is a square, because DM's input size is 512 x 512
if ((i[1, 0] - i[0, 0]) < (i[1, 1] - i[0, 1])):
i[1, 1] = i[0, 1] + (i[1, 0] - i[0, 0])
else:
i[1, 0] = i[0, 0] + (i[1, 1] - i[0, 1])
            if mask_ratio > RATIO_MAX: # shrink candidates whose area exceeds RATIO_MAX
shrink = np.sqrt(RATIO_MAX / mask_ratio)
x_mid = int((i[1, 0] + i[0, 0]) / 2)
y_mid = int((i[1, 1] + i[0, 1]) / 2)
dx = int((i[1, 0] - i[0, 0]) * shrink)
dy = int((i[1, 1] - i[0, 1]) * shrink)
d = min(dx, dy)
i[0, 0] = int(x_mid - dx / 2)
i[1, 0] = int(x_mid + dx / 2)
i[0, 1] = int(y_mid - dy / 2)
i[1, 1] = int(y_mid + dy / 2)
# new_mask[i[0, 0]:i[1, 0], i[0, 1]:i[1, 1]] = 1
new_image = image[:, i[0, 0]:i[1, 0], i[0, 1]:i[1, 1]]
flag += 1
break
if flag == 1:
new_mask = np.zeros((new_image.shape[1], new_image.shape[2]))
x_mid_mask = int(new_image.shape[1] / 2)
y_mid_mask = int(new_image.shape[2] / 2)
dx_half_mask = int(new_image.shape[1] * SHRINK / 2)
dy_half_mask = int(new_image.shape[2] * SHRINK / 2)
new_mask[(x_mid_mask-dx_half_mask) : (x_mid_mask+dx_half_mask), (y_mid_mask-dy_half_mask):(y_mid_mask+dy_half_mask)] = 1
mask_ratio = (i[1, 0] - i[0, 0]) * (i[1, 1] - i[0, 1]) / (WIDTH * HEIGHT) * MASK_RATIO
else:
new_mask = 0
new_image = 0
mask_ratio = 0
i = 0
return new_image, new_mask, mask_ratio, i, flag
def crop_object(image, mask):
image = np.array(image.convert("RGB"))
image = image.transpose(2,0,1)
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32)/255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# get box info
coord = np.where(mask == 1)
xmin = min(coord[0])
xmax = max(coord[0])
ymin = min(coord[1])
ymax = max(coord[1])
# dimension = RGB image
mask = mask[None]
mask_image = image * (mask > 0.5)
mask_image = Image.fromarray(mask_image[:, xmin:xmax, ymin:ymax].transpose(1, 2, 0))
## Save mask
# mask_image = image * (mask < 0.5)
# mask_image = Image.fromarray(mask_image.transpose(1, 2, 0))
return mask_image
def num_bad_img(images, mask_image, prompt, org_w , org_h, coord, org_image):
del_idx = []
left_images = []
for idx, image in enumerate(images):
test_object = crop_object(image, mask_image)
label, prob = classifier.forward(test_object)
# avoid many types of fish
if "Fish" in label or "fish" in label:
label = "Fish"
if "Frogmouth" in label:
label = "Bird"
# insert the sampled image into the original image
image = image.resize((org_w, org_h))
image = np.array(image.convert("RGB"))
image = image.transpose(2,0,1)
new_image = org_image.copy()
new_image = np.array(new_image.convert("RGB"))
new_image = new_image.transpose(2,0,1)
new_image[:, coord[0, 0]:coord[1, 0], coord[0,1]:coord[1,1]] = image
new_image = Image.fromarray(new_image.transpose(1, 2, 0))
# new_image.save("./image.jpg")
# breakpoint()
if label not in prompt or prob < PROB:
del_idx.append(idx)
else:
left_images.append(new_image)
return len(del_idx), left_images
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--indir",
type=str,
default="./Dataset",
nargs="?",
help="dir containing image-mask pairs (`example.png` and `example_mask.png`)",
)
parser.add_argument(
"--outdir",
type=str,
default="./result",
nargs="?",
help="dir to write results to",
)
parser.add_argument(
"--steps",
type=int,
default=50,
help="number of ddim sampling steps",
)
parser.add_argument(
"-d",
"--device",
default="cuda",
help="computation device to use",
choices=["cpu", "cuda"]
)
opt = parser.parse_args()
data_root = os.path.join(opt.indir, "Imgs")
mask_root = os.path.join(opt.indir, "GT")
images = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
masks = [os.path.join(mask_root, os.path.splitext(os.path.split(file_path)[-1])[0] + '.png') for file_path in images]
print(f"Found {len(masks)} inputs.")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting",
revision="fp16",
torch_dtype=torch.float16,
).to(opt.device)
print("Pretrained model is loaded")
classifier = ClipPipeline(data_root, opt.device)
print("-------------Begin inpainting-------------")
start = time.time()
os.makedirs(opt.outdir, exist_ok=True)
for image_path, mask_path in zip(images, masks):
print(f"Image file: {image_path}")
# breakpoint()
outpath = os.path.join(opt.outdir, os.path.split(image_path)[1])
if len(os.path.split(outpath)[1].split("-")) == 1:
# camo, chameleon, nc4k
prompt = "a " + random.choice(classifier.labels)
else:
prompt = "a " + os.path.split(outpath)[1].split("-")[-2]
print("Prompt: " + prompt)
# avoid many types of fish
if "Fish" in prompt or "fish" in prompt:
prompt = "a Fish"
if "Frogmouth" in prompt:
prompt = "a Bird"
#image and mask_image should be PIL images.
#The mask structure is white for inpainting and black for keeping as is
image = Image.open(image_path)
mask = Image.open(mask_path)
image = image.resize((WIDTH, HEIGHT))
mask= mask.resize((WIDTH, HEIGHT))
print(f"resized to ({WIDTH}, {HEIGHT})")
# Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
# usually at the expense of lower image quality.
num_samples = 1
guidance_scale= 7.5
seed = 0
for i in range(num_samples):
if len(os.path.split(outpath)[1].split("-")) == 1:
# camo, chameleon, nc4k
prompt = "a " + random.choice(classifier.labels)
seed = random.randint(seed + 1, seed + 10)
# mask position is randomly generated
new_image, mask_image, mask_ratio, coord, flag = make_mask(mask, image)
print(f"mask ratio is {mask_ratio}")
if flag == 0:
print("Remask")
continue
org_w , org_h = mask_image.size
new_image = new_image.resize((WIDTH, HEIGHT))
mask_image= mask_image.resize((WIDTH, HEIGHT))
generator = torch.Generator(device="cuda").manual_seed(seed) # change the seed to get different results
images = pipe(prompt=prompt,
image=new_image,
mask_image=mask_image,
guidance_scale=guidance_scale,
generator=generator,
num_images_per_prompt=1,
).images
num_resamples, images = num_bad_img(images, mask_image, prompt, org_w , org_h, coord, image)
            # bound the number of retries so the while loop always terminates
count = 0
while (len(images) < 1) & (count < 10):
print(f"Resample {num_resamples} images")
new_image, mask_image, mask_ratio, coord, flag = make_mask(mask, image)
print(f"mask ratio is {mask_ratio}")
if flag == 0:
print("Remask")
continue
org_w , org_h = mask_image.size
new_image = new_image.resize((WIDTH, HEIGHT))
mask_image= mask_image.resize((WIDTH, HEIGHT))
generator = torch.Generator(device="cuda").manual_seed(random.randint(seed + 1, seed + 10))
resample_images = pipe(prompt=prompt,
image=new_image,
mask_image=mask_image,
guidance_scale=guidance_scale,
generator=generator,
num_images_per_prompt=num_resamples,
).images
num_resamples, left_images = num_bad_img(resample_images, mask_image, prompt, org_w , org_h, coord, image)
for img in left_images:
images.append(img)
count += 1
if num_resamples != 1:
subpath = os.path.join(os.path.splitext(outpath)[0] + "-" + str(i) + os.path.splitext(outpath)[1])
images[0].save(subpath)
end = time.time()
print(f"Total time: {end - start}")
| 11,126 | 35.009709 | 128 | py |
CamDiff | CamDiff-main/paper.py | import PIL
import os
from PIL import Image
def image_grid(imgs, rows, cols):
assert len(imgs) == rows*cols
w, h = imgs[0].size
grid = PIL.Image.new('RGB', size=(cols*w, rows*h))
grid_w, grid_h = grid.size
for i, img in enumerate(imgs):
grid.paste(img, box=(i%cols*w, i//cols*h))
return grid
root = "COD10K-CAM-3-Flying-53-Bird-3016.jpg"
test3 = os.path.join("/cluster/work/cvl/denfan/Train/out/test3", root)
test4 = os.path.join("/cluster/work/cvl/denfan/Train/out/test4", root)
num_samples = 5
images = []
images_no = []
for i in range(num_samples):
img = Image.open(os.path.splitext(test3)[0] + "-" + str(i+1) + os.path.splitext(test3)[1])
img_no = Image.open(os.path.splitext(test4)[0] + "-" + str(i+1) + os.path.splitext(test4)[1])
images.append(img)
images_no.append(img_no)
grid = image_grid(images, 1, num_samples)
grid_no = image_grid(images_no, 1, num_samples)
grid.save("./result/grid_bird.png")
grid_no.save("./result/grid_no_bird.png") | 1,004 | 31.419355 | 97 | py |
CamDiff | CamDiff-main/clip_classification.py | import os
import clip
import torch
import numpy as np
def get_label_list(input_dir):
images = [os.path.join(input_dir, file_path) for file_path in os.listdir(input_dir)]
label_list = []
for image in images:
if len(os.path.split(image)[1].split("-")) == 1:
continue
else:
label = os.path.split(image)[1].split("-")[-2]
if label not in label_list:
label_list.append(label)
return label_list
class ClipPipeline():
def __init__(self, input_dir, device) -> None:
self.device = device
self.model, self.preprocess = clip.load("ViT-B/32", device=device)
self.labels = get_label_list(input_dir)
# self.labels = ["Fish", "Rabbit", "Butterfly", "Bird", "Cat", "Dog", "Duck", "Bee", "Owl", "Frog"]
def forward(self, image):
img = self.preprocess(image).unsqueeze(0).to(self.device)
# labels = get_label_list(input_dir)
txt = clip.tokenize(self.labels).to(self.device)
with torch.no_grad():
image_features = self.model.encode_image(img)
text_features = self.model.encode_text(txt)
logits_per_image, logits_per_text = self.model(img, txt)
probs = logits_per_image.softmax(dim=-1).cpu().numpy()
idx = np.argmax(probs)
print(f"Predicted label {self.labels[idx]} has the probality of {probs[0][idx]*100}%")
label = self.labels[idx]
prob = probs[0][idx]
return label, prob
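# Illustrative usage (added comment; the paths and the PIL import are
# placeholders, not part of this file):
# >>> classifier = ClipPipeline("./Dataset/Imgs", "cuda")
# >>> label, prob = classifier.forward(PIL.Image.open("crop.png"))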
| 1,529 | 32.26087 | 107 | py |
EBM-HEP | EBM-HEP-main/mcmc.py | import torch
def energy_wrapper(nenergy):
'''
Wrapper to facilitate flexible energy function sign
'''
energy = - nenergy
return energy
# Partially based on code from Yilun Du, Improved Contrastive Divergence Training of Energy Based Models.
# https://github.com/yilundu/improved_contrastive_divergence
def hamiltonian(x, v, model):
energy = 0.5 * torch.pow(v, 2).sum(dim=1) + energy_wrapper(model.forward(x).squeeze())
return energy
def leapfrog_step(x, v, model, step_size, num_steps, sample=False, mh=True):
x0 = x
v0 = v
x.requires_grad_(requires_grad=True)
energy = energy_wrapper(model.forward(x))
x_grad = torch.autograd.grad([energy.sum()], [x])[0]
v = v - 0.5 * step_size * x_grad
x_negs = []
for i in range(num_steps):
x.requires_grad_(requires_grad=True)
energy = energy_wrapper(model.forward(x))
if i == num_steps - 1:
x_grad = torch.autograd.grad([energy.sum()], [x], create_graph=True)[0]
v = v - step_size * x_grad
x = x + step_size * v
v = v.detach()
else:
x_grad = torch.autograd.grad([energy.sum()], [x])[0]
v = v - step_size * x_grad
x = x + step_size * v
x = x.detach()
v = v.detach()
if sample:
x_negs.append(x)
if i % 10 == 0:
print(i, hamiltonian(torch.sigmoid(x), v, model).mean(), torch.abs(v).mean(), torch.abs(x_grad).mean())
if mh:
accept = MH_accept(model, x0, x)
x = accept * x + (1 - accept) * x0
v = accept * v + (1 - accept) * v0
x_grad = accept * x_grad
if sample:
return x, torch.stack(x_negs, dim=0), v, x_grad
else:
return x, v, x_grad
def gen_hmc_samples(model, x_neg, num_steps, step_size, sample=False, mh=True):
v = 0.001 * torch.randn_like(x_neg)
if sample:
x_neg, x_negs, v, x_grad = leapfrog_step(x_neg, v, model, step_size, num_steps, sample=sample, mh=mh)
return x_neg, x_negs, x_grad, v
else:
x_neg, v, x_grad = leapfrog_step(x_neg, v, model, step_size, num_steps, sample=sample, mh=mh)
return x_neg, x_grad, v
####
def MH_accept(model, x0, x1):
'''
Add a Metropolis-Hastings step after HMC to move around the energy landscape
'''
energy0 = energy_wrapper(model.forward(x0))
energy1 = energy_wrapper(model.forward(x1))
likelihood_ratio = torch.exp(-energy1 + energy0)
u = torch.rand_like(likelihood_ratio)
accept = ((u - likelihood_ratio) < 0).float()
return accept | 2,631 | 31.493827 | 115 | py |
EBM-HEP | EBM-HEP-main/ebm_models.py |
import copy
import math
import torch
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.data import Dataset
import torch.optim as optim
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class MLPJet(nn.Module):
def __init__(self, input_dim=80, **kwargs):
super().__init__()
self.mlp = nn.Sequential(
spectral_norm(nn.Linear(input_dim, 512)),
nn.ReLU(),
spectral_norm(nn.Linear(512, 512)),
nn.ReLU(),
spectral_norm(nn.Linear(512, 128)),
nn.ReLU(),
spectral_norm(nn.Linear(128, 64)),
nn.ReLU(),
spectral_norm(nn.Linear(64, 1))
)
def forward(self, x):
x = self.mlp(x)
return x
class Embedder(nn.Module):
def __init__(self, d_in, d_model):
super().__init__()
self.embed = nn.Linear(d_in, d_model)
def forward(self, x):
return self.embed(x)
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
#mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
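# Shape sketch (added comment): with batch b, heads h, sequence length n and
# per-head dim d_k, q/k/v of shape (b, h, n, d_k) give scores of shape
# (b, h, n, n) and an output of shape (b, h, n, d_k).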
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // num_heads
self.h = num_heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out_linear = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
batch_size = q.size(0)
k = self.k_linear(k).view(batch_size, -1, self.h, self.d_k)
q = self.q_linear(q).view(batch_size, -1, self.h, self.d_k)
v = self.v_linear(v).view(batch_size, -1, self.h, self.d_k)
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
v_out = attention(q, k, v, self.d_k, mask, self.dropout)
v_out = v_out.transpose(1,2).contiguous().view(batch_size, -1, self.d_model)
output = self.out_linear(v_out)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=1024, dropout = 0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.act = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.act(self.linear_1(x))
x = self.dropout(x)
x = self.linear_2(x)
return x
class EncoderLayer(nn.Module):
def __init__(self, d_model, num_heads, dff, dropout=0.1):
super().__init__()
self.norm_1 = nn.LayerNorm(d_model)
self.norm_2 = nn.LayerNorm(d_model)
self.attn = MultiHeadAttention(num_heads, d_model)
self.ff = FeedForward(d_model, dff, dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x0 = x
#x = self.norm_1(x)
x = self.attn(x,x,x,mask)
x = x0 + self.dropout_1(x)
x0 = x
#x = self.norm_2(x)
x = self.ff(x)
x = x0 + self.dropout_2(x)
return x
class Encoder(nn.Module):
def __init__(self, num_layers, d_model, num_heads, dff, dropout):
super().__init__()
self.num_layers = num_layers
self.embedding = Embedder(3, d_model)
self.layers = nn.ModuleList([EncoderLayer(d_model, num_heads, dff, dropout) for _ in range(num_layers)])
self.norm = nn.LayerNorm(d_model)
def forward(self, x, mask):
x = self.embedding(x)
for i in range(self.num_layers):
x = self.layers[i](x, mask)
#x = self.norm(x)
return x
class Transformer(nn.Module):
def __init__(self, num_layers=3, d_model=128, num_heads=8, dff=256, rate=0.1, n_output=1):
super().__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff, rate)
self.mlp = nn.Sequential(
nn.Linear(d_model, 500),
Swish(),
nn.Linear(500, 500),
Swish(),
nn.Linear(500, n_output)
)
def _create_padding_mask(self, seq):
seq = torch.sum(seq, 2)
        # `attention` fills scores where mask == 0, so the mask must be True
        # for real constituents (non-zero feature rows) and False for
        # zero-padded ones.
        seq = torch.ne(seq, 0)
seq = torch.unsqueeze(seq, 1)
seq = torch.unsqueeze(seq, 1)
return seq # (batch_size, 1, 1, seq_len)
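    # Constituents padded with all-zero (pt, eta, phi) rows sum to zero and
    # become False in this mask, so their attention scores are pushed to -1e9;
    # the (batch, 1, 1, seq) shape broadcasts over heads and query positions.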
def forward(self, x, mask=None):
x = x.view(x.shape[0], -1, 3)
if mask is None:
mask = self._create_padding_mask(x)
e_outputs = self.encoder(x, mask)
e_outputs = torch.sum(e_outputs, 1)
output = self.mlp(e_outputs)
return output
| 6,173 | 29.564356 | 112 | py |
EBM-HEP | EBM-HEP-main/utils.py |
import os
from pathlib import Path
import random
import copy  # needed by jet_girth below
import h5py
import numpy as np
from numpy import inf
import torch
import torch.nn.functional as F
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar
import uproot_methods
def jet_from_ptetaphi(X, scaled=False):
from sklearn.preprocessing import RobustScaler
def load_attn_train(n_train=None, input_dim=160, scale=False):
f = h5py.File(os.environ["VAE_DIR"] +"qcd_preprocessed.h5", "r")
qcd_train = f["constituents" if "constituents" in f.keys() else "table"]
if n_train:
qcd_train = qcd_train[:n_train, :input_dim]
else:
qcd_train = qcd_train[:, :input_dim]
X = qcd_train
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
v = {}
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
if scale:
scaler = RobustScaler().fit(X)
X = scaler.transform(X)
else:
scaler = None
return X, scaler
if scaled:
input_dim = X.shape[1] // 3 * 4
_, scaler = load_attn_train(n_train=10000, input_dim=input_dim, scale=True)
X = scaler.inverse_transform(X)
X = np.reshape(X, (len(X), -1, 3))
log_pt = X[:,:,0]
eta = X[:,:,1]
phi = X[:,:,2]
pt = np.exp(log_pt)
m = np.zeros_like(pt)
p4 = uproot_methods.TLorentzVectorArray.from_ptetaphim(pt, eta, phi, m)
e = p4.energy
px = p4.x
py = p4.y
pz = p4.z
e = e.reshape(len(e), -1, 1)
px = px.reshape(len(px), -1, 1)
py = py.reshape(len(py), -1, 1)
pz = pz.reshape(len(pz), -1, 1)
X = np.concatenate((e, px, py, pz), -1)
X = X.reshape(len(X), -1)
return X
def jet_e(jet):
jet = np.reshape(jet, (-1, 4))
E_j, _, _, _ = np.sum(jet, axis=0)
return E_j
def jet_mass(jet):
jet = np.reshape(jet, (-1, 4))
E_j, Px_j, Py_j, Pz_j = np.sum(jet, axis=0)
if E_j**2 > (Px_j**2 + Py_j**2 + Pz_j**2):
m = np.sqrt(E_j**2 - (Px_j**2 + Py_j**2 + Pz_j**2))
else:
m = 0
return m
def jet_pt(jet):
Px_j=0
Py_j=0
jet = np.reshape(jet, (-1, 4))
n_consti = len(jet)
for i in range(n_consti):
Px_j += jet[i, 1]
Py_j += jet[i ,2]
pt = np.sqrt(Px_j**2 + Py_j**2)
return pt
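# Hedged usage sketch (added for illustration): the jet observables above act
# on flat (E, px, py, pz) constituent lists; a single massless constituent
# with E = |p| gives m = 0 and pt = sqrt(px^2 + py^2).
def _demo_jet_observables():
    jet = np.array([10.0, 6.0, 8.0, 0.0])   # one constituent, E^2 = px^2 + py^2
    assert np.isclose(jet_e(jet), 10.0)
    assert np.isclose(jet_pt(jet), 10.0)
    assert np.isclose(jet_mass(jet), 0.0)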
def jet_girth(jet):  # to be modified: expects a dict-style jet and a LorentzVector class not provided in this module
jet = copy.deepcopy(jet)
eta_j=jet["eta"] # just using pseudo-rapidity here
phi_j=jet["phi"]
pt_j=jet["pt"]
m_j=jet["mass"]
j=LorentzVector()
j.set_pt_eta_phi_m(pt_j, eta_j, phi_j, m_j)
rap_j = j.Rapidity() # jet rapidity here
constituents = jet["content"][jet["tree"][:, 0] == -1]
g = 0
for i in range(len(constituents)):
v = LorentzVector(constituents[i])
e=v.E()
pz=v.Pz()
pt=v.Pt()
eta = 0.5 * (np.log(e + pz) - np.log(e - pz)) # using rapidity here
phi=v.phi()
delta_eta=eta-rap_j
delta_phi=phi-phi_j
if (delta_phi)>np.pi:
delta_phi -= 2*np.pi
elif (delta_phi)<-np.pi:
delta_phi += 2*np.pi
dr=np.sqrt(delta_eta**2 + delta_phi**2)
g += pt * dr
g /= pt_j
return g
def plot_jet_image(jets, ax, cmap="Blues"):
'''
Inputs: [n, l]
n: number of jets
l: four-vectors of jet constituents
Four-vectors: (E, Px, Py, Pz)
Outputs: average jet images on (eta, phi) plane
'''
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
#plt.rcParams["figure.figsize"] = (6,6)
a=[]
for i in range(len(jets)):
constituents=jets[i].reshape(-1,4)
jet=constituents.sum(axis=0)
#v=LorentzVector(jet[1], jet[2], jet[3], jet[0])
#pt_j=v.Pt()
pt_j=np.sqrt(jet[1]**2+jet[2]**2)
for c in constituents:
if c[0]<1e-10:
continue
eta=0.5*np.log((c[0]+c[3])/(c[0]-c[3]))
phi=np.arctan2(c[2], c[1])
pt=np.sqrt(c[1]**2+c[2]**2)
#v=LorentzVector(c[1], c[2], c[3], c[0])
#a.append(np.array([v.eta(), v.phi(), v.Pt()/pt_j]))
a.append(np.array([eta, phi, pt/pt_j]))
a=np.vstack(a)
ax.hist2d(a[:, 0], a[:, 1], range=[(-1.0, 1.0), (-1.0,1.0)],
weights=a[:, 2],
bins=50, cmap=cmap, norm=LogNorm())
ax.set_xlabel(r"$\eta$")
ax.set_ylabel(r"$\phi$")
def calc_js_div(real, gen, plot_range=[200, 1000]):
prob_gen = torch.histc(gen, bins=50, min=plot_range[0], max=plot_range[1])
prob_gen = prob_gen / prob_gen.sum()
prob_real = torch.histc(real, bins=50, min=plot_range[0], max=plot_range[1])
prob_real = prob_real / prob_real.sum()
prob_mean = (prob_real + prob_gen) / 2.0
js_div = (F.kl_div(torch.log(prob_mean), prob_real) + F.kl_div(torch.log(prob_mean), prob_gen)) / 2.0
return js_div
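# Hedged usage sketch (added for illustration): two samples drawn from the
# same distribution give a Jensen-Shannon divergence close to zero.
def _demo_calc_js_div():
    real = torch.empty(10000).uniform_(200, 1000)
    gen = torch.empty(10000).uniform_(200, 1000)
    assert calc_js_div(real, gen, plot_range=[200, 1000]) < 0.05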
class LitProgressBar(TQDMProgressBar):
def on_train_epoch_start(self, trainer, pl_module):
if trainer.current_epoch:
print()
super().on_train_epoch_start(trainer, pl_module)
def get_metrics(self, trainer, pl_module, **kwargs):
# don't show the version number
items = super().get_metrics(trainer, pl_module)
items.pop("v_num", None)
return items
class PeriodicCheckpoint(ModelCheckpoint):
    def __init__(self, interval, **kwargs):
        super().__init__(**kwargs)  # forward options such as save_weights_only to ModelCheckpoint
        self.interval = interval
def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):
if pl_module.global_step % self.interval == 0:
assert self.dirpath is not None
#current = Path(self.dirpath) / f"{pl_module.global_step // self.interval}-{pl_module.global_step}.ckpt"
current = Path(self.dirpath) / f"e{pl_module.global_step // self.interval}.ckpt"
prev = Path(self.dirpath) / f"{pl_module.global_step - self.interval}.ckpt"
trainer.save_checkpoint(current)
#prev.unlink() | 6,933 | 27.652893 | 116 | py |
EBM-HEP | EBM-HEP-main/ebm_preamble.py | #__all__ = ['utils', 'load_data', 'ebm_models']
import os
import json
import math
import numpy as np
from math import inf
import h5py
import random
import copy
import time, argparse
import timeit
import datetime
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import torch
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.data import Dataset
import torch.optim as optim
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import seaborn as sns
import uproot_methods
from utils import jet_e, jet_pt, jet_mass, jet_from_ptetaphi, plot_jet_image
from utils import calc_js_div
from utils import LitProgressBar, PeriodicCheckpoint
from load_data import *
from ebm_models import Transformer, MLPJet
from mcmc import gen_hmc_samples
CHECKPOINT_PATH = "./tmp"
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
| 1,444 | 27.333333 | 93 | py |
EBM-HEP | EBM-HEP-main/ebm_jet_attn.py | #!/usr/bin/env python
from ebm_preamble import *
FLAGS = {
'max_len': 10000,
'new_sample_rate': 0.05,
'singlestep': False, # for KL improved training, only back-prop through the last LD step
'MH': True, # Metropolis-Hastings step for HMC
'val_steps': 128,
'scaled': False # Input feature scaling
}
def random_sample(n_sample, n_consti):
if FLAGS['scaled']:
rand_logpt = torch.normal(0.0, 1.0, (n_sample, n_consti, 1))
rand_eta = torch.normal(0.0, 1.0, (n_sample, n_consti, 1))
rand_phi = torch.normal(0.0, 1.0, (n_sample, n_consti, 1))
else:
rand_logpt = torch.normal(2.0, 1.0, (n_sample, n_consti, 1))
rand_eta = torch.normal(0.0, 0.1, (n_sample, n_consti, 1))
rand_phi = torch.normal(0.0, 0.2, (n_sample, n_consti, 1))
rand_jets = torch.cat([rand_logpt, rand_eta, rand_phi], dim=-1)
rand_jets = rand_jets.view(n_sample, n_consti*3)
return rand_jets
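# Hedged usage sketch (added for illustration): proposals come out flat,
# (n_sample, n_consti * 3), matching the (log pt, eta, phi) jet encoding.
def _demo_random_sample():
    jets = random_sample(4, 40)
    assert jets.shape == (4, 120)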
class Sampler:
def __init__(self, model, jet_shape, sample_size, max_len=FLAGS['max_len'], kl=False, hmc=False, epsilon=0.005, return_grad=False):
super().__init__()
self.model = model
self.jet_shape = jet_shape
self.sample_size = sample_size
self.max_len = max_len
self.kl = kl
self.hmc = hmc
self.epsilon = epsilon
self.return_grad = return_grad
self.examples = [random_sample(1, jet_shape[0] // 3) for _ in range(sample_size)]
def sample_new_exmps(self, steps=60, step_size=10):
n_new = np.random.binomial(self.sample_size, FLAGS['new_sample_rate'])
n_consti = self.jet_shape[0] // 3
rand_jets = random_sample(n_new, n_consti)
old_jets = torch.cat(random.choices(self.examples, k=self.sample_size-n_new), dim=0)
inp_jets = torch.cat([rand_jets, old_jets], dim=0).detach().to(device)
if self.hmc:
inp_jets, x_grad, v = Sampler.generate_samples(self.model, inp_jets, steps=steps, step_size=step_size, hmc=True)
self.examples = list(inp_jets.to(torch.device("cpu")).chunk(self.sample_size, dim=0)) + self.examples
self.examples = self.examples[:self.max_len]
return inp_jets, x_grad, v
else:
inp_jets, inp_jets_kl, grad_norm = Sampler.generate_samples(self.model, inp_jets, steps=steps, step_size=step_size, kl=self.kl, epsilon=self.epsilon, return_grad=self.return_grad)
self.examples = list(inp_jets.to(torch.device("cpu")).chunk(self.sample_size, dim=0)) + self.examples
self.examples = self.examples[:self.max_len]
return inp_jets, inp_jets_kl, grad_norm
@staticmethod
def generate_samples(model, inp_jets, steps=60, step_size=10, return_jet_per_step=False, return_grad=False, kl=False, hmc=False, epsilon=0.005):
if hmc:
if return_jet_per_step:
im_neg, im_samples, x_grad, v = gen_hmc_samples(model, inp_jets, steps, step_size, sample=True, mh=FLAGS['MH'])
return im_samples, v
else:
im_neg, x_grad, v = gen_hmc_samples(model, inp_jets, steps, step_size, sample=False, mh=FLAGS['MH'])
return im_neg, x_grad, v
else:
is_training = model.training
model.eval()
for p in model.parameters():
p.requires_grad = False
had_gradients_enabled = torch.is_grad_enabled()
torch.set_grad_enabled(True)
inp_jets.requires_grad = True
noise = torch.randn(inp_jets.shape, device=inp_jets.device)
grad_norm = 0.0
jets_per_step = []
for i in range(steps):
if i == steps - 1:
inp_jets_orig = inp_jets
noise.normal_(0, epsilon)
inp_jets.data.add_(noise.data)
out_jets = - model.forward(inp_jets.float())
if kl and not FLAGS['singlestep']:
x_grad = torch.autograd.grad([out_jets.sum()], [inp_jets], create_graph=True)[0]
else:
x_grad = torch.autograd.grad([out_jets.sum()], [inp_jets])[0]
inp_jets = inp_jets - step_size * x_grad
grad_norm += x_grad.norm(dim=1)
if return_jet_per_step:
jets_per_step.append(inp_jets.clone().detach())
if i == steps - 1:
if kl:
inp_jets_kl = inp_jets_orig
energy = - model.forward(inp_jets_kl)
x_grad = torch.autograd.grad([energy.sum()], [inp_jets_kl], create_graph=True)[0]
inp_jets_kl = inp_jets_kl - step_size * x_grad
else:
inp_jets_kl = torch.zeros_like(inp_jets)
inp_jets = inp_jets.detach()
for p in model.parameters():
p.requires_grad = True
model.train(is_training)
torch.set_grad_enabled(had_gradients_enabled)
if return_grad:
grad_norm = grad_norm / steps
else:
grad_norm = 0.0
if return_jet_per_step:
return torch.stack(jets_per_step, dim=0), grad_norm
else:
return inp_jets, inp_jets_kl, grad_norm
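# Hedged standalone sketch (added for illustration) of the Langevin update
# performed inside `generate_samples`: x <- x - step_size * dE/dx + noise,
# shown for a toy quadratic energy E(x) = ||x||^2 / 2 instead of the network.
def _demo_langevin_step(steps=100, step_size=0.1, epsilon=0.005):
    x = torch.randn(8, 3, requires_grad=True)
    for _ in range(steps):
        x.data.add_(epsilon * torch.randn_like(x))
        energy = 0.5 * (x ** 2).sum()
        x_grad, = torch.autograd.grad(energy, x)
        x = (x - step_size * x_grad).detach().requires_grad_(True)
    return x  # samples concentrate near the energy minimum at 0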
class DeepEnergyModel(pl.LightningModule):
def __init__(self, jet_shape, batch_size, steps=60, step_size=10, kl=False, repel=False, hmc=False, epsilon=0.005, alpha=0.1, lr=1e-4, beta1=0.0, **net_args):
super().__init__()
self.save_hyperparameters()
self.jet_shape = jet_shape
self.batch_size = batch_size
self.hmc = hmc
self.epsilon = epsilon
self.net = Transformer(**net_args)
self.sampler = Sampler(self.net, jet_shape=jet_shape, sample_size=batch_size, kl=kl, hmc=hmc, epsilon=epsilon, return_grad=True)
def forward(self, x):
z = self.net(x)
return z
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=self.hparams.lr, betas=(self.hparams.beta1, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.98)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
self.train()
real_jets = batch
small_noise = torch.randn_like(real_jets) * self.epsilon
real_jets = real_jets + small_noise
if self.hparams.hmc:
fake_jets, x_grad, v = self.sampler.sample_new_exmps(steps=self.hparams.steps, step_size=self.hparams.step_size)
else:
fake_jets, fake_jets_kl, v = self.sampler.sample_new_exmps(steps=self.hparams.steps, step_size=self.hparams.step_size)
inp_jets = torch.cat([real_jets, fake_jets], dim=0)
real_out, fake_out = self.net(inp_jets.float()).chunk(2, dim=0)
reg_loss = self.hparams.alpha * (real_out ** 2 + fake_out ** 2).mean()
cdiv_loss = fake_out.mean() - real_out.mean()
loss = reg_loss + cdiv_loss
if self.hparams.hmc:
v_flat = v.view(v.size(0), -1)
x_grad_flat = x_grad.view(x_grad.size(0), -1)
dot_product = F.normalize(v_flat, dim=1) * F.normalize(x_grad_flat, dim=1)
loss_hmc = torch.abs(dot_product.sum(dim=1)).mean()
loss = loss + 0.1 * loss_hmc
v = v.norm(dim=1)
else:
loss_hmc = torch.zeros(1)
if self.hparams.kl:
self.net.requires_grad_(False)
loss_kl = - self.net.forward(fake_jets_kl)
self.net.requires_grad_(True)
loss = loss + loss_kl.mean()
if self.hparams.repel:
bs = fake_jets_kl.size(0)
fake_jets_flat = fake_jets_kl.view(bs, -1)
if len(self.sampler.examples) > 1000:
compare_batch = torch.cat(random.choices(self.sampler.examples, k=100), dim=0)
compare_batch = torch.Tensor(compare_batch).cuda(0)
compare_flat = compare_batch.view(100, -1)
dist_matrix = torch.norm(fake_jets_flat[:, None, :] - compare_flat[None, :, :], p=2, dim=-1)
loss_repel = torch.log(dist_matrix.min(dim=1)[0]).mean()
loss = loss - 0.3 * loss_repel
else:
loss_repel = torch.zeros(1)
else:
loss_repel = torch.zeros(1)
else:
loss_kl = torch.zeros(1)
loss_repel = torch.zeros(1)
self.log('loss', loss)
self.log('loss_reg', reg_loss)
self.log('loss_cd', cdiv_loss, prog_bar=True)
self.log('loss_kl', loss_kl.mean(), prog_bar=True)
self.log('loss_repel', loss_repel)
self.log('loss_hmc', loss_hmc.mean(), prog_bar=True)
self.log('nenergy_real', real_out.mean())
self.log('nenergy_sample', fake_out.mean())
self.log('train_average_v', v.mean())
return loss
def validation_step(self, batch, batch_idx):
self.eval()
jets, labels = batch
batch_size = len(labels)
qcd = jets[labels==0]
signal = jets[labels==1]
jets = torch.cat([qcd, signal], dim=0)
qcd_out, signal_out = self.net(jets.float()).chunk(2, dim=0)
cdiv_top = signal_out.mean() - qcd_out.mean()
y_pred = np.concatenate((-qcd_out.cpu(), -signal_out.cpu()))
y_true = np.concatenate((np.zeros_like(qcd_out.cpu()), np.ones_like(signal_out.cpu())))
auc = roc_auc_score(y_true, y_pred)
n_consti = self.jet_shape[0] // 3
random_jets = random_sample(batch_size, n_consti).to(device)
random_out = self.net(random_jets.float())
cdiv_random = random_out.mean() - qcd_out.mean()
self.log('val_cd_top', cdiv_top, prog_bar=True)
self.log('val_cd_random', cdiv_random, prog_bar=True)
self.log('val_nenergy_top', signal_out.mean())
self.log('val_nenergy_qcd', qcd_out.mean())
self.log('val_auc_top', auc, prog_bar=True)
self.log('hp_metric', auc)
init_samples = random_sample(batch_size, n_consti).to(self.device)
torch.set_grad_enabled(True)
if self.hparams.hmc:
gen_samples, x_grad, v = self.sampler.generate_samples(self.net, init_samples, steps=FLAGS['val_steps'], step_size=self.hparams.step_size, hmc=True)
else:
gen_samples, _, _ = self.sampler.generate_samples(self.net, init_samples, steps=FLAGS['val_steps'], step_size=self.hparams.step_size, kl=False, hmc=False) # turn off KL for saving memory and faster generation
torch.set_grad_enabled(False)
gen_out = self.net(gen_samples)
cdiv_gen = gen_out.mean() - qcd_out.mean()
self.log('val_cd_gen', cdiv_gen, prog_bar=True)
gen_samples = jet_from_ptetaphi(gen_samples.cpu(), scaled=FLAGS['scaled'])
qcd = jet_from_ptetaphi(qcd.cpu(), scaled=FLAGS['scaled'])
gen_pts = list(map(jet_pt, gen_samples))
gen_pts = torch.tensor(gen_pts)
real_pts = list(map(jet_pt, qcd))
real_pts = torch.tensor(real_pts)
js_pt = calc_js_div(real_pts, gen_pts, plot_range=[200, 1000])
self.log('val_JS_pt', js_pt, prog_bar=True)
gen_ms = list(map(jet_mass, gen_samples))
gen_ms = torch.tensor(gen_ms)
real_ms = list(map(jet_mass, qcd))
real_ms = torch.tensor(real_ms)
js_m = calc_js_div(real_ms, gen_ms, plot_range=[0, 500])
self.log('val_JS_m', js_m, prog_bar=True)
self.log('val_JS_avg', (js_pt + js_m)/2.0, prog_bar=True)
def get_progress_bar_dict(self):
tqdm_dict = super().get_progress_bar_dict()
tqdm_dict.pop("v_num", None)
return tqdm_dict
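# Hedged sketch (added for illustration) of the objective assembled in
# `training_step` above: the network output is a negative energy, so the
# contrastive-divergence term raises it on data and lowers it on samples,
# while the alpha term keeps output magnitudes bounded.
def _demo_cd_loss(real_out, fake_out, alpha=0.1):
    reg_loss = alpha * (real_out ** 2 + fake_out ** 2).mean()
    cdiv_loss = fake_out.mean() - real_out.mean()
    return reg_loss + cdiv_loss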
def train_model(train_loader, val_loader, model_name, epochs, **kwargs):
default_path = os.path.join(CHECKPOINT_PATH, "attn", datetime.datetime.now().strftime("%m%d-%H%M%S")+"_"+model_name)
tb_logger = TensorBoardLogger(default_path, name=None, version=None)
trainer = pl.Trainer(logger=tb_logger,
gpus=-1 if str(device).startswith("cuda") else 0, # set gpus=-1 to use all available gpus
#accelerator="ddp",
max_epochs=epochs,
gradient_clip_val=0.1,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="min", monitor='val_JS_avg'),
PeriodicCheckpoint(interval=len(train_loader), save_weights_only=True),
LitProgressBar(),
LearningRateMonitor("epoch")
])
model = DeepEnergyModel(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = DeepEnergyModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
return model
def eval_ood(model, train_loader, test_loader):
model.to(device)
model.eval()
with torch.no_grad():
train_energy = []
test_energy = []
for train_imgs in train_loader:
train_imgs = train_imgs.to(model.device)
train_energy.append(model.net(train_imgs.float()))
for test_imgs in test_loader:
test_imgs = test_imgs.to(model.device)
test_energy.append(model.net(test_imgs.float()))
    train_energy = torch.concat(train_energy).cpu()  # to CPU before the numpy ops below
    test_energy = torch.concat(test_energy).cpu()
y_true = np.concatenate((np.zeros_like(train_energy), np.ones_like(test_energy)))
y_pred = np.concatenate((-train_energy, -test_energy))
auc = roc_auc_score(y_true, y_pred)
print(f"Test AUC: {auc:4.3f}")
def main():
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_dim', type=int, default=160)
parser.add_argument('--input_scaler', action='store_true')
# MCMC
parser.add_argument('--steps', type=int, default=128)
parser.add_argument('--step_size', type=float, default=1.0)
parser.add_argument('--epsilon', type=float, default=0.005)
parser.add_argument('--kl', action='store_true')
parser.add_argument('--repel', action='store_true')
parser.add_argument('--hmc', action='store_true')
# Training
parser.add_argument('--n_train', type=int, default=50000)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--topref', action='store_true')
# Saving models
parser.add_argument('--mode', default="train")
parser.add_argument('--model_name', default=None)
parser.add_argument('--tag', default=None)
args = parser.parse_args()
e_func = "attnv3"
train_set, scaler = load_attn_train(n_train=args.n_train, input_dim=args.input_dim, scale=args.input_scaler, topref=args.topref)
val_X, val_y = load_attn_val(scaler, n_val=10000, input_dim=args.input_dim, scale=args.input_scaler, topref=args.topref)
train_loader = data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=2, pin_memory=True)
val_loader = data.DataLoader([[val_X[i], val_y[i]] for i in range(len(val_X))], batch_size=args.batch_size, shuffle=False, drop_last=True, num_workers=2, pin_memory=True)
test_fn = os.environ['VAE_DIR'] + 'h3_m174_h80_01_preprocessed.h5'
test_set = load_attn_test(scaler, test_fn, input_dim=args.input_dim, scale=args.input_scaler)
test_loader = data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=2)
if args.mode == "train":
if args.model_name is None:
model_path = 'models/{}_n{}k_d{}_stp{}_ss{}_eps{}_bs{}_e{}_l{}'.format(e_func, int(args.n_train / 1000.), args.input_dim, args.steps, args.step_size, args.epsilon, args.batch_size, args.epochs, args.lr)
model_path += "_kl" if args.kl else ""
model_path += "_hmc" if args.hmc else ""
model_path += "_scale" if args.input_scaler else ""
model_path += "_{}".format(args.tag) if args.tag else ""
else:
model_path = "models/" + args.model_name
model = train_model(train_loader,
val_loader,
os.path.basename(model_path),
epochs=args.epochs,
jet_shape=(args.input_dim // 4 * 3,),
batch_size=train_loader.batch_size,
lr=args.lr,
beta1=0.0,
steps=args.steps,
step_size=args.step_size,
num_layers=8,
d_model=128,
num_heads=16,
dff=1024,
rate=0.1,
kl=args.kl,
repel=args.repel,
hmc=args.hmc,
epsilon=args.epsilon
)
torch.save(model.state_dict(), model_path)
eval_ood(model, train_loader, test_loader)
elif args.mode == "test":
        model = DeepEnergyModel(jet_shape=(args.input_dim // 4 * 3,),  # the constructor takes jet_shape, not img_shape
                                batch_size=train_loader.batch_size,
                                lr=args.lr,
                                beta1=0.0,
                                step_size=args.step_size
                                )
        model.load_state_dict(torch.load('models/' + args.model_name))
eval_ood(model, train_loader, test_loader)
return model
if __name__ == "__main__":
main()
| 18,407 | 40.647059 | 221 | py |
EBM-HEP | EBM-HEP-main/load_data.py |
import os
import numpy as np
import h5py
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import torch
import torch.nn.functional as F
import uproot_methods
from utils import jet_e, jet_pt, jet_mass
from math import inf
def load_attn_train(n_train=None, input_dim=160, scale=False, topref=False):
if topref:
f = h5py.File(os.environ['TOPREF_DIR']+'train_preprocessed.h5', "r")
X_train = np.array(f['table'])
y_train = np.array(f['labels'])
qcd_train = X_train[y_train==0]
else:
f = h5py.File(os.environ["VAE_DIR"] +"qcd_preprocessed.h5", "r")
qcd_train = f["constituents" if "constituents" in f.keys() else "table"]
if n_train:
qcd_train = qcd_train[:n_train, :input_dim]
else:
qcd_train = qcd_train[:, :input_dim]
X = qcd_train
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
v = {}
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
if scale:
scaler = RobustScaler().fit(X)
X = scaler.transform(X)
else:
scaler = None
return X, scaler
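# Hedged note (added for illustration): the X returned above is flat,
# (n_jets, n_consti * 3), ordered (log pt, eta, phi) per constituent, so
# X[:, 0::3] are the log-pt features and X.reshape(len(X), -1, 3) recovers
# the per-constituent view consumed by the Transformer.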
def load_attn_val(scaler, n_val=10000, input_dim=160, scale=False, pt_scaling=False, pt_refine=True, m_window=False, topref=False):
    '''
    Construct the validation set for OOD detection.
    Unlike the training data, the validation set carries sample labels.
    TODO: readjust n_val to match the final number of events
    '''
if topref:
f = h5py.File(os.environ['TOPREF_DIR']+'val_preprocessed.h5', "r")
val_X = np.array(f['table'])
val_y = np.array(f['labels'])
val_X = val_X[-n_val:, :input_dim]
val_y = val_y[-n_val:]
else:
f1 = h5py.File(os.environ["VAE_DIR"] +"qcd_preprocessed.h5", "r")
qcd_val = f1["constituents" if "constituents" in f1.keys() else "table"]
qcd_val = np.array(qcd_val)
if pt_refine:
pts = []
for j in qcd_val:
pts.append(jet_pt(j))
pts = np.array(pts)
qcd_val = qcd_val[(pts>550) & (pts<=650)]
qcd_val = qcd_val[-n_val:, :input_dim]
f = h5py.File(os.environ["VAE_DIR"] +"top_preprocessed.h5", 'r')
for key in ['table', 'constituents', 'jet1']:
if key in f.keys():
w_test=f[key]
if key == "jet1":
labels=f["labels"]
labels=np.array(labels)
w_test = np.array(w_test)
if pt_refine:
pts = []
for j in w_test:
pts.append(jet_pt(j))
pts = np.array(pts)
w_test = w_test[(pts>550) & (pts<=650)]
if m_window:
ms=[]
for j in w_test:
ms.append(jet_mass(j))
ms=np.array(ms)
w_test=w_test[(ms>150)&(ms<=200)]
if pt_scaling:
for i in range(len(w_test)):
pt=jet_pt(w_test[i])
w_test[i]=w_test[i]/pt
w_test = w_test[-n_val:, :input_dim]
val_X = np.concatenate((qcd_val, w_test))
val_y = np.concatenate((np.zeros(len(qcd_val)), np.ones(len(w_test))))
val_X, val_y = shuffle(val_X, val_y)
f1.close()
X = val_X
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
v = {}
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
    if scale:
        X = scaler.transform(X)
    val_X = X
f.close()
return val_X, val_y
def load_attn_test(scaler, fn, input_dim=160, n_test=10000, scale=False, pt_scaling=False, pt_refine=True, m_window=False):
f = h5py.File(fn, 'r')
for key in ['table', 'constituents', 'jet1']:
if key in f.keys():
w_test=f[key]
if key == "jet1":
labels=f["labels"]
labels=np.array(labels)
w_test = np.array(w_test)
if pt_refine:
pts=[]
for j in w_test:
pts.append(jet_pt(j))
pts=np.array(pts)
w_test=w_test[(pts>550)&(pts<=650)]
if m_window:
ms=[]
for j in w_test:
ms.append(jet_mass(j))
ms=np.array(ms)
w_test=w_test[(ms>150)&(ms<=200)]
w_test = w_test[:n_test,:input_dim]
if pt_scaling:
for i in range(len(w_test)):
pt=jet_pt(w_test[i])
w_test[i]=w_test[i]/pt
X = w_test
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
v = {}
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
if scale:
X = scaler.transform(X)
f.close()
return X
def load_clf_train(n_train=None, input_dim=80, ova=None):
'''
ova: 1 - QCD/others; 2 - W/others; 3 - Top/others
'''
def load_data(n_train_pclass=350000, input_dim=160, ova=None):
from sklearn.utils import shuffle
f = h5py.File(os.environ["CLFAD_DIR"] + 'qcd_pt600_preprocessed.h5', 'r')
qcd = np.array(f['constituents'])
f.close()
f = h5py.File(os.environ["CLFAD_DIR"] + 'w_pt600_preprocessed.h5', 'r')
w = np.array(f['constituents'])
f.close()
f = h5py.File(os.environ["CLFAD_DIR"] + 'top_pt600_preprocessed.h5', 'r')
top = np.array(f['constituents'])
f.close()
X = np.concatenate((qcd[:n_train_pclass, :input_dim], w[:n_train_pclass, :input_dim], top[:n_train_pclass, :input_dim]), axis=0)
#m = np.concatenate((qcd_obs[:n_train_pclass], w_obs[:n_train_pclass], top_obs[:n_train_pclass]))
if ova:
y = np.concatenate(((1 - (ova == 1))*np.ones(n_train_pclass), (1 - (ova == 2))*np.ones(n_train_pclass), (1 - (ova == 3))*np.ones(n_train_pclass)))
else:
labels_2 = np.empty(n_train_pclass)
labels_2.fill(2)
y = np.concatenate((np.zeros(n_train_pclass),np.ones(n_train_pclass), labels_2))
X, y = shuffle(X, y)
#y = F.one_hot(torch.tensor(y).to(torch.int64), num_classes=3) # commented out due to torch.nn.CrossEntropyLoss()
return X, y
X, y = load_data(n_train // 3, input_dim, ova)
e_j = np.array(list(map(jet_e, X))).reshape(-1,1)
pt_j = np.array(list(map(jet_pt, X))).reshape(-1,1)
X = X.reshape(len(X), -1, 4)
e = X[:,:,0]
px = X[:,:,1]
py = X[:,:,2]
pz = X[:,:,3]
p4 = uproot_methods.TLorentzVectorArray.from_cartesian(px, py, pz, e)
e = np.log(e)
pt = np.log(p4.pt)
eta = p4.eta
phi = p4.phi
pt[pt == -inf] = 0.0
e[e == -inf] = 0.0
eta = np.nan_to_num(eta)
e = e.reshape(len(e), -1, 1)
pt = pt.reshape(len(pt), -1, 1)
eta = eta.reshape(len(eta), -1, 1)
phi = phi.reshape(len(phi), -1, 1)
X = np.concatenate((pt, eta, phi), -1)
X = X.reshape(len(X), -1)
return X, y | 8,832 | 28.055921 | 158 | py |
EBM-HEP | EBM-HEP-main/__init__.py |
__version__ = "0.1"
| 22 | 4.75 | 19 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex2_tpr_proposed.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 1.5
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval(z_interval, eta, etaTx, cov, 0)
return pivot
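# Hedged note (added for illustration): `run` segments one synthetic image,
# conditions on the observed segmentation via parametric SI over the line
# u + v * z, z in [-threshold, threshold], and returns the selective p-value
# (a truncated-normal pivot) for the mean difference between the two regions.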
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 2,136 | 19.548077 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex4_count_no_interval.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
return len(list_zk), len(z_interval)
# start_time = time.time()
#
# en, tn = run()
# print(en, tn)
#
# print("--- %s seconds ---" % (time.time() - start_time))
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_truncation_interval = []
local_list_encounted_interval = []
for i in range(iter_each_thread):
en, tn = run()
local_list_truncation_interval.append(tn)
local_list_encounted_interval.append(en)
total_list_tn = COMM.gather(local_list_truncation_interval, root=0)
total_list_en = COMM.gather(local_list_encounted_interval, root=0)
if COMM.rank == 0:
total_list_tn = [_i for temp in total_list_tn for _i in temp]
total_list_en = [_i for temp in total_list_en for _i in temp]
print(total_list_tn)
print()
print(total_list_en)
print("--- %s seconds ---" % (time.time() - start_time)) | 2,198 | 20.144231 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex3_len_interval_proposed_oc.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
global_list_ineq = []
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
model = load_model('./model/test_' + str(d) + '.h5')
# model.summary()
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
x = X_vec
eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)
u, v = util.compute_u_v(x, eta, d * d)
Vminus = np.NINF
Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
if a_scalar == 0:
            if b_scalar > 0:
print('Error B')
elif a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
else:
Vminus = max(Vminus, -b_scalar / a_scalar)
return Vplus - Vminus
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_length = []
for i in range(iter_each_thread):
length = run()
if length is not None:
local_list_length.append(length)
total_list_length = COMM.gather(local_list_length, root=0)
if COMM.rank == 0:
total_list_length = [_i for temp in total_list_length for _i in temp]
print(total_list_length)
print("--- %s seconds ---" % (time.time() - start_time)) | 3,789 | 23.294872 | 108 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/training.py | import numpy as np
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D, UpSampling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
import gen_data
def run(d):
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 1
X_train, Y_train = gen_data.generate(5000, IMG_WIDTH, mu_1, mu_2)
print(X_train.shape, Y_train.shape)
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
c1 = Conv2D(4, (3, 3), padding='same')(inputs)
p1 = MaxPooling2D((2, 2))(c1)
u2 = UpSampling2D(size=(2, 2))(p1)
c2 = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(u2)
outputs = c2
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
earlystopper = EarlyStopping(patience=15, verbose=1)
checkpointer = ModelCheckpoint('./model/test_' + str(d) + '.h5', verbose=1, save_best_only=True)
results = model.fit(X_train, Y_train, validation_split=0.1, epochs=20,
callbacks=[earlystopper, checkpointer])
if __name__ == '__main__':
list_d = [4, 8, 16, 32]
for d in list_d:
run(d) | 1,416 | 27.34 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex2_tpr_proposed_oc.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
global_list_ineq = []
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
model = load_model('./model/test_' + str(d) + '.h5')
# model.summary()
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
x = X_vec
eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x, eta, d * d)
Vminus = np.NINF
Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
if a_scalar == 0:
            if b_scalar > 0:
print('Error B')
elif a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
else:
Vminus = max(Vminus, -b_scalar / a_scalar)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval([[Vminus, Vplus]], eta, etaTx, cov, 0)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 200
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 4,137 | 23.05814 | 108 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/parametric_si.py | import numpy as np
import tensorflow as tf
import util
def run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold):
zk = -threshold
list_zk = [zk]
list_results = []
while zk < threshold:
x = u + v * zk
global_list_ineq = []
X_test = np.reshape(x, (1, d, d, IMG_CHANNELS))
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
Vminus = np.NINF
Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
if a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
# zk = Vplus + 0.0001
zk = Vplus + 0.005
# print(zk)
# print(binary_vec)
# print("===========")
if zk < threshold:
list_zk.append(zk)
else:
list_zk.append(threshold)
list_results.append(binary_vec)
return list_zk, list_results | 3,232 | 26.87069 | 112 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/util.py | import numpy as np
from mpmath import mp
mp.dps = 500
def compute_naive_p(test_statistic, n_a, n_b, sigma):
z = test_statistic / (sigma * np.sqrt(1 / n_a + 1 / n_b))
naive_p = mp.ncdf(z)
return float(naive_p)
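# Hedged usage sketch (added for illustration): the naive (non-selective)
# p-value is the normal CDF of the two-sample z statistic.
def _demo_compute_naive_p():
    p = compute_naive_p(test_statistic=0.0, n_a=10, n_b=10, sigma=1)
    assert abs(p - 0.5) < 1e-9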
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def construct_z(binary_vec, list_zk, list_results):
z_interval = []
for i in range(len(list_results)):
if np.array_equal(binary_vec, list_results[i]):
z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
new_z_interval = []
# for each_interval in z_interval:
# if len(new_z_interval) == 0:
# new_z_interval.append(each_interval)
# else:
# sub = each_interval[0] - new_z_interval[-1][1]
# if abs(sub) < 0.01:
# new_z_interval[-1][1] = each_interval[1]
# else:
# new_z_interval.append(each_interval)
#
# z_interval = new_z_interval
return z_interval
def create_X_para(X_test, d):
X_3D = X_test[0]
X_2D = []
for element in X_3D:
X_2D.append(list(element.flatten()))
X_2D = np.array(X_2D)
X_vec = (X_2D.flatten()).reshape((d * d, 1))
X_test = []
for i in range(d):
X_test.append([])
for j in range(d):
index = i * d + j
pT = np.zeros(d * d)
pT[index] = 1
pT = (pT.reshape((d*d, 1))).T
X_test[i].append([[pT, 0]])
return np.array([X_test]), X_vec
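# Hedged usage sketch (added for illustration, relies on the object-array
# behaviour of the NumPy version this repo targets): each parametric pixel
# holds a pair (p^T, q) with p^T x + q equal to the pixel value, keeping the
# network piecewise linear in the vectorised input x.
def _demo_create_X_para(d=4):
    X = np.random.randn(1, d, d, 1)
    X_para, X_vec = create_X_para(X, d)
    pT, q = X_para[0][0][0][0]
    assert np.isclose(np.dot(pT, X_vec)[0][0] + q, X[0, 0, 0, 0])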
def create_X_pad(X_para, d, no_channel):
X_para_pad = []
X_para = X_para[0]
for i in range(d + 2):
X_para_pad.append([])
for j in range(d + 2):
X_para_pad[i].append([])
for k in range(no_channel):
if (i == 0) or (j == 0) or (i == (d + 1)) or (j == (d + 1)):
pT = np.zeros(d * d)
pT = (pT.reshape((d * d, 1))).T
X_para_pad[i][j].append([pT, 0])
else:
X_para_pad[i][j].append(X_para[i-1][j-1][k])
return np.array([X_para_pad])
def conv(X_test, X_para_pad, kernel):
# X_test: d x d x channel
output = []
output_para = []
_, d, _, no_channel = X_test.shape
_, w, _, no_filter = kernel.shape
X_test = X_test[0]
X_para_pad = X_para_pad[0]
for i in range(1, d + 1):
output.append([])
output_para.append([])
for j in range(1, d + 1):
output[i - 1].append([])
output_para[i - 1].append([])
for filter_idx in range(no_filter):
sum = 0
sum_para = np.array([(np.zeros(d * d).reshape((d * d, 1))).T, 0])
for k in range(no_channel):
X_k = X_test[:, :, k]
X_k = np.pad(X_k, ((1, 1), (1, 1)), 'constant')
X_k_para = X_para_pad[:, :, k, :]
kernel_k = kernel[:, :, k, filter_idx]
sum = sum + \
X_k[i - 1, j - 1] * kernel_k[0, 0] + \
X_k[i - 1, j] * kernel_k[0, 1] + \
X_k[i - 1, j + 1] * kernel_k[0, 2] + \
X_k[i, j - 1] * kernel_k[1, 0] + \
X_k[i, j] * kernel_k[1, 1] + \
X_k[i, j + 1] * kernel_k[1, 2] + \
X_k[i + 1, j - 1] * kernel_k[2, 0] + \
X_k[i + 1, j] * kernel_k[2, 1] + \
X_k[i + 1, j + 1] * kernel_k[2, 2]
sum_para = sum_para + \
X_k_para[i - 1, j - 1] * kernel_k[0, 0] + \
X_k_para[i - 1, j] * kernel_k[0, 1] + \
X_k_para[i - 1, j + 1] * kernel_k[0, 2] + \
X_k_para[i, j - 1] * kernel_k[1, 0] + \
X_k_para[i, j] * kernel_k[1, 1] + \
X_k_para[i, j + 1] * kernel_k[1, 2] + \
X_k_para[i + 1, j - 1] * kernel_k[2, 0] + \
X_k_para[i + 1, j] * kernel_k[2, 1] + \
X_k_para[i + 1, j + 1] * kernel_k[2, 2]
# check_sum = np.dot(sum_para[0], X_vec)[0][0] + sum_para[1]
# print(np.around(sum - check_sum, 3))
output[i - 1][j - 1].append(sum)
output_para[i - 1][j - 1].append(sum_para)
output = np.array([output])
output_para = np.array([output_para])
return output, output_para
def max_pooling_event(e1, e2, e3, e4):
return [
np.array(e2) - np.array(e1),
np.array(e3) - np.array(e1),
np.array(e4) - np.array(e1)
]
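# Hedged usage sketch (added for illustration, scalar [p, q] pairs standing
# in for p * z + q): the returned events encode "e1 attains the max" as
# (p_j - p_1) * z + (q_j - q_1) <= 0 for the competing entries j = 2, 3, 4.
def _demo_max_pooling_event():
    e1, e2, e3, e4 = [3.0, 1.0], [1.0, 0.0], [0.0, 0.0], [2.0, 0.5]
    events = max_pooling_event(e1, e2, e3, e4)
    assert np.allclose(events[0], [-2.0, -1.0])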
def max_pooling(input, input_para):
list_ineq = []
input = input[0]
input_para = input_para[0]
d, _, no_channel = input.shape
output = []
output_para = []
for i in range(0, d, 2):
output.append([])
output_para.append([])
for j in range(0, d, 2):
output[-1].append([])
output_para[-1].append([])
for k in range(no_channel):
list_local_event = None
X_k = input[:, :, k]
X_k_para = input_para[:, :, k, :]
max_val = max(X_k[i, j], X_k[i, j+1], X_k[i+1, j], X_k[i+1, j+1])
output[-1][-1].append(max_val)
max_idx = np.argmax([X_k[i, j], X_k[i, j+1], X_k[i+1, j], X_k[i+1, j+1]])
if max_idx == 0:
output_para[-1][-1].append(X_k_para[i, j])
list_local_event = max_pooling_event(X_k_para[i, j], X_k_para[i, j+1], X_k_para[i+1, j], X_k_para[i+1, j+1])
elif max_idx == 1:
output_para[-1][-1].append(X_k_para[i, j + 1])
list_local_event = max_pooling_event(X_k_para[i, j + 1], X_k_para[i, j], X_k_para[i + 1, j], X_k_para[i + 1, j + 1])
elif max_idx == 2:
output_para[-1][-1].append(X_k_para[i + 1, j])
list_local_event = max_pooling_event(X_k_para[i + 1, j], X_k_para[i, j], X_k_para[i, j + 1], X_k_para[i + 1, j + 1])
else:
output_para[-1][-1].append(X_k_para[i + 1, j + 1])
list_local_event = max_pooling_event(X_k_para[i + 1, j + 1], X_k_para[i, j], X_k_para[i, j + 1], X_k_para[i + 1, j])
for element in list_local_event:
list_ineq.append(element)
output = np.array([output])
output_para = np.array([output_para])
return output, output_para, list_ineq
def up_sampling(input, input_para):
input = input[0]
input_para = input_para[0]
d, _, no_channel = input.shape
output = []
output_para = []
for i in range(d):
output.append([])
output_para.append([])
for j in range(d):
output[-1].append([])
output_para[-1].append([])
for k in range(no_channel):
X_k = input[:, :, k]
val = X_k[i, j]
output[-1][-1].append(val)
X_k_para = input_para[:, :, k, :]
val_para = X_k_para[i, j]
output_para[-1][-1].append(val_para)
output[-1].append(output[-1][-1])
output_para[-1].append(output_para[-1][-1])
output.append(output[-1])
output_para.append(output_para[-1])
output = np.array([output])
output_para = np.array([output_para])
return output, output_para
def compute_u_v(x, eta, d):
sq_norm = (np.linalg.norm(eta)) ** 2
e1 = np.identity(d) - (np.dot(eta, eta.T)) / sq_norm
u = np.dot(e1, x)
v = eta / sq_norm
return u, v
def construct_test_statistic(x, binary_vec, d):
vector_1_S_a = np.zeros(d)
vector_1_S_b = np.zeros(d)
n_a = 0
n_b = 0
for i in range(d):
if binary_vec[i] == 0:
n_a = n_a + 1
vector_1_S_a[i] = 1.0
elif binary_vec[i] == 1:
n_b = n_b + 1
vector_1_S_b[i] = 1.0
if (n_a == 0) or (n_b == 0):
return None, None
vector_1_S_a = np.reshape(vector_1_S_a, (vector_1_S_a.shape[0], 1))
vector_1_S_b = np.reshape(vector_1_S_b, (vector_1_S_b.shape[0], 1))
first_element = np.dot(vector_1_S_a.T, x)[0][0]
second_element = np.dot(vector_1_S_b.T, x)[0][0]
etaTx = first_element / n_a - second_element / n_b
eta = vector_1_S_a / n_a - vector_1_S_b / n_b
return eta, etaTx
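# Hedged usage sketch (added for illustration): eta^T x is the difference of
# mean intensities between the region labelled 0 and the region labelled 1.
def _demo_construct_test_statistic():
    x = np.array([[1.0], [1.0], [5.0], [5.0]])
    eta, etaTx = construct_test_statistic(x, [0, 0, 1, 1], 4)
    assert np.isclose(etaTx, 1.0 - 5.0)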
def pivot_with_specified_interval(z_interval, eta, etaTx, cov, tn_mu):
tn_sigma = np.sqrt(np.dot(np.dot(eta.T, cov), eta))[0][0]
# print(tn_sigma)
numerator = 0
denominator = 0
for each_interval in z_interval:
al = each_interval[0]
ar = each_interval[1]
denominator = denominator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if etaTx >= ar:
numerator = numerator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
elif (etaTx >= al) and (etaTx < ar):
numerator = numerator + mp.ncdf((etaTx - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if denominator != 0:
return float(numerator/denominator)
else:
return None | 9,342 | 28.380503 | 136 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/gen_data.py | import numpy as np
from scipy.stats import skewnorm
def generate_non_normal(n, d, mu_1, mu_2):
list_X_train = []
list_X_label = []
for _ in range(n):
X_train = []
X_label = []
for i in range(d):
if (i < d / 4) or (i >= 3 * d / 4):
vec_train = []
vec_label = []
gen_vec = list(np.random.normal(mu_1, 1, d))
# gen_vec = list(np.random.laplace(mu_1, 1, d))
# gen_vec = list(skewnorm.rvs(a=10, loc=mu_1, scale=1, size=d))
# gen_vec = list(np.random.standard_t(20, d))
for j in range(d):
vec_train.append([gen_vec[j]])
vec_label.append([False])
X_train.append(list(vec_train))
X_label.append(list(vec_label))
else:
vec_train = []
vec_label = []
for j in range(d):
if (j < d / 4) or (j >= 3 * d / 4):
vec_train.append([float(np.random.normal(mu_1, 1))])
# vec_train.append([float(np.random.laplace(mu_1, 1))])
# vec_train.append([float(skewnorm.rvs(a=10, loc=mu_1, scale=1))])
# vec_train.append([float(np.random.standard_t(20, 1))])
vec_label.append([False])
else:
vec_train.append([float(np.random.normal(mu_2, 1))])
# vec_train.append([float(np.random.laplace(mu_2, 1))])
# vec_train.append([float(skewnorm.rvs(a=10, loc=mu_2, scale=1))])
# vec_train.append([float(np.random.standard_t(20, 1))])
vec_label.append([True])
X_train.append(list(vec_train))
X_label.append(list(vec_label))
list_X_train.append(np.array(X_train))
list_X_label.append(np.array(X_label))
return np.array(list_X_train), np.array(list_X_label)
def generate(n, d, mu_1, mu_2):
list_X_train = []
list_X_label = []
for _ in range(n):
X_train = []
X_label = []
for i in range(d):
if (i < d / 4) or (i >= 3 * d / 4):
vec_train = []
vec_label = []
gen_vec = list(np.random.normal(mu_1, 1, d))
for j in range(d):
vec_train.append([gen_vec[j]])
vec_label.append([False])
X_train.append(list(vec_train))
X_label.append(list(vec_label))
else:
vec_train = []
vec_label = []
for j in range(d):
if (j < d / 4) or (j >= 3 * d / 4):
vec_train.append([float(np.random.normal(mu_1, 1))])
vec_label.append([False])
else:
vec_train.append([float(np.random.normal(mu_2, 1))])
vec_label.append([True])
X_train.append(list(vec_train))
X_label.append(list(vec_label))
list_X_train.append(np.array(X_train))
list_X_label.append(np.array(X_label))
return np.array(list_X_train), np.array(list_X_label)
if __name__ == '__main__':
list_X_train, list_X_label = generate(2, 8, 2, 8)
print(list_X_label[0])
print(list_X_label[1])
| 3,463 | 32.631068 | 90 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex1_fpr_proposed_oc.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 0
global_list_ineq = []
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
X_para, X_vec = util.create_X_para(X_test, d)
X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)
model = load_model('./model/test_' + str(d) + '.h5')
# model.summary()
weights = model.get_weights()
kernel_1 = weights[0]
bias_1 = weights[1]
kernel_2 = weights[2]
bias_2 = weights[3]
out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)
_, d, _, no_channel = out_conv_1.shape
out_conv_1 = out_conv_1 + bias_1
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]
out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)
for element in max_pooling_event:
global_list_ineq.append(element)
out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)
_, d, _, no_channel = out_up_sampling.shape
out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)
out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)
_, d, _, no_channel = out_conv_2.shape
out_conv_2 = out_conv_2 + bias_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]
out_conv_2 = util.sigmoid(out_conv_2)
output = out_conv_2
for i in range(d):
for j in range(d):
for k in range(no_channel):
pT = out_conv_2_para[0][i][j][k][0]
q = out_conv_2_para[0][i][j][k][1]
val = np.dot(pT, X_vec)[0][0] + q
val = util.sigmoid(val)
if val <= 0.5:
global_list_ineq.append([pT, q])
else:
global_list_ineq.append([-pT, -q])
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
x = X_vec
eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x, eta, d * d)
Vminus = np.NINF
Vplus = np.Inf
for element in global_list_ineq:
aT = element[0]
b = element[1]
a_scalar = np.dot(aT, v)[0][0]
b_scalar = np.dot(aT, u)[0][0] + b
if a_scalar == 0:
            if b_scalar > 0:
print('Error B')
elif a_scalar > 0:
Vplus = min(Vplus, -b_scalar / a_scalar)
else:
Vminus = max(Vminus, -b_scalar / a_scalar)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval([[Vminus, Vplus]], eta, etaTx, cov, 0)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
print(reject, detect, reject / detect)
print("--- %s seconds ---" % (time.time() - start_time)) | 4,163 | 22.931034 | 108 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex1_fpr_proposed.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 0
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
if eta is None:
return None
u, v = util.compute_u_v(x_obs, eta, d * d)
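    # Parametric SI: rather than conditioning on the whole activation
    # pattern (over-conditioning, as in the *_oc script), sweep the line
    # x(z) = u + z * v over a range controlled by `threshold`, recording
    # for each breakpoint z_k which segmentation the network produces.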
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
cov = np.identity(d * d)
pivot = util.pivot_with_specified_interval(z_interval, eta, etaTx, cov, 0)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
    print(reject, detect, reject / detect if detect else float('nan'))
print("--- %s seconds ---" % (time.time() - start_time)) | 2,160 | 19.386792 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex1_fpr_naive.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
n = 16
d = int(np.sqrt(n))
IMG_WIDTH = d
mu_1 = 0
mu_2 = 0
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=0)
output = output.flatten()
X_vec = X_test.flatten()
m_a = 0
m_b = 0
n_a = 0
n_b = 0
for i in range(len(output)):
if output[i] <= 0.5:
n_a = n_a + 1
m_a = m_a + X_vec[i]
else:
n_b = n_b + 1
m_b = m_b + X_vec[i]
if (n_a == 0) or (n_b == 0):
return None
m_a = m_a / n_a
m_b = m_b / n_b
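    # Naive two-sample test: difference of the segment means, referred to a
    # N(0, sigma^2 * (1/n_a + 1/n_b)) null with unit variance (the final
    # argument to compute_naive_p), as if the two segments had been chosen
    # before seeing the data.  Because the network picked the segments from
    # the same data, this p-value is invalid and the FPR is inflated; this
    # is the baseline the SI experiments compare against.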
test_statistic = m_a - m_b
pivot = util.compute_naive_p(test_statistic, n_a, n_b, 1)
return pivot
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 200
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_pivot = []
for i in range(iter_each_thread):
pivot = run()
if pivot is not None:
local_list_pivot.append(pivot)
total_list_pivot = COMM.gather(local_list_pivot, root=0)
if COMM.rank == 0:
total_list_pivot = [_i for temp in total_list_pivot for _i in temp]
detect = 0
reject = 0
for pivot in total_list_pivot:
if pivot is not None:
detect = detect + 1
if pivot < 0.05:
reject = reject + 1
    print(reject, detect, reject / detect if detect else float('nan'))
print("--- %s seconds ---" % (time.time() - start_time)) | 1,834 | 17.72449 | 71 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/ex3_len_interval_proposed.py | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
    eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
    if eta is None:
        return None
u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
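    # The quantity of interest here is the total length of the truncation
    # region: the summed lengths of all z-intervals on which the network
    # reproduces the observed segmentation.  Longer truncation regions
    # generally translate into higher power (cf. the plot/ scripts).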
length = 0
for interval in z_interval:
length = length + (interval[1] - interval[0])
# print(length)
return length
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_length = []
for i in range(iter_each_thread):
length = run()
if length is not None:
local_list_length.append(length)
total_list_length = COMM.gather(local_list_length, root=0)
if COMM.rank == 0:
total_list_length = [_i for temp in total_list_length for _i in temp]
print(total_list_length)
print("--- %s seconds ---" % (time.time() - start_time)) | 1,907 | 19.516129 | 100 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/plot/plot_fpr.py | import numpy as np
import matplotlib.pyplot as plt
line1 = [0.04, 0.04, 0.05, 0.04]
line2 = [0.04, 0.05, 0.05, 0.05]
line3 = [0.11, 0.33, 0.60, 0.77]
index = ['16', '64', '256', '1024']
xi = [1, 2, 3, 4]
plt.rcParams.update({'font.size': 17})
plt.title("False Positive Rate (FPR)")
plt.ylim(0, 1.03)
plt.plot(xi, line1, 'o-', label='proposed-method', linewidth=3)
plt.plot(xi, line2, 'o-', label='proposed-method-oc', linewidth=3)
plt.plot(xi, line3, 'o-', label='naive', linewidth=3)
plt.xticks([1, 2, 3, 4], index)
plt.xlabel("n")
plt.ylabel("FPR")
plt.legend()
plt.tight_layout()
plt.savefig('../results/fpr_plot.pdf')
plt.show()
| 642 | 20.433333 | 66 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/plot/plot_len_interval.py | import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
plt.title("Interval Length")
x_1 = [0.12254761267840242, 0.2360774560483192, 0.26384502143271105, 0.055860407571420634, 0.18671892561562808, 0.05340873883171593, 0.02656998260333987, 0.37134713479488624, 0.5174398587536528, 0.3164338308651262, 0.36082332998066513, 0.2476516709253631, 0.11101131067235293, 0.14507703194395583, 0.1923474656022981, 0.2325486472629572, 0.5033187110813198, 0.20652015092307074, 0.5325939563994169, 0.07621078240516077, 0.3038790602772119, 0.053792144500862005, 0.16662039770393178, 0.10411729885914633, 0.26306407562083245, 0.24142595563289815, 0.08947374188110047, 0.3205642492973, 0.518599312079991, 0.11242591246379607, 0.3553356105370842, 0.21981540268493882, 0.03771635096608472, 0.14031057549831671, 0.39106824529088824, 0.13905501949997312, 0.11386411494024973, 0.028601857941103592, 0.3230992104584518, 0.5185802372660386, 0.05220610018207572, 0.2389030500420426, 0.31906781729014, 0.3560623555087574, 0.22176370351188135, 0.10860855637698341, 0.17011389683403744, 0.031136576335471622, 0.5297445827589149, 0.15395921961470505, 0.21150241799022274, 0.1163724457509363, 0.11084782632891121, 0.2147356379507641, 0.27217703219492373, 0.13889112987934937, 0.20009383663394598, 0.23230157960831455, 0.21429644585443142, 0.25547399879716837, 0.09577517056485929, 0.2767530603056232, 0.09786774595492087, 0.22314807972619222, 0.16245336585446357, 0.2085676993080492, 0.433535842054837, 0.35018273517069654, 0.2943571488059229, 0.06582209127711924, 0.2287528994654151, 0.07113014227475567, 0.26464246511130796, 0.2723344511517345, 0.6416783827946164, 0.22403208423010046, 0.1084990425117136, 0.05444997406242891, 0.18545876786068782, 0.0728508821264704, 0.6664990341839121, 0.3041527907004551, 0.27943728858904504, 0.6063499707306641, 0.7125139015980608, 0.09355293771184514, 0.1064709047598078, 0.19500410846695426, 0.09395259374325904, 0.07980196859593769, 0.29297626131486143, 0.36247883507373957, 0.22284087426925003, 0.1146273725481437, 0.4815373346698062, 0.21951104835454904, 0.2551555498563134, 0.12458143974566749, 0.20873627612071832, 0.46416791262909474, 0.08324435172699385, 1.4557846976545818, 0.3084698721967307, 0.10654997517663967, 0.22840675379732578, 0.20893346690155346, 0.17118963899468076, 0.716476152051484, 0.4799223334586804, 0.305161928281773, 0.439197168366926, 0.2041464332227816, 0.1302550213672098, 0.13548100794237605, 0.06593508830188988, 0.06592213834820604, 0.07791770192924896, 0.07272319208990607, 0.08846470422309682, 0.1535116163227963]
x_2 = [1.7083195628812462, 7.045280597896085, 6.69181470810972, 0.4862483889312177, 1.437773018564704, 5.574920656493349, 1.0118183898202098, 2.3461959921638957, 0.42412056339493254, 1.561416025090913, 1.0047437588173804, 2.0107342374264157, 19.297425721651628, 0.8624559278815975, 11.830081108691598, 10.472818591000282, 0.413588012011586, 10.098593050881684, 11.004813533770328, 1.3460574403011947, 8.161353625354826, 0.8393024769510835, 1.0940796467438747, 8.004323480253397, 0.470719256211682, 18.470132114129214, 6.366642827981131, 7.76609581395119, 13.657135501325666, 11.673213688761194, 6.67221527743971, 15.395166108671495, 10.083729822162704, 0.1677780479169817, 6.034825145912925, 0.27503397287397924, 1.1715340592320573, 4.546061613197973, 0.10768721744335252, 14.039313175836696, 5.269672577584997, 9.73814015668614, 4.708523488771584, 4.12861610280604, 8.309470763288445, 1.0846546843779739, 3.5886793849486436, 0.6988428080737461, 1.193815777525376, 1.3953563911610434, 3.880015380559918, 0.5473236437502895, 7.666563843109529, 7.879705040808479, 1.3782738631903122, 6.3521033303220715, 8.101934112181427, 6.930674286829188, 1.3440381003617399, 6.982071186832932, 0.8917103520573291, 0.2448468318598045, 0.6534073032943672, 8.66215537701409, 9.633046028677473, 2.1646298449336543, 9.262838161036377, 6.902739330548906, 2.7509458991113016, 10.225430592705887, 8.73773998029086, 0.38449919329554083, 11.595211164326315, 0.7544801699861263, 10.25538520643885, 8.392633426272116, 3.6767356212308413, 12.462606007542876, 8.550068058154519, 1.125578724838566, 7.532013463652212, 3.667204194621119, 9.171275663975408, 7.294061790893738, 1.3356919364644686, 7.0613996382653905, 12.12544152411091, 2.195804688972243, 6.331964601504158, 6.210634071529293, 2.590350228752384, 10.162793547143504, 13.472419647489417, 0.9979905868507402, 8.085631525225956, 10.030521550572736, 1.3811864154780973, 0.5999056805140701, 7.512429846503318, 9.715335024739883, 4.123697420403304, 11.271829079852745, 11.028886083127132, 1.4035356769884233, 10.348513037774602, 1.046673782964251, 8.616452466491536, 1.6743511451388053, 13.202881064886641, 1.1638324877916577, 7.60571105980809, 8.798750753767106, 1.3124296946171485, 0.2874084724964452, 10.970863300055777, 6.269993577903628, 0.5358323881133873, 1.1672147821995753, 4.736570807955902, 7.906087739998402]
x = [x_1, x_2]
plt.boxplot(x, positions=[1, 2], showfliers=False, widths=0.4)
plt.xticks([1, 2], ["proposed-method-oc", "proposed-method"])
plt.xlabel("Method")
plt.ylabel("Length of truncation interval")
plt.tight_layout()
plt.savefig('../results/len_interval_plot.pdf')
plt.show()
| 5,238 | 186.107143 | 2,474 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/plot/plot_no_interval_increase_node.py | import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
# plt.title("# encountered intervals")
xx_1 = [36, 42, 40, 41, 37, 37, 42, 37, 43, 44, 34, 41, 42, 41, 39, 36, 42, 40, 36, 38, 36, 41, 38, 42, 39, 38, 42, 39, 42, 39, 41, 38, 36, 39, 37, 44, 37, 40, 38, 42, 39, 39, 37, 39, 40, 41, 38, 38, 37, 39, 41, 40, 38, 39, 39, 39, 40, 34, 39, 40, 39, 40, 39, 37, 42, 38, 38, 35, 36, 36, 39, 37, 38, 42, 39, 37, 40, 38, 37, 42, 39, 37, 36, 42, 39, 39, 39, 44, 37, 36, 41, 35, 40, 38, 39, 38, 37, 37, 40, 39, 42, 40, 39, 33, 41, 39, 40, 38, 37, 41, 36, 37, 40, 41, 36, 40, 41, 37, 41, 41]
xx_2 = [154, 151, 155, 162, 149, 155, 160, 157, 146, 166, 150, 154, 150, 154, 154, 142, 159, 156, 160, 152, 156, 149, 147, 154, 148, 147, 158, 163, 161, 149, 149, 157, 143, 157, 146, 156, 148, 152, 147, 159, 156, 149, 158, 155, 152, 144, 169, 146, 159, 160, 152, 158, 145, 151, 154, 153, 155, 157, 155, 158, 171, 154, 151, 152, 157, 154, 158, 161, 161, 153, 143, 145, 142, 150, 144, 149, 155, 149, 156, 166, 150, 150, 148, 151, 154, 150, 154, 142, 147, 165, 151, 143, 151, 148, 156, 160, 146, 158, 152, 162, 153, 151, 158, 155, 153, 164, 160, 143, 152, 154, 144, 147, 147, 150, 149, 156, 153, 158, 169, 149]
xx_3 = [443, 415, 409, 412, 411, 411, 409, 411, 393, 453, 430, 425, 438, 427, 422, 428, 430, 444, 404, 414, 461, 450, 409, 427, 435, 417, 417, 424, 401, 402, 466, 412, 438, 417, 428, 423, 439, 384, 419, 415, 414, 434, 446, 452, 442, 447, 428, 421, 412, 439, 418, 427, 404, 416, 407, 426, 440, 415, 401, 417, 395, 433, 430, 422, 413, 414, 403, 416, 406, 436, 426, 419, 436, 443, 425, 408, 420, 433, 438, 420, 420, 392, 411, 435, 416, 424, 429, 439, 416, 402, 417, 416, 430, 434, 433, 451, 430, 436, 391, 412, 406, 451, 402, 411, 420, 437, 441, 419, 437, 426, 431, 417, 400, 443, 442, 405, 407, 432, 422, 398]
xx_4 = [728, 753, 764, 762, 718, 756, 776, 763, 791, 757, 770, 737, 796, 758, 751, 775, 761, 775, 779, 807, 791, 730, 752, 771, 737, 781, 778, 757, 754, 775, 766, 739, 746, 723, 761, 772, 780, 778, 770, 760, 749, 758, 761, 816, 773, 756, 777, 719, 764, 730, 771, 800, 736, 753, 769, 811, 767, 752, 746, 753, 743, 749, 757, 740, 784, 761, 767, 729, 744, 750, 756, 738, 744, 779, 758, 756, 766, 793, 774, 754]
xx = [xx_1, xx_2, xx_3, xx_4]
xx_line = [np.mean(xx_1), np.mean(xx_2), np.mean(xx_3), np.mean(xx_4)]
plt.plot(xx_line, label='# encountered intervals', linewidth=2)
xi = [0, 1, 2, 3]
bplot2 = plt.boxplot(xx, positions=xi, showfliers=False, widths=0.5)
plt.xticks(xi, ["16", "64", "256", "1024"])
plt.xlabel("n")
plt.ylabel("# intervals")
plt.legend()
plt.tight_layout()
plt.savefig('../results/no_interval_increase_plot.pdf')
plt.show()
| 2,691 | 69.842105 | 607 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/plot/plot_fpr_violate.py | import numpy as np
import matplotlib.pyplot as plt
# line1 = [0.06, 0.059, 0.06, 0.05]
# line2 = [0.11, 0.1, 0.1, 0.1]
#
# index = ['16', '64', '256', '1024']
#
# xi = [1, 2, 3, 4]
#
# # plt.rcParams.update({'font.size': 18})
# # plt.figure(figsize=(7, 4.5))
# plt.rcParams.update({'font.size': 18})
# plt.figure(figsize=(7, 5.2))
#
# plt.title("FPR (laplace distribution)")
# plt.ylim(0, 0.5)
#
# plt.plot(xi, line1, 'o-', label='alpha=0.05')
# plt.plot(xi, line2, 'o-', label='alpha=0.1')
#
# plt.xticks([1, 2, 3, 4], index)
# plt.xlabel("n")
# plt.ylabel("FPR")
# plt.legend()
# plt.tight_layout()
# plt.savefig('../results/fpr_laplace.pdf')
# plt.show()
# line1 = [0.05, 0.041, 0.05, 0.05]
# line2 = [0.1, 0.091, 0.09, 0.1]
#
# index = ['16', '64', '256', '1024']
#
# xi = [1, 2, 3, 4]
#
# # plt.rcParams.update({'font.size': 18})
# # plt.figure(figsize=(7, 4.5))
# plt.rcParams.update({'font.size': 18})
# plt.figure(figsize=(7, 5.2))
#
# plt.title("FPR (skew normal distribution)")
# plt.ylim(0, 0.5)
#
# plt.plot(xi, line1, 'o-', label='alpha=0.05')
# plt.plot(xi, line2, 'o-', label='alpha=0.1')
#
# plt.xticks([1, 2, 3, 4], index)
# plt.xlabel("n")
# plt.ylabel("FPR")
# plt.legend()
# plt.tight_layout()
# plt.savefig('../results/fpr_skew.pdf')
# plt.show()
# line1 = [0.05, 0.054, 0.04, 0.05]
# line2 = [0.1, 0.081, 0.09, 0.08]
#
# index = ['16', '64', '256', '1024']
#
# xi = [1, 2, 3, 4]
#
# # plt.rcParams.update({'font.size': 18})
# # plt.figure(figsize=(7, 4.5))
# plt.rcParams.update({'font.size': 18})
# plt.figure(figsize=(7, 5.2))
#
# plt.title("FPR (t20 distribution)")
# plt.ylim(0, 0.5)
#
# plt.plot(xi, line1, 'o-', label='alpha=0.05')
# plt.plot(xi, line2, 'o-', label='alpha=0.1')
#
# plt.xticks([1, 2, 3, 4], index)
# plt.xlabel("n")
# plt.ylabel("FPR")
# plt.legend()
# plt.tight_layout()
# plt.savefig('../results/fpr_t20.pdf')
# plt.show()
line1 = [0.05, 0.045, 0.05, 0.045]
line2 = [0.12, 0.1, 0.08, 0.1]
index = ['16', '64', '256', '1024']
xi = [1, 2, 3, 4]
# plt.rcParams.update({'font.size': 18})
# plt.figure(figsize=(7, 4.5))
plt.rcParams.update({'font.size': 18})
plt.figure(figsize=(7, 5.2))
plt.title("FPR (estimated sigma)")
plt.ylim(0, 0.5)
plt.plot(xi, line1, 'o-', label='alpha=0.05')
plt.plot(xi, line2, 'o-', label='alpha=0.1')
plt.xticks([1, 2, 3, 4], index)
plt.xlabel("n")
plt.ylabel("FPR")
plt.legend()
plt.tight_layout()
plt.savefig('../results/fpr_estimated_sigma.pdf')
plt.show()
| 2,445 | 21.036036 | 49 | py |
dnn_segmentation_selective_inference | dnn_segmentation_selective_inference-main/plot/plot_power.py | import numpy as np
import matplotlib.pyplot as plt
line1 = [0.09, 0.31, 0.62, 0.79]
line2 = [0.04, 0.09, 0.22, 0.36]
index = ['0.5', '1.0', '1.5', '2.0']
xi = [1, 2, 3, 4]
plt.rcParams.update({'font.size': 18})
plt.title("Power")
plt.ylim(0, 1.03)
plt.plot(xi, line1, 'o-', label='proposed-method', linewidth=3)
plt.plot(xi, line2, 'o-', label='proposed-method-oc', linewidth=3)
plt.xticks([1, 2, 3, 4], index)
plt.xlabel("delta mu")
plt.ylabel("Power")
plt.legend()
plt.tight_layout()
plt.savefig('../results/power_plot.pdf')
plt.show()
| 547 | 18.571429 | 66 | py |
UNITER | UNITER-master/train_nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for NLVR2
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, DetectFeatLmdb, TxtTokLmdb,
Nlvr2PairedDataset, Nlvr2PairedEvalDataset,
Nlvr2TripletDataset, Nlvr2TripletEvalDataset,
nlvr2_paired_collate, nlvr2_paired_eval_collate,
nlvr2_triplet_collate, nlvr2_triplet_eval_collate,
PrefetchLoader)
from model.nlvr2 import (UniterForNlvr2Paired, UniterForNlvr2Triplet,
UniterForNlvr2PairedAttn)
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, BUCKET_SIZE
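# NLVR2 batches are sized in tokens, not examples: TokenBucketSampler
# groups examples of similar length into buckets so each mini-batch packs
# close to `batch_size` tokens with little padding waste.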
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
img_db = DetectFeatLmdb(img_path, opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
dset = dset_cls(txt_db, img_db, opts.use_img_type)
sampler = TokenBucketSampler(dset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return PrefetchLoader(loader)
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_db}")
if 'paired' in opts.model:
DatasetCls = Nlvr2PairedDataset
EvalDatasetCls = Nlvr2PairedEvalDataset
collate_fn = nlvr2_paired_collate
eval_collate_fn = nlvr2_paired_eval_collate
if opts.model == 'paired':
ModelCls = UniterForNlvr2Paired
elif opts.model == 'paired-attn':
ModelCls = UniterForNlvr2PairedAttn
else:
raise ValueError('unrecognized model type')
elif opts.model == 'triplet':
DatasetCls = Nlvr2TripletDataset
EvalDatasetCls = Nlvr2TripletEvalDataset
ModelCls = UniterForNlvr2Triplet
collate_fn = nlvr2_triplet_collate
eval_collate_fn = nlvr2_triplet_eval_collate
else:
raise ValueError('unrecognized model type')
# data loaders
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
DatasetCls, collate_fn, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
EvalDatasetCls, eval_collate_fn, opts)
test_dataloader = create_dataloader(opts.test_img_db, opts.test_txt_db,
opts.val_batch_size, False,
EvalDatasetCls, eval_collate_fn, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = ModelCls.from_pretrained(opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM)
model.init_type_embedding()
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
os.makedirs(join(opts.output_dir, 'results')) # store val predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
targets = batch['targets']
n_examples += targets.size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'Step {global_step}: '
f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
if global_step % opts.valid_steps == 0:
for split, loader in [('val', val_dataloader),
('test', test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
log, results = validate(model, loader, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
TB_LOGGER.log_scaler_dict(log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"Step {global_step}: finished {n_epoch} epochs")
if opts.num_train_steps % opts.valid_steps != 0:
for split, loader in [('val', val_dataloader),
('test', test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
log, results = validate(model, loader, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
TB_LOGGER.log_scaler_dict(log)
model_saver.save(model, global_step)
@torch.no_grad()
def validate(model, val_loader, split):
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = []
for i, batch in enumerate(val_loader):
qids = batch['qids']
targets = batch['targets']
del batch['targets']
del batch['qids']
scores = model(batch, compute_loss=False)
loss = F.cross_entropy(scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += (scores.max(dim=-1, keepdim=False)[1] == targets
).sum().item()
answers = ['True' if i == 1 else 'False'
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
results.extend(zip(qids, answers))
n_ex += len(qids)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/{split}_loss': val_loss,
f'valid/{split}_acc': val_acc,
f'valid/{split}_ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_db",
default=None, type=str,
help="The input validation images.")
parser.add_argument("--test_txt_db",
default=None, type=str,
help="The input test corpus. (LMDB)")
parser.add_argument("--test_img_db",
default=None, type=str,
help="The input test images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument("--model", default='paired',
choices=['paired', 'triplet', 'paired-attn'],
help="choose from 2 model architecture")
parser.add_argument('--use_img_type', action='store_true',
help="expand the type embedding for 2 image types")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+', type=float,
help="beta for adam optimizer")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 17,550 | 41.703163 | 79 | py |
UNITER | UNITER-master/pretrain.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER pre-training
"""
import argparse
from collections import defaultdict
import json
import math
import os
from os.path import exists, join
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, TokenBucketSamplerForItm,
MetaLoader, PrefetchLoader,
TxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
MlmDataset, MrfrDataset, MrcDataset,
mlm_collate, mrfr_collate, mrc_collate,
ItmDataset, itm_collate, itm_ot_collate)
from model.pretrain import UniterForPretraining
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, IMG_LABEL_DIM, BUCKET_SIZE
def build_dataloader(dataset, collate_fn, is_train, opts):
if is_train:
batch_size = opts.train_batch_size
else:
batch_size = opts.val_batch_size
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return loader
def build_dataloader_itm(dataset, collate_fn, is_train, opts):
if is_train:
batch_size = opts.train_batch_size
else:
batch_size = opts.val_batch_size
sampler = TokenBucketSamplerForItm(
dataset, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return loader
def build_mlm_dataset(txt_db, img_db, is_train, opts):
if is_train:
collate_fn = mlm_collate
datasets = [MlmDataset(t, i) for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
collate_fn = mlm_collate
dataset = MlmDataset(txt_db, img_db)
return dataset, collate_fn
def build_mrfr_dataset(txt_db, img_db, is_train, opts):
if is_train:
datasets = [MrfrDataset(opts.mrm_prob, t, i)
for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrfrDataset(opts.mrm_prob, txt_db, img_db)
return dataset, mrfr_collate
def build_mrc_dataset(txt_db, img_db, is_train, opts):
if is_train:
datasets = [MrcDataset(opts.mrm_prob, t, i)
for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrcDataset(opts.mrm_prob, txt_db, img_db)
return dataset, mrc_collate
def build_itm_dataset(txt_db, img_db, is_train, opts):
if is_train:
datasets = [ItmDataset(t, i, opts.itm_neg_prob)
for t, i in zip(txt_db, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = ItmDataset(txt_db, img_db, opts.itm_neg_prob)
collate_fn = itm_ot_collate if opts.itm_ot_lambda > 0 else itm_collate
return dataset, collate_fn
def create_dataloaders(datasets, is_train, opts, all_img_dbs=None):
if all_img_dbs is None:
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
dataloaders = {}
for dset in datasets:
if is_train:
assert len(dset['db']) == len(dset['img'])
assert len(dset['tasks']) == len(dset['mix_ratio'])
img_db = [all_img_dbs[path] for path in dset['img']]
else:
assert len(dset['db']) == len(dset['img']) == 1
img_db = all_img_dbs[dset['img'][0]]
for i, t in enumerate(dset['tasks']):
task = f'{t}_{dset["name"]}'
if is_train:
LOGGER.info(f"Loading {task} train dataset "
f"{dset['db']}, {[img.img_dir for img in img_db]}")
txt_db = [TxtTokLmdb(path, opts.max_txt_len)
for path in dset['db']]
else:
LOGGER.info(f"Loading {task} validation dataset, "
f"{dset['db']}, {img_db.img_dir}")
txt_db = TxtTokLmdb(dset['db'][0], -1)
if task.startswith('mlm'):
dataset = build_mlm_dataset(txt_db, img_db, is_train, opts)
elif task.startswith('mrfr'):
dataset = build_mrfr_dataset(txt_db, img_db, is_train, opts)
elif task.startswith('mrc'):
dataset = build_mrc_dataset(txt_db, img_db, is_train, opts)
elif task.startswith('itm'):
dataset = build_itm_dataset(txt_db, img_db, is_train, opts)
else:
raise ValueError(f'Undefined task {task}')
LOGGER.info(f"{len(dataset[0])*hvd.size()} samples loaded")
if task.startswith('itm'):
# itm handles distributed training in dset not sampler
loader = build_dataloader_itm(*dataset, is_train, opts)
else:
loader = build_dataloader(*dataset, is_train, opts)
if is_train:
ratio = dset['mix_ratio'][i]
dataloaders[task] = (loader, ratio)
else:
dataloaders[task] = PrefetchLoader(loader)
return dataloaders, all_img_dbs
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
all_dbs = [db for datasets in [opts.train_datasets, opts.val_datasets]
for dset in datasets for db in dset['db']]
tokenizer = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(tokenizer == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
# build data loaders
train_dataloaders, all_img_dbs = create_dataloaders(
opts.train_datasets, True, opts)
val_dataloaders, _ = create_dataloaders(
opts.val_datasets, False, opts, all_img_dbs)
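    # MetaLoader multiplexes the per-task loaders: every step yields one
    # (task_name, batch) pair, with tasks sampled according to their
    # mix_ratio, so MLM / MRFR / MRC / ITM updates are interleaved.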
meta_loader = MetaLoader(train_dataloaders,
accum_steps=opts.gradient_accumulation_steps,
distributed=n_gpu > 1)
meta_loader = PrefetchLoader(meta_loader)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForPretraining.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, img_label_dim=IMG_LABEL_DIM)
model.to(device)
model.train()
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
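    # One dynamic loss scale per pretraining task (apex `num_losses` /
    # `loss_id`): an fp16 overflow in one task only backs off that task's
    # scale instead of throttling all of them.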
task2scaler = {t: i for i, t in enumerate(train_dataloaders.keys())}
model, optimizer = amp.initialize(model, optimizer,
num_losses=len(task2scaler),
enabled=opts.fp16, opt_level='O2')
global_step = 0
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
# to compute training statistics
task2loss = {task: RunningMeter(f'loss/{task}')
for task in train_dataloaders.keys()}
# ITM w/ OT
if opts.itm_ot_lambda > 0:
for task in train_dataloaders.keys():
if task.startswith('itm'):
task2loss[f'{task}_xe'] = RunningMeter(f'loss/{task}_xe')
task2loss[f'{task}_ot'] = RunningMeter(f'loss/{task}_ot')
task2loss[f'{task}_ot_pos'] = RunningMeter(
f'loss/{task}_ot_pos')
task2loss[f'{task}_ot_neg'] = RunningMeter(
f'loss/{task}_ot_neg')
n_examples = defaultdict(int)
n_in_units = defaultdict(int)
n_loss_units = defaultdict(int)
grad_norm = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
for step, (name, batch) in enumerate(meta_loader):
# forward pass
n_examples[name] += batch['input_ids'].size(0)
n_in_units[name] += (batch['attn_masks'] == 1).sum().item()
task = name.split('_')[0]
loss = model(batch, task=task, compute_loss=True)
if task.startswith('itm'):
# OT
itm_loss, ot_loss = loss
n_loss_units[name] += itm_loss.size(0)
itm_loss = itm_loss.mean()
if ot_loss is not None:
ot_pos, ot_neg = ot_loss
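                # WRA (word-region alignment): the OT transport cost is
                # pushed down for matched pairs and up for mismatched ones.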
ot_loss = (ot_pos.sum() - ot_neg.sum()
) / (ot_pos.size(0) + ot_neg.size(0))
                # NOTE: beware of empty tensors (.mean() of an empty tensor is NaN)
ot_pos = ot_pos.mean().item()
if not math.isnan(ot_pos):
task2loss[f'{name}_ot_pos'](ot_pos)
ot_neg = ot_neg.mean().item()
if not math.isnan(ot_neg):
task2loss[f'{name}_ot_neg'](ot_neg)
loss = itm_loss + opts.itm_ot_lambda * ot_loss
task2loss[f'{name}_xe'](itm_loss.item())
task2loss[f'{name}_ot'](ot_loss.item())
else:
loss = itm_loss
else:
n_loss_units[name] += loss.size(0)
loss = loss.mean() # loss is not normalized in model
# backward pass
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale,
loss_id=task2scaler[name]) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
task2loss[name](loss.item())
# optimizer update and logging
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.log_scaler_dict({ll.name: ll.val
for ll in task2loss.values()
if ll.val is not None})
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'==============Step {global_step}===============')
for t in train_dataloaders.keys():
assert all(tt == t for tt in all_gather_list(t))
tot_ex = sum(all_gather_list(n_examples[t]))
ex_per_sec = int(tot_ex / (time()-start))
tot_in = sum(all_gather_list(n_in_units[t]))
in_per_sec = int(tot_in / (time()-start))
tot_l = sum(all_gather_list(n_loss_units[t]))
l_per_sec = int(tot_l / (time()-start))
LOGGER.info(f'{t}: {tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar(f'perf/{t}_ex_per_s', ex_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_in_per_s', in_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_loss_per_s', l_per_sec,
global_step)
LOGGER.info('===============================================')
if global_step % opts.valid_steps == 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step % opts.valid_steps != 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
def validate(model, val_dataloaders):
model.eval()
for task, loader in val_dataloaders.items():
LOGGER.info(f"validate on {task} task")
if task.startswith('mlm'):
val_log = validate_mlm(model, loader)
elif task.startswith('mrfr'):
val_log = validate_mrfr(model, loader)
elif task.startswith('mrc'):
val_log = validate_mrc(model, loader, task)
elif task.startswith('itm'):
val_log = validate_itm(model, loader)
else:
raise ValueError(f'Undefined task {task}')
val_log = {f'{task}_{k}': v for k, v in val_log.items()}
TB_LOGGER.log_scaler_dict(
{f'valid_{task}/{k}': v for k, v in val_log.items()})
model.train()
@torch.no_grad()
def validate_mlm(model, val_loader):
LOGGER.info("start running MLM validation...")
val_loss = 0
n_correct = 0
n_word = 0
st = time()
for i, batch in enumerate(val_loader):
scores = model(batch, task='mlm', compute_loss=False)
labels = batch['txt_labels']
labels = labels[labels != -1]
loss = F.cross_entropy(scores, labels, reduction='sum')
val_loss += loss.item()
n_correct += (scores.max(dim=-1)[1] == labels).sum().item()
n_word += labels.numel()
val_loss = sum(all_gather_list(val_loss))
n_correct = sum(all_gather_list(n_correct))
n_word = sum(all_gather_list(n_word))
tot_time = time()-st
val_loss /= n_word
acc = n_correct / n_word
val_log = {'loss': val_loss,
'acc': acc,
'tok_per_s': n_word/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"acc: {acc*100:.2f}")
return val_log
def accuracy_count(out, labels):
outputs = out.max(dim=-1)[1]
mask = labels != -1
n_correct = (outputs == labels).masked_select(mask).sum().item()
return n_correct
@torch.no_grad()
def validate_mrfr(model, val_loader):
LOGGER.info("start running MRFR validation...")
val_loss = 0
n_feat = 0
st = time()
for i, batch in enumerate(val_loader):
loss = model(batch, task='mrfr', compute_loss=True)
val_loss += loss.sum().item() / IMG_DIM
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_log = {'loss': val_loss,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"loss: {val_loss:.2f}")
return val_log
@torch.no_grad()
def validate_mrc(model, val_loader, task):
LOGGER.info("start running MRC validation...")
val_loss = 0
n_feat = 0
st = time()
tot_score = 0
for i, batch in enumerate(val_loader):
prediction_soft_label = model(
batch, task=task, compute_loss=False)
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
label_targets = batch['label_targets']
loss = F.kl_div(
prediction_soft_label, label_targets, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label, label_targets)
        else:
            # background class should not be the target
            label_targets = batch['label_targets']
            cls_label_targets = label_targets[:, 1:].max(dim=-1)[1] + 1
loss = F.cross_entropy(
prediction_soft_label, cls_label_targets,
ignore_index=0, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label[:, 1:], label_targets[:, 1:])
val_loss += loss.item()
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_acc = tot_score / n_feat
val_log = {'loss': val_loss,
'acc': val_acc,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log
def compute_accuracy_for_soft_targets(out, labels):
outputs = out.max(dim=-1)[1]
labels = labels.max(dim=-1)[1] # argmax
n_correct = (outputs == labels).sum().item()
return n_correct
@torch.no_grad()
def validate_itm(model, val_loader):
LOGGER.info("start running ITM validation...")
val_loss = 0
tot_ot_loss = 0
tot_ot_pos = 0
tot_ot_neg = 0
tot_score = 0
n_ex = 0
st = time()
for i, batch in enumerate(val_loader):
scores, ot_loss = model(batch, task='itm', compute_loss=False)
if ot_loss is not None:
if isinstance(ot_loss, tuple):
ot_pos, ot_neg = ot_loss
ot_pos = ot_pos.sum().item()
ot_neg = ot_neg.sum().item()
tot_ot_pos += ot_pos
tot_ot_neg += ot_neg
tot_ot_loss += ot_pos - ot_neg
else:
tot_ot_loss += ot_loss.sum().item()
targets = batch['targets']
loss = F.cross_entropy(scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += (scores.max(dim=-1)[1] == targets).sum().item()
n_ex += len(targets)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {'valid/loss': val_loss,
'valid/acc': val_acc,
'valid/ex_per_s': n_ex/tot_time}
if ot_loss is not None:
tot_ot_loss = sum(all_gather_list(tot_ot_loss))
tot_ot_pos = sum(all_gather_list(tot_ot_pos))
tot_ot_neg = sum(all_gather_list(tot_ot_neg))
val_log['valid/ot_loss'] = tot_ot_loss / n_ex
val_log['valid/ot_pos'] = tot_ot_pos / n_ex
val_log['valid/ot_neg'] = tot_ot_neg / n_ex
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
# NOTE: train tasks and val tasks cannot take command line arguments
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config", type=str,
help="path to model structure config json")
parser.add_argument("--checkpoint", default=None, type=str,
help="path to model checkpoint (*.pt)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
parser.add_argument('--mrm_prob', default=0.15, type=float,
help='probability to mask in MRM training')
parser.add_argument('--itm_neg_prob', default=0.5, type=float,
                        help='probability to make negative examples '
'in ITM training')
parser.add_argument('--itm_ot_lambda', default=0.0, type=float,
help='weight of OT (optimal transport) loss (WRA)')
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adamw',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=10000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', required=True, help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 25,780 | 39.094868 | 79 | py |
UNITER | UNITER-master/train_itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for Image-Text Retrieval
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, ConcatDataset
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, TxtTokLmdb, ImageLmdbGroup,
ItmRankDataset, itm_rank_collate,
ItmValDataset, itm_val_collate,
ItmEvalDataset, itm_eval_collate)
from model.itm import UniterForImageTextRetrieval
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM
from utils.itm_eval import evaluate
def build_dataloader(dataset, collate_fn, is_train, opts):
batch_size = opts.train_batch_size if is_train else 1
dataloader = DataLoader(dataset, batch_size=batch_size,
shuffle=is_train, drop_last=is_train,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
if hvd.rank() == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
# store ITM predictions
os.makedirs(join(opts.output_dir, 'results_val'))
os.makedirs(join(opts.output_dir, 'results_test'))
os.makedirs(join(opts.output_dir, 'results_train'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_dbs}, "
f"{opts.train_img_dbs}")
# check multiple DBs
assert len(opts.train_txt_dbs) == len(opts.train_img_dbs), \
"train txt_db and img_db have different length"
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets.append(ItmRankDataset(txt_db, img_db,
opts.negative_size))
train_dataset = ConcatDataset(train_datasets)
# val
LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, -1)
val_dataset = ItmValDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
val_dataloader = build_dataloader(val_dataset, itm_val_collate,
False, opts)
# eval
LOGGER.info(f"Loading val, test Dataset for full evaluation: "
f"{opts.val_txt_db}, {opts.val_img_db}"
f"{opts.test_txt_db}, {opts.test_img_db}")
eval_dataset_val = ItmEvalDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
eval_loader_val = build_dataloader(eval_dataset_val, itm_eval_collate,
False, opts)
test_img_db = all_img_dbs[opts.test_img_db]
test_txt_db = TxtTokLmdb(opts.test_txt_db, -1)
eval_dataset_test = ItmEvalDataset(test_txt_db, test_img_db,
opts.inf_minibatch_size)
eval_loader_test = build_dataloader(eval_dataset_test, itm_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForImageTextRetrieval.from_pretrained(
opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM, margin=opts.margin)
model.init_output() # pretrain ITM head is different from ranking head
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
LOGGER.info(f"***** Running training on {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset) * hvd.size())
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
train_dataloader = build_dataloader(
train_dataset, itm_rank_collate, True, opts)
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'------------Step {global_step}-------------')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info(f'-------------------------------------------')
if global_step % opts.valid_steps == 0:
if opts.full_val:
LOGGER.info(
f"========================== Step {global_step} "
f"==========================")
val_log = evaluate(model, eval_loader_val)
TB_LOGGER.log_scaler_dict(
{f"valid/{k}": v for k, v in val_log.items()})
LOGGER.info(f"image retrieval R1: "
f"{val_log['img_r1']*100:.2f},\n"
f"image retrieval R5: "
f"{val_log['img_r5']*100:.2f},\n"
f"image retrieval R10: "
f"{val_log['img_r10']*100:.2f}\n"
f"text retrieval R1: "
f"{val_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: "
f"{val_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: "
f"{val_log['txt_r10']*100:.2f}")
LOGGER.info("================================="
"=================================")
else:
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
pbar.close()
if opts.num_train_steps % opts.valid_steps != 0:
# final validation
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
# evaluation
for split, loader in [('val', eval_loader_val),
('test', eval_loader_test)]:
eval_log = evaluate(model, loader)
TB_LOGGER.log_scaler_dict({f"eval/{split}_{k}": v
for k, v in eval_log.items()})
if hvd.rank() != 0:
continue
LOGGER.info(
f"========================= {split} ===========================\n"
f"image retrieval R1: {eval_log['img_r1']*100:.2f},\n"
f"image retrieval R5: {eval_log['img_r5']*100:.2f},\n"
f"image retrieval R10: {eval_log['img_r10']*100:.2f}\n"
f"text retrieval R1: {eval_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: {eval_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: {eval_log['txt_r10']*100:.2f}")
LOGGER.info("=========================================================")
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
pbar = tqdm(total=len(val_loader))
else:
pbar = NoOp()
LOGGER.info("start running Image Retrieval validation ...")
model.eval()
n_ex = 0
st = time()
recall_at_1, recall_at_5, recall_at_10 = 0, 0, 0
for batch in val_loader:
scores = model(batch, compute_loss=False)
_, indices = scores.squeeze(1).topk(10, dim=0)
rank = (indices == 0).nonzero()
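        # ItmValDataset is assumed to place the ground-truth image at index 0
        # of each candidate pool, so the position of index 0 within the top-10
        # scores is the rank used for the recall@{1,5,10} counters below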
if rank.numel():
rank = rank.item()
if rank < 1:
recall_at_1 += 1
if rank < 5:
recall_at_5 += 1
if rank < 10:
recall_at_10 += 1
n_ex += 1
pbar.update(1)
n_ex = sum(all_gather_list(n_ex))
recall_at_1 = sum(all_gather_list(recall_at_1)) / n_ex
recall_at_5 = sum(all_gather_list(recall_at_5)) / n_ex
recall_at_10 = sum(all_gather_list(recall_at_10)) / n_ex
tot_time = time()-st
val_log = {'valid/ex_per_s': n_ex/tot_time,
'valid/recall_1': recall_at_1,
'valid/recall_5': recall_at_5,
'valid/recall_10': recall_at_10}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"recall_1: {recall_at_1*100:.2f}, "
f"recall_5: {recall_at_5*100:.2f}, "
f"recall_10: {recall_at_10*100:.2f}")
pbar.close()
return val_log
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained MLM")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory where the model "
"checkpoints will be written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=128, type=int,
help="Total batch size for training. "
"(batch by examples)")
parser.add_argument("--negative_size", default=1, type=int,
help="Number of negative samples per positive sample")
parser.add_argument("--inf_minibatch_size", default=400, type=int,
help="batch size for running inference. "
"(used for validation, and evaluation)")
parser.add_argument("--margin", default=0.2, type=float,
help="margin of ranking loss")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=0.25, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--full_val', action='store_true',
help="Always run full evaluation during training")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
UNITER | UNITER-master/prepro.py
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
preprocess NLVR annotations into LMDB
"""
import argparse
import json
import pickle
import os
from os.path import exists
from cytoolz import curry
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from data.data import open_lmdb
@curry
def bert_tokenize(tokenizer, text):
ids = []
for word in text.strip().split():
ws = tokenizer.tokenize(word)
if not ws:
# some special char
continue
ids.extend(tokenizer.convert_tokens_to_ids(ws))
return ids
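# Illustrative example (actual word pieces depend on the vocab of the chosen
# BERT tokenizer):
#   bert_tokenize(toker, "playing games")
#   -> tokenize per word: ["play", "##ing"], ["games"]
#   -> returns the concatenated token ids for those pieces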
def process_nlvr2(jsonl, db, tokenizer, missing=None):
id2len = {}
txt2img = {} # not sure if useful
for line in tqdm(jsonl, desc='processing NLVR2'):
example = json.loads(line)
id_ = example['identifier']
img_id = '-'.join(id_.split('-')[:-1])
img_fname = (f'nlvr2_{img_id}-img0.npz', f'nlvr2_{img_id}-img1.npz')
if missing and (img_fname[0] in missing or img_fname[1] in missing):
continue
input_ids = tokenizer(example['sentence'])
if 'label' in example:
target = 1 if example['label'] == 'True' else 0
else:
target = None
txt2img[id_] = img_fname
id2len[id_] = len(input_ids)
example['input_ids'] = input_ids
example['img_fname'] = img_fname
example['target'] = target
db[id_] = example
return id2len, txt2img
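# e.g. a (hypothetical) identifier 'dev-850-0-1' yields img_id 'dev-850-0' and
# feature files ('nlvr2_dev-850-0-img0.npz', 'nlvr2_dev-850-0-img1.npz');
# examples whose feature files are listed as missing are skipped entirely.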
def process_referring_expressions(refs, instances, iid_to_ann_ids,
db, tokenizer, split):
"""
Inputs:
- refs: [ref_id, ann_id, image_id, split, sent_ids, sentences]
- instances: {images, annotations, categories}
- iid_to_ann_ids: image_id -> ann_ids ordered by extracted butd features
Return:
- id2len : sent_id -> tokenized question length
- images : [{id, file_name, ann_ids, height, width} ]
- annotations: [{id, area, bbox, image_id, category_id, iscrowd}]
- categories : [{id, name, supercategory}]
"""
# images within split
image_set = set([ref['image_id'] for ref in refs if ref['split'] == split])
images = []
for img in instances['images']:
if img['id'] in image_set:
images.append({
'id': img['id'], 'file_name': img['file_name'],
'ann_ids': iid_to_ann_ids[str(img['id'])],
'height': img['height'], 'width': img['width']})
# Images = {img['id']: img for img in images}
# anns within split
annotations = []
for ann in instances['annotations']:
if ann['image_id'] in image_set:
annotations.append({
'id': ann['id'], 'area': ann['area'], 'bbox': ann['bbox'],
'image_id': ann['image_id'],
'category_id': ann['category_id'],
'iscrowd': ann['iscrowd']
})
Anns = {ann['id']: ann for ann in annotations}
# category info
categories = instances['categories']
# refs within split
refs = [ref for ref in refs if ref['split'] == split]
print(f"Processing {len(refs)} annotations...")
id2len = {}
for ref in tqdm(refs, desc='processing referring expressions'):
ref_id = ref['ref_id']
ann_id = ref['ann_id']
image_id = ref['image_id']
img_fname = f"visual_grounding_coco_gt_{int(image_id):012}.npz"
for sent in ref['sentences']:
sent_id = sent['sent_id']
input_ids = tokenizer(sent['sent'])
id2len[str(sent_id)] = len(input_ids)
db[str(sent_id)] = {
'sent_id': sent_id, 'sent': sent['sent'],
'ref_id': ref_id, 'ann_id': ann_id,
'image_id': image_id, 'bbox': Anns[ann_id]['bbox'],
'input_ids': input_ids,
'img_fname': img_fname
}
return id2len, images, annotations, categories, refs
def main(opts):
if not exists(opts.output):
os.makedirs(opts.output)
else:
raise ValueError('Found existing DB. Please explicitly remove '
'for re-processing')
meta = vars(opts)
meta['tokenizer'] = opts.toker
toker = BertTokenizer.from_pretrained(
opts.toker, do_lower_case='uncased' in opts.toker)
tokenizer = bert_tokenize(toker)
meta['UNK'] = toker.convert_tokens_to_ids(['[UNK]'])[0]
meta['CLS'] = toker.convert_tokens_to_ids(['[CLS]'])[0]
meta['SEP'] = toker.convert_tokens_to_ids(['[SEP]'])[0]
meta['MASK'] = toker.convert_tokens_to_ids(['[MASK]'])[0]
    meta['v_range'] = (toker.convert_tokens_to_ids(['!'])[0],
len(toker.vocab))
    with open(f'{opts.output}/meta.json', 'w') as f:
        # meta aliases vars(opts), so the special-token ids added above are
        # part of the dumped metadata as well
        json.dump(meta, f, indent=4)
open_db = curry(open_lmdb, opts.output, readonly=False)
output_field_name = ['id2len', 'txt2img']
with open_db() as db:
if opts.task == 'nlvr':
with open(opts.annotations[0]) as ann:
if opts.missing_imgs is not None:
missing_imgs = set(json.load(open(opts.missing_imgs)))
else:
missing_imgs = None
jsons = process_nlvr2(
ann, db, tokenizer, missing_imgs)
elif opts.task == 're':
data = pickle.load(open(opts.annotations[0], 'rb'))
instances = json.load(open(opts.annotations[1], 'r'))
iid_to_ann_ids = json.load(
open(opts.annotations[2], 'r'))['iid_to_ann_ids']
# dirs/refcoco_testA_bert-base-cased.db -> testA
img_split = opts.output.split('/')[-1].split('.')[0].split('_')[1]
jsons = process_referring_expressions(
data, instances, iid_to_ann_ids,
db, tokenizer, img_split)
output_field_name = [
'id2len', 'images', 'annotations',
'categories', 'refs']
for dump, name in zip(jsons, output_field_name):
with open(f'{opts.output}/{name}.json', 'w') as f:
json.dump(dump, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--annotations', required=True, nargs='+',
help='annotation JSON')
parser.add_argument('--missing_imgs',
help='some training image features are corrupted')
parser.add_argument('--output', required=True,
help='output dir of DB')
parser.add_argument('--task', required=True, default='nlvr',
choices=['nlvr', 're'])
parser.add_argument('--toker', default='bert-base-cased',
                        help='which BERT tokenizer to use')
args = parser.parse_args()
if args.task == 'nlvr':
assert len(args.annotations) == 1
elif args.task == 're':
assert len(args.annotations) == 3
main(args)
UNITER | UNITER-master/inf_vcr.py
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of VCR for submission
"""
import argparse
import json
import os
from os.path import exists
import pandas as pd
from time import time
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
import numpy as np
from torch.utils.data.distributed import DistributedSampler
from data import (PrefetchLoader,
DetectFeatLmdb, VcrTxtTokLmdb, VcrEvalDataset,
vcr_eval_collate)
from model.vcr import UniterForVisualCommonsenseReasoning
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import NoOp, Struct
from utils.const import IMG_DIM
from tqdm import tqdm
NUM_SPECIAL_TOKENS = 81
def load_img_feat(dir_list, opts):
dir_ = dir_list.split(";")
assert len(dir_) <= 2, "More than two img_dirs found"
img_db_gt, img_db = None, None
gt_db_path, db_path = "", ""
for d in dir_:
if "gt" in d:
gt_db_path = d
else:
db_path = d
if gt_db_path != "":
img_db_gt = DetectFeatLmdb(
gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
if db_path != "":
img_db = DetectFeatLmdb(
db_path, opts.conf_th,
opts.max_bb, opts.min_bb, opts.num_bb,
opts.compressed_db)
return img_db, img_db_gt
def save_for_submission(pred_file):
with open(os.path.join(pred_file), "r") as f:
data = json.load(f)
probs_grp = []
ids_grp = []
ordered_data = sorted(data.items(),
key=lambda item: int(item[0].split("-")[1]))
for annot_id, scores in ordered_data:
ids_grp.append(annot_id)
probs_grp.append(np.array(scores).reshape(1, 5, 4))
# Double check the IDs are in the same order for everything
# assert [x == ids_grp[0] for x in ids_grp]
probs_grp = np.stack(probs_grp, 1)
# essentially probs_grp is a [num_ex, 5, 4] array of probabilities.
# The 5 'groups' are
# [answer, rationale_conditioned_on_a0, rationale_conditioned_on_a1,
# rationale_conditioned_on_a2, rationale_conditioned_on_a3].
# We will flatten this to a CSV file so it's easy to submit.
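    # The resulting header (indexed by annot_id) therefore reads:
    #   answer_0, ..., answer_3,
    #   rationale_conditioned_on_a0_0, ..., rationale_conditioned_on_a0_3,
    #   ..., rationale_conditioned_on_a3_0, ..., rationale_conditioned_on_a3_3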
group_names = ['answer'] + [f'rationale_conditioned_on_a{i}'
for i in range(4)]
probs_df = pd.DataFrame(data=probs_grp.reshape((-1, 20)),
columns=[f'{group_name}_{i}'
for group_name in group_names for i in range(4)])
probs_df['annot_id'] = ids_grp
probs_df = probs_df.set_index('annot_id', drop=True)
return probs_df
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if rank != 0:
LOGGER.disabled = True
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = Struct(json.load(open(hps_file)))
assert opts.split in opts.img_db and opts.split in opts.txt_db
# load DBs and image dirs
eval_img_db, eval_img_db_gt = load_img_feat(opts.img_db, model_opts)
eval_txt_db = VcrTxtTokLmdb(opts.txt_db, -1)
eval_dataset = VcrEvalDataset(
"test", eval_txt_db, img_db=eval_img_db,
img_db_gt=eval_img_db_gt)
# Prepare model
model = UniterForVisualCommonsenseReasoning.from_pretrained(
f'{opts.output_dir}/log/model.json', state_dict={},
img_dim=IMG_DIM)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
state_dict = checkpoint.get('model_state', checkpoint)
matched_state_dict = {}
unexpected_keys = set()
missing_keys = set()
for name, param in model.named_parameters():
missing_keys.add(name)
for key, data in state_dict.items():
if key in missing_keys:
matched_state_dict[key] = data
missing_keys.remove(key)
else:
unexpected_keys.add(key)
LOGGER.info(f"Unexpected_keys: {list(unexpected_keys)}")
LOGGER.info(f"Missing_keys: {list(missing_keys)}")
model.load_state_dict(matched_state_dict, strict=False)
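    # Only keys present in both the checkpoint and the model were matched
    # above; strict=False tolerates whatever stayed in missing_keys (e.g. the
    # embeddings re-initialized by init_type_embedding/init_word_embedding).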
model.to(device)
if opts.fp16:
model = amp.initialize(model, enabled=True, opt_level='O2')
eval_dataloader = DataLoader(eval_dataset,
batch_size=opts.batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
shuffle=False,
collate_fn=vcr_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
_, results = evaluate(model, eval_dataloader)
result_dir = f'{opts.output_dir}/results_{opts.split}'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
all_results = {}
for id2res in all_gather_list(results):
all_results.update(id2res)
if hvd.rank() == 0:
with open(f'{result_dir}/'
f'results_{opts.checkpoint}_all.json', 'w') as f:
json.dump(all_results, f)
probs_df = save_for_submission(
f'{result_dir}/results_{opts.checkpoint}_all.json')
probs_df.to_csv(f'{result_dir}/results_{opts.checkpoint}_all.csv')
@torch.no_grad()
def evaluate(model, eval_loader):
model.eval()
LOGGER.info("start running evaluation ...")
if hvd.rank() == 0:
val_pbar = tqdm(total=len(eval_loader))
else:
val_pbar = NoOp()
val_qa_loss, val_qar_loss = 0, 0
tot_qa_score, tot_qar_score, tot_score = 0, 0, 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(eval_loader):
qids = batch['qids']
qa_targets, qar_targets = batch['qa_targets'], batch['qar_targets']
scores = model(batch, compute_loss=False)
scores = scores.view(len(qids), -1)
if torch.max(qa_targets) > -1:
vcr_qa_loss = F.cross_entropy(
scores[:, :4], qa_targets.squeeze(-1), reduction="sum")
if scores.shape[1] > 8:
qar_scores = []
for batch_id in range(scores.shape[0]):
answer_ind = qa_targets[batch_id].item()
qar_index = [4+answer_ind*4+i
for i in range(4)]
qar_scores.append(scores[batch_id, qar_index])
qar_scores = torch.stack(qar_scores, dim=0)
else:
qar_scores = scores[:, 4:]
vcr_qar_loss = F.cross_entropy(
qar_scores, qar_targets.squeeze(-1), reduction="sum")
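            # Assumed score layout per example: columns 0-3 are the four
            # answer choices; when all 20 columns are present, columns 4-19
            # hold rationale scores in four groups of four, grouped by the
            # answer they condition on, hence the 4 + answer_ind*4 + i
            # indexing above.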
val_qa_loss += vcr_qa_loss.item()
val_qar_loss += vcr_qar_loss.item()
curr_qa_score, curr_qar_score, curr_score = compute_accuracies(
scores[:, :4], qa_targets, qar_scores, qar_targets)
tot_qar_score += curr_qar_score
tot_qa_score += curr_qa_score
tot_score += curr_score
for qid, score in zip(qids, scores):
results[qid] = score.cpu().tolist()
n_ex += len(qids)
val_pbar.update(1)
val_qa_loss = sum(all_gather_list(val_qa_loss))
val_qar_loss = sum(all_gather_list(val_qar_loss))
tot_qa_score = sum(all_gather_list(tot_qa_score))
tot_qar_score = sum(all_gather_list(tot_qar_score))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_qa_loss /= n_ex
val_qar_loss /= n_ex
val_qa_acc = tot_qa_score / n_ex
val_qar_acc = tot_qar_score / n_ex
val_acc = tot_score / n_ex
val_log = {'valid/ex_per_s': n_ex/tot_time,
'valid/vcr_qa_loss': val_qa_loss,
'valid/vcr_qar_loss': val_qar_loss,
'valid/acc_qa': val_qa_acc,
'valid/acc_qar': val_qar_acc,
'valid/acc': val_acc}
model.train()
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds, "
f"score_qa: {val_qa_acc*100:.2f} "
f"score_qar: {val_qar_acc*100:.2f} "
f"score: {val_acc*100:.2f} ")
return val_log, results
def compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):
outputs_qa = out_qa.max(dim=-1)[1]
outputs_qar = out_qar.max(dim=-1)[1]
matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()
matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()
matched_joined = matched_qa & matched_qar
n_correct_qa = matched_qa.sum().item()
n_correct_qar = matched_qar.sum().item()
n_correct_joined = matched_joined.sum().item()
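    # joint (Q->AR) count: an example is correct only if both the answer and
    # the rationale are predicted correctly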
return n_correct_qa, n_correct_qar, n_correct_joined
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
default="/txt/vcr_val.db/", type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db",
default="/img/vcr_gt_val/;/img/vcr_val/", type=str,
help="The input train images.")
parser.add_argument("--split",
default="val", type=str,
help="The input split")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="can be the path to binary or int number (step)")
parser.add_argument("--batch_size",
default=10, type=int,
help="number of examples in a batch")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory of the training command")
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
main(args)
UNITER | UNITER-master/train_vcr.py
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VCR
"""
import argparse
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, PrefetchLoader, DetectFeatLmdb,
VcrTxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
VcrDataset, VcrEvalDataset,
vcr_collate, vcr_eval_collate,)
from model.vcr import UniterForVisualCommonsenseReasoning
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import BUCKET_SIZE, IMG_DIM
NUM_SPECIAL_TOKENS = 81
def build_dataloader(dataset, collate_fn, is_train, opts):
batch_size = (opts.train_batch_size if is_train
else opts.val_batch_size)
if is_train:
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
dataloader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
else:
dataloader = DataLoader(dataset, batch_size=batch_size,
num_workers=opts.n_workers, shuffle=False,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def build_optimizer(model, opts):
""" vqa linear may get larger learning rate """
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = [(n, p) for n, p in model.named_parameters()
if 'vcr_output' not in n]
param_top = [(n, p) for n, p in model.named_parameters()
if 'vcr_output' in n]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
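    # Group order matters downstream: groups 0-1 hold the vcr_output head and
    # later receive lr * opts.lr_mul in the training loop, groups 2-3 the rest
    # of the model; within each pair the second group disables weight decay
    # for bias/LayerNorm parameters.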
    # Adam-family optimizers only (adam / adamax / adamw)
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
def load_img_feat(db_list, all_img_dbs, opts):
db_ = db_list.split(";")
assert len(db_) <= 2, "More than two img_dbs found"
gt_db_path, db_path = "", ""
for d in db_:
if "gt" in d:
gt_db_path = d
else:
db_path = d
if gt_db_path != "":
img_db_gt = DetectFeatLmdb(
gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
all_img_dbs.path2imgdb[gt_db_path] = img_db_gt
else:
img_db_gt = None
img_db = all_img_dbs[db_path] if db_path != "" else None
all_img_dbs.path2imgdb[db_path] = img_db
return img_db, img_db_gt
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db, img_db_gt = load_img_feat(img_path, all_img_dbs, opts)
qa_txt_db = VcrTxtTokLmdb(txt_path, opts.max_txt_len, task="qa")
qar_txt_db = VcrTxtTokLmdb(txt_path, opts.max_txt_len, task="qar")
train_datasets.append(
VcrDataset(qa_txt_db, img_db_gt=img_db_gt, img_db=img_db))
train_datasets.append(
VcrDataset(qar_txt_db, img_db_gt=img_db_gt, img_db=img_db))
train_dataset = ConcatDatasetWithLens(train_datasets)
train_dataloader = build_dataloader(train_dataset, vcr_collate, True, opts)
# val
LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db, val_img_db_gt = load_img_feat(
opts.val_img_db, all_img_dbs, opts)
val_txt_db = VcrTxtTokLmdb(opts.val_txt_db, -1)
val_dataset = VcrEvalDataset(
"val", val_txt_db, img_db=val_img_db, img_db_gt=val_img_db_gt)
val_final_dataset = VcrEvalDataset(
"test", val_txt_db, img_db=val_img_db, img_db_gt=val_img_db_gt)
val_dataloader = build_dataloader(val_dataset, vcr_eval_collate,
False, opts)
val_final_dataloader = build_dataloader(
val_final_dataset, vcr_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint and opts.checkpoint_from == "pretrain":
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = opts.train_txt_dbs + [opts.val_txt_db]
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(toker == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
model = UniterForVisualCommonsenseReasoning.from_pretrained(
opts.model_config, checkpoint, img_dim=IMG_DIM)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
if opts.checkpoint_from == "vcr_pretrain":
checkpoint = torch.load(opts.checkpoint)
state_dict = checkpoint.get('model_state', checkpoint)
matched_state_dict = {}
unexpected_keys = set()
missing_keys = set()
for name, param in model.named_parameters():
missing_keys.add(name)
for key, data in state_dict.items():
if key in missing_keys:
matched_state_dict[key] = data
missing_keys.remove(key)
else:
unexpected_keys.add(key)
print("Unexpected_keys:", list(unexpected_keys))
print("Missing_keys:", list(missing_keys))
model.load_state_dict(matched_state_dict, strict=False)
del checkpoint
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
        os.makedirs(join(opts.output_dir, 'results'))  # store VCR predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset) * hvd.size())
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for i, param_group in enumerate(optimizer.param_groups):
if i == 0 or i == 1:
param_group['lr'] = lr_this_step * opts.lr_mul
elif i == 2 or i == 3:
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info('===========================================')
if global_step % opts.valid_steps == 0:
val_log, results = validate(
model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
if global_step % opts.valid_steps != 0:
val_log, results = validate(
model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
val_log, results = validate(model, val_final_dataloader)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_final_qa_qar_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
def compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):
outputs_qa = out_qa.max(dim=-1)[1]
outputs_qar = out_qar.max(dim=-1)[1]
matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()
matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()
matched_joined = matched_qa & matched_qar
n_correct_qa = matched_qa.sum().item()
n_correct_qar = matched_qar.sum().item()
n_correct_joined = matched_joined.sum().item()
return n_correct_qa, n_correct_qar, n_correct_joined
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
val_pbar = tqdm(total=len(val_loader))
else:
val_pbar = NoOp()
LOGGER.info("start running validation...")
model.eval()
val_qa_loss, val_qar_loss = 0, 0
tot_qa_score, tot_qar_score, tot_score = 0, 0, 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
scores = model(batch, compute_loss=False)
qa_targets = batch['qa_targets']
qar_targets = batch['qar_targets']
qids = batch['qids']
scores = scores.view(len(qids), -1)
vcr_qa_loss = F.cross_entropy(
scores[:, :4], qa_targets.squeeze(-1), reduction="sum")
if scores.shape[1] > 8:
qar_scores = []
for batch_id in range(scores.shape[0]):
answer_ind = qa_targets[batch_id].item()
qar_index = [4+answer_ind*4+i
for i in range(4)]
qar_scores.append(scores[batch_id, qar_index])
qar_scores = torch.stack(qar_scores, dim=0)
else:
qar_scores = scores[:, 4:]
vcr_qar_loss = F.cross_entropy(
qar_scores, qar_targets.squeeze(-1), reduction="sum")
val_qa_loss += vcr_qa_loss.item()
val_qar_loss += vcr_qar_loss.item()
curr_qa_score, curr_qar_score, curr_score = compute_accuracies(
scores[:, :4], qa_targets, qar_scores, qar_targets)
tot_qar_score += curr_qar_score
tot_qa_score += curr_qa_score
tot_score += curr_score
for qid, score in zip(qids, scores):
results[qid] = score.cpu().tolist()
n_ex += len(qids)
val_pbar.update(1)
val_qa_loss = sum(all_gather_list(val_qa_loss))
val_qar_loss = sum(all_gather_list(val_qar_loss))
tot_qa_score = sum(all_gather_list(tot_qa_score))
tot_qar_score = sum(all_gather_list(tot_qar_score))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_qa_loss /= n_ex
val_qar_loss /= n_ex
val_qa_acc = tot_qa_score / n_ex
val_qar_acc = tot_qar_score / n_ex
val_acc = tot_score / n_ex
val_log = {'valid/vcr_qa_loss': val_qa_loss,
'valid/vcr_qar_loss': val_qar_loss,
'valid/acc_qa': val_qa_acc,
'valid/acc_qar': val_qar_acc,
'valid/acc': val_acc,
'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score_qa: {val_qa_acc*100:.2f} "
f"score_qar: {val_qar_acc*100:.2f} "
f"score: {val_acc*100:.2f} ")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument("--checkpoint_from",
default='pretrain', type=str,
choices=['pretrain', 'vcr_pretrain'],
help="which setting is checkpoint from")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--lr_mul", default=10.0, type=float,
help="multiplier for top layer lr")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
UNITER | UNITER-master/inf_vqa.py
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of VQA for submission
"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
import numpy as np
from cytoolz import concat
from data import (TokenBucketSampler, PrefetchLoader,
DetectFeatLmdb, TxtTokLmdb, VqaEvalDataset, vqa_eval_collate)
from model.vqa import UniterForVisualQuestionAnswering
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.const import BUCKET_SIZE, IMG_DIM
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = Struct(json.load(open(hps_file)))
# train_examples = None
ans2label_file = f'{opts.output_dir}/ckpt/ans2label.json'
ans2label = json.load(open(ans2label_file))
label2ans = {label: ans for ans, label in ans2label.items()}
# load DBs and image dirs
eval_img_db = DetectFeatLmdb(opts.img_db,
model_opts.conf_th, model_opts.max_bb,
model_opts.min_bb, model_opts.num_bb,
opts.compressed_db)
eval_txt_db = TxtTokLmdb(opts.txt_db, -1)
eval_dataset = VqaEvalDataset(len(ans2label), eval_txt_db, eval_img_db)
# Prepare model
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
model = UniterForVisualQuestionAnswering.from_pretrained(
f'{opts.output_dir}/log/model.json', checkpoint,
img_dim=IMG_DIM, num_answer=len(ans2label))
model.to(device)
if opts.fp16:
model = amp.initialize(model, enabled=True, opt_level='O2')
sampler = TokenBucketSampler(eval_dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=opts.batch_size, droplast=False)
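    # TokenBucketSampler batches by total token count (batch_size here counts
    # tokens, not examples), grouping similar-length examples to cut padding.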
eval_dataloader = DataLoader(eval_dataset,
batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vqa_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
val_log, results, logits = evaluate(model, eval_dataloader, label2ans,
opts.save_logits)
result_dir = f'{opts.output_dir}/results_test'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
all_results = list(concat(all_gather_list(results)))
if opts.save_logits:
all_logits = {}
for id2logit in all_gather_list(logits):
all_logits.update(id2logit)
if hvd.rank() == 0:
with open(f'{result_dir}/'
f'results_{opts.checkpoint}_all.json', 'w') as f:
json.dump(all_results, f)
if opts.save_logits:
np.savez(f'{result_dir}/logits_{opts.checkpoint}_all.npz',
**all_logits)
@torch.no_grad()
def evaluate(model, eval_loader, label2ans, save_logits=False):
LOGGER.info("start running evaluation...")
model.eval()
n_ex = 0
st = time()
results = []
logits = {}
for i, batch in enumerate(eval_loader):
qids = batch['qids']
scores = model(batch, compute_loss=False)
answers = [label2ans[i]
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
for qid, answer in zip(qids, answers):
results.append({'answer': answer, 'question_id': int(qid)})
if save_logits:
scores = scores.cpu()
            # separate index so the outer batch counter `i` (used for the
            # progress log below) is not shadowed
            for j, qid in enumerate(qids):
                logits[qid] = scores[j].half().numpy()
if i % 100 == 0 and hvd.rank() == 0:
n_results = len(results)
n_results *= hvd.size() # an approximation to avoid hangs
LOGGER.info(f'{n_results}/{len(eval_loader.dataset)} '
'answers predicted')
n_ex += len(qids)
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_log = {'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds "
f"at {int(n_ex/tot_time)} examples per second")
return val_log, results, logits
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1] # argmax
one_hots = torch.zeros(*labels.size(), device=labels.device)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
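    # labels are assumed to be soft VQA targets (fractional per-answer
    # scores), so the product of the argmax one-hot with them gives each
    # example's VQA accuracy contribution rather than a hard 0/1 match.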
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="can be the path to binary or int number (step)")
parser.add_argument("--batch_size",
default=8192, type=int,
help="number of tokens in a batch")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory of the training command")
parser.add_argument("--save_logits", action='store_true',
help="Whether to save logits (for making ensemble)")
# Prepro parameters
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
main(args)