repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
UNITER | UNITER-master/inf_nlvr2.py | """run inference of NLVR2 (single GPU only)"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from data import (DetectFeatLmdb, TxtTokLmdb,
PrefetchLoader, TokenBucketSampler,
Nlvr2PairedEvalDataset, Nlvr2TripletEvalDataset,
nlvr2_paired_eval_collate, nlvr2_triplet_eval_collate)
from model.model import UniterConfig
from model.nlvr2 import (UniterForNlvr2Paired, UniterForNlvr2Triplet,
UniterForNlvr2PairedAttn)
from utils.misc import Struct
from utils.const import IMG_DIM, BUCKET_SIZE
def main(opts):
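    """Run single-GPU NLVR2 inference from a finetuned checkpoint and write predictions to CSV."""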
hvd.init()
device = torch.device("cuda") # support single GPU only
train_opts = Struct(json.load(open(f'{opts.train_dir}/log/hps.json')))
if 'paired' in train_opts.model:
EvalDatasetCls = Nlvr2PairedEvalDataset
eval_collate_fn = nlvr2_paired_eval_collate
if train_opts.model == 'paired':
ModelCls = UniterForNlvr2Paired
elif train_opts.model == 'paired-attn':
ModelCls = UniterForNlvr2PairedAttn
else:
raise ValueError('unrecognized model type')
elif train_opts.model == 'triplet':
EvalDatasetCls = Nlvr2TripletEvalDataset
ModelCls = UniterForNlvr2Triplet
eval_collate_fn = nlvr2_triplet_eval_collate
else:
raise ValueError('unrecognized model type')
img_db = DetectFeatLmdb(opts.img_db,
train_opts.conf_th, train_opts.max_bb,
train_opts.min_bb, train_opts.num_bb,
opts.compressed_db)
txt_db = TxtTokLmdb(opts.txt_db, -1)
dset = EvalDatasetCls(txt_db, img_db, train_opts.use_img_type)
batch_size = (train_opts.val_batch_size if opts.batch_size is None
else opts.batch_size)
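    # TokenBucketSampler groups examples of similar token length into buckets to reduce padding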
sampler = TokenBucketSampler(dset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=False)
eval_dataloader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=eval_collate_fn)
eval_dataloader = PrefetchLoader(eval_dataloader)
# Prepare model
ckpt_file = f'{opts.train_dir}/ckpt/model_step_{opts.ckpt}.pt'
checkpoint = torch.load(ckpt_file)
model_config = UniterConfig.from_json_file(
f'{opts.train_dir}/log/model.json')
model = ModelCls(model_config, img_dim=IMG_DIM)
model.init_type_embedding()
model.load_state_dict(checkpoint, strict=False)
model.to(device)
model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
results = evaluate(model, eval_dataloader, device)
# write results
if not exists(opts.output_dir):
os.makedirs(opts.output_dir)
with open(f'{opts.output_dir}/results.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
    print('all results written')
@torch.no_grad()
def evaluate(model, eval_loader, device):
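    """Predict True/False for each NLVR2 example; returns a list of (identifier, answer) pairs."""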
print("start running evaluation...")
model.eval()
n_ex = 0
st = time()
results = []
for i, batch in enumerate(eval_loader):
qids = batch['qids']
del batch['targets']
del batch['qids']
scores = model(batch, compute_loss=False)
answers = ['True' if i == 1 else 'False'
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
results.extend(zip(qids, answers))
n_results = len(results)
print(f'{n_results}/{len(eval_loader.dataset)} answers predicted')
n_ex += len(qids)
tot_time = time()-st
model.train()
print(f"evaluation finished in {int(tot_time)} seconds "
f"at {int(n_ex/tot_time)} examples per second")
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
type=str, required=True,
help="The input train corpus.")
parser.add_argument("--img_db",
type=str, required=True,
help="The input train images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--batch_size", type=int,
help="batch size for evaluation")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
parser.add_argument('--fp16', action='store_true',
help="fp16 inference")
parser.add_argument("--train_dir", type=str, required=True,
help="The directory storing NLVR2 finetuning output")
parser.add_argument("--ckpt", type=int, required=True,
help="specify the checkpoint to run inference")
parser.add_argument("--output_dir", type=str, required=True,
help="The output directory where the prediction "
"results will be written.")
args = parser.parse_args()
main(args)
| 5,465 | 37.765957 | 77 | py |
UNITER | UNITER-master/pretrain_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER pre-training
"""
import argparse
from collections import defaultdict
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler,
MetaLoader, PrefetchLoader, DetectFeatLmdb,
VcrTxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
MlmDatasetForVCR, mlm_collate_for_vcr,
MrfrDatasetForVCR, mrfr_collate_for_vcr,
MrcDatasetForVCR, mrc_collate_for_vcr)
from model.pretrain_vcr import UniterForPretrainingForVCR
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, IMG_LABEL_DIM, BUCKET_SIZE
NUM_SPECIAL_TOKENS = 81
def build_dataloader(dataset, collate_fn, is_train, opts):
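    """Wrap a dataset in a DataLoader batched by token count via TokenBucketSampler."""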
if is_train:
batch_size = opts.train_batch_size
else:
batch_size = opts.val_batch_size
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return loader
def build_mlm_dataset(txt_db, img_db_gt, img_db, is_train, opts):
    collate_fn = mlm_collate_for_vcr
    if is_train:
        datasets = [MlmDatasetForVCR(t, i_gt, i)
                    for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
        dataset = ConcatDatasetWithLens(datasets)
    else:
        dataset = MlmDatasetForVCR(txt_db, img_db_gt, img_db)
    return dataset, collate_fn
def build_mrfr_dataset(txt_db, img_db_gt, img_db, is_train, opts):
if is_train:
datasets = [MrfrDatasetForVCR(opts.mrm_prob, t, i_gt, i)
for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrfrDatasetForVCR(opts.mrm_prob, txt_db, img_db_gt, img_db)
return dataset, mrfr_collate_for_vcr
def build_mrc_dataset(txt_db, img_db_gt, img_db, is_train, opts):
if is_train:
datasets = [MrcDatasetForVCR(opts.mrm_prob, t, i_gt, i)
for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrcDatasetForVCR(opts.mrm_prob, txt_db, img_db_gt, img_db)
return dataset, mrc_collate_for_vcr
def load_img_feat(db_list, all_img_dbs, opts):
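    """Parse a ';'-separated img_db spec into the detection-feature db and the optional ground-truth ('gt') db."""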
db_ = db_list.split(";")
assert len(db_) <= 2, "More than two img_dbs found"
gt_db_path, db_path = "", ""
for d in db_:
if "gt" in d:
gt_db_path = d
else:
db_path = d
if gt_db_path != "":
img_db_gt = DetectFeatLmdb(
gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
all_img_dbs.path2imgdb[gt_db_path] = img_db_gt
else:
img_db_gt = None
img_db = all_img_dbs[db_path] if db_path != "" else None
all_img_dbs.path2imgdb[db_path] = img_db
return img_db, img_db_gt
def create_dataloaders(datasets, is_train, opts, all_img_dbs=None):
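    """Build train/val dataloaders for each VCR pretraining task defined in the config."""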
if all_img_dbs is None:
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
dataloaders = {}
for dset in datasets:
for vcr_task in ["qa", "qar"]:
if is_train:
assert len(dset['db']) == len(dset['img'])
assert len(dset['tasks']) == len(dset['mix_ratio'])
img_db, img_db_gt = [], []
for img_path in dset['img']:
curr_img_db, curr_img_db_gt = load_img_feat(
img_path, all_img_dbs, opts)
img_db.append(curr_img_db)
img_db_gt.append(curr_img_db_gt)
else:
assert len(dset['db']) == len(dset['img']) == 1
img_db, img_db_gt = load_img_feat(
dset['img'][0], all_img_dbs, opts)
for i, t in enumerate(dset['tasks']):
task = f'{t}_{dset["name"]}'
if is_train:
LOGGER.info(
f"Loading {task} train dataset with vcr_{vcr_task}, "
f"{dset['db']}, {[img.img_dir for img in img_db]},"
f"{[img.img_dir for img in img_db_gt]}")
txt_db = [VcrTxtTokLmdb(path, opts.max_txt_len,
task=vcr_task)
for path in dset['db']]
else:
LOGGER.info(
f"Loading {task} val dataset with vcr_{vcr_task}, "
f"{dset['db']}, {img_db.img_dir},"
f"{img_db_gt.img_dir}")
txt_db = VcrTxtTokLmdb(dset['db'][0], -1,
task=vcr_task)
if task.startswith('mlm'):
dataset = build_mlm_dataset(
txt_db, img_db_gt, img_db, is_train, opts)
elif task.startswith('mrfr'):
dataset = build_mrfr_dataset(
txt_db, img_db_gt, img_db, is_train, opts)
elif task.startswith('mrc'):
dataset = build_mrc_dataset(
txt_db, img_db_gt, img_db, is_train, opts)
else:
raise ValueError(f'Undefined task {task}')
LOGGER.info(f"{len(dataset[0])*hvd.size()} samples loaded")
loader = build_dataloader(*dataset, is_train, opts)
if is_train:
ratio = dset['mix_ratio'][i]
dataloaders[task] = (loader, ratio)
else:
dataloaders[task] = PrefetchLoader(loader)
return dataloaders, all_img_dbs
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
all_dbs = [db for datasets in [opts.train_datasets, opts.val_datasets]
for dset in datasets for db in dset['db']]
tokenizer = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(tokenizer == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
# build data loaders
train_dataloaders, all_img_dbs = create_dataloaders(
opts.train_datasets, True, opts)
val_dataloaders, _ = create_dataloaders(
opts.val_datasets, False, opts, all_img_dbs)
meta_loader = MetaLoader(train_dataloaders,
accum_steps=opts.gradient_accumulation_steps,
distributed=n_gpu > 1)
meta_loader = PrefetchLoader(meta_loader)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForPretrainingForVCR.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, img_label_dim=IMG_LABEL_DIM)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
model.to(device)
model.train()
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
task2scaler = {t: i for i, t in enumerate(train_dataloaders.keys())}
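    # give each task its own amp loss scaler so gradient scales stay independent across tasks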
model, optimizer = amp.initialize(model, optimizer,
num_losses=len(task2scaler),
enabled=opts.fp16, opt_level='O2')
global_step = 0
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
# to compute training statistics
task2loss = {task: RunningMeter(f'loss/{task}')
for task in train_dataloaders.keys()}
n_examples = defaultdict(int)
n_in_units = defaultdict(int)
n_loss_units = defaultdict(int)
grad_norm = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
for step, (name, batch) in enumerate(meta_loader):
# forward pass
n_examples[name] += batch['input_ids'].size(0)
n_in_units[name] += (batch['attn_masks'] == 1).sum().item()
task = name.split('_')[0]
loss = model(batch, task=task, compute_loss=True)
n_loss_units[name] += loss.size(0)
loss = loss.mean() # loss is not normalized in model
# backward pass
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale,
loss_id=task2scaler[name]) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
task2loss[name](loss.item())
# optimizer update and logging
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.log_scaler_dict({ll.name: ll.val
for ll in task2loss.values()
if ll.val is not None})
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'==============Step {global_step}===============')
for t in train_dataloaders.keys():
assert all(tt == t for tt in all_gather_list(t))
tot_ex = sum(all_gather_list(n_examples[t]))
ex_per_sec = int(tot_ex / (time()-start))
tot_in = sum(all_gather_list(n_in_units[t]))
in_per_sec = int(tot_in / (time()-start))
tot_l = sum(all_gather_list(n_loss_units[t]))
l_per_sec = int(tot_l / (time()-start))
LOGGER.info(f'{t}: {tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar(f'perf/{t}_ex_per_s', ex_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_in_per_s', in_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_loss_per_s', l_per_sec,
global_step)
LOGGER.info('===============================================')
if global_step % opts.valid_steps == 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step % opts.valid_steps != 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
def validate(model, val_dataloaders):
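    """Run validation for every task-specific loader and log the results to TensorBoard."""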
model.eval()
for task, loader in val_dataloaders.items():
LOGGER.info(f"validate on {task} task")
if task.startswith('mlm'):
val_log = validate_mlm(model, loader)
elif task.startswith('mrfr'):
val_log = validate_mrfr(model, loader)
elif task.startswith('mrc'):
val_log = validate_mrc(model, loader, task)
else:
raise ValueError(f'Undefined task {task}')
val_log = {f'{task}_{k}': v for k, v in val_log.items()}
TB_LOGGER.log_scaler_dict(
{f'valid_{task}/{k}': v for k, v in val_log.items()})
model.train()
@torch.no_grad()
def validate_mlm(model, val_loader):
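    """Compute MLM cross-entropy loss and word-level accuracy over masked tokens."""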
LOGGER.info("start running MLM validation...")
val_loss = 0
n_correct = 0
n_word = 0
st = time()
for i, batch in enumerate(val_loader):
scores = model(batch, task='mlm', compute_loss=False)
labels = batch['txt_labels']
labels = labels[labels != -1]
loss = F.cross_entropy(scores, labels, reduction='sum')
val_loss += loss.item()
n_correct += (scores.max(dim=-1)[1] == labels).sum().item()
n_word += labels.numel()
val_loss = sum(all_gather_list(val_loss))
n_correct = sum(all_gather_list(n_correct))
n_word = sum(all_gather_list(n_word))
tot_time = time()-st
val_loss /= n_word
acc = n_correct / n_word
val_log = {'loss': val_loss,
'acc': acc,
'tok_per_s': n_word/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"acc: {acc*100:.2f}")
return val_log
def accuracy_count(out, labels):
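    """Count correct predictions, ignoring positions labeled -1."""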
outputs = out.max(dim=-1)[1]
mask = labels != -1
n_correct = (outputs == labels).masked_select(mask).sum().item()
return n_correct
@torch.no_grad()
def validate_mrfr(model, val_loader):
LOGGER.info("start running MRFR validation...")
val_loss = 0
n_feat = 0
st = time()
for i, batch in enumerate(val_loader):
loss = model(batch, task='mrfr', compute_loss=True)
val_loss += loss.sum().item() / IMG_DIM
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_log = {'loss': val_loss,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"loss: {val_loss:.2f}")
return val_log
@torch.no_grad()
def validate_mrc(model, val_loader, task):
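    """Compute MRC loss (KL-divergence or cross-entropy, depending on the task) and region classification accuracy."""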
LOGGER.info("start running MRC validation...")
val_loss = 0
n_feat = 0
st = time()
tot_score = 0
for i, batch in enumerate(val_loader):
prediction_soft_label = model(
batch, task=task, compute_loss=False)
        label_targets = batch['label_targets']
        if "kl" in task:
            prediction_soft_label = F.log_softmax(
                prediction_soft_label, dim=-1)
            loss = F.kl_div(
                prediction_soft_label, label_targets, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label, label_targets)
else:
# background class should not be the target
cls_label_targets = label_targets[:, 1:].max(dim=-1)[1] + 1
loss = F.cross_entropy(
prediction_soft_label, cls_label_targets,
ignore_index=0, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label[:, 1:], label_targets[:, 1:])
val_loss += loss.item()
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_acc = tot_score / n_feat
val_log = {'loss': val_loss,
'acc': val_acc,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log
def compute_accuracy_for_soft_targets(out, labels):
outputs = out.max(dim=-1)[1]
labels = labels.max(dim=-1)[1] # argmax
n_correct = (outputs == labels).sum().item()
return n_correct
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
# NOTE: train tasks and val tasks cannot take command line arguments
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config", type=str,
help="path to model structure config json")
parser.add_argument("--checkpoint", default=None, type=str,
help="path to model checkpoint (*.pt)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
parser.add_argument('--mrm_prob', default=0.15, type=float,
help='probability to mask in MRM training')
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adamw',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=10000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', required=True, help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 22,741 | 39.538324 | 79 | py |
UNITER | UNITER-master/inf_re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of Referring Expression Comprehension for submission
"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from cytoolz import concat
from data import (PrefetchLoader, DetectFeatLmdb, ReTxtTokLmdb,
ReEvalDataset, re_eval_collate)
from data.sampler import DistributedSampler
from model.re import UniterForReferringExpressionComprehension
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.const import IMG_DIM
def write_to_tmp(txt, tmp_file):
    """Append a short result string to an optional tmp file."""
    if tmp_file:
        with open(tmp_file, "a") as f:
            f.write(txt)
def main(opts):
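    """Run referring expression inference over one or more txt_dbs and dump per-sentence predictions."""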
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = json.load(open(hps_file))
if 'mlp' not in model_opts:
model_opts['mlp'] = 1
model_opts = Struct(model_opts)
# Prepare model
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_epoch_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
model = UniterForReferringExpressionComprehension.from_pretrained(
f'{opts.output_dir}/log/model.json', checkpoint,
img_dim=IMG_DIM, mlp=model_opts.mlp)
model.to(device)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
if opts.fp16:
model = amp.initialize(model, enabled=True, opt_level='O2')
# load DBs and image dirs
img_db_type = "gt" if "coco_gt" in opts.img_db else "det"
conf_th = -1 if img_db_type == "gt" else model_opts.conf_th
num_bb = 100 if img_db_type == "gt" else model_opts.num_bb
eval_img_db = DetectFeatLmdb(opts.img_db,
conf_th, model_opts.max_bb,
model_opts.min_bb, num_bb,
opts.compressed_db)
# Prepro txt_dbs
txt_dbs = opts.txt_db.split(':')
for txt_db in txt_dbs:
print(f'Evaluating {txt_db}')
eval_txt_db = ReTxtTokLmdb(txt_db, -1)
eval_dataset = ReEvalDataset(
eval_txt_db, eval_img_db, use_gt_feat=img_db_type == "gt")
sampler = DistributedSampler(eval_dataset, num_replicas=n_gpu,
rank=rank, shuffle=False)
eval_dataloader = DataLoader(eval_dataset,
sampler=sampler,
batch_size=opts.batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=re_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
# evaluate
val_log, results = evaluate(model, eval_dataloader)
result_dir = f'{opts.output_dir}/results_test'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
        write_to_tmp(
            f"{txt_db.split('_')[1].split('.')[0]}"
            f"-acc({img_db_type}): {results['acc']*100:.2f}% ",
            opts.tmp_file)
all_results = list(concat(all_gather_list(results)))
if hvd.rank() == 0:
db_split = txt_db.split('/')[-1].split('.')[0] # refcoco+_val
img_dir = opts.img_db.split('/')[-1] # re_coco_gt
with open(f'{result_dir}/'
f'results_{opts.checkpoint}_{db_split}_on_{img_dir}_all.json', 'w') as f:
json.dump(all_results, f)
# print
print(f'{opts.output_dir}/results_test')
        write_to_tmp('\n', opts.tmp_file)
@torch.no_grad()
def evaluate(model, eval_loader):
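    """Predict a bounding box per sentence; a prediction counts as correct when IoU with the target box exceeds 0.5."""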
LOGGER.info("start running evaluation...")
model.eval()
tot_score = 0
n_ex = 0
st = time()
predictions = []
for i, batch in enumerate(eval_loader):
(tgt_box_list, obj_boxes_list, sent_ids) = (
batch['tgt_box'], batch['obj_boxes'], batch['sent_ids'])
# scores (n, max_num_bb)
scores = model(batch, compute_loss=False)
ixs = torch.argmax(scores, 1).cpu().detach().numpy() # (n, )
# pred_boxes
for ix, obj_boxes, tgt_box, sent_id in \
zip(ixs, obj_boxes_list, tgt_box_list, sent_ids):
pred_box = obj_boxes[ix]
predictions.append({'sent_id': int(sent_id),
'pred_box': pred_box.tolist(),
'tgt_box': tgt_box.tolist()})
if eval_loader.loader.dataset.computeIoU(pred_box, tgt_box) > .5:
tot_score += 1
n_ex += 1
if i % 100 == 0 and hvd.rank() == 0:
n_results = len(predictions)
n_results *= hvd.size() # an approximation to avoid hangs
LOGGER.info(f'{n_results}/{len(eval_loader.dataset)} '
'answers predicted')
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
tot_score = sum(all_gather_list(tot_score))
val_acc = tot_score / n_ex
val_log = {'valid/acc': val_acc, 'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation ({n_ex} sents) finished in"
f" {int(tot_time)} seconds"
f", accuracy: {val_acc*100:.2f}%")
    # summarize
results = {'acc': val_acc, 'predictions': predictions}
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="can be the path to binary or int number (step)")
parser.add_argument("--batch_size",
default=256, type=int,
help="number of sentences per batch")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory of the training command")
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# Write simple results to some tmp file
parser.add_argument('--tmp_file', type=str, default=None,
help="write results to tmp file")
args = parser.parse_args()
main(args)
| 7,395 | 35.98 | 99 | py |
UNITER | UNITER-master/inf_itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference for Image Text Retrieval
"""
import argparse
import json
import os
from os.path import exists
import pickle
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from data import (PrefetchLoader,
DetectFeatLmdb, TxtTokLmdb, ItmEvalDataset, itm_eval_collate)
from model.itm import UniterForImageTextRetrieval
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.const import IMG_DIM
from utils.itm_eval import inference, itm_eval
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.train_config is not None:
train_opts = Struct(json.load(open(opts.train_config)))
opts.conf_th = train_opts.conf_th
opts.max_bb = train_opts.max_bb
opts.min_bb = train_opts.min_bb
opts.num_bb = train_opts.num_bb
# load DBs and image dirs
eval_img_db = DetectFeatLmdb(opts.img_db,
opts.conf_th, opts.max_bb,
opts.min_bb, opts.num_bb,
opts.compressed_db)
eval_txt_db = TxtTokLmdb(opts.txt_db, -1)
eval_dataset = ItmEvalDataset(eval_txt_db, eval_img_db, opts.batch_size)
# Prepare model
checkpoint = torch.load(opts.checkpoint)
model = UniterForImageTextRetrieval.from_pretrained(
opts.model_config, checkpoint, img_dim=IMG_DIM)
if 'rank_output' not in checkpoint:
model.init_output() # zero shot setting
model.to(device)
model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
eval_dataloader = DataLoader(eval_dataset, batch_size=1,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=itm_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
eval_log, results = evaluate(model, eval_dataloader)
if hvd.rank() == 0:
if not exists(opts.output_dir) and rank == 0:
os.makedirs(opts.output_dir)
with open(f'{opts.output_dir}/config.json', 'w') as f:
json.dump(vars(opts), f)
with open(f'{opts.output_dir}/results.bin', 'wb') as f:
pickle.dump(results, f)
with open(f'{opts.output_dir}/scores.json', 'w') as f:
json.dump(eval_log, f)
LOGGER.info(f'evaluation finished')
LOGGER.info(
f"======================== Results =========================\n"
f"image retrieval R1: {eval_log['img_r1']*100:.2f},\n"
f"image retrieval R5: {eval_log['img_r5']*100:.2f},\n"
f"image retrieval R10: {eval_log['img_r10']*100:.2f}\n"
f"text retrieval R1: {eval_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: {eval_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: {eval_log['txt_r10']*100:.2f}")
LOGGER.info("========================================================")
@torch.no_grad()
def evaluate(model, eval_loader):
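    """Compute the full text-image score matrix (gathered across ranks) and report retrieval metrics on rank 0."""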
model.eval()
st = time()
LOGGER.info("start running Image/Text Retrieval evaluation ...")
score_matrix = inference(model, eval_loader)
dset = eval_loader.dataset
all_score = hvd.allgather(score_matrix)
all_txt_ids = [i for ids in all_gather_list(dset.ids)
for i in ids]
all_img_ids = dset.all_img_ids
assert all_score.size() == (len(all_txt_ids), len(all_img_ids))
if hvd.rank() != 0:
return {}, tuple()
# NOTE: only use rank0 to compute final scores
eval_log = itm_eval(all_score, all_txt_ids, all_img_ids,
dset.txt2img, dset.img2txts)
results = (all_score, all_txt_ids, all_img_ids)
tot_time = time()-st
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds, ")
return eval_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db", default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db", default=None, type=str,
help="The input train images.")
parser.add_argument("--checkpoint", default=None, type=str,
help="model checkpoint binary")
parser.add_argument("--model_config", default=None, type=str,
help="model config json")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the inference results will be "
"written.")
# optional parameters
parser.add_argument("--train_config", default=None, type=str,
help="hps.json from training (for prepro hps)")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
parser.add_argument("--batch_size", default=400, type=int,
help="number of tokens in a batch")
# device parameters
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
main(args)
| 6,413 | 38.109756 | 79 | py |
UNITER | UNITER-master/train_vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VQA
"""
import argparse
import json
import os
from os.path import abspath, dirname, exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, PrefetchLoader,
TxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
VqaDataset, VqaEvalDataset,
vqa_collate, vqa_eval_collate)
from model.vqa import UniterForVisualQuestionAnswering
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import BUCKET_SIZE, IMG_DIM
def build_dataloader(dataset, collate_fn, is_train, opts):
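    """Create a token-bucketed, prefetching DataLoader for VQA training or evaluation."""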
batch_size = (opts.train_batch_size if is_train
else opts.val_batch_size)
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
dataloader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def build_optimizer(model, opts):
""" vqa linear may get larger learning rate """
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = [(n, p) for n, p in model.named_parameters()
if 'vqa_output' not in n]
param_top = [(n, p) for n, p in model.named_parameters()
if 'vqa_output' in n]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
# currently Adam only
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
ans2label = json.load(open(f'{dirname(abspath(__file__))}'
f'/utils/ans2label.json'))
label2ans = {label: ans for ans, label in ans2label.items()}
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets.append(VqaDataset(len(ans2label), txt_db, img_db))
train_dataset = ConcatDatasetWithLens(train_datasets)
train_dataloader = build_dataloader(train_dataset, vqa_collate, True, opts)
# val
    LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, -1)
val_dataset = VqaEvalDataset(len(ans2label), val_txt_db, val_img_db)
val_dataloader = build_dataloader(val_dataset, vqa_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = opts.train_txt_dbs + [opts.val_txt_db]
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(toker == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
model = UniterForVisualQuestionAnswering.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, num_answer=len(ans2label))
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
json.dump(ans2label,
open(join(opts.output_dir, 'ckpt', 'ans2label.json'), 'w'))
os.makedirs(join(opts.output_dir, 'results')) # store VQA predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset) * hvd.size())
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
            loss = loss.mean() * batch['targets'].size(1)  # instance-level bce
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
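                # param groups 0/1 hold the vqa_output head (lr scaled by lr_mul); groups 2/3 hold the rest of the model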
for i, param_group in enumerate(optimizer.param_groups):
if i == 0 or i == 1:
param_group['lr'] = lr_this_step * opts.lr_mul
elif i == 2 or i == 3:
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info(f'===========================================')
if global_step % opts.valid_steps == 0:
val_log, results = validate(
model, val_dataloader, label2ans)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
if opts.num_train_steps % opts.valid_steps != 0:
val_log, results = validate(model, val_dataloader, label2ans)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
@torch.no_grad()
def validate(model, val_loader, label2ans):
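    """Evaluate VQA soft-score accuracy and BCE loss; returns logs and a qid->answer mapping."""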
LOGGER.info("start running validation...")
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
scores = model(batch, compute_loss=False)
targets = batch['targets']
loss = F.binary_cross_entropy_with_logits(
scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += compute_score_with_logits(scores, targets).sum().item()
answers = [label2ans[i]
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
for qid, answer in zip(batch['qids'], answers):
results[qid] = answer
n_ex += len(batch['qids'])
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {'valid/loss': val_loss,
'valid/acc': val_acc,
'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
def compute_score_with_logits(logits, labels):
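    """Return the soft target score selected by the argmax prediction for each example."""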
logits = torch.max(logits, 1)[1] # argmax
one_hots = torch.zeros(*labels.size(), device=labels.device)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--lr_mul", default=10.0, type=float,
help="multiplier for top layer lr")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 16,988 | 41.261194 | 79 | py |
UNITER | UNITER-master/train_ve.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for SNLI-VE
"""
import argparse
import json
import os
from os.path import exists, join
import pickle
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, PrefetchLoader,
DetectFeatLmdb, TxtTokLmdb,
VeDataset, VeEvalDataset,
ve_collate, ve_eval_collate)
from model.ve import UniterForVisualEntailment
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.misc import VE_ENT2IDX as ans2label
from utils.misc import VE_IDX2ENT as label2ans
from utils.const import IMG_DIM, BUCKET_SIZE
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
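    """Build a token-bucketed, prefetching DataLoader for one SNLI-VE split."""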
img_db = DetectFeatLmdb(img_path, opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
dset = dset_cls(txt_db, img_db)
sampler = TokenBucketSampler(dset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return PrefetchLoader(loader)
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_db}")
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
VeDataset, ve_collate, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
VeEvalDataset, ve_eval_collate, opts)
test_dataloader = create_dataloader(opts.test_img_db, opts.test_txt_db,
opts.val_batch_size, False,
VeEvalDataset, ve_eval_collate, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
bert_model = json.load(open(f'{opts.train_txt_db}/meta.json'))['bert']
if 'bert' not in bert_model:
bert_model = 'bert-large-cased' # quick hack for glove exp
model = UniterForVisualEntailment.from_pretrained(
opts.model_config, state_dict=checkpoint, img_dim=IMG_DIM)
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
pickle.dump(ans2label,
open(join(opts.output_dir, 'ckpt', 'ans2label.pkl'), 'wb'))
os.makedirs(join(opts.output_dir, 'results')) # store VQA predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
            loss = loss.mean() * batch['targets'].size(1)  # instance-level bce
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info(f'===========================================')
if global_step % opts.valid_steps == 0:
for split, loader in [("val", val_dataloader),
("test", test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
val_log, results = validate(
model, loader, label2ans, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"Step {global_step}: finished {n_epoch} epochs")
if opts.num_train_steps % opts.valid_steps != 0:
for split, loader in [("val", val_dataloader),
("test", test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
val_log, results = validate(model, loader, label2ans, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}_final.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
@torch.no_grad()
def validate(model, val_loader, label2ans, split='val'):
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
scores = model(batch, compute_loss=False)
targets = batch['targets']
loss = F.binary_cross_entropy_with_logits(
scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += compute_score_with_logits(scores, targets).sum().item()
answers = [label2ans[i]
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
qids = batch['qids']
for qid, answer in zip(qids, answers):
results[qid] = answer
n_ex += len(qids)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/{split}_loss': val_loss,
f'valid/{split}_acc': val_acc,
f'valid/{split}_ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1] # argmax
one_hots = torch.zeros(*labels.size(), device=labels.device)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_db",
default=None, type=str,
help="The input validation images.")
parser.add_argument("--test_txt_db",
default=None, type=str,
help="The input test corpus. (LMDB)")
parser.add_argument("--test_img_db",
default=None, type=str,
help="The input test images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model (can take 'google-bert') ")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 16,875 | 41.724051 | 79 | py |
UNITER | UNITER-master/train_re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for RE
"""
import argparse
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, DetectFeatLmdb,
ReTxtTokLmdb, ReDataset, ReEvalDataset,
re_collate, re_eval_collate)
from data.sampler import DistributedSampler
from model.re import UniterForReferringExpressionComprehension
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (
all_gather_list, all_reduce_and_rescale_tensors,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import (
NoOp, parse_with_config, set_dropout, set_random_seed)
from utils.const import IMG_DIM
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
img_db_type = "gt" if "coco_gt" in img_path else "det"
conf_th = -1 if img_db_type == "gt" else opts.conf_th
num_bb = 100 if img_db_type == "gt" else opts.num_bb
img_db = DetectFeatLmdb(img_path, conf_th, opts.max_bb, opts.min_bb,
num_bb, opts.compressed_db)
txt_db = ReTxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
if is_train:
dset = dset_cls(txt_db, img_db)
else:
dset = dset_cls(txt_db, img_db, use_gt_feat=img_db_type == "gt")
batch_size = (opts.train_batch_size if is_train
else opts.val_batch_size)
sampler = DistributedSampler(dset, num_replicas=hvd.size(),
rank=hvd.rank(), shuffle=False)
dataloader = DataLoader(dset, sampler=sampler,
batch_size=batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def build_optimizer(model, opts):
""" Re linear may get larger learning rate """
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = [(n, p) for n, p in model.named_parameters()
if 're_output' not in n]
param_top = [(n, p) for n, p in model.named_parameters()
if 're_output' in n]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
# currently Adam only
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
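# Added sketch: the four parameter groups above are ordered so the training loop
# can give the RE head its own learning rate. With a hypothetical lr_mul of 10
# and learning_rate of 3e-5:
#   groups 0-1 ('re_output' head)  -> lr = 3e-5 * 10
#   groups 2-3 (UNITER backbone)   -> lr = 3e-5
# (see the param_group indexing inside the training loop in main()).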
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_db}")
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
ReDataset, re_collate, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
ReEvalDataset, re_eval_collate, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = [opts.train_txt_db, opts.val_txt_db]
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['toker']
assert all(toker == json.load(open(f'{db}/meta.json'))['toker']
for db in all_dbs)
model = UniterForReferringExpressionComprehension.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, loss=opts.train_loss,
margin=opts.margin,
hard_ratio=opts.hard_ratio, mlp=opts.mlp,)
model.to(device)
model.train()
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
optimizer = build_optimizer(model, opts)
# Apex
model, optimizer = amp.initialize(
model, optimizer, enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'), 'model_epoch')
os.makedirs(join(opts.output_dir, 'results')) # store RE predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
best_val_acc, best_epoch = None, None
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
if global_step == 0:
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
if global_step >= opts.num_train_steps:
break
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.sum() # sum over vectorized loss TODO: investigate
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(
loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for i, param_group in enumerate(optimizer.param_groups):
if i == 0 or i == 1:
param_group['lr'] = lr_this_step * opts.lr_mul
elif i == 2 or i == 3:
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info('===========================================')
# evaluate after each epoch
val_log, _ = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
# save model
n_epoch += 1
model_saver.save(model, n_epoch)
LOGGER.info(f"finished {n_epoch} epochs")
# save best model
if best_val_acc is None or val_log['valid/acc'] > best_val_acc:
best_val_acc = val_log['valid/acc']
best_epoch = n_epoch
model_saver.save(model, 'best')
# shuffle training data for the next epoch
train_dataloader.loader.dataset.shuffle()
# is training finished?
if global_step >= opts.num_train_steps:
break
val_log, results = validate(model, val_dataloader)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}_final.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final')
# print best model
LOGGER.info(
f'best_val_acc = {best_val_acc*100:.2f}% at epoch {best_epoch}.')
@torch.no_grad()
def validate(model, val_dataloader):
LOGGER.info("start running evaluation.")
model.eval()
tot_score = 0
n_ex = 0
st = time()
predictions = {}
for i, batch in enumerate(val_dataloader):
# inputs
(tgt_box_list, obj_boxes_list, sent_ids) = (
batch['tgt_box'], batch['obj_boxes'], batch['sent_ids'])
# scores (n, max_num_bb)
scores = model(batch, compute_loss=False)
ixs = torch.argmax(scores, 1).cpu().detach().numpy() # (n, )
# pred_boxes
for ix, obj_boxes, tgt_box, sent_id in \
zip(ixs, obj_boxes_list, tgt_box_list, sent_ids):
pred_box = obj_boxes[ix]
predictions[int(sent_id)] = {
'pred_box': pred_box.tolist(),
'tgt_box': tgt_box.tolist()}
if val_dataloader.loader.dataset.computeIoU(
pred_box, tgt_box) > .5:
tot_score += 1
n_ex += 1
tot_time = time()-st
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
val_acc = tot_score / n_ex
val_log = {'valid/acc': val_acc, 'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(
f"validation ({n_ex} sents) finished in {int(tot_time)} seconds"
f", accuracy: {val_acc*100:.2f}%")
return val_log, predictions
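# Added note: the accuracy above is the standard referring-expression metric --
# a prediction is correct when IoU(pred_box, tgt_box) > 0.5. A minimal sketch of
# that check, assuming [x1, y1, x2, y2] boxes (the dataset's computeIoU is not
# shown in this file):
#   inter = max(0, min(x2, X2) - max(x1, X1)) * max(0, min(y2, Y2) - max(y1, Y1))
#   iou = inter / (area_pred + area_gt - inter)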
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_db",
default=None, type=str,
help="The input validation images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model (can take 'google-bert') ")
parser.add_argument("--mlp", default=1, type=int,
help="number of MLP layers for RE output")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=128, type=int,
help="Total batch size for training. "
"(batch by examples)")
parser.add_argument("--val_batch_size",
default=256, type=int,
help="Total batch size for validation. "
"(batch by examples)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--train_loss",
default="cls", type=str,
choices=['cls', 'rank'],
help="loss to used during training")
parser.add_argument("--margin",
default=0.2, type=float,
help="margin of ranking loss")
parser.add_argument("--hard_ratio",
default=0.3, type=float,
help="sampling ratio of hard negatives")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_steps",
default=32000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+', type=float,
help="beta for adam optimizer")
parser.add_argument("--decay", default='linear',
choices=['linear', 'invsqrt', 'constant'],
help="learning rate decay method")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed',
type=int,
default=24,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
# options safe guard
main(args)
| 18,420 | 39.220524 | 79 | py |
UNITER | UNITER-master/train_itm_hard_negatives.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for Image-Text Retrieval with hard negatives
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, ConcatDataset
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, TxtTokLmdb, ImageLmdbGroup,
ItmRankDatasetHardNegFromText,
ItmRankDatasetHardNegFromImage, itm_rank_hn_collate,
ItmValDataset, itm_val_collate,
ItmEvalDataset, itm_eval_collate)
from model.itm import UniterForImageTextRetrievalHardNeg
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM
from utils.itm_eval import evaluate
def build_dataloader(dataset, collate_fn, is_train, opts):
dataloader = DataLoader(dataset, batch_size=1,
shuffle=is_train, drop_last=is_train,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
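# Added note: batch_size=1 is intentional -- each item of the hard-negative rank
# datasets already packs one positive pair plus `negative_size` sampled negatives,
# and the effective training batch is emulated later via gradient accumulation
# (see the `step % opts.train_batch_size` logic in main()).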
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
set_random_seed(opts.seed)
if hvd.rank() == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
# store ITM predictions
os.makedirs(join(opts.output_dir, 'results_val'))
os.makedirs(join(opts.output_dir, 'results_test'))
os.makedirs(join(opts.output_dir, 'results_train'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_dbs}, "
f"{opts.train_img_dbs}")
# check multiple DBs
assert len(opts.train_txt_dbs) == len(opts.train_img_dbs), \
"train txt_db and img_db have different length"
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets_t = []
train_datasets_i = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets_t.append(
ItmRankDatasetHardNegFromText(txt_db, img_db, opts.negative_size))
train_datasets_i.append(
ItmRankDatasetHardNegFromImage(txt_db, img_db, opts.negative_size))
train_dataset_t = ConcatDataset(train_datasets_t)
train_dataset_i = ConcatDataset(train_datasets_i)
train_dataloader_t = build_dataloader(
train_dataset_t, itm_rank_hn_collate, True, opts)
train_dataloader_i = build_dataloader(
train_dataset_i, itm_rank_hn_collate, True, opts)
# val
LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, -1)
val_dataset = ItmValDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
val_dataloader = build_dataloader(val_dataset, itm_val_collate,
False, opts)
# eval
LOGGER.info(f"Loading val, test Dataset for full evaluation: "
f"{opts.val_txt_db}, {opts.val_img_db}"
f"{opts.test_txt_db}, {opts.test_img_db}")
eval_dataset_val = ItmEvalDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
eval_loader_val = build_dataloader(eval_dataset_val, itm_eval_collate,
False, opts)
test_img_db = all_img_dbs[opts.test_img_db]
test_txt_db = TxtTokLmdb(opts.test_txt_db, -1)
eval_dataset_test = ItmEvalDataset(test_txt_db, test_img_db,
opts.inf_minibatch_size)
eval_loader_test = build_dataloader(eval_dataset_test, itm_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForImageTextRetrievalHardNeg.from_pretrained(
opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM, margin=opts.margin, hard_size=opts.hard_neg_size)
model.init_output() # pretrain ITM head is different from ranking head
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
LOGGER.info(f"***** Running training on {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d",
sum(all_gather_list(len(train_dataset_t))))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
global_step = 0
step = 0
n_examples = 0
n_hard_ex = 0
start = time()
train_iter_i = iter(train_dataloader_i)
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for batch in train_dataloader_t:
# hard text from image
try:
batch_i = next(train_iter_i)
except StopIteration:
train_iter_i = iter(train_dataloader_i)
batch_i = next(train_iter_i)
n_examples += batch_i['attn_masks'].size(0)
loss = model(batch_i, sample_from='i', compute_loss=True)
n_hard_ex += loss.numel()
loss = loss.mean() / opts.train_batch_size
with amp.scale_loss(loss, optimizer, delay_unscale=True
) as scaled_loss:
scaled_loss.backward()
# hard image from text
n_examples += batch['attn_masks'].size(0)
loss = model(batch, sample_from='t', compute_loss=True)
n_hard_ex += loss.numel()
            # NOTE: we use gradient accumulation to implement train_batch_size
loss = loss.mean() / opts.train_batch_size
step += 1
delay_unscale = step % opts.train_batch_size != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if step % opts.train_batch_size == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'------------Step {global_step}-------------')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
tot_hn = sum(all_gather_list(n_hard_ex))
hn_per_sec = int(tot_hn / (time()-start))
LOGGER.info(f'{tot_ex} ({tot_hn}) examples (hard) '
f'trained at {ex_per_sec} ({hn_per_sec}) ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
TB_LOGGER.add_scalar('perf/hn_per_s',
hn_per_sec, global_step)
LOGGER.info(f'-------------------------------------------')
if global_step % opts.valid_steps == 0:
if opts.full_val:
LOGGER.info(
f"========================== Step {global_step} "
f"==========================")
val_log = evaluate(model, eval_loader_val)
TB_LOGGER.log_scaler_dict(
{f"valid/{k}": v for k, v in val_log.items()})
LOGGER.info(f"image retrieval R1: "
f"{val_log['img_r1']*100:.2f},\n"
f"image retrieval R5: "
f"{val_log['img_r5']*100:.2f},\n"
f"image retrieval R10: "
f"{val_log['img_r10']*100:.2f}\n"
f"text retrieval R1: "
f"{val_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: "
f"{val_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: "
f"{val_log['txt_r10']*100:.2f}")
LOGGER.info("================================="
"=================================")
else:
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
pbar.close()
# final validation
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final')
# evaluation
for split, loader in [('val', eval_loader_val),
('test', eval_loader_test)]:
eval_log = evaluate(model, loader)
TB_LOGGER.log_scaler_dict({f"eval/{split}_{k}": v
for k, v in eval_log.items()})
if hvd.rank() != 0:
continue
LOGGER.info(
f"========================= {split} ===========================\n"
f"image retrieval R1: {eval_log['img_r1']*100:.2f},\n"
f"image retrieval R5: {eval_log['img_r5']*100:.2f},\n"
f"image retrieval R10: {eval_log['img_r10']*100:.2f}\n"
f"text retrieval R1: {eval_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: {eval_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: {eval_log['txt_r10']*100:.2f}")
LOGGER.info("=========================================================")
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
pbar = tqdm(total=len(val_loader))
else:
pbar = NoOp()
LOGGER.info("start running Image Retrieval validation ...")
model.eval()
n_ex = 0
st = time()
recall_at_1, recall_at_5, recall_at_10 = 0, 0, 0
for batch in val_loader:
scores = model(batch, compute_loss=False)
_, indices = scores.squeeze(1).topk(10, dim=0)
rank = (indices == 0).nonzero()
if rank.numel():
rank = rank.item()
if rank < 1:
recall_at_1 += 1
if rank < 5:
recall_at_5 += 1
if rank < 10:
recall_at_10 += 1
n_ex += 1
pbar.update(1)
n_ex = sum(all_gather_list(n_ex))
recall_at_1 = sum(all_gather_list(recall_at_1)) / n_ex
recall_at_5 = sum(all_gather_list(recall_at_5)) / n_ex
recall_at_10 = sum(all_gather_list(recall_at_10)) / n_ex
tot_time = time()-st
val_log = {'valid/ex_per_s': n_ex/tot_time,
'valid/recall_1': recall_at_1,
'valid/recall_5': recall_at_5,
'valid/recall_10': recall_at_10}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"recall_1: {recall_at_1*100:.2f}, "
f"recall_5: {recall_at_5*100:.2f}, "
f"recall_10: {recall_at_10*100:.2f}")
pbar.close()
return val_log
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained MLM")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory where the model "
"checkpoints will be written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=32, type=int,
help="batch size (# positive examples) for training. "
"(implemented with gradient accumulation)")
parser.add_argument("--negative_size", default=511, type=int,
help="Number of negative samples per positive sample"
"(forward only)")
parser.add_argument("--hard_neg_size", default=31, type=int,
help="Number of hard negative samples "
"per positive sample (acutally used to train)")
parser.add_argument("--inf_minibatch_size", default=512, type=int,
help="batch size for running inference. "
"(used for validation and evaluation)")
parser.add_argument("--margin", default=0.2, type=float,
help="margin of ranking loss")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=0.25, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--full_val', action='store_true',
help="Always run full evaluation during training")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
# for tensor core
assert (args.negative_size+1) % 8 == (args.hard_neg_size+1) % 8 == 0
main(args)
| 19,146 | 42.417234 | 79 | py |
UNITER | UNITER-master/optim/misc.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Misc optimizer helpers
"""
from torch.optim import Adam, Adamax
from .adamw import AdamW
def build_optimizer(model, opts):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
# currently Adam only
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
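# Usage sketch (added; the `opts` values below are assumptions taken from the
# training scripts in this repo, not requirements of this helper):
#   opts.optim = 'adamw'; opts.learning_rate = 3e-5
#   opts.betas = (0.9, 0.98); opts.weight_decay = 0.01
#   optimizer = build_optimizer(model, opts)
# Bias and LayerNorm parameters are excluded from weight decay, as is usual for BERT.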
| 1,037 | 27.833333 | 65 | py |
UNITER | UNITER-master/optim/adamw.py | """
AdamW optimizer (weight decay fix)
copied from huggingface (https://github.com/huggingface/transformers).
"""
import math
import torch
from torch.optim import Optimizer
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2).
Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias
in Adam (e.g. like in Bert TF repository). Default True.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0.0, correct_bias=True):
if lr < 0.0:
raise ValueError(
"Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - "
"should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - "
"should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - "
"should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
correct_bias=correct_bias)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse '
'gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
step_size = group['lr']
if group['correct_bias']: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state['step']
bias_correction2 = 1.0 - beta2 ** state['step']
step_size = (step_size * math.sqrt(bias_correction2)
/ bias_correction1)
p.data.addcdiv_(-step_size, exp_avg, denom)
# Just adding the square of the weights to the loss function is
# *not* the correct way of using L2 regularization/weight decay
# with Adam, since that will interact with the m and v
# parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't
# interact with the m/v parameters. This is equivalent to
# adding the square of the weights to the loss with plain
# (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group['weight_decay'] > 0.0:
p.data.add_(-group['lr'] * group['weight_decay'], p.data)
return loss
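# Usage sketch (added; not part of the upstream implementation):
#   optimizer = AdamW(model.parameters(), lr=3e-5, betas=(0.9, 0.98),
#                     weight_decay=0.01, correct_bias=False)
# correct_bias=False reproduces the BERT/TF behaviour of skipping bias correction;
# the decoupled weight decay is applied after the Adam update itself.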
| 4,450 | 41.798077 | 79 | py |
UNITER | UNITER-master/optim/__init__.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
from .sched import noam_schedule, warmup_linear, vqa_schedule, get_lr_sched
from .adamw import AdamW
| 179 | 21.5 | 75 | py |
UNITER | UNITER-master/optim/sched.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
optimizer learning rate scheduling helpers
"""
from math import ceil
def noam_schedule(step, warmup_step=4000):
""" original Transformer schedule"""
if step <= warmup_step:
return step / warmup_step
return (warmup_step ** 0.5) * (step ** -0.5)
def warmup_linear(step, warmup_step, tot_step):
""" BERT schedule """
if step < warmup_step:
return step / warmup_step
return max(0, (tot_step-step)/(tot_step-warmup_step))
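# Worked examples (added for clarity):
#   noam_schedule(2000, warmup_step=4000) == 0.5    # linear warmup
#   noam_schedule(16000, warmup_step=4000) == 0.5   # inverse-sqrt decay
#   warmup_linear(2000, 4000, 100000) == 0.5        # halfway through warmup
#   warmup_linear(52000, 4000, 100000) == 0.5       # halfway down linear decay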
def vqa_schedule(step, warmup_interval, decay_interval,
decay_start, decay_rate):
""" VQA schedule from MCAN """
if step < warmup_interval:
return 1/4
elif step < 2 * warmup_interval:
return 2/4
elif step < 3 * warmup_interval:
return 3/4
elif step >= decay_start:
num_decay = ceil((step - decay_start) / decay_interval)
return decay_rate ** num_decay
else:
return 1
def get_lr_sched(global_step, opts):
# learning rate scheduling
lr_this_step = opts.learning_rate * warmup_linear(
global_step, opts.warmup_steps, opts.num_train_steps)
if lr_this_step <= 0:
lr_this_step = 1e-8
return lr_this_step
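# Added example: with opts.learning_rate=3e-5, opts.warmup_steps=4000 and
# opts.num_train_steps=100000, get_lr_sched returns 1.5e-5 at step 2000 (warmup)
# and again at step 52000 (linear decay); the 1e-8 floor only kicks in once the
# schedule reaches the end of training.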
| 1,258 | 25.787234 | 63 | py |
UNITER | UNITER-master/scripts/eval_nlvr2.py | """
copied from official NLVR2 github
(https://github.com/lil-lab/nlvr/tree/master/nlvr2)
python scripts/eval_nlvr2.py <output.csv> <annotation.json>
"""
import json
import sys
# Load the predictions file. Assume it is a CSV.
predictions = { }
for line in open(sys.argv[1]).readlines():
if line:
splits = line.strip().split(",")
# We assume identifiers are in the format "split-####-#-#.png".
identifier = splits[0]
prediction = splits[1]
predictions[identifier] = prediction
# Load the labeled examples.
labeled_examples = [json.loads(line) for line in open(sys.argv[2]).readlines() if line]
# Make sure a prediction exists for every labeled example.
# If not, identify the ones that are missing, and exit.
total_num = len(labeled_examples)
if len(predictions) < total_num:
print("Some predictions are missing!")
print("Got " + str(len(predictions)) + " predictions but expected " + str(total_num))
for example in labeled_examples:
lookup = example["identifier"]
if not lookup in predictions:
print("Missing prediction for item " + str(lookup))
exit()
# Get the precision by iterating through the examples and checking the value
# that was predicted.
# Also update the "consistency" dictionary that keeps track of whether all
# predictions for a given sentence were correct.
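# Added example: identifiers for the same sentence differ only in the field at
# index 2, so e.g. "dev-850-0-0" and "dev-850-3-0" both collapse to "dev-850--0";
# a sentence counts as consistent only if all of its examples are predicted
# correctly.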
num_correct = 0.
consistency_dict = { }
for example in labeled_examples:
anon_label = example["identifier"].split("-")
anon_label[2] = ''
anon_label = '-'.join(anon_label)
if not anon_label in consistency_dict:
consistency_dict[anon_label] = True
lookup = example["identifier"]
prediction = predictions[lookup]
if prediction.lower() == example["label"].lower():
num_correct += 1.
else:
consistency_dict[anon_label] = False
# Calculate consistency.
num_consistent = 0.
unique_sentence = len(consistency_dict)
for identifier, consistent in consistency_dict.items():
if consistent:
num_consistent += 1
# Report values.
print("accuracy=" + str(num_correct / total_num))
print("consistency=" + str(num_consistent / unique_sentence))
| 2,038 | 30.369231 | 87 | py |
UNITER | UNITER-master/scripts/convert_imgdir.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
convert image npz to LMDB
"""
import argparse
import glob
import io
import json
import multiprocessing as mp
import os
from os.path import basename, exists
from cytoolz import curry
import numpy as np
from tqdm import tqdm
import lmdb
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
def _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb):
num_bb = max(min_bb, (img_dump['conf'] > conf_th).sum())
num_bb = min(max_bb, num_bb)
return int(num_bb)
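# Added worked example: with the defaults conf_th=0.2, min_bb=10, max_bb=100,
# an image with 57 boxes above the confidence threshold keeps 57 regions, one
# with only 3 confident boxes is padded up to 10, and one with 180 is capped at
# 100. (The `num_bb` parameter is immediately overwritten, so it is unused here.)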
@curry
def load_npz(conf_th, max_bb, min_bb, num_bb, fname, keep_all=False):
try:
img_dump = np.load(fname, allow_pickle=True)
if keep_all:
nbb = None
else:
nbb = _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb)
dump = {}
for key, arr in img_dump.items():
if arr.dtype == np.float32:
arr = arr.astype(np.float16)
if arr.ndim == 2:
dump[key] = arr[:nbb, :]
elif arr.ndim == 1:
dump[key] = arr[:nbb]
else:
raise ValueError('wrong ndim')
except Exception as e:
# corrupted file
print(f'corrupted file {fname}', e)
dump = {}
nbb = 0
name = basename(fname)
return name, dump, nbb
def dumps_npz(dump, compress=False):
with io.BytesIO() as writer:
if compress:
np.savez_compressed(writer, **dump, allow_pickle=True)
else:
np.savez(writer, **dump, allow_pickle=True)
return writer.getvalue()
def dumps_msgpack(dump):
return msgpack.dumps(dump, use_bin_type=True)
def main(opts):
if opts.img_dir[-1] == '/':
opts.img_dir = opts.img_dir[:-1]
split = basename(opts.img_dir)
if opts.keep_all:
db_name = 'all'
else:
if opts.conf_th == -1:
db_name = f'feat_numbb{opts.num_bb}'
else:
db_name = (f'feat_th{opts.conf_th}_max{opts.max_bb}'
f'_min{opts.min_bb}')
if opts.compress:
db_name += '_compressed'
if not exists(f'{opts.output}/{split}'):
os.makedirs(f'{opts.output}/{split}')
env = lmdb.open(f'{opts.output}/{split}/{db_name}', map_size=1024**4)
txn = env.begin(write=True)
files = glob.glob(f'{opts.img_dir}/*.npz')
load = load_npz(opts.conf_th, opts.max_bb, opts.min_bb, opts.num_bb,
keep_all=opts.keep_all)
name2nbb = {}
with mp.Pool(opts.nproc) as pool, tqdm(total=len(files)) as pbar:
for i, (fname, features, nbb) in enumerate(
pool.imap_unordered(load, files, chunksize=128)):
if not features:
continue # corrupted feature
if opts.compress:
dump = dumps_npz(features, compress=True)
else:
dump = dumps_msgpack(features)
txn.put(key=fname.encode('utf-8'), value=dump)
if i % 1000 == 0:
txn.commit()
txn = env.begin(write=True)
name2nbb[fname] = nbb
pbar.update(1)
txn.put(key=b'__keys__',
value=json.dumps(list(name2nbb.keys())).encode('utf-8'))
txn.commit()
env.close()
if opts.conf_th != -1 and not opts.keep_all:
with open(f'{opts.output}/{split}/'
f'nbb_th{opts.conf_th}_'
f'max{opts.max_bb}_min{opts.min_bb}.json', 'w') as f:
json.dump(name2nbb, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--img_dir", default=None, type=str,
help="The input images.")
parser.add_argument("--output", default=None, type=str,
help="output lmdb")
parser.add_argument('--nproc', type=int, default=8,
help='number of cores used')
parser.add_argument('--compress', action='store_true',
help='compress the tensors')
parser.add_argument('--keep_all', action='store_true',
help='keep all features, overrides all following args')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=100,
help='number of bounding boxes (fixed)')
args = parser.parse_args()
main(args)
| 4,777 | 32.412587 | 79 | py |
UNITER | UNITER-master/scripts/convert_ckpt.py | import sys
from collections import OrderedDict
import torch
bert_ckpt, output_ckpt = sys.argv[1:]
bert = torch.load(bert_ckpt)
uniter = OrderedDict()
for k, v in bert.items():
uniter[k.replace('bert', 'uniter')] = v
torch.save(uniter, output_ckpt)
| 256 | 17.357143 | 43 | py |
UNITER | UNITER-master/utils/const.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
constants
"""
IMG_DIM = 2048
IMG_LABEL_DIM = 1601
BUCKET_SIZE = 8192
| 143 | 13.4 | 36 | py |
UNITER | UNITER-master/utils/misc.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Misc utilities
"""
import json
import random
import sys
import torch
import numpy as np
from utils.logger import LOGGER
class NoOp(object):
""" useful for distributed training No-Ops """
def __getattr__(self, name):
return self.noop
def noop(self, *args, **kwargs):
return
def parse_with_config(parser):
args = parser.parse_args()
if args.config is not None:
config_args = json.load(open(args.config))
override_keys = {arg[2:].split('=')[0] for arg in sys.argv[1:]
if arg.startswith('--')}
for k, v in config_args.items():
if k not in override_keys:
setattr(args, k, v)
del args.config
return args
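# Added behaviour sketch: values from --config only fill in options that were not
# given explicitly on the command line, e.g. (file names are hypothetical)
#   python train.py --config config/train-vqa.json --learning_rate 1e-4
# keeps learning_rate=1e-4 from the CLI while all other hyper-parameters come
# from the JSON file.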
VE_ENT2IDX = {
'contradiction': 0,
'entailment': 1,
'neutral': 2
}
VE_IDX2ENT = {
0: 'contradiction',
1: 'entailment',
2: 'neutral'
}
class Struct(object):
def __init__(self, dict_):
self.__dict__.update(dict_)
def set_dropout(model, drop_p):
for name, module in model.named_modules():
# we might want to tune dropout for smaller dataset
if isinstance(module, torch.nn.Dropout):
if module.p != drop_p:
module.p = drop_p
LOGGER.info(f'{name} set to {drop_p}')
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
| 1,507 | 20.239437 | 70 | py |
UNITER | UNITER-master/utils/save.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
saving utilities
"""
import json
import os
from os.path import abspath, dirname, exists, join
import subprocess
import torch
from utils.logger import LOGGER
def save_training_meta(args):
if args.rank > 0:
return
if not exists(args.output_dir):
os.makedirs(join(args.output_dir, 'log'))
os.makedirs(join(args.output_dir, 'ckpt'))
with open(join(args.output_dir, 'log', 'hps.json'), 'w') as writer:
json.dump(vars(args), writer, indent=4)
model_config = json.load(open(args.model_config))
with open(join(args.output_dir, 'log', 'model.json'), 'w') as writer:
json.dump(model_config, writer, indent=4)
# git info
try:
LOGGER.info("Waiting on git info....")
c = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
timeout=10, stdout=subprocess.PIPE)
git_branch_name = c.stdout.decode().strip()
LOGGER.info("Git branch: %s", git_branch_name)
c = subprocess.run(["git", "rev-parse", "HEAD"],
timeout=10, stdout=subprocess.PIPE)
git_sha = c.stdout.decode().strip()
LOGGER.info("Git SHA: %s", git_sha)
git_dir = abspath(dirname(__file__))
git_status = subprocess.check_output(
['git', 'status', '--short'],
cwd=git_dir, universal_newlines=True).strip()
with open(join(args.output_dir, 'log', 'git_info.json'),
'w') as writer:
json.dump({'branch': git_branch_name,
'is_dirty': bool(git_status),
'status': git_status,
'sha': git_sha},
writer, indent=4)
except subprocess.TimeoutExpired as e:
LOGGER.exception(e)
LOGGER.warn("Git info not found. Moving right along...")
class ModelSaver(object):
def __init__(self, output_dir, prefix='model_step', suffix='pt'):
self.output_dir = output_dir
self.prefix = prefix
self.suffix = suffix
def save(self, model, step, optimizer=None):
output_model_file = join(self.output_dir,
f"{self.prefix}_{step}.{self.suffix}")
state_dict = {k: v.cpu() if isinstance(v, torch.Tensor) else v
for k, v in model.state_dict().items()}
torch.save(state_dict, output_model_file)
if optimizer is not None:
dump = {'step': step, 'optimizer': optimizer.state_dict()}
if hasattr(optimizer, '_amp_stash'):
pass # TODO fp16 optimizer
torch.save(dump, f'{self.output_dir}/train_state_{step}.pt')
| 2,734 | 35.959459 | 73 | py |
UNITER | UNITER-master/utils/logger.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
helper for logging
NOTE: loggers are global objects; use with caution
"""
import logging
import math
import tensorboardX
_LOG_FMT = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
_DATE_FMT = '%m/%d/%Y %H:%M:%S'
logging.basicConfig(format=_LOG_FMT, datefmt=_DATE_FMT, level=logging.INFO)
LOGGER = logging.getLogger('__main__') # this is the global logger
def add_log_to_file(log_path):
fh = logging.FileHandler(log_path)
formatter = logging.Formatter(_LOG_FMT, datefmt=_DATE_FMT)
fh.setFormatter(formatter)
LOGGER.addHandler(fh)
class TensorboardLogger(object):
def __init__(self):
self._logger = None
self._global_step = 0
def create(self, path):
self._logger = tensorboardX.SummaryWriter(path)
def noop(self, *args, **kwargs):
return
def step(self):
self._global_step += 1
@property
def global_step(self):
return self._global_step
def log_scaler_dict(self, log_dict, prefix=''):
""" log a dictionary of scalar values"""
if self._logger is None:
return
if prefix:
prefix = f'{prefix}_'
for name, value in log_dict.items():
if isinstance(value, dict):
                self.log_scaler_dict(value, prefix=f'{prefix}{name}')
else:
self._logger.add_scalar(f'{prefix}{name}', value,
self._global_step)
def __getattr__(self, name):
if self._logger is None:
return self.noop
return self._logger.__getattribute__(name)
TB_LOGGER = TensorboardLogger()
class RunningMeter(object):
""" running meteor of a scalar value
(useful for monitoring training loss)
"""
def __init__(self, name, val=None, smooth=0.99):
self._name = name
self._sm = smooth
self._val = val
def __call__(self, value):
val = (value if self._val is None
else value*(1-self._sm) + self._val*self._sm)
if not math.isnan(val):
self._val = val
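        # Added note: this is an exponential moving average,
        #   new_val = (1 - smooth) * value + smooth * old_val
        # e.g. with the default smooth=0.99, a reading of 2.0 on top of a running
        # value of 1.0 moves the meter to 0.01*2.0 + 0.99*1.0 = 1.01.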
def __str__(self):
return f'{self._name}: {self._val:.4f}'
@property
def val(self):
if self._val is None:
return 0
return self._val
@property
def name(self):
return self._name
| 2,449 | 24.789474 | 75 | py |
UNITER | UNITER-master/utils/itm_eval.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Image Text Retrieval evaluation helper
"""
from time import time
import torch
from horovod import torch as hvd
from tqdm import tqdm
from .logger import LOGGER
from .misc import NoOp
from .distributed import all_gather_list
@torch.no_grad()
def itm_eval(score_matrix, txt_ids, img_ids, txt2img, img2txts):
# image retrieval
img2j = {i: j for j, i in enumerate(img_ids)}
_, rank_txt = score_matrix.topk(10, dim=1)
gt_img_j = torch.LongTensor([img2j[txt2img[txt_id]]
for txt_id in txt_ids],
).to(rank_txt.device
).unsqueeze(1).expand_as(rank_txt)
rank = (rank_txt == gt_img_j).nonzero()
if rank.numel():
ir_r1 = (rank < 1).sum().item() / len(txt_ids)
ir_r5 = (rank < 5).sum().item() / len(txt_ids)
ir_r10 = (rank < 10).sum().item() / len(txt_ids)
else:
ir_r1, ir_r5, ir_r10 = 0, 0, 0
# text retrieval
txt2i = {t: i for i, t in enumerate(txt_ids)}
_, rank_img = score_matrix.topk(10, dim=0)
tr_r1, tr_r5, tr_r10 = 0, 0, 0
for j, img_id in enumerate(img_ids):
gt_is = [txt2i[t] for t in img2txts[img_id]]
ranks = [(rank_img[:, j] == i).nonzero() for i in gt_is]
rank = min([10] + [r.item() for r in ranks if r.numel()])
if rank < 1:
tr_r1 += 1
if rank < 5:
tr_r5 += 1
if rank < 10:
tr_r10 += 1
tr_r1 /= len(img_ids)
tr_r5 /= len(img_ids)
tr_r10 /= len(img_ids)
tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
r_mean = (tr_mean + ir_mean) / 2
eval_log = {'txt_r1': tr_r1,
'txt_r5': tr_r5,
'txt_r10': tr_r10,
'txt_r_mean': tr_mean,
'img_r1': ir_r1,
'img_r5': ir_r5,
'img_r10': ir_r10,
'img_r_mean': ir_mean,
'r_mean': r_mean}
return eval_log
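# Added note: recalls are computed per query. For image retrieval each caption's
# ground-truth image must appear in the top-k ranked images (k = 1/5/10); for
# text retrieval an image is credited with the best rank among all of its
# ground-truth captions. E.g. a ground-truth image ranked 3rd for some caption
# counts towards ir_r5 and ir_r10 but not ir_r1.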
@torch.no_grad()
def evaluate(model, eval_loader):
st = time()
LOGGER.info("start running Image/Text Retrieval evaluation ...")
score_matrix = inference(model, eval_loader)
dset = eval_loader.dataset
all_score = hvd.allgather(score_matrix)
all_txt_ids = [i for ids in all_gather_list(dset.ids)
for i in ids]
all_img_ids = dset.all_img_ids
assert all_score.size() == (len(all_txt_ids), len(all_img_ids))
if hvd.rank() != 0:
return {}
# NOTE: only use rank0 to compute final scores
eval_log = itm_eval(all_score, all_txt_ids, all_img_ids,
dset.txt2img, dset.img2txts)
tot_time = time()-st
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds")
return eval_log
@torch.no_grad()
def inference(model, eval_loader):
model.eval()
if hvd.rank() == 0:
pbar = tqdm(total=len(eval_loader))
else:
pbar = NoOp()
score_matrix = torch.zeros(len(eval_loader.dataset),
len(eval_loader.dataset.all_img_ids),
device=torch.device("cuda"),
dtype=torch.float16)
for i, mini_batches in enumerate(eval_loader):
j = 0
for batch in mini_batches:
scores = model(batch, compute_loss=False)
bs = scores.size(0)
score_matrix.data[i, j:j+bs] = scores.data.squeeze(1).half()
j += bs
assert j == score_matrix.size(1)
pbar.update(1)
model.train()
pbar.close()
return score_matrix
| 3,661 | 30.843478 | 72 | py |
UNITER | UNITER-master/utils/distributed.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
distributed API using Horovod
Modified from OpenNMT's native pytorch distributed utils
(https://github.com/OpenNMT/OpenNMT-py)
"""
import math
import pickle
import torch
from horovod import torch as hvd
def all_reduce_and_rescale_tensors(tensors, rescale_denom):
"""All-reduce and rescale tensors at once (as a flattened tensor)
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
"""
    # flatten all tensors into one buffer (sized by total element count)
sz = sum(t.numel() for t in tensors)
buffer_t = tensors[0].new(sz).zero_()
# copy tensors into buffer_t
offset = 0
for t in tensors:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
hvd.allreduce_(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in tensors:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
def all_reduce_and_rescale_tensors_chunked(tensors, rescale_denom,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
hvd.allreduce_(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
hvd.allreduce_(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer()
def broadcast_tensors(tensors, root_rank, buffer_size=10485760):
"""broadcast tensors in chunks of the specified size.
Args:
tensors: list of Tensors to broadcast
root_rank: rank to broadcast
buffer_size: broadcast chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def broadcast_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# broadcast
hvd.broadcast_(buffer_t[:offset], root_rank)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, broadcast directly
hvd.broadcast_(t, root_rank)
elif filled + sz > buffer_size:
# buffer is full, broadcast and replace buffer with tensor
broadcast_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
broadcast_buffer()
def _encode(enc, max_size, use_max_size=False):
enc_size = len(enc)
enc_byte = max(math.floor(math.log(max_size, 256)+1), 1)
if use_max_size:
# this is used for broadcasting
buffer_ = torch.cuda.ByteTensor(max_size+enc_byte)
else:
buffer_ = torch.cuda.ByteTensor(enc_size+enc_byte)
remainder = enc_size
for i in range(enc_byte):
base = 256 ** (enc_byte-i-1)
buffer_[i] = remainder // base
remainder %= base
buffer_[enc_byte:enc_byte+enc_size] = torch.ByteTensor(list(enc))
return buffer_, enc_byte
def _decode(buffer_, enc_byte):
size = sum(256 ** (enc_byte-i-1) * buffer_[i].item()
for i in range(enc_byte))
bytes_list = bytes(buffer_[enc_byte:enc_byte+size].tolist())
shift = size + enc_byte
return bytes_list, shift
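# Added worked example: _encode prefixes the pickled payload with a big-endian
# length header of enc_byte bytes, where enc_byte = floor(log256(max_size) + 1).
# E.g. max_size=1000 -> enc_byte=2, and a 300-byte payload gets the header
# [1, 44] because 1*256 + 44 == 300; _decode reads the header back to know how
# many bytes of the gathered buffer belong to each rank.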
_BUFFER_SIZE = 4096
def all_gather_list(data):
"""Gathers arbitrary data from all nodes into a list."""
enc = pickle.dumps(data)
enc_size = len(enc)
max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
in_buffer, enc_byte = _encode(enc, max_size)
out_buffer = hvd.allgather(in_buffer[:enc_byte+enc_size])
results = []
for _ in range(hvd.size()):
bytes_list, shift = _decode(out_buffer, enc_byte)
out_buffer = out_buffer[shift:]
result = pickle.loads(bytes_list)
results.append(result)
return results
def any_broadcast(data, root_rank):
"""broadcast arbitrary data from root_rank to all nodes."""
enc = pickle.dumps(data)
max_size = hvd.allgather(torch.tensor([len(enc)]).cuda()).max().item()
buffer_, enc_byte = _encode(enc, max_size, use_max_size=True)
hvd.broadcast_(buffer_, root_rank)
bytes_list, _ = _decode(buffer_, enc_byte)
result = pickle.loads(bytes_list)
return result
| 6,296 | 28.985714 | 77 | py |
UNITER | UNITER-master/utils/__init__.py | 0 | 0 | 0 | py |
|
UNITER | UNITER-master/data/vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VCR dataset
"""
import copy
import json
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
TxtLmdb, get_ids_and_lens, pad_tensors,
get_gather_index)
class VcrTxtTokLmdb(TxtTokLmdb):
def __init__(self, db_dir, max_txt_len=120, task="qa,qar"):
assert task == "qa" or task == "qar" or task == "qa,qar",\
"VCR only support the following tasks: 'qa', 'qar' or 'qa,qar'"
self.task = task
if task == "qa,qar":
id2len_task = "qar"
else:
id2len_task = task
if max_txt_len == -1:
self.id2len = json.load(
open(f'{db_dir}/id2len_{id2len_task}.json'))
else:
self.id2len = {
id_: len_
for id_, len_ in json.load(
open(f'{db_dir}/id2len_{id2len_task}.json')
).items()
if len_ <= max_txt_len
}
self.db_dir = db_dir
self.db = TxtLmdb(db_dir, readonly=True)
meta = json.load(open(f'{db_dir}/meta.json', 'r'))
self.cls_ = meta['CLS']
self.sep = meta['SEP']
self.mask = meta['MASK']
self.v_range = meta['v_range']
class VcrDetectFeatTxtTokDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db_gt=None, img_db=None):
assert not (img_db_gt is None and img_db is None),\
"img_db_gt and img_db cannot all be None"
assert isinstance(txt_db, VcrTxtTokLmdb)
assert img_db_gt is None or isinstance(img_db_gt, DetectFeatLmdb)
assert img_db is None or isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
self.img_db_gt = img_db_gt
self.task = self.txt_db.task
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
if self.img_db and self.img_db_gt:
self.lens = [tl+self.img_db_gt.name2nbb[txt2img[id_][0]] +
self.img_db.name2nbb[txt2img[id_][1]]
for tl, id_ in zip(txt_lens, self.ids)]
elif self.img_db:
self.lens = [tl+self.img_db.name2nbb[txt2img[id_][1]]
for tl, id_ in zip(txt_lens, self.ids)]
else:
self.lens = [tl+self.img_db_gt.name2nbb[txt2img[id_][0]]
for tl, id_ in zip(txt_lens, self.ids)]
def _get_img_feat(self, fname_gt, fname):
if self.img_db and self.img_db_gt:
img_feat_gt, bb_gt = self.img_db_gt[fname_gt]
img_bb_gt = torch.cat([bb_gt, bb_gt[:, 4:5]*bb_gt[:, 5:]], dim=-1)
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_feat = torch.cat([img_feat_gt, img_feat], dim=0)
img_bb = torch.cat([img_bb_gt, img_bb], dim=0)
num_bb = img_feat.size(0)
elif self.img_db:
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
elif self.img_db_gt:
img_feat, bb = self.img_db_gt[fname_gt]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
return img_feat, img_bb, num_bb
class VcrDataset(VcrDetectFeatTxtTokDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        assert self.task != "qa,qar",\
            "training datasets must be loaded per task ('qa' or 'qar'), not 'qa,qar'"
def _get_input_ids(self, txt_dump):
# text input
input_ids_q = txt_dump['input_ids']
type_ids_q = [0]*len(input_ids_q)
input_ids_as = txt_dump['input_ids_as']
if self.task == "qar":
input_ids_rs = txt_dump['input_ids_rs']
answer_label = txt_dump['qa_target']
assert answer_label >= 0, "answer_label < 0"
input_ids_gt_a = [self.txt_db.sep] + copy.deepcopy(
input_ids_as[answer_label])
type_ids_gt_a = [2] * len(input_ids_gt_a)
type_ids_q += type_ids_gt_a
input_ids_q += input_ids_gt_a
input_ids_for_choices = input_ids_rs
else:
input_ids_for_choices = input_ids_as
return input_ids_q, input_ids_for_choices, type_ids_q
def __getitem__(self, i):
"""
[[txt, img1],
[txt, img2]]
"""
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
input_ids_q, input_ids_for_choices, type_ids_q = self._get_input_ids(
example)
label = example['%s_target' % (self.task)]
outs = []
for index, input_ids_a in enumerate(input_ids_for_choices):
if index == label:
target = torch.tensor([1]).long()
else:
target = torch.tensor([0]).long()
input_ids = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\
[self.txt_db.sep] + input_ids_a + [self.txt_db.sep]
# type_id
# 0 -- question
# 1 -- region
# 2 -- answer
# 3 -- rationale
type_id_for_choice = 3 if type_ids_q[-1] == 2 else 2
txt_type_ids = [0] + type_ids_q + [type_id_for_choice]*(
len(input_ids_a)+2)
attn_masks = torch.ones(
len(input_ids) + num_bb, dtype=torch.long)
input_ids = torch.tensor(input_ids)
txt_type_ids = torch.tensor(txt_type_ids)
outs.append(
(input_ids, txt_type_ids,
img_feat, img_pos_feat,
attn_masks, target))
return tuple(outs)
def vcr_collate(inputs):
(input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks, targets) = map(list, unzip(concat(inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_type_ids = pad_sequence(
txt_type_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.stack(targets, dim=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'txt_type_ids': txt_type_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
class VcrEvalDataset(VcrDetectFeatTxtTokDataset):
def __init__(self, split, *args, **kwargs):
super().__init__(*args, **kwargs)
self.split = split
        assert self.task == "qa,qar",\
            "the evaluation dataset must be loaded with both tasks ('qa,qar')"
def _get_input_ids(self, txt_dump):
# text input
input_ids_for_choices = []
type_ids_for_choices = []
input_ids_q = txt_dump['input_ids']
type_ids_q = [0]*len(input_ids_q)
input_ids_as = txt_dump['input_ids_as']
input_ids_rs = txt_dump['input_ids_rs']
for index, input_ids_a in enumerate(input_ids_as):
curr_input_ids_qa = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\
[self.txt_db.sep] + input_ids_a + [self.txt_db.sep]
curr_type_ids_qa = [0] + type_ids_q + [2]*(
len(input_ids_a)+2)
input_ids_for_choices.append(curr_input_ids_qa)
type_ids_for_choices.append(curr_type_ids_qa)
for index, input_ids_a in enumerate(input_ids_as):
curr_input_ids_qa = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\
[self.txt_db.sep] + input_ids_a + [self.txt_db.sep]
curr_type_ids_qa = [0] + type_ids_q + [2]*(
len(input_ids_a)+1)
if (self.split == "val" and index == txt_dump["qa_target"]) or\
self.split == "test":
for input_ids_r in input_ids_rs:
curr_input_ids_qar = copy.deepcopy(curr_input_ids_qa) +\
input_ids_r + [self.txt_db.sep]
curr_type_ids_qar = copy.deepcopy(curr_type_ids_qa) +\
[3]*(len(input_ids_r)+2)
input_ids_for_choices.append(curr_input_ids_qar)
type_ids_for_choices.append(curr_type_ids_qar)
return input_ids_for_choices, type_ids_for_choices
def __getitem__(self, i):
qid = self.ids[i]
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
input_ids_for_choices, type_ids_for_choices = self._get_input_ids(
example)
qa_target = torch.tensor([int(example["qa_target"])])
qar_target = torch.tensor([int(example["qar_target"])])
outs = []
for index, input_ids in enumerate(input_ids_for_choices):
attn_masks = torch.ones(
len(input_ids) + num_bb, dtype=torch.long)
input_ids = torch.tensor(input_ids)
txt_type_ids = torch.tensor(
type_ids_for_choices[index])
outs.append(
(input_ids, txt_type_ids,
img_feat, img_pos_feat,
attn_masks))
return tuple(outs), qid, qa_target, qar_target
def vcr_eval_collate(inputs):
(input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks) = map(
list, unzip(concat(outs for outs, _, _, _ in inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_type_ids = pad_sequence(
txt_type_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
qa_targets = torch.stack(
[t for _, _, t, _ in inputs], dim=0)
qar_targets = torch.stack(
[t for _, _, _, t in inputs], dim=0)
qids = [id_ for _, id_, _, _ in inputs]
return {'qids': qids,
'input_ids': input_ids,
'txt_type_ids': txt_type_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'qa_targets': qa_targets,
'qar_targets': qar_targets}
| 11,643 | 37.556291 | 82 | py |
UNITER | UNITER-master/data/mlm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MLM datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb,
pad_tensors, get_gather_index)
def random_word(tokens, vocab_range, mask):
"""
Masking some random tokens for Language Model task with probabilities as in
the original BERT paper.
:param tokens: list of int, tokenized sentence.
    :param vocab_range: (start, end) vocab id range for sampling a random word
    :param mask: token id used for the [MASK] token
:return: (list of int, list of int), masked tokens and related labels for
LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = mask
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(range(*vocab_range)))
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
output_label.append(token)
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
if all(o == -1 for o in output_label):
# at least mask 1
output_label[0] = tokens[0]
tokens[0] = mask
return tokens, output_label
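# A toy walk-through of the masking scheme above; the wordpiece ids, vocab
# range and mask id are made-up values, and the output is random by design
# (roughly 15% of positions get masked/replaced, the rest labeled -1).
def _demo_random_word():
    toy_tokens = [1031, 2054, 2003, 1996, 3899]
    masked, labels = random_word(toy_tokens, vocab_range=(999, 28996), mask=103)
    # e.g. masked -> [1031, 103, 2003, 1996, 3899]
    #      labels -> [-1, 2054, -1, -1, -1]
    return masked, labels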
class MlmDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db):
assert isinstance(txt_db, TxtTokLmdb)
super().__init__(txt_db, img_db)
def __getitem__(self, i):
"""
Return:
- input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- img_feat : (num_bb, d)
- img_pos_feat : (num_bb, 7)
        - attn_masks   : (L + num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
- txt_labels : (L, ), [-1, -1, wid, -1, -1, -1]
0's padded so that (L + num_bb) % 8 == 0
"""
example = super().__getitem__(i)
# text input
input_ids, txt_labels = self.create_mlm_io(example['input_ids'])
# img input
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return input_ids, img_feat, img_pos_feat, attn_masks, txt_labels
def create_mlm_io(self, input_ids):
input_ids, txt_labels = random_word(input_ids,
self.txt_db.v_range,
self.txt_db.mask)
input_ids = torch.tensor([self.txt_db.cls_]
+ input_ids
+ [self.txt_db.sep])
txt_labels = torch.tensor([-1] + txt_labels + [-1])
return input_ids, txt_labels
def mlm_collate(inputs):
"""
Return:
:input_ids (n, max_L) padded with 0
:position_ids (n, max_L) padded with 0
:txt_lens list of [txt_len]
:img_feat (n, max_num_bb, feat_dim)
:img_pos_feat (n, max_num_bb, 7)
:num_bbs list of [num_bb]
:attn_masks (n, max_{L + num_bb}) padded with 0
:txt_labels (n, max_L) padded with -1
"""
(input_ids, img_feats, img_pos_feats, attn_masks, txt_labels
) = map(list, unzip(inputs))
# text batches
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_labels = pad_sequence(txt_labels, batch_first=True, padding_value=-1)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'txt_labels': txt_labels}
return batch
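# A CPU-only sketch of what mlm_collate consumes and produces; all ids,
# feature values and dimensions below are made up for illustration.
def _demo_mlm_collate():
    ex = (torch.tensor([101, 2054, 102]),        # input_ids (cls, wd, sep)
          torch.randn(2, 4),                     # img_feat: 2 boxes, dim 4
          torch.randn(2, 7),                     # img_pos_feat
          torch.ones(5, dtype=torch.long),       # attn_masks: 3 txt + 2 img
          torch.tensor([-1, 2054, -1]))          # txt_labels
    batch = mlm_collate([ex, ex])
    # batch['input_ids'].shape -> (2, 3); batch['img_feat'].shape -> (2, 2, 4)
    return batch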
| 4,551 | 32.226277 | 79 | py |
UNITER | UNITER-master/data/sampler.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
sampler for length bucketing (batch by tokens)
"""
import math
import random
import horovod.torch as hvd
import torch
from torch.utils.data import Sampler
from cytoolz import partition_all
class TokenBucketSampler(Sampler):
def __init__(self, lens, bucket_size, batch_size,
droplast=False, size_multiple=8):
self._lens = lens
self._max_tok = batch_size
self._bucket_size = bucket_size
self._droplast = droplast
self._size_mul = size_multiple
def _create_ids(self):
return list(range(len(self._lens)))
def _sort_fn(self, i):
return self._lens[i]
def __iter__(self):
ids = self._create_ids()
random.shuffle(ids)
buckets = [sorted(ids[i:i+self._bucket_size],
key=self._sort_fn, reverse=True)
for i in range(0, len(ids), self._bucket_size)]
# fill batches until max_token (include padding)
batches = []
for bucket in buckets:
max_len = 0
batch_indices = []
for indices in partition_all(self._size_mul, bucket):
max_len = max(max_len, max(self._lens[i] for i in indices))
if (max_len * (len(batch_indices) + self._size_mul)
> self._max_tok):
if not batch_indices:
raise ValueError(
"max_tokens too small / max_seq_len too long")
assert len(batch_indices) % self._size_mul == 0
batches.append(batch_indices)
batch_indices = list(indices)
else:
batch_indices.extend(indices)
if not self._droplast and batch_indices:
batches.append(batch_indices)
random.shuffle(batches)
return iter(batches)
def __len__(self):
raise ValueError("NOT supported. "
"This has some randomness across epochs")
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
shuffle (optional): If true (default), sampler will shuffle the indices
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
num_replicas = hvd.size()
if rank is None:
rank = hvd.rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset)
* 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
if self.shuffle:
            shuffle_ind = torch.randperm(len(indices), generator=g).tolist()
            indices = [indices[i] for i in shuffle_ind]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
| 4,199 | 33.42623 | 79 | py |
UNITER | UNITER-master/data/ve.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Visual entailment dataset
# NOTE: basically reuses the VQA dataset
"""
from .vqa import VqaDataset, VqaEvalDataset, vqa_collate, vqa_eval_collate
class VeDataset(VqaDataset):
def __init__(self, *args, **kwargs):
super().__init__(3, *args, **kwargs)
class VeEvalDataset(VqaEvalDataset):
def __init__(self, *args, **kwargs):
super().__init__(3, *args, **kwargs)
ve_collate = vqa_collate
ve_eval_collate = vqa_eval_collate
| 519 | 21.608696 | 74 | py |
UNITER | UNITER-master/data/mrm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MRM Datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
def _get_img_mask(mask_prob, num_bb):
img_mask = [random.random() < mask_prob for _ in range(num_bb)]
if not any(img_mask):
# at least mask 1
img_mask[random.choice(range(num_bb))] = True
img_mask = torch.tensor(img_mask)
return img_mask
def _get_img_tgt_mask(img_mask, txt_len):
z = torch.zeros(txt_len, dtype=torch.uint8)
img_mask_tgt = torch.cat([z, img_mask], dim=0)
return img_mask_tgt
def _get_feat_target(img_feat, img_masks):
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat) # (n, m, d)
feat_dim = img_feat.size(-1)
feat_targets = img_feat[img_masks_ext].contiguous().view(
-1, feat_dim) # (s, d)
return feat_targets
def _mask_img_feat(img_feat, img_masks):
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat)
img_feat_masked = img_feat.data.masked_fill(img_masks_ext, 0)
return img_feat_masked
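# A tiny CPU-only walk-through of the region-masking helpers above; shapes
# and the mask pattern are made up for illustration.
def _demo_region_masking():
    img_feat = torch.randn(1, 4, 8)                       # (batch, num_bb, dim)
    img_masks = torch.tensor([[True, False, True, False]])
    feat_targets = _get_feat_target(img_feat, img_masks)  # (2, 8) rows to regress
    masked_feat = _mask_img_feat(img_feat, img_masks)     # masked rows zeroed
    return feat_targets, masked_feat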
class MrfrDataset(DetectFeatTxtTokDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def __getitem__(self, i):
"""
Return:
- input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- img_feat : (num_bb, d)
- img_pos_feat : (num_bb, 7)
        - attn_masks   : (L + num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
- img_mask : (num_bb, ) between {0, 1}
"""
example = super().__getitem__(i)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
# image input features
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat,
attn_masks, img_mask, img_mask_tgt)
def mrfr_collate(inputs):
"""
Return:
- input_ids : (n, max_L), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- position_ids : (n, max_L)
- txt_lens : list of [input_len]
- img_feat : (n, max_num_bb, d)
- img_pos_feat : (n, max_num_bb, 7)
- num_bbs : list of [num_bb]
    - attn_masks   : (n, max_{L + num_bb}), i.e., [1, 1, ..., 0, 0, 1, 1]
- img_masks : (n, max_num_bb) between {0, 1}
"""
(input_ids, img_feats, img_pos_feats, attn_masks, img_masks, img_mask_tgts,
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
# mask features
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
feat_targets = _get_feat_target(img_feat, img_masks)
img_feat = _mask_img_feat(img_feat, img_masks)
img_mask_tgt = pad_sequence(img_mask_tgts,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'feat_targets': feat_targets,
'img_masks': img_masks,
'img_mask_tgt': img_mask_tgt}
return batch
def _get_targets(img_masks, img_soft_label):
soft_label_dim = img_soft_label.size(-1)
img_masks_ext_for_label = img_masks.unsqueeze(-1).expand_as(img_soft_label)
label_targets = img_soft_label[img_masks_ext_for_label].contiguous().view(
-1, soft_label_dim)
return label_targets
class MrcDataset(DetectFeatTxtTokDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def _get_img_feat(self, fname):
img_dump = self.img_db.get_dump(fname)
num_bb = self.img_db.name2nbb[fname]
img_feat = torch.tensor(img_dump['features'])
bb = torch.tensor(img_dump['norm_bb'])
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_soft_label = torch.tensor(img_dump['soft_labels'])
return img_feat, img_bb, img_soft_label, num_bb
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, img_soft_labels, num_bb = self._get_img_feat(
example['img_fname'])
# image input features
img_mask = _get_img_mask(self.mask_prob, num_bb)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat,
img_soft_labels, attn_masks, img_mask, img_mask_tgt)
def mrc_collate(inputs):
(input_ids, img_feats, img_pos_feats, img_soft_labels,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
num_bbs = [f.size(0) for f in img_feats]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
img_soft_label = pad_tensors(img_soft_labels, num_bbs)
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
label_targets = _get_targets(img_masks, img_soft_label)
img_feat = _mask_img_feat(img_feat, img_masks)
img_mask_tgt = pad_sequence(img_mask_tgts,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_masks': img_masks,
'img_mask_tgt': img_mask_tgt,
'label_targets': label_targets}
return batch
| 7,228 | 34.965174 | 79 | py |
UNITER | UNITER-master/data/vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VQA dataset
"""
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
def _get_vqa_target(example, num_answers):
target = torch.zeros(num_answers)
labels = example['target']['labels']
scores = example['target']['scores']
if labels and scores:
target.scatter_(0, torch.tensor(labels), torch.tensor(scores))
return target
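# A worked example of the soft-target construction above; the answer ids and
# scores are made up.
def _demo_vqa_target():
    toy_example = {'target': {'labels': [1, 3], 'scores': [0.9, 0.3]}}
    target = _get_vqa_target(toy_example, num_answers=5)
    # target -> tensor([0.0, 0.9, 0.0, 0.3, 0.0])
    return target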
class VqaDataset(DetectFeatTxtTokDataset):
def __init__(self, num_answers, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_answers = num_answers
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
target = _get_vqa_target(example, self.num_answers)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return input_ids, img_feat, img_pos_feat, attn_masks, target
def vqa_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.stack(targets, dim=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
class VqaEvalDataset(VqaDataset):
def __getitem__(self, i):
qid = self.ids[i]
example = DetectFeatTxtTokDataset.__getitem__(self, i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
if 'target' in example:
target = _get_vqa_target(example, self.num_answers)
else:
target = None
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return qid, input_ids, img_feat, img_pos_feat, attn_masks, target
def vqa_eval_collate(inputs):
(qids, input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
if targets[0] is None:
targets = None
else:
targets = torch.stack(targets, dim=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'qids': qids,
'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
| 4,105 | 31.330709 | 76 | py |
UNITER | UNITER-master/data/data.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Dataset interfaces
"""
from collections import defaultdict
from contextlib import contextmanager
import io
import json
from os.path import exists
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
import horovod.torch as hvd
from tqdm import tqdm
import lmdb
from lz4.frame import compress, decompress
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
def _fp16_to_fp32(feat_dict):
out = {k: arr.astype(np.float32)
if arr.dtype == np.float16 else arr
for k, arr in feat_dict.items()}
return out
def compute_num_bb(confs, conf_th, min_bb, max_bb):
num_bb = max(min_bb, (confs > conf_th).sum())
num_bb = min(max_bb, num_bb)
return num_bb
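# A worked example of the box-count clipping above; the detector confidences
# are made up.
def _demo_compute_num_bb():
    confs = np.array([0.9, 0.5, 0.3, 0.1])
    # 3 boxes pass conf_th=0.2, then the count is clipped into [min_bb, max_bb]
    return compute_num_bb(confs, conf_th=0.2, min_bb=2, max_bb=10)  # -> 3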
def _check_distributed():
try:
dist = hvd.size() != hvd.local_size()
except ValueError:
# not using horovod
dist = False
return dist
class DetectFeatLmdb(object):
def __init__(self, img_dir, conf_th=0.2, max_bb=100, min_bb=10, num_bb=36,
compress=True):
        self.img_dir = img_dir
        # keep the thresholds so _compute_nbb can use them when the nbb json
        # has not been pre-computed
        self.conf_th = conf_th
        self.max_bb = max_bb
        self.min_bb = min_bb
        self.num_bb = num_bb
if conf_th == -1:
db_name = f'feat_numbb{num_bb}'
self.name2nbb = defaultdict(lambda: num_bb)
else:
db_name = f'feat_th{conf_th}_max{max_bb}_min{min_bb}'
nbb = f'nbb_th{conf_th}_max{max_bb}_min{min_bb}.json'
if not exists(f'{img_dir}/{nbb}'):
# nbb is not pre-computed
self.name2nbb = None
else:
self.name2nbb = json.load(open(f'{img_dir}/{nbb}'))
self.compress = compress
if compress:
db_name += '_compressed'
if self.name2nbb is None:
if compress:
db_name = 'all_compressed'
else:
db_name = 'all'
# only read ahead on single node training
self.env = lmdb.open(f'{img_dir}/{db_name}',
readonly=True, create=False,
readahead=not _check_distributed())
self.txn = self.env.begin(buffers=True)
if self.name2nbb is None:
self.name2nbb = self._compute_nbb()
def _compute_nbb(self):
name2nbb = {}
        fnames = json.loads(
            bytes(self.txn.get(key=b'__keys__')).decode('utf-8'))
for fname in tqdm(fnames, desc='reading images'):
dump = self.txn.get(fname.encode('utf-8'))
if self.compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
confs = img_dump['conf']
else:
img_dump = msgpack.loads(dump, raw=False)
confs = img_dump['conf']
name2nbb[fname] = compute_num_bb(confs, self.conf_th,
self.min_bb, self.max_bb)
return name2nbb
def __del__(self):
self.env.close()
def get_dump(self, file_name):
# hack for MRC
dump = self.txn.get(file_name.encode('utf-8'))
nbb = self.name2nbb[file_name]
if self.compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
img_dump = _fp16_to_fp32(img_dump)
else:
img_dump = msgpack.loads(dump, raw=False)
img_dump = _fp16_to_fp32(img_dump)
img_dump = {k: arr[:nbb, ...] for k, arr in img_dump.items()}
return img_dump
def __getitem__(self, file_name):
dump = self.txn.get(file_name.encode('utf-8'))
nbb = self.name2nbb[file_name]
if self.compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
img_dump = {'features': img_dump['features'],
'norm_bb': img_dump['norm_bb']}
else:
img_dump = msgpack.loads(dump, raw=False)
img_feat = torch.tensor(img_dump['features'][:nbb, :]).float()
img_bb = torch.tensor(img_dump['norm_bb'][:nbb, :]).float()
return img_feat, img_bb
@contextmanager
def open_lmdb(db_dir, readonly=False):
db = TxtLmdb(db_dir, readonly)
try:
yield db
finally:
del db
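# A minimal preprocessing-style write through the context manager above; the
# output path and the record below are placeholders.
def _demo_open_lmdb(db_dir='/tmp/toy_txt_db'):
    with open_lmdb(db_dir, readonly=False) as db:
        db['sample_0'] = {'input_ids': [101, 2054, 102]}  # committed on close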
class TxtLmdb(object):
def __init__(self, db_dir, readonly=True):
self.readonly = readonly
if readonly:
# training
self.env = lmdb.open(db_dir,
readonly=True, create=False,
readahead=not _check_distributed())
self.txn = self.env.begin(buffers=True)
self.write_cnt = None
else:
# prepro
self.env = lmdb.open(db_dir, readonly=False, create=True,
map_size=4 * 1024**4)
self.txn = self.env.begin(write=True)
self.write_cnt = 0
def __del__(self):
if self.write_cnt:
self.txn.commit()
self.env.close()
def __getitem__(self, key):
return msgpack.loads(decompress(self.txn.get(key.encode('utf-8'))),
raw=False)
def __setitem__(self, key, value):
# NOTE: not thread safe
if self.readonly:
raise ValueError('readonly text DB')
ret = self.txn.put(key.encode('utf-8'),
compress(msgpack.dumps(value, use_bin_type=True)))
self.write_cnt += 1
if self.write_cnt % 1000 == 0:
self.txn.commit()
self.txn = self.env.begin(write=True)
self.write_cnt = 0
return ret
class TxtTokLmdb(object):
def __init__(self, db_dir, max_txt_len=60):
if max_txt_len == -1:
self.id2len = json.load(open(f'{db_dir}/id2len.json'))
else:
self.id2len = {
id_: len_
for id_, len_ in json.load(open(f'{db_dir}/id2len.json')
).items()
if len_ <= max_txt_len
}
self.db_dir = db_dir
self.db = TxtLmdb(db_dir, readonly=True)
meta = json.load(open(f'{db_dir}/meta.json', 'r'))
self.cls_ = meta['CLS']
self.sep = meta['SEP']
self.mask = meta['MASK']
self.v_range = meta['v_range']
def __getitem__(self, id_):
txt_dump = self.db[id_]
return txt_dump
def combine_inputs(self, *inputs):
input_ids = [self.cls_]
for ids in inputs:
input_ids.extend(ids + [self.sep])
return torch.tensor(input_ids)
@property
def txt2img(self):
txt2img = json.load(open(f'{self.db_dir}/txt2img.json'))
return txt2img
@property
def img2txts(self):
img2txts = json.load(open(f'{self.db_dir}/img2txts.json'))
return img2txts
def get_ids_and_lens(db):
assert isinstance(db, TxtTokLmdb)
lens = []
ids = []
for id_ in list(db.id2len.keys())[hvd.rank()::hvd.size()]:
lens.append(db.id2len[id_])
ids.append(id_)
return lens, ids
class DetectFeatTxtTokDataset(Dataset):
def __init__(self, txt_db, img_db):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
self.lens = [tl + self.img_db.name2nbb[txt2img[id_]]
for tl, id_ in zip(txt_lens, self.ids)]
def __len__(self):
return len(self.ids)
def __getitem__(self, i):
id_ = self.ids[i]
example = self.txt_db[id_]
return example
def _get_img_feat(self, fname):
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
return img_feat, img_bb, num_bb
def pad_tensors(tensors, lens=None, pad=0):
"""B x [T, ...]"""
if lens is None:
lens = [t.size(0) for t in tensors]
max_len = max(lens)
bs = len(tensors)
hid = tensors[0].size(-1)
dtype = tensors[0].dtype
output = torch.zeros(bs, max_len, hid, dtype=dtype)
if pad:
output.data.fill_(pad)
for i, (t, l) in enumerate(zip(tensors, lens)):
output.data[i, :l, ...] = t.data
return output
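# A toy call of pad_tensors: feature tensors with different numbers of boxes
# are stacked into one zero-padded batch tensor.
def _demo_pad_tensors():
    a = torch.ones(2, 4)          # 2 boxes, feature dim 4
    b = torch.ones(3, 4)          # 3 boxes, feature dim 4
    out = pad_tensors([a, b])     # shape (2, 3, 4); out[0, 2] is all zeros
    return out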
def get_gather_index(txt_lens, num_bbs, batch_size, max_len, out_size):
assert len(txt_lens) == len(num_bbs) == batch_size
gather_index = torch.arange(0, out_size, dtype=torch.long,
).unsqueeze(0).repeat(batch_size, 1)
for i, (tl, nbb) in enumerate(zip(txt_lens, num_bbs)):
gather_index.data[i, tl:tl+nbb] = torch.arange(max_len, max_len+nbb,
dtype=torch.long).data
return gather_index
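# A worked example of the gather index: text tokens keep their positions and
# the image features (stored after the padded text block) are pulled forward
# so that [txt, img] become contiguous. Numbers are made up.
def _demo_gather_index():
    # 2 text tokens padded to max_len=4, 3 boxes, output length 5
    gi = get_gather_index([2], [3], batch_size=1, max_len=4, out_size=5)
    # gi -> tensor([[0, 1, 4, 5, 6]])
    return gi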
class ConcatDatasetWithLens(ConcatDataset):
""" A thin wrapper on pytorch concat dataset for lens batching """
def __init__(self, datasets):
super().__init__(datasets)
self.lens = [l for dset in datasets for l in dset.lens]
def __getattr__(self, name):
return self._run_method_on_all_dsets(name)
def _run_method_on_all_dsets(self, name):
def run_all(*args, **kwargs):
return [dset.__getattribute__(name)(*args, **kwargs)
for dset in self.datasets]
return run_all
class ImageLmdbGroup(object):
def __init__(self, conf_th, max_bb, min_bb, num_bb, compress):
self.path2imgdb = {}
self.conf_th = conf_th
self.max_bb = max_bb
self.min_bb = min_bb
self.num_bb = num_bb
self.compress = compress
def __getitem__(self, path):
img_db = self.path2imgdb.get(path, None)
if img_db is None:
img_db = DetectFeatLmdb(path, self.conf_th, self.max_bb,
self.min_bb, self.num_bb, self.compress)
return img_db
| 10,028 | 31.041534 | 78 | py |
UNITER | UNITER-master/data/pretrain_vcr.py | from .vcr import VcrDetectFeatTxtTokDataset
from .mlm import random_word
import torch
from toolz.sandbox import unzip
from torch.nn.utils.rnn import pad_sequence
from .data import pad_tensors, get_gather_index
from .mrm import (
_get_img_tgt_mask, _get_img_mask, _mask_img_feat,
_get_feat_target, _get_targets)
class VcrPretrainDataset(VcrDetectFeatTxtTokDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _get_input_ids(self, txt_dump, mask=False):
# text input
input_ids_q = txt_dump['input_ids']
type_ids_q = [0]*len(input_ids_q)
if mask:
input_ids_q, txt_labels_q = random_word(
input_ids_q, self.txt_db.v_range,
self.txt_db.mask)
else:
txt_labels_q = input_ids_q
answer_label = txt_dump['qa_target']
assert answer_label >= 0, "answer_label < 0"
input_ids_a = txt_dump['input_ids_as'][answer_label]
type_ids_a = [2]*len(input_ids_a)
if mask:
input_ids_a, txt_labels_a = random_word(
input_ids_a, self.txt_db.v_range,
self.txt_db.mask)
else:
txt_labels_a = input_ids_a
input_ids = input_ids_q + [self.txt_db.sep] + input_ids_a
type_ids = type_ids_q + [0] + type_ids_a
txt_labels = txt_labels_q + [-1] + txt_labels_a
if self.task == "qar":
rationale_label = txt_dump['qar_target']
assert rationale_label >= 0, "rationale_label < 0"
input_ids_r = txt_dump['input_ids_rs'][rationale_label]
type_ids_r = [3]*len(input_ids_r)
if mask:
input_ids_r, txt_labels_r = random_word(
input_ids_r, self.txt_db.v_range,
self.txt_db.mask)
else:
txt_labels_r = input_ids_r
input_ids += [self.txt_db.sep] + input_ids_r
type_ids += [2] + type_ids_r
txt_labels += [-1] + txt_labels_r
if mask:
return input_ids, type_ids, txt_labels
else:
return input_ids, type_ids
def combine_txt_inputs(self, input_ids, txt_type_ids, txt_labels=None):
input_ids = torch.tensor([self.txt_db.cls_]
+ input_ids
+ [self.txt_db.sep])
txt_type_ids = torch.tensor(
[txt_type_ids[0]] + txt_type_ids
+ [txt_type_ids[-1]])
if txt_labels is not None:
txt_labels = torch.tensor([-1] + txt_labels + [-1])
return input_ids, txt_type_ids, txt_labels
return input_ids, txt_type_ids
def vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks):
# text batches
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_type_ids = pad_sequence(txt_type_ids, batch_first=True,
padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'txt_type_ids': txt_type_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
class MlmDatasetForVCR(VcrPretrainDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create_mlm_io(self, example):
(input_ids, txt_type_ids,
txt_labels) = self._get_input_ids(example, mask=True)
return self.combine_txt_inputs(
input_ids, txt_type_ids, txt_labels)
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
# txt inputs, create mlm io
input_ids, txt_type_ids, txt_labels = self.create_mlm_io(example)
attn_masks = torch.ones(
len(input_ids) + num_bb,
dtype=torch.long)
return (input_ids, txt_type_ids, img_feat,
img_pos_feat, attn_masks, txt_labels)
def mlm_collate_for_vcr(inputs):
(input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks,
txt_labels) = map(list, unzip(inputs))
batch = vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks)
txt_labels = pad_sequence(txt_labels, batch_first=True, padding_value=-1)
batch['txt_labels'] = txt_labels
return batch
class MrfrDatasetForVCR(VcrPretrainDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def __getitem__(self, i):
example = super().__getitem__(i)
# text input
input_ids, txt_type_ids = self._get_input_ids(example, mask=False)
input_ids, txt_type_ids = self.combine_txt_inputs(
input_ids, txt_type_ids)
# image input features
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, txt_type_ids, img_feat, img_pos_feat,
attn_masks, img_mask, img_mask_tgt)
def mrfr_collate_for_vcr(inputs):
(input_ids, txt_type_ids, img_feats, img_pos_feats,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
batch = vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks)
# mask features
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
feat_targets = _get_feat_target(batch['img_feat'], img_masks)
img_mask_tgt = pad_sequence(
img_mask_tgts, batch_first=True, padding_value=0)
batch['img_feat'] = _mask_img_feat(batch['img_feat'], img_masks)
batch['img_masks'] = img_masks
batch['feat_targets'] = feat_targets
batch['img_mask_tgt'] = img_mask_tgt
return batch
class MrcDatasetForVCR(VcrPretrainDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def _get_img_feat_for_db(self, img_db, fname):
img_dump = img_db.get_dump(fname)
img_feat = torch.tensor(img_dump['features'])
bb = torch.tensor(img_dump['norm_bb'])
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_soft_label = torch.tensor(img_dump['soft_labels'])
return img_feat, img_bb, img_soft_label
def _get_img_feat(self, fname_gt, fname):
if self.img_db and self.img_db_gt:
(img_feat_gt, img_bb_gt,
img_soft_label_gt) = self._get_img_feat_for_db(
self.img_db_gt, fname_gt)
(img_feat, img_bb,
img_soft_label) = self._get_img_feat_for_db(
self.img_db, fname)
img_feat = torch.cat([img_feat_gt, img_feat], dim=0)
img_bb = torch.cat([img_bb_gt, img_bb], dim=0)
img_soft_label = torch.cat(
[img_soft_label_gt, img_soft_label], dim=0)
elif self.img_db:
(img_feat, img_bb,
img_soft_label) = self._get_img_feat_for_db(
self.img_db, fname)
else:
(img_feat, img_bb,
img_soft_label) = self._get_img_feat_for_db(
self.img_db_gt, fname_gt)
num_bb = img_feat.size(0)
return img_feat, img_bb, img_soft_label, num_bb
def __getitem__(self, i):
example = super().__getitem__(i)
# text input
input_ids, txt_type_ids = self._get_input_ids(example, mask=False)
input_ids, txt_type_ids = self.combine_txt_inputs(
input_ids, txt_type_ids)
# image input features
img_feat, img_pos_feat, img_soft_labels, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, txt_type_ids, img_feat, img_pos_feat,
img_soft_labels, attn_masks, img_mask, img_mask_tgt)
def mrc_collate_for_vcr(inputs):
(input_ids, txt_type_ids, img_feats, img_pos_feats, img_soft_labels,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
num_bbs = [f.size(0) for f in img_feats]
batch = vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks)
# mask features
img_soft_label = pad_tensors(img_soft_labels, num_bbs)
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
label_targets = _get_targets(img_masks, img_soft_label)
img_mask_tgt = pad_sequence(
img_mask_tgts, batch_first=True, padding_value=0)
batch['img_feat'] = _mask_img_feat(batch['img_feat'], img_masks)
batch['img_masks'] = img_masks
batch['label_targets'] = label_targets
batch['img_mask_tgt'] = img_mask_tgt
return batch
| 9,933 | 35.255474 | 77 | py |
UNITER | UNITER-master/data/nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
NLVR2 dataset
"""
import copy
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
get_ids_and_lens, pad_tensors, get_gather_index)
class Nlvr2PairedDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, use_img_type=True):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
self.lens = [2*tl + sum(self.img_db.name2nbb[img]
for img in txt2img[id_])
for tl, id_ in zip(txt_lens, self.ids)]
self.use_img_type = use_img_type
def __getitem__(self, i):
"""
[[txt, img1],
[txt, img2]]
"""
example = super().__getitem__(i)
target = example['target']
outs = []
for i, img in enumerate(example['img_fname']):
img_feat, img_pos_feat, num_bb = self._get_img_feat(img)
# text input
input_ids = copy.deepcopy(example['input_ids'])
input_ids = [self.txt_db.cls_] + input_ids + [self.txt_db.sep]
attn_masks = [1] * (len(input_ids) + num_bb)
input_ids = torch.tensor(input_ids)
attn_masks = torch.tensor(attn_masks)
if self.use_img_type:
img_type_ids = torch.tensor([i+1]*num_bb)
else:
img_type_ids = None
outs.append((input_ids, img_feat, img_pos_feat,
attn_masks, img_type_ids))
return tuple(outs), target
def nlvr2_paired_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks,
img_type_ids) = map(list, unzip(concat(outs for outs, _ in inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
if img_type_ids[0] is None:
img_type_ids = None
else:
img_type_ids = pad_sequence(img_type_ids,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.Tensor([t for _, t in inputs]).long()
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_type_ids': img_type_ids,
'targets': targets}
return batch
class Nlvr2PairedEvalDataset(Nlvr2PairedDataset):
def __getitem__(self, i):
qid = self.ids[i]
outs, targets = super().__getitem__(i)
return qid, outs, targets
def nlvr2_paired_eval_collate(inputs):
qids, batch = [], []
for id_, *tensors in inputs:
qids.append(id_)
batch.append(tensors)
batch = nlvr2_paired_collate(batch)
batch['qids'] = qids
return batch
class Nlvr2TripletDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, use_img_type=True):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
self.lens = [tl + sum(self.img_db.name2nbb[img]
for img in txt2img[id_])
for tl, id_ in zip(txt_lens, self.ids)]
self.use_img_type = use_img_type
def __getitem__(self, i):
"""
[[txt, img1],
[txt, img2]]
"""
example = super().__getitem__(i)
target = example['target']
img_feats = []
img_pos_feats = []
num_bb = 0
img_type_ids = []
for i, img in enumerate(example['img_fname']):
feat, pos, nbb = self._get_img_feat(img)
img_feats.append(feat)
img_pos_feats.append(pos)
num_bb += nbb
if self.use_img_type:
img_type_ids.extend([i+1]*nbb)
img_feat = torch.cat(img_feats, dim=0)
img_pos_feat = torch.cat(img_pos_feats, dim=0)
if self.use_img_type:
img_type_ids = torch.tensor(img_type_ids)
else:
img_type_ids = None
# text input
input_ids = copy.deepcopy(example['input_ids'])
input_ids = [self.txt_db.cls_] + input_ids + [self.txt_db.sep]
attn_masks = [1] * (len(input_ids) + num_bb)
input_ids = torch.tensor(input_ids)
attn_masks = torch.tensor(attn_masks)
return (input_ids, img_feat, img_pos_feat, attn_masks,
img_type_ids, target)
def nlvr2_triplet_collate(inputs):
(input_ids, img_feats, img_pos_feats,
attn_masks, img_type_ids, targets) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
if img_type_ids[0] is None:
img_type_ids = None
else:
img_type_ids = pad_sequence(img_type_ids,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.Tensor(targets).long()
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_type_ids': img_type_ids,
'targets': targets}
return batch
class Nlvr2TripletEvalDataset(Nlvr2TripletDataset):
def __getitem__(self, i):
qid = self.ids[i]
tensors = super().__getitem__(i)
return (qid, *tensors)
def nlvr2_triplet_eval_collate(inputs):
qids, batch = [], []
for id_, *tensors in inputs:
qids.append(id_)
batch.append(tensors)
batch = nlvr2_triplet_collate(batch)
batch['qids'] = qids
return batch
| 7,186 | 31.817352 | 76 | py |
UNITER | UNITER-master/data/__init__.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
from .data import (TxtTokLmdb, DetectFeatLmdb,
ImageLmdbGroup, ConcatDatasetWithLens)
from .sampler import TokenBucketSampler
from .loader import PrefetchLoader, MetaLoader
from .vqa import VqaDataset, VqaEvalDataset, vqa_collate, vqa_eval_collate
from .ve import VeDataset, VeEvalDataset, ve_collate, ve_eval_collate
from .nlvr2 import (Nlvr2PairedDataset, Nlvr2PairedEvalDataset,
Nlvr2TripletDataset, Nlvr2TripletEvalDataset,
nlvr2_paired_collate, nlvr2_paired_eval_collate,
nlvr2_triplet_collate, nlvr2_triplet_eval_collate)
from .itm import (TokenBucketSamplerForItm, ItmDataset,
itm_collate, itm_ot_collate,
ItmRankDataset, ItmValDataset, ItmEvalDataset,
ItmRankDatasetHardNegFromImage,
ItmRankDatasetHardNegFromText,
itm_rank_collate, itm_val_collate, itm_eval_collate,
itm_rank_hn_collate)
from .mlm import MlmDataset, mlm_collate
from .mrm import MrfrDataset, MrcDataset, mrfr_collate, mrc_collate
from .vcr import (VcrTxtTokLmdb, VcrDataset, VcrEvalDataset,
vcr_collate, vcr_eval_collate)
from .re import (ReTxtTokLmdb, ReDataset, ReEvalDataset,
re_collate, re_eval_collate)
| 1,384 | 46.758621 | 74 | py |
UNITER | UNITER-master/data/loader.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
A prefetch loader to speedup data loading
Modified from Nvidia Deep Learning Examples
(https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch).
"""
import random
import torch
from torch.utils.data import DataLoader
from utils.distributed import any_broadcast
class MetaLoader(object):
""" wraps multiple data loaders """
def __init__(self, loaders, accum_steps=1, distributed=False):
assert isinstance(loaders, dict)
self.name2loader = {}
self.name2iter = {}
self.sampling_pools = []
for n, l in loaders.items():
if isinstance(l, tuple):
l, r = l
elif isinstance(l, DataLoader):
r = 1
else:
raise ValueError()
self.name2loader[n] = l
self.name2iter[n] = iter(l)
self.sampling_pools.extend([n]*r)
self.accum_steps = accum_steps
self.distributed = distributed
self.step = 0
def __iter__(self):
""" this iterator will run indefinitely """
task = self.sampling_pools[0]
while True:
if self.step % self.accum_steps == 0:
task = random.choice(self.sampling_pools)
if self.distributed:
                    # make sure all processes train on the same task
task = any_broadcast(task, 0)
self.step += 1
iter_ = self.name2iter[task]
try:
batch = next(iter_)
except StopIteration:
iter_ = iter(self.name2loader[task])
batch = next(iter_)
self.name2iter[task] = iter_
yield task, batch
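    # A minimal sketch of mixing tasks with MetaLoader: a task maps either to
    # a bare DataLoader or to a (DataLoader, ratio) tuple, where the integer
    # ratio controls how often that task is sampled. Loader names below are
    # placeholders.
def _demo_meta_loader(mlm_loader, itm_loader, n_steps=10):
    meta = MetaLoader({'mlm': (mlm_loader, 2),   # sampled twice as often
                       'itm': itm_loader})       # ratio defaults to 1
    for step, (task, batch) in enumerate(meta):
        if step >= n_steps:                      # the iterator never stops by itself
            break
        # dispatch `batch` to the head for `task` here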
def move_to_cuda(batch):
if isinstance(batch, torch.Tensor):
return batch.cuda(non_blocking=True)
elif isinstance(batch, list):
new_batch = [move_to_cuda(t) for t in batch]
elif isinstance(batch, tuple):
new_batch = tuple(move_to_cuda(t) for t in batch)
elif isinstance(batch, dict):
new_batch = {n: move_to_cuda(t) for n, t in batch.items()}
else:
return batch
return new_batch
def record_cuda_stream(batch):
if isinstance(batch, torch.Tensor):
batch.record_stream(torch.cuda.current_stream())
elif isinstance(batch, list) or isinstance(batch, tuple):
for t in batch:
record_cuda_stream(t)
elif isinstance(batch, dict):
for t in batch.values():
record_cuda_stream(t)
else:
pass
class PrefetchLoader(object):
"""
overlap compute and cuda data transfer
(copied and then modified from nvidia apex)
"""
def __init__(self, loader):
self.loader = loader
self.stream = torch.cuda.Stream()
def __iter__(self):
loader_it = iter(self.loader)
self.preload(loader_it)
batch = self.next(loader_it)
while batch is not None:
yield batch
batch = self.next(loader_it)
def __len__(self):
return len(self.loader)
def preload(self, it):
try:
self.batch = next(it)
except StopIteration:
self.batch = None
return
# if record_stream() doesn't work, another option is to make sure
# device inputs are created on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input,
# device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target,
# device='cuda')
# Need to make sure the memory allocated for next_* is not still in use
# by the main stream at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.batch = move_to_cuda(self.batch)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this
# side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
def next(self, it):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is not None:
record_cuda_stream(batch)
self.preload(it)
return batch
def __getattr__(self, name):
method = self.loader.__getattribute__(name)
return method
| 4,747 | 32.202797 | 79 | py |
UNITER | UNITER-master/data/re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Referring Expression dataset
"""
import random
import numpy as np
import json
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
TxtLmdb, pad_tensors, get_gather_index)
class ReTxtTokLmdb(TxtTokLmdb):
def __init__(self, db_dir, max_txt_len=120):
# load refs = [{ref_id, sent_ids, ann_id, image_id, sentences, split}]
refs = json.load(open(f'{db_dir}/refs.json', 'r'))
self.ref_ids = [ref['ref_id'] for ref in refs]
self.Refs = {ref['ref_id']: ref for ref in refs}
# load annotations = [{id, area, bbox, image_id, category_id}]
anns = json.load(open(f'{db_dir}/annotations.json', 'r'))
self.Anns = {ann['id']: ann for ann in anns}
# load categories = [{id, name, supercategory}]
categories = json.load(open(f'{db_dir}/categories.json', 'r'))
self.Cats = {cat['id']: cat['name'] for cat in categories}
# load images = [{id, file_name, ann_ids, height, width}]
images = json.load(open(f'{db_dir}/images.json', 'r'))
self.Images = {img['id']: img for img in images}
if max_txt_len == -1:
self.id2len = json.load(open(f'{db_dir}/id2len.json'))
else:
self.id2len = {
id_: len_
for id_, len_ in json.load(open(f'{db_dir}/id2len.json')
).items()
if len_ <= max_txt_len
}
self.max_txt_len = max_txt_len
# self.sent_ids = self._get_sent_ids()
self.db_dir = db_dir
self.db = TxtLmdb(db_dir, readonly=True)
meta = json.load(open(f'{db_dir}/meta.json', 'r'))
self.cls_ = meta['CLS']
self.sep = meta['SEP']
self.mask = meta['MASK']
self.v_range = meta['v_range']
def _get_sent_ids(self):
sent_ids = []
for ref_id in self.ref_ids:
for sent_id in self.Refs[ref_id]['sent_ids']:
sent_len = self.id2len[str(sent_id)]
if self.max_txt_len == -1 or sent_len < self.max_txt_len:
sent_ids.append(str(sent_id))
return sent_ids
def shuffle(self):
# we shuffle ref_ids and make sent_ids according to ref_ids
random.shuffle(self.ref_ids)
self.sent_ids = self._get_sent_ids()
def __getitem__(self, id_):
# sent_id = self.sent_ids[i]
txt_dump = self.db[id_]
return txt_dump
class ReDetectFeatTxtTokDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db):
assert isinstance(txt_db, ReTxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
self.ids = self.txt_db._get_sent_ids()
def __getitem__(self, i):
id_ = self.ids[i]
example = self.txt_db[id_]
return example
def shuffle(self):
self.txt_db.shuffle()
class ReDataset(ReDetectFeatTxtTokDataset):
def __getitem__(self, i):
"""
Return:
:input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0]
:position_ids : range(L)
:img_feat : (num_bb, d)
:img_pos_feat : (num_bb, 7)
:attn_masks : (L+num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
:obj_masks : (num_bb, ) all 0's
:target : (1, )
"""
# {sent_id, sent, ref_id, ann_id, image_id, bbox, input_ids}
example = super().__getitem__(i)
image_id = example['image_id']
fname = f'visual_grounding_coco_gt_{int(image_id):012}.npz'
img_feat, img_pos_feat, num_bb = self._get_img_feat(fname)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
# target bbox
img = self.txt_db.Images[image_id]
assert len(img['ann_ids']) == num_bb, \
'Please use visual_grounding_coco_gt'
target = img['ann_ids'].index(example['ann_id'])
target = torch.tensor([target])
# obj_masks, to be padded with 1, for masking out non-object prob.
obj_masks = torch.tensor([0]*len(img['ann_ids']), dtype=torch.uint8)
return input_ids, img_feat, img_pos_feat, attn_masks, obj_masks, target
def re_collate(inputs):
"""
Return:
:input_ids : (n, max_L) padded with 0
:position_ids : (n, max_L) padded with 0
:txt_lens : list of [txt_len]
:img_feat : (n, max_num_bb, feat_dim)
:img_pos_feat : (n, max_num_bb, 7)
:num_bbs : list of [num_bb]
:attn_masks : (n, max_{L+num_bb}) padded with 0
:obj_masks : (n, max_num_bb) padded with 1
:targets : (n, )
"""
(input_ids, img_feats, img_pos_feats, attn_masks, obj_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.stack(targets, dim=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
obj_masks = pad_sequence(
obj_masks, batch_first=True, padding_value=1)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
return {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'obj_masks': obj_masks,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets,
'txt_lens': txt_lens,
'num_bbs': num_bbs}
class ReEvalDataset(ReDetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, use_gt_feat=True):
super().__init__(txt_db, img_db)
self.use_gt_feat = use_gt_feat
def __getitem__(self, i):
"""
Return:
:input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0]
:position_ids : range(L)
:img_feat : (num_bb, d)
:img_pos_feat : (num_bb, 7)
        :attn_masks    : (L + num_bb, ), all 1's here (text/region padding is applied in the collate fn)
:obj_masks : (num_bb, ) all 0's
:tgt_box : ndarray (4, ) xywh
:obj_boxes : ndarray (num_bb, 4) xywh
:sent_id
"""
# {sent_id, sent, ref_id, ann_id, image_id, bbox, input_ids}
sent_id = self.ids[i]
example = super().__getitem__(i)
image_id = example['image_id']
if self.use_gt_feat:
fname = f'visual_grounding_coco_gt_{int(image_id):012}.npz'
else:
fname = f'visual_grounding_det_coco_{int(image_id):012}.npz'
img_feat, img_pos_feat, num_bb = self._get_img_feat(fname)
# image info
img = self.txt_db.Images[image_id]
im_width, im_height = img['width'], img['height']
# object boxes, img_pos_feat (xyxywha) -> xywh
obj_boxes = np.stack([img_pos_feat[:, 0]*im_width,
img_pos_feat[:, 1]*im_height,
img_pos_feat[:, 4]*im_width,
img_pos_feat[:, 5]*im_height], axis=1)
obj_masks = torch.tensor([0]*num_bb, dtype=torch.uint8)
# target box
tgt_box = np.array(example['bbox']) # xywh
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat, attn_masks, obj_masks,
tgt_box, obj_boxes, sent_id)
# IoU function
def computeIoU(self, box1, box2):
# each box is of [x1, y1, w, h]
inter_x1 = max(box1[0], box2[0])
inter_y1 = max(box1[1], box2[1])
inter_x2 = min(box1[0]+box1[2]-1, box2[0]+box2[2]-1)
inter_y2 = min(box1[1]+box1[3]-1, box2[1]+box2[3]-1)
if inter_x1 < inter_x2 and inter_y1 < inter_y2:
inter = (inter_x2-inter_x1+1)*(inter_y2-inter_y1+1)
else:
inter = 0
union = box1[2]*box1[3] + box2[2]*box2[3] - inter
return float(inter)/union
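    # Worked example (illustrative): box1 = [0, 0, 10, 10], box2 = [5, 5, 10, 10]
    # -> intersection 5*5 = 25, union 100 + 100 - 25 = 175, IoU ~= 0.143.
    # A grounding prediction is conventionally counted correct when its IoU with
    # tgt_box is >= 0.5.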
def re_eval_collate(inputs):
"""
Return:
:input_ids : (n, max_L)
:position_ids : (n, max_L)
:txt_lens : list of [txt_len]
:img_feat : (n, max_num_bb, d)
:img_pos_feat : (n, max_num_bb, 7)
:num_bbs : list of [num_bb]
:attn_masks : (n, max{L+num_bb})
:obj_masks : (n, max_num_bb)
:tgt_box : list of n [xywh]
:obj_boxes : list of n [[xywh, xywh, ...]]
:sent_ids : list of n [sent_id]
"""
(input_ids, img_feats, img_pos_feats, attn_masks, obj_masks,
tgt_box, obj_boxes, sent_ids) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
obj_masks = pad_sequence(
obj_masks, batch_first=True, padding_value=1)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
return {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'obj_masks': obj_masks,
'attn_masks': attn_masks,
'gather_index': gather_index,
'tgt_box': tgt_box,
'obj_boxes': obj_boxes,
'sent_ids': sent_ids,
'txt_lens': txt_lens,
'num_bbs': num_bbs}
| 10,442 | 35.260417 | 79 | py |
UNITER | UNITER-master/data/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Itm dataset
"""
from collections import defaultdict
import copy
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
import numpy as np
from .data import (DetectFeatTxtTokDataset, DetectFeatLmdb, TxtTokLmdb,
pad_tensors, get_gather_index, get_ids_and_lens)
from .sampler import TokenBucketSampler
class TokenBucketSamplerForItm(TokenBucketSampler):
def __init__(self, dset, *args, **kwargs):
super().__init__(dset.lens, *args, **kwargs)
self.dset = dset
def __iter__(self):
it = super().__iter__()
self.dset.new_epoch()
self._lens = self.dset.lens
return it
def _has_overlap(la, lb):
if len(la) < len(lb):
la, lb = lb, la
s = set(la)
return any(b in s for b in lb)
def sample_negative(sample_pool, ground_truths, num_sample):
""" random and retry """
outputs = ground_truths[:1]
while _has_overlap(outputs, ground_truths):
outputs = random.sample(sample_pool, num_sample)
return outputs
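# Re-draws `num_sample` items from `sample_pool` until none of them appears in
# `ground_truths`; seeding `outputs` with ground_truths[:1] only forces the first
# iteration. This assumes the pool is large enough that a disjoint draw exists.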
class ItmDataset(DetectFeatTxtTokDataset):
""" NOTE this Dataset handles distributed training itself
(for more efficient negative sampling) """
def __init__(self, txt_db, img_db, neg_sample_p=0.5):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
self.txt_lens, self.ids = get_ids_and_lens(txt_db)
self.all_imgs = list(set(txt_db[id_]['img_fname'] for id_ in self.ids))
self.neg_sample_p = neg_sample_p
self.new_epoch()
def new_epoch(self):
""" should be called every epoch for more randomness"""
self.labels = np.random.choice(
[0, 1], size=len(self.ids),
p=[self.neg_sample_p, 1-self.neg_sample_p])
self.lens = []
self.train_imgs = []
for i, (id_, tl) in enumerate(zip(self.ids, self.txt_lens)):
img_fname = super().__getitem__(i)['img_fname']
if self.labels[i] == 0:
img_fname = sample_negative(self.all_imgs, [img_fname], 1)[0]
self.train_imgs.append(img_fname)
self.lens.append(tl + self.img_db.name2nbb[img_fname])
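    # Roughly `neg_sample_p` of the examples get re-paired with a random
    # non-matching image (target 0) each epoch; the rest keep their ground-truth
    # image (target 1). `self.lens` is rebuilt because the sampled image changes
    # the joint text+region length used by the bucketing sampler.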
def __getitem__(self, i):
example = super().__getitem__(i)
# labels and negative images should be sampled every epoch
ground_truth_label = self.labels[i]
img_fname = self.train_imgs[i]
img_feat, img_pos_feat, num_bb = self._get_img_feat(img_fname)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
target = torch.Tensor(1).long()
target.data.fill_(ground_truth_label)
return input_ids, img_feat, img_pos_feat, attn_masks, target
def itm_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.cat(targets, dim=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
def _compute_ot_scatter(txt_lens, max_txt_len, joint_len):
ot_scatter = torch.arange(0, joint_len, dtype=torch.long
).unsqueeze(0).repeat(len(txt_lens), 1)
for i, tl in enumerate(txt_lens):
max_ind = max_txt_len + (joint_len-tl)
ot_scatter.data[i, tl:] = torch.arange(max_txt_len, max_ind,
dtype=torch.long).data
return ot_scatter
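# Illustrative trace: txt_lens=[2], max_txt_len=3, joint_len=5 gives the row
# [0, 1, 3, 4, 5]: text tokens keep their packed positions while image positions
# are shifted to start at max_txt_len, so the OT loss always sees a fixed
# [text | image] layout regardless of per-example text length.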
def _compute_pad(lens, max_len):
pad = torch.zeros(len(lens), max_len, dtype=torch.uint8)
for i, l in enumerate(lens):
pad.data[i, l:].fill_(1)
return pad
def itm_ot_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.cat(targets, dim=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
# OT inputs
max_tl = max(txt_lens)
max_nbb = max(num_bbs)
ot_scatter = _compute_ot_scatter(txt_lens, max_tl, attn_masks.size(1))
txt_pad = _compute_pad(txt_lens, max_tl)
img_pad = _compute_pad(num_bbs, max_nbb)
ot_inputs = {'ot_scatter': ot_scatter,
'scatter_max': ot_scatter.max().item(),
'txt_pad': txt_pad,
'img_pad': img_pad}
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets,
'ot_inputs': ot_inputs}
return batch
class ItmRankDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, neg_sample_size=1):
        assert neg_sample_size > 0, \
            "ItmRankDataset needs at least 1 negative sample"
super().__init__(txt_db, img_db)
txt2img = self.txt_db.txt2img
self.txt2img = {id_: txt2img[id_] for id_ in self.ids}
        # group texts by their ground-truth image
self.img2txts = defaultdict(list)
for id_, img in self.txt2img.items():
self.img2txts[img].append(id_)
self.img_name_list = list(self.img2txts.keys())
assert neg_sample_size > 0
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_fname = self.txt2img[gt_txt_id]
id_pairs = [(gt_txt_id, gt_img_fname)]
# sample negatives
neg_sample_img_ids = sample_negative(
self.img_name_list, [gt_img_fname], self.neg_sample_size)
neg_sample_txt_ids = sample_negative(
self.ids, self.img2txts[gt_img_fname], self.neg_sample_size)
id_pairs.extend([(gt_txt_id, neg_img_id)
for neg_img_id in neg_sample_img_ids] +
[(neg_txt_id, gt_img_fname)
for neg_txt_id in neg_sample_txt_ids])
inputs = self._collect_inputs(id_pairs)
assert len(inputs) == (1 + 2*self.neg_sample_size)
return inputs
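        # Each item is thus 1 positive pair plus `neg_sample_size` image negatives
        # and `neg_sample_size` text negatives, all in the same
        # (input_ids, img_feat, img_pos_feat, attn_masks) format.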
def _collect_inputs(self, id_pairs):
# create input features
inputs = []
for txt_id, img_id in id_pairs:
example = self.txt_db[txt_id]
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
# img input
img_feat, img_pos_feat, num_bb = self._get_img_feat(img_id)
# mask
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
inputs.append((input_ids, img_feat, img_pos_feat, attn_masks))
return inputs
def itm_rank_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks,
) = map(list, unzip(concat(i for i in inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
sample_size = len(inputs[0])
assert all(sample_size == len(i) for i in inputs)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'sample_size': sample_size}
return batch
class ItmRankDatasetHardNegFromText(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, neg_sample_size=1):
assert neg_sample_size > 0, "need at least 1 negative sample"
super().__init__(txt_db, img_db)
txt2img = self.txt_db.txt2img
self.txt2img = {id_: txt2img[id_] for id_ in self.ids}
self.img2txts = self.txt_db.img2txts
self.img_name_list = list(self.img2txts.keys())
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_fname = self.txt2img[gt_txt_id]
input_ids = self.txt_db[gt_txt_id]['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
input_ids = input_ids.unsqueeze(0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
neg_img_ids = sample_negative(
self.img_name_list, [gt_img_fname], self.neg_sample_size)
img_ids = [gt_img_fname] + neg_img_ids
# process image features (gt always first)
img_feats, img_pos_feats, num_bbs = map(
list, unzip(map(self._get_img_feat, img_ids)))
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
tl = input_ids.size(1)
attn_masks = torch.zeros(len(img_ids), max(num_bbs) + tl).long()
for i, nbb in enumerate(num_bbs):
attn_masks.data[i, :tl+nbb].fill_(1)
out_size = attn_masks.size(1)
gather_index = get_gather_index([tl]*len(img_ids), num_bbs,
len(img_ids), tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
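    # __getitem__ already assembles a complete mini-batch (ground-truth image first,
    # then the sampled negatives), so the DataLoader is expected to run with
    # batch_size=1 and itm_rank_hn_collate simply unwraps the singleton list.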
class ItmRankDatasetHardNegFromImage(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, neg_sample_size=1):
assert neg_sample_size > 0, "need at least 1 negative sample"
super().__init__(txt_db, img_db)
txt2img = self.txt_db.txt2img
self.txt2img = {id_: txt2img[id_] for id_ in self.ids}
self.img2txts = self.txt_db.img2txts
self.txt_name_list = list(self.txt2img.keys())
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_id = self.txt2img[gt_txt_id]
gt_txt_ids = self.img2txts[gt_img_id]
# process image features (gt always first)
img_feat, img_pos_feat, nbb = self._get_img_feat(gt_img_id)
img_feat = img_feat.unsqueeze(0)
img_pos_feat = img_pos_feat.unsqueeze(0)
# sample negative
neg_txt_ids = sample_negative(
self.txt_name_list, gt_txt_ids, self.neg_sample_size)
txt_ids = [gt_txt_id] + neg_txt_ids
# process text inputs
all_inputs = []
txt_lens = []
for txt_id in txt_ids:
input_ids = self.txt_db.combine_inputs(
self.txt_db[txt_id]['input_ids'])
all_inputs.append(input_ids)
txt_lens.append(len(input_ids))
input_ids = pad_sequence(all_inputs, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = torch.zeros(len(txt_ids), max(txt_lens) + nbb).long()
for i, tl in enumerate(txt_lens):
attn_masks.data[i, :tl+nbb].fill_(1)
out_size = attn_masks.size(1)
        # image features start right after the padded text block (input_ids.size(1))
        gather_index = get_gather_index(txt_lens, [nbb]*len(txt_ids),
                                        len(txt_ids), input_ids.size(1),
                                        out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
def itm_rank_hn_collate(inputs):
assert len(inputs) == 1
return inputs[0]
class ItmValDataset(DetectFeatTxtTokDataset):
""" For evaluating Image-Text-Retrieval task """
def __init__(self, db_dir, img_dir, mini_batch_size=400):
super().__init__(db_dir, img_dir)
del self.lens
self.txt2img = self.txt_db.txt2img
self.img2txts = self.txt_db.img2txts
self.all_img_ids = list(self.img2txts.keys())
assert len(self.img2txts) >= mini_batch_size > 0
self.bs = mini_batch_size
def _get_batch_ids(self, i):
gt_txt_id = self.ids[i]
gt_img_id = self.txt2img[gt_txt_id]
# sample fixed negatives for each gt image
i = self.all_img_ids.index(gt_img_id)
neg_st = i+1
neg_end = neg_st+self.bs-1
if neg_end > len(self.all_img_ids):
            # wrap around
neg_end -= len(self.all_img_ids)
neg_img_ids = (self.all_img_ids[neg_st:]
+ self.all_img_ids[:neg_end])
else:
neg_img_ids = self.all_img_ids[neg_st:neg_end]
assert len(neg_img_ids) == (self.bs - 1),\
"Did not sample enough neg samples"
return gt_img_id, neg_img_ids
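    # Negatives are simply the next (bs - 1) images after the ground-truth image in
    # `all_img_ids`, wrapping around the end of the list, so every text gets a
    # fixed candidate set of exactly `mini_batch_size` images.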
def __getitem__(self, i):
""" this returns list of mini-batches """
gt_img_id, neg_img_ids = self._get_batch_ids(i)
# NOTE 1st one is gt img
batch = self.get_batch(i, [gt_img_id] + neg_img_ids)
return batch
def get_batch(self, i, img_ids):
example = super().__getitem__(i)
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
input_ids = input_ids.unsqueeze(0).expand(len(img_ids), -1).clone()
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# process image features (gt always first)
img_feats, img_pos_feats, num_bbs = map(
list, unzip(map(self._get_img_feat, img_ids)))
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
tl = input_ids.size(1)
attn_masks = torch.zeros(len(img_ids), max(num_bbs) + tl).long()
for i, nbb in enumerate(num_bbs):
attn_masks.data[i, :tl+nbb].fill_(1)
out_size = attn_masks.size(1)
gather_index = get_gather_index([tl]*len(img_ids), num_bbs,
len(img_ids), tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
def itm_val_collate(inputs):
assert len(inputs) == 1, "input batch size > 1"
return inputs[0]
class ItmEvalDataset(ItmValDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.all_img_ids = sorted(copy.deepcopy(self.all_img_ids),
key=lambda i: self.img_db.name2nbb[i])
def __getitem__(self, i):
mini_batches = []
for st in range(0, len(self.all_img_ids), self.bs):
mini_batches.append(
self.get_batch(i, self.all_img_ids[st:st+self.bs]))
return mini_batches
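    # Full retrieval evaluation: every text is scored against *all* images, chunked
    # into mini-batches of `self.bs`; sorting images by region count (above) keeps
    # the padding inside each chunk small.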
itm_eval_collate = itm_val_collate
| 16,959 | 35.162047 | 79 | py |
UNITER | UNITER-master/model/vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VCR model
"""
from collections import defaultdict
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
# from .layer import GELU
from .model import (
UniterPreTrainedModel, UniterModel)
class UniterForVisualCommonsenseReasoning(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 2)
)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
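        # VCR inputs use 4 token types (the two pretrained text/image types plus
        # two extra text-segment types); the new rows 2 and 3 are initialized from
        # the original text-type row 0.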
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
if compute_loss:
targets = batch['targets']
vcr_loss = F.cross_entropy(
rank_scores, targets.squeeze(-1),
reduction='mean')
return vcr_loss
else:
rank_scores = rank_scores[:, 1:]
return rank_scores
| 3,024 | 37.782051 | 80 | py |
UNITER | UNITER-master/model/pretrain.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for pretraining
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU, BertOnlyMLMHead
from .model import UniterModel, UniterPreTrainedModel
from .ot import optimal_transport_dist
class RegionFeatureRegression(nn.Module):
" for MRM"
def __init__(self, hidden_size, feat_dim, img_linear_weight):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
GELU(),
LayerNorm(hidden_size, eps=1e-12))
self.weight = img_linear_weight
self.bias = nn.Parameter(torch.zeros(feat_dim))
def forward(self, input_):
hidden = self.net(input_)
output = F.linear(hidden, self.weight.t(), self.bias)
return output
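    # Weight tying: `self.weight` is the weight matrix of
    # UniterImageEmbeddings.img_linear, applied transposed here, so the head maps
    # hidden states back to the original `feat_dim` space; only the bias is new.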
class RegionClassification(nn.Module):
" for MRC(-kl)"
def __init__(self, hidden_size, label_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
GELU(),
LayerNorm(hidden_size, eps=1e-12),
nn.Linear(hidden_size, label_dim))
def forward(self, input_):
output = self.net(input_)
return output
class UniterForPretraining(UniterPreTrainedModel):
""" UNITER pretraining """
def __init__(self, config, img_dim, img_label_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.cls = BertOnlyMLMHead(
config, self.uniter.embeddings.word_embeddings.weight)
self.feat_regress = RegionFeatureRegression(
config.hidden_size, img_dim,
self.uniter.img_embeddings.img_linear.weight)
self.region_classifier = RegionClassification(
config.hidden_size, img_label_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.apply(self.init_weights)
def forward(self, batch, task, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
if task == 'mlm':
txt_labels = batch['txt_labels']
return self.forward_mlm(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
txt_labels, compute_loss)
elif task == 'mrfr':
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrfr_feat_target = batch['feat_targets']
return self.forward_mrfr(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrfr_feat_target, compute_loss)
elif task == 'itm':
targets = batch['targets']
ot_inputs = batch['ot_inputs']
return self.forward_itm(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
targets, ot_inputs, compute_loss)
elif task.startswith('mrc'):
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrc_label_target = batch['label_targets']
return self.forward_mrc(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrc_label_target, task, compute_loss)
else:
raise ValueError('invalid task')
def forward_mlm(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
txt_labels, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
# get only the text part
sequence_output = sequence_output[:, :input_ids.size(1), :]
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
txt_labels != -1)
prediction_scores = self.cls(masked_output)
if compute_loss:
masked_lm_loss = F.cross_entropy(prediction_scores,
txt_labels[txt_labels != -1],
reduction='none')
return masked_lm_loss
else:
return prediction_scores
def _compute_masked_hidden(self, hidden, mask):
""" get only the masked region (don't compute unnecessary hiddens) """
mask = mask.unsqueeze(-1).expand_as(hidden)
hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1))
return hidden_masked
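        # e.g. hidden (B, L, D) with N masked positions in total across the batch
        # yields (N, D); the prediction heads then run on those N rows only instead
        # of the full B*L sequence.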
def forward_mrfr(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
feat_targets, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks)
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_feat = self.feat_regress(masked_output)
if compute_loss:
mrfr_loss = F.mse_loss(prediction_feat, feat_targets,
reduction='none')
return mrfr_loss
else:
return prediction_feat
def forward_itm(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index, targets, ot_inputs,
compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
itm_scores = self.itm_output(pooled_output)
# OT loss
if ot_inputs is not None:
ot_scatter = ot_inputs['ot_scatter']
b = sequence_output.size(0)
tl = input_ids.size(1)
il = img_feat.size(1)
max_l = max(ot_inputs['scatter_max'] + 1, tl+il)
ot_scatter = ot_scatter.unsqueeze(-1).expand_as(sequence_output)
ctx_emb = torch.zeros(b, max_l, self.config.hidden_size,
dtype=sequence_output.dtype,
device=sequence_output.device
).scatter_(dim=1, index=ot_scatter,
src=sequence_output)
txt_emb = ctx_emb[:, :tl, :]
img_emb = ctx_emb[:, tl:tl+il, :]
txt_pad = ot_inputs['txt_pad']
img_pad = ot_inputs['img_pad']
# NOTE: run in fp32 for stability
ot_dist = optimal_transport_dist(txt_emb.float(), img_emb.float(),
txt_pad, img_pad).to(txt_emb)
ot_pos_dist = ot_dist.masked_select(targets == 1)
ot_neg_dist = ot_dist.masked_select(targets == 0)
ot_loss = (ot_pos_dist, ot_neg_dist)
else:
ot_loss = None
if compute_loss:
itm_loss = F.cross_entropy(itm_scores, targets, reduction='none')
return itm_loss, ot_loss
else:
return itm_scores, ot_loss
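    # The OT distances are returned split into matched-pair and mismatched-pair
    # groups so the caller can form a ranking-style objective (e.g. keeping the
    # matched distance below the mismatched one) next to the binary ITM loss.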
def forward_mrc(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
label_targets, task, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks)
# only compute masked regions for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_soft_label = self.region_classifier(masked_output)
if compute_loss:
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
mrc_loss = F.kl_div(
prediction_soft_label, label_targets, reduction='none')
else:
# background class should not be the target
label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1
mrc_loss = F.cross_entropy(
prediction_soft_label, label_targets,
ignore_index=0, reduction='none')
return mrc_loss
else:
return prediction_soft_label
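    # Plain MRC collapses the detector's soft label to its argmax over the
    # non-background classes (index shifted by +1, background kept at 0 /
    # ignore_index); the "-kl" variant instead matches the full soft distribution
    # with a KL-divergence loss.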
| 10,155 | 43.156522 | 78 | py |
UNITER | UNITER-master/model/layer.py | """
BERT layers from the huggingface implementation
(https://github.com/huggingface/transformers)
"""
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
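# e.g. gelu(torch.tensor([0.0, 1.0])) ~= tensor([0.0000, 0.8413]); the erf form used
# here is exact, unlike the tanh approximation quoted in the docstring.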
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class GELU(nn.Module):
def forward(self, input_):
output = gelu(input_)
return output
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(
torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config,
bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
| 9,378 | 39.081197 | 104 | py |
UNITER | UNITER-master/model/ve.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for VE model
"""
from .vqa import UniterForVisualQuestionAnswering
class UniterForVisualEntailment(UniterForVisualQuestionAnswering):
""" Finetune UNITER for VE
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim, 3)
| 342 | 21.866667 | 66 | py |
UNITER | UNITER-master/model/model.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from .layer import BertLayer, BertPooler
logger = logging.getLogger(__name__)
class UniterConfig(object):
"""Configuration class to store the configuration of a `UniterModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs UniterConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
`UniterModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer
encoder.
num_attention_heads: Number of attention heads for each attention
layer in the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e.
feed-forward) layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string)
in the encoder and pooler. If string, "gelu", "relu" and
"swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully
connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this
model might ever be used with. Typically set this to something
large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed
into `UniterModel`.
            initializer_range: The stddev of the truncated_normal_initializer
for initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file,
"r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size "
"(int) or the path to a pretrained model config "
"file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `UniterConfig` from a
Python dictionary of parameters."""
config = UniterConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `UniterConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
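    # Minimal usage sketch (paths/values are illustrative; `log/model.json` mirrors
    # the config file dumped at training time):
    #   config = UniterConfig.from_json_file('log/model.json')
    #   config = UniterConfig(28996)   # or build directly from an example vocab size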
class UniterPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, UniterConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of "
"class `UniterConfig`. To create a model from a Google "
"pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses
# truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, FusedLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
"""
Instantiate a UniterPreTrainedModel from a pre-trained model file or a
pytorch state dict.
Params:
config_file: config json file
            state_dict: a state dictionary
*inputs, **kwargs: additional input for the specific Uniter class
"""
# Load config
config = UniterConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = ({} if metadata is None
else metadata.get(prefix[:-1], {}))
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys,
unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.')
for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from "
"pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in "
"{}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for '
'{}:\n\t{}'.format(
model.__class__.__name__,
"\n\t".join(error_msgs)))
return model
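        # Typical call (sketch; the checkpoint path, img_dim and answer count are
        # assumptions, not values defined in this file):
        #   ckpt = torch.load('uniter-base.pt', map_location='cpu')
        #   model = UniterForVisualQuestionAnswering.from_pretrained(
        #       'log/model.json', ckpt, img_dim=2048, num_answer=3129)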
class UniterTextEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size,
config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings,
config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model
# variable name and be able to load any TensorFlow checkpoint file
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, position_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (words_embeddings
+ position_embeddings
+ token_type_embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterImageEmbeddings(nn.Module):
def __init__(self, config, img_dim):
super().__init__()
self.img_linear = nn.Linear(img_dim, config.hidden_size)
self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_linear = nn.Linear(7, config.hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
# tf naming convention for layer norm
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
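    # img_pos_feat is the 7-d box encoding (normalized x1, y1, x2, y2, w, h, area)
    # expected by pos_linear; region features and box geometry are projected and
    # layer-normed separately, then summed with the (image) type embedding.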
class UniterEncoder(nn.Module):
def __init__(self, config):
super().__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, input_, attention_mask,
output_all_encoded_layers=True):
all_encoder_layers = []
hidden_states = input_
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class UniterModel(UniterPreTrainedModel):
""" Modification for Joint Vision-Language Encoding
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.embeddings = UniterTextEmbeddings(config)
self.img_embeddings = UniterImageEmbeddings(config, img_dim)
self.encoder = UniterEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _compute_txt_embeddings(self, input_ids, position_ids,
txt_type_ids=None):
output = self.embeddings(input_ids, position_ids, txt_type_ids)
return output
def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
img_type_ids=None):
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.embeddings.token_type_embeddings(
img_type_ids)
output = self.img_embeddings(img_feat, img_pos_feat,
img_type_embeddings, img_masks)
return output
def _compute_img_txt_embeddings(self, input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks=None,
txt_type_ids=None, img_type_ids=None):
txt_emb = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
# align back to most compact input
gather_index = gather_index.unsqueeze(-1).expand(
-1, -1, self.config.hidden_size)
embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
dim=1, index=gather_index)
return embedding_output
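        # gather_index re-packs the concatenated [padded text | image] embeddings so
        # each example's regions directly follow its last real text token, e.g. with
        # txt_len 2, padded text length 3 and 2 regions the row picks positions
        # [0, 1, 3, 4, ...].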
def forward(self, input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index=None, img_masks=None,
output_all_encoded_layers=True,
txt_type_ids=None, img_type_ids=None):
# compute self-attention mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding layer
if input_ids is None:
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
elif img_feat is None:
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks, txt_type_ids, img_type_ids)
encoded_layers = self.encoder(
embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers
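    # `attention_mask` covers the packed [text; image] sequence; turning it into
    # (1 - mask) * -10000 and adding it to the raw attention scores is the usual
    # additive-mask trick, kept in the model's dtype for fp16 safety.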
| 15,887 | 42.173913 | 79 | py |
UNITER | UNITER-master/model/vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VQA model
"""
from collections import defaultdict
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU
from .model import UniterPreTrainedModel, UniterModel
class UniterForVisualQuestionAnswering(UniterPreTrainedModel):
""" Finetune UNITER for VQA
"""
def __init__(self, config, img_dim, num_answer):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.vqa_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
GELU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, num_answer)
)
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
answer_scores = self.vqa_output(pooled_output)
if compute_loss:
targets = batch['targets']
vqa_loss = F.binary_cross_entropy_with_logits(
answer_scores, targets, reduction='none')
return vqa_loss
else:
return answer_scores
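    # VQA targets are soft score vectors (one score per candidate answer), hence
    # the element-wise binary cross-entropy with logits instead of a single-label
    # cross-entropy.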
| 1,860 | 34.113208 | 75 | py |
UNITER | UNITER-master/model/pretrain_vcr.py | from .pretrain import UniterForPretraining
from torch import nn
from .layer import BertOnlyMLMHead
from collections import defaultdict
from torch.nn import functional as F
import torch
class UniterForPretrainingForVCR(UniterForPretraining):
""" 2nd Stage Pretrain UNITER for VCR
"""
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
self.cls = BertOnlyMLMHead(
self.uniter.config, self.uniter.embeddings.word_embeddings.weight)
def forward(self, batch, task, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
if task == 'mlm':
txt_labels = batch['txt_labels']
return self.forward_mlm(input_ids, position_ids,
txt_type_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
txt_labels, compute_loss)
elif task == 'mrfr':
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrfr_feat_target = batch['feat_targets']
return self.forward_mrfr(input_ids, position_ids,
txt_type_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrfr_feat_target, compute_loss)
elif task.startswith('mrc'):
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrc_label_target = batch['label_targets']
return self.forward_mrc(input_ids, position_ids,
txt_type_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrc_label_target, task, compute_loss)
else:
raise ValueError('invalid task')
# MLM
def forward_mlm(self, input_ids, position_ids, txt_type_ids, img_feat,
img_pos_feat, attention_mask, gather_index,
txt_labels, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
# get only the text part
sequence_output = sequence_output[:, :input_ids.size(1), :]
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
txt_labels != -1)
prediction_scores = self.cls(masked_output)
if compute_loss:
masked_lm_loss = F.cross_entropy(prediction_scores,
txt_labels[txt_labels != -1],
reduction='none')
return masked_lm_loss
else:
return prediction_scores
# MRFR
def forward_mrfr(self, input_ids, position_ids, txt_type_ids,
img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
feat_targets, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks,
txt_type_ids=txt_type_ids)
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_feat = self.feat_regress(masked_output)
if compute_loss:
mrfr_loss = F.mse_loss(prediction_feat, feat_targets,
reduction='none')
return mrfr_loss
else:
return prediction_feat
# MRC
def forward_mrc(self, input_ids, position_ids, txt_type_ids,
img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
label_targets, task, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks,
txt_type_ids=txt_type_ids)
# only compute masked regions for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_soft_label = self.region_classifier(masked_output)
if compute_loss:
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
mrc_loss = F.kl_div(
prediction_soft_label, label_targets, reduction='none')
else:
# background class should not be the target
label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1
mrc_loss = F.cross_entropy(
prediction_soft_label, label_targets,
ignore_index=0, reduction='none')
return mrc_loss
else:
return prediction_soft_label
| 7,123 | 46.493333 | 80 | py |
UNITER | UNITER-master/model/nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for NLVR2 model
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
from .model import UniterPreTrainedModel, UniterModel
from .attention import MultiheadAttention
class UniterForNlvr2Paired(UniterPreTrainedModel):
""" Finetune UNITER for NLVR2 (paired format)
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.nlvr2_output = nn.Linear(config.hidden_size*2, 2)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(3, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings\
.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
new_emb.weight.data[2, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
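        # Pretrained UNITER has only 2 token types (text / image); NLVR2 feeds two
        # images per example, so a 3rd type is added and initialized from the
        # existing image-type row.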
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
img_type_ids = batch['img_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
img_type_ids=img_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
# concat CLS of the pair
n_pair = pooled_output.size(0) // 2
reshaped_output = pooled_output.contiguous().view(n_pair, -1)
answer_scores = self.nlvr2_output(reshaped_output)
if compute_loss:
targets = batch['targets']
nlvr2_loss = F.cross_entropy(
answer_scores, targets, reduction='none')
return nlvr2_loss
else:
return answer_scores
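    # Every NLVR2 instance is encoded as two <text, image> streams (left and right
    # image); their [CLS] vectors are concatenated pair-wise, so the classifier
    # sees 2 * hidden_size features per instance.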
class UniterForNlvr2Triplet(UniterPreTrainedModel):
""" Finetune UNITER for NLVR2 (triplet format)
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.nlvr2_output = nn.Linear(config.hidden_size, 2)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(3, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings\
.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
new_emb.weight.data[2, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
img_type_ids = batch['img_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
img_type_ids=img_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
answer_scores = self.nlvr2_output(pooled_output)
if compute_loss:
targets = batch['targets']
nlvr2_loss = F.cross_entropy(
answer_scores, targets, reduction='none')
return nlvr2_loss
else:
return answer_scores
class AttentionPool(nn.Module):
""" attention pooling layer """
def __init__(self, hidden_size, drop=0.0):
super().__init__()
self.fc = nn.Sequential(nn.Linear(hidden_size, 1), nn.ReLU())
self.dropout = nn.Dropout(drop)
def forward(self, input_, mask=None):
"""input: [B, T, D], mask = [B, T]"""
score = self.fc(input_).squeeze(-1)
if mask is not None:
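            # additive masking: padded positions receive a large negative
            # score so that softmax assigns them (near-)zero weight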
mask = mask.to(dtype=input_.dtype) * -1e4
score = score + mask
norm_score = self.dropout(F.softmax(score, dim=1))
output = norm_score.unsqueeze(1).matmul(input_).squeeze(1)
return output
class UniterForNlvr2PairedAttn(UniterPreTrainedModel):
""" Finetune UNITER for NLVR2
(paired format with additional attention layer)
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.attn1 = MultiheadAttention(config.hidden_size,
config.num_attention_heads,
config.attention_probs_dropout_prob)
self.attn2 = MultiheadAttention(config.hidden_size,
config.num_attention_heads,
config.attention_probs_dropout_prob)
self.fc = nn.Sequential(
nn.Linear(2*config.hidden_size, config.hidden_size),
nn.ReLU(),
nn.Dropout(config.hidden_dropout_prob))
self.attn_pool = AttentionPool(config.hidden_size,
config.attention_probs_dropout_prob)
self.nlvr2_output = nn.Linear(2*config.hidden_size, 2)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(3, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings\
.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
new_emb.weight.data[2, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
img_type_ids = batch['img_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
img_type_ids=img_type_ids)
# separate left image and right image
bs, tl, d = sequence_output.size()
left_out, right_out = sequence_output.contiguous().view(
bs//2, tl*2, d).chunk(2, dim=1)
# bidirectional attention
mask = attn_masks == 0
left_mask, right_mask = mask.contiguous().view(bs//2, tl*2
).chunk(2, dim=1)
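        # MultiheadAttention expects (seq_len, batch, dim) inputs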
left_out = left_out.transpose(0, 1)
right_out = right_out.transpose(0, 1)
l2r_attn, _ = self.attn1(left_out, right_out, right_out,
key_padding_mask=right_mask)
r2l_attn, _ = self.attn2(right_out, left_out, left_out,
key_padding_mask=left_mask)
left_out = self.fc(torch.cat([l2r_attn, left_out], dim=-1)
).transpose(0, 1)
right_out = self.fc(torch.cat([r2l_attn, right_out], dim=-1)
).transpose(0, 1)
# attention pooling and final prediction
left_out = self.attn_pool(left_out, left_mask)
right_out = self.attn_pool(right_out, right_mask)
answer_scores = self.nlvr2_output(
torch.cat([left_out, right_out], dim=-1))
if compute_loss:
targets = batch['targets']
nlvr2_loss = F.cross_entropy(
answer_scores, targets, reduction='none')
return nlvr2_loss
else:
return answer_scores
| 8,505 | 40.492683 | 76 | py |
UNITER | UNITER-master/model/ot.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Wasserstein Distance (Optimal Transport)
"""
import torch
from torch.nn import functional as F
def cost_matrix_cosine(x, y, eps=1e-5):
""" Compute cosine distnace across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]"""
assert x.dim() == y.dim()
assert x.size(0) == y.size(0)
assert x.size(2) == y.size(2)
x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))
cosine_dist = 1 - cosine_sim
return cosine_dist
def trace(x):
""" compute trace of input tensor (batched) """
b, m, n = x.size()
assert m == n
mask = torch.eye(n, dtype=torch.uint8, device=x.device
).unsqueeze(0).expand_as(x)
trace = x.masked_select(mask).contiguous().view(
b, n).sum(dim=-1, keepdim=False)
return trace
@torch.no_grad()
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
""" [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]"""
b, m, n = C.size()
sigma = torch.ones(b, m, dtype=C.dtype, device=C.device
) / x_len.unsqueeze(1)
T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
A = torch.exp(-C.transpose(1, 2)/beta)
# mask padded positions
sigma.masked_fill_(x_pad, 0)
joint_pad = joint_pad.transpose(1, 2)
T.masked_fill_(joint_pad, 0)
A.masked_fill_(joint_pad, 0)
# broadcastable lengths
x_len = x_len.unsqueeze(1).unsqueeze(2)
y_len = y_len.unsqueeze(1).unsqueeze(2)
# mask to zero out padding in delta and sigma
x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
for _ in range(iteration):
Q = A * T # bs * n * m
sigma = sigma.view(b, m, 1)
for _ in range(k):
delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
T = delta.view(b, n, 1) * Q * sigma
T.masked_fill_(joint_pad, 0)
return T
def optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad,
beta=0.5, iteration=50, k=1):
""" [B, M, D], [B, N, D], [B, M], [B, N]"""
cost = cost_matrix_cosine(txt_emb, img_emb)
# mask the padded inputs
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)
).to(dtype=cost.dtype)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)
).to(dtype=cost.dtype)
T = ipot(cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad,
beta, iteration, k)
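    # transport cost <C, T> per example, computed via a trace; the plan T is
    # detached so gradients only flow through the cost matrix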
distance = trace(cost.matmul(T.detach()))
return distance
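if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original
    # module): random, unpadded text/image embeddings. Assumes a PyTorch
    # version that still accepts the uint8 mask used in trace() above.
    B, M, N, D = 2, 5, 7, 16
    txt = torch.randn(B, M, D)
    img = torch.randn(B, N, D)
    txt_pad = torch.zeros(B, M, dtype=torch.bool)
    img_pad = torch.zeros(B, N, dtype=torch.bool)
    dist = optimal_transport_dist(txt, img, txt_pad, img_pad)
    print(dist.shape)  # torch.Size([2]): one OT distance per example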
| 2,866 | 32.337209 | 74 | py |
UNITER | UNITER-master/model/attention.py | """
copy of the multi-head attention code from PyTorch
(https://github.com/pytorch/pytorch)
"""
import warnings
import torch
from torch.nn import Module, Parameter, Linear
from torch.nn.init import xavier_normal_, xavier_uniform_, constant_
from torch.nn.functional import linear, softmax, dropout
def multi_head_attention_forward(query, # type: Tensor
key, # type: Tensor
value, # type: Tensor
embed_dim_to_check, # type: int
num_heads, # type: int
in_proj_weight, # type: Tensor
in_proj_bias, # type: Tensor
bias_k, # type: Optional[Tensor]
bias_v, # type: Optional[Tensor]
add_zero_attn, # type: bool
dropout_p, # type: float
out_proj_weight, # type: Tensor
out_proj_bias, # type: Tensor
training=True, # type: bool
key_padding_mask=None, # type: Optional[Tensor]
need_weights=True, # type: bool
attn_mask=None, # type: Optional[Tensor]
use_separate_proj_weight=False, # type: bool
q_proj_weight=None, # type: Optional[Tensor]
k_proj_weight=None, # type: Optional[Tensor]
v_proj_weight=None, # type: Optional[Tensor]
static_k=None, # type: Optional[Tensor]
static_v=None # type: Optional[Tensor]
):
# type: (...) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention layer).
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
- attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
qkv_same = torch.equal(query, key) and torch.equal(key, value)
kv_same = torch.equal(key, value)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if use_separate_proj_weight is not True:
if qkv_same:
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif kv_same:
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask,
torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
        vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention layer).
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
- attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
if not hasattr(self, '_qkv_same_embed_dim'):
warnings.warn('A new version of MultiheadAttention module has been implemented. \
Please re-train your model with the new module',
UserWarning)
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
| 19,463 | 47.297767 | 130 | py |
UNITER | UNITER-master/model/re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for RE model
"""
from collections import defaultdict
import torch
from torch import nn
import random
import numpy as np
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU
from .model import UniterPreTrainedModel, UniterModel
class UniterForReferringExpressionComprehension(UniterPreTrainedModel):
""" Finetune UNITER for RE
"""
def __init__(self, config, img_dim, loss="cls",
margin=0.2, hard_ratio=0.3, mlp=1):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
if mlp == 1:
self.re_output = nn.Linear(config.hidden_size, 1)
elif mlp == 2:
self.re_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size),
GELU(),
LayerNorm(config.hidden_size, eps=1e-12),
nn.Linear(config.hidden_size, 1)
)
else:
raise ValueError("MLP restricted to be 1 or 2 layers.")
self.loss = loss
assert self.loss in ['cls', 'rank']
if self.loss == 'rank':
self.margin = margin
self.hard_ratio = hard_ratio
else:
self.crit = nn.CrossEntropyLoss(reduction='none')
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
obj_masks = batch['obj_masks']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False)
# get only the region part
txt_lens, num_bbs = batch["txt_lens"], batch["num_bbs"]
sequence_output = self._get_image_hidden(
sequence_output, txt_lens, num_bbs)
# re score (n, max_num_bb)
scores = self.re_output(sequence_output).squeeze(2)
scores = scores.masked_fill(obj_masks, -1e4) # mask out non-objects
if compute_loss:
targets = batch["targets"]
if self.loss == 'cls':
ce_loss = self.crit(scores, targets.squeeze(-1)) # (n, ) as no reduction
return ce_loss
else:
# ranking
_n = len(num_bbs)
# positive (target)
pos_ix = targets
pos_sc = scores.gather(1, pos_ix.view(_n, 1)) # (n, 1)
pos_sc = torch.sigmoid(pos_sc).view(-1) # (n, ) sc[0, 1]
# negative
neg_ix = self.sample_neg_ix(scores, targets, num_bbs)
neg_sc = scores.gather(1, neg_ix.view(_n, 1)) # (n, 1)
neg_sc = torch.sigmoid(neg_sc).view(-1) # (n, ) sc[0, 1]
# ranking
mm_loss = torch.clamp(
self.margin + neg_sc - pos_sc, 0) # (n, )
return mm_loss
else:
# (n, max_num_bb)
return scores
def sample_neg_ix(self, scores, targets, num_bbs):
"""
Inputs:
:scores (n, max_num_bb)
:targets (n, )
:num_bbs list of [num_bb]
return:
:neg_ix (n, ) easy/hard negative (!= target)
"""
neg_ix = []
cand_ixs = torch.argsort(
scores, dim=-1, descending=True) # (n, num_bb)
for i in range(len(num_bbs)):
num_bb = num_bbs[i]
if np.random.uniform(0, 1, 1) < self.hard_ratio:
# sample hard negative, w/ highest score
for ix in cand_ixs[i].tolist():
if ix != targets[i]:
assert ix < num_bb, f'ix={ix}, num_bb={num_bb}'
neg_ix.append(ix)
break
else:
# sample easy negative, i.e., random one
ix = random.randint(0, num_bb-1) # [0, num_bb-1]
while ix == targets[i]:
ix = random.randint(0, num_bb-1)
neg_ix.append(ix)
neg_ix = torch.tensor(neg_ix).type(targets.type())
assert neg_ix.numel() == targets.numel()
return neg_ix
def _get_image_hidden(self, sequence_output, txt_lens, num_bbs):
"""
Extracting the img_hidden part from sequence_output.
Inputs:
- sequence_output: (n, txt_len+num_bb, hid_size)
- txt_lens : [txt_len]
- num_bbs : [num_bb]
Output:
- img_hidden : (n, max_num_bb, hid_size)
"""
outputs = []
max_bb = max(num_bbs)
hid_size = sequence_output.size(-1)
for seq_out, len_, nbb in zip(sequence_output.split(1, dim=0),
txt_lens, num_bbs):
img_hid = seq_out[:, len_:len_+nbb, :]
if nbb < max_bb:
img_hid = torch.cat(
[img_hid, self._get_pad(
img_hid, max_bb-nbb, hid_size)],
dim=1)
outputs.append(img_hid)
img_hidden = torch.cat(outputs, dim=0)
return img_hidden
def _get_pad(self, t, len_, hidden_size):
pad = torch.zeros(1, len_, hidden_size, dtype=t.dtype, device=t.device)
return pad
| 5,705 | 36.051948 | 89 | py |
UNITER | UNITER-master/model/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for ITM model
"""
from collections import defaultdict
import torch
from torch import nn
from .model import UniterPreTrainedModel, UniterModel
class UniterForImageTextRetrieval(UniterPreTrainedModel):
""" Finetune UNITER for image text retrieval
"""
def __init__(self, config, img_dim, margin=0.2):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.rank_output = nn.Linear(config.hidden_size, 1)
self.margin = margin
self.apply(self.init_weights)
def init_output(self):
""" need to be called after from pretrained """
self.rank_output.weight.data = self.itm_output.weight.data[1:, :]
self.rank_output.bias.data = self.itm_output.bias.data[1:]
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.rank_output(pooled_output)
if compute_loss:
# triplet loss
rank_scores_sigmoid = torch.sigmoid(rank_scores)
sample_size = batch['sample_size']
scores = rank_scores_sigmoid.contiguous().view(-1, sample_size)
pos = scores[:, :1]
neg = scores[:, 1:]
rank_loss = torch.clamp(self.margin + neg - pos, 0)
return rank_loss
else:
return rank_scores
class UniterForImageTextRetrievalHardNeg(UniterForImageTextRetrieval):
""" Finetune UNITER for image text retrieval
"""
def __init__(self, config, img_dim, margin=0.2, hard_size=16):
super().__init__(config, img_dim, margin)
self.hard_size = hard_size
def forward(self, batch, sample_from='t', compute_loss=True):
# expect same input_ids for all pairs
batch_size = batch['attn_masks'].size(0)
input_ids = batch['input_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
if sample_from == 't':
if input_ids.size(0) == 1:
batch['input_ids'] = input_ids.expand(batch_size, -1)
elif sample_from == 'i':
if img_feat.size(0) == 1:
batch['img_feat'] = img_feat.expand(batch_size, -1, -1)
if img_pos_feat.size(0) == 1:
batch['img_pos_feat'] = img_pos_feat.expand(batch_size, -1, -1)
else:
raise ValueError()
if self.training and compute_loss:
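            # hard negative mining: score all candidates without gradients,
            # keep the positive (index 0) plus the hard_size highest-scoring
            # negatives, then train only on this reduced batch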
with torch.no_grad():
self.eval()
scores = super().forward(batch, compute_loss=False)
hard_batch = self._get_hard_batch(batch, scores, sample_from)
self.train()
return super().forward(hard_batch, compute_loss=True)
else:
return super().forward(batch, compute_loss)
def _get_hard_batch(self, batch, scores, sample_from='t'):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
hard_batch = {'sample_size': self.hard_size + 1}
# NOTE first example is positive
hard_indices = scores.squeeze(-1)[1:].topk(
self.hard_size, sorted=False)[1] + 1
indices = torch.cat([torch.zeros(1, dtype=torch.long,
device=hard_indices.device),
hard_indices])
attention_mask = attention_mask.index_select(0, indices)
gather_index = gather_index.index_select(0, indices)
if position_ids.size(0) != 1:
position_ids = position_ids[:self.hard_size+1]
if sample_from == 't':
# cut to minimum padding
max_len = attention_mask.sum(dim=1).max().item()
max_i = max_len - input_ids.size(1)
attention_mask = attention_mask[:, :max_len]
gather_index = gather_index[:, :max_len]
img_feat = img_feat.index_select(0, indices)[:, :max_i, :]
img_pos_feat = img_pos_feat.index_select(0, indices)[:, :max_i, :]
# expect same input_ids for all pairs
input_ids = input_ids[:self.hard_size+1]
elif sample_from == 'i':
input_ids = input_ids.index_select(0, indices)
# expect same image features for all pairs
img_feat = img_feat[:self.hard_size+1]
img_pos_feat = img_pos_feat[:self.hard_size+1]
else:
raise ValueError()
hard_batch['input_ids'] = input_ids
hard_batch['position_ids'] = position_ids
hard_batch['img_feat'] = img_feat
hard_batch['img_pos_feat'] = img_pos_feat
hard_batch['attn_masks'] = attention_mask
hard_batch['gather_index'] = gather_index
return hard_batch
| 5,619 | 39.142857 | 79 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/setup.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
from pathlib import Path
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import find_packages, setup
cwd = Path(__file__).resolve().parent
package_name = "compressai"
version = "1.1.9.dev0"
git_hash = "unknown"
try:
git_hash = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode().strip()
)
except (FileNotFoundError, subprocess.CalledProcessError):
pass
def write_version_file():
path = cwd / package_name / "version.py"
with path.open("w") as f:
f.write(f'__version__ = "{version}"\n')
f.write(f'git_version = "{git_hash}"\n')
write_version_file()
def get_extensions():
ext_dirs = cwd / package_name / "cpp_exts"
ext_modules = []
# Add rANS module
rans_lib_dir = cwd / "third_party/ryg_rans"
rans_ext_dir = ext_dirs / "rans"
extra_compile_args = ["-std=c++17"]
if os.getenv("DEBUG_BUILD", None):
extra_compile_args += ["-O0", "-g", "-UNDEBUG"]
else:
extra_compile_args += ["-O3"]
ext_modules.append(
Pybind11Extension(
name=f"{package_name}.ans",
sources=[str(s) for s in rans_ext_dir.glob("*.cpp")],
language="c++",
include_dirs=[rans_lib_dir, rans_ext_dir],
extra_compile_args=extra_compile_args,
)
)
# Add ops
ops_ext_dir = ext_dirs / "ops"
ext_modules.append(
Pybind11Extension(
name=f"{package_name}._CXX",
sources=[str(s) for s in ops_ext_dir.glob("*.cpp")],
language="c++",
extra_compile_args=extra_compile_args,
)
)
return ext_modules
TEST_REQUIRES = ["pytest", "pytest-cov"]
DEV_REQUIRES = TEST_REQUIRES + [
"black",
"flake8",
"flake8-bugbear",
"flake8-comprehensions",
"isort",
"mypy",
]
def get_extra_requirements():
extras_require = {
"test": TEST_REQUIRES,
"dev": DEV_REQUIRES,
"doc": ["sphinx", "furo"],
"tutorials": ["jupyter", "ipywidgets"],
}
extras_require["all"] = {req for reqs in extras_require.values() for req in reqs}
return extras_require
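# The optional dependency groups above map to pip "extras", e.g. an editable
# development install: pip install -e ".[dev]"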
setup(
name=package_name,
version=version,
description="A PyTorch library and evaluation platform for end-to-end compression research",
url="https://github.com/InterDigitalInc/CompressAI",
author="InterDigital AI Lab",
author_email="[email protected]",
packages=find_packages(exclude=("tests",)),
zip_safe=False,
python_requires=">=3.6",
install_requires=[
"numpy",
"scipy",
"matplotlib",
"torch>=1.7.1",
"torchvision",
"pytorch-msssim",
],
extras_require=get_extra_requirements(),
license="Apache-2",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
ext_modules=get_extensions(),
cmdclass={"build_ext": build_ext},
)
| 5,093 | 31.653846 | 96 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/test.py | import os, glob
import math
import logging
import time
import argparse
from collections import OrderedDict
import json
import torch
import torch.nn.functional as F
import numpy as np
from criterions.criterion import Criterion
import options.options as option
import utils.util as util
import compressai
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
compressai.set_entropy_coder(compressai.available_entropy_coders()[0])
#### options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to options YMAL file.', default='./conf/test/sample.yml')
opt = option.parse(parser.parse_args().opt, is_train=False)
opt = option.dict_to_nonedict(opt)
util.mkdir(opt['path']['results_root'])
util.mkdir(opt['path']['checkpoint_updated'])
util.setup_logger('base', opt['path']['log'], opt['name'], level=logging.INFO, screen=True, tofile=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
#### loading test model if exists
update = opt['path'].get('update', False)
if update and opt['path'].get('checkpoint', None):
device_id = torch.cuda.current_device()
checkpoint = torch.load(opt['path']['checkpoint'], map_location=lambda storage, loc: storage.cuda(device_id))
model = util.create_model(opt, checkpoint, None, rank=0)
logger.info('model checkpoint loaded from {:s}'.format(opt['path']['checkpoint']))
model.update(force=True)
# save the updated checkpoint
state_dict = model.state_dict()
for f in os.listdir(opt['path']['checkpoint_updated']):
os.remove(os.path.join(opt['path']['checkpoint_updated'], f))
filepath = os.path.join(opt['path']['checkpoint_updated'], opt['path']['checkpoint'].split('/')[-1])
torch.save(state_dict, filepath)
logger.info('updated model checkpoint saved to {:s}'.format(filepath))
else:
try:
state_dict_path = os.path.join(opt['path']['checkpoint_updated'], os.listdir(opt['path']['checkpoint_updated'])[0])
state_dict = torch.load(state_dict_path)
model = util.create_model(opt, None, state_dict, rank=0)
logger.info('updated model checkpoint loaded from {:s}'.format(state_dict_path))
    except Exception:
        raise Exception('Chose not to update from a model checkpoint, but failed to load an updated model checkpoint (state_dict).')
checkpoint = None
state_dict = None
model.eval()
logger.info('Model parameter numbers: {:d}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
#### Create test dataset and dataloader
runs = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
if phase == 'train' or phase == 'val':
pass
else:
device = 'cuda' if dataset_opt['cuda'] else 'cpu'
estimation = dataset_opt['estimation']
test_set = util.create_dataset(dataset_opt)
test_loader = util.create_dataloader(test_set, dataset_opt, opt, None)
logger.info('Number of test samples in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
runs.append((device, estimation, test_loader))
for device, estimation, test_loader in runs:
model = model.to(device)
phase = test_loader.dataset.phase
mode = 'est' if estimation else 'coder'
logger.info('\nTesting [{:s}: {:s}]...'.format(mode, phase))
save_dir = os.path.join(opt['path']['results_root'], mode, phase)
util.mkdir(save_dir)
for f in glob.glob(os.path.join(save_dir, '*')):
os.remove(f)
test_metrics = {
'psnr': [],
'ms-ssim': [],
'bpp': [],
'encoding_time': [],
'decoding_time': [],
}
test_start_time = time.time()
for i, data in enumerate(test_loader):
logger.info('{:20s} - testing sample {:04d}'.format(phase, i))
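        # datasets without ground truth yield a single tensor (batch size 1);
        # otherwise a (gt, noise) pair is returned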
if len(data) == 1:
gt = None
noise = data.to(device)
noise = util.cropping(noise)
else:
gt, noise = data
gt = gt.to(device)
gt = util.cropping(gt)
noise = noise.to(device)
noise = util.cropping(noise)
# estimation mode using model.forward()
if estimation:
start = time.time()
out_net = model.forward(noise, gt)
elapsed_time = time.time() - start
enc_time = dec_time = elapsed_time / 2
num_pixels = noise.size(0) * noise.size(2) * noise.size(3)
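            # estimated rate: -log2(likelihood) summed over all latents,
            # normalized by the number of pixels -> bits per pixel (bpp)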
bpp = sum(
(torch.log(likelihoods).sum() / (-math.log(2) * num_pixels))
for likelihoods in out_net["likelihoods"].values()
)
rec = out_net["x_hat"]
# coder mode using model.compress() and model.decompress()
else:
start = time.time()
out_enc = model.compress(noise)
enc_time = time.time() - start
start = time.time()
out_dec = model.decompress(out_enc["strings"], out_enc["shape"])
dec_time = time.time() - start
num_pixels = noise.size(0) * noise.size(2) * noise.size(3)
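            # actual rate: total length of the entropy-coded bitstreams in
            # bits, divided by the number of pixels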
bpp = sum(len(s[0]) for s in out_enc["strings"]) * 8.0 / num_pixels
rec = out_dec["x_hat"]
cur_psnr = util.psnr(gt, rec.clamp(0, 1)) if gt is not None else 0.
cur_ssim = util.ms_ssim(gt, rec.clamp(0, 1), data_range=1.0).item() if gt is not None else 0.
denoise = util.torch2img(rec[0])
denoise.save(os.path.join(save_dir, '{:04d}_{:.3f}dB_{:.4f}_{:.4f}bpp.png'.format(i, cur_psnr, cur_ssim, bpp)))
if gt is not None:
gt = util.torch2img(gt[0])
gt.save(os.path.join(save_dir, '{:04d}_gt.png'.format(i)))
noise = util.torch2img(noise[0])
noise.save(os.path.join(save_dir, '{:04d}_noise.png'.format(i)))
logger.info('{:20s} - sample {:04d} image: bpp = {:.4f}, psnr = {:.3f}dB, ssim = {:.4f}'.format(phase, i, bpp, cur_psnr, cur_ssim))
test_metrics['psnr'].append(cur_psnr)
test_metrics['ms-ssim'].append(cur_ssim)
test_metrics['bpp'].append(bpp)
test_metrics['encoding_time'].append(enc_time)
test_metrics['decoding_time'].append(dec_time)
for k, v in test_metrics.items():
test_metrics[k] = [sum(v) / len(v)]
logger.info('----Average results for phase {:s}----'.format(phase))
for k, v in test_metrics.items():
logger.info('\t{:s}: {:.4f}'.format(k, v[0]))
test_end_time = time.time()
logger.info('Total testing time for phase {:s} = {:.3f}s'.format(phase, test_end_time - test_start_time))
# save results
description = "entropy estimation" if estimation else "ans"
output = {
"name": '{:s}_{:s}'.format(opt['name'], phase),
"description": f"Inference ({description})",
"results": test_metrics,
}
json_path = os.path.join(opt['path']['results_root'], mode, '{:s}.json'.format(phase))
with open(json_path, 'w') as f:
json.dump(output, f, indent=2)
| 6,949 | 38.714286 | 139 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/train.py | import os
import math
import argparse
import random
import logging
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
import options.options as option
from utils import util
from utils.util import (
configure_optimizers, load_optimizer,
configure_schedulers, load_scheduler,
create_model, create_dataloader, create_dataset,
print_network,
torch2img,
compute_metrics,
AverageMeter,
)
from criterions.criterion import Criterion
import warnings
warnings.filterwarnings("ignore")
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def main():
#### options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to option YMAL file.', default='./conf/train/sample.yml')
parser.add_argument('--local_rank', type=int, default=-1)
args = parser.parse_args()
opt = option.parse(args.opt, is_train=True)
#### distributed training settings
rank = args.local_rank
world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if rank == -1:
opt['dist'] = False
print('Disabled distributed training.')
else:
opt['dist'] = True
if world_size > 1:
            torch.cuda.set_device(rank)  # pin each process to its own fixed GPU
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
#### loading resume state if exists
if opt['path'].get('checkpoint', None):
# distributed resuming: all load into default GPU
device_id = torch.cuda.current_device()
checkpoint = torch.load(opt['path']['checkpoint'],
map_location=lambda storage, loc: storage.cuda(device_id))
else:
checkpoint = None
#### mkdir and loggers
if rank <= 0: # normal training (rank -1) OR distributed training (rank 0)
if checkpoint is None:
util.mkdir_and_rename(
opt['path']['experiments_root']) # rename experiment folder if exists
util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root'
and 'pretrain_model' not in key and 'resume' not in key))
# config loggers. Before it, the log will not work
util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
util.setup_logger('val', opt['path']['log'], 'val_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
logger = logging.getLogger('base')
logger_val = logging.getLogger('val') # validation logger
logger.info(option.dict2str(opt))
# tensorboard logger
if opt['use_tb_logger']:
version = float(torch.__version__[0:3])
if version >= 1.1: # PyTorch 1.1
from torch.utils.tensorboard import SummaryWriter
else:
logger.info(
'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))
from tensorboardX import SummaryWriter
tb_logger = SummaryWriter(log_dir='../../tb_logger/' + opt['name'])
else:
util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)
logger = logging.getLogger('base')
# convert to NoneDict, which returns None for missing keys
opt = option.dict_to_nonedict(opt)
#### random seed
seed = opt['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
if rank <= 0:
logger.info('Random seed: {}'.format(seed))
util.set_random_seed(seed)
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
#### create train and val dataloader
# dataset_ratio = 200 # enlarge the size of each epoch
mode = opt['train']['mode']
device = 'cuda' if opt['gpu_ids'] is not None else 'cpu'
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
train_set = create_dataset(dataset_opt)
if mode == 'epoch':
train_size = int(math.floor(len(train_set) / dataset_opt['batch_size']))
total_epochs = int(opt['train'][mode]['value'])
total_iters = train_size * total_epochs
if 'debug' not in opt['name']:
opt['train']['epoch']['val_freq'] *= train_size
elif mode == 'step':
train_size = int(math.floor(len(train_set) / dataset_opt['batch_size']))
total_iters = int(opt['train'][mode]['value'])
total_epochs = int(math.ceil(total_iters / train_size))
else:
raise NotImplementedError('mode [{:s}] is not recognized.'.format(mode))
            if opt['dist']:
                # NOTE: the original line instantiated the abstract Sampler(),
                # which has no __iter__/set_epoch; a stock DistributedSampler
                # is assumed here so the distributed path can actually run.
                train_sampler = torch.utils.data.DistributedSampler(train_set)
                # train_sampler = DistIterSampler(train_set, world_size, rank, dataset_ratio)
                # total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))
            else:
                train_sampler = None
train_loader = create_dataloader(train_set, dataset_opt, opt, train_sampler)
if rank <= 0:
logger.info('Number of train samples: {:,d}, iters: {:,d}'.format(
len(train_set), train_size))
logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
total_epochs, total_iters))
elif phase == 'val':
val_set = create_dataset(dataset_opt)
val_loader = create_dataloader(val_set, dataset_opt, opt, None)
if rank <= 0:
logger.info('Number of val samples in [{:s}]: {:d}'.format(
dataset_opt['name'], len(val_set)))
else:
raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
assert train_loader is not None
assert val_loader is not None
#### create model
model = create_model(opt, checkpoint, None, rank)
model = model.to(device)
#### create optimizer and schedulers
optimizer_dict = configure_optimizers(opt, model)
scheduler_dict = configure_schedulers(opt, optimizer_dict)
optimizer = load_optimizer(optimizer_dict, 'optimizer', checkpoint)
aux_optimizer = load_optimizer(optimizer_dict, 'aux_optimizer', checkpoint)
lr_scheduler = load_scheduler(scheduler_dict, 'lr_scheduler', checkpoint)
aux_lr_scheduler = load_scheduler(scheduler_dict, 'aux_lr_scheduler', checkpoint)
#### resume training
if checkpoint:
if rank <= 0:
logger.info('Resuming training from epoch: {}, iter: {}.'.format(
checkpoint['epoch'], checkpoint['iter']))
# training state
start_epoch = checkpoint['epoch']
best_loss = checkpoint['loss']
current_step = start_epoch * math.ceil(len(train_loader.dataset) / opt['datasets']['train']['batch_size'])
checkpoint = None
else:
start_epoch = 0
best_loss = 1e10
current_step = 0
#### criterion
criterion = Criterion(opt)
# torch.cuda.empty_cache()
#### training
if rank <= 0:
logger.info('Model parameter numbers: {:d}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
loss_cap = opt['train']['loss_cap']
for epoch in range(start_epoch, total_epochs + 1):
if opt['dist']:
train_sampler.set_epoch(epoch)
if rank <= 0 and mode == 'epoch':
message = 'lr_main: {:e}'.format(optimizer.param_groups[0]['lr'])
message += ' | lr_aux: {:e}'.format(aux_optimizer.param_groups[0]['lr'])
logger.info(message)
for _, train_data in enumerate(train_loader):
# torch.cuda.empty_cache()
current_step += 1
if current_step > total_iters:
break
#### training
model.train()
# device = next(model.parameters()).device
gt, noise = train_data
gt = gt.to(device)
noise = noise.to(device)
optimizer.zero_grad()
aux_optimizer.zero_grad()
# forward
out_net = model(noise, gt)
out_train = criterion(out_net, gt)
            # only step the optimizers when the loss is non-negative and below
            # loss_cap (keeps updates stable; the reconstruction then stays roughly within [0, 1])
optimizer_flag = out_train["loss"].item() >= 0 and out_train["loss"].item() < loss_cap
if not optimizer_flag:
message = '[Warning]: network parameters are not optimized due to train loss = {:.4f}.'.format(out_train['loss'].item())
print(message)
# logger.info(message)
# optimizer
out_train["loss"].backward()
if opt['train']['clip_max_norm'] > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), opt['train']['clip_max_norm'])
if not optimizer_flag:
optimizer.zero_grad()
optimizer.step()
# aux_optimizer
aux_loss = model.aux_loss()
out_train['aux_loss'] = aux_loss
aux_loss.backward()
if not optimizer_flag:
aux_optimizer.zero_grad()
aux_optimizer.step()
#### update learning rate for step mode
if mode == 'step':
lr_scheduler.step()
aux_lr_scheduler.step()
#### log: weighted loss
if current_step % opt['logger']['print_freq'] == 0:
wanted_keys = ['loss', 'bpp_loss', 'aux_loss']
message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> [weighted]'.format(epoch, current_step, optimizer.param_groups[0]['lr'])
for k, v in out_train.items():
# tensorboard logger
if opt['use_tb_logger']:
if rank <= 0:
mode_counter = epoch if mode == 'epoch' else current_step
tb_logger.add_scalar('[train]: {}'.format(k), v.item(), mode_counter)
# message
if k in wanted_keys or 'weighted' in k:
k = k.replace('weighted_', '')
message += ' | {:s}: {:.4f}'.format(k, v.item())
if rank <= 0:
logger.info(message)
# validation
if current_step % opt['train'][mode]['val_freq'] == 0 and rank <= 0:
model.eval()
# device = next(model.parameters()).device
log = {}
for k in out_train.keys():
log[k] = AverageMeter()
log['psnr'] = AverageMeter()
log['ms_ssim'] = AverageMeter()
with torch.no_grad():
mode_counter = epoch if mode == 'epoch' else current_step
this_val_dir = os.path.join(opt['path']['val_samples'], '{:d}'.format(mode_counter))
if not os.path.exists(this_val_dir):
os.makedirs(this_val_dir)
for i, val_data in enumerate(val_loader):
gt, noise = val_data
gt = gt.to(device)
noise = noise.to(device)
out_net = model(noise, gt)
out_val = criterion(out_net, gt)
out_val['aux_loss'] = model.aux_loss()
for k, v in out_val.items():
log[k].update(v.item())
# save
rec = torch2img(out_net['x_hat'])
gt = torch2img(gt)
noise = torch2img(noise)
p, m = compute_metrics(rec, gt)
log['psnr'].update(p)
log['ms_ssim'].update(m)
if i < 12:
rec.save(os.path.join(this_val_dir, '{:03d}_rec.png'.format(i)))
gt.save(os.path.join(this_val_dir, '{:03d}_gt.png'.format(i)))
noise.save(os.path.join(this_val_dir, '{:03d}_noise.png'.format(i)))
# val tensorboard
for k, v in log.items():
if opt['use_tb_logger']:
if rank <= 0:
mode_counter = epoch if mode == 'epoch' else current_step
tb_logger.add_scalar('[val]: {}'.format(k), v.avg, mode_counter)
# [val] weighted loss
wanted_keys = ['loss', 'bpp_loss', 'aux_loss']
message = '<epoch:{:3d}, iter:{:8,d}> [weighted]'.format(epoch, current_step)
for k, v in log.items():
if k in wanted_keys or 'weighted' in k:
k = k.replace('weighted_', '')
message += ' | {:s}: {:.4f}'.format(k, v.avg)
if rank <= 0:
logger_val.info(message)
# [val] raw loss
unwanted_keys = ['psnr', 'ms_ssim', 'rd_loss']
message = '<epoch:{:3d}, iter:{:8,d}> [raw loss]'.format(epoch, current_step)
for k, v in log.items():
if k in unwanted_keys or 'weighted' in k:
continue
message += ' | {:s}: {:.4f}'.format(k, v.avg)
if rank <= 0:
logger_val.info(message)
# [val] rate distortion
wanted_keys = ['rd_loss', 'bpp_loss', 'psnr', 'ms_ssim']
message = '<epoch:{:3d}, iter:{:8,d}> [rate-dis]'.format(epoch, current_step)
for k, v in log.items():
if k in wanted_keys:
k = k.replace('_loss', '')
message += ' | {:s}: {:.4f}'.format(k, v.avg)
if rank <= 0:
logger.info(message)
logger_val.info(message)
#### save checkpoints
loss = log['rd_loss'].avg
is_best = loss < best_loss
best_loss = min(loss, best_loss)
if rank <= 0 and is_best:
save_dict = {
"epoch": epoch,
"iter": current_step,
"state_dict": model.state_dict(),
"loss": loss,
"optimizer": optimizer_dict['optimizer'].state_dict(),
"aux_optimizer": optimizer_dict['aux_optimizer'].state_dict(),
"lr_scheduler": scheduler_dict['lr_scheduler'].state_dict(),
"aux_lr_scheduler": scheduler_dict['aux_lr_scheduler'].state_dict(),
}
mode_counter = epoch if mode == 'epoch' else current_step
save_path = os.path.join(opt['path']['checkpoints'], "checkpoint_best_loss.pth.tar")
torch.save(save_dict, save_path)
logger.info('best checkpoint saved.')
logger_val.info('best checkpoint saved.')
torch.cuda.empty_cache()
#### save checkpoints
if rank <= 0 and (epoch + 1) % opt['logger']['save_checkpoint_freq'] == 0:
save_dict = {
"epoch": epoch,
"iter": current_step,
"state_dict": model.state_dict(),
"loss": best_loss,
"optimizer": optimizer_dict['optimizer'].state_dict(),
"aux_optimizer": optimizer_dict['aux_optimizer'].state_dict(),
"lr_scheduler": scheduler_dict['lr_scheduler'].state_dict(),
"aux_lr_scheduler": scheduler_dict['aux_lr_scheduler'].state_dict(),
}
mode_counter = epoch if mode == 'epoch' else current_step
save_path = os.path.join(opt['path']['checkpoints'], "checkpoint_{:d}.pth.tar".format(mode_counter))
torch.save(save_dict, save_path)
#### update learning rate for epoch mode
if mode == 'epoch':
lr_scheduler.step()
aux_lr_scheduler.step()
if rank <= 0:
logger.info('Saving the final model.')
save_dict = {
"epoch": epoch,
"iter": current_step,
"state_dict": model.state_dict(),
"loss": best_loss,
"optimizer": optimizer_dict['optimizer'].state_dict(),
"aux_optimizer": optimizer_dict['aux_optimizer'].state_dict(),
"lr_scheduler": scheduler_dict['lr_scheduler'].state_dict(),
"aux_lr_scheduler": scheduler_dict['aux_lr_scheduler'].state_dict(),
}
mode_counter = epoch if mode == 'epoch' else current_step
save_path = os.path.join(opt['path']['checkpoints'], "checkpoint_latest.pth.tar")
torch.save(save_dict, save_path)
logger.info('End of training.')
if __name__ == '__main__':
main()
| 17,639 | 41 | 137 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/options/options.py | import os
import logging
import yaml
from utils.util import OrderedYaml
Loader, Dumper = OrderedYaml()
# type = normal, forward, backward
def parse(opt_path, is_train=True):
with open(opt_path, mode='r') as f:
opt = yaml.load(f, Loader=Loader)
opt['is_train'] = is_train
# export CUDA_VISIBLE_DEVICES
if 'gpu_ids' in opt.keys():
gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
# print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
# datasets
for phase, dataset in opt['datasets'].items():
dataset['phase'] = phase
# path
opt['path']['root'] = os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir))
opt['path']['experiments_root'] = os.path.join(opt['path']['root'], 'experiments', opt['name'])
if is_train:
opt['path']['checkpoints'] = os.path.join(opt['path']['experiments_root'], 'checkpoints')
opt['path']['log'] = opt['path']['experiments_root']
opt['path']['val_samples'] = os.path.join(opt['path']['experiments_root'], 'val_samples')
# change some options for debug mode
if 'debug' in opt['name']:
mode = opt['train']['mode']
opt['use_tb_logger'] = False
opt['train'][mode]['val_freq'] = 8
opt['logger']['print_freq'] = 1
opt['logger']['save_checkpoint_freq'] = 1
else: # test
opt['path']['checkpoint_updated'] = os.path.join(opt['path']['experiments_root'], 'checkpoint_updated')
opt['path']['results_root'] = os.path.join(opt['path']['root'], 'results', opt['name'])
opt['path']['log'] = opt['path']['results_root']
return opt
def dict2str(opt, indent_l=1):
'''dict to string for logger'''
msg = ''
for k, v in opt.items():
if isinstance(v, dict):
msg += ' ' * (indent_l * 2) + k + ':[\n'
msg += dict2str(v, indent_l + 1)
msg += ' ' * (indent_l * 2) + ']\n'
else:
msg += ' ' * (indent_l * 2) + k + ': ' + str(v) + '\n'
return msg
class NoneDict(dict):
def __missing__(self, key):
return None
# convert to NoneDict, which returns None for missing keys.
def dict_to_nonedict(opt):
if isinstance(opt, dict):
new_opt = dict()
for key, sub_opt in opt.items():
new_opt[key] = dict_to_nonedict(sub_opt)
return NoneDict(**new_opt)
elif isinstance(opt, list):
return [dict_to_nonedict(sub_opt) for sub_opt in opt]
else:
return opt
| 2,606 | 33.302632 | 129 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/options/__init__.py | 0 | 0 | 0 | py |
|
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/criterions/__init__.py | 0 | 0 | 0 | py |
|
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/criterions/criterion.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
import torch.nn.functional as F
class Criterion(nn.Module):
def __init__(self, opt):
super(Criterion, self).__init__()
self.opt = opt
# criterions
self.criterion_metric = opt['network']['criterions']['criterion_metric']
self.criterion_fea = opt['network']['criterions']['criterion_fea']
# lambdas
self.lambda_metric = opt['network']['lambdas']['lambda_metric']
self.lambda_fea = opt['network']['lambdas']['lambda_fea']
self.metric_loss = RateDistortionLoss(lmbda=self.lambda_metric, criterion=self.criterion_metric)
if self.criterion_fea:
self.fea_loss = FeaLoss(lmbda=self.lambda_fea, criterion=self.criterion_fea)
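    # forward() composes the total objective:
    #   loss    = bpp_loss + weighted distortion (+ weighted feature loss when criterion_fea is set)
    #   rd_loss = bpp_loss + weighted distortion only (no feature term)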
def forward(self, out_net, gt):
out = {'loss': 0, 'rd_loss': 0}
# bpp loss and metric loss
out_metric = self.metric_loss(out_net, gt)
out['loss'] += out_metric['bpp_loss']
out['rd_loss'] += out_metric['bpp_loss']
for k, v in out_metric.items():
out[k] = v
if 'weighted' in k:
out['loss'] += v
out['rd_loss'] += v
# fea loss
if self.criterion_fea:
if 'y_inter' in out_net.keys():
out_fea = self.fea_loss(out_net['y'], out_net['y_gt'], out_net['y_inter'], out_net['y_inter_gt'])
else:
out_fea = self.fea_loss(out_net['y'], out_net['y_gt'])
for k, v in out_fea.items():
out[k] = v
if 'weighted' in k:
out['loss'] += v
return out
# rate distortion loss
class RateDistortionLoss(nn.Module):
"""Custom rate distortion loss with a Lagrangian parameter."""
def __init__(self, lmbda=1e-2, criterion='mse'):
super().__init__()
self.lmbda = lmbda
self.criterion = criterion
if self.criterion == 'mse':
self.loss = nn.MSELoss()
elif self.criterion == 'ms-ssim':
from pytorch_msssim import ms_ssim
self.loss = ms_ssim
else:
            raise NotImplementedError('RateDistortionLoss criterion [{:s}] is not recognized.'.format(criterion))
def forward(self, out_net, target):
N, _, H, W = target.size()
out = {}
num_pixels = N * H * W
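        # bpp estimate: -log2(likelihood) summed over all latents ("y" and "z"),
        # normalized by the number of image pixels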
out["bpp_loss"] = sum(
(torch.log(likelihoods).sum() / (-math.log(2) * num_pixels))
for likelihoods in out_net["likelihoods"].values()
)
if self.criterion == 'mse':
out["mse_loss"] = self.loss(out_net["x_hat"], target)
out["weighted_mse_loss"] = self.lmbda * 255 ** 2 * out["mse_loss"]
elif self.criterion == 'ms-ssim':
out["ms_ssim_loss"] = 1 - self.loss(out_net["x_hat"], target, data_range=1.0)
out["weighted_ms_ssim_loss"] = self.lmbda * out["ms_ssim_loss"]
return out
# fea loss
class FeaLoss(nn.Module):
def __init__(self, lmbda=1., criterion='l2'):
super(FeaLoss, self).__init__()
self.lmbda = lmbda
self.criterion = criterion
if self.criterion == 'l2':
self.loss = nn.MSELoss()
elif self.criterion == 'l1':
self.loss = nn.L1Loss()
else:
            raise NotImplementedError('FeaLoss criterion [{:s}] is not recognized.'.format(criterion))
def forward(self, fea, fea_gt, fea_inter=None, fea_inter_gt=None):
loss = self.loss(fea, fea_gt)
if fea_inter is not None and fea_inter_gt is not None:
loss += self.loss(fea_inter, fea_inter_gt)
out = {
'fea_loss': loss,
'weighted_fea_loss': loss * self.lmbda,
}
return out
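# Minimal usage sketch (names are illustrative; the real wiring lives in the training script):
#   criterion = Criterion(opt)
#   out_net = model(noisy, gt)        # dict providing "x_hat", "likelihoods", and "y"/"y_gt" for the feature loss
#   losses = criterion(out_net, gt)   # losses['loss'] drives the main optimizer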
| 3,832 | 33.223214 | 113 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/scripts/sidd_block.py | import os
import scipy.io
from PIL import Image
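# Exports the SIDD validation blocks (noisy + ground truth) and the benchmark (test) noisy blocks
# from the provided .mat files as per-scene PNG images: 40 scenes x 32 blocks each.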
val_noisy_blocks_path = '../../../data/SIDD/SIDD_Benchmark_zips/ValidationNoisyBlocksSrgb.mat'
val_noisy_blocks = scipy.io.loadmat(val_noisy_blocks_path)
val_noisy_blocks_numpy = val_noisy_blocks['ValidationNoisyBlocksSrgb']
val_gt_blocks_path = '../../../data/SIDD/SIDD_Benchmark_zips/ValidationGtBlocksSrgb.mat'
val_gt_blocks = scipy.io.loadmat(val_gt_blocks_path)
val_gt_blocks_numpy = val_gt_blocks['ValidationGtBlocksSrgb']
test_noisy_blocks_path = '../../../data/SIDD/SIDD_Benchmark_zips/BenchmarkNoisyBlocksSrgb.mat'
test_noisy_blocks = scipy.io.loadmat(test_noisy_blocks_path)
test_noisy_blocks_numpy = test_noisy_blocks['BenchmarkNoisyBlocksSrgb']
val_dir = '../../../data/SIDD/SIDD_Benchmark_Data_Blocks/SIDD_Validation'
test_dir = '../../../data/SIDD/SIDD_Benchmark_Data_Blocks/SIDD_Test'
num_of_scenes = 40
num_of_blocks = 32
scene_names = sorted(os.listdir('../../../data/SIDD/SIDD_Benchmark_Data'))
for idx in range(num_of_scenes):
scene_name = scene_names[idx]
scene = scene_name.split('_')[0]
val_scene_dir = os.path.join(val_dir, scene_name)
if not os.path.exists(val_scene_dir):
os.makedirs(val_scene_dir)
test_scene_dir = os.path.join(test_dir, scene_name)
if not os.path.exists(test_scene_dir):
os.makedirs(test_scene_dir)
for block in range(num_of_blocks):
val_noisy_img = Image.fromarray(val_noisy_blocks_numpy[idx, block])
val_noisy_path = os.path.join(val_scene_dir, '{:s}_NOISY_SRGB_{:02d}.png'.format(scene, block))
val_noisy_img.save(val_noisy_path)
val_gt_img = Image.fromarray(val_gt_blocks_numpy[idx, block])
val_gt_path = os.path.join(val_scene_dir, '{:s}_GT_SRGB_{:02d}.png'.format(scene, block))
val_gt_img.save(val_gt_path)
test_noisy_img = Image.fromarray(test_noisy_blocks_numpy[idx, block])
test_noisy_path = os.path.join(test_scene_dir, '{:s}_NOISY_SRGB_{:02d}.png'.format(scene, block))
test_noisy_img.save(test_noisy_path) | 2,043 | 44.422222 | 105 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/scripts/flicker_process.py | import os
import json
import imagesize
from shutil import copyfile
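# Splits the flicker_2W image set into train/val folders, keeping only images that are at least
# 256x256 and using a 99%/1% train/val split.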
# directory
data_root_dir = '../../../data/flicker/flicker_2W_images'
train_root_dir = '../../../data/flicker/train'
val_root_dir = '../../../data/flicker/val'
if not os.path.exists(train_root_dir):
os.makedirs(train_root_dir)
if not os.path.exists(val_root_dir):
os.makedirs(val_root_dir)
# get filtered index dict
index_list = []
count = 0
for img in sorted(os.listdir(data_root_dir)):
img_path = os.path.join(data_root_dir, img)
width, height = imagesize.get(img_path)
    if width >= 256 and height >= 256:
        index_list.append(count)
count += 1
if count % 1000 == 0:
print(count, 'done!')
imgs = sorted(os.listdir(data_root_dir))
alls = [imgs[i] for i in index_list]
all_length = len(alls)
train_ratio = 0.99
train_length = round(len(alls) * train_ratio)
val_length = all_length - train_length
for img_name in alls[:train_length]:
copyfile(os.path.join(data_root_dir, img_name), os.path.join(train_root_dir, img_name))
for img_name in alls[-val_length:]:
copyfile(os.path.join(data_root_dir, img_name), os.path.join(val_root_dir, img_name))
| 1,184 | 27.214286 | 91 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/scripts/sidd_tile_annotations.py | from PIL import Image
import glob, os
import json
import numpy as np
import image_slicer
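# row_col_h_w maps each SIDD camera code to (rows, cols, tile_height, tile_width): full-resolution
# SIDD Medium sRGB images are split into a rows x cols grid of tiles of that size, and the
# train/val/test JSON annotation files built below record where each tile/block lives on disk.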
row_col_h_w = {
'S6': (6, 9, 500, 592),
'GP': (4, 8, 761, 506),
'N6': (6, 8, 520, 526),
'G4': (6, 8, 498, 664),
'IP': (6, 8, 504, 504),
}
checked = {
'S6': False,
'GP': False,
'N6': False,
'G4': False,
'IP': False,
}
annotation_dir = '../annotations'
if not os.path.exists(annotation_dir):
os.makedirs(annotation_dir)
# prepare for train dataset
data_root = '../../../data/SIDD/SIDD_Medium_Srgb/Data'
train_dict = {}
folders = sorted(os.listdir(data_root))
for folder in folders:
scene_instance, scene, camera, ISO, shutter_speed, temperature, brightness = folder.split('_')
image_paths = sorted(glob.glob(os.path.join(data_root, folder, '*.PNG')))
save_dir = os.path.join(data_root, folder).replace('SIDD_Medium_Srgb/Data', 'SIDD_Medium_Srgb_Tiles')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for image_path in image_paths:
row, col, h, w = row_col_h_w[camera]
# checking
if not checked[camera]:
img = Image.open(image_path)
C, R = img.size
assert R == row * h
assert C == col * w
checked[camera] = True
print('camera {:s} pass checking.'.format(camera))
print('camera {:s} checked = {}, with row = {:d}, col = {:d}'.format(camera, checked[camera], row, col))
# tiles = image_slicer.slice(image_path, col=col, row=row, save=False)
prefix = image_path.split('/')[-1].split('.')[0]
print('saving {:s} tiles to {:s}\n'.format(prefix, save_dir))
# image_slicer.save_tiles(tiles, directory=save_dir, prefix=prefix, format='png')
train_dict[scene_instance] = {}
gt_prefixs = sorted([image_path.split('/')[-1].split('.')[0] for image_path in image_paths if 'GT' in image_path])
noisy_prefixs = sorted([image_path.split('/')[-1].split('.')[0] for image_path in image_paths if 'NOISY' in image_path])
for gt_prefix, noisy_prefix in zip(gt_prefixs, noisy_prefixs):
entry = gt_prefix.split('_')[-1]
row, col, h, w = row_col_h_w[camera]
train_dict[scene_instance][entry] = {
'img_dir': save_dir,
'gt_prefix': gt_prefix,
'noisy_prefix': noisy_prefix,
'row': row,
'col': col,
'h': h,
'w': w,
'H': row * h,
'W': col * w,
}
train_path = os.path.join(annotation_dir, 'sidd_medium_srgb_train.json')
with open(train_path, 'w') as f:
json.dump(train_dict, f, sort_keys=True, indent=4)
# prepare for validation dataset
data_root = '../../../data/SIDD/SIDD_Benchmark_Data_Blocks/SIDD_Validation'
val_dict = {}
folders = sorted(os.listdir(data_root))
for folder in folders:
scene_instance, scene, camera, ISO, shutter_speed, temperature, brightness = folder.split('_')
val_dict[scene_instance] = {}
image_paths = sorted(glob.glob(os.path.join(data_root, folder, '*.png')))
gt_paths = sorted([image_path for image_path in image_paths if 'GT' in image_path])
noisy_paths = sorted([image_path for image_path in image_paths if 'NOISY' in image_path])
for entry, (gt_path, noisy_path) in enumerate(zip(gt_paths, noisy_paths)):
val_dict[scene_instance]['{:03d}'.format(entry)] = {
'gt_path': gt_path,
'noisy_path': noisy_path,
}
val_path = os.path.join(annotation_dir, 'sidd_medium_srgb_val.json')
with open(val_path, 'w') as f:
json.dump(val_dict, f, sort_keys=True, indent=4)
# prepare for test dataset
data_root = '../../../data/SIDD/SIDD_Benchmark_Data_Blocks/SIDD_Test'
test_dict = {}
folders = sorted(os.listdir(data_root))
for folder in folders:
scene_instance, scene, camera, ISO, shutter_speed, temperature, brightness = folder.split('_')
test_dict[scene_instance] = {}
image_paths = sorted(glob.glob(os.path.join(data_root, folder, '*.png')))
noisy_paths = sorted([image_path for image_path in image_paths if 'NOISY' in image_path])
for entry, noisy_path in enumerate(noisy_paths):
test_dict[scene_instance]['{:03d}'.format(entry)] = {
'noisy_path': noisy_path,
}
test_path = os.path.join(annotation_dir, 'sidd_medium_srgb_test.json')
with open(test_path, 'w') as f:
json.dump(test_dict, f, sort_keys=True, indent=4) | 4,415 | 35.495868 | 124 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/utils/util.py | import os
import sys
import time
import math
from datetime import datetime
import random
import logging
from collections import OrderedDict
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.utils.data
from torchvision import transforms
from torchvision.utils import make_grid
from shutil import get_terminal_size
from PIL import Image
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from compressai.zoo import models
from compressai.zoo.image import model_architectures as architectures
from typing import Tuple, Union
from pytorch_msssim import ms_ssim
import torch.nn.functional as F
# optimizers
def configure_optimizers(opt, net):
parameters = [
p for n, p in net.named_parameters() if not n.endswith(".quantiles")
]
aux_parameters = [
p for n, p in net.named_parameters() if n.endswith(".quantiles")
]
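    # ".quantiles" are the learned CDF support points of the EntropyBottleneck; they are
    # trained by the auxiliary optimizer (through the model's aux_loss()), separately from
    # the main rate-distortion objective.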
# Make sure we don't have an intersection of parameters
params_dict = dict(net.named_parameters())
inter_params = set(parameters) & set(aux_parameters)
union_params = set(parameters) | set(aux_parameters)
assert len(inter_params) == 0
assert len(union_params) - len(params_dict.keys()) == 0
mode = opt['train']['mode']
optimizer_dict = {}
optimizer_dict['optimizer'] = torch.optim.Adam(
(p for p in parameters if p.requires_grad),
lr=opt['train'][mode]['lr'],
)
optimizer_dict['aux_optimizer'] = torch.optim.Adam(
(p for p in aux_parameters if p.requires_grad),
lr=opt['train'][mode]['lr_aux'],
)
return optimizer_dict
def load_optimizer(optimizer_dict, name, checkpoint):
optimizer = optimizer_dict.get(name, None)
if optimizer is not None and checkpoint is not None:
optimizer.load_state_dict(checkpoint[name])
return optimizer
# schedulers
def configure_schedulers(opt, optimizer_dict):
mode = opt['train']['mode']
scheduler = opt['train'][mode]['lr_scheme']
warm_up_counts = opt['train'][mode]['warm_up_counts']
milestones = opt['train'][mode]['milestones']
gamma = opt['train'][mode]['gamma']
scheduler_dict = {}
if scheduler == 'MultiStepLR':
scheduler_dict['lr_scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
optimizer_dict['optimizer'],
milestones=milestones,
gamma=gamma
)
scheduler_dict['aux_lr_scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
optimizer_dict['aux_optimizer'],
milestones=[],
gamma=1.0
)
elif scheduler == 'LambdaLR':
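        # linear warm-up for the first `warm_up_counts` iterations, then multiply the LR by
        # `gamma` at every milestone; the aux LR only gets the warm-up (no decay)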
warm_up_with_multistep_lr = lambda i: (i + 1) / warm_up_counts if i < warm_up_counts else gamma**len([m for m in milestones if m <= i])
scheduler_dict['lr_scheduler'] = torch.optim.lr_scheduler.LambdaLR(
optimizer_dict['optimizer'],
lr_lambda=warm_up_with_multistep_lr
)
warm_up_with_multistep_lr = lambda i: (i + 1) / warm_up_counts if i < warm_up_counts else 1.0
scheduler_dict['aux_lr_scheduler'] = torch.optim.lr_scheduler.LambdaLR(
optimizer_dict['aux_optimizer'],
lr_lambda=warm_up_with_multistep_lr
)
elif scheduler == 'ReduceLROnPlateau':
scheduler_dict['lr_scheduler'] = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_dict['optimizer'], "min")
scheduler_dict['aux_lr_scheduler'] = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_dict['aux_optimizer'], "min")
else:
raise NotImplementedError('scheduler [{:s}] is not recognized.'.format(scheduler))
return scheduler_dict
def load_scheduler(scheduler_dict, name, checkpoint):
lr_scheduler = scheduler_dict.get(name, None)
if lr_scheduler is not None and checkpoint is not None:
lr_scheduler.load_state_dict(checkpoint[name])
return lr_scheduler
# model
def create_model(opt, checkpoint, state_dict, rank):
logger = logging.getLogger('base')
model = opt['network']['model']
if checkpoint is not None:
m = architectures[model].from_state_dict(checkpoint['state_dict'], opt)
elif state_dict is not None:
m = architectures[model].from_state_dict(state_dict, opt)
else:
quality = int(opt['network']['quality'])
metric = opt['network']['criterions']['criterion_metric']
pretrained = opt['network']['pretrained']
m = models[model](quality=quality, metric=metric, pretrained=pretrained, opt=opt)
print_network(m, rank)
logger.info('Model [{:s}] is created.'.format(m.__class__.__name__))
return m
def print_network(net, rank):
logger = logging.getLogger('base')
if isinstance(net, nn.DataParallel) or isinstance(net, nn.parallel.DistributedDataParallel):
net = net.module
s = str(net)
n = sum(map(lambda x: x.numel(), net.parameters()))
if isinstance(net, nn.DataParallel) or isinstance(net, nn.parallel.DistributedDataParallel):
net_struc_str = '{} - {}'.format(net.__class__.__name__,
net.module.__class__.__name__)
else:
        net_struc_str = '{}'.format(net.__class__.__name__)
m = 'structure: {}, with parameters: {:,d}'.format(net_struc_str, n)
if rank <= 0:
logger.info(m)
logger.info(s)
# dataloader
def create_dataloader(dataset, dataset_opt, opt=None, sampler=None):
phase = dataset_opt['phase']
if phase == 'train':
if opt['dist']:
world_size = torch.distributed.get_world_size()
num_workers = dataset_opt['n_workers']
assert dataset_opt['batch_size'] % world_size == 0
batch_size = dataset_opt['batch_size'] // world_size
shuffle = False
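            # shuffling is delegated to the sampler (e.g. a DistributedSampler) supplied by the caller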
else:
num_workers = dataset_opt['n_workers'] * len(opt['gpu_ids'])
batch_size = dataset_opt['batch_size']
shuffle = dataset_opt['use_shuffle']
return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, sampler=sampler, drop_last=True,
pin_memory=False)
else:
return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0,
pin_memory=False)
# dataset
def create_dataset(dataset_opt):
mode = dataset_opt['name']
if mode == 'synthetic':
from compressai.datasets import SyntheticDataset as D
elif mode == 'synthetic-test':
from compressai.datasets import SyntheticTestDataset as D
elif mode == 'sidd':
from compressai.datasets import SiddDataset as D
else:
raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(mode))
dataset = D(dataset_opt)
logger = logging.getLogger('base')
logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__,
dataset_opt['name']))
return dataset
# related to compression
def torch2img(x: torch.Tensor) -> Image.Image:
return transforms.ToPILImage()(x.clamp_(0, 1).squeeze()[0:3])
def psnr(a: torch.Tensor, b: torch.Tensor) -> float:
a = transforms.ToTensor()(torch2img(a))
b = transforms.ToTensor()(torch2img(b))
mse = F.mse_loss(a, b).item()
return -10 * math.log10(mse)
def compute_metrics(
a: Union[np.array, Image.Image],
b: Union[np.array, Image.Image],
max_val: float = 255.0,
) -> Tuple[float, float]:
"""Returns PSNR and MS-SSIM between images `a` and `b`. """
if isinstance(a, Image.Image):
a = np.asarray(a)
if isinstance(b, Image.Image):
b = np.asarray(b)
a = torch.from_numpy(a.copy()).float().unsqueeze(0)
if a.size(3) == 3:
a = a.permute(0, 3, 1, 2)
b = torch.from_numpy(b.copy()).float().unsqueeze(0)
if b.size(3) == 3:
b = b.permute(0, 3, 1, 2)
mse = torch.mean((a - b) ** 2).item()
p = 20 * np.log10(max_val) - 10 * np.log10(mse)
m = ms_ssim(a, b, data_range=max_val).item()
return p, m
class AverageMeter:
"""Compute running average."""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def OrderedYaml():
'''yaml orderedDict support'''
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
return Loader, Dumper
####################
# miscellaneous
####################
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, str):
mkdir(paths)
else:
for path in paths:
mkdir(path)
def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + '_archived_' + get_timestamp()
print('Path already exists. Rename it to [{:s}]'.format(new_name))
logger = logging.getLogger('base')
logger.info('Path already exists. Rename it to [{:s}]'.format(new_name))
os.rename(path, new_name)
os.makedirs(path)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):
'''set up logger'''
lg = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
if tofile:
log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
class ProgressBar(object):
'''A progress bar which can print the progress
modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py
'''
def __init__(self, task_num=0, bar_width=50, start=True):
self.task_num = task_num
max_bar_width = self._get_max_bar_width()
self.bar_width = (bar_width if bar_width <= max_bar_width else max_bar_width)
self.completed = 0
if start:
self.start()
def _get_max_bar_width(self):
terminal_width, _ = get_terminal_size()
max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)
if max_bar_width < 10:
            print('terminal width is too small ({}), please consider widening the terminal for better '
'progressbar visualization'.format(terminal_width))
max_bar_width = 10
return max_bar_width
def start(self):
if self.task_num > 0:
sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:\n{}\n'.format(
' ' * self.bar_width, self.task_num, 'Start...'))
else:
sys.stdout.write('completed: 0, elapsed: 0s')
sys.stdout.flush()
self.start_time = time.time()
def update(self, msg='In progress...'):
self.completed += 1
elapsed = time.time() - self.start_time
fps = self.completed / elapsed
if self.task_num > 0:
percentage = self.completed / float(self.task_num)
eta = int(elapsed * (1 - percentage) / percentage + 0.5)
mark_width = int(self.bar_width * percentage)
bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width)
sys.stdout.write('\033[2F') # cursor up 2 lines
sys.stdout.write('\033[J') # clean the output (remove extra chars since last display)
sys.stdout.write('[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s\n{}\n'.format(
bar_chars, self.completed, self.task_num, fps, int(elapsed + 0.5), eta, msg))
else:
sys.stdout.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(
self.completed, int(elapsed + 0.5), fps))
sys.stdout.flush()
# evaluation
def cropping(x):
h, w = x.size(2), x.size(3)
p = 64 # maximum 6 strides of 2
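    # F.pad with negative values crops; this center-crops the input to the largest
    # height/width that are multiples of 64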
new_h = h // p * p
new_w = w // p * p
cropping_left = (w - new_w) // 2
cropping_right = w - new_w - cropping_left
cropping_top = (h - new_h) // 2
cropping_bottom = h - new_h - cropping_top
x = F.pad(x, (-cropping_left, -cropping_right, -cropping_top, -cropping_bottom))
return x | 13,098 | 34.498645 | 143 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/utils/__init__.py | 0 | 0 | 0 | py |
|
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from compressai import datasets, entropy_models, layers, models, ops
try:
from .version import __version__
except ImportError:
pass
_entropy_coder = "ans"
_available_entropy_coders = [_entropy_coder]
try:
import range_coder
_available_entropy_coders.append("rangecoder")
except ImportError:
pass
def set_entropy_coder(entropy_coder):
"""
Specifies the default entropy coder used to encode the bit-streams.
Use :mod:`available_entropy_coders` to list the possible values.
Args:
entropy_coder (string): Name of the entropy coder
"""
global _entropy_coder
if entropy_coder not in _available_entropy_coders:
raise ValueError(
f'Invalid entropy coder "{entropy_coder}", choose from'
f'({", ".join(_available_entropy_coders)}).'
)
_entropy_coder = entropy_coder
def get_entropy_coder():
"""
Return the name of the default entropy coder used to encode the bit-streams.
"""
return _entropy_coder
def available_entropy_coders():
"""
Return the list of available entropy coders.
"""
return _available_entropy_coders
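# Example (sketch):
#   import compressai
#   compressai.available_entropy_coders()   # ['ans', ...]
#   compressai.set_entropy_coder('ans')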
| 2,868 | 35.782051 | 80 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/MultiscaleDecomp.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from .waseda import Cheng2020Anchor
from compressai.layers import (
AttentionBlock,
ResidualBlock,
ResidualBlockUpsample,
ResidualBlockWithStride,
conv3x3,
subpel_conv3x3,
conv1x1
)
import warnings
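# MultiscaleDecomp: Cheng2020Anchor variant whose encoder g_a is split into two blocks with an
# attention-based denoising module after each. The noisy input goes through the denoising path,
# while an optional clean (ground-truth) input goes through the same blocks without denoising to
# produce feature targets (y_inter_gt, y_gt) for the feature-domain loss used during training.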
class MultiscaleDecomp(Cheng2020Anchor):
def __init__(self, N=192, opt=None, **kwargs):
super().__init__(N=N, **kwargs)
self.g_a = None
self.g_a_block1 = nn.Sequential(
ResidualBlockWithStride(3, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
)
self.g_a_block2 = nn.Sequential(
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
conv3x3(N, N, stride=2),
)
self.denoise_module_1 = AttentionBlock(N)
self.denoise_module_2 = AttentionBlock(N)
def g_a_func(self, x, denoise=False):
x = self.g_a_block1(x)
if denoise:
x = self.denoise_module_1(x)
y_inter = x
x = self.g_a_block2(x)
if denoise:
x = self.denoise_module_2(x)
y = x
return y_inter, y
def forward(self, x, gt=None):
# g_a for noisy input
y_inter, y = self.g_a_func(x, denoise=True)
# g_a for clean input
if gt is not None:
y_inter_gt, y_gt = self.g_a_func(gt)
else:
y_inter_gt, y_gt = None, None
# h_a and h_s
z = self.h_a(y)
z_hat, z_likelihoods = self.entropy_bottleneck(z)
params = self.h_s(z_hat)
# g_s
y_hat = self.gaussian_conditional.quantize(
y, "noise" if self.training else "dequantize"
)
ctx_params = self.context_prediction(y_hat)
gaussian_params = self.entropy_parameters(
torch.cat((params, ctx_params), dim=1)
)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
_, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"y_inter": y_inter,
"y_inter_gt": y_inter_gt,
"y": y,
"y_gt": y_gt,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
@classmethod
def from_state_dict(cls, state_dict, opt=None):
"""Return a new model instance from `state_dict`."""
N = state_dict["h_a.0.weight"].size(0)
net = cls(N, opt)
net.load_state_dict(state_dict)
return net
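    # At test time only the noisy branch (denoise=True) is used; the clean/GT branch in forward()
    # exists purely to provide feature-level supervision during training.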
def compress(self, x):
if next(self.parameters()).device != torch.device("cpu"):
warnings.warn(
"Inference on GPU is not recommended for the autoregressive "
"models (the entropy coder is run sequentially on CPU)."
)
_, y = self.g_a_func(x, denoise=True)
z = self.h_a(y)
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
params = self.h_s(z_hat)
s = 4 # scaling factor between z and y
kernel_size = 5 # context prediction kernel size
padding = (kernel_size - 1) // 2
y_height = z_hat.size(2) * s
y_width = z_hat.size(3) * s
y_hat = F.pad(y, (padding, padding, padding, padding))
y_strings = []
for i in range(y.size(0)):
string = self._compress_ar(
y_hat[i : i + 1],
params[i : i + 1],
y_height,
y_width,
kernel_size,
padding,
)
y_strings.append(string)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
| 3,860 | 28.930233 | 84 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/priors.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from compressai.ans import BufferedRansEncoder, RansDecoder
from compressai.entropy_models import EntropyBottleneck, GaussianConditional
from compressai.layers import GDN, MaskedConv2d
from .utils import conv, deconv, update_registered_buffers
__all__ = [
"CompressionModel",
"FactorizedPrior",
"ScaleHyperprior",
"MeanScaleHyperprior",
"JointAutoregressiveHierarchicalPriors",
]
class CompressionModel(nn.Module):
"""Base class for constructing an auto-encoder with at least one entropy
bottleneck module.
Args:
entropy_bottleneck_channels (int): Number of channels of the entropy
bottleneck
"""
def __init__(self, entropy_bottleneck_channels, init_weights=True, **kwargs):
super().__init__()
self.entropy_bottleneck = EntropyBottleneck(entropy_bottleneck_channels)
if init_weights:
self._initialize_weights()
def aux_loss(self):
"""Return the aggregated loss over the auxiliary entropy bottleneck
module(s).
"""
aux_loss = sum(
m.loss() for m in self.modules() if isinstance(m, EntropyBottleneck)
)
return aux_loss
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, *args):
raise NotImplementedError()
def update(self, force=False):
"""Updates the entropy bottleneck(s) CDF values.
Needs to be called once after training to be able to later perform the
evaluation with an actual entropy coder.
Args:
force (bool): overwrite previous values (default: False)
Returns:
updated (bool): True if one of the EntropyBottlenecks was updated.
"""
updated = False
for m in self.children():
if not isinstance(m, EntropyBottleneck):
continue
rv = m.update(force=force)
updated |= rv
return updated
def load_state_dict(self, state_dict):
# Dynamically update the entropy bottleneck buffers related to the CDFs
update_registered_buffers(
self.entropy_bottleneck,
"entropy_bottleneck",
["_quantized_cdf", "_offset", "_cdf_length"],
state_dict,
)
super().load_state_dict(state_dict)
class FactorizedPrior(CompressionModel):
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_, Int Conf. on Learning Representations
(ICLR), 2018.
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(entropy_bottleneck_channels=M, **kwargs)
self.g_a = nn.Sequential(
conv(3, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, M),
)
self.g_s = nn.Sequential(
deconv(M, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, 3),
)
self.N = N
self.M = M
@property
def downsampling_factor(self) -> int:
return 2 ** 4
def forward(self, x):
y = self.g_a(x)
y_hat, y_likelihoods = self.entropy_bottleneck(y)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {
"y": y_likelihoods,
},
}
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.weight"].size(0)
M = state_dict["g_a.6.weight"].size(0)
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def compress(self, x):
y = self.g_a(x)
y_strings = self.entropy_bottleneck.compress(y)
return {"strings": [y_strings], "shape": y.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 1
y_hat = self.entropy_bottleneck.decompress(strings[0], shape)
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
# From Balle's tensorflow compression examples
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64
def get_scale_table(min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS):
return torch.exp(torch.linspace(math.log(min), math.log(max), levels))
class ScaleHyperprior(CompressionModel):
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
(ICLR), 2018.
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(entropy_bottleneck_channels=N, **kwargs)
self.g_a = nn.Sequential(
conv(3, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, M),
)
self.g_s = nn.Sequential(
deconv(M, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, 3),
)
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.ReLU(inplace=True),
conv(N, N),
nn.ReLU(inplace=True),
conv(N, N),
)
self.h_s = nn.Sequential(
deconv(N, N),
nn.ReLU(inplace=True),
deconv(N, N),
nn.ReLU(inplace=True),
conv(N, M, stride=1, kernel_size=3),
nn.ReLU(inplace=True),
)
self.gaussian_conditional = GaussianConditional(None)
self.N = int(N)
self.M = int(M)
@property
def downsampling_factor(self) -> int:
return 2 ** (4 + 2)
def forward(self, x):
y = self.g_a(x)
z = self.h_a(torch.abs(y))
z_hat, z_likelihoods = self.entropy_bottleneck(z)
scales_hat = self.h_s(z_hat)
y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
def load_state_dict(self, state_dict):
update_registered_buffers(
self.gaussian_conditional,
"gaussian_conditional",
["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
state_dict,
)
super().load_state_dict(state_dict)
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.weight"].size(0)
M = state_dict["g_a.6.weight"].size(0)
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def update(self, scale_table=None, force=False):
if scale_table is None:
scale_table = get_scale_table()
updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)
updated |= super().update(force=force)
return updated
def compress(self, x):
y = self.g_a(x)
z = self.h_a(torch.abs(y))
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
scales_hat = self.h_s(z_hat)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_strings = self.gaussian_conditional.compress(y, indexes)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
scales_hat = self.h_s(z_hat)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_hat = self.gaussian_conditional.decompress(strings[0], indexes, z_hat.dtype)
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
class MeanScaleHyperprior(ScaleHyperprior):
r"""Scale Hyperprior with non zero-mean Gaussian conditionals from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(N, M, **kwargs)
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.LeakyReLU(inplace=True),
conv(N, N),
nn.LeakyReLU(inplace=True),
conv(N, N),
)
self.h_s = nn.Sequential(
deconv(N, M),
nn.LeakyReLU(inplace=True),
deconv(M, M * 3 // 2),
nn.LeakyReLU(inplace=True),
conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),
)
def forward(self, x):
y = self.g_a(x)
z = self.h_a(y)
z_hat, z_likelihoods = self.entropy_bottleneck(z)
gaussian_params = self.h_s(z_hat)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
def compress(self, x):
y = self.g_a(x)
z = self.h_a(y)
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
gaussian_params = self.h_s(z_hat)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_strings = self.gaussian_conditional.compress(y, indexes, means=means_hat)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
gaussian_params = self.h_s(z_hat)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_hat = self.gaussian_conditional.decompress(
strings[0], indexes, means=means_hat
)
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
class JointAutoregressiveHierarchicalPriors(MeanScaleHyperprior):
r"""Joint Autoregressive Hierarchical Priors model from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N=192, M=192, **kwargs):
super().__init__(N=N, M=M, **kwargs)
self.g_a = nn.Sequential(
conv(3, N, kernel_size=5, stride=2),
GDN(N),
conv(N, N, kernel_size=5, stride=2),
GDN(N),
conv(N, N, kernel_size=5, stride=2),
GDN(N),
conv(N, M, kernel_size=5, stride=2),
)
self.g_s = nn.Sequential(
deconv(M, N, kernel_size=5, stride=2),
GDN(N, inverse=True),
deconv(N, N, kernel_size=5, stride=2),
GDN(N, inverse=True),
deconv(N, N, kernel_size=5, stride=2),
GDN(N, inverse=True),
deconv(N, 3, kernel_size=5, stride=2),
)
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.LeakyReLU(inplace=True),
conv(N, N, stride=2, kernel_size=5),
nn.LeakyReLU(inplace=True),
conv(N, N, stride=2, kernel_size=5),
)
self.h_s = nn.Sequential(
deconv(N, M, stride=2, kernel_size=5),
nn.LeakyReLU(inplace=True),
deconv(M, M * 3 // 2, stride=2, kernel_size=5),
nn.LeakyReLU(inplace=True),
conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),
)
self.entropy_parameters = nn.Sequential(
nn.Conv2d(M * 12 // 3, M * 10 // 3, 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(M * 10 // 3, M * 8 // 3, 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(M * 8 // 3, M * 6 // 3, 1),
)
self.context_prediction = MaskedConv2d(
M, 2 * M, kernel_size=5, padding=2, stride=1
)
self.gaussian_conditional = GaussianConditional(None)
self.N = int(N)
self.M = int(M)
@property
def downsampling_factor(self) -> int:
return 2 ** (4 + 2)
def forward(self, x):
y = self.g_a(x)
z = self.h_a(y)
z_hat, z_likelihoods = self.entropy_bottleneck(z)
params = self.h_s(z_hat)
y_hat = self.gaussian_conditional.quantize(
y, "noise" if self.training else "dequantize"
)
ctx_params = self.context_prediction(y_hat)
gaussian_params = self.entropy_parameters(
torch.cat((params, ctx_params), dim=1)
)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
_, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.weight"].size(0)
M = state_dict["g_a.6.weight"].size(0)
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def compress(self, x):
if next(self.parameters()).device != torch.device("cpu"):
warnings.warn(
"Inference on GPU is not recommended for the autoregressive "
"models (the entropy coder is run sequentially on CPU)."
)
y = self.g_a(x)
z = self.h_a(y)
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
params = self.h_s(z_hat)
s = 4 # scaling factor between z and y
kernel_size = 5 # context prediction kernel size
padding = (kernel_size - 1) // 2
y_height = z_hat.size(2) * s
y_width = z_hat.size(3) * s
y_hat = F.pad(y, (padding, padding, padding, padding))
y_strings = []
for i in range(y.size(0)):
string = self._compress_ar(
y_hat[i : i + 1],
params[i : i + 1],
y_height,
y_width,
kernel_size,
padding,
)
y_strings.append(string)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def _compress_ar(self, y_hat, params, height, width, kernel_size, padding):
cdf = self.gaussian_conditional.quantized_cdf.tolist()
cdf_lengths = self.gaussian_conditional.cdf_length.tolist()
offsets = self.gaussian_conditional.offset.tolist()
encoder = BufferedRansEncoder()
symbols_list = []
indexes_list = []
# Warning, this is slow...
# TODO: profile the calls to the bindings...
masked_weight = self.context_prediction.weight * self.context_prediction.mask
for h in range(height):
for w in range(width):
y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]
ctx_p = F.conv2d(
y_crop,
masked_weight,
bias=self.context_prediction.bias,
)
# 1x1 conv for the entropy parameters prediction network, so
# we only keep the elements in the "center"
p = params[:, :, h : h + 1, w : w + 1]
gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))
gaussian_params = gaussian_params.squeeze(3).squeeze(2)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_crop = y_crop[:, :, padding, padding]
y_q = self.gaussian_conditional.quantize(y_crop, "symbols", means_hat)
y_hat[:, :, h + padding, w + padding] = y_q + means_hat
symbols_list.extend(y_q.squeeze().tolist())
indexes_list.extend(indexes.squeeze().tolist())
encoder.encode_with_indexes(
symbols_list, indexes_list, cdf, cdf_lengths, offsets
)
string = encoder.flush()
return string
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
if next(self.parameters()).device != torch.device("cpu"):
warnings.warn(
"Inference on GPU is not recommended for the autoregressive "
"models (the entropy coder is run sequentially on CPU)."
)
# FIXME: we don't respect the default entropy coder and directly call the
# range ANS decoder
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
params = self.h_s(z_hat)
s = 4 # scaling factor between z and y
kernel_size = 5 # context prediction kernel size
padding = (kernel_size - 1) // 2
y_height = z_hat.size(2) * s
y_width = z_hat.size(3) * s
# initialize y_hat to zeros, and pad it so we can directly work with
# sub-tensors of size (N, C, kernel size, kernel_size)
y_hat = torch.zeros(
(z_hat.size(0), self.M, y_height + 2 * padding, y_width + 2 * padding),
device=z_hat.device,
)
for i, y_string in enumerate(strings[0]):
self._decompress_ar(
y_string,
y_hat[i : i + 1],
params[i : i + 1],
y_height,
y_width,
kernel_size,
padding,
)
y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
def _decompress_ar(
self, y_string, y_hat, params, height, width, kernel_size, padding
):
cdf = self.gaussian_conditional.quantized_cdf.tolist()
cdf_lengths = self.gaussian_conditional.cdf_length.tolist()
offsets = self.gaussian_conditional.offset.tolist()
decoder = RansDecoder()
decoder.set_stream(y_string)
# Warning: this is slow due to the auto-regressive nature of the
# decoding... See more recent publication where they use an
# auto-regressive module on chunks of channels for faster decoding...
for h in range(height):
for w in range(width):
# only perform the 5x5 convolution on a cropped tensor
# centered in (h, w)
y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]
ctx_p = F.conv2d(
y_crop,
self.context_prediction.weight,
bias=self.context_prediction.bias,
)
# 1x1 conv for the entropy parameters prediction network, so
# we only keep the elements in the "center"
p = params[:, :, h : h + 1, w : w + 1]
gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
rv = decoder.decode_stream(
indexes.squeeze().tolist(), cdf, cdf_lengths, offsets
)
rv = torch.Tensor(rv).reshape(1, -1, 1, 1)
rv = self.gaussian_conditional.dequantize(rv, means_hat)
hp = h + padding
wp = w + padding
y_hat[:, :, hp : hp + 1, wp : wp + 1] = rv
| 23,319 | 34.226586 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/utils.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
def find_named_module(module, query):
"""Helper function to find a named module. Returns a `nn.Module` or `None`
Args:
module (nn.Module): the root module
query (str): the module name to find
Returns:
nn.Module or None
"""
return next((m for n, m in module.named_modules() if n == query), None)
def find_named_buffer(module, query):
"""Helper function to find a named buffer. Returns a `torch.Tensor` or `None`
Args:
module (nn.Module): the root module
query (str): the buffer name to find
Returns:
torch.Tensor or None
"""
return next((b for n, b in module.named_buffers() if n == query), None)
def _update_registered_buffer(
module,
buffer_name,
state_dict_key,
state_dict,
policy="resize_if_empty",
dtype=torch.int,
):
new_size = state_dict[state_dict_key].size()
registered_buf = find_named_buffer(module, buffer_name)
if policy in ("resize_if_empty", "resize"):
if registered_buf is None:
raise RuntimeError(f'buffer "{buffer_name}" was not registered')
if policy == "resize" or registered_buf.numel() == 0:
registered_buf.resize_(new_size)
elif policy == "register":
if registered_buf is not None:
raise RuntimeError(f'buffer "{buffer_name}" was already registered')
module.register_buffer(buffer_name, torch.empty(new_size, dtype=dtype).fill_(0))
else:
raise ValueError(f'Invalid policy "{policy}"')
def update_registered_buffers(
module,
module_name,
buffer_names,
state_dict,
policy="resize_if_empty",
dtype=torch.int,
):
"""Update the registered buffers in a module according to the tensors sized
in a state_dict.
(There's no way in torch to directly load a buffer with a dynamic size)
Args:
module (nn.Module): the module
module_name (str): module name in the state dict
buffer_names (list(str)): list of the buffer names to resize in the module
state_dict (dict): the state dict
policy (str): Update policy, choose from
('resize_if_empty', 'resize', 'register')
dtype (dtype): Type of buffer to be registered (when policy is 'register')
"""
valid_buffer_names = [n for n, _ in module.named_buffers()]
for buffer_name in buffer_names:
if buffer_name not in valid_buffer_names:
raise ValueError(f'Invalid buffer name "{buffer_name}"')
for buffer_name in buffer_names:
_update_registered_buffer(
module,
buffer_name,
f"{module_name}.{buffer_name}",
state_dict,
policy,
dtype,
)
def conv(in_channels, out_channels, kernel_size=5, stride=2):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
)
def deconv(in_channels, out_channels, kernel_size=5, stride=2):
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=stride - 1,
padding=kernel_size // 2,
)
| 4,989 | 33.178082 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/waseda.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch.nn as nn
from compressai.layers import (
AttentionBlock,
ResidualBlock,
ResidualBlockUpsample,
ResidualBlockWithStride,
conv3x3,
subpel_conv3x3,
)
from .priors import JointAutoregressiveHierarchicalPriors
class Cheng2020Anchor(JointAutoregressiveHierarchicalPriors):
"""Anchor model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Uses residual blocks with small convolutions (3x3 and 1x1), and sub-pixel
convolutions for up-sampling.
Args:
N (int): Number of channels
"""
def __init__(self, N=192, **kwargs):
super().__init__(N=N, M=N, **kwargs)
self.g_a = nn.Sequential(
ResidualBlockWithStride(3, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
conv3x3(N, N, stride=2),
)
self.h_a = nn.Sequential(
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
conv3x3(N, N, stride=2),
nn.LeakyReLU(inplace=True),
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
conv3x3(N, N, stride=2),
)
self.h_s = nn.Sequential(
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
subpel_conv3x3(N, N, 2),
nn.LeakyReLU(inplace=True),
conv3x3(N, N * 3 // 2),
nn.LeakyReLU(inplace=True),
subpel_conv3x3(N * 3 // 2, N * 3 // 2, 2),
nn.LeakyReLU(inplace=True),
conv3x3(N * 3 // 2, N * 2),
)
self.g_s = nn.Sequential(
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
subpel_conv3x3(N, 3, 2),
)
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.conv1.weight"].size(0)
net = cls(N)
net.load_state_dict(state_dict)
return net
class Cheng2020Attention(Cheng2020Anchor):
"""Self-attention model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Uses self-attention, residual blocks with small convolutions (3x3 and 1x1),
and sub-pixel convolutions for up-sampling.
Args:
N (int): Number of channels
"""
def __init__(self, N=192, **kwargs):
super().__init__(N=N, **kwargs)
self.g_a = nn.Sequential(
ResidualBlockWithStride(3, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
AttentionBlock(N),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
conv3x3(N, N, stride=2),
AttentionBlock(N),
)
self.g_s = nn.Sequential(
AttentionBlock(N),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
AttentionBlock(N),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
subpel_conv3x3(N, 3, 2),
)
| 5,591 | 35.077419 | 79 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .priors import *
from .waseda import *
from .MultiscaleDecomp import MultiscaleDecomp | 1,808 | 55.53125 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/zoo/image.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from torch.hub import load_state_dict_from_url
import torch
from compressai.models import (
Cheng2020Anchor,
Cheng2020Attention,
FactorizedPrior,
JointAutoregressiveHierarchicalPriors,
MeanScaleHyperprior,
ScaleHyperprior,
MultiscaleDecomp,
)
from .pretrained import load_pretrained
__all__ = [
"bmshj2018_factorized",
"bmshj2018_hyperprior",
"mbt2018",
"mbt2018_mean",
"cheng2020_anchor",
"cheng2020_attn",
"multiscale_decomp",
]
model_architectures = {
"bmshj2018-factorized": FactorizedPrior,
"bmshj2018-hyperprior": ScaleHyperprior,
"mbt2018-mean": MeanScaleHyperprior,
"mbt2018": JointAutoregressiveHierarchicalPriors,
"cheng2020-anchor": Cheng2020Anchor,
"cheng2020-attn": Cheng2020Attention,
"multiscale-decomp": MultiscaleDecomp,
}
root_url = "https://compressai.s3.amazonaws.com/models/v1"
model_urls = {
"bmshj2018-factorized": {
"mse": {
1: f"{root_url}/bmshj2018-factorized-prior-1-446d5c7f.pth.tar",
2: f"{root_url}/bmshj2018-factorized-prior-2-87279a02.pth.tar",
3: f"{root_url}/bmshj2018-factorized-prior-3-5c6f152b.pth.tar",
4: f"{root_url}/bmshj2018-factorized-prior-4-1ed4405a.pth.tar",
5: f"{root_url}/bmshj2018-factorized-prior-5-866ba797.pth.tar",
6: f"{root_url}/bmshj2018-factorized-prior-6-9b02ea3a.pth.tar",
7: f"{root_url}/bmshj2018-factorized-prior-7-6dfd6734.pth.tar",
8: f"{root_url}/bmshj2018-factorized-prior-8-5232faa3.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/bmshj2018-factorized-ms-ssim-1-9781d705.pth.tar",
2: f"{root_url}/bmshj2018-factorized-ms-ssim-2-4a584386.pth.tar",
3: f"{root_url}/bmshj2018-factorized-ms-ssim-3-5352f123.pth.tar",
4: f"{root_url}/bmshj2018-factorized-ms-ssim-4-4f91b847.pth.tar",
5: f"{root_url}/bmshj2018-factorized-ms-ssim-5-b3a88897.pth.tar",
6: f"{root_url}/bmshj2018-factorized-ms-ssim-6-ee028763.pth.tar",
7: f"{root_url}/bmshj2018-factorized-ms-ssim-7-8c265a29.pth.tar",
8: f"{root_url}/bmshj2018-factorized-ms-ssim-8-8811bd14.pth.tar",
},
},
"bmshj2018-hyperprior": {
"mse": {
1: f"{root_url}/bmshj2018-hyperprior-1-7eb97409.pth.tar",
2: f"{root_url}/bmshj2018-hyperprior-2-93677231.pth.tar",
3: f"{root_url}/bmshj2018-hyperprior-3-6d87be32.pth.tar",
4: f"{root_url}/bmshj2018-hyperprior-4-de1b779c.pth.tar",
5: f"{root_url}/bmshj2018-hyperprior-5-f8b614e1.pth.tar",
6: f"{root_url}/bmshj2018-hyperprior-6-1ab9c41e.pth.tar",
7: f"{root_url}/bmshj2018-hyperprior-7-3804dcbd.pth.tar",
8: f"{root_url}/bmshj2018-hyperprior-8-a583f0cf.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/bmshj2018-hyperprior-ms-ssim-1-5cf249be.pth.tar",
2: f"{root_url}/bmshj2018-hyperprior-ms-ssim-2-1ff60d1f.pth.tar",
3: f"{root_url}/bmshj2018-hyperprior-ms-ssim-3-92dd7878.pth.tar",
4: f"{root_url}/bmshj2018-hyperprior-ms-ssim-4-4377354e.pth.tar",
5: f"{root_url}/bmshj2018-hyperprior-ms-ssim-5-c34afc8d.pth.tar",
6: f"{root_url}/bmshj2018-hyperprior-ms-ssim-6-3a6d8229.pth.tar",
7: f"{root_url}/bmshj2018-hyperprior-ms-ssim-7-8747d3bc.pth.tar",
8: f"{root_url}/bmshj2018-hyperprior-ms-ssim-8-cc15b5f3.pth.tar",
},
},
"mbt2018-mean": {
"mse": {
1: f"{root_url}/mbt2018-mean-1-e522738d.pth.tar",
2: f"{root_url}/mbt2018-mean-2-e54a039d.pth.tar",
3: f"{root_url}/mbt2018-mean-3-723404a8.pth.tar",
4: f"{root_url}/mbt2018-mean-4-6dba02a3.pth.tar",
5: f"{root_url}/mbt2018-mean-5-d504e8eb.pth.tar",
6: f"{root_url}/mbt2018-mean-6-a19628ab.pth.tar",
7: f"{root_url}/mbt2018-mean-7-d5d441d1.pth.tar",
8: f"{root_url}/mbt2018-mean-8-8089ae3e.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/mbt2018-mean-ms-ssim-1-5bf9c0b6.pth.tar",
2: f"{root_url}/mbt2018-mean-ms-ssim-2-e2a1bf3f.pth.tar",
3: f"{root_url}/mbt2018-mean-ms-ssim-3-640ce819.pth.tar",
4: f"{root_url}/mbt2018-mean-ms-ssim-4-12626c13.pth.tar",
5: f"{root_url}/mbt2018-mean-ms-ssim-5-1be7f059.pth.tar",
6: f"{root_url}/mbt2018-mean-ms-ssim-6-b83bf379.pth.tar",
7: f"{root_url}/mbt2018-mean-ms-ssim-7-ddf9644c.pth.tar",
8: f"{root_url}/mbt2018-mean-ms-ssim-8-0cc7b94f.pth.tar",
},
},
"mbt2018": {
"mse": {
1: f"{root_url}/mbt2018-1-3f36cd77.pth.tar",
2: f"{root_url}/mbt2018-2-43b70cdd.pth.tar",
3: f"{root_url}/mbt2018-3-22901978.pth.tar",
4: f"{root_url}/mbt2018-4-456e2af9.pth.tar",
5: f"{root_url}/mbt2018-5-b4a046dd.pth.tar",
6: f"{root_url}/mbt2018-6-7052e5ea.pth.tar",
7: f"{root_url}/mbt2018-7-8ba2bf82.pth.tar",
8: f"{root_url}/mbt2018-8-dd0097aa.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/mbt2018-ms-ssim-1-2878436b.pth.tar",
2: f"{root_url}/mbt2018-ms-ssim-2-c41cb208.pth.tar",
3: f"{root_url}/mbt2018-ms-ssim-3-d0dd64e8.pth.tar",
4: f"{root_url}/mbt2018-ms-ssim-4-a120e037.pth.tar",
5: f"{root_url}/mbt2018-ms-ssim-5-9b30e3b7.pth.tar",
6: f"{root_url}/mbt2018-ms-ssim-6-f8b3626f.pth.tar",
7: f"{root_url}/mbt2018-ms-ssim-7-16e6ff50.pth.tar",
8: f"{root_url}/mbt2018-ms-ssim-8-0cb49d43.pth.tar",
},
},
"cheng2020-anchor": {
"mse": {
1: f"{root_url}/cheng2020-anchor-1-dad2ebff.pth.tar",
2: f"{root_url}/cheng2020-anchor-2-a29008eb.pth.tar",
3: f"{root_url}/cheng2020-anchor-3-e49be189.pth.tar",
4: f"{root_url}/cheng2020-anchor-4-98b0b468.pth.tar",
5: f"{root_url}/cheng2020-anchor-5-23852949.pth.tar",
6: f"{root_url}/cheng2020-anchor-6-4c052b1a.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/cheng2020_anchor-ms-ssim-1-20f521db.pth.tar",
2: f"{root_url}/cheng2020_anchor-ms-ssim-2-c7ff5812.pth.tar",
3: f"{root_url}/cheng2020_anchor-ms-ssim-3-c23e22d5.pth.tar",
4: f"{root_url}/cheng2020_anchor-ms-ssim-4-0e658304.pth.tar",
5: f"{root_url}/cheng2020_anchor-ms-ssim-5-c0a95e77.pth.tar",
6: f"{root_url}/cheng2020_anchor-ms-ssim-6-f2dc1913.pth.tar",
},
},
"cheng2020-attn": {
"mse": {
1: f"{root_url}/cheng2020_attn-mse-1-465f2b64.pth.tar",
2: f"{root_url}/cheng2020_attn-mse-2-e0805385.pth.tar",
3: f"{root_url}/cheng2020_attn-mse-3-2d07bbdf.pth.tar",
4: f"{root_url}/cheng2020_attn-mse-4-f7b0ccf2.pth.tar",
5: f"{root_url}/cheng2020_attn-mse-5-26c8920e.pth.tar",
6: f"{root_url}/cheng2020_attn-mse-6-730501f2.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/cheng2020_attn-ms-ssim-1-c5381d91.pth.tar",
2: f"{root_url}/cheng2020_attn-ms-ssim-2-5dad201d.pth.tar",
3: f"{root_url}/cheng2020_attn-ms-ssim-3-5c9be841.pth.tar",
4: f"{root_url}/cheng2020_attn-ms-ssim-4-8b2f647e.pth.tar",
5: f"{root_url}/cheng2020_attn-ms-ssim-5-5ca1f34c.pth.tar",
6: f"{root_url}/cheng2020_attn-ms-ssim-6-216423ec.pth.tar",
},
},
}
cfgs = {
"bmshj2018-factorized": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (128, 192),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"bmshj2018-hyperprior": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (128, 192),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"mbt2018-mean": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (192, 320),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"mbt2018": {
1: (192, 192),
2: (192, 192),
3: (192, 192),
4: (192, 192),
5: (192, 320),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"cheng2020-anchor": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
"cheng2020-attn": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
"multiscale-decomp": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
}
def _load_model(
architecture, metric, quality, pretrained=False, progress=True, **kwargs
):
if architecture not in model_architectures:
raise ValueError(f'Invalid architecture name "{architecture}"')
if quality not in cfgs[architecture]:
raise ValueError(f'Invalid quality value "{quality}"')
if pretrained:
if (
architecture not in model_urls
or metric not in model_urls[architecture]
or quality not in model_urls[architecture][metric]
):
raise RuntimeError("Pre-trained model not yet available")
url = model_urls[architecture][metric][quality]
state_dict = load_state_dict_from_url(url, progress=progress)
state_dict = load_pretrained(state_dict)
model = model_architectures[architecture].from_state_dict(state_dict)
return model
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
return model
def bmshj2018_factorized(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_, Int Conf. on Learning Representations
(ICLR), 2018.
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model(
"bmshj2018-factorized", metric, quality, pretrained, progress, **kwargs
)
def bmshj2018_hyperprior(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
(ICLR), 2018.
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model(
"bmshj2018-hyperprior", metric, quality, pretrained, progress, **kwargs
)
def mbt2018_mean(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Scale Hyperprior with non zero-mean Gaussian conditionals from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model("mbt2018-mean", metric, quality, pretrained, progress, **kwargs)
def mbt2018(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Joint Autoregressive Hierarchical Priors model from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model("mbt2018", metric, quality, pretrained, progress, **kwargs)
def cheng2020_anchor(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Anchor model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
quality (int): Quality levels (1: lowest, highest: 6)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model(
"cheng2020-anchor", metric, quality, pretrained, progress, **kwargs
)
def cheng2020_attn(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Self-attention model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
quality (int): Quality levels (1: lowest, highest: 6)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model(
"cheng2020-attn", metric, quality, pretrained, progress, **kwargs
)
def _load_model_for_multi_scale(
architecture, metric, quality, pretrained=False, progress=True, **kwargs
):
if architecture not in model_architectures:
raise ValueError(f'Invalid architecture name "{architecture}"')
if quality not in cfgs[architecture]:
raise ValueError(f'Invalid quality value "{quality}"')
if pretrained:
url = model_urls["cheng2020-anchor"][metric][quality]
state_dict = load_state_dict_from_url(url, progress=progress)
state_dict = load_pretrained(state_dict)
        # Remap the pretrained cheng2020-anchor analysis transform (g_a) onto
        # the two-stage encoder of MultiscaleDecomp: g_a layers 0-2 initialise
        # g_a_block1 and layers 3-6 initialise g_a_block2 (re-indexed from 0);
        # every non-g_a weight is copied unchanged.
        new_statedict = {}
        for key in state_dict.keys():
            if "g_a" not in key:
                new_statedict[key] = state_dict[key]
                continue
            for num in range(0, 3):
                if "g_a." + str(num) in key:
                    new_statedict[key.replace("g_a", "g_a_block1")] = state_dict[key]
                    break
            for num in range(3, 7):
                if "g_a." + str(num) in key:
                    # e.g. "g_a.3.conv1.weight" -> "g_a_block2.0.conv1.weight"
                    new_statedict[key.replace("g_a", "g_a_block2").replace(str(num), str(num - 3))] = state_dict[key]
                    break
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
my_model_dict = model.state_dict()
my_model_dict.update(new_statedict)
model.load_state_dict(my_model_dict)
# free memory
state_dict = None
new_statedict = None
my_model_dict = None
return model
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
return model
def multiscale_decomp(quality, metric="mse", pretrained=False, progress=True, **kwargs):
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model_for_multi_scale(
"multiscale-decomp", metric, quality, pretrained, progress, **kwargs
)
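# --- Editor's usage sketch (not part of the original file). ---
# Builds zoo models without downloading weights; `pretrained=True` would pull
# the checkpoints listed in `model_urls` instead.  `multiscale_decomp` is not
# exercised because the extra constructor kwargs of MultiscaleDecomp live in
# another file and are not assumed here.
if __name__ == "__main__":
    net = cheng2020_anchor(quality=3, metric="mse", pretrained=False)
    print(type(net).__name__, sum(p.numel() for p in net.parameters()))
    net = bmshj2018_factorized(quality=1, metric="mse", pretrained=False)
    print(type(net).__name__, sum(p.numel() for p in net.parameters()))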
| 19,465 | 39.469854 | 115 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/zoo/pretrained.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Dict
from torch import Tensor
def rename_key(key: str) -> str:
"""Rename state_dict key."""
# Deal with modules trained with DataParallel
if key.startswith("module."):
key = key[7:]
# ResidualBlockWithStride: 'downsample' -> 'skip'
if ".downsample." in key:
return key.replace("downsample", "skip")
# EntropyBottleneck: nn.ParameterList to nn.Parameters
if key.startswith("entropy_bottleneck."):
if key.startswith("entropy_bottleneck._biases."):
return f"entropy_bottleneck._bias{key[-1]}"
if key.startswith("entropy_bottleneck._matrices."):
return f"entropy_bottleneck._matrix{key[-1]}"
if key.startswith("entropy_bottleneck._factors."):
return f"entropy_bottleneck._factor{key[-1]}"
return key
def load_pretrained(state_dict: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""Convert state_dict keys."""
state_dict = {rename_key(k): v for k, v in state_dict.items()}
return state_dict
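# --- Editor's usage sketch (not part of the original file). ---
# How the renaming behaves on a few representative checkpoint keys.
if __name__ == "__main__":
    examples = [
        "module.g_a.0.weight",            # DataParallel prefix stripped
        "g_s.1.downsample.0.weight",      # ResidualBlockWithStride: downsample -> skip
        "entropy_bottleneck._biases.0",   # nn.ParameterList entry -> _bias0
    ]
    for k in examples:
        print(k, "->", rename_key(k))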
| 2,750 | 41.323077 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/zoo/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .image import (
bmshj2018_factorized,
bmshj2018_hyperprior,
cheng2020_anchor,
cheng2020_attn,
mbt2018,
mbt2018_mean,
multiscale_decomp,
)
from .pretrained import load_pretrained as load_state_dict
models = {
"bmshj2018-factorized": bmshj2018_factorized,
"bmshj2018-hyperprior": bmshj2018_hyperprior,
"mbt2018-mean": mbt2018_mean,
"mbt2018": mbt2018,
"cheng2020-anchor": cheng2020_anchor,
"cheng2020-attn": cheng2020_attn,
"multiscale-decomp": multiscale_decomp,
}
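# --- Editor's note (not part of the original file). ---
# The registry maps CLI-style architecture names to the factory functions
# above, e.g. models["cheng2020-anchor"](quality=3, pretrained=False) builds
# the same network as calling cheng2020_anchor(3, pretrained=False) directly.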
| 2,244 | 43.9 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/SiddDataset.py | import random
import os, glob
import json
import torch
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
class SiddDataset(Dataset):
def __init__(self, dataset_opt):
self.root = dataset_opt['root']
self.transform = transforms.ToTensor()
self.patch_size = dataset_opt['patch_size']
self.phase = dataset_opt['phase']
if self.phase not in ['train', 'val', 'sidd']:
raise NotImplementedError('wrong phase argument!')
        # each training entry is listed 60 times so that one epoch draws many
        # random crops from every image; 'val' and 'sidd' entries appear once
        alpha = 60 if self.phase == 'train' else 1
        self.samples = []
with open(self.root, 'r') as f:
data = json.load(f)
for i, scene in enumerate(sorted(data.keys())):
all_scene_items = sorted(data[scene].items())
for entry, entry_dict in all_scene_items:
for _ in range(alpha):
self.samples.append(entry_dict)
def __getitem__(self, index):
sample_dict = self.samples[index]
if self.phase == 'train':
img_dir = sample_dict['img_dir']
gt_prefix = sample_dict['gt_prefix']
noisy_prefix = sample_dict['noisy_prefix']
            # the full image (H x W) is stored on disk as a row x col grid of
            # tiles, each h x w; sample a random patch position in full-image
            # coordinates and work out which tiles it overlaps
            row, col = sample_dict['row'], sample_dict['col']
            h, w = sample_dict['h'], sample_dict['w']
            H, W = sample_dict['H'], sample_dict['W']
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            # tile indices spanned by the patch (at most two per axis)
            r1 = rnd_h // h
            r2 = (rnd_h + self.patch_size - 1) // h
            c1 = rnd_w // w
            c2 = (rnd_w + self.patch_size - 1) // w
            rs = list(set({r1, r2}))
            cs = list(set({c1, c2}))
            # patch offset relative to the top-left spanned tile
            rnd_h = rnd_h % h
            rnd_w = rnd_w % w
# assert r1 < row and r2 < row and c1 < col and c2 < col, 'row={:d}, r1={:d}, r2={:d}; col={:d}, c1={:d}, c2={:d}'.format(row, r1, r2, col, c1, c2)
gt = []
noisy = []
for r in rs:
gt_r = []
noisy_r = []
for c in cs:
gt_path = os.path.join(img_dir, '{:s}_{:02d}_{:02d}.png'.format(gt_prefix, r+1, c+1))
gt_rc = Image.open(gt_path).convert("RGB")
gt_rc = self.transform(gt_rc)
gt_r.append(gt_rc)
noisy_path = os.path.join(img_dir, '{:s}_{:02d}_{:02d}.png'.format(noisy_prefix, r+1, c+1))
noisy_rc = Image.open(noisy_path).convert("RGB")
noisy_rc = self.transform(noisy_rc)
noisy_r.append(noisy_rc)
gt_r = torch.cat(gt_r, dim=2)
gt.append(gt_r)
noisy_r = torch.cat(noisy_r, dim=2)
noisy.append(noisy_r)
gt = torch.cat(gt, dim=1)[:, rnd_h:rnd_h+self.patch_size, rnd_w:rnd_w+self.patch_size]
noisy = torch.cat(noisy, dim=1)[:, rnd_h:rnd_h+self.patch_size, rnd_w:rnd_w+self.patch_size]
return gt, noisy
elif self.phase == 'val':
gt = Image.open(sample_dict['gt_path']).convert("RGB")
noisy = Image.open(sample_dict['noisy_path']).convert("RGB")
gt = self.transform(gt)
noisy = self.transform(noisy)
return gt, noisy
else:
noisy = Image.open(sample_dict['noisy_path']).convert("RGB")
noisy = self.transform(noisy)
return noisy
def __len__(self):
return len(self.samples)
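# --- Editor's usage sketch (not part of the original file). ---
# The JSON index is expected to map scene -> entry -> dict; training entries
# carry img_dir/gt_prefix/noisy_prefix plus the tile grid (row, col, h, w) and
# the full size (H, W), while 'val'/'sidd' entries carry gt_path/noisy_path.
# The json path below is hypothetical and only illustrates the wiring.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    opt = {'root': 'sidd_train.json', 'patch_size': 128, 'phase': 'train'}
    loader = DataLoader(SiddDataset(opt), batch_size=8, shuffle=True, num_workers=4)
    for gt, noisy in loader:
        print(gt.shape, noisy.shape)  # expected: [8, 3, 128, 128] each
        break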
| 3,608 | 38.659341 | 159 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/SyntheticDataset.py | import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
from pathlib import Path
from .utils import sRGBGamma, UndosRGBGamma
from torchvision import transforms
class SyntheticDataset(Dataset):
def __init__(self, dataset_opt):
splitdir = Path(dataset_opt['root']) / dataset_opt['phase']
if not splitdir.is_dir():
raise RuntimeError(f'Invalid directory "{splitdir}"')
self.samples = sorted([f for f in splitdir.iterdir() if f.is_file()])
self.phase = dataset_opt['phase']
if self.phase == 'train':
self.transform = transforms.Compose(
[transforms.RandomCrop(dataset_opt['patch_size']), transforms.ToTensor()]
)
elif self.phase == 'val':
self.transform = transforms.Compose(
[transforms.CenterCrop(dataset_opt['patch_size']), transforms.ToTensor()]
)
self.sigma_reads = [0.0068354, 0.01572141, 0.03615925, 0.08316627]
self.sigma_shots = [0.05200081**2, 0.07886314**2, 0.11960187**2, 0.18138522**2]
self.choices = len(self.sigma_reads)
else:
raise NotImplementedError('wrong phase argument!')
def __getitem__(self, index):
gt = Image.open(self.samples[index]).convert("RGB")
gt = self.transform(gt)
# degamma
noisy_degamma = UndosRGBGamma(gt)
# sample read and shot noise
if self.phase == 'train':
sigma_read = torch.from_numpy(
np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1)))
).type_as(noisy_degamma)
sigma_shot = torch.from_numpy(
np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1)))
).type_as(noisy_degamma)
else:
sigma_read = torch.from_numpy(
np.array([[[self.sigma_reads[index % self.choices]]]])
).type_as(noisy_degamma)
sigma_shot = torch.from_numpy(
np.array([[[self.sigma_shots[index % self.choices]]]])
).type_as(noisy_degamma)
sigma_read_com = sigma_read.expand_as(noisy_degamma)
sigma_shot_com = sigma_shot.expand_as(noisy_degamma)
        # apply the noise formula from the paper
if self.phase == 'train':
generator = None
else:
generator = torch.Generator()
generator.manual_seed(index)
noisy_degamma = torch.normal(noisy_degamma,
torch.sqrt(sigma_read_com ** 2 + noisy_degamma * sigma_shot_com),
generator=generator
).type_as(noisy_degamma)
# gamma
noisy = sRGBGamma(noisy_degamma)
# clamping
noisy = torch.clamp(noisy, 0.0, 1.0)
return gt, noisy
def __len__(self):
return len(self.samples)
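# --- Editor's sketch (not part of the original file). ---
# In linear (de-gamma'd) space the synthesis above is
#   noisy ~ Normal(clean, sqrt(sigma_read**2 + clean * sigma_shot)),
# i.e. signal-independent read noise plus signal-dependent shot noise.
# Self-contained check on a random tensor (run with
# `python -m compressai.datasets.SyntheticDataset` so the relative import works):
if __name__ == "__main__":
    clean = torch.rand(3, 64, 64)
    lin = UndosRGBGamma(clean)
    sigma_read, sigma_shot = 0.0068354, 0.05200081 ** 2  # lowest noise level
    noisy_lin = torch.normal(lin, torch.sqrt(sigma_read ** 2 + lin * sigma_shot))
    noisy = torch.clamp(sRGBGamma(noisy_lin), 0.0, 1.0)
    print('mean abs deviation:', float((noisy - clean).abs().mean()))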
| 2,867 | 34.85 | 91 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/SyntheticTestDataset.py | import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
from pathlib import Path
from .utils import sRGBGamma, UndosRGBGamma
from torchvision import transforms
class SyntheticTestDataset(Dataset):
def __init__(self, dataset_opt):
root = Path(dataset_opt['root'])
if not root.is_dir():
raise RuntimeError(f'Invalid directory "{root}"')
self.samples = sorted([f for f in root.iterdir() if f.is_file()])
self.phase = dataset_opt['phase']
self.transform = transforms.ToTensor()
noise_level = dataset_opt['level']
sigma_reads = [0.0068354, 0.01572141, 0.03615925, 0.08316627]
sigma_shots = [0.05200081**2, 0.07886314**2, 0.11960187**2, 0.18138522**2]
self.sigma_read = sigma_reads[noise_level-1]
self.sigma_shot = sigma_shots[noise_level-1]
def __getitem__(self, index):
gt = Image.open(self.samples[index]).convert("RGB")
gt = self.transform(gt)
# degamma
noisy_degamma = UndosRGBGamma(gt)
# read and shot noise
sigma_read = torch.from_numpy(
np.array([[[self.sigma_read]]])
).type_as(noisy_degamma)
sigma_shot = torch.from_numpy(
np.array([[[self.sigma_shot]]])
).type_as(noisy_degamma)
sigma_read_com = sigma_read.expand_as(noisy_degamma)
sigma_shot_com = sigma_shot.expand_as(noisy_degamma)
        # apply the noise formula from the paper
generator = torch.Generator()
generator.manual_seed(0)
noisy_degamma = torch.normal(noisy_degamma,
torch.sqrt(sigma_read_com ** 2 + noisy_degamma * sigma_shot_com),
generator=generator
).type_as(noisy_degamma)
# gamma
noisy = sRGBGamma(noisy_degamma)
# clamping
noisy = torch.clamp(noisy, 0.0, 1.0)
return gt, noisy
def __len__(self):
return len(self.samples)
| 1,969 | 31.295082 | 82 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/utils.py | import random
import numpy as np
import torch
def sRGBGamma(tensor):
    """Linear -> sRGB transfer: linear scaling below the threshold, gamma
    power curve above it (a small epsilon is added before the power)."""
    threshold = 0.0031308
    a = 0.055
    mult = 12.92
    gamma = 2.4
    res = torch.zeros_like(tensor)
    mask = tensor > threshold
    res[mask] = (1 + a) * torch.pow(tensor[mask] + 0.001, 1.0 / gamma) - a
    res[~mask] = tensor[~mask] * mult
    return res
def UndosRGBGamma(tensor):
    """sRGB -> linear transfer, the (approximate) inverse of ``sRGBGamma``."""
    threshold = 0.0031308
    a = 0.055
    mult = 12.92
    gamma = 2.4
    res = torch.zeros_like(tensor)
    mask = tensor > threshold
    res[~mask] = tensor[~mask] / mult
    res[mask] = torch.pow(tensor[mask] + a, gamma) / (1 + a)
    return res
def set_seeds(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed=seed) | 717 | 22.933333 | 74 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .SiddDataset import SiddDataset
from .SyntheticDataset import SyntheticDataset
from .SyntheticTestDataset import SyntheticTestDataset
__all__ = ["SiddDataset", "SyntheticDataset", "SyntheticTestDataset"]
| 1,928 | 54.114286 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/layers/gdn.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from compressai.ops.parametrizers import NonNegativeParametrizer
__all__ = ["GDN", "GDN1"]
class GDN(nn.Module):
r"""Generalized Divisive Normalization layer.
Introduced in `"Density Modeling of Images Using a Generalized Normalization
Transformation" <https://arxiv.org/abs/1511.06281>`_,
by Balle Johannes, Valero Laparra, and Eero P. Simoncelli, (2016).
.. math::
y[i] = \frac{x[i]}{\sqrt{\beta[i] + \sum_j(\gamma[j, i] * x[j]^2)}}
"""
def __init__(
self,
in_channels: int,
inverse: bool = False,
beta_min: float = 1e-6,
gamma_init: float = 0.1,
):
super().__init__()
beta_min = float(beta_min)
gamma_init = float(gamma_init)
self.inverse = bool(inverse)
self.beta_reparam = NonNegativeParametrizer(minimum=beta_min)
beta = torch.ones(in_channels)
beta = self.beta_reparam.init(beta)
self.beta = nn.Parameter(beta)
self.gamma_reparam = NonNegativeParametrizer()
gamma = gamma_init * torch.eye(in_channels)
gamma = self.gamma_reparam.init(gamma)
self.gamma = nn.Parameter(gamma)
def forward(self, x: Tensor) -> Tensor:
_, C, _, _ = x.size()
beta = self.beta_reparam(self.beta)
gamma = self.gamma_reparam(self.gamma)
gamma = gamma.reshape(C, C, 1, 1)
norm = F.conv2d(x ** 2, gamma, beta)
if self.inverse:
norm = torch.sqrt(norm)
else:
norm = torch.rsqrt(norm)
out = x * norm
return out
class GDN1(GDN):
r"""Simplified GDN layer.
Introduced in `"Computationally Efficient Neural Image Compression"
<http://arxiv.org/abs/1912.08771>`_, by Johnston Nick, Elad Eban, Ariel
Gordon, and Johannes Ballé, (2019).
.. math::
       y[i] = \frac{x[i]}{\beta[i] + \sum_j(\gamma[j, i] * |x[j]|)}
"""
def forward(self, x: Tensor) -> Tensor:
_, C, _, _ = x.size()
beta = self.beta_reparam(self.beta)
gamma = self.gamma_reparam(self.gamma)
gamma = gamma.reshape(C, C, 1, 1)
norm = F.conv2d(torch.abs(x), gamma, beta)
if not self.inverse:
norm = 1.0 / norm
out = x * norm
return out
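# --- Editor's usage sketch (not part of the original file). ---
# Both layers normalise each channel by a learned combination of the other
# channels (squared activations for GDN, absolute values for GDN1) and are
# shape preserving:
if __name__ == "__main__":
    x = torch.rand(2, 8, 16, 16)
    for layer in (GDN(8), GDN(8, inverse=True), GDN1(8)):
        assert layer(x).shape == x.shape
    print("GDN / IGDN / GDN1 preserve the input shape")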
| 4,085 | 32.491803 | 80 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/layers/layers.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Any
import torch
import torch.nn as nn
from torch import Tensor
from .gdn import GDN
__all__ = [
"AttentionBlock",
"MaskedConv2d",
"ResidualBlock",
"ResidualBlockUpsample",
"ResidualBlockWithStride",
"conv3x3",
"subpel_conv3x3",
"conv1x1",
]
class MaskedConv2d(nn.Conv2d):
r"""Masked 2D convolution implementation, mask future "unseen" pixels.
Useful for building auto-regressive network components.
Introduced in `"Conditional Image Generation with PixelCNN Decoders"
<https://arxiv.org/abs/1606.05328>`_.
Inherits the same arguments as a `nn.Conv2d`. Use `mask_type='A'` for the
first layer (which also masks the "current pixel"), `mask_type='B'` for the
following layers.
"""
def __init__(self, *args: Any, mask_type: str = "A", **kwargs: Any):
super().__init__(*args, **kwargs)
if mask_type not in ("A", "B"):
raise ValueError(f'Invalid "mask_type" value "{mask_type}"')
self.register_buffer("mask", torch.ones_like(self.weight.data))
_, _, h, w = self.mask.size()
self.mask[:, :, h // 2, w // 2 + (mask_type == "B") :] = 0
self.mask[:, :, h // 2 + 1 :] = 0
def forward(self, x: Tensor) -> Tensor:
        # TODO(begaintj): weight assignment is not supported by torchscript
self.weight.data *= self.mask
return super().forward(x)
def conv3x3(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
def conv_down(in_chn, out_chn, bias=False):
    """4x4 strided convolution for 2x down-sampling."""
    layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
    return layer
def subpel_conv3x3(in_ch: int, out_ch: int, r: int = 1) -> nn.Sequential:
"""3x3 sub-pixel convolution for up-sampling."""
return nn.Sequential(
nn.Conv2d(in_ch, out_ch * r ** 2, kernel_size=3, padding=1), nn.PixelShuffle(r)
)
def conv1x1(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:
"""1x1 convolution."""
return nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride)
class ResidualBlockWithStride(nn.Module):
"""Residual block with a stride on the first convolution.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
stride (int): stride value (default: 2)
"""
def __init__(self, in_ch: int, out_ch: int, stride: int = 2):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch, stride=stride)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
self.gdn = GDN(out_ch)
if stride != 1 or in_ch != out_ch:
self.skip = conv1x1(in_ch, out_ch, stride=stride)
else:
self.skip = None
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.gdn(out)
if self.skip is not None:
identity = self.skip(x)
out += identity
return out
class ResidualBlockUpsample(nn.Module):
"""Residual block with sub-pixel upsampling on the last convolution.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
upsample (int): upsampling factor (default: 2)
"""
def __init__(self, in_ch: int, out_ch: int, upsample: int = 2):
super().__init__()
self.subpel_conv = subpel_conv3x3(in_ch, out_ch, upsample)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv = conv3x3(out_ch, out_ch)
self.igdn = GDN(out_ch, inverse=True)
self.upsample = subpel_conv3x3(in_ch, out_ch, upsample)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.subpel_conv(x)
out = self.leaky_relu(out)
out = self.conv(out)
out = self.igdn(out)
identity = self.upsample(x)
out += identity
return out
class ResidualBlock(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch: int, out_ch: int, stride: int = 1):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch, stride)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
if in_ch != out_ch or stride > 1:
self.skip = conv1x1(in_ch, out_ch, stride)
else:
self.skip = None
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
if self.skip is not None:
identity = self.skip(x)
out = out + identity
return out
class AttentionBlock(nn.Module):
"""Self attention block.
Simplified variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
        N (int): Number of channels
"""
def __init__(self, N: int):
super().__init__()
class ResidualUnit(nn.Module):
"""Simple residual unit."""
def __init__(self):
super().__init__()
self.conv = nn.Sequential(
conv1x1(N, N // 2),
nn.ReLU(inplace=True),
conv3x3(N // 2, N // 2),
nn.ReLU(inplace=True),
conv1x1(N // 2, N),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv(x)
out += identity
out = self.relu(out)
return out
self.conv_a = nn.Sequential(ResidualUnit(), ResidualUnit(), ResidualUnit())
self.conv_b = nn.Sequential(
ResidualUnit(),
ResidualUnit(),
ResidualUnit(),
conv1x1(N, N),
)
def forward(self, x: Tensor) -> Tensor:
identity = x
a = self.conv_a(x)
b = self.conv_b(x)
out = a * torch.sigmoid(b)
out += identity
return out
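# --- Editor's usage sketch (not part of the original file). ---
# Run with `python -m compressai.layers.layers` (relative import of GDN).
# Mask type "A" hides the centre pixel and everything after it; the residual
# and attention blocks below halve, double and preserve the spatial size.
if __name__ == "__main__":
    m = MaskedConv2d(1, 1, kernel_size=3, padding=1, mask_type="A")
    print(m.mask[0, 0])
    x = torch.rand(1, 64, 32, 32)
    for block in (ResidualBlockWithStride(64, 64, stride=2),
                  ResidualBlockUpsample(64, 64, 2),
                  AttentionBlock(64)):
        print(type(block).__name__, tuple(block(x).shape))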
| 8,243 | 32.376518 | 87 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/layers/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .gdn import *
from .layers import *
| 1,759 | 54 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1,717 | 58.241379 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/find_close/__main__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Find the closest codec quality parameter to reach a given metric (bpp, ms-ssim,
or psnr).
Example usages:
* :code:`python -m compressai.utils.find_close webp ~/picture.png 0.5 --metric bpp`
* :code:`python -m compressai.utils.find_close jpeg ~/picture.png 35 --metric psnr --save`
"""
import argparse
import sys
from typing import Dict, List, Tuple
from PIL import Image
from compressai.utils.bench.codecs import AV1, BPG, HM, JPEG, JPEG2000, VTM, Codec, WebP
def get_codec_q_bounds(codec: Codec) -> Tuple[bool, int, int]:
    """Return (rev, lower, upper) exclusive quality bounds for `codec`;
    `rev` is True when a lower quality parameter means higher quality."""
    rev = False  # higher Q -> better quality or reverse
if isinstance(codec, (JPEG, JPEG2000, WebP)):
lower = -1
upper = 101
elif isinstance(codec, (BPG, HM)):
lower = -1
upper = 51
rev = True
elif isinstance(codec, (AV1, VTM)):
lower = -1
upper = 64
rev = True
else:
raise ValueError(f"Invalid codec {codec}")
return rev, lower, upper
def find_closest(
    codec: Codec, img: str, target: float, metric: str = "psnr"
) -> Tuple[int, Dict[str, float], Image.Image]:
    """Bisect the codec quality parameter until `metric` is as close as
    possible to `target`; returns (quality, metrics, reconstruction)."""
    rev, lower, upper = get_codec_q_bounds(codec)
best_rv = {}
best_rec = None
while upper > lower + 1:
mid = (upper + lower) // 2
rv, rec = codec.run(img, mid, return_rec=True)
is_best = best_rv == {} or abs(rv[metric] - target) < abs(
best_rv[metric] - target
)
if is_best:
best_rv = rv
best_rec = rec
if rv[metric] > target:
if not rev:
upper = mid
else:
lower = mid
elif rv[metric] < target:
if not rev:
lower = mid
else:
upper = mid
else:
break
sys.stderr.write(
f"\rtarget {metric}: {target:.4f} | value: {rv[metric]:.4f} | q: {mid}"
)
sys.stderr.flush()
sys.stderr.write("\n")
sys.stderr.flush()
return mid, best_rv, best_rec
codecs = [JPEG, WebP, JPEG2000, BPG, VTM, HM, AV1]
def setup_args():
description = "Collect codec metrics and performances."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="codec", help="Select codec")
subparsers.required = True
parser.add_argument("image", type=str, help="image filepath")
parser.add_argument("target", type=float, help="target value to match")
parser.add_argument(
"-m", "--metric", type=str, choices=["bpp", "psnr", "ms-ssim"], default="bpp"
)
parser.add_argument(
"--save", action="store_true", help="Save reconstructed image to disk"
)
return parser, subparsers
def main(argv: List[str]):
parser, subparsers = setup_args()
for c in codecs:
cparser = subparsers.add_parser(c.__name__.lower(), help=f"{c.__name__}")
c.setup_args(cparser)
args = parser.parse_args(argv)
codec_cls = next(c for c in codecs if c.__name__.lower() == args.codec)
codec = codec_cls(args)
quality, metrics, rec = find_closest(codec, args.image, args.target, args.metric)
for k, v in metrics.items():
print(f"{k}: {v:.4f}")
if args.save:
rec.save(f"output_{codec_cls.__name__.lower()}_q{quality}.png")
if __name__ == "__main__":
main(sys.argv[1:])
| 5,072 | 34.475524 | 94 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/find_close/__init__.py | 0 | 0 | 0 | py |
|
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/plot/__main__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Simple plotting utility to display Rate-Distortion (RD) curve comparisons
between codecs.
"""
import argparse
import json
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
_backends = ["matplotlib", "plotly"]
def parse_json_file(filepath, metric):
filepath = Path(filepath)
name = filepath.name.split(".")[0]
with filepath.open("r") as f:
try:
data = json.load(f)
except json.decoder.JSONDecodeError as err:
print(f'Error reading file "{filepath}"')
raise err
if "results" not in data or "bpp" not in data["results"]:
raise ValueError(f'Invalid file "{filepath}"')
if metric not in data["results"]:
raise ValueError(
f'Error: metric "{metric}" not available.'
f' Available metrics: {", ".join(data["results"].keys())}'
)
if metric == "ms-ssim":
# Convert to db
values = np.array(data["results"][metric])
data["results"][metric] = -10 * np.log10(1 - values)
return {
"name": data.get("name", name),
"xs": data["results"]["bpp"],
"ys": data["results"][metric],
}
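# --- Editor's note (not part of the original file). ---
# The checks above imply result files shaped roughly like this (values made
# up for illustration):
# {
#   "name": "cheng2020-anchor",
#   "results": {
#     "bpp":     [0.12, 0.25, 0.50],
#     "psnr":    [29.1, 31.4, 34.0],
#     "ms-ssim": [0.94, 0.96, 0.98]
#   }
# }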
def matplotlib_plt(
scatters, title, ylabel, output_file, limits=None, show=False, figsize=None, fontsize=10, loc="lower right"
):
linestyle = "-"
hybrid_matches = ["HM", "VTM", "JPEG", "JPEG2000", "WebP", "BPG", "AV1"]
if figsize is None:
figsize = (9, 6)
fig, ax = plt.subplots(figsize=figsize)
    for sc in scatters:
        # dashed lines for conventional codecs, solid for learned ones
        # (reset per curve so a later learned codec is not left dashed)
        if any(x in sc["name"] for x in hybrid_matches):
            linestyle = "--"
        else:
            linestyle = "-"
if sc["name"].lower() == 'deamnet':
plt.axhline(
y=sc["ys"][0],
linestyle="--",
linewidth=1.2,
color="black",
)
else:
ax.plot(
sc["xs"],
sc["ys"],
marker=".",
linestyle=linestyle,
linewidth=1.2,
label=sc["name"],
)
ax.set_xlabel("Bit-rate [bpp]", fontsize=fontsize)
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.grid()
if limits is not None:
ax.axis(limits)
ax.legend(loc=loc.replace('_', ' '), fontsize=12)
if title:
ax.title.set_text(title.replace('_', ' '))
ax.title.set_fontsize(fontsize)
if show:
plt.show()
if output_file:
fig.savefig(output_file, dpi=300, bbox_inches='tight')
def plotly_plt(
scatters, title, ylabel, output_file, limits=None, show=False, figsize=None, fontsize=10, loc="lower right"
):
del figsize
try:
import plotly.graph_objs as go
import plotly.io as pio
except ImportError:
raise SystemExit(
"Unable to import plotly, install with: pip install pandas plotly"
)
fig = go.Figure()
for sc in scatters:
fig.add_traces(go.Scatter(x=sc["xs"], y=sc["ys"], name=sc["name"]))
fig.update_xaxes(title_text="Bit-rate [bpp]")
fig.update_yaxes(title_text=ylabel)
if limits is not None:
fig.update_xaxes(range=[limits[0], limits[1]])
fig.update_yaxes(range=[limits[2], limits[3]])
filename = output_file or "plot.html"
pio.write_html(fig, file=filename, auto_open=True)
def setup_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"-f",
"--results-file",
metavar="",
default="",
type=str,
nargs="*",
required=True,
)
parser.add_argument(
"-m",
"--metric",
metavar="",
type=str,
default="psnr",
help="Metric (default: %(default)s)",
)
parser.add_argument("-t", "--title", metavar="", type=str, help="Plot title")
parser.add_argument("-o", "--output", metavar="", type=str, help="Output file name")
parser.add_argument(
"--fontsize",
metavar="",
type=int,
default=10,
help="Font size for title and labels, default: %(default)s",
)
parser.add_argument(
"--figsize",
metavar="",
type=int,
nargs=2,
default=(9, 6),
help="Figure relative size (width, height), default: %(default)s",
)
parser.add_argument(
"--axes",
metavar="",
type=float,
nargs=4,
default=None,
help="Axes limit (xmin, xmax, ymin, ymax), default: autorange",
)
parser.add_argument(
"--backend",
type=str,
metavar="",
default=_backends[0],
choices=_backends,
help="Change plot backend (default: %(default)s)",
)
parser.add_argument("--show", action="store_true", help="Open plot figure")
parser.add_argument(
"--loc",
metavar="",
type=str,
default="lower right",
help="Location for the legend, default: %(default)s",
)
return parser
def main(argv):
args = setup_args().parse_args(argv)
scatters = []
for f in args.results_file:
rv = parse_json_file(f, args.metric)
scatters.append(rv)
ylabel = f"{args.metric.upper()} [dB]"
func_map = {
"matplotlib": matplotlib_plt,
"plotly": plotly_plt,
}
func_map[args.backend](
scatters,
args.title,
ylabel,
args.output,
limits=args.axes,
figsize=args.figsize,
show=args.show,
fontsize=args.fontsize,
loc=args.loc
)
if __name__ == "__main__":
main(sys.argv[1:])
| 7,320 | 29.504167 | 111 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/plot/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1,717 | 58.241379 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/bench/__main__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Collect performance metrics of published traditional or end-to-end image
codecs.
"""
import argparse
import json
import multiprocessing as mp
import os
import sys
from collections import defaultdict
from itertools import starmap
from typing import List
from .codecs import AV1, BPG, HM, JPEG, JPEG2000, TFCI, VTM, Codec, WebP
# from torchvision.datasets.folder
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
codecs = [JPEG, WebP, JPEG2000, BPG, TFCI, VTM, HM, AV1]
# we need the quality index (not value) to compute the stats later
def func(codec, i, *args):
rv = codec.run(*args)
return i, rv
def collect(
codec: Codec,
dataset: str,
qualities: List[int],
metrics: List[str],
num_jobs: int = 1,
):
if not os.path.isdir(dataset):
raise OSError(f"No such directory: {dataset}")
filepaths = [
os.path.join(dirpath, f)
for dirpath, _, filenames in os.walk(dataset)
for f in filenames
if os.path.splitext(f)[-1].lower() in IMG_EXTENSIONS
]
pool = mp.Pool(num_jobs) if num_jobs > 1 else None
if len(filepaths) == 0:
print("No images found in the dataset directory")
sys.exit(1)
args = [
(codec, i, f, q, metrics) for i, q in enumerate(qualities) for f in filepaths
]
if pool:
rv = pool.starmap(func, args)
else:
rv = list(starmap(func, args))
results = [defaultdict(float) for _ in range(len(qualities))]
    for i, img_metrics in rv:
        for k, v in img_metrics.items():
            results[i][k] += v
# aggregate results for all images
for i, _ in enumerate(results):
for k, v in results[i].items():
results[i][k] = v / len(filepaths)
# list of dict -> dict of list
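    # e.g. [{"bpp": 0.2, "psnr": 30.1}, {"bpp": 0.4, "psnr": 32.5}]
    #   -> {"bpp": [0.2, 0.4], "psnr": [30.1, 32.5]}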
out = defaultdict(list)
for r in results:
for k, v in r.items():
out[k].append(v)
return out
def setup_args():
description = "Collect codec metrics."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="codec", help="Select codec")
subparsers.required = True
return parser, subparsers
def setup_common_args(parser):
parser.add_argument("dataset", type=str)
parser.add_argument(
"-j",
"--num-jobs",
type=int,
metavar="N",
default=1,
help="number of parallel jobs (default: %(default)s)",
)
parser.add_argument(
"-q",
"--quality",
dest="qualities",
metavar="Q",
default=[75],
nargs="+",
type=int,
help="quality parameter (default: %(default)s)",
)
parser.add_argument(
"--metrics",
dest="metrics",
default=["psnr", "ms-ssim"],
nargs="+",
help="do not return PSNR and MS-SSIM metrics (use for very small images)",
)
def main(argv):
parser, subparsers = setup_args()
for c in codecs:
cparser = subparsers.add_parser(c.__name__.lower(), help=f"{c.__name__}")
setup_common_args(cparser)
c.setup_args(cparser)
args = parser.parse_args(argv)
codec_cls = next(c for c in codecs if c.__name__.lower() == args.codec)
codec = codec_cls(args)
results = collect(
codec,
args.dataset,
args.qualities,
args.metrics,
args.num_jobs,
)
output = {
"name": codec.name,
"description": codec.description,
"results": results,
}
print(json.dumps(output, indent=2))
if __name__ == "__main__":
main(sys.argv[1:])
| 5,369 | 28.184783 | 85 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/bench/codecs.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import io
import os
import platform
import subprocess
import sys
import time
from tempfile import mkstemp
from typing import Dict, List, Optional, Union
import numpy as np
import PIL
import PIL.Image as Image
import torch
from pytorch_msssim import ms_ssim
from compressai.transforms.functional import rgb2ycbcr, ycbcr2rgb
# from torchvision.datasets.folder
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
def filesize(filepath: str) -> int:
"""Return file size in bits of `filepath`."""
if not os.path.isfile(filepath):
raise ValueError(f'Invalid file "{filepath}".')
return os.stat(filepath).st_size
def read_image(filepath: str, mode: str = "RGB") -> Image.Image:
"""Return PIL image in the specified `mode` format."""
if not os.path.isfile(filepath):
raise ValueError(f'Invalid file "{filepath}".')
return Image.open(filepath).convert(mode)
def _compute_psnr(a, b, max_val: float = 255.0) -> float:
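    # PSNR = 10 * log10(max_val**2 / MSE), written here as 20*log10(max_val) - 10*log10(MSE)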
mse = torch.mean((a - b) ** 2).item()
psnr = 20 * np.log10(max_val) - 10 * np.log10(mse)
return psnr
def _compute_ms_ssim(a, b, max_val: float = 255.0) -> float:
return ms_ssim(a, b, data_range=max_val).item()
_metric_functions = {
"psnr": _compute_psnr,
"ms-ssim": _compute_ms_ssim,
}
def compute_metrics(
a: Union[np.array, Image.Image],
b: Union[np.array, Image.Image],
metrics: Optional[List[str]] = None,
max_val: float = 255.0,
) -> Dict[str, float]:
"""Returns PSNR and MS-SSIM between images `a` and `b`."""
if metrics is None:
metrics = ["psnr"]
def _convert(x):
if isinstance(x, Image.Image):
x = np.asarray(x)
x = torch.from_numpy(x.copy()).float().unsqueeze(0)
if x.size(3) == 3:
# (1, H, W, 3) -> (1, 3, H, W)
x = x.permute(0, 3, 1, 2)
return x
a = _convert(a)
b = _convert(b)
out = {}
for metric_name in metrics:
out[metric_name] = _metric_functions[metric_name](a, b, max_val)
return out
def run_command(cmd, ignore_returncodes=None):
cmd = [str(c) for c in cmd]
try:
rv = subprocess.check_output(cmd)
return rv.decode("ascii")
except subprocess.CalledProcessError as err:
if ignore_returncodes is not None and err.returncode in ignore_returncodes:
return err.output
print(err.output.decode("utf-8"))
sys.exit(1)
def _get_ffmpeg_version():
rv = run_command(["ffmpeg", "-version"])
return rv.split()[2]
def _get_bpg_version(encoder_path):
rv = run_command([encoder_path, "-h"], ignore_returncodes=[1])
return rv.split()[4]
class Codec(abc.ABC):
"""Abstract base class"""
_description = None
def __init__(self, args):
self._set_args(args)
def _set_args(self, args):
return args
@classmethod
def setup_args(cls, parser):
pass
@property
def description(self):
return self._description
@property
@abc.abstractmethod
def name(self):
raise NotImplementedError()
def _load_img(self, img):
return read_image(os.path.abspath(img))
@abc.abstractmethod
def _run_impl(self, img, quality, *args, **kwargs):
raise NotImplementedError()
def run(
self,
img,
quality: int,
metrics: Optional[List[str]] = None,
return_rec: bool = False,
):
img = self._load_img(img)
info, rec = self._run_impl(img, quality)
info.update(compute_metrics(rec, img, metrics))
if return_rec:
return info, rec
return info
class PillowCodec(Codec):
"""Abstract codec based on Pillow bindings."""
fmt = None
@property
def name(self):
raise NotImplementedError()
def _run_impl(self, img, quality):
start = time.time()
tmp = io.BytesIO()
img.save(tmp, format=self.fmt, quality=int(quality))
enc_time = time.time() - start
tmp.seek(0)
size = tmp.getbuffer().nbytes
start = time.time()
rec = Image.open(tmp)
rec.load()
dec_time = time.time() - start
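        # bits per pixel = compressed size in bits / number of pixels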
bpp_val = float(size) * 8 / (img.size[0] * img.size[1])
out = {
"bpp": bpp_val,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
return out, rec
class JPEG(PillowCodec):
"""Use libjpeg linked in Pillow"""
fmt = "jpeg"
_description = f"JPEG. Pillow version {PIL.__version__}"
@property
def name(self):
return "JPEG"
class WebP(PillowCodec):
"""Use libwebp linked in Pillow"""
fmt = "webp"
_description = f"WebP. Pillow version {PIL.__version__}"
@property
def name(self):
return "WebP"
class BinaryCodec(Codec):
"""Call an external binary."""
fmt = None
@property
def name(self):
raise NotImplementedError()
def _run_impl(self, img, quality):
fd0, png_filepath = mkstemp(suffix=".png")
fd1, out_filepath = mkstemp(suffix=self.fmt)
# Encode
start = time.time()
run_command(self._get_encode_cmd(img, quality, out_filepath))
enc_time = time.time() - start
size = filesize(out_filepath)
# Decode
start = time.time()
run_command(self._get_decode_cmd(out_filepath, png_filepath))
dec_time = time.time() - start
# Read image
rec = read_image(png_filepath)
os.close(fd0)
os.remove(png_filepath)
os.close(fd1)
os.remove(out_filepath)
bpp_val = float(size) * 8 / (img.size[0] * img.size[1])
out = {
"bpp": bpp_val,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
return out, rec
def _get_encode_cmd(self, img, quality, out_filepath):
raise NotImplementedError()
def _get_decode_cmd(self, out_filepath, rec_filepath):
raise NotImplementedError()
class JPEG2000(BinaryCodec):
"""Use ffmpeg version.
(Not built-in support in default Pillow builds)
"""
fmt = ".jp2"
@property
def name(self):
return "JPEG2000"
@property
def description(self):
return f"JPEG2000. ffmpeg version {_get_ffmpeg_version()}"
def _get_encode_cmd(self, img, quality, out_filepath):
cmd = [
"ffmpeg",
"-loglevel",
"panic",
"-y",
"-i",
img,
"-vcodec",
"jpeg2000",
"-pix_fmt",
"yuv444p",
"-c:v",
"libopenjpeg",
"-compression_level",
quality,
out_filepath,
]
return cmd
def _get_decode_cmd(self, out_filepath, rec_filepath):
cmd = ["ffmpeg", "-loglevel", "panic", "-y", "-i", out_filepath, rec_filepath]
return cmd
class BPG(BinaryCodec):
"""BPG from Fabrice Bellard."""
fmt = ".bpg"
@property
def name(self):
return (
f"BPG {self.bitdepth}b {self.subsampling_mode} {self.encoder} "
f"{self.color_mode}"
)
@property
def description(self):
return f"BPG. BPG version {_get_bpg_version(self.encoder_path)}"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-m",
choices=["420", "444"],
default="444",
help="subsampling mode (default: %(default)s)",
)
parser.add_argument(
"-b",
choices=["8", "10"],
default="8",
help="bitdepth (default: %(default)s)",
)
parser.add_argument(
"-c",
choices=["rgb", "ycbcr"],
default="ycbcr",
help="colorspace (default: %(default)s)",
)
parser.add_argument(
"-e",
choices=["jctvc", "x265"],
default="x265",
help="HEVC implementation (default: %(default)s)",
)
parser.add_argument("--encoder-path", default="bpgenc", help="BPG encoder path")
parser.add_argument("--decoder-path", default="bpgdec", help="BPG decoder path")
def _set_args(self, args):
args = super()._set_args(args)
self.color_mode = args.c
self.encoder = args.e
self.subsampling_mode = args.m
self.bitdepth = args.b
self.encoder_path = args.encoder_path
self.decoder_path = args.decoder_path
return args
def _get_encode_cmd(self, img, quality, out_filepath):
if not 0 <= quality <= 51:
raise ValueError(f"Invalid quality value: {quality} (0,51)")
cmd = [
self.encoder_path,
"-o",
out_filepath,
"-q",
str(quality),
"-f",
self.subsampling_mode,
"-e",
self.encoder,
"-c",
self.color_mode,
"-b",
self.bitdepth,
img,
]
return cmd
def _get_decode_cmd(self, out_filepath, rec_filepath):
cmd = [self.decoder_path, "-o", rec_filepath, out_filepath]
return cmd
class TFCI(BinaryCodec):
"""Tensorflow image compression format from tensorflow/compression"""
fmt = ".tfci"
_models = [
"bmshj2018-factorized-mse",
"bmshj2018-hyperprior-mse",
"mbt2018-mean-mse",
]
@property
def description(self):
return "TFCI"
@property
def name(self):
return f"{self.model}"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-m",
"--model",
choices=cls._models,
default=cls._models[0],
help="model architecture (default: %(default)s)",
)
parser.add_argument(
"-p",
"--path",
required=True,
help="tfci python script path (default: %(default)s)",
)
def _set_args(self, args):
args = super()._set_args(args)
self.model = args.model
self.tfci_path = args.path
return args
def _get_encode_cmd(self, img, quality, out_filepath):
if not 1 <= quality <= 8:
raise ValueError(f"Invalid quality value: {quality} (1, 8)")
cmd = [
sys.executable,
self.tfci_path,
"compress",
f"{self.model}-{quality:d}",
img,
out_filepath,
]
return cmd
def _get_decode_cmd(self, out_filepath, rec_filepath):
cmd = [sys.executable, self.tfci_path, "decompress", out_filepath, rec_filepath]
return cmd
def get_vtm_encoder_path(build_dir):
system = platform.system()
try:
elfnames = {"Darwin": "EncoderApp", "Linux": "EncoderAppStatic"}
return os.path.join(build_dir, elfnames[system])
except KeyError as err:
raise RuntimeError(f'Unsupported platform "{system}"') from err
def get_vtm_decoder_path(build_dir):
system = platform.system()
try:
elfnames = {"Darwin": "DecoderApp", "Linux": "DecoderAppStatic"}
return os.path.join(build_dir, elfnames[system])
except KeyError as err:
raise RuntimeError(f'Unsupported platform "{system}"') from err
class VTM(Codec):
"""VTM: VVC reference software"""
fmt = ".bin"
@property
def description(self):
return "VTM"
@property
def name(self):
return "VTM"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-b",
"--build-dir",
type=str,
required=True,
help="VTM build dir",
)
parser.add_argument(
"-c",
"--config",
type=str,
required=True,
help="VTM config file",
)
parser.add_argument(
"--rgb", action="store_true", help="Use RGB color space (over YCbCr)"
)
def _set_args(self, args):
args = super()._set_args(args)
self.encoder_path = get_vtm_encoder_path(args.build_dir)
self.decoder_path = get_vtm_decoder_path(args.build_dir)
self.config_path = args.config
self.rgb = args.rgb
return args
def _run_impl(self, img, quality):
if not 0 <= quality <= 63:
raise ValueError(f"Invalid quality value: {quality} (0,63)")
# Taking 8bit input for now
bitdepth = 8
# Convert input image to yuv 444 file
arr = np.asarray(img)
fd, yuv_path = mkstemp(suffix=".yuv")
out_filepath = os.path.splitext(yuv_path)[0] + ".bin"
arr = arr.transpose((2, 0, 1)) # color channel first
if not self.rgb:
# convert rgb content to YCbCr
rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)
arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)
with open(yuv_path, "wb") as f:
f.write(arr.tobytes())
# Encode
height, width = arr.shape[1:]
cmd = [
self.encoder_path,
"-i",
yuv_path,
"-c",
self.config_path,
"-q",
quality,
"-o",
"/dev/null",
"-b",
out_filepath,
"-wdt",
width,
"-hgt",
height,
"-fr",
"1",
"-f",
"1",
"--InputChromaFormat=444",
"--InputBitDepth=8",
"--ConformanceMode=1",
]
if self.rgb:
cmd += [
"--InputColourSpaceConvert=RGBtoGBR",
"--SNRInternalColourSpace=1",
"--OutputInternalColourSpace=0",
]
start = time.time()
run_command(cmd)
enc_time = time.time() - start
# cleanup encoder input
os.close(fd)
os.unlink(yuv_path)
# Decode
cmd = [self.decoder_path, "-b", out_filepath, "-o", yuv_path, "-d", 8]
if self.rgb:
cmd.append("--OutputInternalColourSpace=GBRtoRGB")
start = time.time()
run_command(cmd)
dec_time = time.time() - start
# Compute PSNR
rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
rec_arr = rec_arr.reshape(arr.shape)
arr = arr.astype(np.float32) / (2 ** bitdepth - 1)
rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)
if not self.rgb:
arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
bpp = filesize(out_filepath) * 8.0 / (height * width)
# Cleanup
os.unlink(yuv_path)
os.unlink(out_filepath)
out = {
"bpp": bpp,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
rec = Image.fromarray(
(rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)
)
return out, rec
class HM(Codec):
"""HM: H.265/HEVC reference software"""
fmt = ".bin"
@property
def description(self):
return "HM"
@property
def name(self):
return "HM"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-b",
"--build-dir",
type=str,
required=True,
help="HM build dir",
)
parser.add_argument(
"-c", "--config", type=str, required=True, help="HM config file"
)
parser.add_argument(
"--rgb", action="store_true", help="Use RGB color space (over YCbCr)"
)
def _set_args(self, args):
args = super()._set_args(args)
self.encoder_path = os.path.join(args.build_dir, "TAppEncoderStatic")
self.decoder_path = os.path.join(args.build_dir, "TAppDecoderStatic")
self.config_path = args.config
self.rgb = args.rgb
return args
def _run_impl(self, img, quality):
if not 0 <= quality <= 51:
raise ValueError(f"Invalid quality value: {quality} (0,51)")
# Convert input image to yuv 444 file
arr = np.asarray(img)
fd, yuv_path = mkstemp(suffix=".yuv")
out_filepath = os.path.splitext(yuv_path)[0] + ".bin"
bitdepth = 8
arr = arr.transpose((2, 0, 1)) # color channel first
if not self.rgb:
# convert rgb content to YCbCr
rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)
arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)
with open(yuv_path, "wb") as f:
f.write(arr.tobytes())
# Encode
height, width = arr.shape[1:]
cmd = [
self.encoder_path,
"-i",
yuv_path,
"-c",
self.config_path,
"-q",
quality,
"-o",
"/dev/null",
"-b",
out_filepath,
"-wdt",
width,
"-hgt",
height,
"-fr",
"1",
"-f",
"1",
"--InputChromaFormat=444",
"--InputBitDepth=8",
"--SEIDecodedPictureHash",
"--Level=5.1",
"--CUNoSplitIntraACT=0",
"--ConformanceMode=1",
]
if self.rgb:
cmd += [
"--InputColourSpaceConvert=RGBtoGBR",
"--SNRInternalColourSpace=1",
"--OutputInternalColourSpace=0",
]
start = time.time()
run_command(cmd)
enc_time = time.time() - start
# cleanup encoder input
os.close(fd)
os.unlink(yuv_path)
# Decode
cmd = [self.decoder_path, "-b", out_filepath, "-o", yuv_path, "-d", 8]
if self.rgb:
cmd.append("--OutputInternalColourSpace=GBRtoRGB")
start = time.time()
run_command(cmd)
dec_time = time.time() - start
# Compute PSNR
rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
rec_arr = rec_arr.reshape(arr.shape)
arr = arr.astype(np.float32) / (2 ** bitdepth - 1)
rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)
if not self.rgb:
arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
bpp = filesize(out_filepath) * 8.0 / (height * width)
# Cleanup
os.unlink(yuv_path)
os.unlink(out_filepath)
out = {
"bpp": bpp,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
rec = Image.fromarray(
(rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)
)
return out, rec
class AV1(Codec):
"""AV1: AOM reference software"""
fmt = ".webm"
@property
def description(self):
return "AV1"
@property
def name(self):
return "AV1"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-b",
"--build-dir",
type=str,
required=True,
help="AOM binaries dir",
)
def _set_args(self, args):
args = super()._set_args(args)
self.encoder_path = os.path.join(args.build_dir, "aomenc")
self.decoder_path = os.path.join(args.build_dir, "aomdec")
return args
def _run_impl(self, img, quality):
if not 0 <= quality <= 63:
raise ValueError(f"Invalid quality value: {quality} (0,63)")
# Convert input image to yuv 444 file
arr = np.asarray(img)
fd, yuv_path = mkstemp(suffix=".yuv")
out_filepath = os.path.splitext(yuv_path)[0] + ".webm"
bitdepth = 8
arr = arr.transpose((2, 0, 1)) # color channel first
# convert rgb content to YCbCr
rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)
arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)
with open(yuv_path, "wb") as f:
f.write(arr.tobytes())
# Encode
height, width = arr.shape[1:]
cmd = [
self.encoder_path,
"-w",
width,
"-h",
height,
"--fps=1/1",
"--limit=1",
"--input-bit-depth=8",
"--cpu-used=0",
"--threads=1",
"--passes=2",
"--end-usage=q",
"--cq-level=" + str(quality),
"--i444",
"--skip=0",
"--tune=psnr",
"--psnr",
"--bit-depth=8",
"-o",
out_filepath,
yuv_path,
]
start = time.time()
run_command(cmd)
enc_time = time.time() - start
# cleanup encoder input
os.close(fd)
os.unlink(yuv_path)
# Decode
cmd = [
self.decoder_path,
out_filepath,
"-o",
yuv_path,
"--rawvideo",
"--output-bit-depth=8",
]
start = time.time()
run_command(cmd)
dec_time = time.time() - start
# Compute PSNR
rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
rec_arr = rec_arr.reshape(arr.shape)
arr = arr.astype(np.float32) / (2 ** bitdepth - 1)
rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)
arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
bpp = filesize(out_filepath) * 8.0 / (height * width)
# Cleanup
os.unlink(yuv_path)
os.unlink(out_filepath)
out = {
"bpp": bpp,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
rec = Image.fromarray(
(rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)
)
return out, rec
| 24,347 | 26.053333 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/bench/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1,717 | 58.241379 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/transforms/functional.py | from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
YCBCR_WEIGHTS = {
# Spec: (K_r, K_g, K_b) with K_g = 1 - K_r - K_b
"ITU-R_BT.709": (0.2126, 0.7152, 0.0722)
}
def _check_input_tensor(tensor: Tensor) -> None:
if (
not isinstance(tensor, Tensor)
or not tensor.is_floating_point()
or not len(tensor.size()) in (3, 4)
or not tensor.size(-3) == 3
):
raise ValueError(
"Expected a 3D or 4D tensor with shape (Nx3xHxW) or (3xHxW) as input"
)
def rgb2ycbcr(rgb: Tensor) -> Tensor:
"""RGB to YCbCr conversion for torch Tensor.
Using ITU-R BT.709 coefficients.
Args:
rgb (torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
ycbcr (torch.Tensor): converted tensor
"""
_check_input_tensor(rgb)
r, g, b = rgb.chunk(3, -3)
Kr, Kg, Kb = YCBCR_WEIGHTS["ITU-R_BT.709"]
y = Kr * r + Kg * g + Kb * b
cb = 0.5 * (b - y) / (1 - Kb) + 0.5
cr = 0.5 * (r - y) / (1 - Kr) + 0.5
ycbcr = torch.cat((y, cb, cr), dim=-3)
return ycbcr
def ycbcr2rgb(ycbcr: Tensor) -> Tensor:
"""YCbCr to RGB conversion for torch Tensor.
Using ITU-R BT.709 coefficients.
Args:
ycbcr (torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
rgb (torch.Tensor): converted tensor
"""
_check_input_tensor(ycbcr)
y, cb, cr = ycbcr.chunk(3, -3)
Kr, Kg, Kb = YCBCR_WEIGHTS["ITU-R_BT.709"]
r = y + (2 - 2 * Kr) * (cr - 0.5)
b = y + (2 - 2 * Kb) * (cb - 0.5)
g = (y - Kr * r - Kb * b) / Kg
rgb = torch.cat((r, g, b), dim=-3)
return rgb
def yuv_444_to_420(
yuv: Union[Tensor, Tuple[Tensor, Tensor, Tensor]],
mode: str = "avg_pool",
) -> Tuple[Tensor, Tensor, Tensor]:
"""Convert a 444 tensor to a 420 representation.
Args:
yuv (torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)): 444
input to be downsampled. Takes either a (Nx3xHxW) tensor or a tuple
of 3 (Nx1xHxW) tensors.
mode (str): algorithm used for downsampling: ``'avg_pool'``. Default
``'avg_pool'``
Returns:
(torch.Tensor, torch.Tensor, torch.Tensor): Converted 420
"""
if mode not in ("avg_pool",):
raise ValueError(f'Invalid downsampling mode "{mode}".')
if mode == "avg_pool":
def _downsample(tensor):
return F.avg_pool2d(tensor, kernel_size=2, stride=2)
if isinstance(yuv, torch.Tensor):
y, u, v = yuv.chunk(3, 1)
else:
y, u, v = yuv
return (y, _downsample(u), _downsample(v))
def yuv_420_to_444(
yuv: Tuple[Tensor, Tensor, Tensor],
mode: str = "bilinear",
return_tuple: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor]]:
"""Convert a 420 input to a 444 representation.
Args:
yuv (torch.Tensor, torch.Tensor, torch.Tensor): 420 input frames in
(Nx1xHxW) format
mode (str): algorithm used for upsampling: ``'bilinear'`` |
``'nearest'`` Default ``'bilinear'``
return_tuple (bool): return input as tuple of tensors instead of a
concatenated tensor, 3 (Nx1xHxW) tensors instead of one (Nx3xHxW)
tensor (default: False)
Returns:
(torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)): Converted
444
"""
if len(yuv) != 3 or any(not isinstance(c, torch.Tensor) for c in yuv):
raise ValueError("Expected a tuple of 3 torch tensors")
if mode not in ("bilinear", "nearest"):
raise ValueError(f'Invalid upsampling mode "{mode}".')
if mode in ("bilinear", "nearest"):
def _upsample(tensor):
return F.interpolate(tensor, scale_factor=2, mode=mode, align_corners=False)
y, u, v = yuv
u, v = _upsample(u), _upsample(v)
if return_tuple:
return y, u, v
return torch.cat((y, u, v), dim=1)
| 3,953 | 28.073529 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/transforms/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .transforms import *
| 1,744 | 55.290323 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/transforms/transforms.py | from . import functional as F_transforms
__all__ = [
"RGB2YCbCr",
"YCbCr2RGB",
"YUV444To420",
"YUV420To444",
]
class RGB2YCbCr:
"""Convert a RGB tensor to YCbCr.
The tensor is expected to be in the [0, 1] floating point range, with a
shape of (3xHxW) or (Nx3xHxW).
"""
def __call__(self, rgb):
"""
Args:
rgb (torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
ycbcr(torch.Tensor): converted tensor
"""
return F_transforms.rgb2ycbcr(rgb)
def __repr__(self):
return f"{self.__class__.__name__}()"
class YCbCr2RGB:
"""Convert a YCbCr tensor to RGB.
The tensor is expected to be in the [0, 1] floating point range, with a
shape of (3xHxW) or (Nx3xHxW).
"""
def __call__(self, ycbcr):
"""
Args:
ycbcr(torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
rgb(torch.Tensor): converted tensor
"""
return F_transforms.ycbcr2rgb(ycbcr)
def __repr__(self):
return f"{self.__class__.__name__}()"
class YUV444To420:
"""Convert a YUV 444 tensor to a 420 representation.
Args:
mode (str): algorithm used for downsampling: ``'avg_pool'``. Default
``'avg_pool'``
Example:
>>> x = torch.rand(1, 3, 32, 32)
>>> y, u, v = YUV444To420()(x)
>>> y.size() # 1, 1, 32, 32
>>> u.size() # 1, 1, 16, 16
"""
def __init__(self, mode: str = "avg_pool"):
self.mode = str(mode)
def __call__(self, yuv):
"""
Args:
yuv (torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)):
444 input to be downsampled. Takes either a (Nx3xHxW) tensor or
a tuple of 3 (Nx1xHxW) tensors.
Returns:
(torch.Tensor, torch.Tensor, torch.Tensor): Converted 420
"""
return F_transforms.yuv_444_to_420(yuv, mode=self.mode)
def __repr__(self):
return f"{self.__class__.__name__}()"
class YUV420To444:
"""Convert a YUV 420 input to a 444 representation.
Args:
mode (str): algorithm used for upsampling: ``'bilinear'`` | ``'nearest'``.
Default ``'bilinear'``
return_tuple (bool): return input as tuple of tensors instead of a
concatenated tensor, 3 (Nx1xHxW) tensors instead of one (Nx3xHxW)
tensor (default: False)
Example:
>>> y = torch.rand(1, 1, 32, 32)
>>> u, v = torch.rand(1, 1, 16, 16), torch.rand(1, 1, 16, 16)
>>> x = YUV420To444()((y, u, v))
>>> x.size() # 1, 3, 32, 32
"""
def __init__(self, mode: str = "bilinear", return_tuple: bool = False):
self.mode = str(mode)
self.return_tuple = bool(return_tuple)
def __call__(self, yuv):
"""
Args:
yuv (torch.Tensor, torch.Tensor, torch.Tensor): 420 input frames in
(Nx1xHxW) format
Returns:
(torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)): Converted
444
"""
return F_transforms.yuv_420_to_444(yuv, return_tuple=self.return_tuple)
def __repr__(self):
return f"{self.__class__.__name__}(return_tuple={self.return_tuple})"
| 3,308 | 26.806723 | 83 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/entropy_models/entropy_models.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import scipy.stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from compressai._CXX import pmf_to_quantized_cdf as _pmf_to_quantized_cdf
from compressai.ops import LowerBound
class _EntropyCoder:
"""Proxy class to an actual entropy coder class."""
def __init__(self, method):
if not isinstance(method, str):
raise ValueError(f'Invalid method type "{type(method)}"')
from compressai import available_entropy_coders
if method not in available_entropy_coders():
methods = ", ".join(available_entropy_coders())
raise ValueError(
f'Unknown entropy coder "{method}"' f" (available: {methods})"
)
if method == "ans":
from compressai import ans
encoder = ans.RansEncoder()
decoder = ans.RansDecoder()
elif method == "rangecoder":
import range_coder
encoder = range_coder.RangeEncoder()
decoder = range_coder.RangeDecoder()
self.name = method
self._encoder = encoder
self._decoder = decoder
def encode_with_indexes(self, *args, **kwargs):
return self._encoder.encode_with_indexes(*args, **kwargs)
def decode_with_indexes(self, *args, **kwargs):
return self._decoder.decode_with_indexes(*args, **kwargs)
def default_entropy_coder():
from compressai import get_entropy_coder
return get_entropy_coder()
def pmf_to_quantized_cdf(pmf: Tensor, precision: int = 16) -> Tensor:
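    # Quantize a floating-point PMF into the fixed-point CDF table (16-bit precision by
    # default) consumed by the entropy coder backends.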
cdf = _pmf_to_quantized_cdf(pmf.tolist(), precision)
cdf = torch.IntTensor(cdf)
return cdf
def _forward(self, *args: Any) -> Any:
raise NotImplementedError()
class EntropyModel(nn.Module):
r"""Entropy model base class.
Args:
likelihood_bound (float): minimum likelihood bound
entropy_coder (str, optional): set the entropy coder to use, use default
one if None
entropy_coder_precision (int): set the entropy coder precision
"""
def __init__(
self,
likelihood_bound: float = 1e-9,
entropy_coder: Optional[str] = None,
entropy_coder_precision: int = 16,
):
super().__init__()
if entropy_coder is None:
entropy_coder = default_entropy_coder()
self.entropy_coder = _EntropyCoder(entropy_coder)
self.entropy_coder_precision = int(entropy_coder_precision)
self.use_likelihood_bound = likelihood_bound > 0
if self.use_likelihood_bound:
self.likelihood_lower_bound = LowerBound(likelihood_bound)
# to be filled on update()
self.register_buffer("_offset", torch.IntTensor())
self.register_buffer("_quantized_cdf", torch.IntTensor())
self.register_buffer("_cdf_length", torch.IntTensor())
def __getstate__(self):
attributes = self.__dict__.copy()
attributes["entropy_coder"] = self.entropy_coder.name
return attributes
def __setstate__(self, state):
self.__dict__ = state
self.entropy_coder = _EntropyCoder(self.__dict__.pop("entropy_coder"))
@property
def offset(self):
return self._offset
@property
def quantized_cdf(self):
return self._quantized_cdf
@property
def cdf_length(self):
return self._cdf_length
# See: https://github.com/python/mypy/issues/8795
forward: Callable[..., Any] = _forward
def quantize(
self, inputs: Tensor, mode: str, means: Optional[Tensor] = None
) -> Tensor:
if mode not in ("noise", "dequantize", "symbols"):
raise ValueError(f'Invalid quantization mode: "{mode}"')
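        # "noise": add uniform noise in [-0.5, 0.5] (differentiable training proxy for rounding)
        # "dequantize": round(x - means) + means, i.e. reconstructed values
        # "symbols": rounded integer offsets from `means`, fed to the entropy coder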
if mode == "noise":
half = float(0.5)
noise = torch.empty_like(inputs).uniform_(-half, half)
inputs = inputs + noise
return inputs
outputs = inputs.clone()
if means is not None:
outputs -= means
outputs = torch.round(outputs)
if mode == "dequantize":
if means is not None:
outputs += means
return outputs
assert mode == "symbols", mode
outputs = outputs.int()
return outputs
def _quantize(
self, inputs: Tensor, mode: str, means: Optional[Tensor] = None
) -> Tensor:
warnings.warn("_quantize is deprecated. Use quantize instead.")
return self.quantize(inputs, mode, means)
@staticmethod
def dequantize(
inputs: Tensor, means: Optional[Tensor] = None, dtype: torch.dtype = torch.float
) -> Tensor:
if means is not None:
outputs = inputs.type_as(means)
outputs += means
else:
outputs = inputs.type(dtype)
return outputs
@classmethod
def _dequantize(cls, inputs: Tensor, means: Optional[Tensor] = None) -> Tensor:
warnings.warn("_dequantize. Use dequantize instead.")
return cls.dequantize(inputs, means)
def _pmf_to_cdf(self, pmf, tail_mass, pmf_length, max_length):
cdf = torch.zeros(
(len(pmf_length), max_length + 2), dtype=torch.int32, device=pmf.device
)
for i, p in enumerate(pmf):
prob = torch.cat((p[: pmf_length[i]], tail_mass[i]), dim=0)
_cdf = pmf_to_quantized_cdf(prob, self.entropy_coder_precision)
cdf[i, : _cdf.size(0)] = _cdf
return cdf
def _check_cdf_size(self):
if self._quantized_cdf.numel() == 0:
raise ValueError("Uninitialized CDFs. Run update() first")
if len(self._quantized_cdf.size()) != 2:
raise ValueError(f"Invalid CDF size {self._quantized_cdf.size()}")
def _check_offsets_size(self):
if self._offset.numel() == 0:
raise ValueError("Uninitialized offsets. Run update() first")
if len(self._offset.size()) != 1:
raise ValueError(f"Invalid offsets size {self._offset.size()}")
def _check_cdf_length(self):
if self._cdf_length.numel() == 0:
raise ValueError("Uninitialized CDF lengths. Run update() first")
if len(self._cdf_length.size()) != 1:
raise ValueError(f"Invalid offsets size {self._cdf_length.size()}")
def compress(self, inputs, indexes, means=None):
"""
Compress input tensors to char strings.
Args:
inputs (torch.Tensor): input tensors
indexes (torch.IntTensor): tensors CDF indexes
means (torch.Tensor, optional): optional tensor means
"""
symbols = self.quantize(inputs, "symbols", means)
if len(inputs.size()) < 2:
raise ValueError(
"Invalid `inputs` size. Expected a tensor with at least 2 dimensions."
)
if inputs.size() != indexes.size():
raise ValueError("`inputs` and `indexes` should have the same size.")
self._check_cdf_size()
self._check_cdf_length()
self._check_offsets_size()
strings = []
for i in range(symbols.size(0)):
rv = self.entropy_coder.encode_with_indexes(
symbols[i].reshape(-1).int().tolist(),
indexes[i].reshape(-1).int().tolist(),
self._quantized_cdf.tolist(),
self._cdf_length.reshape(-1).int().tolist(),
self._offset.reshape(-1).int().tolist(),
)
strings.append(rv)
return strings
def decompress(
self,
        strings: List[str],
indexes: torch.IntTensor,
dtype: torch.dtype = torch.float,
        means: Optional[torch.Tensor] = None,
):
"""
Decompress char strings to tensors.
Args:
            strings (list): compressed strings, one per tensor to decode
indexes (torch.IntTensor): tensors CDF indexes
dtype (torch.dtype): type of dequantized output
means (torch.Tensor, optional): optional tensor means
"""
if not isinstance(strings, (tuple, list)):
raise ValueError("Invalid `strings` parameter type.")
if not len(strings) == indexes.size(0):
raise ValueError("Invalid strings or indexes parameters")
if len(indexes.size()) < 2:
raise ValueError(
"Invalid `indexes` size. Expected a tensor with at least 2 dimensions."
)
self._check_cdf_size()
self._check_cdf_length()
self._check_offsets_size()
if means is not None:
if means.size()[:2] != indexes.size()[:2]:
raise ValueError("Invalid means or indexes parameters")
if means.size() != indexes.size():
for i in range(2, len(indexes.size())):
if means.size(i) != 1:
raise ValueError("Invalid means parameters")
cdf = self._quantized_cdf
outputs = cdf.new_empty(indexes.size())
for i, s in enumerate(strings):
values = self.entropy_coder.decode_with_indexes(
s,
indexes[i].reshape(-1).int().tolist(),
cdf.tolist(),
self._cdf_length.reshape(-1).int().tolist(),
self._offset.reshape(-1).int().tolist(),
)
outputs[i] = torch.tensor(
values, device=outputs.device, dtype=outputs.dtype
).reshape(outputs[i].size())
outputs = self.dequantize(outputs, means, dtype)
return outputs
class EntropyBottleneck(EntropyModel):
r"""Entropy bottleneck layer, introduced by J. Ballé, D. Minnen, S. Singh,
S. J. Hwang, N. Johnston, in `"Variational image compression with a scale
hyperprior" <https://arxiv.org/abs/1802.01436>`_.
This is a re-implementation of the entropy bottleneck layer in
*tensorflow/compression*. See the original paper and the `tensorflow
documentation
<https://tensorflow.github.io/compression/docs/entropy_bottleneck.html>`__
for an introduction.
"""
_offset: Tensor
def __init__(
self,
channels: int,
*args: Any,
tail_mass: float = 1e-9,
init_scale: float = 10,
filters: Tuple[int, ...] = (3, 3, 3, 3),
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self.channels = int(channels)
self.filters = tuple(int(f) for f in filters)
self.init_scale = float(init_scale)
self.tail_mass = float(tail_mass)
# Create parameters
filters = (1,) + self.filters + (1,)
scale = self.init_scale ** (1 / (len(self.filters) + 1))
channels = self.channels
for i in range(len(self.filters) + 1):
init = np.log(np.expm1(1 / scale / filters[i + 1]))
matrix = torch.Tensor(channels, filters[i + 1], filters[i])
matrix.data.fill_(init)
self.register_parameter(f"_matrix{i:d}", nn.Parameter(matrix))
bias = torch.Tensor(channels, filters[i + 1], 1)
nn.init.uniform_(bias, -0.5, 0.5)
self.register_parameter(f"_bias{i:d}", nn.Parameter(bias))
if i < len(self.filters):
factor = torch.Tensor(channels, filters[i + 1], 1)
nn.init.zeros_(factor)
self.register_parameter(f"_factor{i:d}", nn.Parameter(factor))
self.quantiles = nn.Parameter(torch.Tensor(channels, 1, 3))
init = torch.Tensor([-self.init_scale, 0, self.init_scale])
self.quantiles.data = init.repeat(self.quantiles.size(0), 1, 1)
target = np.log(2 / self.tail_mass - 1)
self.register_buffer("target", torch.Tensor([-target, 0, target]))
def _get_medians(self) -> Tensor:
medians = self.quantiles[:, :, 1:2]
return medians
def update(self, force: bool = False) -> bool:
# Check if we need to update the bottleneck parameters, the offsets are
        # only computed and stored when the conditional model is update()'d.
if self._offset.numel() > 0 and not force:
return False
medians = self.quantiles[:, 0, 1]
minima = medians - self.quantiles[:, 0, 0]
minima = torch.ceil(minima).int()
minima = torch.clamp(minima, min=0)
maxima = self.quantiles[:, 0, 2] - medians
maxima = torch.ceil(maxima).int()
maxima = torch.clamp(maxima, min=0)
self._offset = -minima
pmf_start = medians - minima
pmf_length = maxima + minima + 1
max_length = pmf_length.max().item()
device = pmf_start.device
samples = torch.arange(max_length, device=device)
samples = samples[None, :] + pmf_start[:, None, None]
half = float(0.5)
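        # bin probability: P(v) = sigmoid(logits(v + 0.5)) - sigmoid(logits(v - 0.5));
        # the sign trick below evaluates this difference in a numerically stable way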
lower = self._logits_cumulative(samples - half, stop_gradient=True)
upper = self._logits_cumulative(samples + half, stop_gradient=True)
sign = -torch.sign(lower + upper)
pmf = torch.abs(torch.sigmoid(sign * upper) - torch.sigmoid(sign * lower))
pmf = pmf[:, 0, :]
tail_mass = torch.sigmoid(lower[:, 0, :1]) + torch.sigmoid(-upper[:, 0, -1:])
quantized_cdf = self._pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)
self._quantized_cdf = quantized_cdf
self._cdf_length = pmf_length + 2
return True
def loss(self) -> Tensor:
logits = self._logits_cumulative(self.quantiles, stop_gradient=True)
loss = torch.abs(logits - self.target).sum()
return loss
def _logits_cumulative(self, inputs: Tensor, stop_gradient: bool) -> Tensor:
        # TorchScript not yet working (nn.Module indexing not supported)
logits = inputs
for i in range(len(self.filters) + 1):
matrix = getattr(self, f"_matrix{i:d}")
if stop_gradient:
matrix = matrix.detach()
logits = torch.matmul(F.softplus(matrix), logits)
bias = getattr(self, f"_bias{i:d}")
if stop_gradient:
bias = bias.detach()
logits += bias
if i < len(self.filters):
factor = getattr(self, f"_factor{i:d}")
if stop_gradient:
factor = factor.detach()
logits += torch.tanh(factor) * torch.tanh(logits)
return logits
@torch.jit.unused
def _likelihood(self, inputs: Tensor) -> Tensor:
half = float(0.5)
v0 = inputs - half
v1 = inputs + half
lower = self._logits_cumulative(v0, stop_gradient=False)
upper = self._logits_cumulative(v1, stop_gradient=False)
sign = -torch.sign(lower + upper)
sign = sign.detach()
likelihood = torch.abs(
torch.sigmoid(sign * upper) - torch.sigmoid(sign * lower)
)
return likelihood
def forward(
self, x: Tensor, training: Optional[bool] = None
) -> Tuple[Tensor, Tensor]:
if training is None:
training = self.training
if not torch.jit.is_scripting():
# x from B x C x ... to C x B x ...
perm = np.arange(len(x.shape))
perm[0], perm[1] = perm[1], perm[0]
# Compute inverse permutation
inv_perm = np.arange(len(x.shape))[np.argsort(perm)]
else:
# TorchScript in 2D for static inference
# Convert to (channels, ... , batch) format
perm = (1, 2, 3, 0)
inv_perm = (3, 0, 1, 2)
x = x.permute(*perm).contiguous()
shape = x.size()
values = x.reshape(x.size(0), 1, -1)
# Add noise or quantize
outputs = self.quantize(
values, "noise" if training else "dequantize", self._get_medians()
)
if not torch.jit.is_scripting():
likelihood = self._likelihood(outputs)
if self.use_likelihood_bound:
likelihood = self.likelihood_lower_bound(likelihood)
else:
# TorchScript not yet supported
likelihood = torch.zeros_like(outputs)
# Convert back to input tensor shape
outputs = outputs.reshape(shape)
outputs = outputs.permute(*inv_perm).contiguous()
likelihood = likelihood.reshape(shape)
likelihood = likelihood.permute(*inv_perm).contiguous()
return outputs, likelihood
@staticmethod
def _build_indexes(size):
dims = len(size)
N = size[0]
C = size[1]
view_dims = np.ones((dims,), dtype=np.int64)
view_dims[1] = -1
indexes = torch.arange(C).view(*view_dims)
indexes = indexes.int()
return indexes.repeat(N, 1, *size[2:])
@staticmethod
def _extend_ndims(tensor, n):
return tensor.reshape(-1, *([1] * n)) if n > 0 else tensor.reshape(-1)
def compress(self, x):
indexes = self._build_indexes(x.size())
medians = self._get_medians().detach()
spatial_dims = len(x.size()) - 2
medians = self._extend_ndims(medians, spatial_dims)
medians = medians.expand(x.size(0), *([-1] * (spatial_dims + 1)))
return super().compress(x, indexes, medians)
def decompress(self, strings, size):
output_size = (len(strings), self._quantized_cdf.size(0), *size)
indexes = self._build_indexes(output_size).to(self._quantized_cdf.device)
medians = self._extend_ndims(self._get_medians().detach(), len(size))
medians = medians.expand(len(strings), *([-1] * (len(size) + 1)))
return super().decompress(strings, indexes, medians.dtype, medians)
class GaussianConditional(EntropyModel):
r"""Gaussian conditional layer, introduced by J. Ballé, D. Minnen, S. Singh,
S. J. Hwang, N. Johnston, in `"Variational image compression with a scale
hyperprior" <https://arxiv.org/abs/1802.01436>`_.
This is a re-implementation of the Gaussian conditional layer in
*tensorflow/compression*. See the `tensorflow documentation
<https://tensorflow.github.io/compression/docs/api_docs/python/tfc/GaussianConditional.html>`__
for more information.
"""
def __init__(
self,
scale_table: Optional[Union[List, Tuple]],
*args: Any,
scale_bound: float = 0.11,
tail_mass: float = 1e-9,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
if not isinstance(scale_table, (type(None), list, tuple)):
raise ValueError(f'Invalid type for scale_table "{type(scale_table)}"')
if isinstance(scale_table, (list, tuple)) and len(scale_table) < 1:
raise ValueError(f'Invalid scale_table length "{len(scale_table)}"')
if scale_table and (
scale_table != sorted(scale_table) or any(s <= 0 for s in scale_table)
):
raise ValueError(f'Invalid scale_table "({scale_table})"')
self.tail_mass = float(tail_mass)
if scale_bound is None and scale_table:
scale_bound = self.scale_table[0]
if scale_bound <= 0:
raise ValueError("Invalid parameters")
self.lower_bound_scale = LowerBound(scale_bound)
self.register_buffer(
"scale_table",
self._prepare_scale_table(scale_table) if scale_table else torch.Tensor(),
)
self.register_buffer(
"scale_bound",
torch.Tensor([float(scale_bound)]) if scale_bound is not None else None,
)
@staticmethod
def _prepare_scale_table(scale_table):
return torch.Tensor(tuple(float(s) for s in scale_table))
def _standardized_cumulative(self, inputs: Tensor) -> Tensor:
half = float(0.5)
const = float(-(2 ** -0.5))
# Using the complementary error function maximizes numerical precision.
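        # i.e. the standard normal CDF: Phi(x) = 0.5 * erfc(-x / sqrt(2))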
return half * torch.erfc(const * inputs)
@staticmethod
def _standardized_quantile(quantile):
return scipy.stats.norm.ppf(quantile)
def update_scale_table(self, scale_table, force=False):
# Check if we need to update the gaussian conditional parameters, the
        # offsets are only computed and stored when the conditional model is
# updated.
if self._offset.numel() > 0 and not force:
return False
device = self.scale_table.device
self.scale_table = self._prepare_scale_table(scale_table).to(device)
self.update()
return True
def update(self):
multiplier = -self._standardized_quantile(self.tail_mass / 2)
pmf_center = torch.ceil(self.scale_table * multiplier).int()
pmf_length = 2 * pmf_center + 1
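        # each scale gets a symmetric support of 2*pmf_center+1 symbols around zero,
        # wide enough that the probability mass left outside is at most `tail_mass`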
max_length = torch.max(pmf_length).item()
device = pmf_center.device
samples = torch.abs(
torch.arange(max_length, device=device).int() - pmf_center[:, None]
)
samples_scale = self.scale_table.unsqueeze(1)
samples = samples.float()
samples_scale = samples_scale.float()
upper = self._standardized_cumulative((0.5 - samples) / samples_scale)
lower = self._standardized_cumulative((-0.5 - samples) / samples_scale)
pmf = upper - lower
tail_mass = 2 * lower[:, :1]
quantized_cdf = torch.Tensor(len(pmf_length), max_length + 2)
quantized_cdf = self._pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)
self._quantized_cdf = quantized_cdf
self._offset = -pmf_center
self._cdf_length = pmf_length + 2
def _likelihood(
self, inputs: Tensor, scales: Tensor, means: Optional[Tensor] = None
) -> Tensor:
half = float(0.5)
if means is not None:
values = inputs - means
else:
values = inputs
scales = self.lower_bound_scale(scales)
values = torch.abs(values)
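        # probability of the unit-width bin centered on `values` under N(0, scale);
        # the Gaussian is symmetric, so |values| can be used directly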
upper = self._standardized_cumulative((half - values) / scales)
lower = self._standardized_cumulative((-half - values) / scales)
likelihood = upper - lower
return likelihood
def forward(
self,
inputs: Tensor,
scales: Tensor,
means: Optional[Tensor] = None,
training: Optional[bool] = None,
) -> Tuple[Tensor, Tensor]:
if training is None:
training = self.training
outputs = self.quantize(inputs, "noise" if training else "dequantize", means)
likelihood = self._likelihood(outputs, scales, means)
if self.use_likelihood_bound:
likelihood = self.likelihood_lower_bound(likelihood)
return outputs, likelihood
def build_indexes(self, scales: Tensor) -> Tensor:
scales = self.lower_bound_scale(scales)
indexes = scales.new_full(scales.size(), len(self.scale_table) - 1).int()
for s in self.scale_table[:-1]:
indexes -= (scales <= s).int()
return indexes
| 24,657 | 34.840116 | 99 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/entropy_models/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .entropy_models import EntropyBottleneck, EntropyModel, GaussianConditional
__all__ = [
"EntropyModel",
"EntropyBottleneck",
"GaussianConditional",
]
| 1,886 | 50 | 80 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/parametrizers.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
from torch import Tensor
from .bound_ops import LowerBound
class NonNegativeParametrizer(nn.Module):
"""
    Non-negative reparametrization.
Used for stability during training.
"""
pedestal: Tensor
def __init__(self, minimum: float = 0, reparam_offset: float = 2 ** -18):
super().__init__()
self.minimum = float(minimum)
self.reparam_offset = float(reparam_offset)
pedestal = self.reparam_offset ** 2
self.register_buffer("pedestal", torch.Tensor([pedestal]))
bound = (self.minimum + self.reparam_offset ** 2) ** 0.5
self.lower_bound = LowerBound(bound)
def init(self, x: Tensor) -> Tensor:
return torch.sqrt(torch.max(x + self.pedestal, self.pedestal))
def forward(self, x: Tensor) -> Tensor:
out = self.lower_bound(x)
out = out ** 2 - self.pedestal
return out
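# ---------------------------------------------------------------------------
# Illustrative sketch appended by the editor; it is not part of the original
# module. The value is stored roughly as its square root via `init`, and
# `forward` squares it back while keeping the result at or above `minimum`,
# which keeps gradients well-behaved as the underlying value approaches zero.
if __name__ == "__main__":
    reparam = NonNegativeParametrizer(minimum=1e-4)
    target = torch.tensor([0.0, 1e-6, 0.5, 2.0])
    stored = reparam.init(target)   # what would be kept as the learnable tensor
    recovered = reparam(stored)     # clamped to ~minimum, ~= target elsewhere
    print(stored, recovered)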
| 2,642 | 39.661538 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/__init__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .bound_ops import LowerBound
from .ops import ste_round
from .parametrizers import NonNegativeParametrizer
__all__ = ["ste_round", "LowerBound", "NonNegativeParametrizer"]
| 1,896 | 53.2 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/bound_ops.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
from torch import Tensor
def lower_bound_fwd(x: Tensor, bound: Tensor) -> Tensor:
return torch.max(x, bound)
def lower_bound_bwd(x: Tensor, bound: Tensor, grad_output: Tensor):
pass_through_if = (x >= bound) | (grad_output < 0)
return pass_through_if * grad_output, None
class LowerBoundFunction(torch.autograd.Function):
"""Autograd function for the `LowerBound` operator."""
@staticmethod
def forward(ctx, x, bound):
ctx.save_for_backward(x, bound)
return lower_bound_fwd(x, bound)
@staticmethod
def backward(ctx, grad_output):
x, bound = ctx.saved_tensors
return lower_bound_bwd(x, bound, grad_output)
class LowerBound(nn.Module):
"""Lower bound operator, computes `torch.max(x, bound)` with a custom
gradient.
The derivative is replaced by the identity function when `x` is moved
towards the `bound`, otherwise the gradient is kept to zero.
"""
bound: Tensor
def __init__(self, bound: float):
super().__init__()
self.register_buffer("bound", torch.Tensor([float(bound)]))
@torch.jit.unused
def lower_bound(self, x):
return LowerBoundFunction.apply(x, self.bound)
def forward(self, x):
if torch.jit.is_scripting():
return torch.max(x, self.bound)
return self.lower_bound(x)
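# ---------------------------------------------------------------------------
# Illustrative sketch appended by the editor; it is not part of the original
# module. Unlike a plain clamp, LowerBound keeps passing a gradient through a
# clamped element whenever that gradient would move the input back up towards
# the bound; it only zeroes gradients that would push it further below.
if __name__ == "__main__":
    lb = LowerBound(1.0)
    x = torch.tensor([0.5, 2.0], requires_grad=True)
    y = lb(x)                  # -> tensor([1., 2.])
    (-y).sum().backward()      # this loss pulls x upwards, towards the bound
    print(y.detach(), x.grad)  # x.grad is [-1., -1.]: the clamped element is not zeroed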
| 3,102 | 37.308642 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/ops.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from torch import Tensor
def ste_round(x: Tensor) -> Tensor:
"""
Rounding with non-zero gradients. Gradients are approximated by replacing
the derivative by the identity function.
Used in `"Lossy Image Compression with Compressive Autoencoders"
<https://arxiv.org/abs/1703.00395>`_
.. note::
Implemented with the pytorch `detach()` reparametrization trick:
`x_round = x_round - x.detach() + x`
"""
return torch.round(x) - x.detach() + x
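# ---------------------------------------------------------------------------
# Illustrative sketch appended by the editor; it is not part of the original
# module. The forward value is the ordinary rounding, while the backward pass
# behaves as if the function were the identity, so gradients survive `round`.
if __name__ == "__main__":
    x = torch.tensor([0.2, 1.7, -0.6], requires_grad=True)
    y = ste_round(x)
    print(y.detach())   # tensor([ 0.,  2., -1.])
    y.sum().backward()
    print(x.grad)       # tensor([1., 1., 1.]) -- straight-through gradient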
| 2,223 | 43.48 | 78 | py |
3d_sir | 3d_sir-master/setup.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
3D SIR v0.1
For more information see: https://github.com/aasensio/3d_sir
::
Main Changes in 0.1
---------------------
* Working version
:copyright:
A. Asensio Ramos
:license:
The MIT License (MIT)
"""
from distutils.ccompiler import CCompiler
from distutils.errors import DistutilsExecError, CompileError
from distutils.unixccompiler import UnixCCompiler
from setuptools import find_packages, setup
from setuptools.extension import Extension
import os
import platform
from subprocess import Popen, PIPE
import sys
import numpy
import glob
import re
DOCSTRING = __doc__.strip().split("\n")
tmp = open('sir3d/__init__.py', 'r').read()
author = re.search('__author__ = "([^"]+)"', tmp).group(1)
version = re.search('__version__ = "([^"]+)"', tmp).group(1)
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
arch = platform.architecture()[0].lower()
if (ext == ".f" or ext == ".f90"):
if sys.platform == 'darwin' or sys.platform.startswith('linux'):
compiler_so = ["gfortran"]
if (ext == ".f90"):
cc_args = ["-O3", "-fPIC", "-c", "-ffree-form", "-ffree-line-length-none","-fbounds-check"]
if (ext == ".f"):
cc_args = ["-O3", "-fPIC", "-c", "-fno-automatic", "-ffixed-line-length-none","-fbounds-check"]
# Force architecture of shared library.
if arch == "32bit":
cc_args.append("-m32")
elif arch == "64bit":
cc_args.append("-m64")
else:
print("\nPlatform has architecture '%s' which is unknown to "
"the setup script. Proceed with caution\n" % arch)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
UnixCCompiler._compile = _compile
# Hack to prevent build_ext from trying to append "init" to the export symbols.
class finallist(list):
def append(self, object):
return
class MyExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.export_symbols = finallist(self.export_symbols)
def get_libgfortran_dir():
"""
Helper function returning the library directory of libgfortran. Useful
on OSX where the C compiler oftentimes has no knowledge of the library
directories of the Fortran compiler. I don't think it can do any harm on
Linux.
"""
for ending in [".3.dylib", ".dylib", ".3.so", ".so"]:
try:
p = Popen(['gfortran', "-print-file-name=libgfortran" + ending],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readline().decode().strip()
p.stdout.close()
if os.path.exists(line):
return [os.path.dirname(line)]
except:
continue
return []
pathGlobal = "src/"
# Monkey patch the compilers to treat Fortran files like C files.
CCompiler.language_map['.f90'] = "c"
UnixCCompiler.src_extensions.append(".f90")
CCompiler.language_map['.f'] = "c"
UnixCCompiler.src_extensions.append(".f")
# SIR
path = "src"
list_files = glob.glob(path+'/*.f*')
list_files.append(path+'/sir_code.pyx')
lib_sir = MyExtension('sir3d.sir_code',
libraries=["gfortran"],
library_dirs=get_libgfortran_dir(),
sources=list_files,
include_dirs=[numpy.get_include()])
setup_config = dict(
name='sir3d',
version=version,
description=DOCSTRING[0],
long_description="\n".join(DOCSTRING[2:]),
author=author,
author_email='[email protected]',
url='https://github.com/aasensio/3d_sir',
license='GNU General Public License, version 3 (GPLv3)',
platforms='OS Independent',
install_requires=['numpy','scipy','configobj','h5py','astropy','tqdm','cython'],
ext_modules=[lib_sir],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: MacOS",
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python',
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics',
],
keywords=['sir', 'radiative transfer'],
packages=find_packages(),
zip_safe=False,
include_package_data=True,
)
if __name__ == "__main__":
setup(**setup_config)
# Attempt to remove the mod files once again.
for filename in glob.glob("*.mod"):
try:
os.remove(filename)
except:
pass
| 5,067 | 30.478261 | 111 | py |
3d_sir | 3d_sir-master/examples/rempel.py | import sir3d
#iterator = sir3d.synth.Iterator(use_mpi=False)
iterator = sir3d.synth.Iterator(use_mpi=True, batch=256, workers_slant=None)
mod = sir3d.synth.Model('rempel.ini', rank=iterator.get_rank())
iterator.use_model(model=mod)
# iterator.run_all_pixels(rangex=[0,20], rangey=[0,20])
iterator.run_all_pixels()
| 319 | 23.615385 | 76 | py |
3d_sir | 3d_sir-master/sir3d/configuration.py | import numpy as np
from configobj import ConfigObj
__all__ = ['Configuration']
def _lower_to_sep(string, separator='='):
line=string.partition(separator)
string=str(line[0]).lower()+str(line[1])+str(line[2])
return string
class Configuration(object):
def __init__(self, filename):
f = open(filename, 'r')
tmp = f.readlines()
f.close()
input_lower = ['']
for l in tmp:
input_lower.append(_lower_to_sep(l)) # Convert keys to lowercase
# Parse configuration file
self.config_dict = ConfigObj(input_lower) | 593 | 22.76 | 76 | py |
3d_sir | 3d_sir-master/sir3d/__init__.py | __version__ = "2018.09.13"
__author__ = "Andres Asensio Ramos"
from . import sir_code
from .configuration import *
from . import psf
from . import synth
| 154 | 18.375 | 35 | py |
3d_sir | 3d_sir-master/sir3d/synth/slant.py | import numpy as np
import sys
"""
Model slant+projection tools
"""
# *****************************************************************************
def fftshift_image(im_in, dy=0.0, dx=0.0, useLog=False):
"""
FFTSHIFT_IMAGE, shifts an image by dy, dx pixels using
Fourier transforms.
Input:
          im_in: 2D numpy array with the image (ny,nx)
          dy: shift along the leftmost axis in pixels
          dx: shift along the rightmost axis in pixels
          useLog: if True, the shift is applied to the logarithm of the
                  image and the result is exponentiated back; useful for
                  strictly positive images with a large dynamic range.
AUTHOR: J. de la Cruz Rodriguez (ISP-SU 2020)
"""
#
# scale image to numbers and amplitudes around 1
#
if (useLog):
im = np.log(im_in)
else:
im = im_in
ny, nx = im.shape
me = im.mean()
st = np.std(im)
im = (im-me)/st
#
    # FFT of the input image
#
ft = np.fft.rfft2(im)
#
# get spatial frequency mesh, the x-axis has only the positive frequencies
# because the input data were non-complex numbers, so the negative part is
# redundant.
#
fx, fy = np.meshgrid(np.fft.rfftfreq(nx), np.fft.fftfreq(ny))
#
# Multiply by exponential phase factor and return to image space
#
if (useLog):
return np.exp((np.real((np.fft.irfft2(ft * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))[0:ny,0:nx])*st)+me)
else:
return (np.real((np.fft.irfft2(ft * np.exp(-2j*np.pi*(fx*-dx + fy*-dy))))[0:ny,0:nx])*st)+me
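# -----------------------------------------------------------------------------
# Illustrative sketch added by the editor (the helper name below is hypothetical
# and not part of the original module): shift a smooth periodic test image by a
# sub-pixel amount. The DC Fourier component is untouched by the phase factor,
# so the image mean is preserved by the shift.
def _example_fftshift_image():
    ny, nx = 64, 64
    yy, xx = np.mgrid[0:ny, 0:nx]
    im = np.cos(2 * np.pi * xx / nx) * np.cos(2 * np.pi * yy / ny) + 2.0
    shifted = fftshift_image(im, dy=0.0, dx=0.5)
    return im.mean(), shifted.mean()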
def project_field(vy, vx, vz, xmu, ymu):
"""
    Projects vector variables into the new LOS.
    This routine should be applied to the velocities and the magnetic field after performing the slant.
    It returns the projected components (vy1, vx1, vz1); the input arrays are left
    unchanged.
"""
ysign = np.sign(xmu)
xsign = np.sign(ymu)
ymu2 = np.sqrt(1.0 - ymu**2)
xmu2 = np.sqrt(1.0 - xmu**2)
vz1 = vz * xmu * ymu - ysign * vy * xmu * ymu2 - xsign * vx * xmu2
vy1 = vy * ymu + vz * ymu2 * ysign
vx1 = vx * xmu + (vz * ymu - ysign * vy * ymu2) * xmu2 * xsign
return vy1, vx1, vz1 | 2,325 | 25.735632 | 108 | py |
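# -----------------------------------------------------------------------------
# Illustrative sketch added by the editor; it is not part of the original
# module. For a vertical ray (xmu = ymu = 1) the projection must leave the
# vectors unchanged, while tilting only the y direction cosine mixes vz into
# vy but preserves the modulus of the vector.
if __name__ == "__main__":
    vz = np.ones(10)
    vy = np.zeros(10)
    vx = np.zeros(10)
    vy1, vx1, vz1 = project_field(vy, vx, vz, 1.0, 1.0)
    print(np.allclose(vy1, vy), np.allclose(vx1, vx), np.allclose(vz1, vz))
    vy2, vx2, vz2 = project_field(vy, vx, vz, 1.0, np.cos(np.deg2rad(30.0)))
    print(vz2[0], vy2[0], np.hypot(vz2[0], vy2[0]))  # 0.866..., 0.5, 1.0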
3d_sir | 3d_sir-master/sir3d/synth/model.py | from collections import OrderedDict
from sir3d import sir_code
from sir3d.configuration import Configuration
import numpy as np
import os
import scipy.stats
import logging
import h5py
import scipy.integrate as integ
from scipy import interpolate
# from ipdb import set_trace as stop
__all__ = ['Model']
class Model(object):
def __init__(self, config=None, rank=0):
if (rank != 0):
return
self.logger = logging.getLogger("model")
self.logger.setLevel(logging.DEBUG)
self.logger.handlers = []
self.rank = rank
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
self.LINES = ff.readlines()
ff.close()
self.macroturbulence = 0.0
ch = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
if (config is not None):
self.configuration = Configuration(config)
self.use_configuration(self.configuration.config_dict)
if (self.rank == 0):
if (self.eos_type == 'MANCHA'):
self.logger.info('Reading EOS - MANCHA')
filename = os.path.join(os.path.dirname(__file__), 'data/eos_mancha.h5')
f = h5py.File(filename, 'r')
self.T_eos = np.log10(f['T'][:])
self.P_eos = np.log10(f['P'][:])
self.Pe_eos = np.log10(f['Pel'][:])
f.close()
self.logger.info('Reading kappa5000 - MANCHA')
self.T_kappa5 = np.array([3.32, 3.34, 3.36, 3.38, 3.40, 3.42, 3.44, 3.46, 3.48, 3.50,
3.52, 3.54, 3.56, 3.58, 3.60, 3.62, 3.64, 3.66, 3.68, 3.70,
3.73, 3.76, 3.79, 3.82, 3.85, 3.88, 3.91, 3.94, 3.97, 4.00,
4.05, 4.10, 4.15, 4.20, 4.25, 4.30, 4.35, 4.40, 4.45, 4.50,
4.55, 4.60, 4.65, 4.70, 4.75, 4.80, 4.85, 4.90, 4.95, 5.00,
5.05, 5.10, 5.15, 5.20, 5.25, 5.30 ])
self.P_kappa5 = np.array([-2., -1.5, -1., -.5, 0., .5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6. ,6.5, 7., 7.5, 8. ])
self.kappa = np.zeros((56,21))
filename = os.path.join(os.path.dirname(__file__), 'data/kappa5000_mancha.dat')
f = open(filename, 'r')
for it in range(56):
for ip in range(21):
self.kappa[it,ip] = float(f.readline().split()[-1])
f.close()
else:
self.logger.info('Reading EOS and kappa5000 - SIR')
filename = os.path.join(os.path.dirname(__file__), 'data/kappa5000_eos_sir.h5')
f = h5py.File(filename, 'r')
self.T_eos = np.log10(f['T'][:])
self.P_eos = np.log10(f['P'][:])
self.Pe_eos = np.log10(f['Pe'][:])
self.T_kappa5 = np.log10(f['T'][:])
self.P_kappa5 = np.log10(f['P'][:])
self.kappa = f['kappa5000'][:]
f.close()
def __getstate__(self):
d = self.__dict__.copy()
if 'logger' in d:
d['logger'] = d['logger'].name
return d
def __setstate__(self, d):
if 'logger' in d:
d['logger'] = logging.getLogger(d['logger'])
self.__dict__.update(d)
def use_configuration(self, config_dict):
"""
Use a configuration file
Parameters
----------
config_dict : dict
Dictionary containing all the options from the configuration file previously read
Returns
-------
None
"""
# Deal with the spectral regions
tmp = config_dict['spectral regions']
# Output file and atmosphere type
self.output_file = config_dict['general']['stokes output']
self.atmosphere_type = config_dict['atmosphere']['type']
self.eos_type = config_dict['general']['eos']
self.logger.info('Output Stokes file : {0}'.format(self.output_file))
if (config_dict['general']['interpolated model output'] != 'None'):
self.interpolated_model_filename = config_dict['general']['interpolated model output']
self.interpolated_tau = np.array([float(i) for i in config_dict['general']['interpolate tau']])
self.n_tau = len(self.interpolated_tau)
self.logger.info('Output model file : {0}'.format(self.interpolated_model_filename))
else:
self.interpolated_model_filename = None
# Add spectral regions
self.init_sir(config_dict['spectral regions'])
self.spectral_regions_dict = config_dict['spectral regions']
# Read atmosphere
if (self.atmosphere_type == 'MURAM'):
if (self.rank == 0):
self.logger.info('Using MURAM atmosphere')
self.model_shape = tuple([int(k) for k in config_dict['atmosphere']['dimensions']])
self.nx, self.ny, self.nz = self.model_shape
self.deltaz = float(config_dict['atmosphere']['deltaz']) * np.arange(self.ny)
if ('deltaxy' in config_dict['atmosphere']):
self.deltaxy = float(config_dict['atmosphere']['deltaxy'])
self.T_file = config_dict['atmosphere']['temperature']
self.logger.info(' - T file : {0}'.format(self.T_file))
self.P_file = config_dict['atmosphere']['pressure']
self.logger.info(' - P file : {0}'.format(self.P_file))
self.rho_file = config_dict['atmosphere']['density']
self.logger.info(' - rho file : {0}'.format(self.rho_file))
if ('vz' in config_dict['atmosphere']):
self.vz_file = config_dict['atmosphere']['vz']
self.vz_type = 'vz'
self.logger.info(' - vz file : {0}'.format(self.vz_file))
elif ('rho_vz' in config_dict['atmosphere']):
self.vz_file = config_dict['atmosphere']['rho_vz']
self.vz_type = 'rho_vz'
self.logger.info(' - rho_vz file : {0}'.format(self.vz_file))
else:
raise Exception("You need to provide either vz or rho_vz")
if ('vx' in config_dict['atmosphere']):
self.vx_file = config_dict['atmosphere']['vx']
self.logger.info(' - vx file : {0}'.format(self.vx_file))
else:
self.vx_file = None
if ('vy' in config_dict['atmosphere']):
self.vy_file = config_dict['atmosphere']['vy']
self.logger.info(' - vy file : {0}'.format(self.vy_file))
else:
self.vy_file = None
self.Bx_file = config_dict['atmosphere']['bx']
self.By_file = config_dict['atmosphere']['by']
self.Bz_file = config_dict['atmosphere']['bz']
self.logger.info(' - Bx file : {0}'.format(self.Bx_file))
self.logger.info(' - By file : {0}'.format(self.By_file))
self.logger.info(' - Bz file : {0}'.format(self.Bz_file))
if ('tau delta' in config_dict['atmosphere']):
self.tau_fine = float(config_dict['atmosphere']['tau delta'])
self.logger.info(' - tau axis will be interpolated to have delta={0}'.format(self.tau_fine))
else:
self.tau_fine = 0.0
if ('mux' in config_dict['atmosphere']):
self.mux = float(config_dict['atmosphere']['mux'])
else:
self.mux = 1.0
if ('muy' in config_dict['atmosphere']):
self.muy = float(config_dict['atmosphere']['muy'])
else:
self.muy = 1.0
if (self.mux < 1.0 or self.muy < 1.0):
self.need_slant = True
self.xangle = np.arccos(self.mux)
self.yangle = np.arccos(self.muy)
self.mu = np.cos(np.sqrt(self.xangle**2 + self.yangle**2))
self.deltaz_new = self.deltaz / np.abs(self.mu)
                self.logger.info(f'  Slanting atmosphere to mux={self.mux} - muy={self.muy}')
self.logger.info(f' Equivalent mu={self.mu}')
else:
self.need_slant = False
if (self.mux < 1.0 or self.muy < 1.0):
self.need_slant = True
if (self.vx_file is None or self.vy_file is None):
raise Exception("For inclined rays you need to provide all velocity components")
self.zeros = np.zeros(self.ny)
self.maximum_tau = float(config_dict['atmosphere']['maximum tau'])
self.bx_multiplier = 1.0
self.by_multiplier = 1.0
self.bz_multiplier = 1.0
self.vz_multiplier = 1.0
if ('multipliers' in config_dict['atmosphere']):
if ('bx' in config_dict['atmosphere']['multipliers']):
self.bx_multiplier = float(config_dict['atmosphere']['multipliers']['bx'])
self.logger.info('Bx multiplier : {0}'.format(self.bx_multiplier))
if ('by' in config_dict['atmosphere']['multipliers']):
self.by_multiplier = float(config_dict['atmosphere']['multipliers']['by'])
self.logger.info('By multiplier : {0}'.format(self.by_multiplier))
if ('bz' in config_dict['atmosphere']['multipliers']):
self.bz_multiplier = float(config_dict['atmosphere']['multipliers']['bz'])
self.logger.info('Bz multiplier : {0}'.format(self.bz_multiplier))
if ('vz' in config_dict['atmosphere']['multipliers']):
self.vz_multiplier = float(config_dict['atmosphere']['multipliers']['vz'])
self.logger.info('vz multiplier : {0}'.format(self.vz_multiplier))
# def init_sir_external(self, spectral):
# """
# Initialize SIR for this synthesis
# Parameters
# ----------
# None
# Returns
# -------
# None
# """
# filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
# ff = open(filename, 'r')
# flines = ff.readlines()
# ff.close()
# f = open('malla.grid', 'w')
# f.write("IMPORTANT: a) All items must be separated by commas. \n")
# f.write(" b) The first six characters of the last line \n")
# f.write(" in the header (if any) must contain the symbol --- \n")
# f.write("\n")
# f.write("Line and blends indices : Initial lambda Step Final lambda \n")
# f.write("(in this order) (mA) (mA) (mA) \n")
# f.write("-----------------------------------------------------------------------\n")
# for k, v in spectral.items():
# self.logger.info('Adding spectral regions {0}'.format(v['name']))
# left = float(v['wavelength range'][0])
# right = float(v['wavelength range'][1])
# n_lambda = int(v['n. wavelengths'])
# delta = (right - left) / n_lambda
# for i in range(len(v['spectral lines'])):
# for l in flines:
# tmp = l.split()
# index = int(tmp[0].split('=')[0])
# if (index == int(v['spectral lines'][0])):
# wvl = float(tmp[2])
# lines = ''
# n_lines = len(v['spectral lines'])
# for i in range(n_lines):
# lines += v['spectral lines'][i]
# if (i != n_lines - 1):
# lines += ', '
# f.write("{0} : {1}, {2}, {3}\n".format(lines, 1e3*(left-wvl), 1e3*delta, 1e3*(right-wvl)))
# f.close()
# self.n_lambda_sir = sir_code.init_externalfile(1, filename)
# def init_sir_agents_external(self):
# filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
# self.n_lambda_sir = sir_code.init_externalfile(1, filename)
def init_sir(self, spectral):
"""
        Initialize SIR for this synthesis. This version does not make use of any external file, since
        relying on external files might not be safe when running in MPI mode.
Parameters
----------
None
Returns
-------
None
"""
lines = []
n_lines = 0
elements = {'H':1,'HE':2,'LI':3,'BE':4,'B':5,'C':6,'N':7,'O':8,'F':9,'NE':10,
'NA':11,'MG':12,'AL':13,'SI':14,'P':15,'S':16,'CL':17,'AR':18,'K':19,'CA':20,'SC':21,'TI':22,'V':23,'CR':24,
'MN':25,'FE':26,'CO':27,'NI':28,'CU':29,'ZN':30,'GA':31,'GE':32,'AS':33,'SE':34,'BR':35,'KR':36,
'RB':37,'SR':38,'Y':39,'ZR':40,'NB':41,'MO':42,'TC':43,'RU':44,'RH':45,'PD':46,'AG':47,'CD':48,'IN':49,
'SN':50,'SB':51,'TE':52,'I':53,'XE':54,'CS':55,'BA':56,'LA':57,'CE':58,'PR':59,'ND':60,'PM':61,
'SM':62,'EU':63,'GD':64,'TB':65,'DY':66,'HO':67,'ER':68,'TM':69,'YB':70,'LU':71,'HF':72,'TA':73,'W':74,
'RE':75,'OS':76,'IR':77,'PT':78,'AU':79,'HG':80,'TL':81,'PB':82,'BI':83,'PO':84,'AT':85,'RN':86,
'FR':87,'RA':88,'AC':89,'TH':90,'PA':91,'U':92}
states = {'S': 0, 'P': 1, 'D': 2, 'F': 3, 'G': 4, 'H': 5, 'I': 6}
for k, v in spectral.items():
self.logger.info('Adding spectral regions {0}'.format(v['name']))
n_lines += 1
left = float(v['wavelength range'][0])
right = float(v['wavelength range'][1])
n_lambda = int(v['n. wavelengths'])
delta = (right - left) / n_lambda
nblend = len(v['spectral lines'])
lines = np.zeros(nblend, dtype=np.intc)
atom = np.zeros(nblend, dtype=np.intc)
istage = np.zeros(nblend, dtype=np.intc)
wvl = np.zeros(nblend)
zeff = np.zeros(nblend)
energy = np.zeros(nblend)
loggf = np.zeros(nblend)
mult1 = np.zeros(nblend, dtype=np.intc)
mult2 = np.zeros(nblend, dtype=np.intc)
design1 = np.zeros(nblend, dtype=np.intc)
design2 = np.zeros(nblend, dtype=np.intc)
tam1 = np.zeros(nblend)
tam2 = np.zeros(nblend)
alfa = np.zeros(nblend)
sigma = np.zeros(nblend)
for i in range(nblend):
lines[i] = v['spectral lines'][i]
for l in self.LINES:
tmp = l.split()
index = int(tmp[0].split('=')[0])
if (index == int(v['spectral lines'][i])):
atom[i] = elements[tmp[0].split('=')[1]]
istage[i] = tmp[1]
wvl[i] = float(tmp[2])
zeff[i] = float(tmp[3])
energy[i] = float(tmp[4])
loggf[i] = float(tmp[5])
mult1[i] = int(tmp[6][:-1])
mult2[i] = int(tmp[8][:-1])
design1[i] = states[tmp[6][-1]]
design2[i] = states[tmp[8][-1]]
tam1[i] = float(tmp[7].split('-')[0])
tam2[i] = float(tmp[9].split('-')[0])
if (len(tmp) == 12):
alfa[i] = float(tmp[-2])
sigma[i] = float(tmp[-1])
else:
alfa[i] = 0.0
sigma[i] = 0.0
lambda0 = 1e3*(left-wvl[0])
lambda1 = 1e3*(right-wvl[0])
sir_code.init(n_lines, nblend, lines, atom, istage, wvl, zeff, energy, loggf,
mult1, mult2, design1, design2, tam1, tam2, alfa, sigma, lambda0, lambda1, n_lambda)
self.n_lambda_sir = n_lambda
def intpltau(self, newtau, oldtau, var):
fX = interpolate.interp1d(oldtau, var, bounds_error=False, fill_value="extrapolate")
return fX(newtau)
def synth(self, z, T, P, rho, vz, Bx, By, Bz, interpolate_model=False,withstokes=True):
# Get ltau500 axis
log_T = np.log10(T)
log_P = np.log10(P)
it0 = np.searchsorted(self.T_kappa5, log_T) - 1
it1 = it0 + 1
ip0 = np.searchsorted(self.P_kappa5, log_P) - 1
ip1 = ip0 + 1
# When close to the edge of the interval
it0 = np.clip(it0, 0, self.T_kappa5.shape[0]-2)
it1 = np.clip(it1, 1, self.T_kappa5.shape[0]-1)
ip0 = np.clip(ip0, 0, self.P_kappa5.shape[0]-2)
ip1 = np.clip(ip1, 1, self.P_kappa5.shape[0]-1)
kappa = self.kappa[it0,ip0] * (self.T_kappa5[it1] - log_T) * (self.P_kappa5[ip1] - log_P) + \
self.kappa[it1,ip0] * (log_T - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - log_P) + \
self.kappa[it0,ip1] * (self.T_kappa5[it1] - log_T) * (log_P - self.P_kappa5[ip0]) + \
self.kappa[it1,ip1] * (log_T - self.T_kappa5[it0]) * (log_P - self.P_kappa5[ip0])
kappa /= ((self.T_kappa5[it1] - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - self.P_kappa5[ip0]))
if (self.eos_type == 'MANCHA'):
chi = (kappa * rho)[::-1]
else:
chi = kappa[::-1]
# Numeric error in higher layers
chi[chi<0] = 1e-20
tau = integ.cumtrapz(chi,x=z)
ltau = np.log10(np.insert(tau, 0, 0.5*tau[0]))[::-1]
ind = np.where(ltau < 2.0)[0]
# Get electron pressure
it0 = np.searchsorted(self.T_eos, log_T) - 1
it1 = it0 + 1
ip0 = np.searchsorted(self.P_eos, log_P) - 1
ip1 = ip0 + 1
# When close to the edge of the interval
it0 = np.clip(it0, 0, self.T_eos.shape[0]-2)
it1 = np.clip(it1, 1, self.T_eos.shape[0]-1)
ip0 = np.clip(ip0, 0, self.P_eos.shape[0]-2)
ip1 = np.clip(ip1, 1, self.P_eos.shape[0]-1)
if (self.eos_type == 'MANCHA'):
log_Pe = self.Pe_eos[ip0,it0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[ip1,it0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[ip0,it1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
self.Pe_eos[ip1,it1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
else:
log_Pe = self.Pe_eos[it0,ip0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[it1,ip0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[it0,ip1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
self.Pe_eos[it1,ip1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
log_Pe /= ((self.T_eos[it1] - self.T_eos[it0]) * (self.P_eos[ip1] - self.P_eos[ip0]))
if len(ltau[ind]) < 1:
ind = np.arange(len(ltau)-1)
if (withstokes):
if (self.tau_fine != 0.0):
taufino = np.arange(np.min(ltau), np.max(ltau), self.tau_fine)[::-1]
taufino = taufino[(taufino<2.0) & (taufino>-5.0)]
stokes, error = sir_code.synth(1, self.n_lambda_sir, taufino, self.intpltau(taufino, ltau, T),
10**self.intpltau(taufino, ltau, log_Pe), self.intpltau(taufino, ltau, np.zeros(len(T))),
self.intpltau(taufino, ltau, self.vz_multiplier*vz), self.intpltau(taufino, ltau, self.bx_multiplier*Bx),
self.intpltau(taufino, ltau, self.by_multiplier*By), self.intpltau(taufino, ltau, self.bz_multiplier*Bz), self.macroturbulence)
else:
stokes, error = sir_code.synth(1, self.n_lambda_sir, ltau[ind], T[ind], 10**log_Pe[ind], np.zeros(len(T[ind])), self.vz_multiplier*vz[ind],
self.bx_multiplier*Bx[ind], self.by_multiplier*By[ind], self.bz_multiplier*Bz[ind], self.macroturbulence)
if (error != 0):
stokes = -99.0 * np.ones_like(stokes)
else:
stokes = None
# We want to interpolate the model to certain isotau surfaces
if (interpolate_model):
model = np.zeros((7,self.n_tau))
model[0,:] = self.intpltau(self.interpolated_tau, ltau[::-1], z[::-1])
model[1,:] = self.intpltau(self.interpolated_tau, ltau[::-1], T[::-1])
model[2,:] = np.exp(self.intpltau(self.interpolated_tau, ltau[::-1], np.log(P[::-1])))
model[3,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.vz_multiplier * vz[::-1])
model[4,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bx_multiplier * Bx[::-1])
model[5,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.by_multiplier * By[::-1])
model[6,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bz_multiplier * Bz[::-1])
else:
model = np.zeros((7,len(self.deltaz)))
model[0,:] = z
model[1,:] = T
model[2,:] = P
model[3,:] = vz
model[4,:] = Bx
model[5,:] = By
model[6,:] = Bz
return stokes, model
def synth2d(self, z, T, P, rho, vz, Bx, By, Bz, interpolate_model=False,withstokes=True):
n = T.shape[0]
stokes_out = np.zeros((n,5,self.n_lambda_sir))
if (interpolate_model):
model_out = np.zeros((n,7,self.n_tau))
else:
model_out = np.zeros((n,7,len(self.deltaz)))
for loop in range(n):
# Get ltau500 axis
log_T = np.log10(T[loop,:])
log_P = np.log10(P[loop,:])
it0 = np.searchsorted(self.T_kappa5, log_T) - 1
it1 = it0 + 1
ip0 = np.searchsorted(self.P_kappa5, log_P) - 1
ip1 = ip0 + 1
# When close to the edge of the interval
it0 = np.clip(it0, 0, self.T_kappa5.shape[0]-2)
it1 = np.clip(it1, 1, self.T_kappa5.shape[0]-1)
ip0 = np.clip(ip0, 0, self.P_kappa5.shape[0]-2)
ip1 = np.clip(ip1, 1, self.P_kappa5.shape[0]-1)
kappa = self.kappa[it0,ip0] * (self.T_kappa5[it1] - log_T) * (self.P_kappa5[ip1] - log_P) + \
self.kappa[it1,ip0] * (log_T - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - log_P) + \
self.kappa[it0,ip1] * (self.T_kappa5[it1] - log_T) * (log_P - self.P_kappa5[ip0]) + \
self.kappa[it1,ip1] * (log_T - self.T_kappa5[it0]) * (log_P - self.P_kappa5[ip0])
kappa /= ((self.T_kappa5[it1] - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - self.P_kappa5[ip0]))
if (self.eos_type == 'MANCHA'):
chi = (kappa * rho[loop,:])[::-1]
else:
chi = kappa[::-1]
# Numeric error in higher layers
chi[chi<0] = 1e-20
tau = integ.cumtrapz(chi, x=z)
ltau = np.log10(np.insert(tau, 0, 0.5*tau[0]))[::-1]
ind = np.where(ltau < 2.0)[0]
# Get electron pressure
it0 = np.searchsorted(self.T_eos, log_T) - 1
it1 = it0 + 1
ip0 = np.searchsorted(self.P_eos, log_P) - 1
ip1 = ip0 + 1
# When close to the edge of the interval
it0 = np.clip(it0, 0, self.T_eos.shape[0]-2)
it1 = np.clip(it1, 1, self.T_eos.shape[0]-1)
ip0 = np.clip(ip0, 0, self.P_eos.shape[0]-2)
ip1 = np.clip(ip1, 1, self.P_eos.shape[0]-1)
if (self.eos_type == 'MANCHA'):
log_Pe = self.Pe_eos[ip0,it0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[ip1,it0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[ip0,it1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
self.Pe_eos[ip1,it1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
else:
log_Pe = self.Pe_eos[it0,ip0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[it1,ip0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
self.Pe_eos[it0,ip1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
self.Pe_eos[it1,ip1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
log_Pe /= ((self.T_eos[it1] - self.T_eos[it0]) * (self.P_eos[ip1] - self.P_eos[ip0]))
if (withstokes):
if (self.tau_fine != 0.0):
taufino = np.arange(np.min(ltau), np.max(ltau), self.tau_fine)[::-1]
taufino = taufino[(taufino<2.0) & (taufino>-5.0)]
stokes_out[loop,:,:], error = sir_code.synth(1, self.n_lambda_sir, taufino, self.intpltau(taufino, ltau, T[loop,:]),
10**self.intpltau(taufino, ltau, log_Pe[:]), self.intpltau(taufino, ltau[:], np.zeros(len(T[loop,:]))),
self.intpltau(taufino, ltau, self.vz_multiplier*vz[loop,:]), self.intpltau(taufino, ltau, self.bx_multiplier*Bx[loop,:]),
self.intpltau(taufino, ltau, self.by_multiplier*By[loop,:]), self.intpltau(taufino, ltau, self.bz_multiplier*Bz[loop,:]), self.macroturbulence)
else:
stokes_out[loop,:,:], error = sir_code.synth(1, self.n_lambda_sir, ltau[ind], T[loop,ind], 10**log_Pe[ind], self.zeros[ind],
self.vz_multiplier*vz[loop,ind], self.bx_multiplier*Bx[loop,ind], self.by_multiplier*By[loop,ind], self.bz_multiplier*Bz[loop,ind], self.macroturbulence)
if (error != 0):
stokes_out[loop,:,:] = -99.0
else:
stokes_out = None
# We want to interpolate the model to certain isotau surfaces
if (interpolate_model):
model_out[loop,0,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.deltaz[::-1])
model_out[loop,1,:] = self.intpltau(self.interpolated_tau, ltau[::-1], T[loop,::-1])
model_out[loop,2,:] = np.exp(self.intpltau(self.interpolated_tau, ltau[::-1], np.log(P[loop,::-1])))
model_out[loop,3,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.vz_multiplier * vz[loop,::-1])
model_out[loop,4,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bx_multiplier * Bx[loop,::-1])
model_out[loop,5,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.by_multiplier * By[loop,::-1])
model_out[loop,6,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bz_multiplier * Bz[loop,::-1])
else:
model_out[loop,0,:] = self.deltaz[:]
model_out[loop,1,:] = T[loop,:]
model_out[loop,2,:] = P[loop,:]
model_out[loop,3,:] = self.vz_multiplier * vz[loop,:]
model_out[loop,4,:] = self.bx_multiplier * Bx[loop,:]
model_out[loop,5,:] = self.by_multiplier * By[loop,:]
model_out[loop,6,:] = self.bz_multiplier * Bz[loop,:]
return stokes_out, model_out
| 28,540 | 44.375199 | 177 | py |
3d_sir | 3d_sir-master/sir3d/synth/__init__.py | from .model import *
from .multiprocessing import *
from .slant import *
| 73 | 17.5 | 30 | py |