repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
UNITER | UNITER-master/inf_vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of VQA for submission
"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
import numpy as np
from cytoolz import concat
from data import (TokenBucketSampler, PrefetchLoader,
DetectFeatLmdb, TxtTokLmdb, VqaEvalDataset, vqa_eval_collate)
from model.vqa import UniterForVisualQuestionAnswering
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.const import BUCKET_SIZE, IMG_DIM
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = Struct(json.load(open(hps_file)))
# train_examples = None
ans2label_file = f'{opts.output_dir}/ckpt/ans2label.json'
ans2label = json.load(open(ans2label_file))
label2ans = {label: ans for ans, label in ans2label.items()}
# load DBs and image dirs
eval_img_db = DetectFeatLmdb(opts.img_db,
model_opts.conf_th, model_opts.max_bb,
model_opts.min_bb, model_opts.num_bb,
opts.compressed_db)
eval_txt_db = TxtTokLmdb(opts.txt_db, -1)
eval_dataset = VqaEvalDataset(len(ans2label), eval_txt_db, eval_img_db)
# Prepare model
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
model = UniterForVisualQuestionAnswering.from_pretrained(
f'{opts.output_dir}/log/model.json', checkpoint,
img_dim=IMG_DIM, num_answer=len(ans2label))
model.to(device)
if opts.fp16:
model = amp.initialize(model, enabled=True, opt_level='O2')
sampler = TokenBucketSampler(eval_dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=opts.batch_size, droplast=False)
eval_dataloader = DataLoader(eval_dataset,
batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vqa_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
val_log, results, logits = evaluate(model, eval_dataloader, label2ans,
opts.save_logits)
result_dir = f'{opts.output_dir}/results_test'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
all_results = list(concat(all_gather_list(results)))
if opts.save_logits:
all_logits = {}
for id2logit in all_gather_list(logits):
all_logits.update(id2logit)
if hvd.rank() == 0:
with open(f'{result_dir}/'
f'results_{opts.checkpoint}_all.json', 'w') as f:
json.dump(all_results, f)
if opts.save_logits:
np.savez(f'{result_dir}/logits_{opts.checkpoint}_all.npz',
**all_logits)
@torch.no_grad()
def evaluate(model, eval_loader, label2ans, save_logits=False):
LOGGER.info("start running evaluation...")
model.eval()
n_ex = 0
st = time()
results = []
logits = {}
for i, batch in enumerate(eval_loader):
qids = batch['qids']
scores = model(batch, compute_loss=False)
answers = [label2ans[i]
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
for qid, answer in zip(qids, answers):
results.append({'answer': answer, 'question_id': int(qid)})
if save_logits:
scores = scores.cpu()
for j, qid in enumerate(qids):  # j avoids shadowing the batch index i used below
logits[qid] = scores[j].half().numpy()
if i % 100 == 0 and hvd.rank() == 0:
n_results = len(results)
n_results *= hvd.size() # an approximation to avoid hangs
LOGGER.info(f'{n_results}/{len(eval_loader.dataset)} '
'answers predicted')
n_ex += len(qids)
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_log = {'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds "
f"at {int(n_ex/tot_time)} examples per second")
return val_log, results, logits
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1] # argmax
one_hots = torch.zeros(*labels.size(), device=labels.device)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
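# Worked example (hypothetical numbers) of the VQA soft-accuracy computed above:
#   logits = torch.tensor([[0.2, 3.1, -1.0]])   # model scores for 3 answers
#   labels = torch.tensor([[0.0, 0.6, 0.3]])    # VQA soft target scores
#   compute_score_with_logits(logits, labels)   # -> tensor([[0.0, 0.6, 0.0]])
# i.e. the argmax answer (index 1) contributes its soft score 0.6 to the sum.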
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="can be the path to binary or int number (step)")
parser.add_argument("--batch_size",
default=8192, type=int,
help="number of tokens in a batch")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory of the training command")
parser.add_argument("--save_logits", action='store_true',
help="Whether to save logits (for making ensemble)")
# Prepro parameters
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
main(args)
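# Example invocation (hypothetical paths; single-node Horovod launch assumed):
#   horovodrun -np 4 python inf_vqa.py \
#       --txt_db /db/vqa_test.db --img_db /img/coco_test2015 \
#       --output_dir /storage/vqa_run --checkpoint 6000 --fp16 --pin_mem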
| 6,692 | 35.774725 | 79 | py |
UNITER | UNITER-master/inf_nlvr2.py | """run inference of NLVR2 (single GPU only)"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from data import (DetectFeatLmdb, TxtTokLmdb,
PrefetchLoader, TokenBucketSampler,
Nlvr2PairedEvalDataset, Nlvr2TripletEvalDataset,
nlvr2_paired_eval_collate, nlvr2_triplet_eval_collate)
from model.model import UniterConfig
from model.nlvr2 import (UniterForNlvr2Paired, UniterForNlvr2Triplet,
UniterForNlvr2PairedAttn)
from utils.misc import Struct
from utils.const import IMG_DIM, BUCKET_SIZE
def main(opts):
hvd.init()
device = torch.device("cuda") # support single GPU only
train_opts = Struct(json.load(open(f'{opts.train_dir}/log/hps.json')))
if 'paired' in train_opts.model:
EvalDatasetCls = Nlvr2PairedEvalDataset
eval_collate_fn = nlvr2_paired_eval_collate
if train_opts.model == 'paired':
ModelCls = UniterForNlvr2Paired
elif train_opts.model == 'paired-attn':
ModelCls = UniterForNlvr2PairedAttn
else:
raise ValueError('unrecognized model type')
elif train_opts.model == 'triplet':
EvalDatasetCls = Nlvr2TripletEvalDataset
ModelCls = UniterForNlvr2Triplet
eval_collate_fn = nlvr2_triplet_eval_collate
else:
raise ValueError('unrecognized model type')
img_db = DetectFeatLmdb(opts.img_db,
train_opts.conf_th, train_opts.max_bb,
train_opts.min_bb, train_opts.num_bb,
opts.compressed_db)
txt_db = TxtTokLmdb(opts.txt_db, -1)
dset = EvalDatasetCls(txt_db, img_db, train_opts.use_img_type)
batch_size = (train_opts.val_batch_size if opts.batch_size is None
else opts.batch_size)
sampler = TokenBucketSampler(dset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=False)
eval_dataloader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=eval_collate_fn)
eval_dataloader = PrefetchLoader(eval_dataloader)
# Prepare model
ckpt_file = f'{opts.train_dir}/ckpt/model_step_{opts.ckpt}.pt'
checkpoint = torch.load(ckpt_file)
model_config = UniterConfig.from_json_file(
f'{opts.train_dir}/log/model.json')
model = ModelCls(model_config, img_dim=IMG_DIM)
model.init_type_embedding()
model.load_state_dict(checkpoint, strict=False)
model.to(device)
model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
results = evaluate(model, eval_dataloader, device)
# write results
if not exists(opts.output_dir):
os.makedirs(opts.output_dir)
with open(f'{opts.output_dir}/results.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
print(f'all results written')
@torch.no_grad()
def evaluate(model, eval_loader, device):
print("start running evaluation...")
model.eval()
n_ex = 0
st = time()
results = []
for i, batch in enumerate(eval_loader):
qids = batch['qids']
del batch['targets']
del batch['qids']
scores = model(batch, compute_loss=False)
answers = ['True' if i == 1 else 'False'
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
results.extend(zip(qids, answers))
n_results = len(results)
print(f'{n_results}/{len(eval_loader.dataset)} answers predicted')
n_ex += len(qids)
tot_time = time()-st
model.train()
print(f"evaluation finished in {int(tot_time)} seconds "
f"at {int(n_ex/tot_time)} examples per second")
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
type=str, required=True,
help="The input train corpus.")
parser.add_argument("--img_db",
type=str, required=True,
help="The input train images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--batch_size", type=int,
help="batch size for evaluation")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
parser.add_argument('--fp16', action='store_true',
help="fp16 inference")
parser.add_argument("--train_dir", type=str, required=True,
help="The directory storing NLVR2 finetuning output")
parser.add_argument("--ckpt", type=int, required=True,
help="specify the checkpoint to run inference")
parser.add_argument("--output_dir", type=str, required=True,
help="The output directory where the prediction "
"results will be written.")
args = parser.parse_args()
main(args)
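# Example invocation (hypothetical paths; the script supports a single GPU only):
#   python inf_nlvr2.py --txt_db /db/nlvr2_test1.db --img_db /img/nlvr2_test \
#       --train_dir /storage/nlvr2_run --ckpt 8000 \
#       --output_dir /storage/nlvr2_test --fp16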
| 5,465 | 37.765957 | 77 | py |
UNITER | UNITER-master/pretrain_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER pre-training
"""
import argparse
from collections import defaultdict
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler,
MetaLoader, PrefetchLoader, DetectFeatLmdb,
VcrTxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
MlmDatasetForVCR, mlm_collate_for_vcr,
MrfrDatasetForVCR, mrfr_collate_for_vcr,
MrcDatasetForVCR, mrc_collate_for_vcr)
from model.pretrain_vcr import UniterForPretrainingForVCR
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, IMG_LABEL_DIM, BUCKET_SIZE
NUM_SPECIAL_TOKENS = 81
def build_dataloader(dataset, collate_fn, is_train, opts):
if is_train:
batch_size = opts.train_batch_size
else:
batch_size = opts.val_batch_size
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return loader
def build_mlm_dataset(txt_db, img_db_gt, img_db, is_train, opts):
if is_train:
collate_fn = mlm_collate_for_vcr
datasets = [MlmDatasetForVCR(t, i_gt, i)
for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
collate_fn = mlm_collate_for_vcr
dataset = MlmDatasetForVCR(txt_db, img_db_gt, img_db)
return dataset, collate_fn
def build_mrfr_dataset(txt_db, img_db_gt, img_db, is_train, opts):
if is_train:
datasets = [MrfrDatasetForVCR(opts.mrm_prob, t, i_gt, i)
for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrfrDatasetForVCR(opts.mrm_prob, txt_db, img_db_gt, img_db)
return dataset, mrfr_collate_for_vcr
def build_mrc_dataset(txt_db, img_db_gt, img_db, is_train, opts):
if is_train:
datasets = [MrcDatasetForVCR(opts.mrm_prob, t, i_gt, i)
for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
dataset = ConcatDatasetWithLens(datasets)
else:
dataset = MrcDatasetForVCR(opts.mrm_prob, txt_db, img_db_gt, img_db)
return dataset, mrc_collate_for_vcr
def load_img_feat(db_list, all_img_dbs, opts):
db_ = db_list.split(";")
assert len(db_) <= 2, "More than two img_dbs found"
gt_db_path, db_path = "", ""
for d in db_:
if "gt" in d:
gt_db_path = d
else:
db_path = d
if gt_db_path != "":
img_db_gt = DetectFeatLmdb(
gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
all_img_dbs.path2imgdb[gt_db_path] = img_db_gt
else:
img_db_gt = None
img_db = all_img_dbs[db_path] if db_path != "" else None
all_img_dbs.path2imgdb[db_path] = img_db
return img_db, img_db_gt
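# Example (hypothetical paths) of how load_img_feat splits a ';'-separated entry:
#   load_img_feat("/img/vcr_gt_train;/img/vcr_train", all_img_dbs, opts)
# opens "/img/vcr_gt_train" as the ground-truth feature DB (its path contains
# "gt") and "/img/vcr_train" as the detected-box DB, returning (img_db, img_db_gt).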
def create_dataloaders(datasets, is_train, opts, all_img_dbs=None):
if all_img_dbs is None:
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
dataloaders = {}
for dset in datasets:
for vcr_task in ["qa", "qar"]:
if is_train:
assert len(dset['db']) == len(dset['img'])
assert len(dset['tasks']) == len(dset['mix_ratio'])
img_db, img_db_gt = [], []
for img_path in dset['img']:
curr_img_db, curr_img_db_gt = load_img_feat(
img_path, all_img_dbs, opts)
img_db.append(curr_img_db)
img_db_gt.append(curr_img_db_gt)
else:
assert len(dset['db']) == len(dset['img']) == 1
img_db, img_db_gt = load_img_feat(
dset['img'][0], all_img_dbs, opts)
for i, t in enumerate(dset['tasks']):
task = f'{t}_{dset["name"]}'
if is_train:
LOGGER.info(
f"Loading {task} train dataset with vcr_{vcr_task}, "
f"{dset['db']}, {[img.img_dir for img in img_db]},"
f"{[img.img_dir for img in img_db_gt]}")
txt_db = [VcrTxtTokLmdb(path, opts.max_txt_len,
task=vcr_task)
for path in dset['db']]
else:
LOGGER.info(
f"Loading {task} val dataset with vcr_{vcr_task}, "
f"{dset['db']}, {img_db.img_dir},"
f"{img_db_gt.img_dir}")
txt_db = VcrTxtTokLmdb(dset['db'][0], -1,
task=vcr_task)
if task.startswith('mlm'):
dataset = build_mlm_dataset(
txt_db, img_db_gt, img_db, is_train, opts)
elif task.startswith('mrfr'):
dataset = build_mrfr_dataset(
txt_db, img_db_gt, img_db, is_train, opts)
elif task.startswith('mrc'):
dataset = build_mrc_dataset(
txt_db, img_db_gt, img_db, is_train, opts)
else:
raise ValueError(f'Undefined task {task}')
LOGGER.info(f"{len(dataset[0])*hvd.size()} samples loaded")
loader = build_dataloader(*dataset, is_train, opts)
if is_train:
ratio = dset['mix_ratio'][i]
dataloaders[task] = (loader, ratio)
else:
dataloaders[task] = PrefetchLoader(loader)
return dataloaders, all_img_dbs
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
all_dbs = [db for datasets in [opts.train_datasets, opts.val_datasets]
for dset in datasets for db in dset['db']]
tokenizer = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(tokenizer == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
# build data loaders
train_dataloaders, all_img_dbs = create_dataloaders(
opts.train_datasets, True, opts)
val_dataloaders, _ = create_dataloaders(
opts.val_datasets, False, opts, all_img_dbs)
meta_loader = MetaLoader(train_dataloaders,
accum_steps=opts.gradient_accumulation_steps,
distributed=n_gpu > 1)
meta_loader = PrefetchLoader(meta_loader)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForPretrainingForVCR.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, img_label_dim=IMG_LABEL_DIM)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
model.to(device)
model.train()
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
task2scaler = {t: i for i, t in enumerate(train_dataloaders.keys())}
model, optimizer = amp.initialize(model, optimizer,
num_losses=len(task2scaler),
enabled=opts.fp16, opt_level='O2')
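# Each pretraining task gets its own amp loss scaler: num_losses=len(task2scaler)
# above, and the loss_id passed to amp.scale_loss in the training loop selects the
# per-task scale, so tasks with different loss magnitudes are scaled independently.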
global_step = 0
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
# to compute training statistics
task2loss = {task: RunningMeter(f'loss/{task}')
for task in train_dataloaders.keys()}
n_examples = defaultdict(int)
n_in_units = defaultdict(int)
n_loss_units = defaultdict(int)
grad_norm = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
for step, (name, batch) in enumerate(meta_loader):
# forward pass
n_examples[name] += batch['input_ids'].size(0)
n_in_units[name] += (batch['attn_masks'] == 1).sum().item()
task = name.split('_')[0]
loss = model(batch, task=task, compute_loss=True)
n_loss_units[name] += loss.size(0)
loss = loss.mean() # loss is not normalized in model
# backward pass
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale,
loss_id=task2scaler[name]) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
task2loss[name](loss.item())
# optimizer update and logging
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.log_scaler_dict({ll.name: ll.val
for ll in task2loss.values()
if ll.val is not None})
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'==============Step {global_step}===============')
for t in train_dataloaders.keys():
assert all(tt == t for tt in all_gather_list(t))
tot_ex = sum(all_gather_list(n_examples[t]))
ex_per_sec = int(tot_ex / (time()-start))
tot_in = sum(all_gather_list(n_in_units[t]))
in_per_sec = int(tot_in / (time()-start))
tot_l = sum(all_gather_list(n_loss_units[t]))
l_per_sec = int(tot_l / (time()-start))
LOGGER.info(f'{t}: {tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar(f'perf/{t}_ex_per_s', ex_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_in_per_s', in_per_sec,
global_step)
TB_LOGGER.add_scalar(f'perf/{t}_loss_per_s', l_per_sec,
global_step)
LOGGER.info('===============================================')
if global_step % opts.valid_steps == 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step % opts.valid_steps != 0:
LOGGER.info(f'Step {global_step}: start validation')
validate(model, val_dataloaders)
model_saver.save(model, global_step)
def validate(model, val_dataloaders):
model.eval()
for task, loader in val_dataloaders.items():
LOGGER.info(f"validate on {task} task")
if task.startswith('mlm'):
val_log = validate_mlm(model, loader)
elif task.startswith('mrfr'):
val_log = validate_mrfr(model, loader)
elif task.startswith('mrc'):
val_log = validate_mrc(model, loader, task)
else:
raise ValueError(f'Undefined task {task}')
val_log = {f'{task}_{k}': v for k, v in val_log.items()}
TB_LOGGER.log_scaler_dict(
{f'valid_{task}/{k}': v for k, v in val_log.items()})
model.train()
@torch.no_grad()
def validate_mlm(model, val_loader):
LOGGER.info("start running MLM validation...")
val_loss = 0
n_correct = 0
n_word = 0
st = time()
for i, batch in enumerate(val_loader):
scores = model(batch, task='mlm', compute_loss=False)
labels = batch['txt_labels']
labels = labels[labels != -1]
loss = F.cross_entropy(scores, labels, reduction='sum')
val_loss += loss.item()
n_correct += (scores.max(dim=-1)[1] == labels).sum().item()
n_word += labels.numel()
val_loss = sum(all_gather_list(val_loss))
n_correct = sum(all_gather_list(n_correct))
n_word = sum(all_gather_list(n_word))
tot_time = time()-st
val_loss /= n_word
acc = n_correct / n_word
val_log = {'loss': val_loss,
'acc': acc,
'tok_per_s': n_word/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"acc: {acc*100:.2f}")
return val_log
def accuracy_count(out, labels):
outputs = out.max(dim=-1)[1]
mask = labels != -1
n_correct = (outputs == labels).masked_select(mask).sum().item()
return n_correct
@torch.no_grad()
def validate_mrfr(model, val_loader):
LOGGER.info("start running MRFR validation...")
val_loss = 0
n_feat = 0
st = time()
for i, batch in enumerate(val_loader):
loss = model(batch, task='mrfr', compute_loss=True)
val_loss += loss.sum().item() / IMG_DIM
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_log = {'loss': val_loss,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"loss: {val_loss:.2f}")
return val_log
@torch.no_grad()
def validate_mrc(model, val_loader, task):
LOGGER.info("start running MRC validation...")
val_loss = 0
n_feat = 0
st = time()
tot_score = 0
for i, batch in enumerate(val_loader):
prediction_soft_label = model(
batch, task=task, compute_loss=False)
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
label_targets = batch['label_targets']
loss = F.kl_div(
prediction_soft_label, label_targets, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label, label_targets)
else:
# background class should not be the target
cls_label_targets = label_targets[:, 1:].max(dim=-1)[1] + 1
loss = F.cross_entropy(
prediction_soft_label, cls_label_targets,
ignore_index=0, reduction='sum')
tot_score += compute_accuracy_for_soft_targets(
prediction_soft_label[:, 1:], label_targets[:, 1:])
val_loss += loss.item()
n_feat += batch['img_mask_tgt'].sum().item()
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_feat = sum(all_gather_list(n_feat))
tot_time = time()-st
val_loss /= n_feat
val_acc = tot_score / n_feat
val_log = {'loss': val_loss,
'acc': val_acc,
'feat_per_s': n_feat/tot_time}
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log
def compute_accuracy_for_soft_targets(out, labels):
outputs = out.max(dim=-1)[1]
labels = labels.max(dim=-1)[1] # argmax
n_correct = (outputs == labels).sum().item()
return n_correct
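# Worked example (hypothetical numbers): with out = [[0.1, 0.7, 0.2]] and soft
# labels = [[0.0, 0.9, 0.1]], both rows argmax to class 1, so the function
# returns 1 (one correctly predicted region); mismatched argmaxes contribute 0.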
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
# NOTE: train tasks and val tasks cannot take command line arguments
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config", type=str,
help="path to model structure config json")
parser.add_argument("--checkpoint", default=None, type=str,
help="path to model checkpoint (*.pt)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
parser.add_argument('--mrm_prob', default=0.15, type=float,
help='probability to mask in MRM training')
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adamw',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=10000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', required=True, help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 22,741 | 39.538324 | 79 | py |
UNITER | UNITER-master/inf_re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of Referring Expression Comprehension for submission
"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from cytoolz import concat
from data import (PrefetchLoader, DetectFeatLmdb, ReTxtTokLmdb,
ReEvalDataset, re_eval_collate)
from data.sampler import DistributedSampler
from model.re import UniterForReferringExpressionComprehension
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.const import IMG_DIM
def write_to_tmp(txt, tmp_file):
if tmp_file:
with open(tmp_file, "a") as f:
f.write(txt)
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = json.load(open(hps_file))
if 'mlp' not in model_opts:
model_opts['mlp'] = 1
model_opts = Struct(model_opts)
# Prepare model
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_epoch_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
model = UniterForReferringExpressionComprehension.from_pretrained(
f'{opts.output_dir}/log/model.json', checkpoint,
img_dim=IMG_DIM, mlp=model_opts.mlp)
model.to(device)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
if opts.fp16:
model = amp.initialize(model, enabled=True, opt_level='O2')
# load DBs and image dirs
img_db_type = "gt" if "coco_gt" in opts.img_db else "det"
conf_th = -1 if img_db_type == "gt" else model_opts.conf_th
num_bb = 100 if img_db_type == "gt" else model_opts.num_bb
eval_img_db = DetectFeatLmdb(opts.img_db,
conf_th, model_opts.max_bb,
model_opts.min_bb, num_bb,
opts.compressed_db)
# Prepro txt_dbs
txt_dbs = opts.txt_db.split(':')
for txt_db in txt_dbs:
print(f'Evaluating {txt_db}')
eval_txt_db = ReTxtTokLmdb(txt_db, -1)
eval_dataset = ReEvalDataset(
eval_txt_db, eval_img_db, use_gt_feat=img_db_type == "gt")
sampler = DistributedSampler(eval_dataset, num_replicas=n_gpu,
rank=rank, shuffle=False)
eval_dataloader = DataLoader(eval_dataset,
sampler=sampler,
batch_size=opts.batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=re_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
# evaluate
val_log, results = evaluate(model, eval_dataloader)
result_dir = f'{opts.output_dir}/results_test'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
write_to_tmp(
f"{txt_db.split('_')[1].split('.')[0]}-acc({img_db_type}): {results['acc']*100:.2f}% ",
opts.tmp_file)
all_results = list(concat(r['predictions']
for r in all_gather_list(results)))  # gather predictions from all ranks
if hvd.rank() == 0:
db_split = txt_db.split('/')[-1].split('.')[0] # refcoco+_val
img_dir = opts.img_db.split('/')[-1] # re_coco_gt
with open(f'{result_dir}/'
f'results_{opts.checkpoint}_{db_split}_on_{img_dir}_all.json', 'w') as f:
json.dump(all_results, f)
# print
print(f'{opts.output_dir}/results_test')
write_to_tmp('\n', opts.tmp_file)
@torch.no_grad()
def evaluate(model, eval_loader):
LOGGER.info("start running evaluation...")
model.eval()
tot_score = 0
n_ex = 0
st = time()
predictions = []
for i, batch in enumerate(eval_loader):
(tgt_box_list, obj_boxes_list, sent_ids) = (
batch['tgt_box'], batch['obj_boxes'], batch['sent_ids'])
# scores (n, max_num_bb)
scores = model(batch, compute_loss=False)
ixs = torch.argmax(scores, 1).cpu().detach().numpy() # (n, )
# pred_boxes
for ix, obj_boxes, tgt_box, sent_id in \
zip(ixs, obj_boxes_list, tgt_box_list, sent_ids):
pred_box = obj_boxes[ix]
predictions.append({'sent_id': int(sent_id),
'pred_box': pred_box.tolist(),
'tgt_box': tgt_box.tolist()})
if eval_loader.loader.dataset.computeIoU(pred_box, tgt_box) > .5:
tot_score += 1
n_ex += 1
if i % 100 == 0 and hvd.rank() == 0:
n_results = len(predictions)
n_results *= hvd.size() # an approximation to avoid hangs
LOGGER.info(f'{n_results}/{len(eval_loader.dataset)} '
'answers predicted')
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
tot_score = sum(all_gather_list(tot_score))
val_acc = tot_score / n_ex
val_log = {'valid/acc': val_acc, 'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation ({n_ex} sents) finished in"
f" {int(tot_time)} seconds"
f", accuracy: {val_acc*100:.2f}%")
# summarize
results = {'acc': val_acc, 'predictions': predictions}
return val_log, results
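# computeIoU above lives on the ReEvalDataset; a minimal sketch, assuming boxes
# are [x1, y1, x2, y2] corners (the actual dataset may use a different layout):
#   def iou(a, b):
#       ix = max(0, min(a[2], b[2]) - max(a[0], b[0]))
#       iy = max(0, min(a[3], b[3]) - max(a[1], b[1]))
#       inter = ix * iy
#       union = (a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter
#       return inter / union
# A prediction counts as correct when this overlap with the target exceeds 0.5.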
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="can be the path to binary or int number (step)")
parser.add_argument("--batch_size",
default=256, type=int,
help="number of sentences per batch")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory of the training command")
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# Write simple results to some tmp file
parser.add_argument('--tmp_file', type=str, default=None,
help="write results to tmp file")
args = parser.parse_args()
main(args)
| 7,395 | 35.98 | 99 | py |
UNITER | UNITER-master/inf_itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference for Image Text Retrieval
"""
import argparse
import json
import os
from os.path import exists
import pickle
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from data import (PrefetchLoader,
DetectFeatLmdb, TxtTokLmdb, ItmEvalDataset, itm_eval_collate)
from model.itm import UniterForImageTextRetrieval
from utils.logger import LOGGER
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.const import IMG_DIM
from utils.itm_eval import inference, itm_eval
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.train_config is not None:
train_opts = Struct(json.load(open(opts.train_config)))
opts.conf_th = train_opts.conf_th
opts.max_bb = train_opts.max_bb
opts.min_bb = train_opts.min_bb
opts.num_bb = train_opts.num_bb
# load DBs and image dirs
eval_img_db = DetectFeatLmdb(opts.img_db,
opts.conf_th, opts.max_bb,
opts.min_bb, opts.num_bb,
opts.compressed_db)
eval_txt_db = TxtTokLmdb(opts.txt_db, -1)
eval_dataset = ItmEvalDataset(eval_txt_db, eval_img_db, opts.batch_size)
# Prepare model
checkpoint = torch.load(opts.checkpoint)
model = UniterForImageTextRetrieval.from_pretrained(
opts.model_config, checkpoint, img_dim=IMG_DIM)
if 'rank_output' not in checkpoint:
model.init_output() # zero shot setting
model.to(device)
model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
eval_dataloader = DataLoader(eval_dataset, batch_size=1,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=itm_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
eval_log, results = evaluate(model, eval_dataloader)
if hvd.rank() == 0:
if not exists(opts.output_dir) and rank == 0:
os.makedirs(opts.output_dir)
with open(f'{opts.output_dir}/config.json', 'w') as f:
json.dump(vars(opts), f)
with open(f'{opts.output_dir}/results.bin', 'wb') as f:
pickle.dump(results, f)
with open(f'{opts.output_dir}/scores.json', 'w') as f:
json.dump(eval_log, f)
LOGGER.info(f'evaluation finished')
LOGGER.info(
f"======================== Results =========================\n"
f"image retrieval R1: {eval_log['img_r1']*100:.2f},\n"
f"image retrieval R5: {eval_log['img_r5']*100:.2f},\n"
f"image retrieval R10: {eval_log['img_r10']*100:.2f}\n"
f"text retrieval R1: {eval_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: {eval_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: {eval_log['txt_r10']*100:.2f}")
LOGGER.info("========================================================")
@torch.no_grad()
def evaluate(model, eval_loader):
model.eval()
st = time()
LOGGER.info("start running Image/Text Retrieval evaluation ...")
score_matrix = inference(model, eval_loader)
dset = eval_loader.dataset
all_score = hvd.allgather(score_matrix)
all_txt_ids = [i for ids in all_gather_list(dset.ids)
for i in ids]
all_img_ids = dset.all_img_ids
assert all_score.size() == (len(all_txt_ids), len(all_img_ids))
if hvd.rank() != 0:
return {}, tuple()
# NOTE: only use rank0 to compute final scores
eval_log = itm_eval(all_score, all_txt_ids, all_img_ids,
dset.txt2img, dset.img2txts)
results = (all_score, all_txt_ids, all_img_ids)
tot_time = time()-st
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds, ")
return eval_log, results
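# Shape note (from the assert above): the gathered score_matrix has one row per
# caption and one column per image, so all_score[i, j] is the matching score of
# text all_txt_ids[i] against image all_img_ids[j]; itm_eval then computes the
# R@1/5/10 retrieval numbers logged in main() from this matrix.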
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--txt_db", default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--img_db", default=None, type=str,
help="The input train images.")
parser.add_argument("--checkpoint", default=None, type=str,
help="model checkpoint binary")
parser.add_argument("--model_config", default=None, type=str,
help="model config json")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the inference results will be "
"written.")
# optional parameters
parser.add_argument("--train_config", default=None, type=str,
help="hps.json from training (for prepro hps)")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
parser.add_argument("--batch_size", default=400, type=int,
help="number of tokens in a batch")
# device parameters
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
main(args)
| 6,413 | 38.109756 | 79 | py |
UNITER | UNITER-master/train_vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VQA
"""
import argparse
import json
import os
from os.path import abspath, dirname, exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, PrefetchLoader,
TxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
VqaDataset, VqaEvalDataset,
vqa_collate, vqa_eval_collate)
from model.vqa import UniterForVisualQuestionAnswering
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import BUCKET_SIZE, IMG_DIM
def build_dataloader(dataset, collate_fn, is_train, opts):
batch_size = (opts.train_batch_size if is_train
else opts.val_batch_size)
sampler = TokenBucketSampler(dataset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
dataloader = DataLoader(dataset, batch_sampler=sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
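# Note: batch_size here is a token budget rather than an example count (see the
# "(batch by tokens)" help strings). TokenBucketSampler buckets examples of
# similar length using dataset.lens, so each batch holds a variable number of
# similarly-sized examples that together stay within the token budget.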
def build_optimizer(model, opts):
""" vqa linear may get larger learning rate """
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = [(n, p) for n, p in model.named_parameters()
if 'vqa_output' not in n]
param_top = [(n, p) for n, p in model.named_parameters()
if 'vqa_output' in n]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
# Adam-family optimizers only (adam / adamax / adamw)
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
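# Note on group ordering: the two 'vqa_output' groups come first and the backbone
# groups last; the training loop in main() relies on this order when it multiplies
# the learning rate of groups 0 and 1 by opts.lr_mul.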
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
ans2label = json.load(open(f'{dirname(abspath(__file__))}'
f'/utils/ans2label.json'))
label2ans = {label: ans for ans, label in ans2label.items()}
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets.append(VqaDataset(len(ans2label), txt_db, img_db))
train_dataset = ConcatDatasetWithLens(train_datasets)
train_dataloader = build_dataloader(train_dataset, vqa_collate, True, opts)
# val
LOGGER.info(f"Loading Train Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, -1)
val_dataset = VqaEvalDataset(len(ans2label), val_txt_db, val_img_db)
val_dataloader = build_dataloader(val_dataset, vqa_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = opts.train_txt_dbs + [opts.val_txt_db]
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(toker == json.load(open(f'{db}/meta.json'))['bert']
for db in all_dbs)
model = UniterForVisualQuestionAnswering.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, num_answer=len(ans2label))
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
json.dump(ans2label,
open(join(opts.output_dir, 'ckpt', 'ans2label.json'), 'w'))
os.makedirs(join(opts.output_dir, 'results')) # store VQA predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset) * hvd.size())
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean() * batch['targets'].size(1) # instance-level bce
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for i, param_group in enumerate(optimizer.param_groups):
if i == 0 or i == 1:
param_group['lr'] = lr_this_step * opts.lr_mul
elif i == 2 or i == 3:
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info(f'===========================================')
if global_step % opts.valid_steps == 0:
val_log, results = validate(
model, val_dataloader, label2ans)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
if opts.num_train_steps % opts.valid_steps != 0:
val_log, results = validate(model, val_dataloader, label2ans)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
@torch.no_grad()
def validate(model, val_loader, label2ans):
LOGGER.info("start running validation...")
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
scores = model(batch, compute_loss=False)
targets = batch['targets']
loss = F.binary_cross_entropy_with_logits(
scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += compute_score_with_logits(scores, targets).sum().item()
answers = [label2ans[i]
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
for qid, answer in zip(batch['qids'], answers):
results[qid] = answer
n_ex += len(batch['qids'])
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {'valid/loss': val_loss,
'valid/acc': val_acc,
'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1] # argmax
one_hots = torch.zeros(*labels.size(), device=labels.device)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size", default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps', type=int, default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--lr_mul", default=10.0, type=float,
help="multiplier for top layer lr")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=2.0, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true', help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
# options safe guard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 16,988 | 41.261194 | 79 | py |
UNITER | UNITER-master/train_ve.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for SNLI-VE
"""
import argparse
import json
import os
from os.path import exists, join
import pickle
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (TokenBucketSampler, PrefetchLoader,
DetectFeatLmdb, TxtTokLmdb,
VeDataset, VeEvalDataset,
ve_collate, ve_eval_collate)
from model.ve import UniterForVisualEntailment
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.misc import VE_ENT2IDX as ans2label
from utils.misc import VE_IDX2ENT as label2ans
from utils.const import IMG_DIM, BUCKET_SIZE
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
img_db = DetectFeatLmdb(img_path, opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
dset = dset_cls(txt_db, img_db)
sampler = TokenBucketSampler(dset.lens, bucket_size=BUCKET_SIZE,
batch_size=batch_size, droplast=is_train)
loader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return PrefetchLoader(loader)
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_db}")
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
VeDataset, ve_collate, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
VeEvalDataset, ve_eval_collate, opts)
test_dataloader = create_dataloader(opts.test_img_db, opts.test_txt_db,
opts.val_batch_size, False,
VeEvalDataset, ve_eval_collate, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
bert_model = json.load(open(f'{opts.train_txt_db}/meta.json'))['bert']
if 'bert' not in bert_model:
bert_model = 'bert-large-cased' # quick hack for glove exp
model = UniterForVisualEntailment.from_pretrained(
opts.model_config, state_dict=checkpoint, img_dim=IMG_DIM)
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
pickle.dump(ans2label,
open(join(opts.output_dir, 'ckpt', 'ans2label.pkl'), 'wb'))
os.makedirs(join(opts.output_dir, 'results')) # store VQA predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.mean() * batch['targets'].size(1) # instance-level bce
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
                    LOGGER.info('===========================================')
if global_step % opts.valid_steps == 0:
for split, loader in [("val", val_dataloader),
("test", test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
val_log, results = validate(
model, loader, label2ans, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"Step {global_step}: finished {n_epoch} epochs")
if opts.num_train_steps % opts.valid_steps != 0:
for split, loader in [("val", val_dataloader),
("test", test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
val_log, results = validate(model, loader, label2ans, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}_final.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
@torch.no_grad()
def validate(model, val_loader, label2ans, split='val'):
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
scores = model(batch, compute_loss=False)
targets = batch['targets']
loss = F.binary_cross_entropy_with_logits(
scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += compute_score_with_logits(scores, targets).sum().item()
answers = [label2ans[i]
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
qids = batch['qids']
for qid, answer in zip(qids, answers):
results[qid] = answer
n_ex += len(qids)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/{split}_loss': val_loss,
f'valid/{split}_acc': val_acc,
f'valid/{split}_ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1] # argmax
one_hots = torch.zeros(*labels.size(), device=labels.device)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_db",
default=None, type=str,
help="The input validation images.")
parser.add_argument("--test_txt_db",
default=None, type=str,
help="The input test corpus. (LMDB)")
parser.add_argument("--test_img_db",
default=None, type=str,
help="The input test images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model (can take 'google-bert') ")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| 16,875 | 41.724051 | 79 | py |
UNITER | UNITER-master/train_re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for RE
"""
import argparse
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim import Adam, Adamax
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, DetectFeatLmdb,
ReTxtTokLmdb, ReDataset, ReEvalDataset,
re_collate, re_eval_collate)
from data.sampler import DistributedSampler
from model.re import UniterForReferringExpressionComprehension
from optim import AdamW, get_lr_sched
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (
all_gather_list, all_reduce_and_rescale_tensors,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import (
NoOp, parse_with_config, set_dropout, set_random_seed)
from utils.const import IMG_DIM
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
img_db_type = "gt" if "coco_gt" in img_path else "det"
conf_th = -1 if img_db_type == "gt" else opts.conf_th
num_bb = 100 if img_db_type == "gt" else opts.num_bb
img_db = DetectFeatLmdb(img_path, conf_th, opts.max_bb, opts.min_bb,
num_bb, opts.compressed_db)
txt_db = ReTxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
if is_train:
dset = dset_cls(txt_db, img_db)
else:
dset = dset_cls(txt_db, img_db, use_gt_feat=img_db_type == "gt")
batch_size = (opts.train_batch_size if is_train
else opts.val_batch_size)
sampler = DistributedSampler(dset, num_replicas=hvd.size(),
rank=hvd.rank(), shuffle=False)
dataloader = DataLoader(dset, sampler=sampler,
batch_size=batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def build_optimizer(model, opts):
""" Re linear may get larger learning rate """
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
param_optimizer = [(n, p) for n, p in model.named_parameters()
if 're_output' not in n]
param_top = [(n, p) for n, p in model.named_parameters()
if 're_output' in n]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': opts.learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
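    # NOTE: the group order matters -- the lr scheduling loop in main() indexes
    # these groups by position (0/1 = 're_output' head, 2/3 = backbone)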
# currently Adam only
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_db}")
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
ReDataset, re_collate, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
ReEvalDataset, re_eval_collate, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = [opts.train_txt_db, opts.val_txt_db]
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['toker']
assert all(toker == json.load(open(f'{db}/meta.json'))['toker']
for db in all_dbs)
model = UniterForReferringExpressionComprehension.from_pretrained(
opts.model_config, checkpoint,
img_dim=IMG_DIM, loss=opts.train_loss,
margin=opts.margin,
hard_ratio=opts.hard_ratio, mlp=opts.mlp,)
model.to(device)
model.train()
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
optimizer = build_optimizer(model, opts)
# Apex
model, optimizer = amp.initialize(
model, optimizer, enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'), 'model_epoch')
os.makedirs(join(opts.output_dir, 'results')) # store RE predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
best_val_acc, best_epoch = None, None
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
if global_step == 0:
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
if global_step >= opts.num_train_steps:
break
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = loss.sum() # sum over vectorized loss TODO: investigate
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(
loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
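                # param groups 0/1 hold the 're_output' head (see
                # build_optimizer) and get the lr_mul-scaled rate;
                # groups 2/3 hold the backbone parameters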
for i, param_group in enumerate(optimizer.param_groups):
if i == 0 or i == 1:
param_group['lr'] = lr_this_step * opts.lr_mul
elif i == 2 or i == 3:
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'============Step {global_step}=============')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
LOGGER.info('===========================================')
# evaluate after each epoch
val_log, _ = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
# save model
n_epoch += 1
model_saver.save(model, n_epoch)
LOGGER.info(f"finished {n_epoch} epochs")
# save best model
if best_val_acc is None or val_log['valid/acc'] > best_val_acc:
best_val_acc = val_log['valid/acc']
best_epoch = n_epoch
model_saver.save(model, 'best')
# shuffle training data for the next epoch
train_dataloader.loader.dataset.shuffle()
# is training finished?
if global_step >= opts.num_train_steps:
break
val_log, results = validate(model, val_dataloader)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}_final.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final')
# print best model
LOGGER.info(
f'best_val_acc = {best_val_acc*100:.2f}% at epoch {best_epoch}.')
@torch.no_grad()
def validate(model, val_dataloader):
LOGGER.info("start running evaluation.")
model.eval()
tot_score = 0
n_ex = 0
st = time()
predictions = {}
for i, batch in enumerate(val_dataloader):
# inputs
(tgt_box_list, obj_boxes_list, sent_ids) = (
batch['tgt_box'], batch['obj_boxes'], batch['sent_ids'])
# scores (n, max_num_bb)
scores = model(batch, compute_loss=False)
ixs = torch.argmax(scores, 1).cpu().detach().numpy() # (n, )
# pred_boxes
for ix, obj_boxes, tgt_box, sent_id in \
zip(ixs, obj_boxes_list, tgt_box_list, sent_ids):
pred_box = obj_boxes[ix]
predictions[int(sent_id)] = {
'pred_box': pred_box.tolist(),
'tgt_box': tgt_box.tolist()}
if val_dataloader.loader.dataset.computeIoU(
pred_box, tgt_box) > .5:
tot_score += 1
n_ex += 1
tot_time = time()-st
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
val_acc = tot_score / n_ex
val_log = {'valid/acc': val_acc, 'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(
f"validation ({n_ex} sents) finished in {int(tot_time)} seconds"
f", accuracy: {val_acc*100:.2f}%")
return val_log, predictions
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_db",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_db",
default=None, type=str,
help="The input validation images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model (can take 'google-bert') ")
parser.add_argument("--mlp", default=1, type=int,
help="number of MLP layers for RE output")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=128, type=int,
help="Total batch size for training. "
"(batch by examples)")
parser.add_argument("--val_batch_size",
default=256, type=int,
help="Total batch size for validation. "
"(batch by examples)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--train_loss",
default="cls", type=str,
choices=['cls', 'rank'],
help="loss to used during training")
parser.add_argument("--margin",
default=0.2, type=float,
help="margin of ranking loss")
parser.add_argument("--hard_ratio",
default=0.3, type=float,
help="sampling ratio of hard negatives")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_steps",
default=32000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+', type=float,
help="beta for adam optimizer")
parser.add_argument("--decay", default='linear',
choices=['linear', 'invsqrt', 'constant'],
help="learning rate decay method")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed',
type=int,
default=24,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
    # options safeguard
main(args)
| 18,420 | 39.220524 | 79 | py |
UNITER | UNITER-master/train_itm_hard_negatives.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for Image-Text Retrieval with hard negatives
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, ConcatDataset
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (PrefetchLoader, TxtTokLmdb, ImageLmdbGroup,
ItmRankDatasetHardNegFromText,
ItmRankDatasetHardNegFromImage, itm_rank_hn_collate,
ItmValDataset, itm_val_collate,
ItmEvalDataset, itm_eval_collate)
from model.itm import UniterForImageTextRetrievalHardNeg
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM
from utils.itm_eval import evaluate
def build_dataloader(dataset, collate_fn, is_train, opts):
dataloader = DataLoader(dataset, batch_size=1,
shuffle=is_train, drop_last=is_train,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem, collate_fn=collate_fn)
dataloader = PrefetchLoader(dataloader)
return dataloader
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
set_random_seed(opts.seed)
if hvd.rank() == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
# store ITM predictions
os.makedirs(join(opts.output_dir, 'results_val'))
os.makedirs(join(opts.output_dir, 'results_test'))
os.makedirs(join(opts.output_dir, 'results_train'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_dbs}, "
f"{opts.train_img_dbs}")
# check multiple DBs
assert len(opts.train_txt_dbs) == len(opts.train_img_dbs), \
"train txt_db and img_db have different length"
# load DBs and image dirs
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
# train
LOGGER.info(f"Loading Train Dataset "
f"{opts.train_txt_dbs}, {opts.train_img_dbs}")
train_datasets_t = []
train_datasets_i = []
for txt_path, img_path in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets_t.append(
ItmRankDatasetHardNegFromText(txt_db, img_db, opts.negative_size))
train_datasets_i.append(
ItmRankDatasetHardNegFromImage(txt_db, img_db, opts.negative_size))
train_dataset_t = ConcatDataset(train_datasets_t)
train_dataset_i = ConcatDataset(train_datasets_i)
train_dataloader_t = build_dataloader(
train_dataset_t, itm_rank_hn_collate, True, opts)
train_dataloader_i = build_dataloader(
train_dataset_i, itm_rank_hn_collate, True, opts)
# val
LOGGER.info(f"Loading Val Dataset {opts.val_txt_db}, {opts.val_img_db}")
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, -1)
val_dataset = ItmValDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
val_dataloader = build_dataloader(val_dataset, itm_val_collate,
False, opts)
# eval
LOGGER.info(f"Loading val, test Dataset for full evaluation: "
f"{opts.val_txt_db}, {opts.val_img_db}"
f"{opts.test_txt_db}, {opts.test_img_db}")
eval_dataset_val = ItmEvalDataset(val_txt_db, val_img_db,
opts.inf_minibatch_size)
eval_loader_val = build_dataloader(eval_dataset_val, itm_eval_collate,
False, opts)
test_img_db = all_img_dbs[opts.test_img_db]
test_txt_db = TxtTokLmdb(opts.test_txt_db, -1)
eval_dataset_test = ItmEvalDataset(test_txt_db, test_img_db,
opts.inf_minibatch_size)
eval_loader_test = build_dataloader(eval_dataset_test, itm_eval_collate,
False, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = UniterForImageTextRetrievalHardNeg.from_pretrained(
opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM, margin=opts.margin, hard_size=opts.hard_neg_size)
    model.init_output()  # the pretrained ITM head differs from the ranking head
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
LOGGER.info(f"***** Running training on {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d",
sum(all_gather_list(len(train_dataset_t))))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
global_step = 0
step = 0
n_examples = 0
n_hard_ex = 0
start = time()
train_iter_i = iter(train_dataloader_i)
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for batch in train_dataloader_t:
# hard text from image
try:
batch_i = next(train_iter_i)
except StopIteration:
train_iter_i = iter(train_dataloader_i)
batch_i = next(train_iter_i)
n_examples += batch_i['attn_masks'].size(0)
loss = model(batch_i, sample_from='i', compute_loss=True)
n_hard_ex += loss.numel()
loss = loss.mean() / opts.train_batch_size
with amp.scale_loss(loss, optimizer, delay_unscale=True
) as scaled_loss:
scaled_loss.backward()
# hard image from text
n_examples += batch['attn_masks'].size(0)
loss = model(batch, sample_from='t', compute_loss=True)
n_hard_ex += loss.numel()
            # NOTE we use gradient accumulation to implement train_batch_size
loss = loss.mean() / opts.train_batch_size
step += 1
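            # each iteration handles one positive pair plus its hard negatives;
            # gradients are accumulated over train_batch_size pairs before an
            # optimizer update (hence the modulo checks below)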
delay_unscale = step % opts.train_batch_size != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
                # gather gradients from all processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if step % opts.train_batch_size == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
# NOTE: not gathered across GPUs for efficiency
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
LOGGER.info(f'------------Step {global_step}-------------')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
tot_hn = sum(all_gather_list(n_hard_ex))
hn_per_sec = int(tot_hn / (time()-start))
LOGGER.info(f'{tot_ex} ({tot_hn}) examples (hard) '
f'trained at {ex_per_sec} ({hn_per_sec}) ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
TB_LOGGER.add_scalar('perf/hn_per_s',
hn_per_sec, global_step)
                    LOGGER.info('-------------------------------------------')
if global_step % opts.valid_steps == 0:
if opts.full_val:
LOGGER.info(
f"========================== Step {global_step} "
f"==========================")
val_log = evaluate(model, eval_loader_val)
TB_LOGGER.log_scaler_dict(
{f"valid/{k}": v for k, v in val_log.items()})
LOGGER.info(f"image retrieval R1: "
f"{val_log['img_r1']*100:.2f},\n"
f"image retrieval R5: "
f"{val_log['img_r5']*100:.2f},\n"
f"image retrieval R10: "
f"{val_log['img_r10']*100:.2f}\n"
f"text retrieval R1: "
f"{val_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: "
f"{val_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: "
f"{val_log['txt_r10']*100:.2f}")
LOGGER.info("================================="
"=================================")
else:
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
pbar.close()
# final validation
val_log = validate(model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final')
# evaluation
for split, loader in [('val', eval_loader_val),
('test', eval_loader_test)]:
eval_log = evaluate(model, loader)
TB_LOGGER.log_scaler_dict({f"eval/{split}_{k}": v
for k, v in eval_log.items()})
if hvd.rank() != 0:
continue
LOGGER.info(
f"========================= {split} ===========================\n"
f"image retrieval R1: {eval_log['img_r1']*100:.2f},\n"
f"image retrieval R5: {eval_log['img_r5']*100:.2f},\n"
f"image retrieval R10: {eval_log['img_r10']*100:.2f}\n"
f"text retrieval R1: {eval_log['txt_r1']*100:.2f},\n"
f"text retrieval R5: {eval_log['txt_r5']*100:.2f},\n"
f"text retrieval R10: {eval_log['txt_r10']*100:.2f}")
LOGGER.info("=========================================================")
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
pbar = tqdm(total=len(val_loader))
else:
pbar = NoOp()
LOGGER.info("start running Image Retrieval validation ...")
model.eval()
n_ex = 0
st = time()
recall_at_1, recall_at_5, recall_at_10 = 0, 0, 0
for batch in val_loader:
scores = model(batch, compute_loss=False)
_, indices = scores.squeeze(1).topk(10, dim=0)
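        # index 0 corresponds to the ground-truth pair, so its position among
        # the top-10 scores gives the retrieval rank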
rank = (indices == 0).nonzero()
if rank.numel():
rank = rank.item()
if rank < 1:
recall_at_1 += 1
if rank < 5:
recall_at_5 += 1
if rank < 10:
recall_at_10 += 1
n_ex += 1
pbar.update(1)
n_ex = sum(all_gather_list(n_ex))
recall_at_1 = sum(all_gather_list(recall_at_1)) / n_ex
recall_at_5 = sum(all_gather_list(recall_at_5)) / n_ex
recall_at_10 = sum(all_gather_list(recall_at_10)) / n_ex
tot_time = time()-st
val_log = {'valid/ex_per_s': n_ex/tot_time,
'valid/recall_1': recall_at_1,
'valid/recall_5': recall_at_5,
'valid/recall_10': recall_at_10}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"recall_1: {recall_at_1*100:.2f}, "
f"recall_5: {recall_at_5*100:.2f}, "
f"recall_10: {recall_at_10*100:.2f}")
pbar.close()
return val_log
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained MLM")
parser.add_argument("--output_dir", default=None, type=str,
help="The output directory where the model "
"checkpoints will be written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size", default=32, type=int,
help="batch size (# positive examples) for training. "
"(implemented with gradient accumulation)")
parser.add_argument("--negative_size", default=511, type=int,
help="Number of negative samples per positive sample"
"(forward only)")
parser.add_argument("--hard_neg_size", default=31, type=int,
help="Number of hard negative samples "
"per positive sample (acutally used to train)")
parser.add_argument("--inf_minibatch_size", default=512, type=int,
help="batch size for running inference. "
"(used for validation and evaluation)")
parser.add_argument("--margin", default=0.2, type=float,
help="margin of ranking loss")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps", default=1000, type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps", default=100000, type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--dropout", default=0.1, type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm", default=0.25, type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps", default=4000, type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--full_val', action='store_true',
help="Always run full evaluation during training")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
    # options safeguard
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
# for tensor core
assert (args.negative_size+1) % 8 == (args.hard_neg_size+1) % 8 == 0
main(args)
| 19,146 | 42.417234 | 79 | py |
UNITER | UNITER-master/optim/misc.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Misc lr helper
"""
from torch.optim import Adam, Adamax
from .adamw import AdamW
def build_optimizer(model, opts):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
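    # bias and LayerNorm parameters are exempt from weight decay, following the
    # usual BERT fine-tuning recipe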
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
# currently Adam only
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
| 1,037 | 27.833333 | 65 | py |
UNITER | UNITER-master/optim/adamw.py | """
AdamW optimizer (weight decay fix)
copied from hugginface (https://github.com/huggingface/transformers).
"""
import math
import torch
from torch.optim import Optimizer
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2).
            Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias
in Adam (e.g. like in Bert TF repository). Default True.
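    Example (a minimal usage sketch; the hyper-parameter values, ``model``,
    ``batch`` and the ``compute_loss`` helper are illustrative only)::
        optimizer = AdamW(model.parameters(), lr=1e-4, weight_decay=0.01)
        loss = compute_loss(model, batch)  # any scalar loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()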
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0.0, correct_bias=True):
if lr < 0.0:
raise ValueError(
"Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - "
"should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - "
"should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - "
"should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
correct_bias=correct_bias)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse '
'gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
step_size = group['lr']
if group['correct_bias']: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state['step']
bias_correction2 = 1.0 - beta2 ** state['step']
step_size = (step_size * math.sqrt(bias_correction2)
/ bias_correction1)
p.data.addcdiv_(-step_size, exp_avg, denom)
# Just adding the square of the weights to the loss function is
# *not* the correct way of using L2 regularization/weight decay
# with Adam, since that will interact with the m and v
# parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't
# interact with the m/v parameters. This is equivalent to
# adding the square of the weights to the loss with plain
# (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group['weight_decay'] > 0.0:
p.data.add_(-group['lr'] * group['weight_decay'], p.data)
return loss
| 4,450 | 41.798077 | 79 | py |
UNITER | UNITER-master/scripts/convert_ckpt.py | import sys
from collections import OrderedDict
import torch
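# Replaces 'bert' with 'uniter' in every parameter name so a BERT-style
# checkpoint can initialize UNITER. Usage sketch (paths are illustrative):
# python scripts/convert_ckpt.py bert_base.pt uniter_init.pt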
bert_ckpt, output_ckpt = sys.argv[1:]
bert = torch.load(bert_ckpt)
uniter = OrderedDict()
for k, v in bert.items():
uniter[k.replace('bert', 'uniter')] = v
torch.save(uniter, output_ckpt)
| 256 | 17.357143 | 43 | py |
UNITER | UNITER-master/utils/misc.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Misc utilities
"""
import json
import random
import sys
import torch
import numpy as np
from utils.logger import LOGGER
class NoOp(object):
""" useful for distributed training No-Ops """
def __getattr__(self, name):
return self.noop
def noop(self, *args, **kwargs):
return
def parse_with_config(parser):
args = parser.parse_args()
if args.config is not None:
config_args = json.load(open(args.config))
override_keys = {arg[2:].split('=')[0] for arg in sys.argv[1:]
if arg.startswith('--')}
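        # arguments passed explicitly on the command line take precedence over
        # the values stored in the JSON config file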
for k, v in config_args.items():
if k not in override_keys:
setattr(args, k, v)
del args.config
return args
VE_ENT2IDX = {
'contradiction': 0,
'entailment': 1,
'neutral': 2
}
VE_IDX2ENT = {
0: 'contradiction',
1: 'entailment',
2: 'neutral'
}
class Struct(object):
def __init__(self, dict_):
self.__dict__.update(dict_)
def set_dropout(model, drop_p):
for name, module in model.named_modules():
# we might want to tune dropout for smaller dataset
if isinstance(module, torch.nn.Dropout):
if module.p != drop_p:
module.p = drop_p
LOGGER.info(f'{name} set to {drop_p}')
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
| 1,507 | 20.239437 | 70 | py |
UNITER | UNITER-master/utils/save.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
saving utilities
"""
import json
import os
from os.path import abspath, dirname, exists, join
import subprocess
import torch
from utils.logger import LOGGER
def save_training_meta(args):
if args.rank > 0:
return
if not exists(args.output_dir):
os.makedirs(join(args.output_dir, 'log'))
os.makedirs(join(args.output_dir, 'ckpt'))
with open(join(args.output_dir, 'log', 'hps.json'), 'w') as writer:
json.dump(vars(args), writer, indent=4)
model_config = json.load(open(args.model_config))
with open(join(args.output_dir, 'log', 'model.json'), 'w') as writer:
json.dump(model_config, writer, indent=4)
# git info
try:
LOGGER.info("Waiting on git info....")
c = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
timeout=10, stdout=subprocess.PIPE)
git_branch_name = c.stdout.decode().strip()
LOGGER.info("Git branch: %s", git_branch_name)
c = subprocess.run(["git", "rev-parse", "HEAD"],
timeout=10, stdout=subprocess.PIPE)
git_sha = c.stdout.decode().strip()
LOGGER.info("Git SHA: %s", git_sha)
git_dir = abspath(dirname(__file__))
git_status = subprocess.check_output(
['git', 'status', '--short'],
cwd=git_dir, universal_newlines=True).strip()
with open(join(args.output_dir, 'log', 'git_info.json'),
'w') as writer:
json.dump({'branch': git_branch_name,
'is_dirty': bool(git_status),
'status': git_status,
'sha': git_sha},
writer, indent=4)
except subprocess.TimeoutExpired as e:
LOGGER.exception(e)
LOGGER.warn("Git info not found. Moving right along...")
class ModelSaver(object):
def __init__(self, output_dir, prefix='model_step', suffix='pt'):
self.output_dir = output_dir
self.prefix = prefix
self.suffix = suffix
def save(self, model, step, optimizer=None):
output_model_file = join(self.output_dir,
f"{self.prefix}_{step}.{self.suffix}")
state_dict = {k: v.cpu() if isinstance(v, torch.Tensor) else v
for k, v in model.state_dict().items()}
torch.save(state_dict, output_model_file)
if optimizer is not None:
dump = {'step': step, 'optimizer': optimizer.state_dict()}
if hasattr(optimizer, '_amp_stash'):
pass # TODO fp16 optimizer
torch.save(dump, f'{self.output_dir}/train_state_{step}.pt')
| 2,734 | 35.959459 | 73 | py |
UNITER | UNITER-master/utils/itm_eval.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Image Text Retrieval evaluation helper
"""
from time import time
import torch
from horovod import torch as hvd
from tqdm import tqdm
from .logger import LOGGER
from .misc import NoOp
from .distributed import all_gather_list
@torch.no_grad()
def itm_eval(score_matrix, txt_ids, img_ids, txt2img, img2txts):
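    # score_matrix has shape (n_txt, n_img): rows index captions, columns index
    # images; topk over dim=1 ranks images per caption (image retrieval) while
    # topk over dim=0 ranks captions per image (text retrieval)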
# image retrieval
img2j = {i: j for j, i in enumerate(img_ids)}
_, rank_txt = score_matrix.topk(10, dim=1)
gt_img_j = torch.LongTensor([img2j[txt2img[txt_id]]
for txt_id in txt_ids],
).to(rank_txt.device
).unsqueeze(1).expand_as(rank_txt)
rank = (rank_txt == gt_img_j).nonzero()
if rank.numel():
ir_r1 = (rank < 1).sum().item() / len(txt_ids)
ir_r5 = (rank < 5).sum().item() / len(txt_ids)
ir_r10 = (rank < 10).sum().item() / len(txt_ids)
else:
ir_r1, ir_r5, ir_r10 = 0, 0, 0
# text retrieval
txt2i = {t: i for i, t in enumerate(txt_ids)}
_, rank_img = score_matrix.topk(10, dim=0)
tr_r1, tr_r5, tr_r10 = 0, 0, 0
for j, img_id in enumerate(img_ids):
gt_is = [txt2i[t] for t in img2txts[img_id]]
ranks = [(rank_img[:, j] == i).nonzero() for i in gt_is]
rank = min([10] + [r.item() for r in ranks if r.numel()])
if rank < 1:
tr_r1 += 1
if rank < 5:
tr_r5 += 1
if rank < 10:
tr_r10 += 1
tr_r1 /= len(img_ids)
tr_r5 /= len(img_ids)
tr_r10 /= len(img_ids)
tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
r_mean = (tr_mean + ir_mean) / 2
eval_log = {'txt_r1': tr_r1,
'txt_r5': tr_r5,
'txt_r10': tr_r10,
'txt_r_mean': tr_mean,
'img_r1': ir_r1,
'img_r5': ir_r5,
'img_r10': ir_r10,
'img_r_mean': ir_mean,
'r_mean': r_mean}
return eval_log
@torch.no_grad()
def evaluate(model, eval_loader):
st = time()
LOGGER.info("start running Image/Text Retrieval evaluation ...")
score_matrix = inference(model, eval_loader)
dset = eval_loader.dataset
all_score = hvd.allgather(score_matrix)
all_txt_ids = [i for ids in all_gather_list(dset.ids)
for i in ids]
all_img_ids = dset.all_img_ids
assert all_score.size() == (len(all_txt_ids), len(all_img_ids))
if hvd.rank() != 0:
return {}
# NOTE: only use rank0 to compute final scores
eval_log = itm_eval(all_score, all_txt_ids, all_img_ids,
dset.txt2img, dset.img2txts)
tot_time = time()-st
LOGGER.info(f"evaluation finished in {int(tot_time)} seconds")
return eval_log
@torch.no_grad()
def inference(model, eval_loader):
model.eval()
if hvd.rank() == 0:
pbar = tqdm(total=len(eval_loader))
else:
pbar = NoOp()
score_matrix = torch.zeros(len(eval_loader.dataset),
len(eval_loader.dataset.all_img_ids),
device=torch.device("cuda"),
dtype=torch.float16)
for i, mini_batches in enumerate(eval_loader):
j = 0
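        # text i is scored against the full image pool in mini-batches; fill
        # row i of the score matrix block by block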
for batch in mini_batches:
scores = model(batch, compute_loss=False)
bs = scores.size(0)
score_matrix.data[i, j:j+bs] = scores.data.squeeze(1).half()
j += bs
assert j == score_matrix.size(1)
pbar.update(1)
model.train()
pbar.close()
return score_matrix
| 3,661 | 30.843478 | 72 | py |
UNITER | UNITER-master/utils/distributed.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
distributed API using Horovod
Modified from OpenNMT's native pytorch distributed utils
(https://github.com/OpenNMT/OpenNMT-py)
"""
import math
import pickle
import torch
from horovod import torch as hvd
def all_reduce_and_rescale_tensors(tensors, rescale_denom):
"""All-reduce and rescale tensors at once (as a flattened tensor)
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
"""
    # total number of elements across all tensors (flattened into one buffer)
sz = sum(t.numel() for t in tensors)
buffer_t = tensors[0].new(sz).zero_()
# copy tensors into buffer_t
offset = 0
for t in tensors:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
hvd.allreduce_(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in tensors:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
def all_reduce_and_rescale_tensors_chunked(tensors, rescale_denom,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
hvd.allreduce_(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
hvd.allreduce_(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer()
def broadcast_tensors(tensors, root_rank, buffer_size=10485760):
"""broadcast tensors in chunks of the specified size.
Args:
tensors: list of Tensors to broadcast
root_rank: rank to broadcast
buffer_size: broadcast chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def broadcast_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# broadcast
hvd.broadcast_(buffer_t[:offset], root_rank)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, broadcast directly
hvd.broadcast_(t, root_rank)
elif filled + sz > buffer_size:
# buffer is full, broadcast and replace buffer with tensor
broadcast_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
broadcast_buffer()
def _encode(enc, max_size, use_max_size=False):
enc_size = len(enc)
enc_byte = max(math.floor(math.log(max_size, 256)+1), 1)
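    # the first enc_byte bytes form a base-256 length prefix, followed by the
    # pickled payload itself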
if use_max_size:
# this is used for broadcasting
buffer_ = torch.cuda.ByteTensor(max_size+enc_byte)
else:
buffer_ = torch.cuda.ByteTensor(enc_size+enc_byte)
remainder = enc_size
for i in range(enc_byte):
base = 256 ** (enc_byte-i-1)
buffer_[i] = remainder // base
remainder %= base
buffer_[enc_byte:enc_byte+enc_size] = torch.ByteTensor(list(enc))
return buffer_, enc_byte
def _decode(buffer_, enc_byte):
size = sum(256 ** (enc_byte-i-1) * buffer_[i].item()
for i in range(enc_byte))
bytes_list = bytes(buffer_[enc_byte:enc_byte+size].tolist())
shift = size + enc_byte
return bytes_list, shift
_BUFFER_SIZE = 4096
def all_gather_list(data):
"""Gathers arbitrary data from all nodes into a list."""
enc = pickle.dumps(data)
enc_size = len(enc)
max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
in_buffer, enc_byte = _encode(enc, max_size)
out_buffer = hvd.allgather(in_buffer[:enc_byte+enc_size])
results = []
for _ in range(hvd.size()):
bytes_list, shift = _decode(out_buffer, enc_byte)
out_buffer = out_buffer[shift:]
result = pickle.loads(bytes_list)
results.append(result)
return results
def any_broadcast(data, root_rank):
"""broadcast arbitrary data from root_rank to all nodes."""
enc = pickle.dumps(data)
max_size = hvd.allgather(torch.tensor([len(enc)]).cuda()).max().item()
buffer_, enc_byte = _encode(enc, max_size, use_max_size=True)
hvd.broadcast_(buffer_, root_rank)
bytes_list, _ = _decode(buffer_, enc_byte)
result = pickle.loads(bytes_list)
return result
| 6,296 | 28.985714 | 77 | py |
UNITER | UNITER-master/data/vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VCR dataset
"""
import copy
import json
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
TxtLmdb, get_ids_and_lens, pad_tensors,
get_gather_index)
class VcrTxtTokLmdb(TxtTokLmdb):
def __init__(self, db_dir, max_txt_len=120, task="qa,qar"):
assert task == "qa" or task == "qar" or task == "qa,qar",\
"VCR only support the following tasks: 'qa', 'qar' or 'qa,qar'"
self.task = task
if task == "qa,qar":
id2len_task = "qar"
else:
id2len_task = task
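        # id2len_{task}.json maps example id -> token length; when both tasks
        # are loaded together ('qa,qar') the longer qar lengths are used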
if max_txt_len == -1:
self.id2len = json.load(
open(f'{db_dir}/id2len_{id2len_task}.json'))
else:
self.id2len = {
id_: len_
for id_, len_ in json.load(
open(f'{db_dir}/id2len_{id2len_task}.json')
).items()
if len_ <= max_txt_len
}
self.db_dir = db_dir
self.db = TxtLmdb(db_dir, readonly=True)
meta = json.load(open(f'{db_dir}/meta.json', 'r'))
self.cls_ = meta['CLS']
self.sep = meta['SEP']
self.mask = meta['MASK']
self.v_range = meta['v_range']
class VcrDetectFeatTxtTokDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db_gt=None, img_db=None):
assert not (img_db_gt is None and img_db is None),\
"img_db_gt and img_db cannot all be None"
assert isinstance(txt_db, VcrTxtTokLmdb)
assert img_db_gt is None or isinstance(img_db_gt, DetectFeatLmdb)
assert img_db is None or isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
self.img_db_gt = img_db_gt
self.task = self.txt_db.task
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
if self.img_db and self.img_db_gt:
self.lens = [tl+self.img_db_gt.name2nbb[txt2img[id_][0]] +
self.img_db.name2nbb[txt2img[id_][1]]
for tl, id_ in zip(txt_lens, self.ids)]
elif self.img_db:
self.lens = [tl+self.img_db.name2nbb[txt2img[id_][1]]
for tl, id_ in zip(txt_lens, self.ids)]
else:
self.lens = [tl+self.img_db_gt.name2nbb[txt2img[id_][0]]
for tl, id_ in zip(txt_lens, self.ids)]
def _get_img_feat(self, fname_gt, fname):
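        # gold-box (gt) features come from img_db_gt and detected-box features
        # from img_db; when both are available the gold boxes are concatenated
        # first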
if self.img_db and self.img_db_gt:
img_feat_gt, bb_gt = self.img_db_gt[fname_gt]
img_bb_gt = torch.cat([bb_gt, bb_gt[:, 4:5]*bb_gt[:, 5:]], dim=-1)
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_feat = torch.cat([img_feat_gt, img_feat], dim=0)
img_bb = torch.cat([img_bb_gt, img_bb], dim=0)
num_bb = img_feat.size(0)
elif self.img_db:
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
elif self.img_db_gt:
img_feat, bb = self.img_db_gt[fname_gt]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
return img_feat, img_bb, num_bb
class VcrDataset(VcrDetectFeatTxtTokDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.task != "qa,qar",\
"loading training dataset with each task separately"
def _get_input_ids(self, txt_dump):
# text input
input_ids_q = txt_dump['input_ids']
type_ids_q = [0]*len(input_ids_q)
input_ids_as = txt_dump['input_ids_as']
if self.task == "qar":
input_ids_rs = txt_dump['input_ids_rs']
answer_label = txt_dump['qa_target']
assert answer_label >= 0, "answer_label < 0"
input_ids_gt_a = [self.txt_db.sep] + copy.deepcopy(
input_ids_as[answer_label])
type_ids_gt_a = [2] * len(input_ids_gt_a)
type_ids_q += type_ids_gt_a
input_ids_q += input_ids_gt_a
input_ids_for_choices = input_ids_rs
else:
input_ids_for_choices = input_ids_as
return input_ids_q, input_ids_for_choices, type_ids_q
def __getitem__(self, i):
"""
[[txt, img1],
[txt, img2]]
"""
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
input_ids_q, input_ids_for_choices, type_ids_q = self._get_input_ids(
example)
label = example['%s_target' % (self.task)]
outs = []
for index, input_ids_a in enumerate(input_ids_for_choices):
if index == label:
target = torch.tensor([1]).long()
else:
target = torch.tensor([0]).long()
input_ids = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\
[self.txt_db.sep] + input_ids_a + [self.txt_db.sep]
# type_id
# 0 -- question
# 1 -- region
# 2 -- answer
# 3 -- rationale
type_id_for_choice = 3 if type_ids_q[-1] == 2 else 2
txt_type_ids = [0] + type_ids_q + [type_id_for_choice]*(
len(input_ids_a)+2)
attn_masks = torch.ones(
len(input_ids) + num_bb, dtype=torch.long)
input_ids = torch.tensor(input_ids)
txt_type_ids = torch.tensor(txt_type_ids)
outs.append(
(input_ids, txt_type_ids,
img_feat, img_pos_feat,
attn_masks, target))
return tuple(outs)
def vcr_collate(inputs):
(input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks, targets) = map(list, unzip(concat(inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_type_ids = pad_sequence(
txt_type_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.stack(targets, dim=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'txt_type_ids': txt_type_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
class VcrEvalDataset(VcrDetectFeatTxtTokDataset):
def __init__(self, split, *args, **kwargs):
super().__init__(*args, **kwargs)
self.split = split
assert self.task == "qa,qar",\
"loading evaluation dataset with two tasks together"
def _get_input_ids(self, txt_dump):
# text input
input_ids_for_choices = []
type_ids_for_choices = []
input_ids_q = txt_dump['input_ids']
type_ids_q = [0]*len(input_ids_q)
input_ids_as = txt_dump['input_ids_as']
input_ids_rs = txt_dump['input_ids_rs']
for index, input_ids_a in enumerate(input_ids_as):
curr_input_ids_qa = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\
[self.txt_db.sep] + input_ids_a + [self.txt_db.sep]
curr_type_ids_qa = [0] + type_ids_q + [2]*(
len(input_ids_a)+2)
input_ids_for_choices.append(curr_input_ids_qa)
type_ids_for_choices.append(curr_type_ids_qa)
for index, input_ids_a in enumerate(input_ids_as):
curr_input_ids_qa = [self.txt_db.cls_] + copy.deepcopy(input_ids_q) +\
[self.txt_db.sep] + input_ids_a + [self.txt_db.sep]
curr_type_ids_qa = [0] + type_ids_q + [2]*(
len(input_ids_a)+1)
if (self.split == "val" and index == txt_dump["qa_target"]) or\
self.split == "test":
for input_ids_r in input_ids_rs:
curr_input_ids_qar = copy.deepcopy(curr_input_ids_qa) +\
input_ids_r + [self.txt_db.sep]
curr_type_ids_qar = copy.deepcopy(curr_type_ids_qa) +\
[3]*(len(input_ids_r)+2)
input_ids_for_choices.append(curr_input_ids_qar)
type_ids_for_choices.append(curr_type_ids_qar)
return input_ids_for_choices, type_ids_for_choices
def __getitem__(self, i):
qid = self.ids[i]
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
input_ids_for_choices, type_ids_for_choices = self._get_input_ids(
example)
qa_target = torch.tensor([int(example["qa_target"])])
qar_target = torch.tensor([int(example["qar_target"])])
outs = []
for index, input_ids in enumerate(input_ids_for_choices):
attn_masks = torch.ones(
len(input_ids) + num_bb, dtype=torch.long)
input_ids = torch.tensor(input_ids)
txt_type_ids = torch.tensor(
type_ids_for_choices[index])
outs.append(
(input_ids, txt_type_ids,
img_feat, img_pos_feat,
attn_masks))
return tuple(outs), qid, qa_target, qar_target
def vcr_eval_collate(inputs):
(input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks) = map(
list, unzip(concat(outs for outs, _, _, _ in inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_type_ids = pad_sequence(
txt_type_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
qa_targets = torch.stack(
[t for _, _, t, _ in inputs], dim=0)
qar_targets = torch.stack(
[t for _, _, _, t in inputs], dim=0)
qids = [id_ for _, id_, _, _ in inputs]
return {'qids': qids,
'input_ids': input_ids,
'txt_type_ids': txt_type_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'qa_targets': qa_targets,
'qar_targets': qar_targets}
| 11,643 | 37.556291 | 82 | py |
UNITER | UNITER-master/data/mlm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MLM datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb,
pad_tensors, get_gather_index)
def random_word(tokens, vocab_range, mask):
"""
Masking some random tokens for Language Model task with probabilities as in
the original BERT paper.
:param tokens: list of int, tokenized sentence.
    :param vocab_range: (low, high) range of ids to draw a replacement from
    :param mask: id of the [MASK] token used for masking
:return: (list of int, list of int), masked tokens and related labels for
LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = mask
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(range(*vocab_range)))
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
output_label.append(token)
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
if all(o == -1 for o in output_label):
# at least mask 1
output_label[0] = tokens[0]
tokens[0] = mask
return tokens, output_label
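# --- Illustrative check (not part of the original pretraining code) ---
# A minimal sketch of the masking rule above on a toy vocabulary.  The token
# ids, vocab range and mask id are made up; the assertions only verify that
# untouched positions get label -1 and that every label keeps the original
# token id.
def _demo_random_word():
    random.seed(0)
    vocab_range = (1000, 2000)     # hypothetical range of ordinary word ids
    mask_id = 103                  # hypothetical [MASK] id
    tokens = list(range(1000, 1020))
    masked, labels = random_word(list(tokens), vocab_range, mask_id)
    for orig, new, lbl in zip(tokens, masked, labels):
        if lbl == -1:
            assert new == orig     # position untouched, ignored by the loss
        else:
            assert lbl == orig     # label always stores the original id
    return masked, labels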
class MlmDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db):
assert isinstance(txt_db, TxtTokLmdb)
super().__init__(txt_db, img_db)
def __getitem__(self, i):
"""
Return:
- input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- img_feat : (num_bb, d)
- img_pos_feat : (num_bb, 7)
        - attn_masks : (L + num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
- txt_labels : (L, ), [-1, -1, wid, -1, -1, -1]
0's padded so that (L + num_bb) % 8 == 0
"""
example = super().__getitem__(i)
# text input
input_ids, txt_labels = self.create_mlm_io(example['input_ids'])
# img input
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return input_ids, img_feat, img_pos_feat, attn_masks, txt_labels
def create_mlm_io(self, input_ids):
input_ids, txt_labels = random_word(input_ids,
self.txt_db.v_range,
self.txt_db.mask)
input_ids = torch.tensor([self.txt_db.cls_]
+ input_ids
+ [self.txt_db.sep])
txt_labels = torch.tensor([-1] + txt_labels + [-1])
return input_ids, txt_labels
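# --- Illustrative sketch (not part of this module) ---
# txt_labels marks every position that should not contribute to the MLM loss
# with -1.  The loss itself lives in the model code; the sketch below only
# shows how such labels are typically consumed, assuming a cross-entropy
# with ignore_index=-1 over per-token vocabulary scores.
def _demo_mlm_loss_ignores_padding():
    import torch.nn.functional as F
    vocab_size = 30                                # hypothetical tiny vocab
    scores = torch.randn(6, vocab_size)            # (L, vocab)
    txt_labels = torch.tensor([-1, 7, -1, -1, 3, -1])
    # only positions 1 and 4 (the masked tokens) contribute to the loss
    loss = F.cross_entropy(scores, txt_labels, ignore_index=-1)
    return loss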
def mlm_collate(inputs):
"""
Return:
:input_ids (n, max_L) padded with 0
:position_ids (n, max_L) padded with 0
:txt_lens list of [txt_len]
:img_feat (n, max_num_bb, feat_dim)
:img_pos_feat (n, max_num_bb, 7)
:num_bbs list of [num_bb]
:attn_masks (n, max_{L + num_bb}) padded with 0
:txt_labels (n, max_L) padded with -1
"""
(input_ids, img_feats, img_pos_feats, attn_masks, txt_labels
) = map(list, unzip(inputs))
# text batches
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_labels = pad_sequence(txt_labels, batch_first=True, padding_value=-1)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'txt_labels': txt_labels}
return batch
| 4,551 | 32.226277 | 79 | py |
UNITER | UNITER-master/data/sampler.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
sampler for length bucketing (batch by tokens)
"""
import math
import random
import horovod.torch as hvd
import torch
from torch.utils.data import Sampler
from cytoolz import partition_all
class TokenBucketSampler(Sampler):
def __init__(self, lens, bucket_size, batch_size,
droplast=False, size_multiple=8):
self._lens = lens
self._max_tok = batch_size
self._bucket_size = bucket_size
self._droplast = droplast
self._size_mul = size_multiple
def _create_ids(self):
return list(range(len(self._lens)))
def _sort_fn(self, i):
return self._lens[i]
def __iter__(self):
ids = self._create_ids()
random.shuffle(ids)
buckets = [sorted(ids[i:i+self._bucket_size],
key=self._sort_fn, reverse=True)
for i in range(0, len(ids), self._bucket_size)]
        # fill batches until max_tokens (including padding)
batches = []
for bucket in buckets:
max_len = 0
batch_indices = []
for indices in partition_all(self._size_mul, bucket):
max_len = max(max_len, max(self._lens[i] for i in indices))
if (max_len * (len(batch_indices) + self._size_mul)
> self._max_tok):
if not batch_indices:
raise ValueError(
"max_tokens too small / max_seq_len too long")
assert len(batch_indices) % self._size_mul == 0
batches.append(batch_indices)
batch_indices = list(indices)
else:
batch_indices.extend(indices)
if not self._droplast and batch_indices:
batches.append(batch_indices)
random.shuffle(batches)
return iter(batches)
def __len__(self):
raise ValueError("NOT supported. "
"This has some randomness across epochs")
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
shuffle (optional): If true (default), sampler will shuffle the indices
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
num_replicas = hvd.size()
if rank is None:
rank = hvd.rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset)
* 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
if self.shuffle:
            shuffle_ind = torch.randperm(len(indices), generator=g).tolist()
            indices = [indices[i] for i in shuffle_ind]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
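# --- Illustrative usage (not part of the original sampler code) ---
# A sketch of the rank sharding above, run without horovod by passing
# num_replicas and rank explicitly.  With 10 samples and 4 replicas each
# rank gets ceil(10/4) = 3 indices; the first two indices are repeated so
# that all shards have equal size.
def _demo_distributed_sampler():
    dataset = list(range(10))          # only len() is used
    shards = []
    for rank in range(4):
        sampler = DistributedSampler(dataset, num_replicas=4, rank=rank,
                                     shuffle=False)
        shards.append(list(iter(sampler)))
    assert all(len(s) == 3 for s in shards)
    assert set(i for s in shards for i in s) == set(range(10))
    return shards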
| 4,199 | 33.42623 | 79 | py |
UNITER | UNITER-master/data/mrm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MRM Datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
def _get_img_mask(mask_prob, num_bb):
img_mask = [random.random() < mask_prob for _ in range(num_bb)]
if not any(img_mask):
# at least mask 1
img_mask[random.choice(range(num_bb))] = True
img_mask = torch.tensor(img_mask)
return img_mask
def _get_img_tgt_mask(img_mask, txt_len):
z = torch.zeros(txt_len, dtype=torch.uint8)
img_mask_tgt = torch.cat([z, img_mask], dim=0)
return img_mask_tgt
def _get_feat_target(img_feat, img_masks):
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat) # (n, m, d)
feat_dim = img_feat.size(-1)
feat_targets = img_feat[img_masks_ext].contiguous().view(
-1, feat_dim) # (s, d)
return feat_targets
def _mask_img_feat(img_feat, img_masks):
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat)
img_feat_masked = img_feat.data.masked_fill(img_masks_ext, 0)
return img_feat_masked
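# --- Illustrative check (not part of the original pretraining code) ---
# A toy batch showing how the helpers above fit together: sample a
# per-region mask, zero out the masked region features, and keep the
# original features as regression targets.  Shapes are tiny and arbitrary.
def _demo_region_masking():
    torch.manual_seed(0)
    num_bb, feat_dim = 4, 5
    img_feat = torch.randn(2, num_bb, feat_dim)             # (n, m, d)
    img_masks = torch.stack([_get_img_mask(0.5, num_bb),
                             _get_img_mask(0.5, num_bb)])   # (n, m)
    feat_targets = _get_feat_target(img_feat, img_masks)    # (#masked, d)
    img_feat_masked = _mask_img_feat(img_feat, img_masks)
    assert feat_targets.size(0) == int(img_masks.sum())
    assert img_feat_masked[img_masks].abs().sum() == 0
    return img_feat_masked, feat_targets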
class MrfrDataset(DetectFeatTxtTokDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def __getitem__(self, i):
"""
Return:
- input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- img_feat : (num_bb, d)
- img_pos_feat : (num_bb, 7)
        - attn_masks : (L + num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
- img_mask : (num_bb, ) between {0, 1}
"""
example = super().__getitem__(i)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
# image input features
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat,
attn_masks, img_mask, img_mask_tgt)
def mrfr_collate(inputs):
"""
Return:
- input_ids : (n, max_L), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- position_ids : (n, max_L)
- txt_lens : list of [input_len]
- img_feat : (n, max_num_bb, d)
- img_pos_feat : (n, max_num_bb, 7)
- num_bbs : list of [num_bb]
    - attn_masks    : (n, max_{L + num_bb}), i.e., [1, 1, ..., 0, 0, 1, 1]
- img_masks : (n, max_num_bb) between {0, 1}
"""
(input_ids, img_feats, img_pos_feats, attn_masks, img_masks, img_mask_tgts,
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
# mask features
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
feat_targets = _get_feat_target(img_feat, img_masks)
img_feat = _mask_img_feat(img_feat, img_masks)
img_mask_tgt = pad_sequence(img_mask_tgts,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'feat_targets': feat_targets,
'img_masks': img_masks,
'img_mask_tgt': img_mask_tgt}
return batch
def _get_targets(img_masks, img_soft_label):
soft_label_dim = img_soft_label.size(-1)
img_masks_ext_for_label = img_masks.unsqueeze(-1).expand_as(img_soft_label)
label_targets = img_soft_label[img_masks_ext_for_label].contiguous().view(
-1, soft_label_dim)
return label_targets
class MrcDataset(DetectFeatTxtTokDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def _get_img_feat(self, fname):
img_dump = self.img_db.get_dump(fname)
num_bb = self.img_db.name2nbb[fname]
img_feat = torch.tensor(img_dump['features'])
bb = torch.tensor(img_dump['norm_bb'])
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_soft_label = torch.tensor(img_dump['soft_labels'])
return img_feat, img_bb, img_soft_label, num_bb
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, img_soft_labels, num_bb = self._get_img_feat(
example['img_fname'])
# image input features
img_mask = _get_img_mask(self.mask_prob, num_bb)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat,
img_soft_labels, attn_masks, img_mask, img_mask_tgt)
def mrc_collate(inputs):
(input_ids, img_feats, img_pos_feats, img_soft_labels,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
num_bbs = [f.size(0) for f in img_feats]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
img_soft_label = pad_tensors(img_soft_labels, num_bbs)
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
label_targets = _get_targets(img_masks, img_soft_label)
img_feat = _mask_img_feat(img_feat, img_masks)
img_mask_tgt = pad_sequence(img_mask_tgts,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_masks': img_masks,
'img_mask_tgt': img_mask_tgt,
'label_targets': label_targets}
return batch
| 7,228 | 34.965174 | 79 | py |
UNITER | UNITER-master/data/vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VQA dataset
"""
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
def _get_vqa_target(example, num_answers):
target = torch.zeros(num_answers)
labels = example['target']['labels']
scores = example['target']['scores']
if labels and scores:
target.scatter_(0, torch.tensor(labels), torch.tensor(scores))
return target
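# --- Illustrative check (not part of the original dataset code) ---
# A toy example of the soft-target construction above: each annotated answer
# id gets its VQA score scattered into a dense vector over the answer
# vocabulary.  The label ids and scores below are made up.
def _demo_vqa_target():
    example = {'target': {'labels': [3, 7], 'scores': [0.9, 0.3]}}
    target = _get_vqa_target(example, num_answers=10)
    assert int((target > 0).sum()) == 2
    assert torch.allclose(target[torch.tensor([3, 7])],
                          torch.tensor([0.9, 0.3]))
    return target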
class VqaDataset(DetectFeatTxtTokDataset):
def __init__(self, num_answers, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_answers = num_answers
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
target = _get_vqa_target(example, self.num_answers)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return input_ids, img_feat, img_pos_feat, attn_masks, target
def vqa_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.stack(targets, dim=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
class VqaEvalDataset(VqaDataset):
def __getitem__(self, i):
qid = self.ids[i]
example = DetectFeatTxtTokDataset.__getitem__(self, i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
if 'target' in example:
target = _get_vqa_target(example, self.num_answers)
else:
target = None
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return qid, input_ids, img_feat, img_pos_feat, attn_masks, target
def vqa_eval_collate(inputs):
(qids, input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
if targets[0] is None:
targets = None
else:
targets = torch.stack(targets, dim=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'qids': qids,
'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
| 4,105 | 31.330709 | 76 | py |
UNITER | UNITER-master/data/data.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Dataset interfaces
"""
from collections import defaultdict
from contextlib import contextmanager
import io
import json
from os.path import exists
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
import horovod.torch as hvd
from tqdm import tqdm
import lmdb
from lz4.frame import compress, decompress
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
def _fp16_to_fp32(feat_dict):
out = {k: arr.astype(np.float32)
if arr.dtype == np.float16 else arr
for k, arr in feat_dict.items()}
return out
def compute_num_bb(confs, conf_th, min_bb, max_bb):
num_bb = max(min_bb, (confs > conf_th).sum())
num_bb = min(max_bb, num_bb)
return num_bb
def _check_distributed():
try:
dist = hvd.size() != hvd.local_size()
except ValueError:
# not using horovod
dist = False
return dist
class DetectFeatLmdb(object):
def __init__(self, img_dir, conf_th=0.2, max_bb=100, min_bb=10, num_bb=36,
                 compress=True):
        self.img_dir = img_dir
        # keep the thresholds: _compute_nbb needs them when nbb isn't cached
        self.conf_th, self.min_bb, self.max_bb = conf_th, min_bb, max_bb
if conf_th == -1:
db_name = f'feat_numbb{num_bb}'
self.name2nbb = defaultdict(lambda: num_bb)
else:
db_name = f'feat_th{conf_th}_max{max_bb}_min{min_bb}'
nbb = f'nbb_th{conf_th}_max{max_bb}_min{min_bb}.json'
if not exists(f'{img_dir}/{nbb}'):
# nbb is not pre-computed
self.name2nbb = None
else:
self.name2nbb = json.load(open(f'{img_dir}/{nbb}'))
self.compress = compress
if compress:
db_name += '_compressed'
if self.name2nbb is None:
if compress:
db_name = 'all_compressed'
else:
db_name = 'all'
# only read ahead on single node training
self.env = lmdb.open(f'{img_dir}/{db_name}',
readonly=True, create=False,
readahead=not _check_distributed())
self.txn = self.env.begin(buffers=True)
if self.name2nbb is None:
self.name2nbb = self._compute_nbb()
def _compute_nbb(self):
name2nbb = {}
fnames = json.loads(self.txn.get(key=b'__keys__').decode('utf-8'))
for fname in tqdm(fnames, desc='reading images'):
dump = self.txn.get(fname.encode('utf-8'))
if self.compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
confs = img_dump['conf']
else:
img_dump = msgpack.loads(dump, raw=False)
confs = img_dump['conf']
name2nbb[fname] = compute_num_bb(confs, self.conf_th,
self.min_bb, self.max_bb)
return name2nbb
def __del__(self):
self.env.close()
def get_dump(self, file_name):
# hack for MRC
dump = self.txn.get(file_name.encode('utf-8'))
nbb = self.name2nbb[file_name]
if self.compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
img_dump = _fp16_to_fp32(img_dump)
else:
img_dump = msgpack.loads(dump, raw=False)
img_dump = _fp16_to_fp32(img_dump)
img_dump = {k: arr[:nbb, ...] for k, arr in img_dump.items()}
return img_dump
def __getitem__(self, file_name):
dump = self.txn.get(file_name.encode('utf-8'))
nbb = self.name2nbb[file_name]
if self.compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
img_dump = {'features': img_dump['features'],
'norm_bb': img_dump['norm_bb']}
else:
img_dump = msgpack.loads(dump, raw=False)
img_feat = torch.tensor(img_dump['features'][:nbb, :]).float()
img_bb = torch.tensor(img_dump['norm_bb'][:nbb, :]).float()
return img_feat, img_bb
@contextmanager
def open_lmdb(db_dir, readonly=False):
db = TxtLmdb(db_dir, readonly)
try:
yield db
finally:
del db
class TxtLmdb(object):
def __init__(self, db_dir, readonly=True):
self.readonly = readonly
if readonly:
# training
self.env = lmdb.open(db_dir,
readonly=True, create=False,
readahead=not _check_distributed())
self.txn = self.env.begin(buffers=True)
self.write_cnt = None
else:
# prepro
self.env = lmdb.open(db_dir, readonly=False, create=True,
map_size=4 * 1024**4)
self.txn = self.env.begin(write=True)
self.write_cnt = 0
def __del__(self):
if self.write_cnt:
self.txn.commit()
self.env.close()
def __getitem__(self, key):
return msgpack.loads(decompress(self.txn.get(key.encode('utf-8'))),
raw=False)
def __setitem__(self, key, value):
# NOTE: not thread safe
if self.readonly:
raise ValueError('readonly text DB')
ret = self.txn.put(key.encode('utf-8'),
compress(msgpack.dumps(value, use_bin_type=True)))
self.write_cnt += 1
if self.write_cnt % 1000 == 0:
self.txn.commit()
self.txn = self.env.begin(write=True)
self.write_cnt = 0
return ret
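# --- Illustrative check (not part of the original storage code) ---
# Values go through lz4-compressed msgpack, mirroring __setitem__ /
# __getitem__ above but without touching LMDB.  A minimal round-trip:
def _demo_msgpack_lz4_roundtrip():
    value = {'input_ids': [101, 7592, 102], 'img_fname': 'coco_000000000139'}
    blob = compress(msgpack.dumps(value, use_bin_type=True))
    restored = msgpack.loads(decompress(blob), raw=False)
    assert restored == value
    return len(blob)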
class TxtTokLmdb(object):
def __init__(self, db_dir, max_txt_len=60):
if max_txt_len == -1:
self.id2len = json.load(open(f'{db_dir}/id2len.json'))
else:
self.id2len = {
id_: len_
for id_, len_ in json.load(open(f'{db_dir}/id2len.json')
).items()
if len_ <= max_txt_len
}
self.db_dir = db_dir
self.db = TxtLmdb(db_dir, readonly=True)
meta = json.load(open(f'{db_dir}/meta.json', 'r'))
self.cls_ = meta['CLS']
self.sep = meta['SEP']
self.mask = meta['MASK']
self.v_range = meta['v_range']
def __getitem__(self, id_):
txt_dump = self.db[id_]
return txt_dump
def combine_inputs(self, *inputs):
input_ids = [self.cls_]
for ids in inputs:
input_ids.extend(ids + [self.sep])
return torch.tensor(input_ids)
@property
def txt2img(self):
txt2img = json.load(open(f'{self.db_dir}/txt2img.json'))
return txt2img
@property
def img2txts(self):
img2txts = json.load(open(f'{self.db_dir}/img2txts.json'))
return img2txts
def get_ids_and_lens(db):
assert isinstance(db, TxtTokLmdb)
lens = []
ids = []
for id_ in list(db.id2len.keys())[hvd.rank()::hvd.size()]:
lens.append(db.id2len[id_])
ids.append(id_)
return lens, ids
class DetectFeatTxtTokDataset(Dataset):
def __init__(self, txt_db, img_db):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
self.lens = [tl + self.img_db.name2nbb[txt2img[id_]]
for tl, id_ in zip(txt_lens, self.ids)]
def __len__(self):
return len(self.ids)
def __getitem__(self, i):
id_ = self.ids[i]
example = self.txt_db[id_]
return example
def _get_img_feat(self, fname):
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
return img_feat, img_bb, num_bb
def pad_tensors(tensors, lens=None, pad=0):
"""B x [T, ...]"""
if lens is None:
lens = [t.size(0) for t in tensors]
max_len = max(lens)
bs = len(tensors)
hid = tensors[0].size(-1)
dtype = tensors[0].dtype
output = torch.zeros(bs, max_len, hid, dtype=dtype)
if pad:
output.data.fill_(pad)
for i, (t, l) in enumerate(zip(tensors, lens)):
output.data[i, :l, ...] = t.data
return output
def get_gather_index(txt_lens, num_bbs, batch_size, max_len, out_size):
assert len(txt_lens) == len(num_bbs) == batch_size
gather_index = torch.arange(0, out_size, dtype=torch.long,
).unsqueeze(0).repeat(batch_size, 1)
for i, (tl, nbb) in enumerate(zip(txt_lens, num_bbs)):
gather_index.data[i, tl:tl+nbb] = torch.arange(max_len, max_len+nbb,
dtype=torch.long).data
return gather_index
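# --- Illustrative sketch (not part of the original dataset code) ---
# gather_index re-packs per-sample [text ; image] embeddings, each padded
# separately, into one contiguous jointly padded sequence.  The torch.gather
# call below is an assumption about how the model consumes the index; it is
# shown here only to make the layout concrete.
def _demo_gather_index():
    txt_lens, num_bbs = [3, 2], [2, 3]
    bs, max_tl, hid = 2, max(txt_lens), 4
    out_size = max(tl + nbb for tl, nbb in zip(txt_lens, num_bbs))
    txt_emb = pad_tensors([torch.ones(tl, hid) for tl in txt_lens])
    img_emb = pad_tensors([torch.full((nbb, hid), 2.) for nbb in num_bbs])
    gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
    index = gather_index.unsqueeze(-1).expand(-1, -1, hid)
    joint = torch.gather(torch.cat([txt_emb, img_emb], dim=1), 1, index)
    # sample 0: 3 text rows (all 1.) followed by 2 image rows (all 2.)
    assert joint[0, :3].eq(1).all() and joint[0, 3:5].eq(2).all()
    # sample 1: 2 text rows followed by 3 image rows
    assert joint[1, :2].eq(1).all() and joint[1, 2:5].eq(2).all()
    return joint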
class ConcatDatasetWithLens(ConcatDataset):
""" A thin wrapper on pytorch concat dataset for lens batching """
def __init__(self, datasets):
super().__init__(datasets)
self.lens = [l for dset in datasets for l in dset.lens]
def __getattr__(self, name):
return self._run_method_on_all_dsets(name)
def _run_method_on_all_dsets(self, name):
def run_all(*args, **kwargs):
return [dset.__getattribute__(name)(*args, **kwargs)
for dset in self.datasets]
return run_all
class ImageLmdbGroup(object):
def __init__(self, conf_th, max_bb, min_bb, num_bb, compress):
self.path2imgdb = {}
self.conf_th = conf_th
self.max_bb = max_bb
self.min_bb = min_bb
self.num_bb = num_bb
self.compress = compress
def __getitem__(self, path):
img_db = self.path2imgdb.get(path, None)
if img_db is None:
img_db = DetectFeatLmdb(path, self.conf_th, self.max_bb,
self.min_bb, self.num_bb, self.compress)
return img_db
| 10,028 | 31.041534 | 78 | py |
UNITER | UNITER-master/data/pretrain_vcr.py | from .vcr import VcrDetectFeatTxtTokDataset
from .mlm import random_word
import torch
from toolz.sandbox import unzip
from torch.nn.utils.rnn import pad_sequence
from .data import pad_tensors, get_gather_index
from .mrm import (
_get_img_tgt_mask, _get_img_mask, _mask_img_feat,
_get_feat_target, _get_targets)
class VcrPretrainDataset(VcrDetectFeatTxtTokDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _get_input_ids(self, txt_dump, mask=False):
# text input
input_ids_q = txt_dump['input_ids']
type_ids_q = [0]*len(input_ids_q)
if mask:
input_ids_q, txt_labels_q = random_word(
input_ids_q, self.txt_db.v_range,
self.txt_db.mask)
else:
txt_labels_q = input_ids_q
answer_label = txt_dump['qa_target']
assert answer_label >= 0, "answer_label < 0"
input_ids_a = txt_dump['input_ids_as'][answer_label]
type_ids_a = [2]*len(input_ids_a)
if mask:
input_ids_a, txt_labels_a = random_word(
input_ids_a, self.txt_db.v_range,
self.txt_db.mask)
else:
txt_labels_a = input_ids_a
input_ids = input_ids_q + [self.txt_db.sep] + input_ids_a
type_ids = type_ids_q + [0] + type_ids_a
txt_labels = txt_labels_q + [-1] + txt_labels_a
if self.task == "qar":
rationale_label = txt_dump['qar_target']
assert rationale_label >= 0, "rationale_label < 0"
input_ids_r = txt_dump['input_ids_rs'][rationale_label]
type_ids_r = [3]*len(input_ids_r)
if mask:
input_ids_r, txt_labels_r = random_word(
input_ids_r, self.txt_db.v_range,
self.txt_db.mask)
else:
txt_labels_r = input_ids_r
input_ids += [self.txt_db.sep] + input_ids_r
type_ids += [2] + type_ids_r
txt_labels += [-1] + txt_labels_r
if mask:
return input_ids, type_ids, txt_labels
else:
return input_ids, type_ids
def combine_txt_inputs(self, input_ids, txt_type_ids, txt_labels=None):
input_ids = torch.tensor([self.txt_db.cls_]
+ input_ids
+ [self.txt_db.sep])
txt_type_ids = torch.tensor(
[txt_type_ids[0]] + txt_type_ids
+ [txt_type_ids[-1]])
if txt_labels is not None:
txt_labels = torch.tensor([-1] + txt_labels + [-1])
return input_ids, txt_type_ids, txt_labels
return input_ids, txt_type_ids
def vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks):
# text batches
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
txt_type_ids = pad_sequence(txt_type_ids, batch_first=True,
padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'txt_type_ids': txt_type_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
class MlmDatasetForVCR(VcrPretrainDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create_mlm_io(self, example):
(input_ids, txt_type_ids,
txt_labels) = self._get_input_ids(example, mask=True)
return self.combine_txt_inputs(
input_ids, txt_type_ids, txt_labels)
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
# txt inputs, create mlm io
input_ids, txt_type_ids, txt_labels = self.create_mlm_io(example)
attn_masks = torch.ones(
len(input_ids) + num_bb,
dtype=torch.long)
return (input_ids, txt_type_ids, img_feat,
img_pos_feat, attn_masks, txt_labels)
def mlm_collate_for_vcr(inputs):
(input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks,
txt_labels) = map(list, unzip(inputs))
batch = vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks)
txt_labels = pad_sequence(txt_labels, batch_first=True, padding_value=-1)
batch['txt_labels'] = txt_labels
return batch
class MrfrDatasetForVCR(VcrPretrainDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def __getitem__(self, i):
example = super().__getitem__(i)
# text input
input_ids, txt_type_ids = self._get_input_ids(example, mask=False)
input_ids, txt_type_ids = self.combine_txt_inputs(
input_ids, txt_type_ids)
# image input features
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, txt_type_ids, img_feat, img_pos_feat,
attn_masks, img_mask, img_mask_tgt)
def mrfr_collate_for_vcr(inputs):
(input_ids, txt_type_ids, img_feats, img_pos_feats,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
batch = vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks)
# mask features
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
feat_targets = _get_feat_target(batch['img_feat'], img_masks)
img_mask_tgt = pad_sequence(
img_mask_tgts, batch_first=True, padding_value=0)
batch['img_feat'] = _mask_img_feat(batch['img_feat'], img_masks)
batch['img_masks'] = img_masks
batch['feat_targets'] = feat_targets
batch['img_mask_tgt'] = img_mask_tgt
return batch
class MrcDatasetForVCR(VcrPretrainDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def _get_img_feat_for_db(self, img_db, fname):
img_dump = img_db.get_dump(fname)
img_feat = torch.tensor(img_dump['features'])
bb = torch.tensor(img_dump['norm_bb'])
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_soft_label = torch.tensor(img_dump['soft_labels'])
return img_feat, img_bb, img_soft_label
def _get_img_feat(self, fname_gt, fname):
if self.img_db and self.img_db_gt:
(img_feat_gt, img_bb_gt,
img_soft_label_gt) = self._get_img_feat_for_db(
self.img_db_gt, fname_gt)
(img_feat, img_bb,
img_soft_label) = self._get_img_feat_for_db(
self.img_db, fname)
img_feat = torch.cat([img_feat_gt, img_feat], dim=0)
img_bb = torch.cat([img_bb_gt, img_bb], dim=0)
img_soft_label = torch.cat(
[img_soft_label_gt, img_soft_label], dim=0)
elif self.img_db:
(img_feat, img_bb,
img_soft_label) = self._get_img_feat_for_db(
self.img_db, fname)
else:
(img_feat, img_bb,
img_soft_label) = self._get_img_feat_for_db(
self.img_db_gt, fname_gt)
num_bb = img_feat.size(0)
return img_feat, img_bb, img_soft_label, num_bb
def __getitem__(self, i):
example = super().__getitem__(i)
# text input
input_ids, txt_type_ids = self._get_input_ids(example, mask=False)
input_ids, txt_type_ids = self.combine_txt_inputs(
input_ids, txt_type_ids)
# image input features
img_feat, img_pos_feat, img_soft_labels, num_bb = self._get_img_feat(
example['img_fname'][0], example['img_fname'][1])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, txt_type_ids, img_feat, img_pos_feat,
img_soft_labels, attn_masks, img_mask, img_mask_tgt)
def mrc_collate_for_vcr(inputs):
(input_ids, txt_type_ids, img_feats, img_pos_feats, img_soft_labels,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
num_bbs = [f.size(0) for f in img_feats]
batch = vcr_pretrain_collate(
input_ids, txt_type_ids, img_feats,
img_pos_feats, attn_masks)
# mask features
img_soft_label = pad_tensors(img_soft_labels, num_bbs)
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
label_targets = _get_targets(img_masks, img_soft_label)
img_mask_tgt = pad_sequence(
img_mask_tgts, batch_first=True, padding_value=0)
batch['img_feat'] = _mask_img_feat(batch['img_feat'], img_masks)
batch['img_masks'] = img_masks
batch['label_targets'] = label_targets
batch['img_mask_tgt'] = img_mask_tgt
return batch
| 9,933 | 35.255474 | 77 | py |
UNITER | UNITER-master/data/nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
NLVR2 dataset
"""
import copy
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
get_ids_and_lens, pad_tensors, get_gather_index)
class Nlvr2PairedDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, use_img_type=True):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
self.lens = [2*tl + sum(self.img_db.name2nbb[img]
for img in txt2img[id_])
for tl, id_ in zip(txt_lens, self.ids)]
self.use_img_type = use_img_type
def __getitem__(self, i):
"""
[[txt, img1],
[txt, img2]]
"""
example = super().__getitem__(i)
target = example['target']
outs = []
for i, img in enumerate(example['img_fname']):
img_feat, img_pos_feat, num_bb = self._get_img_feat(img)
# text input
input_ids = copy.deepcopy(example['input_ids'])
input_ids = [self.txt_db.cls_] + input_ids + [self.txt_db.sep]
attn_masks = [1] * (len(input_ids) + num_bb)
input_ids = torch.tensor(input_ids)
attn_masks = torch.tensor(attn_masks)
if self.use_img_type:
img_type_ids = torch.tensor([i+1]*num_bb)
else:
img_type_ids = None
outs.append((input_ids, img_feat, img_pos_feat,
attn_masks, img_type_ids))
return tuple(outs), target
def nlvr2_paired_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks,
img_type_ids) = map(list, unzip(concat(outs for outs, _ in inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
if img_type_ids[0] is None:
img_type_ids = None
else:
img_type_ids = pad_sequence(img_type_ids,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.Tensor([t for _, t in inputs]).long()
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_type_ids': img_type_ids,
'targets': targets}
return batch
class Nlvr2PairedEvalDataset(Nlvr2PairedDataset):
def __getitem__(self, i):
qid = self.ids[i]
outs, targets = super().__getitem__(i)
return qid, outs, targets
def nlvr2_paired_eval_collate(inputs):
qids, batch = [], []
for id_, *tensors in inputs:
qids.append(id_)
batch.append(tensors)
batch = nlvr2_paired_collate(batch)
batch['qids'] = qids
return batch
class Nlvr2TripletDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, use_img_type=True):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
txt_lens, self.ids = get_ids_and_lens(txt_db)
txt2img = txt_db.txt2img
self.lens = [tl + sum(self.img_db.name2nbb[img]
for img in txt2img[id_])
for tl, id_ in zip(txt_lens, self.ids)]
self.use_img_type = use_img_type
def __getitem__(self, i):
"""
[[txt, img1],
[txt, img2]]
"""
example = super().__getitem__(i)
target = example['target']
img_feats = []
img_pos_feats = []
num_bb = 0
img_type_ids = []
for i, img in enumerate(example['img_fname']):
feat, pos, nbb = self._get_img_feat(img)
img_feats.append(feat)
img_pos_feats.append(pos)
num_bb += nbb
if self.use_img_type:
img_type_ids.extend([i+1]*nbb)
img_feat = torch.cat(img_feats, dim=0)
img_pos_feat = torch.cat(img_pos_feats, dim=0)
if self.use_img_type:
img_type_ids = torch.tensor(img_type_ids)
else:
img_type_ids = None
# text input
input_ids = copy.deepcopy(example['input_ids'])
input_ids = [self.txt_db.cls_] + input_ids + [self.txt_db.sep]
attn_masks = [1] * (len(input_ids) + num_bb)
input_ids = torch.tensor(input_ids)
attn_masks = torch.tensor(attn_masks)
return (input_ids, img_feat, img_pos_feat, attn_masks,
img_type_ids, target)
def nlvr2_triplet_collate(inputs):
(input_ids, img_feats, img_pos_feats,
attn_masks, img_type_ids, targets) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# image batches
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
if img_type_ids[0] is None:
img_type_ids = None
else:
img_type_ids = pad_sequence(img_type_ids,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.Tensor(targets).long()
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_type_ids': img_type_ids,
'targets': targets}
return batch
class Nlvr2TripletEvalDataset(Nlvr2TripletDataset):
def __getitem__(self, i):
qid = self.ids[i]
tensors = super().__getitem__(i)
return (qid, *tensors)
def nlvr2_triplet_eval_collate(inputs):
qids, batch = [], []
for id_, *tensors in inputs:
qids.append(id_)
batch.append(tensors)
batch = nlvr2_triplet_collate(batch)
batch['qids'] = qids
return batch
| 7,186 | 31.817352 | 76 | py |
UNITER | UNITER-master/data/loader.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
A prefetch loader to speedup data loading
Modified from Nvidia Deep Learning Examples
(https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch).
"""
import random
import torch
from torch.utils.data import DataLoader
from utils.distributed import any_broadcast
class MetaLoader(object):
""" wraps multiple data loaders """
def __init__(self, loaders, accum_steps=1, distributed=False):
assert isinstance(loaders, dict)
self.name2loader = {}
self.name2iter = {}
self.sampling_pools = []
for n, l in loaders.items():
if isinstance(l, tuple):
l, r = l
elif isinstance(l, DataLoader):
r = 1
else:
raise ValueError()
self.name2loader[n] = l
self.name2iter[n] = iter(l)
self.sampling_pools.extend([n]*r)
self.accum_steps = accum_steps
self.distributed = distributed
self.step = 0
def __iter__(self):
""" this iterator will run indefinitely """
task = self.sampling_pools[0]
while True:
if self.step % self.accum_steps == 0:
task = random.choice(self.sampling_pools)
if self.distributed:
# make sure all process is training same task
task = any_broadcast(task, 0)
self.step += 1
iter_ = self.name2iter[task]
try:
batch = next(iter_)
except StopIteration:
iter_ = iter(self.name2loader[task])
batch = next(iter_)
self.name2iter[task] = iter_
yield task, batch
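# --- Illustrative usage (not part of the original loader code) ---
# A sketch of mixing two tasks with a 2:1 sampling ratio.  The tiny
# TensorDatasets stand in for real task loaders, and islice cuts the loop
# short because the iterator above never terminates.
def _demo_meta_loader():
    from itertools import islice
    from torch.utils.data import TensorDataset
    loader_a = DataLoader(TensorDataset(torch.arange(8)), batch_size=2)
    loader_b = DataLoader(TensorDataset(torch.arange(6)), batch_size=3)
    meta = MetaLoader({'task_a': (loader_a, 2), 'task_b': (loader_b, 1)})
    seen = [task for task, _ in islice(meta, 10)]
    assert len(seen) == 10 and set(seen) <= {'task_a', 'task_b'}
    return seen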
def move_to_cuda(batch):
if isinstance(batch, torch.Tensor):
return batch.cuda(non_blocking=True)
elif isinstance(batch, list):
new_batch = [move_to_cuda(t) for t in batch]
elif isinstance(batch, tuple):
new_batch = tuple(move_to_cuda(t) for t in batch)
elif isinstance(batch, dict):
new_batch = {n: move_to_cuda(t) for n, t in batch.items()}
else:
return batch
return new_batch
def record_cuda_stream(batch):
if isinstance(batch, torch.Tensor):
batch.record_stream(torch.cuda.current_stream())
elif isinstance(batch, list) or isinstance(batch, tuple):
for t in batch:
record_cuda_stream(t)
elif isinstance(batch, dict):
for t in batch.values():
record_cuda_stream(t)
else:
pass
class PrefetchLoader(object):
"""
overlap compute and cuda data transfer
(copied and then modified from nvidia apex)
"""
def __init__(self, loader):
self.loader = loader
self.stream = torch.cuda.Stream()
def __iter__(self):
loader_it = iter(self.loader)
self.preload(loader_it)
batch = self.next(loader_it)
while batch is not None:
yield batch
batch = self.next(loader_it)
def __len__(self):
return len(self.loader)
def preload(self, it):
try:
self.batch = next(it)
except StopIteration:
self.batch = None
return
# if record_stream() doesn't work, another option is to make sure
# device inputs are created on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input,
# device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target,
# device='cuda')
# Need to make sure the memory allocated for next_* is not still in use
# by the main stream at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.batch = move_to_cuda(self.batch)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this
# side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
def next(self, it):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is not None:
record_cuda_stream(batch)
self.preload(it)
return batch
def __getattr__(self, name):
method = self.loader.__getattribute__(name)
return method
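# --- Illustrative usage (not part of the original loader code) ---
# PrefetchLoader wraps an existing DataLoader; iteration order and batch
# contents are unchanged, batches simply arrive already on the GPU.  The
# sketch is guarded because the wrapper needs a CUDA device.
def _demo_prefetch_loader():
    from torch.utils.data import TensorDataset
    if not torch.cuda.is_available():
        return None
    loader = DataLoader(TensorDataset(torch.arange(16)), batch_size=4)
    prefetch = PrefetchLoader(loader)
    batches = [b for (b,) in prefetch]
    assert all(b.is_cuda for b in batches)
    assert len(prefetch) == len(loader)
    return batches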
| 4,747 | 32.202797 | 79 | py |
UNITER | UNITER-master/data/re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Referring Expression dataset
"""
import random
import numpy as np
import json
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
TxtLmdb, pad_tensors, get_gather_index)
class ReTxtTokLmdb(TxtTokLmdb):
def __init__(self, db_dir, max_txt_len=120):
# load refs = [{ref_id, sent_ids, ann_id, image_id, sentences, split}]
refs = json.load(open(f'{db_dir}/refs.json', 'r'))
self.ref_ids = [ref['ref_id'] for ref in refs]
self.Refs = {ref['ref_id']: ref for ref in refs}
# load annotations = [{id, area, bbox, image_id, category_id}]
anns = json.load(open(f'{db_dir}/annotations.json', 'r'))
self.Anns = {ann['id']: ann for ann in anns}
# load categories = [{id, name, supercategory}]
categories = json.load(open(f'{db_dir}/categories.json', 'r'))
self.Cats = {cat['id']: cat['name'] for cat in categories}
# load images = [{id, file_name, ann_ids, height, width}]
images = json.load(open(f'{db_dir}/images.json', 'r'))
self.Images = {img['id']: img for img in images}
if max_txt_len == -1:
self.id2len = json.load(open(f'{db_dir}/id2len.json'))
else:
self.id2len = {
id_: len_
for id_, len_ in json.load(open(f'{db_dir}/id2len.json')
).items()
if len_ <= max_txt_len
}
self.max_txt_len = max_txt_len
# self.sent_ids = self._get_sent_ids()
self.db_dir = db_dir
self.db = TxtLmdb(db_dir, readonly=True)
meta = json.load(open(f'{db_dir}/meta.json', 'r'))
self.cls_ = meta['CLS']
self.sep = meta['SEP']
self.mask = meta['MASK']
self.v_range = meta['v_range']
def _get_sent_ids(self):
sent_ids = []
for ref_id in self.ref_ids:
for sent_id in self.Refs[ref_id]['sent_ids']:
sent_len = self.id2len[str(sent_id)]
if self.max_txt_len == -1 or sent_len < self.max_txt_len:
sent_ids.append(str(sent_id))
return sent_ids
def shuffle(self):
# we shuffle ref_ids and make sent_ids according to ref_ids
random.shuffle(self.ref_ids)
self.sent_ids = self._get_sent_ids()
def __getitem__(self, id_):
# sent_id = self.sent_ids[i]
txt_dump = self.db[id_]
return txt_dump
class ReDetectFeatTxtTokDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db):
assert isinstance(txt_db, ReTxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
self.ids = self.txt_db._get_sent_ids()
def __getitem__(self, i):
id_ = self.ids[i]
example = self.txt_db[id_]
return example
def shuffle(self):
self.txt_db.shuffle()
class ReDataset(ReDetectFeatTxtTokDataset):
def __getitem__(self, i):
"""
Return:
:input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0]
:position_ids : range(L)
:img_feat : (num_bb, d)
:img_pos_feat : (num_bb, 7)
:attn_masks : (L+num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
:obj_masks : (num_bb, ) all 0's
:target : (1, )
"""
# {sent_id, sent, ref_id, ann_id, image_id, bbox, input_ids}
example = super().__getitem__(i)
image_id = example['image_id']
fname = f'visual_grounding_coco_gt_{int(image_id):012}.npz'
img_feat, img_pos_feat, num_bb = self._get_img_feat(fname)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
# target bbox
img = self.txt_db.Images[image_id]
assert len(img['ann_ids']) == num_bb, \
'Please use visual_grounding_coco_gt'
target = img['ann_ids'].index(example['ann_id'])
target = torch.tensor([target])
# obj_masks, to be padded with 1, for masking out non-object prob.
obj_masks = torch.tensor([0]*len(img['ann_ids']), dtype=torch.uint8)
return input_ids, img_feat, img_pos_feat, attn_masks, obj_masks, target
def re_collate(inputs):
"""
Return:
:input_ids : (n, max_L) padded with 0
:position_ids : (n, max_L) padded with 0
:txt_lens : list of [txt_len]
:img_feat : (n, max_num_bb, feat_dim)
:img_pos_feat : (n, max_num_bb, 7)
:num_bbs : list of [num_bb]
:attn_masks : (n, max_{L+num_bb}) padded with 0
:obj_masks : (n, max_num_bb) padded with 1
:targets : (n, )
"""
(input_ids, img_feats, img_pos_feats, attn_masks, obj_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.stack(targets, dim=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
obj_masks = pad_sequence(
obj_masks, batch_first=True, padding_value=1)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
return {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'obj_masks': obj_masks,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets,
'txt_lens': txt_lens,
'num_bbs': num_bbs}
class ReEvalDataset(ReDetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, use_gt_feat=True):
super().__init__(txt_db, img_db)
self.use_gt_feat = use_gt_feat
def __getitem__(self, i):
"""
Return:
:input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0]
:position_ids : range(L)
:img_feat : (num_bb, d)
:img_pos_feat : (num_bb, 7)
:attn_masks : (L+num_bb, ), i.e., [1, 1, ..., 0, 0, 1, 1]
:obj_masks : (num_bb, ) all 0's
:tgt_box : ndarray (4, ) xywh
:obj_boxes : ndarray (num_bb, 4) xywh
:sent_id
"""
# {sent_id, sent, ref_id, ann_id, image_id, bbox, input_ids}
sent_id = self.ids[i]
example = super().__getitem__(i)
image_id = example['image_id']
if self.use_gt_feat:
fname = f'visual_grounding_coco_gt_{int(image_id):012}.npz'
else:
fname = f'visual_grounding_det_coco_{int(image_id):012}.npz'
img_feat, img_pos_feat, num_bb = self._get_img_feat(fname)
# image info
img = self.txt_db.Images[image_id]
im_width, im_height = img['width'], img['height']
# object boxes, img_pos_feat (xyxywha) -> xywh
obj_boxes = np.stack([img_pos_feat[:, 0]*im_width,
img_pos_feat[:, 1]*im_height,
img_pos_feat[:, 4]*im_width,
img_pos_feat[:, 5]*im_height], axis=1)
obj_masks = torch.tensor([0]*num_bb, dtype=torch.uint8)
# target box
tgt_box = np.array(example['bbox']) # xywh
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat, attn_masks, obj_masks,
tgt_box, obj_boxes, sent_id)
# IoU function
def computeIoU(self, box1, box2):
# each box is of [x1, y1, w, h]
inter_x1 = max(box1[0], box2[0])
inter_y1 = max(box1[1], box2[1])
inter_x2 = min(box1[0]+box1[2]-1, box2[0]+box2[2]-1)
inter_y2 = min(box1[1]+box1[3]-1, box2[1]+box2[3]-1)
if inter_x1 < inter_x2 and inter_y1 < inter_y2:
inter = (inter_x2-inter_x1+1)*(inter_y2-inter_y1+1)
else:
inter = 0
union = box1[2]*box1[3] + box2[2]*box2[3] - inter
return float(inter)/union
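# --- Illustrative check (not part of the original dataset code) ---
# computeIoU works on [x, y, w, h] boxes and treats coordinates as pixel
# indices (hence the +1/-1 terms).  `self` is unused, so the method is
# called unbound here on hand-picked boxes.
def _demo_compute_iou():
    box1 = [0, 0, 10, 10]          # 10x10 box at the origin
    box2 = [5, 5, 10, 10]          # shifted so the overlap is 5x5 pixels
    iou = ReEvalDataset.computeIoU(None, box1, box2)
    assert abs(iou - 25 / 175) < 1e-6
    return iou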
def re_eval_collate(inputs):
"""
Return:
:input_ids : (n, max_L)
:position_ids : (n, max_L)
:txt_lens : list of [txt_len]
:img_feat : (n, max_num_bb, d)
:img_pos_feat : (n, max_num_bb, 7)
:num_bbs : list of [num_bb]
:attn_masks : (n, max{L+num_bb})
:obj_masks : (n, max_num_bb)
:tgt_box : list of n [xywh]
:obj_boxes : list of n [[xywh, xywh, ...]]
:sent_ids : list of n [sent_id]
"""
(input_ids, img_feats, img_pos_feats, attn_masks, obj_masks,
tgt_box, obj_boxes, sent_ids) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
obj_masks = pad_sequence(
obj_masks, batch_first=True, padding_value=1)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
return {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'obj_masks': obj_masks,
'attn_masks': attn_masks,
'gather_index': gather_index,
'tgt_box': tgt_box,
'obj_boxes': obj_boxes,
'sent_ids': sent_ids,
'txt_lens': txt_lens,
'num_bbs': num_bbs}
| 10,442 | 35.260417 | 79 | py |
UNITER | UNITER-master/data/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Itm dataset
"""
from collections import defaultdict
import copy
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
import numpy as np
from .data import (DetectFeatTxtTokDataset, DetectFeatLmdb, TxtTokLmdb,
pad_tensors, get_gather_index, get_ids_and_lens)
from .sampler import TokenBucketSampler
class TokenBucketSamplerForItm(TokenBucketSampler):
def __init__(self, dset, *args, **kwargs):
super().__init__(dset.lens, *args, **kwargs)
self.dset = dset
def __iter__(self):
it = super().__iter__()
self.dset.new_epoch()
self._lens = self.dset.lens
return it
def _has_overlap(la, lb):
if len(la) < len(lb):
la, lb = lb, la
s = set(la)
return any(b in s for b in lb)
def sample_negative(sample_pool, ground_truths, num_sample):
""" random and retry """
outputs = ground_truths[:1]
while _has_overlap(outputs, ground_truths):
outputs = random.sample(sample_pool, num_sample)
return outputs
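# --- Illustrative check (not part of the original dataset code) ---
# sample_negative keeps re-drawing until the draw shares nothing with the
# ground-truth set, so the returned image is always a true negative.  The
# file names below are made up.
def _demo_sample_negative():
    random.seed(0)
    pool = [f'img_{i}' for i in range(20)]
    ground_truths = ['img_3', 'img_7']
    neg = sample_negative(pool, ground_truths, num_sample=1)
    assert len(neg) == 1 and neg[0] not in ground_truths
    return neg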
class ItmDataset(DetectFeatTxtTokDataset):
""" NOTE this Dataset handles distributed training itself
(for more efficient negative sampling) """
def __init__(self, txt_db, img_db, neg_sample_p=0.5):
assert isinstance(txt_db, TxtTokLmdb)
assert isinstance(img_db, DetectFeatLmdb)
self.txt_db = txt_db
self.img_db = img_db
self.txt_lens, self.ids = get_ids_and_lens(txt_db)
self.all_imgs = list(set(txt_db[id_]['img_fname'] for id_ in self.ids))
self.neg_sample_p = neg_sample_p
self.new_epoch()
def new_epoch(self):
""" should be called every epoch for more randomness"""
self.labels = np.random.choice(
[0, 1], size=len(self.ids),
p=[self.neg_sample_p, 1-self.neg_sample_p])
self.lens = []
self.train_imgs = []
for i, (id_, tl) in enumerate(zip(self.ids, self.txt_lens)):
img_fname = super().__getitem__(i)['img_fname']
if self.labels[i] == 0:
img_fname = sample_negative(self.all_imgs, [img_fname], 1)[0]
self.train_imgs.append(img_fname)
self.lens.append(tl + self.img_db.name2nbb[img_fname])
def __getitem__(self, i):
example = super().__getitem__(i)
# labels and negative images should be sampled every epoch
ground_truth_label = self.labels[i]
img_fname = self.train_imgs[i]
img_feat, img_pos_feat, num_bb = self._get_img_feat(img_fname)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
target = torch.Tensor(1).long()
target.data.fill_(ground_truth_label)
return input_ids, img_feat, img_pos_feat, attn_masks, target
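# Sketch of how ItmDataset is typically wired up for training; the paths and
# hyper-parameters below (conf_th, bb counts, bucket/batch sizes) are
# placeholders, not values taken from this repo's configs.
def _example_itm_loader(txt_db_path, img_db_path):
    from torch.utils.data import DataLoader
    txt_db = TxtTokLmdb(txt_db_path, 60)
    img_db = DetectFeatLmdb(img_db_path, 0.2, 100, 10, 36, False)
    dset = ItmDataset(txt_db, img_db, neg_sample_p=0.5)
    sampler = TokenBucketSamplerForItm(dset, bucket_size=8192,
                                       batch_size=8192, droplast=True)
    return DataLoader(dset, batch_sampler=sampler, num_workers=4,
                      collate_fn=itm_collate)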
def itm_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.cat(targets, dim=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets}
return batch
def _compute_ot_scatter(txt_lens, max_txt_len, joint_len):
ot_scatter = torch.arange(0, joint_len, dtype=torch.long
).unsqueeze(0).repeat(len(txt_lens), 1)
for i, tl in enumerate(txt_lens):
max_ind = max_txt_len + (joint_len-tl)
ot_scatter.data[i, tl:] = torch.arange(max_txt_len, max_ind,
dtype=torch.long).data
return ot_scatter
def _compute_pad(lens, max_len):
pad = torch.zeros(len(lens), max_len, dtype=torch.uint8)
for i, l in enumerate(lens):
pad.data[i, l:].fill_(1)
return pad
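# Worked shape example for the two OT helpers above: two captions of length
# 3 and 5 padded to 5, with a joint (text + image) length of 9.
def _example_ot_helpers():
    txt_lens = [3, 5]
    ot_scatter = _compute_ot_scatter(txt_lens, max_txt_len=5, joint_len=9)
    txt_pad = _compute_pad(txt_lens, 5)
    assert ot_scatter.shape == (2, 9) and txt_pad.shape == (2, 5)
    return ot_scatter, txt_pad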
def itm_ot_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks, targets
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
targets = torch.cat(targets, dim=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
# OT inputs
max_tl = max(txt_lens)
max_nbb = max(num_bbs)
ot_scatter = _compute_ot_scatter(txt_lens, max_tl, attn_masks.size(1))
txt_pad = _compute_pad(txt_lens, max_tl)
img_pad = _compute_pad(num_bbs, max_nbb)
ot_inputs = {'ot_scatter': ot_scatter,
'scatter_max': ot_scatter.max().item(),
'txt_pad': txt_pad,
'img_pad': img_pad}
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'targets': targets,
'ot_inputs': ot_inputs}
return batch
class ItmRankDataset(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, neg_sample_size=1):
assert neg_sample_size > 0, \
"ItmRankDataset need at least 1 negative sample"
super().__init__(txt_db, img_db)
txt2img = self.txt_db.txt2img
self.txt2img = {id_: txt2img[id_] for id_ in self.ids}
        # build the reverse mapping from each image to its paired texts
self.img2txts = defaultdict(list)
for id_, img in self.txt2img.items():
self.img2txts[img].append(id_)
self.img_name_list = list(self.img2txts.keys())
assert neg_sample_size > 0
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_fname = self.txt2img[gt_txt_id]
id_pairs = [(gt_txt_id, gt_img_fname)]
# sample negatives
neg_sample_img_ids = sample_negative(
self.img_name_list, [gt_img_fname], self.neg_sample_size)
neg_sample_txt_ids = sample_negative(
self.ids, self.img2txts[gt_img_fname], self.neg_sample_size)
id_pairs.extend([(gt_txt_id, neg_img_id)
for neg_img_id in neg_sample_img_ids] +
[(neg_txt_id, gt_img_fname)
for neg_txt_id in neg_sample_txt_ids])
inputs = self._collect_inputs(id_pairs)
assert len(inputs) == (1 + 2*self.neg_sample_size)
return inputs
def _collect_inputs(self, id_pairs):
# create input features
inputs = []
for txt_id, img_id in id_pairs:
example = self.txt_db[txt_id]
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
# img input
img_feat, img_pos_feat, num_bb = self._get_img_feat(img_id)
# mask
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
inputs.append((input_ids, img_feat, img_pos_feat, attn_masks))
return inputs
def itm_rank_collate(inputs):
(input_ids, img_feats, img_pos_feats, attn_masks,
) = map(list, unzip(concat(i for i in inputs)))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
sample_size = len(inputs[0])
assert all(sample_size == len(i) for i in inputs)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'sample_size': sample_size}
return batch
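# Each ItmRankDataset item already packs 1 positive and 2*neg_sample_size
# negative pairs, so itm_rank_collate flattens them and reports `sample_size`
# for reshaping the scores per query. The wiring below is illustrative only;
# batch_size and shuffling are placeholders.
def _example_rank_loader(txt_db, img_db):
    from torch.utils.data import DataLoader
    dset = ItmRankDataset(txt_db, img_db, neg_sample_size=1)
    return DataLoader(dset, batch_size=8, shuffle=True,
                      collate_fn=itm_rank_collate)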
class ItmRankDatasetHardNegFromText(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, neg_sample_size=1):
assert neg_sample_size > 0, "need at least 1 negative sample"
super().__init__(txt_db, img_db)
txt2img = self.txt_db.txt2img
self.txt2img = {id_: txt2img[id_] for id_ in self.ids}
self.img2txts = self.txt_db.img2txts
self.img_name_list = list(self.img2txts.keys())
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_fname = self.txt2img[gt_txt_id]
input_ids = self.txt_db[gt_txt_id]['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
input_ids = input_ids.unsqueeze(0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
neg_img_ids = sample_negative(
self.img_name_list, [gt_img_fname], self.neg_sample_size)
img_ids = [gt_img_fname] + neg_img_ids
# process image features (gt always first)
img_feats, img_pos_feats, num_bbs = map(
list, unzip(map(self._get_img_feat, img_ids)))
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
tl = input_ids.size(1)
attn_masks = torch.zeros(len(img_ids), max(num_bbs) + tl).long()
for i, nbb in enumerate(num_bbs):
attn_masks.data[i, :tl+nbb].fill_(1)
out_size = attn_masks.size(1)
gather_index = get_gather_index([tl]*len(img_ids), num_bbs,
len(img_ids), tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
class ItmRankDatasetHardNegFromImage(DetectFeatTxtTokDataset):
def __init__(self, txt_db, img_db, neg_sample_size=1):
assert neg_sample_size > 0, "need at least 1 negative sample"
super().__init__(txt_db, img_db)
txt2img = self.txt_db.txt2img
self.txt2img = {id_: txt2img[id_] for id_ in self.ids}
self.img2txts = self.txt_db.img2txts
self.txt_name_list = list(self.txt2img.keys())
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_id = self.txt2img[gt_txt_id]
gt_txt_ids = self.img2txts[gt_img_id]
# process image features (gt always first)
img_feat, img_pos_feat, nbb = self._get_img_feat(gt_img_id)
img_feat = img_feat.unsqueeze(0)
img_pos_feat = img_pos_feat.unsqueeze(0)
# sample negative
neg_txt_ids = sample_negative(
self.txt_name_list, gt_txt_ids, self.neg_sample_size)
txt_ids = [gt_txt_id] + neg_txt_ids
# process text inputs
all_inputs = []
txt_lens = []
for txt_id in txt_ids:
input_ids = self.txt_db.combine_inputs(
self.txt_db[txt_id]['input_ids'])
all_inputs.append(input_ids)
txt_lens.append(len(input_ids))
input_ids = pad_sequence(all_inputs, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
attn_masks = torch.zeros(len(txt_ids), max(txt_lens) + nbb).long()
for i, tl in enumerate(txt_lens):
attn_masks.data[i, :tl+nbb].fill_(1)
out_size = attn_masks.size(1)
        # use the padded text length (max(txt_lens)), not the leftover loop
        # variable `tl`, so image positions are gathered at the right offset
        gather_index = get_gather_index(txt_lens, [nbb]*len(txt_ids),
                                        len(txt_ids), max(txt_lens),
                                        out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
def itm_rank_hn_collate(inputs):
assert len(inputs) == 1
return inputs[0]
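# The two hard-negative datasets above build a complete mini-batch inside
# __getitem__ (one text against several images, or one image against several
# texts), hence the pass-through collate and batch_size=1 below; the
# neg_sample_size value is a placeholder.
def _example_hard_neg_loader(txt_db, img_db):
    from torch.utils.data import DataLoader
    dset = ItmRankDatasetHardNegFromText(txt_db, img_db, neg_sample_size=31)
    return DataLoader(dset, batch_size=1, shuffle=True,
                      collate_fn=itm_rank_hn_collate)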
class ItmValDataset(DetectFeatTxtTokDataset):
""" For evaluating Image-Text-Retrieval task """
def __init__(self, db_dir, img_dir, mini_batch_size=400):
super().__init__(db_dir, img_dir)
del self.lens
self.txt2img = self.txt_db.txt2img
self.img2txts = self.txt_db.img2txts
self.all_img_ids = list(self.img2txts.keys())
assert len(self.img2txts) >= mini_batch_size > 0
self.bs = mini_batch_size
def _get_batch_ids(self, i):
gt_txt_id = self.ids[i]
gt_img_id = self.txt2img[gt_txt_id]
# sample fixed negatives for each gt image
i = self.all_img_ids.index(gt_img_id)
neg_st = i+1
neg_end = neg_st+self.bs-1
if neg_end > len(self.all_img_ids):
            # wrap around
neg_end -= len(self.all_img_ids)
neg_img_ids = (self.all_img_ids[neg_st:]
+ self.all_img_ids[:neg_end])
else:
neg_img_ids = self.all_img_ids[neg_st:neg_end]
assert len(neg_img_ids) == (self.bs - 1),\
"Did not sample enough neg samples"
return gt_img_id, neg_img_ids
def __getitem__(self, i):
""" this returns list of mini-batches """
gt_img_id, neg_img_ids = self._get_batch_ids(i)
# NOTE 1st one is gt img
batch = self.get_batch(i, [gt_img_id] + neg_img_ids)
return batch
def get_batch(self, i, img_ids):
example = super().__getitem__(i)
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
input_ids = input_ids.unsqueeze(0).expand(len(img_ids), -1).clone()
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
# process image features (gt always first)
img_feats, img_pos_feats, num_bbs = map(
list, unzip(map(self._get_img_feat, img_ids)))
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
tl = input_ids.size(1)
attn_masks = torch.zeros(len(img_ids), max(num_bbs) + tl).long()
for i, nbb in enumerate(num_bbs):
attn_masks.data[i, :tl+nbb].fill_(1)
out_size = attn_masks.size(1)
gather_index = get_gather_index([tl]*len(img_ids), num_bbs,
len(img_ids), tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index}
return batch
def itm_val_collate(inputs):
assert len(inputs) == 1, "input batch size > 1"
return inputs[0]
class ItmEvalDataset(ItmValDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.all_img_ids = sorted(copy.deepcopy(self.all_img_ids),
key=lambda i: self.img_db.name2nbb[i])
def __getitem__(self, i):
mini_batches = []
for st in range(0, len(self.all_img_ids), self.bs):
mini_batches.append(
self.get_batch(i, self.all_img_ids[st:st+self.bs]))
return mini_batches
itm_eval_collate = itm_val_collate
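# Rough sketch of full-retrieval evaluation with ItmEvalDataset: each query
# text is scored against every image in chunks of `mini_batch_size`. The
# `score_fn` argument is a stand-in for the actual model forward pass, and
# the loop assumes the dataset exposes __len__ through its base class.
def _example_eval_loop(eval_dset, score_fn):
    all_scores = []
    for i in range(len(eval_dset)):
        mini_batches = eval_dset[i]
        scores = [score_fn(batch) for batch in mini_batches]
        all_scores.append(torch.cat(scores, dim=0))
    return all_scores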
| 16,959 | 35.162047 | 79 | py |
UNITER | UNITER-master/model/vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VCR model
"""
from collections import defaultdict
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
# from .layer import GELU
from .model import (
UniterPreTrainedModel, UniterModel)
class UniterForVisualCommonsenseReasoning(UniterPreTrainedModel):
""" Finetune UNITER for VCR
"""
def __init__(self, config, img_dim):
super().__init__(config, img_dim)
self.uniter = UniterModel(config, img_dim)
self.vcr_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
nn.ReLU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, 2)
)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.vcr_output(pooled_output)
if compute_loss:
targets = batch['targets']
vcr_loss = F.cross_entropy(
rank_scores, targets.squeeze(-1),
reduction='mean')
return vcr_loss
else:
rank_scores = rank_scores[:, 1:]
return rank_scores
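# Illustrative fine-tuning setup (the config path, img_dim and the number of
# VCR special tokens below are placeholders, not values from this repo): the
# type and word embeddings are expanded before loading task data.
def _example_build_vcr_model(config_file, checkpoint_state_dict):
    model = UniterForVisualCommonsenseReasoning.from_pretrained(
        config_file, checkpoint_state_dict, img_dim=2048)
    model.init_type_embedding()                 # 2 -> 4 token types
    model.init_word_embedding(num_special_tokens=81)
    return model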
| 3,024 | 37.782051 | 80 | py |
UNITER | UNITER-master/model/pretrain.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for pretraining
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU, BertOnlyMLMHead
from .model import UniterModel, UniterPreTrainedModel
from .ot import optimal_transport_dist
class RegionFeatureRegression(nn.Module):
" for MRM"
def __init__(self, hidden_size, feat_dim, img_linear_weight):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
GELU(),
LayerNorm(hidden_size, eps=1e-12))
self.weight = img_linear_weight
self.bias = nn.Parameter(torch.zeros(feat_dim))
def forward(self, input_):
hidden = self.net(input_)
output = F.linear(hidden, self.weight.t(), self.bias)
return output
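# Note on the weight tying above: `img_linear_weight` is the (hidden, feat)
# weight of UniterImageEmbeddings.img_linear, so F.linear(hidden, weight.t())
# projects hidden states back into the original region-feature space; e.g.
# (hypothetical shapes) RegionFeatureRegression(768, 2048, w) maps (B, 768)
# inputs to (B, 2048) reconstructions when w has shape (768, 2048).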
class RegionClassification(nn.Module):
" for MRC(-kl)"
def __init__(self, hidden_size, label_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
GELU(),
LayerNorm(hidden_size, eps=1e-12),
nn.Linear(hidden_size, label_dim))
def forward(self, input_):
output = self.net(input_)
return output
class UniterForPretraining(UniterPreTrainedModel):
""" UNITER pretraining """
def __init__(self, config, img_dim, img_label_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.cls = BertOnlyMLMHead(
config, self.uniter.embeddings.word_embeddings.weight)
self.feat_regress = RegionFeatureRegression(
config.hidden_size, img_dim,
self.uniter.img_embeddings.img_linear.weight)
self.region_classifier = RegionClassification(
config.hidden_size, img_label_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.apply(self.init_weights)
def forward(self, batch, task, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
if task == 'mlm':
txt_labels = batch['txt_labels']
return self.forward_mlm(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
txt_labels, compute_loss)
elif task == 'mrfr':
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrfr_feat_target = batch['feat_targets']
return self.forward_mrfr(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrfr_feat_target, compute_loss)
elif task == 'itm':
targets = batch['targets']
ot_inputs = batch['ot_inputs']
return self.forward_itm(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
targets, ot_inputs, compute_loss)
elif task.startswith('mrc'):
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrc_label_target = batch['label_targets']
return self.forward_mrc(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrc_label_target, task, compute_loss)
else:
raise ValueError('invalid task')
def forward_mlm(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
txt_labels, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
# get only the text part
sequence_output = sequence_output[:, :input_ids.size(1), :]
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
txt_labels != -1)
prediction_scores = self.cls(masked_output)
if compute_loss:
masked_lm_loss = F.cross_entropy(prediction_scores,
txt_labels[txt_labels != -1],
reduction='none')
return masked_lm_loss
else:
return prediction_scores
def _compute_masked_hidden(self, hidden, mask):
""" get only the masked region (don't compute unnecessary hiddens) """
mask = mask.unsqueeze(-1).expand_as(hidden)
hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1))
return hidden_masked
def forward_mrfr(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
feat_targets, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks)
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_feat = self.feat_regress(masked_output)
if compute_loss:
mrfr_loss = F.mse_loss(prediction_feat, feat_targets,
reduction='none')
return mrfr_loss
else:
return prediction_feat
def forward_itm(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index, targets, ot_inputs,
compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
itm_scores = self.itm_output(pooled_output)
# OT loss
if ot_inputs is not None:
ot_scatter = ot_inputs['ot_scatter']
b = sequence_output.size(0)
tl = input_ids.size(1)
il = img_feat.size(1)
max_l = max(ot_inputs['scatter_max'] + 1, tl+il)
ot_scatter = ot_scatter.unsqueeze(-1).expand_as(sequence_output)
ctx_emb = torch.zeros(b, max_l, self.config.hidden_size,
dtype=sequence_output.dtype,
device=sequence_output.device
).scatter_(dim=1, index=ot_scatter,
src=sequence_output)
txt_emb = ctx_emb[:, :tl, :]
img_emb = ctx_emb[:, tl:tl+il, :]
txt_pad = ot_inputs['txt_pad']
img_pad = ot_inputs['img_pad']
# NOTE: run in fp32 for stability
ot_dist = optimal_transport_dist(txt_emb.float(), img_emb.float(),
txt_pad, img_pad).to(txt_emb)
ot_pos_dist = ot_dist.masked_select(targets == 1)
ot_neg_dist = ot_dist.masked_select(targets == 0)
ot_loss = (ot_pos_dist, ot_neg_dist)
else:
ot_loss = None
if compute_loss:
itm_loss = F.cross_entropy(itm_scores, targets, reduction='none')
return itm_loss, ot_loss
else:
return itm_scores, ot_loss
def forward_mrc(self, input_ids, position_ids, img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
label_targets, task, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks)
# only compute masked regions for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_soft_label = self.region_classifier(masked_output)
if compute_loss:
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
mrc_loss = F.kl_div(
prediction_soft_label, label_targets, reduction='none')
else:
# background class should not be the target
label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1
mrc_loss = F.cross_entropy(
prediction_soft_label, label_targets,
ignore_index=0, reduction='none')
return mrc_loss
else:
return prediction_soft_label
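# Shape sketch for the `_compute_masked_hidden` helper used by all three
# heads above: given hidden states of shape (B, L, D) and a boolean mask
# with `n` True entries across the batch, it returns a compact (n, D) tensor
# so the MLM/MRFR/MRC heads only run on masked positions.
def _example_masked_hidden():
    hidden = torch.randn(2, 4, 8)
    mask = torch.zeros(2, 4, dtype=torch.bool)
    mask[0, 1] = True
    mask[1, 3] = True
    gathered = hidden[mask.unsqueeze(-1).expand_as(hidden)].view(-1, 8)
    assert gathered.shape == (2, 8)
    return gathered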
| 10,155 | 43.156522 | 78 | py |
UNITER | UNITER-master/model/layer.py | """
BERT layers from the huggingface implementation
(https://github.com/huggingface/transformers)
"""
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
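# For reference, the tanh-based approximation mentioned in the docstring
# above (OpenAI GPT style); it is not used anywhere in this module and is
# included only as a hedged illustration.
def _gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(
        math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))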
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class GELU(nn.Module):
def forward(self, input_):
output = gelu(input_)
return output
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(
torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config,
bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
| 9,378 | 39.081197 | 104 | py |
UNITER | UNITER-master/model/model.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from .layer import BertLayer, BertPooler
logger = logging.getLogger(__name__)
class UniterConfig(object):
"""Configuration class to store the configuration of a `UniterModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs UniterConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in
`UniterModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer
encoder.
num_attention_heads: Number of attention heads for each attention
layer in the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e.
feed-forward) layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string)
in the encoder and pooler. If string, "gelu", "relu" and
"swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully
connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this
model might ever be used with. Typically set this to something
large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed
into `UniterModel`.
            initializer_range: The stddev of the truncated_normal_initializer
for initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file,
"r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size "
"(int) or the path to a pretrained model config "
"file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `UniterConfig` from a
Python dictionary of parameters."""
config = UniterConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `UniterConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
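# Minimal illustration of the config class above (all values are arbitrary
# and not tied to any released checkpoint):
def _example_config():
    config = UniterConfig(vocab_size_or_config_json_file=28996,
                          hidden_size=768, num_hidden_layers=12,
                          num_attention_heads=12)
    return config.to_json_string()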
class UniterPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, UniterConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of "
"class `UniterConfig`. To create a model from a Google "
"pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses
# truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, FusedLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
"""
Instantiate a UniterPreTrainedModel from a pre-trained model file or a
pytorch state dict.
Params:
config_file: config json file
            state_dict: a state dictionary
*inputs, **kwargs: additional input for the specific Uniter class
"""
# Load config
config = UniterConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = ({} if metadata is None
else metadata.get(prefix[:-1], {}))
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys,
unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.')
for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from "
"pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in "
"{}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for '
'{}:\n\t{}'.format(
model.__class__.__name__,
"\n\t".join(error_msgs)))
return model
class UniterTextEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size,
config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings,
config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model
# variable name and be able to load any TensorFlow checkpoint file
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, position_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (words_embeddings
+ position_embeddings
+ token_type_embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterImageEmbeddings(nn.Module):
def __init__(self, config, img_dim):
super().__init__()
self.img_linear = nn.Linear(img_dim, config.hidden_size)
self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_linear = nn.Linear(7, config.hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
# tf naming convention for layer norm
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterEncoder(nn.Module):
def __init__(self, config):
super().__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, input_, attention_mask,
output_all_encoded_layers=True):
all_encoder_layers = []
hidden_states = input_
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class UniterModel(UniterPreTrainedModel):
""" Modification for Joint Vision-Language Encoding
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.embeddings = UniterTextEmbeddings(config)
self.img_embeddings = UniterImageEmbeddings(config, img_dim)
self.encoder = UniterEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _compute_txt_embeddings(self, input_ids, position_ids,
txt_type_ids=None):
output = self.embeddings(input_ids, position_ids, txt_type_ids)
return output
def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
img_type_ids=None):
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.embeddings.token_type_embeddings(
img_type_ids)
output = self.img_embeddings(img_feat, img_pos_feat,
img_type_embeddings, img_masks)
return output
def _compute_img_txt_embeddings(self, input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks=None,
txt_type_ids=None, img_type_ids=None):
txt_emb = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
# align back to most compact input
gather_index = gather_index.unsqueeze(-1).expand(
-1, -1, self.config.hidden_size)
embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
dim=1, index=gather_index)
return embedding_output
def forward(self, input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index=None, img_masks=None,
output_all_encoded_layers=True,
txt_type_ids=None, img_type_ids=None):
# compute self-attention mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding layer
if input_ids is None:
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
elif img_feat is None:
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks, txt_type_ids, img_type_ids)
encoded_layers = self.encoder(
embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers
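# Hedged smoke test for the joint encoder above; all dimensions are
# arbitrary, the single example is unpadded (so gather_index is just an
# arange), and the apex FusedLayerNorm imported at the top of this file is
# assumed to be available.
def _example_uniter_forward():
    config = UniterConfig(vocab_size_or_config_json_file=28996,
                          hidden_size=48, num_hidden_layers=2,
                          num_attention_heads=4, intermediate_size=96)
    model = UniterModel(config, img_dim=2048)
    tl, nbb = 5, 3
    input_ids = torch.randint(0, 28996, (1, tl))
    position_ids = torch.arange(tl, dtype=torch.long).unsqueeze(0)
    img_feat = torch.randn(1, nbb, 2048)
    img_pos_feat = torch.randn(1, nbb, 7)
    attn_masks = torch.ones(1, tl + nbb, dtype=torch.long)
    gather_index = torch.arange(tl + nbb, dtype=torch.long).unsqueeze(0)
    out = model(input_ids, position_ids, img_feat, img_pos_feat,
                attn_masks, gather_index, output_all_encoded_layers=False)
    assert out.shape == (1, tl + nbb, config.hidden_size)
    return out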
| 15,887 | 42.173913 | 79 | py |
UNITER | UNITER-master/model/vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VQA model
"""
from collections import defaultdict
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU
from .model import UniterPreTrainedModel, UniterModel
class UniterForVisualQuestionAnswering(UniterPreTrainedModel):
""" Finetune UNITER for VQA
"""
def __init__(self, config, img_dim, num_answer):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.vqa_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size*2),
GELU(),
LayerNorm(config.hidden_size*2, eps=1e-12),
nn.Linear(config.hidden_size*2, num_answer)
)
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
answer_scores = self.vqa_output(pooled_output)
if compute_loss:
targets = batch['targets']
vqa_loss = F.binary_cross_entropy_with_logits(
answer_scores, targets, reduction='none')
return vqa_loss
else:
return answer_scores
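# The BCE loss above expects (batch, num_answer) soft targets, one score per
# candidate answer. A minimal, hedged sketch of building a single target row
# from {answer label index: soft score} pairs; how raw annotations map to
# scores is outside this module.
def _example_soft_target(num_answer, label_scores):
    import torch
    target = torch.zeros(num_answer)
    for label, score in label_scores.items():
        target[label] = score
    return target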
| 1,860 | 34.113208 | 75 | py |
UNITER | UNITER-master/model/pretrain_vcr.py | from .pretrain import UniterForPretraining
from torch import nn
from .layer import BertOnlyMLMHead
from collections import defaultdict
from torch.nn import functional as F
import torch
class UniterForPretrainingForVCR(UniterForPretraining):
""" 2nd Stage Pretrain UNITER for VCR
"""
def init_type_embedding(self):
new_emb = nn.Embedding(4, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :]
new_emb.weight.data[2, :].copy_(emb)
new_emb.weight.data[3, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def init_word_embedding(self, num_special_tokens):
orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0)
new_emb = nn.Embedding(
orig_word_num + num_special_tokens, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
emb = self.uniter.embeddings.word_embeddings.weight.data
new_emb.weight.data[:orig_word_num, :].copy_(emb)
self.uniter.embeddings.word_embeddings = new_emb
self.cls = BertOnlyMLMHead(
self.uniter.config, self.uniter.embeddings.word_embeddings.weight)
def forward(self, batch, task, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
txt_type_ids = batch['txt_type_ids']
if task == 'mlm':
txt_labels = batch['txt_labels']
return self.forward_mlm(input_ids, position_ids,
txt_type_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
txt_labels, compute_loss)
elif task == 'mrfr':
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrfr_feat_target = batch['feat_targets']
return self.forward_mrfr(input_ids, position_ids,
txt_type_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrfr_feat_target, compute_loss)
elif task.startswith('mrc'):
img_mask_tgt = batch['img_mask_tgt']
img_masks = batch['img_masks']
mrc_label_target = batch['label_targets']
return self.forward_mrc(input_ids, position_ids,
txt_type_ids, img_feat, img_pos_feat,
attention_mask, gather_index,
img_masks, img_mask_tgt,
mrc_label_target, task, compute_loss)
else:
raise ValueError('invalid task')
# MLM
def forward_mlm(self, input_ids, position_ids, txt_type_ids, img_feat,
img_pos_feat, attention_mask, gather_index,
txt_labels, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
txt_type_ids=txt_type_ids)
# get only the text part
sequence_output = sequence_output[:, :input_ids.size(1), :]
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
txt_labels != -1)
prediction_scores = self.cls(masked_output)
if compute_loss:
masked_lm_loss = F.cross_entropy(prediction_scores,
txt_labels[txt_labels != -1],
reduction='none')
return masked_lm_loss
else:
return prediction_scores
# MRFR
def forward_mrfr(self, input_ids, position_ids, txt_type_ids,
img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
feat_targets, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks,
txt_type_ids=txt_type_ids)
# only compute masked tokens for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_feat = self.feat_regress(masked_output)
if compute_loss:
mrfr_loss = F.mse_loss(prediction_feat, feat_targets,
reduction='none')
return mrfr_loss
else:
return prediction_feat
# MRC
def forward_mrc(self, input_ids, position_ids, txt_type_ids,
img_feat, img_pos_feat,
attention_mask, gather_index, img_masks, img_mask_tgt,
label_targets, task, compute_loss=True):
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False,
img_masks=img_masks,
txt_type_ids=txt_type_ids)
# only compute masked regions for better efficiency
masked_output = self._compute_masked_hidden(sequence_output,
img_mask_tgt)
prediction_soft_label = self.region_classifier(masked_output)
if compute_loss:
if "kl" in task:
prediction_soft_label = F.log_softmax(
prediction_soft_label, dim=-1)
mrc_loss = F.kl_div(
prediction_soft_label, label_targets, reduction='none')
else:
# background class should not be the target
label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1
mrc_loss = F.cross_entropy(
prediction_soft_label, label_targets,
ignore_index=0, reduction='none')
return mrc_loss
else:
return prediction_soft_label
| 7,123 | 46.493333 | 80 | py |
UNITER | UNITER-master/model/nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for NLVR2 model
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
from .model import UniterPreTrainedModel, UniterModel
from .attention import MultiheadAttention
class UniterForNlvr2Paired(UniterPreTrainedModel):
""" Finetune UNITER for NLVR2 (paired format)
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.nlvr2_output = nn.Linear(config.hidden_size*2, 2)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(3, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings\
.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
new_emb.weight.data[2, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
img_type_ids = batch['img_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
img_type_ids=img_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
# concat CLS of the pair
n_pair = pooled_output.size(0) // 2
reshaped_output = pooled_output.contiguous().view(n_pair, -1)
answer_scores = self.nlvr2_output(reshaped_output)
if compute_loss:
targets = batch['targets']
nlvr2_loss = F.cross_entropy(
answer_scores, targets, reduction='none')
return nlvr2_loss
else:
return answer_scores
class UniterForNlvr2Triplet(UniterPreTrainedModel):
""" Finetune UNITER for NLVR2 (triplet format)
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.nlvr2_output = nn.Linear(config.hidden_size, 2)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(3, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings\
.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
new_emb.weight.data[2, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
img_type_ids = batch['img_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
img_type_ids=img_type_ids)
pooled_output = self.uniter.pooler(sequence_output)
answer_scores = self.nlvr2_output(pooled_output)
if compute_loss:
targets = batch['targets']
nlvr2_loss = F.cross_entropy(
answer_scores, targets, reduction='none')
return nlvr2_loss
else:
return answer_scores
class AttentionPool(nn.Module):
""" attention pooling layer """
def __init__(self, hidden_size, drop=0.0):
super().__init__()
self.fc = nn.Sequential(nn.Linear(hidden_size, 1), nn.ReLU())
self.dropout = nn.Dropout(drop)
def forward(self, input_, mask=None):
"""input: [B, T, D], mask = [B, T]"""
score = self.fc(input_).squeeze(-1)
if mask is not None:
mask = mask.to(dtype=input_.dtype) * -1e4
score = score + mask
norm_score = self.dropout(F.softmax(score, dim=1))
output = norm_score.unsqueeze(1).matmul(input_).squeeze(1)
return output
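# Tiny illustration of the masked pooling above (arbitrary shapes/data):
def _example_attention_pool():
    pool = AttentionPool(hidden_size=16)
    hidden = torch.randn(2, 5, 16)
    mask = torch.tensor([[0, 0, 0, 1, 1],
                         [0, 0, 0, 0, 0]])  # 1 marks padded positions
    pooled = pool(hidden, mask)
    assert pooled.shape == (2, 16)
    return pooled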
class UniterForNlvr2PairedAttn(UniterPreTrainedModel):
""" Finetune UNITER for NLVR2
(paired format with additional attention layer)
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.attn1 = MultiheadAttention(config.hidden_size,
config.num_attention_heads,
config.attention_probs_dropout_prob)
self.attn2 = MultiheadAttention(config.hidden_size,
config.num_attention_heads,
config.attention_probs_dropout_prob)
self.fc = nn.Sequential(
nn.Linear(2*config.hidden_size, config.hidden_size),
nn.ReLU(),
nn.Dropout(config.hidden_dropout_prob))
self.attn_pool = AttentionPool(config.hidden_size,
config.attention_probs_dropout_prob)
self.nlvr2_output = nn.Linear(2*config.hidden_size, 2)
self.apply(self.init_weights)
def init_type_embedding(self):
new_emb = nn.Embedding(3, self.uniter.config.hidden_size)
new_emb.apply(self.init_weights)
for i in [0, 1]:
emb = self.uniter.embeddings.token_type_embeddings\
.weight.data[i, :]
new_emb.weight.data[i, :].copy_(emb)
new_emb.weight.data[2, :].copy_(emb)
self.uniter.embeddings.token_type_embeddings = new_emb
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
img_type_ids = batch['img_type_ids']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False,
img_type_ids=img_type_ids)
# separate left image and right image
bs, tl, d = sequence_output.size()
left_out, right_out = sequence_output.contiguous().view(
bs//2, tl*2, d).chunk(2, dim=1)
# bidirectional attention
mask = attn_masks == 0
left_mask, right_mask = mask.contiguous().view(bs//2, tl*2
).chunk(2, dim=1)
left_out = left_out.transpose(0, 1)
right_out = right_out.transpose(0, 1)
l2r_attn, _ = self.attn1(left_out, right_out, right_out,
key_padding_mask=right_mask)
r2l_attn, _ = self.attn2(right_out, left_out, left_out,
key_padding_mask=left_mask)
left_out = self.fc(torch.cat([l2r_attn, left_out], dim=-1)
).transpose(0, 1)
right_out = self.fc(torch.cat([r2l_attn, right_out], dim=-1)
).transpose(0, 1)
# attention pooling and final prediction
left_out = self.attn_pool(left_out, left_mask)
right_out = self.attn_pool(right_out, right_mask)
answer_scores = self.nlvr2_output(
torch.cat([left_out, right_out], dim=-1))
if compute_loss:
targets = batch['targets']
nlvr2_loss = F.cross_entropy(
answer_scores, targets, reduction='none')
return nlvr2_loss
else:
return answer_scores
| 8,505 | 40.492683 | 76 | py |
UNITER | UNITER-master/model/ot.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Wasserstein Distance (Optimal Transport)
"""
import torch
from torch.nn import functional as F
def cost_matrix_cosine(x, y, eps=1e-5):
""" Compute cosine distnace across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]"""
assert x.dim() == y.dim()
assert x.size(0) == y.size(0)
assert x.size(2) == y.size(2)
x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))
cosine_dist = 1 - cosine_sim
return cosine_dist
def trace(x):
""" compute trace of input tensor (batched) """
b, m, n = x.size()
assert m == n
mask = torch.eye(n, dtype=torch.uint8, device=x.device
).unsqueeze(0).expand_as(x)
trace = x.masked_select(mask).contiguous().view(
b, n).sum(dim=-1, keepdim=False)
return trace
@torch.no_grad()
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
""" [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]"""
b, m, n = C.size()
sigma = torch.ones(b, m, dtype=C.dtype, device=C.device
) / x_len.unsqueeze(1)
T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
A = torch.exp(-C.transpose(1, 2)/beta)
# mask padded positions
sigma.masked_fill_(x_pad, 0)
joint_pad = joint_pad.transpose(1, 2)
T.masked_fill_(joint_pad, 0)
A.masked_fill_(joint_pad, 0)
# broadcastable lengths
x_len = x_len.unsqueeze(1).unsqueeze(2)
y_len = y_len.unsqueeze(1).unsqueeze(2)
# mask to zero out padding in delta and sigma
x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
for _ in range(iteration):
Q = A * T # bs * n * m
sigma = sigma.view(b, m, 1)
for _ in range(k):
delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
T = delta.view(b, n, 1) * Q * sigma
T.masked_fill_(joint_pad, 0)
return T
def optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad,
beta=0.5, iteration=50, k=1):
""" [B, M, D], [B, N, D], [B, M], [B, N]"""
cost = cost_matrix_cosine(txt_emb, img_emb)
# mask the padded inputs
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)
).to(dtype=cost.dtype)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)
).to(dtype=cost.dtype)
T = ipot(cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad,
beta, iteration, k)
distance = trace(cost.matmul(T.detach()))
return distance
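# --- Hedged usage sketch (added; not part of the original UNITER code) ---
# Minimal call of optimal_transport_dist on random text/image embeddings; the
# shapes follow the docstrings above and the sizes here are illustrative only.
# Note: trace() above uses a uint8 mask, which newer PyTorch versions may warn about.
if __name__ == '__main__':
    B, M, N, D = 2, 5, 7, 16
    txt_emb = torch.randn(B, M, D)
    img_emb = torch.randn(B, N, D)
    txt_pad = torch.zeros(B, M, dtype=torch.bool) # True marks padded positions
    img_pad = torch.zeros(B, N, dtype=torch.bool)
    txt_pad[:, -1] = True # pretend the last text token is padding
    dist = optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad)
    print(dist.shape) # torch.Size([2]): one OT distance per text-image pair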
| 2,866 | 32.337209 | 74 | py |
UNITER | UNITER-master/model/attention.py | """
copy multi-head attention code from pytorch
(https://github.com/pytorch/pytorch),
"""
import warnings
import torch
from torch.nn import Module, Parameter, Linear
from torch.nn.init import xavier_normal_, xavier_uniform_, constant_
from torch.nn.functional import linear, softmax, dropout
def multi_head_attention_forward(query, # type: Tensor
key, # type: Tensor
value, # type: Tensor
embed_dim_to_check, # type: int
num_heads, # type: int
in_proj_weight, # type: Tensor
in_proj_bias, # type: Tensor
bias_k, # type: Optional[Tensor]
bias_v, # type: Optional[Tensor]
add_zero_attn, # type: bool
dropout_p, # type: float
out_proj_weight, # type: Tensor
out_proj_bias, # type: Tensor
training=True, # type: bool
key_padding_mask=None, # type: Optional[Tensor]
need_weights=True, # type: bool
attn_mask=None, # type: Optional[Tensor]
use_separate_proj_weight=False, # type: bool
q_proj_weight=None, # type: Optional[Tensor]
k_proj_weight=None, # type: Optional[Tensor]
v_proj_weight=None, # type: Optional[Tensor]
static_k=None, # type: Optional[Tensor]
static_v=None # type: Optional[Tensor]
):
# type: (...) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention layer).
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
- attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
qkv_same = torch.equal(query, key) and torch.equal(key, value)
kv_same = torch.equal(key, value)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if use_separate_proj_weight is not True:
if qkv_same:
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif kv_same:
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask,
torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
        vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention layer).
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
- attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
if not hasattr(self, '_qkv_same_embed_dim'):
warnings.warn('A new version of MultiheadAttention module has been implemented. \
Please re-train your model with the new module',
UserWarning)
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
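# --- Hedged usage sketch (added; not part of the original file) ---
# Minimal example of this MultiheadAttention with a key_padding_mask, mirroring
# how the paired cross-attention heads elsewhere in this repo call it; the
# sizes below are illustrative assumptions only.
if __name__ == '__main__':
    L, S, N, E = 4, 6, 2, 32 # tgt len, src len, batch, embed dim
    attn = MultiheadAttention(embed_dim=E, num_heads=4)
    query = torch.randn(L, N, E) # (tgt_len, batch, embed_dim)
    key = torch.randn(S, N, E) # (src_len, batch, embed_dim)
    value = torch.randn(S, N, E)
    key_padding_mask = torch.zeros(N, S, dtype=torch.bool)
    key_padding_mask[:, -1] = True # ignore the last (padded) key position
    out, weights = attn(query, key, value, key_padding_mask=key_padding_mask)
    print(out.shape, weights.shape) # (4, 2, 32) and (2, 4, 6)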
| 19,463 | 47.297767 | 130 | py |
UNITER | UNITER-master/model/re.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for RE model
"""
from collections import defaultdict
import torch
from torch import nn
import random
import numpy as np
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU
from .model import UniterPreTrainedModel, UniterModel
class UniterForReferringExpressionComprehension(UniterPreTrainedModel):
""" Finetune UNITER for RE
"""
def __init__(self, config, img_dim, loss="cls",
margin=0.2, hard_ratio=0.3, mlp=1):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
if mlp == 1:
self.re_output = nn.Linear(config.hidden_size, 1)
elif mlp == 2:
self.re_output = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size),
GELU(),
LayerNorm(config.hidden_size, eps=1e-12),
nn.Linear(config.hidden_size, 1)
)
else:
raise ValueError("MLP restricted to be 1 or 2 layers.")
self.loss = loss
assert self.loss in ['cls', 'rank']
if self.loss == 'rank':
self.margin = margin
self.hard_ratio = hard_ratio
else:
self.crit = nn.CrossEntropyLoss(reduction='none')
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attn_masks = batch['attn_masks']
gather_index = batch['gather_index']
obj_masks = batch['obj_masks']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attn_masks, gather_index,
output_all_encoded_layers=False)
# get only the region part
txt_lens, num_bbs = batch["txt_lens"], batch["num_bbs"]
sequence_output = self._get_image_hidden(
sequence_output, txt_lens, num_bbs)
# re score (n, max_num_bb)
scores = self.re_output(sequence_output).squeeze(2)
scores = scores.masked_fill(obj_masks, -1e4) # mask out non-objects
if compute_loss:
targets = batch["targets"]
if self.loss == 'cls':
ce_loss = self.crit(scores, targets.squeeze(-1)) # (n, ) as no reduction
return ce_loss
else:
# ranking
_n = len(num_bbs)
# positive (target)
pos_ix = targets
pos_sc = scores.gather(1, pos_ix.view(_n, 1)) # (n, 1)
pos_sc = torch.sigmoid(pos_sc).view(-1) # (n, ) sc[0, 1]
# negative
neg_ix = self.sample_neg_ix(scores, targets, num_bbs)
neg_sc = scores.gather(1, neg_ix.view(_n, 1)) # (n, 1)
neg_sc = torch.sigmoid(neg_sc).view(-1) # (n, ) sc[0, 1]
# ranking
mm_loss = torch.clamp(
self.margin + neg_sc - pos_sc, 0) # (n, )
return mm_loss
else:
# (n, max_num_bb)
return scores
def sample_neg_ix(self, scores, targets, num_bbs):
"""
Inputs:
:scores (n, max_num_bb)
:targets (n, )
:num_bbs list of [num_bb]
return:
:neg_ix (n, ) easy/hard negative (!= target)
"""
neg_ix = []
cand_ixs = torch.argsort(
scores, dim=-1, descending=True) # (n, num_bb)
for i in range(len(num_bbs)):
num_bb = num_bbs[i]
if np.random.uniform(0, 1, 1) < self.hard_ratio:
# sample hard negative, w/ highest score
for ix in cand_ixs[i].tolist():
if ix != targets[i]:
assert ix < num_bb, f'ix={ix}, num_bb={num_bb}'
neg_ix.append(ix)
break
else:
# sample easy negative, i.e., random one
ix = random.randint(0, num_bb-1) # [0, num_bb-1]
while ix == targets[i]:
ix = random.randint(0, num_bb-1)
neg_ix.append(ix)
neg_ix = torch.tensor(neg_ix).type(targets.type())
assert neg_ix.numel() == targets.numel()
return neg_ix
def _get_image_hidden(self, sequence_output, txt_lens, num_bbs):
"""
Extracting the img_hidden part from sequence_output.
Inputs:
- sequence_output: (n, txt_len+num_bb, hid_size)
- txt_lens : [txt_len]
- num_bbs : [num_bb]
Output:
- img_hidden : (n, max_num_bb, hid_size)
"""
outputs = []
max_bb = max(num_bbs)
hid_size = sequence_output.size(-1)
for seq_out, len_, nbb in zip(sequence_output.split(1, dim=0),
txt_lens, num_bbs):
img_hid = seq_out[:, len_:len_+nbb, :]
if nbb < max_bb:
img_hid = torch.cat(
[img_hid, self._get_pad(
img_hid, max_bb-nbb, hid_size)],
dim=1)
outputs.append(img_hid)
img_hidden = torch.cat(outputs, dim=0)
return img_hidden
def _get_pad(self, t, len_, hidden_size):
pad = torch.zeros(1, len_, hidden_size, dtype=t.dtype, device=t.device)
return pad
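# --- Hedged illustration (added; not part of the original file) ---
# Tiny numeric sketch of the hinge ranking objective used when loss == 'rank':
# the sigmoid score of the target region should beat a sampled negative by at
# least `margin`. The scores and indices below are made up for illustration.
if __name__ == '__main__':
    scores = torch.tensor([[2.0, 0.5, -1.0]]) # (n=1, max_num_bb=3) region scores
    pos_ix = torch.tensor([0]) # ground-truth region index
    neg_ix = torch.tensor([1]) # a sampled negative region index
    pos_sc = torch.sigmoid(scores.gather(1, pos_ix.view(1, 1))).view(-1)
    neg_sc = torch.sigmoid(scores.gather(1, neg_ix.view(1, 1))).view(-1)
    mm_loss = torch.clamp(0.2 + neg_sc - pos_sc, 0)
    print(mm_loss) # tensor([0.]): the positive already wins by more than the margin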
| 5,705 | 36.051948 | 89 | py |
UNITER | UNITER-master/model/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for ITM model
"""
from collections import defaultdict
import torch
from torch import nn
from .model import UniterPreTrainedModel, UniterModel
class UniterForImageTextRetrieval(UniterPreTrainedModel):
""" Finetune UNITER for image text retrieval
"""
def __init__(self, config, img_dim, margin=0.2):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.rank_output = nn.Linear(config.hidden_size, 1)
self.margin = margin
self.apply(self.init_weights)
def init_output(self):
""" need to be called after from pretrained """
self.rank_output.weight.data = self.itm_output.weight.data[1:, :]
self.rank_output.bias.data = self.itm_output.bias.data[1:]
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.rank_output(pooled_output)
if compute_loss:
# triplet loss
rank_scores_sigmoid = torch.sigmoid(rank_scores)
sample_size = batch['sample_size']
scores = rank_scores_sigmoid.contiguous().view(-1, sample_size)
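            # (added note) each row now holds one positive pair (column 0) followed
            # by its sampled negatives, so the hinge below pushes the positive score
            # above every negative by at least `margin`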
pos = scores[:, :1]
neg = scores[:, 1:]
rank_loss = torch.clamp(self.margin + neg - pos, 0)
return rank_loss
else:
return rank_scores
class UniterForImageTextRetrievalHardNeg(UniterForImageTextRetrieval):
""" Finetune UNITER for image text retrieval
"""
def __init__(self, config, img_dim, margin=0.2, hard_size=16):
super().__init__(config, img_dim, margin)
self.hard_size = hard_size
def forward(self, batch, sample_from='t', compute_loss=True):
# expect same input_ids for all pairs
batch_size = batch['attn_masks'].size(0)
input_ids = batch['input_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
if sample_from == 't':
if input_ids.size(0) == 1:
batch['input_ids'] = input_ids.expand(batch_size, -1)
elif sample_from == 'i':
if img_feat.size(0) == 1:
batch['img_feat'] = img_feat.expand(batch_size, -1, -1)
if img_pos_feat.size(0) == 1:
batch['img_pos_feat'] = img_pos_feat.expand(batch_size, -1, -1)
else:
raise ValueError()
if self.training and compute_loss:
with torch.no_grad():
self.eval()
scores = super().forward(batch, compute_loss=False)
hard_batch = self._get_hard_batch(batch, scores, sample_from)
self.train()
return super().forward(hard_batch, compute_loss=True)
else:
return super().forward(batch, compute_loss)
def _get_hard_batch(self, batch, scores, sample_from='t'):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
hard_batch = {'sample_size': self.hard_size + 1}
# NOTE first example is positive
hard_indices = scores.squeeze(-1)[1:].topk(
self.hard_size, sorted=False)[1] + 1
indices = torch.cat([torch.zeros(1, dtype=torch.long,
device=hard_indices.device),
hard_indices])
attention_mask = attention_mask.index_select(0, indices)
gather_index = gather_index.index_select(0, indices)
if position_ids.size(0) != 1:
position_ids = position_ids[:self.hard_size+1]
if sample_from == 't':
# cut to minimum padding
max_len = attention_mask.sum(dim=1).max().item()
max_i = max_len - input_ids.size(1)
attention_mask = attention_mask[:, :max_len]
gather_index = gather_index[:, :max_len]
img_feat = img_feat.index_select(0, indices)[:, :max_i, :]
img_pos_feat = img_pos_feat.index_select(0, indices)[:, :max_i, :]
# expect same input_ids for all pairs
input_ids = input_ids[:self.hard_size+1]
elif sample_from == 'i':
input_ids = input_ids.index_select(0, indices)
# expect same image features for all pairs
img_feat = img_feat[:self.hard_size+1]
img_pos_feat = img_pos_feat[:self.hard_size+1]
else:
raise ValueError()
hard_batch['input_ids'] = input_ids
hard_batch['position_ids'] = position_ids
hard_batch['img_feat'] = img_feat
hard_batch['img_pos_feat'] = img_pos_feat
hard_batch['attn_masks'] = attention_mask
hard_batch['gather_index'] = gather_index
return hard_batch
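# --- Hedged illustration (added; not part of the original file) ---
# Minimal sketch of the hard-negative selection above: example 0 is the positive
# pair and the top-`hard_size` scoring negatives are kept with it. The scores
# below are made up for illustration.
if __name__ == '__main__':
    scores = torch.tensor([[5.0], [0.1], [3.0], [2.0], [0.5]]) # (batch, 1)
    hard_size = 2
    hard_indices = scores.squeeze(-1)[1:].topk(hard_size, sorted=False)[1] + 1
    indices = torch.cat([torch.zeros(1, dtype=torch.long), hard_indices])
    print(indices) # tensor([0, 2, 3]) (order of the negatives may vary)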
| 5,619 | 39.142857 | 79 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/setup.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
from pathlib import Path
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import find_packages, setup
cwd = Path(__file__).resolve().parent
package_name = "compressai"
version = "1.1.9.dev0"
git_hash = "unknown"
try:
git_hash = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode().strip()
)
except (FileNotFoundError, subprocess.CalledProcessError):
pass
def write_version_file():
path = cwd / package_name / "version.py"
with path.open("w") as f:
f.write(f'__version__ = "{version}"\n')
f.write(f'git_version = "{git_hash}"\n')
write_version_file()
def get_extensions():
ext_dirs = cwd / package_name / "cpp_exts"
ext_modules = []
# Add rANS module
rans_lib_dir = cwd / "third_party/ryg_rans"
rans_ext_dir = ext_dirs / "rans"
extra_compile_args = ["-std=c++17"]
if os.getenv("DEBUG_BUILD", None):
extra_compile_args += ["-O0", "-g", "-UNDEBUG"]
else:
extra_compile_args += ["-O3"]
ext_modules.append(
Pybind11Extension(
name=f"{package_name}.ans",
sources=[str(s) for s in rans_ext_dir.glob("*.cpp")],
language="c++",
include_dirs=[rans_lib_dir, rans_ext_dir],
extra_compile_args=extra_compile_args,
)
)
# Add ops
ops_ext_dir = ext_dirs / "ops"
ext_modules.append(
Pybind11Extension(
name=f"{package_name}._CXX",
sources=[str(s) for s in ops_ext_dir.glob("*.cpp")],
language="c++",
extra_compile_args=extra_compile_args,
)
)
return ext_modules
TEST_REQUIRES = ["pytest", "pytest-cov"]
DEV_REQUIRES = TEST_REQUIRES + [
"black",
"flake8",
"flake8-bugbear",
"flake8-comprehensions",
"isort",
"mypy",
]
def get_extra_requirements():
extras_require = {
"test": TEST_REQUIRES,
"dev": DEV_REQUIRES,
"doc": ["sphinx", "furo"],
"tutorials": ["jupyter", "ipywidgets"],
}
extras_require["all"] = {req for reqs in extras_require.values() for req in reqs}
return extras_require
setup(
name=package_name,
version=version,
description="A PyTorch library and evaluation platform for end-to-end compression research",
url="https://github.com/InterDigitalInc/CompressAI",
author="InterDigital AI Lab",
author_email="[email protected]",
packages=find_packages(exclude=("tests",)),
zip_safe=False,
python_requires=">=3.6",
install_requires=[
"numpy",
"scipy",
"matplotlib",
"torch>=1.7.1",
"torchvision",
"pytorch-msssim",
],
extras_require=get_extra_requirements(),
license="Apache-2",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
ext_modules=get_extensions(),
cmdclass={"build_ext": build_ext},
)
| 5,093 | 31.653846 | 96 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/test.py | import os, glob
import math
import logging
import time
import argparse
from collections import OrderedDict
import json
import torch
import torch.nn.functional as F
import numpy as np
from criterions.criterion import Criterion
import options.options as option
import utils.util as util
import compressai
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
compressai.set_entropy_coder(compressai.available_entropy_coders()[0])
#### options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to options YMAL file.', default='./conf/test/sample.yml')
opt = option.parse(parser.parse_args().opt, is_train=False)
opt = option.dict_to_nonedict(opt)
util.mkdir(opt['path']['results_root'])
util.mkdir(opt['path']['checkpoint_updated'])
util.setup_logger('base', opt['path']['log'], opt['name'], level=logging.INFO, screen=True, tofile=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
#### loading test model if exists
update = opt['path'].get('update', False)
if update and opt['path'].get('checkpoint', None):
device_id = torch.cuda.current_device()
checkpoint = torch.load(opt['path']['checkpoint'], map_location=lambda storage, loc: storage.cuda(device_id))
model = util.create_model(opt, checkpoint, None, rank=0)
logger.info('model checkpoint loaded from {:s}'.format(opt['path']['checkpoint']))
model.update(force=True)
# save the updated checkpoint
state_dict = model.state_dict()
for f in os.listdir(opt['path']['checkpoint_updated']):
os.remove(os.path.join(opt['path']['checkpoint_updated'], f))
filepath = os.path.join(opt['path']['checkpoint_updated'], opt['path']['checkpoint'].split('/')[-1])
torch.save(state_dict, filepath)
logger.info('updated model checkpoint saved to {:s}'.format(filepath))
else:
try:
state_dict_path = os.path.join(opt['path']['checkpoint_updated'], os.listdir(opt['path']['checkpoint_updated'])[0])
state_dict = torch.load(state_dict_path)
model = util.create_model(opt, None, state_dict, rank=0)
logger.info('updated model checkpoint loaded from {:s}'.format(state_dict_path))
except:
        raise Exception('Chose not to update from a model checkpoint, but failed to load from an updated model checkpoint (state_dict).')
checkpoint = None
state_dict = None
model.eval()
logger.info('Model parameter numbers: {:d}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
#### Create test dataset and dataloader
runs = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
if phase == 'train' or phase == 'val':
pass
else:
device = 'cuda' if dataset_opt['cuda'] else 'cpu'
estimation = dataset_opt['estimation']
test_set = util.create_dataset(dataset_opt)
test_loader = util.create_dataloader(test_set, dataset_opt, opt, None)
logger.info('Number of test samples in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
runs.append((device, estimation, test_loader))
for device, estimation, test_loader in runs:
model = model.to(device)
phase = test_loader.dataset.phase
mode = 'est' if estimation else 'coder'
logger.info('\nTesting [{:s}: {:s}]...'.format(mode, phase))
save_dir = os.path.join(opt['path']['results_root'], mode, phase)
util.mkdir(save_dir)
for f in glob.glob(os.path.join(save_dir, '*')):
os.remove(f)
test_metrics = {
'psnr': [],
'ms-ssim': [],
'bpp': [],
'encoding_time': [],
'decoding_time': [],
}
test_start_time = time.time()
for i, data in enumerate(test_loader):
logger.info('{:20s} - testing sample {:04d}'.format(phase, i))
if len(data) == 1:
gt = None
noise = data.to(device)
noise = util.cropping(noise)
else:
gt, noise = data
gt = gt.to(device)
gt = util.cropping(gt)
noise = noise.to(device)
noise = util.cropping(noise)
# estimation mode using model.forward()
if estimation:
start = time.time()
out_net = model.forward(noise, gt)
elapsed_time = time.time() - start
enc_time = dec_time = elapsed_time / 2
num_pixels = noise.size(0) * noise.size(2) * noise.size(3)
bpp = sum(
(torch.log(likelihoods).sum() / (-math.log(2) * num_pixels))
for likelihoods in out_net["likelihoods"].values()
)
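            # (added note) this is the entropy-model estimate of the rate:
            # bpp = -sum_i log2 p(latent_i) / num_pixels, summed over every
            # likelihood tensor the model returns; the coder branch below
            # instead measures the real length of the entropy-coded strings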
rec = out_net["x_hat"]
# coder mode using model.compress() and model.decompress()
else:
start = time.time()
out_enc = model.compress(noise)
enc_time = time.time() - start
start = time.time()
out_dec = model.decompress(out_enc["strings"], out_enc["shape"])
dec_time = time.time() - start
num_pixels = noise.size(0) * noise.size(2) * noise.size(3)
bpp = sum(len(s[0]) for s in out_enc["strings"]) * 8.0 / num_pixels
rec = out_dec["x_hat"]
cur_psnr = util.psnr(gt, rec.clamp(0, 1)) if gt is not None else 0.
cur_ssim = util.ms_ssim(gt, rec.clamp(0, 1), data_range=1.0).item() if gt is not None else 0.
denoise = util.torch2img(rec[0])
denoise.save(os.path.join(save_dir, '{:04d}_{:.3f}dB_{:.4f}_{:.4f}bpp.png'.format(i, cur_psnr, cur_ssim, bpp)))
if gt is not None:
gt = util.torch2img(gt[0])
gt.save(os.path.join(save_dir, '{:04d}_gt.png'.format(i)))
noise = util.torch2img(noise[0])
noise.save(os.path.join(save_dir, '{:04d}_noise.png'.format(i)))
logger.info('{:20s} - sample {:04d} image: bpp = {:.4f}, psnr = {:.3f}dB, ssim = {:.4f}'.format(phase, i, bpp, cur_psnr, cur_ssim))
test_metrics['psnr'].append(cur_psnr)
test_metrics['ms-ssim'].append(cur_ssim)
test_metrics['bpp'].append(bpp)
test_metrics['encoding_time'].append(enc_time)
test_metrics['decoding_time'].append(dec_time)
for k, v in test_metrics.items():
test_metrics[k] = [sum(v) / len(v)]
logger.info('----Average results for phase {:s}----'.format(phase))
for k, v in test_metrics.items():
logger.info('\t{:s}: {:.4f}'.format(k, v[0]))
test_end_time = time.time()
logger.info('Total testing time for phase {:s} = {:.3f}s'.format(phase, test_end_time - test_start_time))
# save results
description = "entropy estimation" if estimation else "ans"
output = {
"name": '{:s}_{:s}'.format(opt['name'], phase),
"description": f"Inference ({description})",
"results": test_metrics,
}
json_path = os.path.join(opt['path']['results_root'], mode, '{:s}.json'.format(phase))
with open(json_path, 'w') as f:
json.dump(output, f, indent=2)
| 6,949 | 38.714286 | 139 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/train.py | import os
import math
import argparse
import random
import logging
import torch
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
import options.options as option
from utils import util
from utils.util import (
configure_optimizers, load_optimizer,
configure_schedulers, load_scheduler,
create_model, create_dataloader, create_dataset,
print_network,
torch2img,
compute_metrics,
AverageMeter,
)
from criterions.criterion import Criterion
import warnings
warnings.filterwarnings("ignore")
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def main():
#### options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to option YMAL file.', default='./conf/train/sample.yml')
parser.add_argument('--local_rank', type=int, default=-1)
args = parser.parse_args()
opt = option.parse(args.opt, is_train=True)
#### distributed training settings
rank = args.local_rank
world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if rank == -1:
opt['dist'] = False
print('Disabled distributed training.')
else:
opt['dist'] = True
if world_size > 1:
            torch.cuda.set_device(rank)  # pin each process to its own fixed GPU
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
#### loading resume state if exists
if opt['path'].get('checkpoint', None):
# distributed resuming: all load into default GPU
device_id = torch.cuda.current_device()
checkpoint = torch.load(opt['path']['checkpoint'],
map_location=lambda storage, loc: storage.cuda(device_id))
else:
checkpoint = None
#### mkdir and loggers
if rank <= 0: # normal training (rank -1) OR distributed training (rank 0)
if checkpoint is None:
util.mkdir_and_rename(
opt['path']['experiments_root']) # rename experiment folder if exists
util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root'
and 'pretrain_model' not in key and 'resume' not in key))
# config loggers. Before it, the log will not work
util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
util.setup_logger('val', opt['path']['log'], 'val_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
logger = logging.getLogger('base')
logger_val = logging.getLogger('val') # validation logger
logger.info(option.dict2str(opt))
# tensorboard logger
if opt['use_tb_logger']:
version = float(torch.__version__[0:3])
if version >= 1.1: # PyTorch 1.1
from torch.utils.tensorboard import SummaryWriter
else:
logger.info(
'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))
from tensorboardX import SummaryWriter
tb_logger = SummaryWriter(log_dir='../../tb_logger/' + opt['name'])
else:
util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)
logger = logging.getLogger('base')
# convert to NoneDict, which returns None for missing keys
opt = option.dict_to_nonedict(opt)
#### random seed
seed = opt['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
if rank <= 0:
logger.info('Random seed: {}'.format(seed))
util.set_random_seed(seed)
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
#### create train and val dataloader
# dataset_ratio = 200 # enlarge the size of each epoch
mode = opt['train']['mode']
device = 'cuda' if opt['gpu_ids'] is not None else 'cpu'
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
train_set = create_dataset(dataset_opt)
if mode == 'epoch':
train_size = int(math.floor(len(train_set) / dataset_opt['batch_size']))
total_epochs = int(opt['train'][mode]['value'])
total_iters = train_size * total_epochs
if 'debug' not in opt['name']:
opt['train']['epoch']['val_freq'] *= train_size
elif mode == 'step':
train_size = int(math.floor(len(train_set) / dataset_opt['batch_size']))
total_iters = int(opt['train'][mode]['value'])
total_epochs = int(math.ceil(total_iters / train_size))
else:
raise NotImplementedError('mode [{:s}] is not recognized.'.format(mode))
if opt['dist']:
                # the abstract Sampler base class is not iterable; use DistributedSampler
                # so each process sees its own shard of the training set
                train_sampler = DistributedSampler(train_set)
# train_sampler = DistIterSampler(train_set, world_size, rank, dataset_ratio)
# total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))
else:
train_sampler = None
train_loader = create_dataloader(train_set, dataset_opt, opt, train_sampler)
if rank <= 0:
logger.info('Number of train samples: {:,d}, iters: {:,d}'.format(
len(train_set), train_size))
logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
total_epochs, total_iters))
elif phase == 'val':
val_set = create_dataset(dataset_opt)
val_loader = create_dataloader(val_set, dataset_opt, opt, None)
if rank <= 0:
logger.info('Number of val samples in [{:s}]: {:d}'.format(
dataset_opt['name'], len(val_set)))
else:
raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
assert train_loader is not None
assert val_loader is not None
#### create model
model = create_model(opt, checkpoint, None, rank)
model = model.to(device)
#### create optimizer and schedulers
optimizer_dict = configure_optimizers(opt, model)
scheduler_dict = configure_schedulers(opt, optimizer_dict)
optimizer = load_optimizer(optimizer_dict, 'optimizer', checkpoint)
aux_optimizer = load_optimizer(optimizer_dict, 'aux_optimizer', checkpoint)
lr_scheduler = load_scheduler(scheduler_dict, 'lr_scheduler', checkpoint)
aux_lr_scheduler = load_scheduler(scheduler_dict, 'aux_lr_scheduler', checkpoint)
#### resume training
if checkpoint:
if rank <= 0:
logger.info('Resuming training from epoch: {}, iter: {}.'.format(
checkpoint['epoch'], checkpoint['iter']))
# training state
start_epoch = checkpoint['epoch']
best_loss = checkpoint['loss']
current_step = start_epoch * math.ceil(len(train_loader.dataset) / opt['datasets']['train']['batch_size'])
checkpoint = None
else:
start_epoch = 0
best_loss = 1e10
current_step = 0
#### criterion
criterion = Criterion(opt)
# torch.cuda.empty_cache()
#### training
if rank <= 0:
logger.info('Model parameter numbers: {:d}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
loss_cap = opt['train']['loss_cap']
for epoch in range(start_epoch, total_epochs + 1):
if opt['dist']:
train_sampler.set_epoch(epoch)
if rank <= 0 and mode == 'epoch':
message = 'lr_main: {:e}'.format(optimizer.param_groups[0]['lr'])
message += ' | lr_aux: {:e}'.format(aux_optimizer.param_groups[0]['lr'])
logger.info(message)
for _, train_data in enumerate(train_loader):
# torch.cuda.empty_cache()
current_step += 1
if current_step > total_iters:
break
#### training
model.train()
# device = next(model.parameters()).device
gt, noise = train_data
gt = gt.to(device)
noise = noise.to(device)
optimizer.zero_grad()
aux_optimizer.zero_grad()
# forward
out_net = model(noise, gt)
out_train = criterion(out_net, gt)
            # only step the optimizers when the loss is non-negative and below the cap
            # (this keeps updates sane while the reconstruction stays roughly in [0, 1])
optimizer_flag = out_train["loss"].item() >= 0 and out_train["loss"].item() < loss_cap
if not optimizer_flag:
message = '[Warning]: network parameters are not optimized due to train loss = {:.4f}.'.format(out_train['loss'].item())
print(message)
# logger.info(message)
# optimizer
out_train["loss"].backward()
if opt['train']['clip_max_norm'] > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), opt['train']['clip_max_norm'])
if not optimizer_flag:
optimizer.zero_grad()
optimizer.step()
# aux_optimizer
aux_loss = model.aux_loss()
out_train['aux_loss'] = aux_loss
aux_loss.backward()
if not optimizer_flag:
aux_optimizer.zero_grad()
aux_optimizer.step()
#### update learning rate for step mode
if mode == 'step':
lr_scheduler.step()
aux_lr_scheduler.step()
#### log: weighted loss
if current_step % opt['logger']['print_freq'] == 0:
wanted_keys = ['loss', 'bpp_loss', 'aux_loss']
message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> [weighted]'.format(epoch, current_step, optimizer.param_groups[0]['lr'])
for k, v in out_train.items():
# tensorboard logger
if opt['use_tb_logger']:
if rank <= 0:
mode_counter = epoch if mode == 'epoch' else current_step
tb_logger.add_scalar('[train]: {}'.format(k), v.item(), mode_counter)
# message
if k in wanted_keys or 'weighted' in k:
k = k.replace('weighted_', '')
message += ' | {:s}: {:.4f}'.format(k, v.item())
if rank <= 0:
logger.info(message)
# validation
if current_step % opt['train'][mode]['val_freq'] == 0 and rank <= 0:
model.eval()
# device = next(model.parameters()).device
log = {}
for k in out_train.keys():
log[k] = AverageMeter()
log['psnr'] = AverageMeter()
log['ms_ssim'] = AverageMeter()
with torch.no_grad():
mode_counter = epoch if mode == 'epoch' else current_step
this_val_dir = os.path.join(opt['path']['val_samples'], '{:d}'.format(mode_counter))
if not os.path.exists(this_val_dir):
os.makedirs(this_val_dir)
for i, val_data in enumerate(val_loader):
gt, noise = val_data
gt = gt.to(device)
noise = noise.to(device)
out_net = model(noise, gt)
out_val = criterion(out_net, gt)
out_val['aux_loss'] = model.aux_loss()
for k, v in out_val.items():
log[k].update(v.item())
# save
rec = torch2img(out_net['x_hat'])
gt = torch2img(gt)
noise = torch2img(noise)
p, m = compute_metrics(rec, gt)
log['psnr'].update(p)
log['ms_ssim'].update(m)
if i < 12:
rec.save(os.path.join(this_val_dir, '{:03d}_rec.png'.format(i)))
gt.save(os.path.join(this_val_dir, '{:03d}_gt.png'.format(i)))
noise.save(os.path.join(this_val_dir, '{:03d}_noise.png'.format(i)))
# val tensorboard
for k, v in log.items():
if opt['use_tb_logger']:
if rank <= 0:
mode_counter = epoch if mode == 'epoch' else current_step
tb_logger.add_scalar('[val]: {}'.format(k), v.avg, mode_counter)
# [val] weighted loss
wanted_keys = ['loss', 'bpp_loss', 'aux_loss']
message = '<epoch:{:3d}, iter:{:8,d}> [weighted]'.format(epoch, current_step)
for k, v in log.items():
if k in wanted_keys or 'weighted' in k:
k = k.replace('weighted_', '')
message += ' | {:s}: {:.4f}'.format(k, v.avg)
if rank <= 0:
logger_val.info(message)
# [val] raw loss
unwanted_keys = ['psnr', 'ms_ssim', 'rd_loss']
message = '<epoch:{:3d}, iter:{:8,d}> [raw loss]'.format(epoch, current_step)
for k, v in log.items():
if k in unwanted_keys or 'weighted' in k:
continue
message += ' | {:s}: {:.4f}'.format(k, v.avg)
if rank <= 0:
logger_val.info(message)
# [val] rate distortion
wanted_keys = ['rd_loss', 'bpp_loss', 'psnr', 'ms_ssim']
message = '<epoch:{:3d}, iter:{:8,d}> [rate-dis]'.format(epoch, current_step)
for k, v in log.items():
if k in wanted_keys:
k = k.replace('_loss', '')
message += ' | {:s}: {:.4f}'.format(k, v.avg)
if rank <= 0:
logger.info(message)
logger_val.info(message)
#### save checkpoints
loss = log['rd_loss'].avg
is_best = loss < best_loss
best_loss = min(loss, best_loss)
if rank <= 0 and is_best:
save_dict = {
"epoch": epoch,
"iter": current_step,
"state_dict": model.state_dict(),
"loss": loss,
"optimizer": optimizer_dict['optimizer'].state_dict(),
"aux_optimizer": optimizer_dict['aux_optimizer'].state_dict(),
"lr_scheduler": scheduler_dict['lr_scheduler'].state_dict(),
"aux_lr_scheduler": scheduler_dict['aux_lr_scheduler'].state_dict(),
}
mode_counter = epoch if mode == 'epoch' else current_step
save_path = os.path.join(opt['path']['checkpoints'], "checkpoint_best_loss.pth.tar")
torch.save(save_dict, save_path)
logger.info('best checkpoint saved.')
logger_val.info('best checkpoint saved.')
torch.cuda.empty_cache()
#### save checkpoints
if rank <= 0 and (epoch + 1) % opt['logger']['save_checkpoint_freq'] == 0:
save_dict = {
"epoch": epoch,
"iter": current_step,
"state_dict": model.state_dict(),
"loss": best_loss,
"optimizer": optimizer_dict['optimizer'].state_dict(),
"aux_optimizer": optimizer_dict['aux_optimizer'].state_dict(),
"lr_scheduler": scheduler_dict['lr_scheduler'].state_dict(),
"aux_lr_scheduler": scheduler_dict['aux_lr_scheduler'].state_dict(),
}
mode_counter = epoch if mode == 'epoch' else current_step
save_path = os.path.join(opt['path']['checkpoints'], "checkpoint_{:d}.pth.tar".format(mode_counter))
torch.save(save_dict, save_path)
#### update learning rate for epoch mode
if mode == 'epoch':
lr_scheduler.step()
aux_lr_scheduler.step()
if rank <= 0:
logger.info('Saving the final model.')
save_dict = {
"epoch": epoch,
"iter": current_step,
"state_dict": model.state_dict(),
"loss": best_loss,
"optimizer": optimizer_dict['optimizer'].state_dict(),
"aux_optimizer": optimizer_dict['aux_optimizer'].state_dict(),
"lr_scheduler": scheduler_dict['lr_scheduler'].state_dict(),
"aux_lr_scheduler": scheduler_dict['aux_lr_scheduler'].state_dict(),
}
mode_counter = epoch if mode == 'epoch' else current_step
save_path = os.path.join(opt['path']['checkpoints'], "checkpoint_latest.pth.tar")
torch.save(save_dict, save_path)
logger.info('End of training.')
if __name__ == '__main__':
main()
| 17,639 | 41 | 137 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/criterions/criterion.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
import torch.nn.functional as F
class Criterion(nn.Module):
def __init__(self, opt):
super(Criterion, self).__init__()
self.opt = opt
# criterions
self.criterion_metric = opt['network']['criterions']['criterion_metric']
self.criterion_fea = opt['network']['criterions']['criterion_fea']
# lambdas
self.lambda_metric = opt['network']['lambdas']['lambda_metric']
self.lambda_fea = opt['network']['lambdas']['lambda_fea']
self.metric_loss = RateDistortionLoss(lmbda=self.lambda_metric, criterion=self.criterion_metric)
if self.criterion_fea:
self.fea_loss = FeaLoss(lmbda=self.lambda_fea, criterion=self.criterion_fea)
def forward(self, out_net, gt):
out = {'loss': 0, 'rd_loss': 0}
# bpp loss and metric loss
out_metric = self.metric_loss(out_net, gt)
out['loss'] += out_metric['bpp_loss']
out['rd_loss'] += out_metric['bpp_loss']
for k, v in out_metric.items():
out[k] = v
if 'weighted' in k:
out['loss'] += v
out['rd_loss'] += v
# fea loss
if self.criterion_fea:
if 'y_inter' in out_net.keys():
out_fea = self.fea_loss(out_net['y'], out_net['y_gt'], out_net['y_inter'], out_net['y_inter_gt'])
else:
out_fea = self.fea_loss(out_net['y'], out_net['y_gt'])
for k, v in out_fea.items():
out[k] = v
if 'weighted' in k:
out['loss'] += v
return out
# rate distortion loss
class RateDistortionLoss(nn.Module):
"""Custom rate distortion loss with a Lagrangian parameter."""
def __init__(self, lmbda=1e-2, criterion='mse'):
super().__init__()
self.lmbda = lmbda
self.criterion = criterion
if self.criterion == 'mse':
self.loss = nn.MSELoss()
elif self.criterion == 'ms-ssim':
from pytorch_msssim import ms_ssim
self.loss = ms_ssim
else:
            raise NotImplementedError('RateDistortionLoss criterion [{:s}] is not recognized.'.format(criterion))
def forward(self, out_net, target):
N, _, H, W = target.size()
out = {}
num_pixels = N * H * W
out["bpp_loss"] = sum(
(torch.log(likelihoods).sum() / (-math.log(2) * num_pixels))
for likelihoods in out_net["likelihoods"].values()
)
if self.criterion == 'mse':
out["mse_loss"] = self.loss(out_net["x_hat"], target)
out["weighted_mse_loss"] = self.lmbda * 255 ** 2 * out["mse_loss"]
elif self.criterion == 'ms-ssim':
out["ms_ssim_loss"] = 1 - self.loss(out_net["x_hat"], target, data_range=1.0)
out["weighted_ms_ssim_loss"] = self.lmbda * out["ms_ssim_loss"]
return out
# fea loss
class FeaLoss(nn.Module):
def __init__(self, lmbda=1., criterion='l2'):
super(FeaLoss, self).__init__()
self.lmbda = lmbda
self.criterion = criterion
if self.criterion == 'l2':
self.loss = nn.MSELoss()
elif self.criterion == 'l1':
self.loss = nn.L1Loss()
else:
            raise NotImplementedError('FeaLoss criterion [{:s}] is not recognized.'.format(criterion))
def forward(self, fea, fea_gt, fea_inter=None, fea_inter_gt=None):
loss = self.loss(fea, fea_gt)
if fea_inter is not None and fea_inter_gt is not None:
loss += self.loss(fea_inter, fea_inter_gt)
out = {
'fea_loss': loss,
'weighted_fea_loss': loss * self.lmbda,
}
return out
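# --- Hedged usage sketch (added; not part of the original file) ---
# Minimal example of RateDistortionLoss on a fake network output: the
# reconstruction and likelihoods below are random stand-ins, so the printed
# numbers are meaningless and only the call pattern is demonstrated.
if __name__ == '__main__':
    N, C, H, W = 1, 3, 64, 64
    target = torch.rand(N, C, H, W)
    out_net = {
        'x_hat': torch.rand(N, C, H, W),
        'likelihoods': {'y': torch.rand(N, 192, H // 16, W // 16).clamp(1e-9, 1.0)},
    }
    rd = RateDistortionLoss(lmbda=1e-2, criterion='mse')
    out = rd(out_net, target)
    print(out['bpp_loss'].item(), out['weighted_mse_loss'].item())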
| 3,832 | 33.223214 | 113 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/codes/utils/util.py | import os
import sys
import time
import math
from datetime import datetime
import random
import logging
from collections import OrderedDict
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.utils.data
from torchvision import transforms
from torchvision.utils import make_grid
from shutil import get_terminal_size
from PIL import Image
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from compressai.zoo import models
from compressai.zoo.image import model_architectures as architectures
from typing import Tuple, Union
from pytorch_msssim import ms_ssim
import torch.nn.functional as F
# optimizers
def configure_optimizers(opt, net):
parameters = [
p for n, p in net.named_parameters() if not n.endswith(".quantiles")
]
aux_parameters = [
p for n, p in net.named_parameters() if n.endswith(".quantiles")
]
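    # (added note) in CompressAI models the ".quantiles" parameters belong to the
    # entropy bottleneck and are trained with a separate auxiliary optimizer, so
    # they are split out from the main parameter group here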
# Make sure we don't have an intersection of parameters
params_dict = dict(net.named_parameters())
inter_params = set(parameters) & set(aux_parameters)
union_params = set(parameters) | set(aux_parameters)
assert len(inter_params) == 0
assert len(union_params) - len(params_dict.keys()) == 0
mode = opt['train']['mode']
optimizer_dict = {}
optimizer_dict['optimizer'] = torch.optim.Adam(
(p for p in parameters if p.requires_grad),
lr=opt['train'][mode]['lr'],
)
optimizer_dict['aux_optimizer'] = torch.optim.Adam(
(p for p in aux_parameters if p.requires_grad),
lr=opt['train'][mode]['lr_aux'],
)
return optimizer_dict
def load_optimizer(optimizer_dict, name, checkpoint):
optimizer = optimizer_dict.get(name, None)
if optimizer is not None and checkpoint is not None:
optimizer.load_state_dict(checkpoint[name])
return optimizer
# schedulers
def configure_schedulers(opt, optimizer_dict):
mode = opt['train']['mode']
scheduler = opt['train'][mode]['lr_scheme']
warm_up_counts = opt['train'][mode]['warm_up_counts']
milestones = opt['train'][mode]['milestones']
gamma = opt['train'][mode]['gamma']
scheduler_dict = {}
if scheduler == 'MultiStepLR':
scheduler_dict['lr_scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
optimizer_dict['optimizer'],
milestones=milestones,
gamma=gamma
)
scheduler_dict['aux_lr_scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
optimizer_dict['aux_optimizer'],
milestones=[],
gamma=1.0
)
elif scheduler == 'LambdaLR':
warm_up_with_multistep_lr = lambda i: (i + 1) / warm_up_counts if i < warm_up_counts else gamma**len([m for m in milestones if m <= i])
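        # (added note, illustrative numbers) e.g. with warm_up_counts=1000,
        # milestones=[50000, 100000] and gamma=0.5, the multiplier ramps linearly
        # from 1/1000 to 1 over the first 1000 steps, then halves at each milestone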
scheduler_dict['lr_scheduler'] = torch.optim.lr_scheduler.LambdaLR(
optimizer_dict['optimizer'],
lr_lambda=warm_up_with_multistep_lr
)
warm_up_with_multistep_lr = lambda i: (i + 1) / warm_up_counts if i < warm_up_counts else 1.0
scheduler_dict['aux_lr_scheduler'] = torch.optim.lr_scheduler.LambdaLR(
optimizer_dict['aux_optimizer'],
lr_lambda=warm_up_with_multistep_lr
)
elif scheduler == 'ReduceLROnPlateau':
scheduler_dict['lr_scheduler'] = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_dict['optimizer'], "min")
scheduler_dict['aux_lr_scheduler'] = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_dict['aux_optimizer'], "min")
else:
raise NotImplementedError('scheduler [{:s}] is not recognized.'.format(scheduler))
return scheduler_dict
def load_scheduler(scheduler_dict, name, checkpoint):
lr_scheduler = scheduler_dict.get(name, None)
if lr_scheduler is not None and checkpoint is not None:
lr_scheduler.load_state_dict(checkpoint[name])
return lr_scheduler
# model
def create_model(opt, checkpoint, state_dict, rank):
logger = logging.getLogger('base')
model = opt['network']['model']
if checkpoint is not None:
m = architectures[model].from_state_dict(checkpoint['state_dict'], opt)
elif state_dict is not None:
m = architectures[model].from_state_dict(state_dict, opt)
else:
quality = int(opt['network']['quality'])
metric = opt['network']['criterions']['criterion_metric']
pretrained = opt['network']['pretrained']
m = models[model](quality=quality, metric=metric, pretrained=pretrained, opt=opt)
print_network(m, rank)
logger.info('Model [{:s}] is created.'.format(m.__class__.__name__))
return m
def print_network(net, rank):
logger = logging.getLogger('base')
if isinstance(net, nn.DataParallel) or isinstance(net, nn.parallel.DistributedDataParallel):
net = net.module
s = str(net)
n = sum(map(lambda x: x.numel(), net.parameters()))
if isinstance(net, nn.DataParallel) or isinstance(net, nn.parallel.DistributedDataParallel):
net_struc_str = '{} - {}'.format(net.__class__.__name__,
net.module.__class__.__name__)
else:
        net_struc_str = '{}'.format(net.__class__.__name__)
m = 'structure: {}, with parameters: {:,d}'.format(net_struc_str, n)
if rank <= 0:
logger.info(m)
logger.info(s)
# dataloader
def create_dataloader(dataset, dataset_opt, opt=None, sampler=None):
phase = dataset_opt['phase']
if phase == 'train':
if opt['dist']:
world_size = torch.distributed.get_world_size()
num_workers = dataset_opt['n_workers']
assert dataset_opt['batch_size'] % world_size == 0
batch_size = dataset_opt['batch_size'] // world_size
shuffle = False
else:
num_workers = dataset_opt['n_workers'] * len(opt['gpu_ids'])
batch_size = dataset_opt['batch_size']
shuffle = dataset_opt['use_shuffle']
return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, sampler=sampler, drop_last=True,
pin_memory=False)
else:
return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0,
pin_memory=False)
# dataset
def create_dataset(dataset_opt):
mode = dataset_opt['name']
if mode == 'synthetic':
from compressai.datasets import SyntheticDataset as D
elif mode == 'synthetic-test':
from compressai.datasets import SyntheticTestDataset as D
elif mode == 'sidd':
from compressai.datasets import SiddDataset as D
else:
raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(mode))
dataset = D(dataset_opt)
logger = logging.getLogger('base')
logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__,
dataset_opt['name']))
return dataset
# related to compression
def torch2img(x: torch.Tensor) -> Image.Image:
return transforms.ToPILImage()(x.clamp_(0, 1).squeeze()[0:3])
def psnr(a: torch.Tensor, b: torch.Tensor) -> float:
a = transforms.ToTensor()(torch2img(a))
b = transforms.ToTensor()(torch2img(b))
mse = F.mse_loss(a, b).item()
return -10 * math.log10(mse)
def compute_metrics(
    a: Union[np.ndarray, Image.Image],
    b: Union[np.ndarray, Image.Image],
max_val: float = 255.0,
) -> Tuple[float, float]:
"""Returns PSNR and MS-SSIM between images `a` and `b`. """
if isinstance(a, Image.Image):
a = np.asarray(a)
if isinstance(b, Image.Image):
b = np.asarray(b)
a = torch.from_numpy(a.copy()).float().unsqueeze(0)
if a.size(3) == 3:
a = a.permute(0, 3, 1, 2)
b = torch.from_numpy(b.copy()).float().unsqueeze(0)
if b.size(3) == 3:
b = b.permute(0, 3, 1, 2)
mse = torch.mean((a - b) ** 2).item()
p = 20 * np.log10(max_val) - 10 * np.log10(mse)
m = ms_ssim(a, b, data_range=max_val).item()
return p, m
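# Illustrative sketch (added, not part of the original file): compare a
# reconstructed image against its ground truth with the helpers above.
# The file paths are hypothetical.
def _example_compare_images(rec_path='rec.png', gt_path='gt.png'):
    rec = Image.open(rec_path).convert('RGB')
    gt = Image.open(gt_path).convert('RGB')
    p, m = compute_metrics(rec, gt, max_val=255.0)  # PSNR in dB, MS-SSIM in [0, 1]
    return p, m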
class AverageMeter:
"""Compute running average."""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
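# Illustrative sketch (added, not part of the original file): AverageMeter keeps
# a running mean, e.g. of a per-batch loss during validation.
def _example_running_mean(values=(0.5, 0.3, 0.2)):
    meter = AverageMeter()
    for v in values:
        meter.update(v)  # default weight n=1 per update
    return meter.avg  # == sum(values) / len(values)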
def OrderedYaml():
'''yaml orderedDict support'''
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
return Loader, Dumper
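# Illustrative sketch (added, not part of the original file): load a YAML config
# with key order preserved, using the Loader/Dumper pair prepared above.
# `config.yml` is a hypothetical path.
def _example_load_ordered_yaml(path='config.yml'):
    OrderedLoader, _ = OrderedYaml()
    with open(path, 'r') as f:
        return yaml.load(f, Loader=OrderedLoader)  # -> OrderedDict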
####################
# miscellaneous
####################
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, str):
mkdir(paths)
else:
for path in paths:
mkdir(path)
def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + '_archived_' + get_timestamp()
        print('Path already exists. Renaming it to [{:s}]'.format(new_name))
        logger = logging.getLogger('base')
        logger.info('Path already exists. Renaming it to [{:s}]'.format(new_name))
os.rename(path, new_name)
os.makedirs(path)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):
'''set up logger'''
lg = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
if tofile:
log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
class ProgressBar(object):
'''A progress bar which can print the progress
modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py
'''
def __init__(self, task_num=0, bar_width=50, start=True):
self.task_num = task_num
max_bar_width = self._get_max_bar_width()
self.bar_width = (bar_width if bar_width <= max_bar_width else max_bar_width)
self.completed = 0
if start:
self.start()
def _get_max_bar_width(self):
terminal_width, _ = get_terminal_size()
max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)
if max_bar_width < 10:
            print('terminal width is too small ({}), please consider widening the terminal for '
                  'better progress bar visualization'.format(terminal_width))
max_bar_width = 10
return max_bar_width
def start(self):
if self.task_num > 0:
sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:\n{}\n'.format(
' ' * self.bar_width, self.task_num, 'Start...'))
else:
sys.stdout.write('completed: 0, elapsed: 0s')
sys.stdout.flush()
self.start_time = time.time()
def update(self, msg='In progress...'):
self.completed += 1
elapsed = time.time() - self.start_time
fps = self.completed / elapsed
if self.task_num > 0:
percentage = self.completed / float(self.task_num)
eta = int(elapsed * (1 - percentage) / percentage + 0.5)
mark_width = int(self.bar_width * percentage)
bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width)
sys.stdout.write('\033[2F') # cursor up 2 lines
sys.stdout.write('\033[J') # clean the output (remove extra chars since last display)
sys.stdout.write('[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s\n{}\n'.format(
bar_chars, self.completed, self.task_num, fps, int(elapsed + 0.5), eta, msg))
else:
sys.stdout.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(
self.completed, int(elapsed + 0.5), fps))
sys.stdout.flush()
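# Illustrative sketch (added, not part of the original file): drive the progress
# bar over a fixed number of tasks.
def _example_progress(n_tasks=10):
    pbar = ProgressBar(task_num=n_tasks)
    for _ in range(n_tasks):
        pbar.update('processing...')  # redraws the bar, throughput and ETA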
# evaluation
def cropping(x):
h, w = x.size(2), x.size(3)
p = 64 # maximum 6 strides of 2
new_h = h // p * p
new_w = w // p * p
cropping_left = (w - new_w) // 2
cropping_right = w - new_w - cropping_left
cropping_top = (h - new_h) // 2
cropping_bottom = h - new_h - cropping_top
x = F.pad(x, (-cropping_left, -cropping_right, -cropping_top, -cropping_bottom))
return x | 13,098 | 34.498645 | 143 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/MultiscaleDecomp.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from .waseda import Cheng2020Anchor
from compressai.layers import (
AttentionBlock,
ResidualBlock,
ResidualBlockUpsample,
ResidualBlockWithStride,
conv3x3,
subpel_conv3x3,
conv1x1
)
import warnings
class MultiscaleDecomp(Cheng2020Anchor):
def __init__(self, N=192, opt=None, **kwargs):
super().__init__(N=N, **kwargs)
self.g_a = None
self.g_a_block1 = nn.Sequential(
ResidualBlockWithStride(3, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
)
self.g_a_block2 = nn.Sequential(
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
conv3x3(N, N, stride=2),
)
self.denoise_module_1 = AttentionBlock(N)
self.denoise_module_2 = AttentionBlock(N)
def g_a_func(self, x, denoise=False):
x = self.g_a_block1(x)
if denoise:
x = self.denoise_module_1(x)
y_inter = x
x = self.g_a_block2(x)
if denoise:
x = self.denoise_module_2(x)
y = x
return y_inter, y
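    # Note (added comment, not in the original file): g_a_func splits the
    # Cheng2020 analysis transform into two stages and, when denoise=True,
    # applies an attention block after each stage. Both the intermediate
    # feature y_inter and the final latent y are returned; forward() computes
    # the same pair for the clean image gt, presumably so a feature-level
    # consistency loss can align the noisy and clean branches during training.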
def forward(self, x, gt=None):
# g_a for noisy input
y_inter, y = self.g_a_func(x, denoise=True)
# g_a for clean input
if gt is not None:
y_inter_gt, y_gt = self.g_a_func(gt)
else:
y_inter_gt, y_gt = None, None
# h_a and h_s
z = self.h_a(y)
z_hat, z_likelihoods = self.entropy_bottleneck(z)
params = self.h_s(z_hat)
# g_s
y_hat = self.gaussian_conditional.quantize(
y, "noise" if self.training else "dequantize"
)
ctx_params = self.context_prediction(y_hat)
gaussian_params = self.entropy_parameters(
torch.cat((params, ctx_params), dim=1)
)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
_, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"y_inter": y_inter,
"y_inter_gt": y_inter_gt,
"y": y,
"y_gt": y_gt,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
@classmethod
def from_state_dict(cls, state_dict, opt=None):
"""Return a new model instance from `state_dict`."""
N = state_dict["h_a.0.weight"].size(0)
net = cls(N, opt)
net.load_state_dict(state_dict)
return net
def compress(self, x):
if next(self.parameters()).device != torch.device("cpu"):
warnings.warn(
"Inference on GPU is not recommended for the autoregressive "
"models (the entropy coder is run sequentially on CPU)."
)
_, y = self.g_a_func(x, denoise=True)
z = self.h_a(y)
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
params = self.h_s(z_hat)
s = 4 # scaling factor between z and y
kernel_size = 5 # context prediction kernel size
padding = (kernel_size - 1) // 2
y_height = z_hat.size(2) * s
y_width = z_hat.size(3) * s
y_hat = F.pad(y, (padding, padding, padding, padding))
y_strings = []
for i in range(y.size(0)):
string = self._compress_ar(
y_hat[i : i + 1],
params[i : i + 1],
y_height,
y_width,
kernel_size,
padding,
)
y_strings.append(string)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
| 3,860 | 28.930233 | 84 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/priors.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from compressai.ans import BufferedRansEncoder, RansDecoder
from compressai.entropy_models import EntropyBottleneck, GaussianConditional
from compressai.layers import GDN, MaskedConv2d
from .utils import conv, deconv, update_registered_buffers
__all__ = [
"CompressionModel",
"FactorizedPrior",
"ScaleHyperprior",
"MeanScaleHyperprior",
"JointAutoregressiveHierarchicalPriors",
]
class CompressionModel(nn.Module):
"""Base class for constructing an auto-encoder with at least one entropy
bottleneck module.
Args:
entropy_bottleneck_channels (int): Number of channels of the entropy
bottleneck
"""
def __init__(self, entropy_bottleneck_channels, init_weights=True, **kwargs):
super().__init__()
self.entropy_bottleneck = EntropyBottleneck(entropy_bottleneck_channels)
if init_weights:
self._initialize_weights()
def aux_loss(self):
"""Return the aggregated loss over the auxiliary entropy bottleneck
module(s).
"""
aux_loss = sum(
m.loss() for m in self.modules() if isinstance(m, EntropyBottleneck)
)
return aux_loss
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, *args):
raise NotImplementedError()
def update(self, force=False):
"""Updates the entropy bottleneck(s) CDF values.
Needs to be called once after training to be able to later perform the
evaluation with an actual entropy coder.
Args:
force (bool): overwrite previous values (default: False)
Returns:
updated (bool): True if one of the EntropyBottlenecks was updated.
"""
updated = False
for m in self.children():
if not isinstance(m, EntropyBottleneck):
continue
rv = m.update(force=force)
updated |= rv
return updated
def load_state_dict(self, state_dict):
# Dynamically update the entropy bottleneck buffers related to the CDFs
update_registered_buffers(
self.entropy_bottleneck,
"entropy_bottleneck",
["_quantized_cdf", "_offset", "_cdf_length"],
state_dict,
)
super().load_state_dict(state_dict)
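# Illustrative sketch (added, not part of the original file): typical
# entropy-coding round trip once a model has been trained. `net` is any
# subclass implementing compress()/decompress() (see below).
def _example_roundtrip(net, x):
    net.update(force=True)  # build the quantized CDF tables once after training
    out = net.compress(x)
    return net.decompress(out["strings"], out["shape"])["x_hat"]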
class FactorizedPrior(CompressionModel):
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_, Int Conf. on Learning Representations
(ICLR), 2018.
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(entropy_bottleneck_channels=M, **kwargs)
self.g_a = nn.Sequential(
conv(3, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, M),
)
self.g_s = nn.Sequential(
deconv(M, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, 3),
)
self.N = N
self.M = M
@property
def downsampling_factor(self) -> int:
return 2 ** 4
def forward(self, x):
y = self.g_a(x)
y_hat, y_likelihoods = self.entropy_bottleneck(y)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {
"y": y_likelihoods,
},
}
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.weight"].size(0)
M = state_dict["g_a.6.weight"].size(0)
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def compress(self, x):
y = self.g_a(x)
y_strings = self.entropy_bottleneck.compress(y)
return {"strings": [y_strings], "shape": y.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 1
y_hat = self.entropy_bottleneck.decompress(strings[0], shape)
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
# From Balle's tensorflow compression examples
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64
def get_scale_table(min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS):
return torch.exp(torch.linspace(math.log(min), math.log(max), levels))
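# Note (added comment, not in the original file): get_scale_table() returns 64
# log-spaced scales between SCALES_MIN (0.11) and SCALES_MAX (256);
# ScaleHyperprior.update() passes it to GaussianConditional.update_scale_table()
# so the entropy-coder CDFs exist before compress()/decompress() are used.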
class ScaleHyperprior(CompressionModel):
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
(ICLR), 2018.
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(entropy_bottleneck_channels=N, **kwargs)
self.g_a = nn.Sequential(
conv(3, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, N),
GDN(N),
conv(N, M),
)
self.g_s = nn.Sequential(
deconv(M, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, N),
GDN(N, inverse=True),
deconv(N, 3),
)
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.ReLU(inplace=True),
conv(N, N),
nn.ReLU(inplace=True),
conv(N, N),
)
self.h_s = nn.Sequential(
deconv(N, N),
nn.ReLU(inplace=True),
deconv(N, N),
nn.ReLU(inplace=True),
conv(N, M, stride=1, kernel_size=3),
nn.ReLU(inplace=True),
)
self.gaussian_conditional = GaussianConditional(None)
self.N = int(N)
self.M = int(M)
@property
def downsampling_factor(self) -> int:
return 2 ** (4 + 2)
def forward(self, x):
y = self.g_a(x)
z = self.h_a(torch.abs(y))
z_hat, z_likelihoods = self.entropy_bottleneck(z)
scales_hat = self.h_s(z_hat)
y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
def load_state_dict(self, state_dict):
update_registered_buffers(
self.gaussian_conditional,
"gaussian_conditional",
["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
state_dict,
)
super().load_state_dict(state_dict)
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.weight"].size(0)
M = state_dict["g_a.6.weight"].size(0)
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def update(self, scale_table=None, force=False):
if scale_table is None:
scale_table = get_scale_table()
updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)
updated |= super().update(force=force)
return updated
def compress(self, x):
y = self.g_a(x)
z = self.h_a(torch.abs(y))
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
scales_hat = self.h_s(z_hat)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_strings = self.gaussian_conditional.compress(y, indexes)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
scales_hat = self.h_s(z_hat)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_hat = self.gaussian_conditional.decompress(strings[0], indexes, z_hat.dtype)
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
class MeanScaleHyperprior(ScaleHyperprior):
r"""Scale Hyperprior with non zero-mean Gaussian conditionals from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(N, M, **kwargs)
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.LeakyReLU(inplace=True),
conv(N, N),
nn.LeakyReLU(inplace=True),
conv(N, N),
)
self.h_s = nn.Sequential(
deconv(N, M),
nn.LeakyReLU(inplace=True),
deconv(M, M * 3 // 2),
nn.LeakyReLU(inplace=True),
conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),
)
def forward(self, x):
y = self.g_a(x)
z = self.h_a(y)
z_hat, z_likelihoods = self.entropy_bottleneck(z)
gaussian_params = self.h_s(z_hat)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
def compress(self, x):
y = self.g_a(x)
z = self.h_a(y)
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
gaussian_params = self.h_s(z_hat)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_strings = self.gaussian_conditional.compress(y, indexes, means=means_hat)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
gaussian_params = self.h_s(z_hat)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_hat = self.gaussian_conditional.decompress(
strings[0], indexes, means=means_hat
)
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
class JointAutoregressiveHierarchicalPriors(MeanScaleHyperprior):
r"""Joint Autoregressive Hierarchical Priors model from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N=192, M=192, **kwargs):
super().__init__(N=N, M=M, **kwargs)
self.g_a = nn.Sequential(
conv(3, N, kernel_size=5, stride=2),
GDN(N),
conv(N, N, kernel_size=5, stride=2),
GDN(N),
conv(N, N, kernel_size=5, stride=2),
GDN(N),
conv(N, M, kernel_size=5, stride=2),
)
self.g_s = nn.Sequential(
deconv(M, N, kernel_size=5, stride=2),
GDN(N, inverse=True),
deconv(N, N, kernel_size=5, stride=2),
GDN(N, inverse=True),
deconv(N, N, kernel_size=5, stride=2),
GDN(N, inverse=True),
deconv(N, 3, kernel_size=5, stride=2),
)
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.LeakyReLU(inplace=True),
conv(N, N, stride=2, kernel_size=5),
nn.LeakyReLU(inplace=True),
conv(N, N, stride=2, kernel_size=5),
)
self.h_s = nn.Sequential(
deconv(N, M, stride=2, kernel_size=5),
nn.LeakyReLU(inplace=True),
deconv(M, M * 3 // 2, stride=2, kernel_size=5),
nn.LeakyReLU(inplace=True),
conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),
)
self.entropy_parameters = nn.Sequential(
nn.Conv2d(M * 12 // 3, M * 10 // 3, 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(M * 10 // 3, M * 8 // 3, 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(M * 8 // 3, M * 6 // 3, 1),
)
self.context_prediction = MaskedConv2d(
M, 2 * M, kernel_size=5, padding=2, stride=1
)
self.gaussian_conditional = GaussianConditional(None)
self.N = int(N)
self.M = int(M)
@property
def downsampling_factor(self) -> int:
return 2 ** (4 + 2)
def forward(self, x):
y = self.g_a(x)
z = self.h_a(y)
z_hat, z_likelihoods = self.entropy_bottleneck(z)
params = self.h_s(z_hat)
y_hat = self.gaussian_conditional.quantize(
y, "noise" if self.training else "dequantize"
)
ctx_params = self.context_prediction(y_hat)
gaussian_params = self.entropy_parameters(
torch.cat((params, ctx_params), dim=1)
)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
_, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
x_hat = self.g_s(y_hat)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.weight"].size(0)
M = state_dict["g_a.6.weight"].size(0)
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def compress(self, x):
if next(self.parameters()).device != torch.device("cpu"):
warnings.warn(
"Inference on GPU is not recommended for the autoregressive "
"models (the entropy coder is run sequentially on CPU)."
)
y = self.g_a(x)
z = self.h_a(y)
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
params = self.h_s(z_hat)
s = 4 # scaling factor between z and y
kernel_size = 5 # context prediction kernel size
padding = (kernel_size - 1) // 2
y_height = z_hat.size(2) * s
y_width = z_hat.size(3) * s
y_hat = F.pad(y, (padding, padding, padding, padding))
y_strings = []
for i in range(y.size(0)):
string = self._compress_ar(
y_hat[i : i + 1],
params[i : i + 1],
y_height,
y_width,
kernel_size,
padding,
)
y_strings.append(string)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def _compress_ar(self, y_hat, params, height, width, kernel_size, padding):
cdf = self.gaussian_conditional.quantized_cdf.tolist()
cdf_lengths = self.gaussian_conditional.cdf_length.tolist()
offsets = self.gaussian_conditional.offset.tolist()
encoder = BufferedRansEncoder()
symbols_list = []
indexes_list = []
# Warning, this is slow...
# TODO: profile the calls to the bindings...
masked_weight = self.context_prediction.weight * self.context_prediction.mask
for h in range(height):
for w in range(width):
y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]
ctx_p = F.conv2d(
y_crop,
masked_weight,
bias=self.context_prediction.bias,
)
# 1x1 conv for the entropy parameters prediction network, so
# we only keep the elements in the "center"
p = params[:, :, h : h + 1, w : w + 1]
gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))
gaussian_params = gaussian_params.squeeze(3).squeeze(2)
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_crop = y_crop[:, :, padding, padding]
y_q = self.gaussian_conditional.quantize(y_crop, "symbols", means_hat)
y_hat[:, :, h + padding, w + padding] = y_q + means_hat
symbols_list.extend(y_q.squeeze().tolist())
indexes_list.extend(indexes.squeeze().tolist())
encoder.encode_with_indexes(
symbols_list, indexes_list, cdf, cdf_lengths, offsets
)
string = encoder.flush()
return string
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
if next(self.parameters()).device != torch.device("cpu"):
warnings.warn(
"Inference on GPU is not recommended for the autoregressive "
"models (the entropy coder is run sequentially on CPU)."
)
# FIXME: we don't respect the default entropy coder and directly call the
# range ANS decoder
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
params = self.h_s(z_hat)
s = 4 # scaling factor between z and y
kernel_size = 5 # context prediction kernel size
padding = (kernel_size - 1) // 2
y_height = z_hat.size(2) * s
y_width = z_hat.size(3) * s
# initialize y_hat to zeros, and pad it so we can directly work with
# sub-tensors of size (N, C, kernel size, kernel_size)
y_hat = torch.zeros(
(z_hat.size(0), self.M, y_height + 2 * padding, y_width + 2 * padding),
device=z_hat.device,
)
for i, y_string in enumerate(strings[0]):
self._decompress_ar(
y_string,
y_hat[i : i + 1],
params[i : i + 1],
y_height,
y_width,
kernel_size,
padding,
)
y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))
x_hat = self.g_s(y_hat).clamp_(0, 1)
return {"x_hat": x_hat}
def _decompress_ar(
self, y_string, y_hat, params, height, width, kernel_size, padding
):
cdf = self.gaussian_conditional.quantized_cdf.tolist()
cdf_lengths = self.gaussian_conditional.cdf_length.tolist()
offsets = self.gaussian_conditional.offset.tolist()
decoder = RansDecoder()
decoder.set_stream(y_string)
# Warning: this is slow due to the auto-regressive nature of the
# decoding... See more recent publication where they use an
# auto-regressive module on chunks of channels for faster decoding...
for h in range(height):
for w in range(width):
# only perform the 5x5 convolution on a cropped tensor
# centered in (h, w)
y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]
ctx_p = F.conv2d(
y_crop,
self.context_prediction.weight,
bias=self.context_prediction.bias,
)
# 1x1 conv for the entropy parameters prediction network, so
# we only keep the elements in the "center"
p = params[:, :, h : h + 1, w : w + 1]
gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))
scales_hat, means_hat = gaussian_params.chunk(2, 1)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
rv = decoder.decode_stream(
indexes.squeeze().tolist(), cdf, cdf_lengths, offsets
)
rv = torch.Tensor(rv).reshape(1, -1, 1, 1)
rv = self.gaussian_conditional.dequantize(rv, means_hat)
hp = h + padding
wp = w + padding
y_hat[:, :, hp : hp + 1, wp : wp + 1] = rv
| 23,319 | 34.226586 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/utils.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
def find_named_module(module, query):
"""Helper function to find a named module. Returns a `nn.Module` or `None`
Args:
module (nn.Module): the root module
query (str): the module name to find
Returns:
nn.Module or None
"""
return next((m for n, m in module.named_modules() if n == query), None)
def find_named_buffer(module, query):
"""Helper function to find a named buffer. Returns a `torch.Tensor` or `None`
Args:
module (nn.Module): the root module
query (str): the buffer name to find
Returns:
torch.Tensor or None
"""
return next((b for n, b in module.named_buffers() if n == query), None)
def _update_registered_buffer(
module,
buffer_name,
state_dict_key,
state_dict,
policy="resize_if_empty",
dtype=torch.int,
):
new_size = state_dict[state_dict_key].size()
registered_buf = find_named_buffer(module, buffer_name)
if policy in ("resize_if_empty", "resize"):
if registered_buf is None:
raise RuntimeError(f'buffer "{buffer_name}" was not registered')
if policy == "resize" or registered_buf.numel() == 0:
registered_buf.resize_(new_size)
elif policy == "register":
if registered_buf is not None:
raise RuntimeError(f'buffer "{buffer_name}" was already registered')
module.register_buffer(buffer_name, torch.empty(new_size, dtype=dtype).fill_(0))
else:
raise ValueError(f'Invalid policy "{policy}"')
def update_registered_buffers(
module,
module_name,
buffer_names,
state_dict,
policy="resize_if_empty",
dtype=torch.int,
):
"""Update the registered buffers in a module according to the tensors sized
in a state_dict.
(There's no way in torch to directly load a buffer with a dynamic size)
Args:
module (nn.Module): the module
module_name (str): module name in the state dict
buffer_names (list(str)): list of the buffer names to resize in the module
state_dict (dict): the state dict
policy (str): Update policy, choose from
('resize_if_empty', 'resize', 'register')
dtype (dtype): Type of buffer to be registered (when policy is 'register')
"""
valid_buffer_names = [n for n, _ in module.named_buffers()]
for buffer_name in buffer_names:
if buffer_name not in valid_buffer_names:
raise ValueError(f'Invalid buffer name "{buffer_name}"')
for buffer_name in buffer_names:
_update_registered_buffer(
module,
buffer_name,
f"{module_name}.{buffer_name}",
state_dict,
policy,
dtype,
)
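# Note (added comment, not in the original file): CompressionModel.load_state_dict
# relies on this helper, e.g.
#   update_registered_buffers(self.entropy_bottleneck, "entropy_bottleneck",
#                             ["_quantized_cdf", "_offset", "_cdf_length"],
#                             state_dict)
# so buffers whose shape depends on the trained model are resized before
# nn.Module.load_state_dict copies their values.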
def conv(in_channels, out_channels, kernel_size=5, stride=2):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
)
def deconv(in_channels, out_channels, kernel_size=5, stride=2):
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=stride - 1,
padding=kernel_size // 2,
)
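# Note (added comment, not in the original file): with the default
# kernel_size=5 / stride=2, conv() halves the spatial resolution (padding=2)
# and deconv() doubles it (output_padding=1), so stacking the two symmetrically
# restores the original height and width for even-sized inputs.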
| 4,989 | 33.178082 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/models/waseda.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch.nn as nn
from compressai.layers import (
AttentionBlock,
ResidualBlock,
ResidualBlockUpsample,
ResidualBlockWithStride,
conv3x3,
subpel_conv3x3,
)
from .priors import JointAutoregressiveHierarchicalPriors
class Cheng2020Anchor(JointAutoregressiveHierarchicalPriors):
"""Anchor model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Uses residual blocks with small convolutions (3x3 and 1x1), and sub-pixel
convolutions for up-sampling.
Args:
N (int): Number of channels
"""
def __init__(self, N=192, **kwargs):
super().__init__(N=N, M=N, **kwargs)
self.g_a = nn.Sequential(
ResidualBlockWithStride(3, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
conv3x3(N, N, stride=2),
)
self.h_a = nn.Sequential(
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
conv3x3(N, N, stride=2),
nn.LeakyReLU(inplace=True),
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
conv3x3(N, N, stride=2),
)
self.h_s = nn.Sequential(
conv3x3(N, N),
nn.LeakyReLU(inplace=True),
subpel_conv3x3(N, N, 2),
nn.LeakyReLU(inplace=True),
conv3x3(N, N * 3 // 2),
nn.LeakyReLU(inplace=True),
subpel_conv3x3(N * 3 // 2, N * 3 // 2, 2),
nn.LeakyReLU(inplace=True),
conv3x3(N * 3 // 2, N * 2),
)
self.g_s = nn.Sequential(
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
subpel_conv3x3(N, 3, 2),
)
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a.0.conv1.weight"].size(0)
net = cls(N)
net.load_state_dict(state_dict)
return net
class Cheng2020Attention(Cheng2020Anchor):
"""Self-attention model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Uses self-attention, residual blocks with small convolutions (3x3 and 1x1),
and sub-pixel convolutions for up-sampling.
Args:
N (int): Number of channels
"""
def __init__(self, N=192, **kwargs):
super().__init__(N=N, **kwargs)
self.g_a = nn.Sequential(
ResidualBlockWithStride(3, N, stride=2),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
AttentionBlock(N),
ResidualBlock(N, N),
ResidualBlockWithStride(N, N, stride=2),
ResidualBlock(N, N),
conv3x3(N, N, stride=2),
AttentionBlock(N),
)
self.g_s = nn.Sequential(
AttentionBlock(N),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
AttentionBlock(N),
ResidualBlock(N, N),
ResidualBlockUpsample(N, N, 2),
ResidualBlock(N, N),
subpel_conv3x3(N, 3, 2),
)
| 5,591 | 35.077419 | 79 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/zoo/image.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from torch.hub import load_state_dict_from_url
import torch
from compressai.models import (
Cheng2020Anchor,
Cheng2020Attention,
FactorizedPrior,
JointAutoregressiveHierarchicalPriors,
MeanScaleHyperprior,
ScaleHyperprior,
MultiscaleDecomp,
)
from .pretrained import load_pretrained
__all__ = [
"bmshj2018_factorized",
"bmshj2018_hyperprior",
"mbt2018",
"mbt2018_mean",
"cheng2020_anchor",
"cheng2020_attn",
"multiscale_decomp",
]
model_architectures = {
"bmshj2018-factorized": FactorizedPrior,
"bmshj2018-hyperprior": ScaleHyperprior,
"mbt2018-mean": MeanScaleHyperprior,
"mbt2018": JointAutoregressiveHierarchicalPriors,
"cheng2020-anchor": Cheng2020Anchor,
"cheng2020-attn": Cheng2020Attention,
"multiscale-decomp": MultiscaleDecomp,
}
root_url = "https://compressai.s3.amazonaws.com/models/v1"
model_urls = {
"bmshj2018-factorized": {
"mse": {
1: f"{root_url}/bmshj2018-factorized-prior-1-446d5c7f.pth.tar",
2: f"{root_url}/bmshj2018-factorized-prior-2-87279a02.pth.tar",
3: f"{root_url}/bmshj2018-factorized-prior-3-5c6f152b.pth.tar",
4: f"{root_url}/bmshj2018-factorized-prior-4-1ed4405a.pth.tar",
5: f"{root_url}/bmshj2018-factorized-prior-5-866ba797.pth.tar",
6: f"{root_url}/bmshj2018-factorized-prior-6-9b02ea3a.pth.tar",
7: f"{root_url}/bmshj2018-factorized-prior-7-6dfd6734.pth.tar",
8: f"{root_url}/bmshj2018-factorized-prior-8-5232faa3.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/bmshj2018-factorized-ms-ssim-1-9781d705.pth.tar",
2: f"{root_url}/bmshj2018-factorized-ms-ssim-2-4a584386.pth.tar",
3: f"{root_url}/bmshj2018-factorized-ms-ssim-3-5352f123.pth.tar",
4: f"{root_url}/bmshj2018-factorized-ms-ssim-4-4f91b847.pth.tar",
5: f"{root_url}/bmshj2018-factorized-ms-ssim-5-b3a88897.pth.tar",
6: f"{root_url}/bmshj2018-factorized-ms-ssim-6-ee028763.pth.tar",
7: f"{root_url}/bmshj2018-factorized-ms-ssim-7-8c265a29.pth.tar",
8: f"{root_url}/bmshj2018-factorized-ms-ssim-8-8811bd14.pth.tar",
},
},
"bmshj2018-hyperprior": {
"mse": {
1: f"{root_url}/bmshj2018-hyperprior-1-7eb97409.pth.tar",
2: f"{root_url}/bmshj2018-hyperprior-2-93677231.pth.tar",
3: f"{root_url}/bmshj2018-hyperprior-3-6d87be32.pth.tar",
4: f"{root_url}/bmshj2018-hyperprior-4-de1b779c.pth.tar",
5: f"{root_url}/bmshj2018-hyperprior-5-f8b614e1.pth.tar",
6: f"{root_url}/bmshj2018-hyperprior-6-1ab9c41e.pth.tar",
7: f"{root_url}/bmshj2018-hyperprior-7-3804dcbd.pth.tar",
8: f"{root_url}/bmshj2018-hyperprior-8-a583f0cf.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/bmshj2018-hyperprior-ms-ssim-1-5cf249be.pth.tar",
2: f"{root_url}/bmshj2018-hyperprior-ms-ssim-2-1ff60d1f.pth.tar",
3: f"{root_url}/bmshj2018-hyperprior-ms-ssim-3-92dd7878.pth.tar",
4: f"{root_url}/bmshj2018-hyperprior-ms-ssim-4-4377354e.pth.tar",
5: f"{root_url}/bmshj2018-hyperprior-ms-ssim-5-c34afc8d.pth.tar",
6: f"{root_url}/bmshj2018-hyperprior-ms-ssim-6-3a6d8229.pth.tar",
7: f"{root_url}/bmshj2018-hyperprior-ms-ssim-7-8747d3bc.pth.tar",
8: f"{root_url}/bmshj2018-hyperprior-ms-ssim-8-cc15b5f3.pth.tar",
},
},
"mbt2018-mean": {
"mse": {
1: f"{root_url}/mbt2018-mean-1-e522738d.pth.tar",
2: f"{root_url}/mbt2018-mean-2-e54a039d.pth.tar",
3: f"{root_url}/mbt2018-mean-3-723404a8.pth.tar",
4: f"{root_url}/mbt2018-mean-4-6dba02a3.pth.tar",
5: f"{root_url}/mbt2018-mean-5-d504e8eb.pth.tar",
6: f"{root_url}/mbt2018-mean-6-a19628ab.pth.tar",
7: f"{root_url}/mbt2018-mean-7-d5d441d1.pth.tar",
8: f"{root_url}/mbt2018-mean-8-8089ae3e.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/mbt2018-mean-ms-ssim-1-5bf9c0b6.pth.tar",
2: f"{root_url}/mbt2018-mean-ms-ssim-2-e2a1bf3f.pth.tar",
3: f"{root_url}/mbt2018-mean-ms-ssim-3-640ce819.pth.tar",
4: f"{root_url}/mbt2018-mean-ms-ssim-4-12626c13.pth.tar",
5: f"{root_url}/mbt2018-mean-ms-ssim-5-1be7f059.pth.tar",
6: f"{root_url}/mbt2018-mean-ms-ssim-6-b83bf379.pth.tar",
7: f"{root_url}/mbt2018-mean-ms-ssim-7-ddf9644c.pth.tar",
8: f"{root_url}/mbt2018-mean-ms-ssim-8-0cc7b94f.pth.tar",
},
},
"mbt2018": {
"mse": {
1: f"{root_url}/mbt2018-1-3f36cd77.pth.tar",
2: f"{root_url}/mbt2018-2-43b70cdd.pth.tar",
3: f"{root_url}/mbt2018-3-22901978.pth.tar",
4: f"{root_url}/mbt2018-4-456e2af9.pth.tar",
5: f"{root_url}/mbt2018-5-b4a046dd.pth.tar",
6: f"{root_url}/mbt2018-6-7052e5ea.pth.tar",
7: f"{root_url}/mbt2018-7-8ba2bf82.pth.tar",
8: f"{root_url}/mbt2018-8-dd0097aa.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/mbt2018-ms-ssim-1-2878436b.pth.tar",
2: f"{root_url}/mbt2018-ms-ssim-2-c41cb208.pth.tar",
3: f"{root_url}/mbt2018-ms-ssim-3-d0dd64e8.pth.tar",
4: f"{root_url}/mbt2018-ms-ssim-4-a120e037.pth.tar",
5: f"{root_url}/mbt2018-ms-ssim-5-9b30e3b7.pth.tar",
6: f"{root_url}/mbt2018-ms-ssim-6-f8b3626f.pth.tar",
7: f"{root_url}/mbt2018-ms-ssim-7-16e6ff50.pth.tar",
8: f"{root_url}/mbt2018-ms-ssim-8-0cb49d43.pth.tar",
},
},
"cheng2020-anchor": {
"mse": {
1: f"{root_url}/cheng2020-anchor-1-dad2ebff.pth.tar",
2: f"{root_url}/cheng2020-anchor-2-a29008eb.pth.tar",
3: f"{root_url}/cheng2020-anchor-3-e49be189.pth.tar",
4: f"{root_url}/cheng2020-anchor-4-98b0b468.pth.tar",
5: f"{root_url}/cheng2020-anchor-5-23852949.pth.tar",
6: f"{root_url}/cheng2020-anchor-6-4c052b1a.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/cheng2020_anchor-ms-ssim-1-20f521db.pth.tar",
2: f"{root_url}/cheng2020_anchor-ms-ssim-2-c7ff5812.pth.tar",
3: f"{root_url}/cheng2020_anchor-ms-ssim-3-c23e22d5.pth.tar",
4: f"{root_url}/cheng2020_anchor-ms-ssim-4-0e658304.pth.tar",
5: f"{root_url}/cheng2020_anchor-ms-ssim-5-c0a95e77.pth.tar",
6: f"{root_url}/cheng2020_anchor-ms-ssim-6-f2dc1913.pth.tar",
},
},
"cheng2020-attn": {
"mse": {
1: f"{root_url}/cheng2020_attn-mse-1-465f2b64.pth.tar",
2: f"{root_url}/cheng2020_attn-mse-2-e0805385.pth.tar",
3: f"{root_url}/cheng2020_attn-mse-3-2d07bbdf.pth.tar",
4: f"{root_url}/cheng2020_attn-mse-4-f7b0ccf2.pth.tar",
5: f"{root_url}/cheng2020_attn-mse-5-26c8920e.pth.tar",
6: f"{root_url}/cheng2020_attn-mse-6-730501f2.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/cheng2020_attn-ms-ssim-1-c5381d91.pth.tar",
2: f"{root_url}/cheng2020_attn-ms-ssim-2-5dad201d.pth.tar",
3: f"{root_url}/cheng2020_attn-ms-ssim-3-5c9be841.pth.tar",
4: f"{root_url}/cheng2020_attn-ms-ssim-4-8b2f647e.pth.tar",
5: f"{root_url}/cheng2020_attn-ms-ssim-5-5ca1f34c.pth.tar",
6: f"{root_url}/cheng2020_attn-ms-ssim-6-216423ec.pth.tar",
},
},
}
cfgs = {
"bmshj2018-factorized": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (128, 192),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"bmshj2018-hyperprior": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (128, 192),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"mbt2018-mean": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (192, 320),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"mbt2018": {
1: (192, 192),
2: (192, 192),
3: (192, 192),
4: (192, 192),
5: (192, 320),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"cheng2020-anchor": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
"cheng2020-attn": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
"multiscale-decomp": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
}
def _load_model(
architecture, metric, quality, pretrained=False, progress=True, **kwargs
):
if architecture not in model_architectures:
raise ValueError(f'Invalid architecture name "{architecture}"')
if quality not in cfgs[architecture]:
raise ValueError(f'Invalid quality value "{quality}"')
if pretrained:
if (
architecture not in model_urls
or metric not in model_urls[architecture]
or quality not in model_urls[architecture][metric]
):
raise RuntimeError("Pre-trained model not yet available")
url = model_urls[architecture][metric][quality]
state_dict = load_state_dict_from_url(url, progress=progress)
state_dict = load_pretrained(state_dict)
model = model_architectures[architecture].from_state_dict(state_dict)
return model
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
return model
def bmshj2018_factorized(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_, Int Conf. on Learning Representations
(ICLR), 2018.
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model(
"bmshj2018-factorized", metric, quality, pretrained, progress, **kwargs
)
def bmshj2018_hyperprior(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
(ICLR), 2018.
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model(
"bmshj2018-hyperprior", metric, quality, pretrained, progress, **kwargs
)
def mbt2018_mean(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Scale Hyperprior with non zero-mean Gaussian conditionals from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model("mbt2018-mean", metric, quality, pretrained, progress, **kwargs)
def mbt2018(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Joint Autoregressive Hierarchical Priors model from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model("mbt2018", metric, quality, pretrained, progress, **kwargs)
def cheng2020_anchor(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Anchor model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
quality (int): Quality levels (1: lowest, highest: 6)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model(
"cheng2020-anchor", metric, quality, pretrained, progress, **kwargs
)
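# Illustrative sketch (added, not part of the original file): instantiating a
# zoo model, e.g.
#   net = cheng2020_anchor(quality=3, metric="mse", pretrained=True).eval()
#   net.update()  # (re)build entropy-coder CDFs if compress()/decompress() is needed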
def cheng2020_attn(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Self-attention model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
quality (int): Quality levels (1: lowest, highest: 6)
metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model(
"cheng2020-attn", metric, quality, pretrained, progress, **kwargs
)
def _load_model_for_multi_scale(
architecture, metric, quality, pretrained=False, progress=True, **kwargs
):
if architecture not in model_architectures:
raise ValueError(f'Invalid architecture name "{architecture}"')
if quality not in cfgs[architecture]:
raise ValueError(f'Invalid quality value "{quality}"')
if pretrained:
url = model_urls["cheng2020-anchor"][metric][quality]
state_dict = load_state_dict_from_url(url, progress=progress)
state_dict = load_pretrained(state_dict)
        new_statedict = {}
        for key in state_dict.keys():
            # Keys outside the analysis transform g_a are reused as-is.
            if "g_a" not in key:
                new_statedict[key] = state_dict[key]
                continue
            # Remap the pretrained Cheng2020 encoder onto the split encoder of
            # MultiscaleDecomp: layers g_a.0-2 become g_a_block1.0-2 and
            # layers g_a.3-6 become g_a_block2.0-3.
            for num in range(0, 3):
                if "g_a."+ str(num) in key:
                    new_statedict[key.replace("g_a", "g_a_block1")] = state_dict[key]
                    break
            for num in range(3, 7):
                if "g_a."+ str(num) in key:
                    new_statedict[key.replace("g_a", "g_a_block2").replace(str(num), str(num-3))] = state_dict[key]
                    break
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
my_model_dict = model.state_dict()
my_model_dict.update(new_statedict)
model.load_state_dict(my_model_dict)
# free memory
state_dict = None
new_statedict = None
my_model_dict = None
return model
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
return model
def multiscale_decomp(quality, metric="mse", pretrained=False, progress=True, **kwargs):
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model_for_multi_scale(
"multiscale-decomp", metric, quality, pretrained, progress, **kwargs
)
| 19,465 | 39.469854 | 115 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/zoo/pretrained.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Dict
from torch import Tensor
def rename_key(key: str) -> str:
"""Rename state_dict key."""
# Deal with modules trained with DataParallel
if key.startswith("module."):
key = key[7:]
# ResidualBlockWithStride: 'downsample' -> 'skip'
if ".downsample." in key:
return key.replace("downsample", "skip")
# EntropyBottleneck: nn.ParameterList to nn.Parameters
if key.startswith("entropy_bottleneck."):
if key.startswith("entropy_bottleneck._biases."):
return f"entropy_bottleneck._bias{key[-1]}"
if key.startswith("entropy_bottleneck._matrices."):
return f"entropy_bottleneck._matrix{key[-1]}"
if key.startswith("entropy_bottleneck._factors."):
return f"entropy_bottleneck._factor{key[-1]}"
return key
def load_pretrained(state_dict: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""Convert state_dict keys."""
state_dict = {rename_key(k): v for k, v in state_dict.items()}
return state_dict
| 2,750 | 41.323077 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/SiddDataset.py | import random
import os, glob
import json
import torch
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
class SiddDataset(Dataset):
def __init__(self, dataset_opt):
self.root = dataset_opt['root']
self.transform = transforms.ToTensor()
self.patch_size = dataset_opt['patch_size']
self.phase = dataset_opt['phase']
if self.phase not in ['train', 'val', 'sidd']:
raise NotImplementedError('wrong phase argument!')
alpha = 60 if self.phase == 'train' else 1
self.samples = []
with open(self.root, 'r') as f:
data = json.load(f)
for i, scene in enumerate(sorted(data.keys())):
all_scene_items = sorted(data[scene].items())
for entry, entry_dict in all_scene_items:
for _ in range(alpha):
self.samples.append(entry_dict)
def __getitem__(self, index):
sample_dict = self.samples[index]
if self.phase == 'train':
img_dir = sample_dict['img_dir']
gt_prefix = sample_dict['gt_prefix']
noisy_prefix = sample_dict['noisy_prefix']
row, col = sample_dict['row'], sample_dict['col']
h, w = sample_dict['h'], sample_dict['w']
H, W = sample_dict['H'], sample_dict['W']
            # Sample a random patch_size crop from the full H x W image, then
            # determine which of the pre-cut h x w tiles (laid out on a
            # row x col grid) it touches: rows r1..r2 and columns c1..c2.
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            r1 = rnd_h // h
            r2 = (rnd_h+self.patch_size-1) // h
            c1 = rnd_w // w
            c2 = (rnd_w+self.patch_size-1) // w
            # keep the tile indices in ascending order so the tiles are
            # stitched top-to-bottom / left-to-right below
            rs = sorted({r1, r2})
            cs = sorted({c1, c2})
            # crop offsets relative to the top-left loaded tile
            rnd_h = rnd_h % h
            rnd_w = rnd_w % w
            # assert r1 < row and r2 < row and c1 < col and c2 < col, 'row={:d}, r1={:d}, r2={:d}; col={:d}, c1={:d}, c2={:d}'.format(row, r1, r2, col, c1, c2)
gt = []
noisy = []
for r in rs:
gt_r = []
noisy_r = []
for c in cs:
gt_path = os.path.join(img_dir, '{:s}_{:02d}_{:02d}.png'.format(gt_prefix, r+1, c+1))
gt_rc = Image.open(gt_path).convert("RGB")
gt_rc = self.transform(gt_rc)
gt_r.append(gt_rc)
noisy_path = os.path.join(img_dir, '{:s}_{:02d}_{:02d}.png'.format(noisy_prefix, r+1, c+1))
noisy_rc = Image.open(noisy_path).convert("RGB")
noisy_rc = self.transform(noisy_rc)
noisy_r.append(noisy_rc)
gt_r = torch.cat(gt_r, dim=2)
gt.append(gt_r)
noisy_r = torch.cat(noisy_r, dim=2)
noisy.append(noisy_r)
gt = torch.cat(gt, dim=1)[:, rnd_h:rnd_h+self.patch_size, rnd_w:rnd_w+self.patch_size]
noisy = torch.cat(noisy, dim=1)[:, rnd_h:rnd_h+self.patch_size, rnd_w:rnd_w+self.patch_size]
return gt, noisy
elif self.phase == 'val':
gt = Image.open(sample_dict['gt_path']).convert("RGB")
noisy = Image.open(sample_dict['noisy_path']).convert("RGB")
gt = self.transform(gt)
noisy = self.transform(noisy)
return gt, noisy
else:
noisy = Image.open(sample_dict['noisy_path']).convert("RGB")
noisy = self.transform(noisy)
return noisy
def __len__(self):
return len(self.samples)
| 3,608 | 38.659341 | 159 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/SyntheticDataset.py | import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
from pathlib import Path
from .utils import sRGBGamma, UndosRGBGamma
from torchvision import transforms
class SyntheticDataset(Dataset):
def __init__(self, dataset_opt):
splitdir = Path(dataset_opt['root']) / dataset_opt['phase']
if not splitdir.is_dir():
raise RuntimeError(f'Invalid directory "{splitdir}"')
self.samples = sorted([f for f in splitdir.iterdir() if f.is_file()])
self.phase = dataset_opt['phase']
if self.phase == 'train':
self.transform = transforms.Compose(
[transforms.RandomCrop(dataset_opt['patch_size']), transforms.ToTensor()]
)
elif self.phase == 'val':
self.transform = transforms.Compose(
[transforms.CenterCrop(dataset_opt['patch_size']), transforms.ToTensor()]
)
self.sigma_reads = [0.0068354, 0.01572141, 0.03615925, 0.08316627]
self.sigma_shots = [0.05200081**2, 0.07886314**2, 0.11960187**2, 0.18138522**2]
self.choices = len(self.sigma_reads)
else:
raise NotImplementedError('wrong phase argument!')
def __getitem__(self, index):
gt = Image.open(self.samples[index]).convert("RGB")
gt = self.transform(gt)
# degamma
noisy_degamma = UndosRGBGamma(gt)
# sample read and shot noise
if self.phase == 'train':
sigma_read = torch.from_numpy(
np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1)))
).type_as(noisy_degamma)
sigma_shot = torch.from_numpy(
np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1)))
).type_as(noisy_degamma)
else:
sigma_read = torch.from_numpy(
np.array([[[self.sigma_reads[index % self.choices]]]])
).type_as(noisy_degamma)
sigma_shot = torch.from_numpy(
np.array([[[self.sigma_shots[index % self.choices]]]])
).type_as(noisy_degamma)
sigma_read_com = sigma_read.expand_as(noisy_degamma)
sigma_shot_com = sigma_shot.expand_as(noisy_degamma)
# apply the noise formula from the paper
if self.phase == 'train':
generator = None
else:
generator = torch.Generator()
generator.manual_seed(index)
noisy_degamma = torch.normal(noisy_degamma,
torch.sqrt(sigma_read_com ** 2 + noisy_degamma * sigma_shot_com),
generator=generator
).type_as(noisy_degamma)
# gamma
noisy = sRGBGamma(noisy_degamma)
# clamping
noisy = torch.clamp(noisy, 0.0, 1.0)
return gt, noisy
def __len__(self):
return len(self.samples)
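def _noise_model_sketch(clean_srgb, sigma_read=0.0068354, sigma_shot=0.05200081 ** 2):
    # Illustrative sketch (not part of the original dataset class): the
    # heteroscedastic read/shot noise model applied in __getitem__ above, for a
    # single sRGB tensor in [0, 1]. The default sigmas correspond to the first
    # entry of the validation noise levels listed in __init__.
    linear = UndosRGBGamma(clean_srgb)  # work in (approximately) linear space
    std = torch.sqrt(sigma_read ** 2 + linear * sigma_shot)
    noisy_linear = torch.normal(linear, std)
    return torch.clamp(sRGBGamma(noisy_linear), 0.0, 1.0)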
| 2,867 | 34.85 | 91 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/SyntheticTestDataset.py | import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
from pathlib import Path
from .utils import sRGBGamma, UndosRGBGamma
from torchvision import transforms
class SyntheticTestDataset(Dataset):
def __init__(self, dataset_opt):
root = Path(dataset_opt['root'])
if not root.is_dir():
raise RuntimeError(f'Invalid directory "{root}"')
self.samples = sorted([f for f in root.iterdir() if f.is_file()])
self.phase = dataset_opt['phase']
self.transform = transforms.ToTensor()
noise_level = dataset_opt['level']
sigma_reads = [0.0068354, 0.01572141, 0.03615925, 0.08316627]
sigma_shots = [0.05200081**2, 0.07886314**2, 0.11960187**2, 0.18138522**2]
self.sigma_read = sigma_reads[noise_level-1]
self.sigma_shot = sigma_shots[noise_level-1]
def __getitem__(self, index):
gt = Image.open(self.samples[index]).convert("RGB")
gt = self.transform(gt)
# degamma
noisy_degamma = UndosRGBGamma(gt)
# read and shot noise
sigma_read = torch.from_numpy(
np.array([[[self.sigma_read]]])
).type_as(noisy_degamma)
sigma_shot = torch.from_numpy(
np.array([[[self.sigma_shot]]])
).type_as(noisy_degamma)
sigma_read_com = sigma_read.expand_as(noisy_degamma)
sigma_shot_com = sigma_shot.expand_as(noisy_degamma)
# apply the noise formula from the paper
generator = torch.Generator()
generator.manual_seed(0)
noisy_degamma = torch.normal(noisy_degamma,
torch.sqrt(sigma_read_com ** 2 + noisy_degamma * sigma_shot_com),
generator=generator
).type_as(noisy_degamma)
# gamma
noisy = sRGBGamma(noisy_degamma)
# clamping
noisy = torch.clamp(noisy, 0.0, 1.0)
return gt, noisy
def __len__(self):
return len(self.samples)
| 1,969 | 31.295082 | 82 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/datasets/utils.py | import random
import numpy as np
import torch
def sRGBGamma(tensor):
threshold = 0.0031308
a = 0.055
mult = 12.92
gamma = 2.4
res = torch.zeros_like(tensor)
mask = tensor > threshold
res[mask] = (1 + a) * torch.pow(tensor[mask] + 0.001, 1.0 / gamma) - a
res[~mask] = tensor[~mask] * mult
return res
def UndosRGBGamma(tensor):
threshold = 0.0031308
a = 0.055
mult = 12.92
gamma = 2.4
res = torch.zeros_like(tensor)
mask = tensor > threshold
res[~mask] = tensor[~mask] / mult
res[mask] = torch.pow(tensor[mask] + a, gamma) / (1 + a)
return res
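# Descriptive note (added for clarity, not in the original file): the two
# mappings above approximate the sRGB transfer functions,
#   sRGBGamma:     y = 12.92 * x                            if x <= 0.0031308
#                  y = 1.055 * (x + 0.001)**(1 / 2.4) - 0.055  otherwise
#   UndosRGBGamma: x = y / 12.92                            if y <= 0.0031308
#                  x = (y + 0.055)**2.4 / 1.055             otherwise
# As written they are only approximate inverses of each other (standard sRGB
# would use ((y + 0.055) / 1.055)**2.4 and a 0.04045 threshold for the inverse).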
def set_seeds(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed=seed) | 717 | 22.933333 | 74 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/layers/gdn.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from compressai.ops.parametrizers import NonNegativeParametrizer
__all__ = ["GDN", "GDN1"]
class GDN(nn.Module):
r"""Generalized Divisive Normalization layer.
Introduced in `"Density Modeling of Images Using a Generalized Normalization
Transformation" <https://arxiv.org/abs/1511.06281>`_,
by Balle Johannes, Valero Laparra, and Eero P. Simoncelli, (2016).
.. math::
y[i] = \frac{x[i]}{\sqrt{\beta[i] + \sum_j(\gamma[j, i] * x[j]^2)}}
"""
def __init__(
self,
in_channels: int,
inverse: bool = False,
beta_min: float = 1e-6,
gamma_init: float = 0.1,
):
super().__init__()
beta_min = float(beta_min)
gamma_init = float(gamma_init)
self.inverse = bool(inverse)
self.beta_reparam = NonNegativeParametrizer(minimum=beta_min)
beta = torch.ones(in_channels)
beta = self.beta_reparam.init(beta)
self.beta = nn.Parameter(beta)
self.gamma_reparam = NonNegativeParametrizer()
gamma = gamma_init * torch.eye(in_channels)
gamma = self.gamma_reparam.init(gamma)
self.gamma = nn.Parameter(gamma)
def forward(self, x: Tensor) -> Tensor:
_, C, _, _ = x.size()
beta = self.beta_reparam(self.beta)
gamma = self.gamma_reparam(self.gamma)
gamma = gamma.reshape(C, C, 1, 1)
norm = F.conv2d(x ** 2, gamma, beta)
if self.inverse:
norm = torch.sqrt(norm)
else:
norm = torch.rsqrt(norm)
out = x * norm
return out
class GDN1(GDN):
r"""Simplified GDN layer.
Introduced in `"Computationally Efficient Neural Image Compression"
<http://arxiv.org/abs/1912.08771>`_, by Johnston Nick, Elad Eban, Ariel
Gordon, and Johannes Ballé, (2019).
.. math::
        y[i] = \frac{x[i]}{\beta[i] + \sum_j(\gamma[j, i] * |x[j]|)}
"""
def forward(self, x: Tensor) -> Tensor:
_, C, _, _ = x.size()
beta = self.beta_reparam(self.beta)
gamma = self.gamma_reparam(self.gamma)
gamma = gamma.reshape(C, C, 1, 1)
norm = F.conv2d(torch.abs(x), gamma, beta)
if not self.inverse:
norm = 1.0 / norm
out = x * norm
return out
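def _gdn_usage_sketch():
    # Illustrative sketch (not part of the original module): GDN and GDN1 are
    # channel-wise normalizations that preserve the input shape; inverse=True
    # gives the IGDN variant typically used in synthesis transforms.
    x = torch.rand(1, 32, 16, 16)
    y = GDN(32)(x)                    # forward GDN, same shape as x
    x_hat = GDN(32, inverse=True)(y)  # IGDN (independently initialized here, not an exact inverse)
    z = GDN1(32)(x)                   # cheaper |x|-based variant
    return y.shape == x_hat.shape == z.shape == x.shape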
| 4,085 | 32.491803 | 80 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/layers/layers.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Any
import torch
import torch.nn as nn
from torch import Tensor
from .gdn import GDN
__all__ = [
"AttentionBlock",
"MaskedConv2d",
"ResidualBlock",
"ResidualBlockUpsample",
"ResidualBlockWithStride",
"conv3x3",
"subpel_conv3x3",
"conv1x1",
]
class MaskedConv2d(nn.Conv2d):
r"""Masked 2D convolution implementation, mask future "unseen" pixels.
Useful for building auto-regressive network components.
Introduced in `"Conditional Image Generation with PixelCNN Decoders"
<https://arxiv.org/abs/1606.05328>`_.
Inherits the same arguments as a `nn.Conv2d`. Use `mask_type='A'` for the
first layer (which also masks the "current pixel"), `mask_type='B'` for the
following layers.
"""
def __init__(self, *args: Any, mask_type: str = "A", **kwargs: Any):
super().__init__(*args, **kwargs)
if mask_type not in ("A", "B"):
raise ValueError(f'Invalid "mask_type" value "{mask_type}"')
self.register_buffer("mask", torch.ones_like(self.weight.data))
_, _, h, w = self.mask.size()
self.mask[:, :, h // 2, w // 2 + (mask_type == "B") :] = 0
self.mask[:, :, h // 2 + 1 :] = 0
def forward(self, x: Tensor) -> Tensor:
        # TODO(begaintj): weight assignment is not supported by torchscript
self.weight.data *= self.mask
return super().forward(x)
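# Illustrative note (added for clarity, not in the original module): for a 3x3
# kernel the mask buffered in __init__ looks like
#   mask_type="A"        mask_type="B"
#   1 1 1                1 1 1
#   1 0 0                1 1 0
#   0 0 0                0 0 0
# i.e. type "A" also hides the current pixel, type "B" hides only future ones.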
def conv3x3(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
def subpel_conv3x3(in_ch: int, out_ch: int, r: int = 1) -> nn.Sequential:
"""3x3 sub-pixel convolution for up-sampling."""
return nn.Sequential(
nn.Conv2d(in_ch, out_ch * r ** 2, kernel_size=3, padding=1), nn.PixelShuffle(r)
)
def conv1x1(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:
"""1x1 convolution."""
return nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride)
class ResidualBlockWithStride(nn.Module):
"""Residual block with a stride on the first convolution.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
stride (int): stride value (default: 2)
"""
def __init__(self, in_ch: int, out_ch: int, stride: int = 2):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch, stride=stride)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
self.gdn = GDN(out_ch)
if stride != 1 or in_ch != out_ch:
self.skip = conv1x1(in_ch, out_ch, stride=stride)
else:
self.skip = None
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.gdn(out)
if self.skip is not None:
identity = self.skip(x)
out += identity
return out
class ResidualBlockUpsample(nn.Module):
"""Residual block with sub-pixel upsampling on the last convolution.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
upsample (int): upsampling factor (default: 2)
"""
def __init__(self, in_ch: int, out_ch: int, upsample: int = 2):
super().__init__()
self.subpel_conv = subpel_conv3x3(in_ch, out_ch, upsample)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv = conv3x3(out_ch, out_ch)
self.igdn = GDN(out_ch, inverse=True)
self.upsample = subpel_conv3x3(in_ch, out_ch, upsample)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.subpel_conv(x)
out = self.leaky_relu(out)
out = self.conv(out)
out = self.igdn(out)
identity = self.upsample(x)
out += identity
return out
class ResidualBlock(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch: int, out_ch: int, stride: int = 1):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch, stride)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
if in_ch != out_ch or stride > 1:
self.skip = conv1x1(in_ch, out_ch, stride)
else:
self.skip = None
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
if self.skip is not None:
identity = self.skip(x)
out = out + identity
return out
class AttentionBlock(nn.Module):
"""Self attention block.
Simplified variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
N (int): Number of channels)
"""
def __init__(self, N: int):
super().__init__()
class ResidualUnit(nn.Module):
"""Simple residual unit."""
def __init__(self):
super().__init__()
self.conv = nn.Sequential(
conv1x1(N, N // 2),
nn.ReLU(inplace=True),
conv3x3(N // 2, N // 2),
nn.ReLU(inplace=True),
conv1x1(N // 2, N),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv(x)
out += identity
out = self.relu(out)
return out
self.conv_a = nn.Sequential(ResidualUnit(), ResidualUnit(), ResidualUnit())
self.conv_b = nn.Sequential(
ResidualUnit(),
ResidualUnit(),
ResidualUnit(),
conv1x1(N, N),
)
def forward(self, x: Tensor) -> Tensor:
identity = x
a = self.conv_a(x)
b = self.conv_b(x)
out = a * torch.sigmoid(b)
out += identity
return out
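def _attention_block_sketch():
    # Illustrative sketch (not part of the original module): the block gates the
    # trunk branch a with sigmoid(b) and adds a residual, so the shape is preserved.
    x = torch.rand(1, 64, 16, 16)
    return AttentionBlock(64)(x).shape == x.shape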
| 8,243 | 32.376518 | 87 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/bench/__main__.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Collect performance metrics of published traditional or end-to-end image
codecs.
"""
import argparse
import json
import multiprocessing as mp
import os
import sys
from collections import defaultdict
from itertools import starmap
from typing import List
from .codecs import AV1, BPG, HM, JPEG, JPEG2000, TFCI, VTM, Codec, WebP
# from torchvision.datasets.folder
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
codecs = [JPEG, WebP, JPEG2000, BPG, TFCI, VTM, HM, AV1]
# we need the quality index (not value) to compute the stats later
def func(codec, i, *args):
rv = codec.run(*args)
return i, rv
def collect(
codec: Codec,
dataset: str,
qualities: List[int],
metrics: List[str],
num_jobs: int = 1,
):
if not os.path.isdir(dataset):
raise OSError(f"No such directory: {dataset}")
filepaths = [
os.path.join(dirpath, f)
for dirpath, _, filenames in os.walk(dataset)
for f in filenames
if os.path.splitext(f)[-1].lower() in IMG_EXTENSIONS
]
pool = mp.Pool(num_jobs) if num_jobs > 1 else None
if len(filepaths) == 0:
print("No images found in the dataset directory")
sys.exit(1)
args = [
(codec, i, f, q, metrics) for i, q in enumerate(qualities) for f in filepaths
]
if pool:
rv = pool.starmap(func, args)
else:
rv = list(starmap(func, args))
results = [defaultdict(float) for _ in range(len(qualities))]
for i, metrics in rv:
for k, v in metrics.items():
results[i][k] += v
# aggregate results for all images
for i, _ in enumerate(results):
for k, v in results[i].items():
results[i][k] = v / len(filepaths)
# list of dict -> dict of list
out = defaultdict(list)
for r in results:
for k, v in r.items():
out[k].append(v)
return out
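# Illustrative note (added for clarity, not in the original script): the loops
# above first average every metric over all images for each quality index, then
# transpose the per-quality dicts into one dict of lists, e.g.
#   [{"psnr": 30.0, "bpp": 0.2}, {"psnr": 32.0, "bpp": 0.4}]
#   -> {"psnr": [30.0, 32.0], "bpp": [0.2, 0.4]}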
def setup_args():
description = "Collect codec metrics."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="codec", help="Select codec")
subparsers.required = True
return parser, subparsers
def setup_common_args(parser):
parser.add_argument("dataset", type=str)
parser.add_argument(
"-j",
"--num-jobs",
type=int,
metavar="N",
default=1,
help="number of parallel jobs (default: %(default)s)",
)
parser.add_argument(
"-q",
"--quality",
dest="qualities",
metavar="Q",
default=[75],
nargs="+",
type=int,
help="quality parameter (default: %(default)s)",
)
parser.add_argument(
"--metrics",
dest="metrics",
default=["psnr", "ms-ssim"],
nargs="+",
help="do not return PSNR and MS-SSIM metrics (use for very small images)",
)
def main(argv):
parser, subparsers = setup_args()
for c in codecs:
cparser = subparsers.add_parser(c.__name__.lower(), help=f"{c.__name__}")
setup_common_args(cparser)
c.setup_args(cparser)
args = parser.parse_args(argv)
codec_cls = next(c for c in codecs if c.__name__.lower() == args.codec)
codec = codec_cls(args)
results = collect(
codec,
args.dataset,
args.qualities,
args.metrics,
args.num_jobs,
)
output = {
"name": codec.name,
"description": codec.description,
"results": results,
}
print(json.dumps(output, indent=2))
if __name__ == "__main__":
main(sys.argv[1:])
| 5,369 | 28.184783 | 85 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/utils/bench/codecs.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import io
import os
import platform
import subprocess
import sys
import time
from tempfile import mkstemp
from typing import Dict, List, Optional, Union
import numpy as np
import PIL
import PIL.Image as Image
import torch
from pytorch_msssim import ms_ssim
from compressai.transforms.functional import rgb2ycbcr, ycbcr2rgb
# from torchvision.datasets.folder
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
def filesize(filepath: str) -> int:
"""Return file size in bits of `filepath`."""
if not os.path.isfile(filepath):
raise ValueError(f'Invalid file "{filepath}".')
return os.stat(filepath).st_size
def read_image(filepath: str, mode: str = "RGB") -> np.array:
"""Return PIL image in the specified `mode` format."""
if not os.path.isfile(filepath):
raise ValueError(f'Invalid file "{filepath}".')
return Image.open(filepath).convert(mode)
def _compute_psnr(a, b, max_val: float = 255.0) -> float:
mse = torch.mean((a - b) ** 2).item()
psnr = 20 * np.log10(max_val) - 10 * np.log10(mse)
return psnr
def _compute_ms_ssim(a, b, max_val: float = 255.0) -> float:
return ms_ssim(a, b, data_range=max_val).item()
_metric_functions = {
"psnr": _compute_psnr,
"ms-ssim": _compute_ms_ssim,
}
def compute_metrics(
a: Union[np.array, Image.Image],
b: Union[np.array, Image.Image],
metrics: Optional[List[str]] = None,
max_val: float = 255.0,
) -> Dict[str, float]:
"""Returns PSNR and MS-SSIM between images `a` and `b`."""
if metrics is None:
metrics = ["psnr"]
def _convert(x):
if isinstance(x, Image.Image):
x = np.asarray(x)
x = torch.from_numpy(x.copy()).float().unsqueeze(0)
if x.size(3) == 3:
# (1, H, W, 3) -> (1, 3, H, W)
x = x.permute(0, 3, 1, 2)
return x
a = _convert(a)
b = _convert(b)
out = {}
for metric_name in metrics:
out[metric_name] = _metric_functions[metric_name](a, b, max_val)
return out
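def _psnr_sanity_check():
    # Illustrative sketch (not part of the original module): with 8-bit images,
    # an MSE of 1.0 corresponds to 20*log10(255) - 10*log10(1) ~= 48.13 dB.
    a = np.zeros((64, 64, 3), dtype=np.uint8)
    b = np.ones((64, 64, 3), dtype=np.uint8)  # every pixel off by exactly 1
    return compute_metrics(a, b, metrics=["psnr"])  # {"psnr": ~48.13}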
def run_command(cmd, ignore_returncodes=None):
cmd = [str(c) for c in cmd]
try:
rv = subprocess.check_output(cmd)
return rv.decode("ascii")
except subprocess.CalledProcessError as err:
if ignore_returncodes is not None and err.returncode in ignore_returncodes:
return err.output
print(err.output.decode("utf-8"))
sys.exit(1)
def _get_ffmpeg_version():
rv = run_command(["ffmpeg", "-version"])
return rv.split()[2]
def _get_bpg_version(encoder_path):
rv = run_command([encoder_path, "-h"], ignore_returncodes=[1])
return rv.split()[4]
class Codec(abc.ABC):
"""Abstract base class"""
_description = None
def __init__(self, args):
self._set_args(args)
def _set_args(self, args):
return args
@classmethod
def setup_args(cls, parser):
pass
@property
def description(self):
return self._description
@property
@abc.abstractmethod
def name(self):
raise NotImplementedError()
def _load_img(self, img):
return read_image(os.path.abspath(img))
@abc.abstractmethod
def _run_impl(self, img, quality, *args, **kwargs):
raise NotImplementedError()
def run(
self,
img,
quality: int,
metrics: Optional[List[str]] = None,
return_rec: bool = False,
):
img = self._load_img(img)
info, rec = self._run_impl(img, quality)
info.update(compute_metrics(rec, img, metrics))
if return_rec:
return info, rec
return info
class PillowCodec(Codec):
"""Abstract codec based on Pillow bindings."""
fmt = None
@property
def name(self):
raise NotImplementedError()
def _run_impl(self, img, quality):
start = time.time()
tmp = io.BytesIO()
img.save(tmp, format=self.fmt, quality=int(quality))
enc_time = time.time() - start
tmp.seek(0)
size = tmp.getbuffer().nbytes
start = time.time()
rec = Image.open(tmp)
rec.load()
dec_time = time.time() - start
bpp_val = float(size) * 8 / (img.size[0] * img.size[1])
out = {
"bpp": bpp_val,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
return out, rec
class JPEG(PillowCodec):
"""Use libjpeg linked in Pillow"""
fmt = "jpeg"
_description = f"JPEG. Pillow version {PIL.__version__}"
@property
def name(self):
return "JPEG"
class WebP(PillowCodec):
"""Use libwebp linked in Pillow"""
fmt = "webp"
_description = f"WebP. Pillow version {PIL.__version__}"
@property
def name(self):
return "WebP"
class BinaryCodec(Codec):
"""Call an external binary."""
fmt = None
@property
def name(self):
raise NotImplementedError()
def _run_impl(self, img, quality):
fd0, png_filepath = mkstemp(suffix=".png")
fd1, out_filepath = mkstemp(suffix=self.fmt)
# Encode
start = time.time()
run_command(self._get_encode_cmd(img, quality, out_filepath))
enc_time = time.time() - start
size = filesize(out_filepath)
# Decode
start = time.time()
run_command(self._get_decode_cmd(out_filepath, png_filepath))
dec_time = time.time() - start
# Read image
rec = read_image(png_filepath)
os.close(fd0)
os.remove(png_filepath)
os.close(fd1)
os.remove(out_filepath)
bpp_val = float(size) * 8 / (img.size[0] * img.size[1])
out = {
"bpp": bpp_val,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
return out, rec
def _get_encode_cmd(self, img, quality, out_filepath):
raise NotImplementedError()
def _get_decode_cmd(self, out_filepath, rec_filepath):
raise NotImplementedError()
class JPEG2000(BinaryCodec):
"""Use ffmpeg version.
(Not built-in support in default Pillow builds)
"""
fmt = ".jp2"
@property
def name(self):
return "JPEG2000"
@property
def description(self):
return f"JPEG2000. ffmpeg version {_get_ffmpeg_version()}"
def _get_encode_cmd(self, img, quality, out_filepath):
cmd = [
"ffmpeg",
"-loglevel",
"panic",
"-y",
"-i",
img,
"-vcodec",
"jpeg2000",
"-pix_fmt",
"yuv444p",
"-c:v",
"libopenjpeg",
"-compression_level",
quality,
out_filepath,
]
return cmd
def _get_decode_cmd(self, out_filepath, rec_filepath):
cmd = ["ffmpeg", "-loglevel", "panic", "-y", "-i", out_filepath, rec_filepath]
return cmd
class BPG(BinaryCodec):
"""BPG from Fabrice Bellard."""
fmt = ".bpg"
@property
def name(self):
return (
f"BPG {self.bitdepth}b {self.subsampling_mode} {self.encoder} "
f"{self.color_mode}"
)
@property
def description(self):
return f"BPG. BPG version {_get_bpg_version(self.encoder_path)}"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-m",
choices=["420", "444"],
default="444",
help="subsampling mode (default: %(default)s)",
)
parser.add_argument(
"-b",
choices=["8", "10"],
default="8",
help="bitdepth (default: %(default)s)",
)
parser.add_argument(
"-c",
choices=["rgb", "ycbcr"],
default="ycbcr",
help="colorspace (default: %(default)s)",
)
parser.add_argument(
"-e",
choices=["jctvc", "x265"],
default="x265",
help="HEVC implementation (default: %(default)s)",
)
parser.add_argument("--encoder-path", default="bpgenc", help="BPG encoder path")
parser.add_argument("--decoder-path", default="bpgdec", help="BPG decoder path")
def _set_args(self, args):
args = super()._set_args(args)
self.color_mode = args.c
self.encoder = args.e
self.subsampling_mode = args.m
self.bitdepth = args.b
self.encoder_path = args.encoder_path
self.decoder_path = args.decoder_path
return args
def _get_encode_cmd(self, img, quality, out_filepath):
if not 0 <= quality <= 51:
raise ValueError(f"Invalid quality value: {quality} (0,51)")
cmd = [
self.encoder_path,
"-o",
out_filepath,
"-q",
str(quality),
"-f",
self.subsampling_mode,
"-e",
self.encoder,
"-c",
self.color_mode,
"-b",
self.bitdepth,
img,
]
return cmd
def _get_decode_cmd(self, out_filepath, rec_filepath):
cmd = [self.decoder_path, "-o", rec_filepath, out_filepath]
return cmd
class TFCI(BinaryCodec):
"""Tensorflow image compression format from tensorflow/compression"""
fmt = ".tfci"
_models = [
"bmshj2018-factorized-mse",
"bmshj2018-hyperprior-mse",
"mbt2018-mean-mse",
]
@property
def description(self):
return "TFCI"
@property
def name(self):
return f"{self.model}"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-m",
"--model",
choices=cls._models,
default=cls._models[0],
help="model architecture (default: %(default)s)",
)
parser.add_argument(
"-p",
"--path",
required=True,
help="tfci python script path (default: %(default)s)",
)
def _set_args(self, args):
args = super()._set_args(args)
self.model = args.model
self.tfci_path = args.path
return args
def _get_encode_cmd(self, img, quality, out_filepath):
if not 1 <= quality <= 8:
raise ValueError(f"Invalid quality value: {quality} (1, 8)")
cmd = [
sys.executable,
self.tfci_path,
"compress",
f"{self.model}-{quality:d}",
img,
out_filepath,
]
return cmd
def _get_decode_cmd(self, out_filepath, rec_filepath):
cmd = [sys.executable, self.tfci_path, "decompress", out_filepath, rec_filepath]
return cmd
def get_vtm_encoder_path(build_dir):
system = platform.system()
try:
elfnames = {"Darwin": "EncoderApp", "Linux": "EncoderAppStatic"}
return os.path.join(build_dir, elfnames[system])
except KeyError as err:
raise RuntimeError(f'Unsupported platform "{system}"') from err
def get_vtm_decoder_path(build_dir):
system = platform.system()
try:
elfnames = {"Darwin": "DecoderApp", "Linux": "DecoderAppStatic"}
return os.path.join(build_dir, elfnames[system])
except KeyError as err:
raise RuntimeError(f'Unsupported platform "{system}"') from err
class VTM(Codec):
"""VTM: VVC reference software"""
fmt = ".bin"
@property
def description(self):
return "VTM"
@property
def name(self):
return "VTM"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-b",
"--build-dir",
type=str,
required=True,
help="VTM build dir",
)
parser.add_argument(
"-c",
"--config",
type=str,
required=True,
help="VTM config file",
)
parser.add_argument(
"--rgb", action="store_true", help="Use RGB color space (over YCbCr)"
)
def _set_args(self, args):
args = super()._set_args(args)
self.encoder_path = get_vtm_encoder_path(args.build_dir)
self.decoder_path = get_vtm_decoder_path(args.build_dir)
self.config_path = args.config
self.rgb = args.rgb
return args
def _run_impl(self, img, quality):
if not 0 <= quality <= 63:
raise ValueError(f"Invalid quality value: {quality} (0,63)")
# Taking 8bit input for now
bitdepth = 8
# Convert input image to yuv 444 file
arr = np.asarray(img)
fd, yuv_path = mkstemp(suffix=".yuv")
out_filepath = os.path.splitext(yuv_path)[0] + ".bin"
arr = arr.transpose((2, 0, 1)) # color channel first
if not self.rgb:
# convert rgb content to YCbCr
rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)
arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)
with open(yuv_path, "wb") as f:
f.write(arr.tobytes())
# Encode
height, width = arr.shape[1:]
cmd = [
self.encoder_path,
"-i",
yuv_path,
"-c",
self.config_path,
"-q",
quality,
"-o",
"/dev/null",
"-b",
out_filepath,
"-wdt",
width,
"-hgt",
height,
"-fr",
"1",
"-f",
"1",
"--InputChromaFormat=444",
"--InputBitDepth=8",
"--ConformanceMode=1",
]
if self.rgb:
cmd += [
"--InputColourSpaceConvert=RGBtoGBR",
"--SNRInternalColourSpace=1",
"--OutputInternalColourSpace=0",
]
start = time.time()
run_command(cmd)
enc_time = time.time() - start
# cleanup encoder input
os.close(fd)
os.unlink(yuv_path)
# Decode
cmd = [self.decoder_path, "-b", out_filepath, "-o", yuv_path, "-d", 8]
if self.rgb:
cmd.append("--OutputInternalColourSpace=GBRtoRGB")
start = time.time()
run_command(cmd)
dec_time = time.time() - start
# Compute PSNR
rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
rec_arr = rec_arr.reshape(arr.shape)
arr = arr.astype(np.float32) / (2 ** bitdepth - 1)
rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)
if not self.rgb:
arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
bpp = filesize(out_filepath) * 8.0 / (height * width)
# Cleanup
os.unlink(yuv_path)
os.unlink(out_filepath)
out = {
"bpp": bpp,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
rec = Image.fromarray(
(rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)
)
return out, rec
class HM(Codec):
"""HM: H.265/HEVC reference software"""
fmt = ".bin"
@property
def description(self):
return "HM"
@property
def name(self):
return "HM"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-b",
"--build-dir",
type=str,
required=True,
help="HM build dir",
)
parser.add_argument(
"-c", "--config", type=str, required=True, help="HM config file"
)
parser.add_argument(
"--rgb", action="store_true", help="Use RGB color space (over YCbCr)"
)
def _set_args(self, args):
args = super()._set_args(args)
self.encoder_path = os.path.join(args.build_dir, "TAppEncoderStatic")
self.decoder_path = os.path.join(args.build_dir, "TAppDecoderStatic")
self.config_path = args.config
self.rgb = args.rgb
return args
def _run_impl(self, img, quality):
if not 0 <= quality <= 51:
raise ValueError(f"Invalid quality value: {quality} (0,51)")
# Convert input image to yuv 444 file
arr = np.asarray(img)
fd, yuv_path = mkstemp(suffix=".yuv")
out_filepath = os.path.splitext(yuv_path)[0] + ".bin"
bitdepth = 8
arr = arr.transpose((2, 0, 1)) # color channel first
if not self.rgb:
# convert rgb content to YCbCr
rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)
arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)
with open(yuv_path, "wb") as f:
f.write(arr.tobytes())
# Encode
height, width = arr.shape[1:]
cmd = [
self.encoder_path,
"-i",
yuv_path,
"-c",
self.config_path,
"-q",
quality,
"-o",
"/dev/null",
"-b",
out_filepath,
"-wdt",
width,
"-hgt",
height,
"-fr",
"1",
"-f",
"1",
"--InputChromaFormat=444",
"--InputBitDepth=8",
"--SEIDecodedPictureHash",
"--Level=5.1",
"--CUNoSplitIntraACT=0",
"--ConformanceMode=1",
]
if self.rgb:
cmd += [
"--InputColourSpaceConvert=RGBtoGBR",
"--SNRInternalColourSpace=1",
"--OutputInternalColourSpace=0",
]
start = time.time()
run_command(cmd)
enc_time = time.time() - start
# cleanup encoder input
os.close(fd)
os.unlink(yuv_path)
# Decode
cmd = [self.decoder_path, "-b", out_filepath, "-o", yuv_path, "-d", 8]
if self.rgb:
cmd.append("--OutputInternalColourSpace=GBRtoRGB")
start = time.time()
run_command(cmd)
dec_time = time.time() - start
# Compute PSNR
rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
rec_arr = rec_arr.reshape(arr.shape)
arr = arr.astype(np.float32) / (2 ** bitdepth - 1)
rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)
if not self.rgb:
arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
bpp = filesize(out_filepath) * 8.0 / (height * width)
# Cleanup
os.unlink(yuv_path)
os.unlink(out_filepath)
out = {
"bpp": bpp,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
rec = Image.fromarray(
(rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)
)
return out, rec
class AV1(Codec):
"""AV1: AOM reference software"""
fmt = ".webm"
@property
def description(self):
return "AV1"
@property
def name(self):
return "AV1"
@classmethod
def setup_args(cls, parser):
super().setup_args(parser)
parser.add_argument(
"-b",
"--build-dir",
type=str,
required=True,
help="AOM binaries dir",
)
def _set_args(self, args):
args = super()._set_args(args)
self.encoder_path = os.path.join(args.build_dir, "aomenc")
self.decoder_path = os.path.join(args.build_dir, "aomdec")
return args
def _run_impl(self, img, quality):
if not 0 <= quality <= 63:
raise ValueError(f"Invalid quality value: {quality} (0,63)")
# Convert input image to yuv 444 file
arr = np.asarray(img)
fd, yuv_path = mkstemp(suffix=".yuv")
out_filepath = os.path.splitext(yuv_path)[0] + ".webm"
bitdepth = 8
arr = arr.transpose((2, 0, 1)) # color channel first
# convert rgb content to YCbCr
rgb = torch.from_numpy(arr.copy()).float() / (2 ** bitdepth - 1)
arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
arr = (arr * (2 ** bitdepth - 1)).astype(np.uint8)
with open(yuv_path, "wb") as f:
f.write(arr.tobytes())
# Encode
height, width = arr.shape[1:]
cmd = [
self.encoder_path,
"-w",
width,
"-h",
height,
"--fps=1/1",
"--limit=1",
"--input-bit-depth=8",
"--cpu-used=0",
"--threads=1",
"--passes=2",
"--end-usage=q",
"--cq-level=" + str(quality),
"--i444",
"--skip=0",
"--tune=psnr",
"--psnr",
"--bit-depth=8",
"-o",
out_filepath,
yuv_path,
]
start = time.time()
run_command(cmd)
enc_time = time.time() - start
# cleanup encoder input
os.close(fd)
os.unlink(yuv_path)
# Decode
cmd = [
self.decoder_path,
out_filepath,
"-o",
yuv_path,
"--rawvideo",
"--output-bit-depth=8",
]
start = time.time()
run_command(cmd)
dec_time = time.time() - start
# Compute PSNR
rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
rec_arr = rec_arr.reshape(arr.shape)
arr = arr.astype(np.float32) / (2 ** bitdepth - 1)
rec_arr = rec_arr.astype(np.float32) / (2 ** bitdepth - 1)
arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
bpp = filesize(out_filepath) * 8.0 / (height * width)
# Cleanup
os.unlink(yuv_path)
os.unlink(out_filepath)
out = {
"bpp": bpp,
"encoding_time": enc_time,
"decoding_time": dec_time,
}
rec = Image.fromarray(
(rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8)
)
return out, rec
| 24,347 | 26.053333 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/transforms/functional.py | from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
YCBCR_WEIGHTS = {
# Spec: (K_r, K_g, K_b) with K_g = 1 - K_r - K_b
"ITU-R_BT.709": (0.2126, 0.7152, 0.0722)
}
def _check_input_tensor(tensor: Tensor) -> None:
if (
not isinstance(tensor, Tensor)
or not tensor.is_floating_point()
or not len(tensor.size()) in (3, 4)
or not tensor.size(-3) == 3
):
raise ValueError(
"Expected a 3D or 4D tensor with shape (Nx3xHxW) or (3xHxW) as input"
)
def rgb2ycbcr(rgb: Tensor) -> Tensor:
"""RGB to YCbCr conversion for torch Tensor.
Using ITU-R BT.709 coefficients.
Args:
rgb (torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
ycbcr (torch.Tensor): converted tensor
"""
_check_input_tensor(rgb)
r, g, b = rgb.chunk(3, -3)
Kr, Kg, Kb = YCBCR_WEIGHTS["ITU-R_BT.709"]
y = Kr * r + Kg * g + Kb * b
cb = 0.5 * (b - y) / (1 - Kb) + 0.5
cr = 0.5 * (r - y) / (1 - Kr) + 0.5
ycbcr = torch.cat((y, cb, cr), dim=-3)
return ycbcr
def ycbcr2rgb(ycbcr: Tensor) -> Tensor:
"""YCbCr to RGB conversion for torch Tensor.
Using ITU-R BT.709 coefficients.
Args:
ycbcr (torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
rgb (torch.Tensor): converted tensor
"""
_check_input_tensor(ycbcr)
y, cb, cr = ycbcr.chunk(3, -3)
Kr, Kg, Kb = YCBCR_WEIGHTS["ITU-R_BT.709"]
r = y + (2 - 2 * Kr) * (cr - 0.5)
b = y + (2 - 2 * Kb) * (cb - 0.5)
g = (y - Kr * r - Kb * b) / Kg
rgb = torch.cat((r, g, b), dim=-3)
return rgb
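def _ycbcr_roundtrip_sketch():
    # Illustrative sketch (not part of the original module): the BT.709
    # conversion above is analytically invertible, so a round trip should
    # recover the input up to floating point error.
    rgb = torch.rand(1, 3, 8, 8)
    rec = ycbcr2rgb(rgb2ycbcr(rgb))
    return torch.allclose(rgb, rec, atol=1e-5)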
def yuv_444_to_420(
yuv: Union[Tensor, Tuple[Tensor, Tensor, Tensor]],
mode: str = "avg_pool",
) -> Tuple[Tensor, Tensor, Tensor]:
"""Convert a 444 tensor to a 420 representation.
Args:
yuv (torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)): 444
input to be downsampled. Takes either a (Nx3xHxW) tensor or a tuple
of 3 (Nx1xHxW) tensors.
mode (str): algorithm used for downsampling: ``'avg_pool'``. Default
``'avg_pool'``
Returns:
(torch.Tensor, torch.Tensor, torch.Tensor): Converted 420
"""
if mode not in ("avg_pool",):
raise ValueError(f'Invalid downsampling mode "{mode}".')
if mode == "avg_pool":
def _downsample(tensor):
return F.avg_pool2d(tensor, kernel_size=2, stride=2)
if isinstance(yuv, torch.Tensor):
y, u, v = yuv.chunk(3, 1)
else:
y, u, v = yuv
return (y, _downsample(u), _downsample(v))
def yuv_420_to_444(
yuv: Tuple[Tensor, Tensor, Tensor],
mode: str = "bilinear",
return_tuple: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor]]:
"""Convert a 420 input to a 444 representation.
Args:
yuv (torch.Tensor, torch.Tensor, torch.Tensor): 420 input frames in
(Nx1xHxW) format
mode (str): algorithm used for upsampling: ``'bilinear'`` |
``'nearest'`` Default ``'bilinear'``
return_tuple (bool): return input as tuple of tensors instead of a
concatenated tensor, 3 (Nx1xHxW) tensors instead of one (Nx3xHxW)
tensor (default: False)
Returns:
(torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)): Converted
444
"""
if len(yuv) != 3 or any(not isinstance(c, torch.Tensor) for c in yuv):
raise ValueError("Expected a tuple of 3 torch tensors")
if mode not in ("bilinear", "nearest"):
raise ValueError(f'Invalid upsampling mode "{mode}".')
if mode in ("bilinear", "nearest"):
def _upsample(tensor):
            # F.interpolate only accepts align_corners for interpolating modes,
            # so pass None when mode == "nearest" to avoid a ValueError.
            align_corners = False if mode == "bilinear" else None
            return F.interpolate(
                tensor, scale_factor=2, mode=mode, align_corners=align_corners
            )
y, u, v = yuv
u, v = _upsample(u), _upsample(v)
if return_tuple:
return y, u, v
return torch.cat((y, u, v), dim=1)
| 3,953 | 28.073529 | 88 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/transforms/transforms.py | from . import functional as F_transforms
__all__ = [
"RGB2YCbCr",
"YCbCr2RGB",
"YUV444To420",
"YUV420To444",
]
class RGB2YCbCr:
"""Convert a RGB tensor to YCbCr.
The tensor is expected to be in the [0, 1] floating point range, with a
shape of (3xHxW) or (Nx3xHxW).
"""
def __call__(self, rgb):
"""
Args:
rgb (torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
ycbcr(torch.Tensor): converted tensor
"""
return F_transforms.rgb2ycbcr(rgb)
def __repr__(self):
return f"{self.__class__.__name__}()"
class YCbCr2RGB:
"""Convert a YCbCr tensor to RGB.
The tensor is expected to be in the [0, 1] floating point range, with a
shape of (3xHxW) or (Nx3xHxW).
"""
def __call__(self, ycbcr):
"""
Args:
ycbcr(torch.Tensor): 3D or 4D floating point RGB tensor
Returns:
rgb(torch.Tensor): converted tensor
"""
return F_transforms.ycbcr2rgb(ycbcr)
def __repr__(self):
return f"{self.__class__.__name__}()"
class YUV444To420:
"""Convert a YUV 444 tensor to a 420 representation.
Args:
mode (str): algorithm used for downsampling: ``'avg_pool'``. Default
``'avg_pool'``
Example:
>>> x = torch.rand(1, 3, 32, 32)
>>> y, u, v = YUV444To420()(x)
>>> y.size() # 1, 1, 32, 32
>>> u.size() # 1, 1, 16, 16
"""
def __init__(self, mode: str = "avg_pool"):
self.mode = str(mode)
def __call__(self, yuv):
"""
Args:
yuv (torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)):
444 input to be downsampled. Takes either a (Nx3xHxW) tensor or
a tuple of 3 (Nx1xHxW) tensors.
Returns:
(torch.Tensor, torch.Tensor, torch.Tensor): Converted 420
"""
return F_transforms.yuv_444_to_420(yuv, mode=self.mode)
def __repr__(self):
return f"{self.__class__.__name__}()"
class YUV420To444:
"""Convert a YUV 420 input to a 444 representation.
Args:
mode (str): algorithm used for upsampling: ``'bilinear'`` | ``'nearest'``.
Default ``'bilinear'``
return_tuple (bool): return input as tuple of tensors instead of a
concatenated tensor, 3 (Nx1xHxW) tensors instead of one (Nx3xHxW)
tensor (default: False)
Example:
>>> y = torch.rand(1, 1, 32, 32)
>>> u, v = torch.rand(1, 1, 16, 16), torch.rand(1, 1, 16, 16)
>>> x = YUV420To444()((y, u, v))
>>> x.size() # 1, 3, 32, 32
"""
def __init__(self, mode: str = "bilinear", return_tuple: bool = False):
self.mode = str(mode)
self.return_tuple = bool(return_tuple)
def __call__(self, yuv):
"""
Args:
yuv (torch.Tensor, torch.Tensor, torch.Tensor): 420 input frames in
(Nx1xHxW) format
Returns:
(torch.Tensor or (torch.Tensor, torch.Tensor, torch.Tensor)): Converted
444
"""
return F_transforms.yuv_420_to_444(yuv, return_tuple=self.return_tuple)
def __repr__(self):
return f"{self.__class__.__name__}(return_tuple={self.return_tuple})"
| 3,308 | 26.806723 | 83 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/entropy_models/entropy_models.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import scipy.stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from compressai._CXX import pmf_to_quantized_cdf as _pmf_to_quantized_cdf
from compressai.ops import LowerBound
class _EntropyCoder:
"""Proxy class to an actual entropy coder class."""
def __init__(self, method):
if not isinstance(method, str):
raise ValueError(f'Invalid method type "{type(method)}"')
from compressai import available_entropy_coders
if method not in available_entropy_coders():
methods = ", ".join(available_entropy_coders())
raise ValueError(
f'Unknown entropy coder "{method}"' f" (available: {methods})"
)
if method == "ans":
from compressai import ans
encoder = ans.RansEncoder()
decoder = ans.RansDecoder()
elif method == "rangecoder":
import range_coder
encoder = range_coder.RangeEncoder()
decoder = range_coder.RangeDecoder()
self.name = method
self._encoder = encoder
self._decoder = decoder
def encode_with_indexes(self, *args, **kwargs):
return self._encoder.encode_with_indexes(*args, **kwargs)
def decode_with_indexes(self, *args, **kwargs):
return self._decoder.decode_with_indexes(*args, **kwargs)
def default_entropy_coder():
from compressai import get_entropy_coder
return get_entropy_coder()
def pmf_to_quantized_cdf(pmf: Tensor, precision: int = 16) -> Tensor:
cdf = _pmf_to_quantized_cdf(pmf.tolist(), precision)
cdf = torch.IntTensor(cdf)
return cdf
def _forward(self, *args: Any) -> Any:
raise NotImplementedError()
class EntropyModel(nn.Module):
r"""Entropy model base class.
Args:
likelihood_bound (float): minimum likelihood bound
entropy_coder (str, optional): set the entropy coder to use, use default
one if None
entropy_coder_precision (int): set the entropy coder precision
"""
def __init__(
self,
likelihood_bound: float = 1e-9,
entropy_coder: Optional[str] = None,
entropy_coder_precision: int = 16,
):
super().__init__()
if entropy_coder is None:
entropy_coder = default_entropy_coder()
self.entropy_coder = _EntropyCoder(entropy_coder)
self.entropy_coder_precision = int(entropy_coder_precision)
self.use_likelihood_bound = likelihood_bound > 0
if self.use_likelihood_bound:
self.likelihood_lower_bound = LowerBound(likelihood_bound)
# to be filled on update()
self.register_buffer("_offset", torch.IntTensor())
self.register_buffer("_quantized_cdf", torch.IntTensor())
self.register_buffer("_cdf_length", torch.IntTensor())
def __getstate__(self):
attributes = self.__dict__.copy()
attributes["entropy_coder"] = self.entropy_coder.name
return attributes
def __setstate__(self, state):
self.__dict__ = state
self.entropy_coder = _EntropyCoder(self.__dict__.pop("entropy_coder"))
@property
def offset(self):
return self._offset
@property
def quantized_cdf(self):
return self._quantized_cdf
@property
def cdf_length(self):
return self._cdf_length
# See: https://github.com/python/mypy/issues/8795
forward: Callable[..., Any] = _forward
def quantize(
self, inputs: Tensor, mode: str, means: Optional[Tensor] = None
) -> Tensor:
if mode not in ("noise", "dequantize", "symbols"):
raise ValueError(f'Invalid quantization mode: "{mode}"')
if mode == "noise":
half = float(0.5)
noise = torch.empty_like(inputs).uniform_(-half, half)
inputs = inputs + noise
return inputs
outputs = inputs.clone()
if means is not None:
outputs -= means
outputs = torch.round(outputs)
if mode == "dequantize":
if means is not None:
outputs += means
return outputs
assert mode == "symbols", mode
outputs = outputs.int()
return outputs
def _quantize(
self, inputs: Tensor, mode: str, means: Optional[Tensor] = None
) -> Tensor:
warnings.warn("_quantize is deprecated. Use quantize instead.")
return self.quantize(inputs, mode, means)
@staticmethod
def dequantize(
inputs: Tensor, means: Optional[Tensor] = None, dtype: torch.dtype = torch.float
) -> Tensor:
if means is not None:
outputs = inputs.type_as(means)
outputs += means
else:
outputs = inputs.type(dtype)
return outputs
@classmethod
def _dequantize(cls, inputs: Tensor, means: Optional[Tensor] = None) -> Tensor:
warnings.warn("_dequantize. Use dequantize instead.")
return cls.dequantize(inputs, means)
def _pmf_to_cdf(self, pmf, tail_mass, pmf_length, max_length):
cdf = torch.zeros(
(len(pmf_length), max_length + 2), dtype=torch.int32, device=pmf.device
)
for i, p in enumerate(pmf):
prob = torch.cat((p[: pmf_length[i]], tail_mass[i]), dim=0)
_cdf = pmf_to_quantized_cdf(prob, self.entropy_coder_precision)
cdf[i, : _cdf.size(0)] = _cdf
return cdf
def _check_cdf_size(self):
if self._quantized_cdf.numel() == 0:
raise ValueError("Uninitialized CDFs. Run update() first")
if len(self._quantized_cdf.size()) != 2:
raise ValueError(f"Invalid CDF size {self._quantized_cdf.size()}")
def _check_offsets_size(self):
if self._offset.numel() == 0:
raise ValueError("Uninitialized offsets. Run update() first")
if len(self._offset.size()) != 1:
raise ValueError(f"Invalid offsets size {self._offset.size()}")
def _check_cdf_length(self):
if self._cdf_length.numel() == 0:
raise ValueError("Uninitialized CDF lengths. Run update() first")
if len(self._cdf_length.size()) != 1:
raise ValueError(f"Invalid offsets size {self._cdf_length.size()}")
def compress(self, inputs, indexes, means=None):
"""
Compress input tensors to char strings.
Args:
inputs (torch.Tensor): input tensors
indexes (torch.IntTensor): tensors CDF indexes
means (torch.Tensor, optional): optional tensor means
"""
symbols = self.quantize(inputs, "symbols", means)
if len(inputs.size()) < 2:
raise ValueError(
"Invalid `inputs` size. Expected a tensor with at least 2 dimensions."
)
if inputs.size() != indexes.size():
raise ValueError("`inputs` and `indexes` should have the same size.")
self._check_cdf_size()
self._check_cdf_length()
self._check_offsets_size()
strings = []
for i in range(symbols.size(0)):
rv = self.entropy_coder.encode_with_indexes(
symbols[i].reshape(-1).int().tolist(),
indexes[i].reshape(-1).int().tolist(),
self._quantized_cdf.tolist(),
self._cdf_length.reshape(-1).int().tolist(),
self._offset.reshape(-1).int().tolist(),
)
strings.append(rv)
return strings
def decompress(
self,
strings: str,
indexes: torch.IntTensor,
dtype: torch.dtype = torch.float,
means: torch.Tensor = None,
):
"""
Decompress char strings to tensors.
Args:
strings (str): compressed tensors
indexes (torch.IntTensor): tensors CDF indexes
dtype (torch.dtype): type of dequantized output
means (torch.Tensor, optional): optional tensor means
"""
if not isinstance(strings, (tuple, list)):
raise ValueError("Invalid `strings` parameter type.")
if not len(strings) == indexes.size(0):
raise ValueError("Invalid strings or indexes parameters")
if len(indexes.size()) < 2:
raise ValueError(
"Invalid `indexes` size. Expected a tensor with at least 2 dimensions."
)
self._check_cdf_size()
self._check_cdf_length()
self._check_offsets_size()
if means is not None:
if means.size()[:2] != indexes.size()[:2]:
raise ValueError("Invalid means or indexes parameters")
if means.size() != indexes.size():
for i in range(2, len(indexes.size())):
if means.size(i) != 1:
raise ValueError("Invalid means parameters")
cdf = self._quantized_cdf
outputs = cdf.new_empty(indexes.size())
for i, s in enumerate(strings):
values = self.entropy_coder.decode_with_indexes(
s,
indexes[i].reshape(-1).int().tolist(),
cdf.tolist(),
self._cdf_length.reshape(-1).int().tolist(),
self._offset.reshape(-1).int().tolist(),
)
outputs[i] = torch.tensor(
values, device=outputs.device, dtype=outputs.dtype
).reshape(outputs[i].size())
outputs = self.dequantize(outputs, means, dtype)
return outputs
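def _quantize_modes_sketch():
    # Illustrative sketch (not part of the original module): the three modes of
    # EntropyModel.quantize(). EntropyBottleneck (defined below) is used only
    # because it is a concrete EntropyModel; constructing it assumes the
    # compiled entropy coder extensions are available.
    eb = EntropyBottleneck(channels=2)
    x = torch.randn(1, 2, 4, 4)
    noisy = eb.quantize(x, "noise")         # x + U(-0.5, 0.5): differentiable training proxy
    dequant = eb.quantize(x, "dequantize")  # round(x) (means re-added if provided), float output
    symbols = eb.quantize(x, "symbols")     # round(x) as int symbols for the range coder
    return noisy, dequant, symbols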
class EntropyBottleneck(EntropyModel):
r"""Entropy bottleneck layer, introduced by J. Ballé, D. Minnen, S. Singh,
S. J. Hwang, N. Johnston, in `"Variational image compression with a scale
hyperprior" <https://arxiv.org/abs/1802.01436>`_.
This is a re-implementation of the entropy bottleneck layer in
*tensorflow/compression*. See the original paper and the `tensorflow
documentation
<https://tensorflow.github.io/compression/docs/entropy_bottleneck.html>`__
for an introduction.
"""
_offset: Tensor
def __init__(
self,
channels: int,
*args: Any,
tail_mass: float = 1e-9,
init_scale: float = 10,
filters: Tuple[int, ...] = (3, 3, 3, 3),
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self.channels = int(channels)
self.filters = tuple(int(f) for f in filters)
self.init_scale = float(init_scale)
self.tail_mass = float(tail_mass)
# Create parameters
filters = (1,) + self.filters + (1,)
scale = self.init_scale ** (1 / (len(self.filters) + 1))
channels = self.channels
for i in range(len(self.filters) + 1):
init = np.log(np.expm1(1 / scale / filters[i + 1]))
matrix = torch.Tensor(channels, filters[i + 1], filters[i])
matrix.data.fill_(init)
self.register_parameter(f"_matrix{i:d}", nn.Parameter(matrix))
bias = torch.Tensor(channels, filters[i + 1], 1)
nn.init.uniform_(bias, -0.5, 0.5)
self.register_parameter(f"_bias{i:d}", nn.Parameter(bias))
if i < len(self.filters):
factor = torch.Tensor(channels, filters[i + 1], 1)
nn.init.zeros_(factor)
self.register_parameter(f"_factor{i:d}", nn.Parameter(factor))
self.quantiles = nn.Parameter(torch.Tensor(channels, 1, 3))
init = torch.Tensor([-self.init_scale, 0, self.init_scale])
self.quantiles.data = init.repeat(self.quantiles.size(0), 1, 1)
target = np.log(2 / self.tail_mass - 1)
self.register_buffer("target", torch.Tensor([-target, 0, target]))
def _get_medians(self) -> Tensor:
medians = self.quantiles[:, :, 1:2]
return medians
def update(self, force: bool = False) -> bool:
# Check if we need to update the bottleneck parameters, the offsets are
        # only computed and stored when the conditional model is update()'d.
if self._offset.numel() > 0 and not force:
return False
medians = self.quantiles[:, 0, 1]
minima = medians - self.quantiles[:, 0, 0]
minima = torch.ceil(minima).int()
minima = torch.clamp(minima, min=0)
maxima = self.quantiles[:, 0, 2] - medians
maxima = torch.ceil(maxima).int()
maxima = torch.clamp(maxima, min=0)
self._offset = -minima
pmf_start = medians - minima
pmf_length = maxima + minima + 1
max_length = pmf_length.max().item()
device = pmf_start.device
samples = torch.arange(max_length, device=device)
samples = samples[None, :] + pmf_start[:, None, None]
half = float(0.5)
lower = self._logits_cumulative(samples - half, stop_gradient=True)
upper = self._logits_cumulative(samples + half, stop_gradient=True)
sign = -torch.sign(lower + upper)
pmf = torch.abs(torch.sigmoid(sign * upper) - torch.sigmoid(sign * lower))
pmf = pmf[:, 0, :]
tail_mass = torch.sigmoid(lower[:, 0, :1]) + torch.sigmoid(-upper[:, 0, -1:])
quantized_cdf = self._pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)
self._quantized_cdf = quantized_cdf
self._cdf_length = pmf_length + 2
return True
def loss(self) -> Tensor:
logits = self._logits_cumulative(self.quantiles, stop_gradient=True)
loss = torch.abs(logits - self.target).sum()
return loss
def _logits_cumulative(self, inputs: Tensor, stop_gradient: bool) -> Tensor:
        # TorchScript not yet working (nn.Module indexing not supported)
logits = inputs
for i in range(len(self.filters) + 1):
matrix = getattr(self, f"_matrix{i:d}")
if stop_gradient:
matrix = matrix.detach()
logits = torch.matmul(F.softplus(matrix), logits)
bias = getattr(self, f"_bias{i:d}")
if stop_gradient:
bias = bias.detach()
logits += bias
if i < len(self.filters):
factor = getattr(self, f"_factor{i:d}")
if stop_gradient:
factor = factor.detach()
logits += torch.tanh(factor) * torch.tanh(logits)
return logits
@torch.jit.unused
def _likelihood(self, inputs: Tensor) -> Tensor:
half = float(0.5)
v0 = inputs - half
v1 = inputs + half
lower = self._logits_cumulative(v0, stop_gradient=False)
upper = self._logits_cumulative(v1, stop_gradient=False)
sign = -torch.sign(lower + upper)
sign = sign.detach()
likelihood = torch.abs(
torch.sigmoid(sign * upper) - torch.sigmoid(sign * lower)
)
return likelihood
def forward(
self, x: Tensor, training: Optional[bool] = None
) -> Tuple[Tensor, Tensor]:
if training is None:
training = self.training
if not torch.jit.is_scripting():
# x from B x C x ... to C x B x ...
perm = np.arange(len(x.shape))
perm[0], perm[1] = perm[1], perm[0]
# Compute inverse permutation
inv_perm = np.arange(len(x.shape))[np.argsort(perm)]
else:
# TorchScript in 2D for static inference
# Convert to (channels, ... , batch) format
perm = (1, 2, 3, 0)
inv_perm = (3, 0, 1, 2)
x = x.permute(*perm).contiguous()
shape = x.size()
values = x.reshape(x.size(0), 1, -1)
# Add noise or quantize
outputs = self.quantize(
values, "noise" if training else "dequantize", self._get_medians()
)
if not torch.jit.is_scripting():
likelihood = self._likelihood(outputs)
if self.use_likelihood_bound:
likelihood = self.likelihood_lower_bound(likelihood)
else:
# TorchScript not yet supported
likelihood = torch.zeros_like(outputs)
# Convert back to input tensor shape
outputs = outputs.reshape(shape)
outputs = outputs.permute(*inv_perm).contiguous()
likelihood = likelihood.reshape(shape)
likelihood = likelihood.permute(*inv_perm).contiguous()
return outputs, likelihood
@staticmethod
def _build_indexes(size):
dims = len(size)
N = size[0]
C = size[1]
view_dims = np.ones((dims,), dtype=np.int64)
view_dims[1] = -1
indexes = torch.arange(C).view(*view_dims)
indexes = indexes.int()
return indexes.repeat(N, 1, *size[2:])
@staticmethod
def _extend_ndims(tensor, n):
return tensor.reshape(-1, *([1] * n)) if n > 0 else tensor.reshape(-1)
def compress(self, x):
indexes = self._build_indexes(x.size())
medians = self._get_medians().detach()
spatial_dims = len(x.size()) - 2
medians = self._extend_ndims(medians, spatial_dims)
medians = medians.expand(x.size(0), *([-1] * (spatial_dims + 1)))
return super().compress(x, indexes, medians)
def decompress(self, strings, size):
output_size = (len(strings), self._quantized_cdf.size(0), *size)
indexes = self._build_indexes(output_size).to(self._quantized_cdf.device)
medians = self._extend_ndims(self._get_medians().detach(), len(size))
medians = medians.expand(len(strings), *([-1] * (len(size) + 1)))
return super().decompress(strings, indexes, medians.dtype, medians)
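# Illustrative usage sketch (not part of the original module): it shows the intended
# train-time vs. coding-time contract of EntropyBottleneck, assuming the compiled
# entropy-coder backend is available. Shapes and the channel count are made up.
def _entropy_bottleneck_usage_sketch():
    eb = EntropyBottleneck(channels=8)
    x = torch.rand(1, 8, 4, 4)
    # Training: additive-noise relaxation and a differentiable rate estimate in bits.
    y_hat, likelihoods = eb(x, training=True)
    rate_bits = (-torch.log2(likelihoods)).sum()
    # Coding: build the quantized CDF tables once, then compress / decompress.
    eb.update(force=True)
    strings = eb.compress(x)
    x_hat = eb.decompress(strings, x.size()[2:])
    return y_hat, rate_bits, x_hat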
class GaussianConditional(EntropyModel):
r"""Gaussian conditional layer, introduced by J. Ballé, D. Minnen, S. Singh,
S. J. Hwang, N. Johnston, in `"Variational image compression with a scale
hyperprior" <https://arxiv.org/abs/1802.01436>`_.
This is a re-implementation of the Gaussian conditional layer in
*tensorflow/compression*. See the `tensorflow documentation
<https://tensorflow.github.io/compression/docs/api_docs/python/tfc/GaussianConditional.html>`__
for more information.
"""
def __init__(
self,
scale_table: Optional[Union[List, Tuple]],
*args: Any,
scale_bound: float = 0.11,
tail_mass: float = 1e-9,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
if not isinstance(scale_table, (type(None), list, tuple)):
raise ValueError(f'Invalid type for scale_table "{type(scale_table)}"')
if isinstance(scale_table, (list, tuple)) and len(scale_table) < 1:
raise ValueError(f'Invalid scale_table length "{len(scale_table)}"')
if scale_table and (
scale_table != sorted(scale_table) or any(s <= 0 for s in scale_table)
):
raise ValueError(f'Invalid scale_table "({scale_table})"')
self.tail_mass = float(tail_mass)
if scale_bound is None and scale_table:
            scale_bound = scale_table[0]
if scale_bound <= 0:
raise ValueError("Invalid parameters")
self.lower_bound_scale = LowerBound(scale_bound)
self.register_buffer(
"scale_table",
self._prepare_scale_table(scale_table) if scale_table else torch.Tensor(),
)
self.register_buffer(
"scale_bound",
torch.Tensor([float(scale_bound)]) if scale_bound is not None else None,
)
@staticmethod
def _prepare_scale_table(scale_table):
return torch.Tensor(tuple(float(s) for s in scale_table))
def _standardized_cumulative(self, inputs: Tensor) -> Tensor:
half = float(0.5)
const = float(-(2 ** -0.5))
# Using the complementary error function maximizes numerical precision.
return half * torch.erfc(const * inputs)
@staticmethod
def _standardized_quantile(quantile):
return scipy.stats.norm.ppf(quantile)
def update_scale_table(self, scale_table, force=False):
# Check if we need to update the gaussian conditional parameters, the
        # offsets are only computed and stored when the conditional model is
# updated.
if self._offset.numel() > 0 and not force:
return False
device = self.scale_table.device
self.scale_table = self._prepare_scale_table(scale_table).to(device)
self.update()
return True
def update(self):
multiplier = -self._standardized_quantile(self.tail_mass / 2)
pmf_center = torch.ceil(self.scale_table * multiplier).int()
pmf_length = 2 * pmf_center + 1
max_length = torch.max(pmf_length).item()
device = pmf_center.device
samples = torch.abs(
torch.arange(max_length, device=device).int() - pmf_center[:, None]
)
samples_scale = self.scale_table.unsqueeze(1)
samples = samples.float()
samples_scale = samples_scale.float()
upper = self._standardized_cumulative((0.5 - samples) / samples_scale)
lower = self._standardized_cumulative((-0.5 - samples) / samples_scale)
pmf = upper - lower
tail_mass = 2 * lower[:, :1]
quantized_cdf = torch.Tensor(len(pmf_length), max_length + 2)
quantized_cdf = self._pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)
self._quantized_cdf = quantized_cdf
self._offset = -pmf_center
self._cdf_length = pmf_length + 2
def _likelihood(
self, inputs: Tensor, scales: Tensor, means: Optional[Tensor] = None
) -> Tensor:
half = float(0.5)
if means is not None:
values = inputs - means
else:
values = inputs
scales = self.lower_bound_scale(scales)
values = torch.abs(values)
upper = self._standardized_cumulative((half - values) / scales)
lower = self._standardized_cumulative((-half - values) / scales)
likelihood = upper - lower
return likelihood
def forward(
self,
inputs: Tensor,
scales: Tensor,
means: Optional[Tensor] = None,
training: Optional[bool] = None,
) -> Tuple[Tensor, Tensor]:
if training is None:
training = self.training
outputs = self.quantize(inputs, "noise" if training else "dequantize", means)
likelihood = self._likelihood(outputs, scales, means)
if self.use_likelihood_bound:
likelihood = self.likelihood_lower_bound(likelihood)
return outputs, likelihood
def build_indexes(self, scales: Tensor) -> Tensor:
scales = self.lower_bound_scale(scales)
indexes = scales.new_full(scales.size(), len(self.scale_table) - 1).int()
for s in self.scale_table[:-1]:
indexes -= (scales <= s).int()
return indexes
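# Illustrative usage sketch (not part of the original module): a GaussianConditional
# driven by a hypothetical geometric scale table, as a hyperprior would predict
# per-element scales. Assumes the compiled entropy-coder backend is available.
def _gaussian_conditional_usage_sketch():
    scale_table = [0.11 * (2 ** i) for i in range(8)]  # hypothetical table
    gc = GaussianConditional(scale_table)
    y = torch.randn(1, 8, 4, 4)
    scales = torch.rand(1, 8, 4, 4) + 0.1
    # Relaxed evaluation: quantize (or add noise during training) and score likelihoods.
    y_hat, likelihoods = gc(y, scales, training=False)
    # Coding: build CDFs from the scale table, map scales to table indexes, then code.
    gc.update()
    indexes = gc.build_indexes(scales)
    strings = gc.compress(y, indexes)
    y_dec = gc.decompress(strings, indexes)
    return y_hat, likelihoods, y_dec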
| 24,657 | 34.840116 | 99 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/parametrizers.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
from torch import Tensor
from .bound_ops import LowerBound
class NonNegativeParametrizer(nn.Module):
"""
Non negative reparametrization.
Used for stability during training.
"""
pedestal: Tensor
def __init__(self, minimum: float = 0, reparam_offset: float = 2 ** -18):
super().__init__()
self.minimum = float(minimum)
self.reparam_offset = float(reparam_offset)
pedestal = self.reparam_offset ** 2
self.register_buffer("pedestal", torch.Tensor([pedestal]))
bound = (self.minimum + self.reparam_offset ** 2) ** 0.5
self.lower_bound = LowerBound(bound)
def init(self, x: Tensor) -> Tensor:
return torch.sqrt(torch.max(x + self.pedestal, self.pedestal))
def forward(self, x: Tensor) -> Tensor:
out = self.lower_bound(x)
out = out ** 2 - self.pedestal
return out
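# Small usage sketch (not part of the original module): `init` maps a desired
# non-negative value into the raw parameter space and the forward pass maps it back,
# so the stored parameter can be optimized freely while the effective value stays
# non-negative. The target values below are made up.
def _non_negative_parametrizer_sketch():
    reparam = NonNegativeParametrizer()
    target = torch.tensor([0.0, 0.5, 2.0])
    raw = reparam.init(target)  # raw values to register as the learnable parameter
    effective = reparam(raw)    # approximately equal to target, never negative
    return raw, effective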
| 2,642 | 39.661538 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/bound_ops.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
from torch import Tensor
def lower_bound_fwd(x: Tensor, bound: Tensor) -> Tensor:
return torch.max(x, bound)
def lower_bound_bwd(x: Tensor, bound: Tensor, grad_output: Tensor):
pass_through_if = (x >= bound) | (grad_output < 0)
return pass_through_if * grad_output, None
class LowerBoundFunction(torch.autograd.Function):
"""Autograd function for the `LowerBound` operator."""
@staticmethod
def forward(ctx, x, bound):
ctx.save_for_backward(x, bound)
return lower_bound_fwd(x, bound)
@staticmethod
def backward(ctx, grad_output):
x, bound = ctx.saved_tensors
return lower_bound_bwd(x, bound, grad_output)
class LowerBound(nn.Module):
"""Lower bound operator, computes `torch.max(x, bound)` with a custom
gradient.
The derivative is replaced by the identity function when `x` is moved
towards the `bound`, otherwise the gradient is kept to zero.
"""
bound: Tensor
def __init__(self, bound: float):
super().__init__()
self.register_buffer("bound", torch.Tensor([float(bound)]))
@torch.jit.unused
def lower_bound(self, x):
return LowerBoundFunction.apply(x, self.bound)
def forward(self, x):
if torch.jit.is_scripting():
return torch.max(x, self.bound)
return self.lower_bound(x)
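# Gradient-behaviour sketch (not part of the original module): with bound=1.0 and
# x=[0.5, 2.0], the clamped element receives zero gradient because the positive
# incoming gradient would push it further below the bound, while the unclamped
# element keeps the identity gradient. The values are made up for illustration.
def _lower_bound_gradient_sketch():
    lb = LowerBound(1.0)
    x = torch.tensor([0.5, 2.0], requires_grad=True)
    lb(x).sum().backward()
    return x.grad  # tensor([0., 1.])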
| 3,102 | 37.308642 | 78 | py |
DenoiseCompression | DenoiseCompression-main/CompressAI/compressai/ops/ops.py | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from torch import Tensor
def ste_round(x: Tensor) -> Tensor:
"""
Rounding with non-zero gradients. Gradients are approximated by replacing
the derivative by the identity function.
Used in `"Lossy Image Compression with Compressive Autoencoders"
<https://arxiv.org/abs/1703.00395>`_
.. note::
Implemented with the pytorch `detach()` reparametrization trick:
`x_round = x_round - x.detach() + x`
"""
return torch.round(x) - x.detach() + x
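# Quick sketch (not part of the original module) of the straight-through behaviour:
# the forward pass rounds, but the backward pass sees the identity, so a gradient of
# 1 flows to every element. The values are made up for illustration.
def _ste_round_sketch():
    x = torch.tensor([0.4, 1.6], requires_grad=True)
    y = ste_round(x)  # tensor([0., 2.]) in the forward pass
    y.sum().backward()
    return x.grad  # tensor([1., 1.])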
| 2,223 | 43.48 | 78 | py |
CaBERT-SLU | CaBERT-SLU-main/baseline_midsf.py | """For model training and inference
Data input should be a single sentence.
"""
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam, RMSprop
from transformers import BertTokenizer, BertModel, BertConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pickle
import copy
import numpy as np
import collections
from tqdm import tqdm
from collections import Counter, defaultdict
from model import MULTI
from all_data_slot import get_dataloader
from config import opt
from utils import *
def train(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path_with_tokens)
print('Data Type: ', opt.datatype)
print('Use pretrained weights: ', opt.retrain)
# dataset
with open(opt.dic_path_with_tokens, 'rb') as f:
dic = pickle.load(f)
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
        train_data = pickle.load(f)
    if opt.test_path:
        with open(opt.test_path, 'rb') as f:
            test_data = pickle.load(f)
    if opt.datatype == "mixatis" or opt.datatype == "mixsnips":
        # MixATIS / MixSNIPS Dataset (test split loaded from opt.test_path above)
        X_train, y_train, _ = zip(*train_data)
        X_test, y_test, _ = zip(*test_data)
elif opt.datatype == "semantic":
# Semantic parsing Dataset
X, y = zip(*train_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
elif opt.datatype == "e2e" or opt.datatype == "sgd":
# Microsoft Dialogue Dataset / SGD Dataset
all_data = []
dialogue_id = {}
dialogue_counter = 0
counter = 0
for data in train_data:
for instance in data:
all_data.append(instance)
dialogue_id[counter] = dialogue_counter
counter += 1
dialogue_counter += 1
indices = np.random.permutation(len(all_data))
train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000]
test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100]
train_loader = get_dataloader(train, len(dic), len(slot_dic), opt)
val_loader = get_dataloader(test, len(dic), len(slot_dic), opt)
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = MULTI(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
else:
print("Train from scratch...")
model = model.to(device)
# optimizer, criterion
# param_optimizer = list(model.named_parameters())
# no_decay = ['bias', 'gamma', 'beta']
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
# 'weight_decay_rate': 0.01},
# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
# 'weight_decay_rate': 0.0}
# ]
# optimizer = BertAdam(optimizer_grouped_parameters,lr=opt.learning_rate_bert, warmup=.1)
optimizer = Adam(model.parameters(), weight_decay=0.01, lr=opt.learning_rate_classifier)
if opt.data_mode == 'single':
criterion = nn.CrossEntropyLoss().to(device)
else:
criterion = nn.BCEWithLogitsLoss(reduction='sum').to(device)
criterion2 = nn.CrossEntropyLoss(reduction='sum').to(device)
best_loss = 100
best_accuracy = 0
best_f1 = 0
# Start training
for epoch in range(opt.epochs):
print("====== epoch %d / %d: ======"% (epoch+1, opt.epochs))
# Training Phase
total_train_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.train()
ccounter = 0
for (captions_t, masks, labels, slot_labels) in tqdm(train_loader):
captions_t = captions_t.to(device)
masks = masks.to(device)
labels = labels.to(device)
slot_labels = slot_labels.to(device)
slot_labels = slot_labels.reshape(-1)
optimizer.zero_grad()
encoder_logits, decoder_logits, slot_logits = model(captions_t)
train_loss = criterion(encoder_logits, labels)
decoder_logits = decoder_logits.view(-1, len(dic))
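            # The utterance-level multi-hot intent vector is tiled over all opt.maxlen
            # token positions below, so every decoder step is supervised with the same
            # intent set (a token-level auxiliary loss on top of the encoder loss).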
slabels = labels.unsqueeze(1)
slabels = slabels.repeat(1, opt.maxlen, 1)
slabels = slabels.view(-1, len(dic))
train_loss += criterion(decoder_logits, slabels)
train_loss += criterion2(slot_logits, slot_labels)
train_loss.backward()
optimizer.step()
total_train_loss += train_loss
P, R, F1, acc = f1_score_intents(encoder_logits, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
print('Average train loss: {:.4f} '.format(total_train_loss / train_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/train_loader.dataset.num_data)
# Validation Phase
total_val_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.eval()
ccounter = 0
stats = defaultdict(Counter)
for (captions_t, masks, labels, slot_labels) in val_loader:
captions_t = captions_t.to(device)
masks = masks.to(device)
labels = labels.to(device)
slot_labels = slot_labels.to(device)
slot_labels = slot_labels.reshape(-1)
with torch.no_grad():
encoder_logits, decoder_logits, slot_logits = model(captions_t)
val_loss = criterion(encoder_logits, labels)
decoder_logits = decoder_logits.view(-1, len(dic))
slabels = labels.unsqueeze(1)
slabels = slabels.repeat(1, opt.maxlen, 1)
slabels = slabels.view(-1, len(dic))
val_loss += criterion(decoder_logits, slabels)
total_val_loss += val_loss
P, R, F1, acc = f1_score_intents(encoder_logits, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
_, index = torch.topk(slot_logits, k=1, dim=-1)
evaluate_iob(index, slot_labels, slot_dic, stats)
print('========= Validation =========')
print('Average val loss: {:.4f} '.format(total_val_loss / val_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/val_loader.dataset.num_data)
val_acc = total_acc/val_loader.dataset.num_data
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print('========= Slot =========')
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
# for label in stats:
# if label != 'total':
# p, r, f1 = prf(stats[label])
# print(f'{label:4s}: P = {p:.4f}, R = {r:.4f}, F1 = {f1:.4f}')
if f1 > best_f1:
print('saving with loss of {}'.format(total_val_loss),
'improved over previous {}'.format(best_loss))
best_loss = total_val_loss
best_accuracy = val_acc
best_f1 = f1
best_stats = copy.deepcopy(stats)
torch.save(model.state_dict(), 'checkpoints/best_{}_{}_baseline.pth'.format(opt.datatype, opt.data_mode))
print()
print('Best total val loss: {:.4f}'.format(total_val_loss))
print('Best Test Accuracy: {:.4f}'.format(best_accuracy))
print('Best F1 Score: {:.4f}'.format(best_f1))
p_slot, r_slot, f1_slot = prf(best_stats['total'])
print('Final evaluation on slot filling of the validation set:')
print(f'Overall: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
#####################################################################
def test(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path)
# dataset
with open(opt.dic_path, 'rb') as f:
dic = pickle.load(f)
reverse_dic = {v: k for k,v in dic.items()}
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
if opt.test_path:
with open(opt.test_path, 'rb') as f:
test_data = pickle.load(f)
if opt.datatype == "atis":
# ATIS Dataset
X_train, y_train, _ = zip(*train_data)
X_test, y_test, _ = zip(*test_data)
elif opt.datatype == "semantic":
# Semantic parsing Dataset
X, y = zip(*train_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
elif opt.datatype == "e2e" or opt.datatype == "sgd":
# Microsoft Dialogue Dataset / SGD Dataset
all_data = []
dialogue_id = {}
dialogue_counter = 0
counter = 0
for data in train_data:
for instance in data:
all_data.append(instance)
dialogue_id[counter] = dialogue_counter
counter += 1
dialogue_counter += 1
indices = np.random.permutation(len(all_data))
X_train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000]
X_test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100]
    X_train, mask_train = load_data(X_train, opt.maxlen)
    X_test, mask_test = load_data(X_test, opt.maxlen)
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = MULTI(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
model = model.to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Store embeddings
if opt.test_mode == "embedding":
train_loader = get_dataloader(X_train, y_train, mask_train, opt)
results = collections.defaultdict(list)
model.eval()
for i, (captions_t, labels, masks) in enumerate(train_loader):
captions_t = captions_t.to(device)
labels = labels.to(device)
masks = masks.to(device)
with torch.no_grad():
hidden_states, pooled_output, outputs = model(captions_t, masks)
print("Saving Data: %d" % i)
for ii in range(len(labels)):
key = labels[ii].data.cpu().item()
embedding = pooled_output[ii].data.cpu().numpy().reshape(-1)
word_embeddings = hidden_states[-1][ii].data.cpu().numpy()
tokens = tokenizer.convert_ids_to_tokens(captions_t[ii].data.cpu().numpy())
tokens = [token for token in tokens if token != "[CLS]" and token != "[SEP]" and token != "[PAD]"]
original_sentence = " ".join(tokens)
results[key].append((original_sentence, embedding, word_embeddings))
torch.save(results, embedding_path)
# Run test classification
elif opt.test_mode == "data":
# Single instance
# index = np.random.randint(0, len(X_test), 1)[0]
# input_ids = X_test[index]
# attention_masks = mask_test[index]
# print(" ".join(tokenizer.convert_ids_to_tokens(input_ids)))
# captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device)
# mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device)
# with torch.no_grad():
# pooled_output, outputs = model(captions_t, mask)
# print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()])
# print("Real label: ", reverse_dic[y_test[index]])
# Validation Phase
test_loader = get_dataloader(X_test, y_test, mask_test, len(dic), opt)
error_ids = []
pred_labels = []
real_labels = []
test_corrects = 0
totals = 0
model.eval()
for i, (captions_t, labels, masks) in enumerate(test_loader):
print('predict batches: ', i)
captions_t = captions_t.to(device)
labels = labels.to(device)
masks = masks.to(device)
with torch.no_grad():
_, pooled_output, outputs = model(captions_t, masks)
co, to = calc_score(outputs, labels)
test_corrects += co
totals += to
if opt.data_mode == 'single':
idx = torch.max(outputs, 1)[1] != labels
wrong_ids = [tokenizer.convert_ids_to_tokens(caption, skip_special_tokens=True) for caption in captions_t[idx]]
error_ids += wrong_ids
pred_labels += [reverse_dic[label.item()] for label in torch.max(outputs, 1)[1][idx]]
real_labels += [reverse_dic[label.item()] for label in labels[idx]]
else:
for i, logits in enumerate(outputs):
log = torch.sigmoid(logits)
correct = (labels[i][torch.where(log>0.5)[0]]).sum()
total = len(torch.where(labels[i]==1)[0])
if correct != total:
wrong_caption = tokenizer.convert_ids_to_tokens(captions_t[i], skip_special_tokens=True)
error_ids.append(wrong_caption)
pred_ls = [reverse_dic[p] for p in torch.where(log>0.5)[0].detach().cpu().numpy()]
real_ls = [reverse_dic[i] for i, r in enumerate(labels[i].detach().cpu().numpy()) if r == 1]
pred_labels.append(pred_ls)
real_labels.append(real_ls)
with open('error_analysis/{}_{}.txt'.format(opt.datatype, opt.data_mode), 'w') as f:
f.write('----------- Wrong Examples ------------\n')
for i, (caption, pred, real) in enumerate(zip(error_ids, pred_labels, real_labels)):
f.write(str(i)+'\n')
f.write(' '.join(caption)+'\n')
f.write('Predicted label: {}\n'.format(pred))
f.write('Real label: {}\n'.format(real))
f.write('------\n')
test_acc = test_corrects.double() / test_loader.dataset.num_data if opt.data_mode == 'single' else test_corrects.double() / totals
print('Test accuracy: {:.4f}'.format(test_acc))
# User defined
elif opt.test_mode == "user":
while True:
print("Please input the sentence: ")
text = input()
print("\n======== Predicted Results ========")
print(text)
text = "[CLS] " + text + " [SEP]"
tokenized_text = tokenizer.tokenize(text)
tokenized_ids = np.array(tokenizer.convert_tokens_to_ids(tokenized_text))[np.newaxis,:]
input_ids = pad_sequences(tokenized_ids, maxlen=opt.maxlen, dtype="long", truncating="post", padding="post").squeeze(0)
attention_masks = [float(i>0) for i in input_ids]
captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device)
mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device)
with torch.no_grad():
pooled_output, outputs = model(captions_t, mask)
print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()])
print("=================================")
if __name__ == '__main__':
import fire
fire.Fire()
| 16,880 | 36.182819 | 138 | py |
CaBERT-SLU | CaBERT-SLU-main/all_data_context.py | import torch as t
from torch.utils.data import Dataset, DataLoader
import pickle
from config import opt
from sklearn.model_selection import train_test_split
import numpy as np
from keras.preprocessing.sequence import pad_sequences
class Turns:
def __init__(self, token_ids, slot_ids):
token_ids, mask = self.load_data(token_ids)
slot_ids, _ = self.load_data(slot_ids)
self.token_ids = np.stack(token_ids, axis=0)
self.slot_ids = np.stack(slot_ids, axis=0)
self.attention_masks = mask
def load_data(self, X):
input_ids = pad_sequences(X, maxlen=60, dtype="long", truncating="post", padding="post")
attention_masks = []
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
return input_ids, attention_masks
class CoreDataset(Dataset):
def __init__(self, data, dic, slot_dic, opt):
self.data = data
self.dic = dic
self.slot_dic = slot_dic
self.opt = opt
self.num_labels = len(dic)
self.num_slot_labels = len(slot_dic)
self.X_turns, self.Y_turns = self.postprocess()
self.num_data = sum([len(turn.token_ids) for turn in self.X_turns])
def postprocess(self):
dialogs = []
y_slots = []
y_labels = []
for dialog in self.data:
utts, slots, labels = zip(*dialog)
dialogs.append(utts)
y_slots.append(slots)
y_labels.append(labels)
X_turns = np.array([Turns(turns, slots) for turns, slots in zip(dialogs, y_slots)])
Y_turns = np.array(y_labels)
return X_turns, Y_turns
def __getitem__(self, index):
# onehot
labels = self.Y_turns[index]
new_labels = t.zeros((len(labels), self.num_labels)).long()
for i, label in enumerate(labels):
label = t.LongTensor(np.array(label))
label = t.zeros(self.num_labels).scatter_(0, label, 1)
new_labels[i] = label
return self.X_turns[index], new_labels
def __len__(self):
return len(self.X_turns)
def collate_fn(batch):
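    """Pad a batch of dialogues to a common number of turns and token length.
    Returns batch-first tensors:
    result_ids (B, T_max, L_max) padded token ids,
    result_token_masks (B, T_max, L_max) 1 for real tokens,
    result_masks (B, T_max) 1 for real turns,
    lengths (B,) number of turns per dialogue,
    result_slot_labels (B, T_max, L_max) padded slot label ids,
    result_labels (B, T_max, C) multi-hot intent labels, -1 on padded turns.
    """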
X_turns, Y_update = zip(*batch)
num_labels = Y_update[0].shape[1]
lengths = [i.token_ids.shape[0] for i in X_turns]
lengths = t.LongTensor(lengths)
max_len = max([i.token_ids.shape[0] for i in X_turns])
max_dim = max([i.token_ids.shape[1] for i in X_turns])
result_ids = t.zeros((len(X_turns), max_len, max_dim)).long()
result_token_masks = t.zeros((len(X_turns), max_len, max_dim)).long()
result_masks = t.zeros((len(X_turns), max_len)).long()
result_slot_labels = t.zeros((len(X_turns), max_len, max_dim)).long()
result_labels = t.ones((len(X_turns), max_len, num_labels))*-1
for i in range(len(X_turns)):
len1 = X_turns[i].token_ids.shape[0]
dim1 = X_turns[i].token_ids.shape[1]
result_ids[i, :len1, :dim1] = t.Tensor(X_turns[i].token_ids)
result_token_masks[i, :len1, :dim1] = t.Tensor(X_turns[i].attention_masks)
for j in range(lengths[i]):
result_masks[i][j] = 1
result_slot_labels[i, :len1, :dim1] = t.Tensor(X_turns[i].slot_ids)
result_labels[i, :len1, :] = Y_update[i]
return result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels
def get_dataloader_context(data, dic, slot_dic, opt):
dataset = CoreDataset(data, dic, slot_dic, opt)
batch_size = opt.batch_size
return DataLoader(dataset,
batch_size=batch_size,
shuffle=False,
collate_fn= lambda x: collate_fn(x))
######################################################################
if __name__ == '__main__':
with open(opt.dic_path_with_tokens, 'rb') as f:
dic = pickle.load(f)
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
np.random.seed(0)
indices = np.arange(len(train_data)) #np.random.permutation(len(train_data))
train = np.array(train_data)[indices[:int(len(train_data)*0.7)]]
test = np.array(train_data)[indices[int(len(train_data)*0.7):]]
train_loader = get_dataloader_context(train, dic, slot_dic, opt)
for result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels in train_loader:
print(result_ids[0])
print(result_token_masks[0])
print(result_masks[0])
print(lengths[0])
print(result_slot_labels[0])
print(result_labels[0])
        break  # stop after inspecting the first batch
| 4,711 | 34.164179 | 113 | py |
CaBERT-SLU | CaBERT-SLU-main/all_data_slot.py | import torch as t
from torch.utils.data import Dataset, DataLoader
import pickle
from config import opt
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
import numpy as np
class CoreDataset(Dataset):
def __init__(self, data, num_labels, num_slot_labels, opt):
self.data = data
self.num_data = len(self.data)
self.maxlen = opt.maxlen
self.num_labels = num_labels
self.num_slot_labels = num_slot_labels
self.opt = opt
caps, slots, labels = zip(*self.data)
self.caps, self.masks = self.load_data(caps, self.maxlen)
self.slot_labels, _ = self.load_data(slots, self.maxlen)
self.labels = labels
def load_data(self, X, maxlen):
input_ids = pad_sequences(X, maxlen=maxlen, dtype="long", truncating="post", padding="post")
attention_masks = []
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
return t.tensor(input_ids), t.tensor(attention_masks)
def __getitem__(self, index):
# caps
caps = self.caps[index]
slot_labels = self.slot_labels[index]
masks = self.masks[index]
# labels
label = t.LongTensor(np.array(self.labels[index]))
labels = t.zeros(self.num_labels).scatter_(0, label, 1)
return caps, masks, labels, slot_labels
def __len__(self):
return len(self.data)
def get_dataloader(data, num_labels, num_slot_labels, opt):
dataset = CoreDataset(data, num_labels, num_slot_labels, opt)
batch_size = opt.batch_size
return DataLoader(dataset,
batch_size=batch_size,
shuffle=False)
| 1,819 | 29.847458 | 100 | py |
CaBERT-SLU | CaBERT-SLU-main/utils.py | import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam, RMSprop
from transformers import BertTokenizer, BertModel, BertConfig, AdamW
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pickle
import copy
import numpy as np
import collections
from tqdm import tqdm
from more_itertools import collapse
from collections import defaultdict
from model import BertContextNLU
from all_data_context import get_dataloader_context
from config import opt
def load_data(X, maxlen):
input_ids = pad_sequences(X, maxlen=maxlen, dtype="long", truncating="post", padding="post")
attention_masks = []
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
return (input_ids, attention_masks)
def f1_score_intents(outputs, labels):
P, R, F1, acc = 0, 0, 0, 0
outputs = torch.sigmoid(outputs)
for i in range(outputs.shape[0]):
TP, FP, FN = 0, 0, 0
for j in range(outputs.shape[1]):
if outputs[i][j] > 0.5 and labels[i][j] == 1:
TP += 1
elif outputs[i][j] <= 0.5 and labels[i][j] == 1:
FN += 1
elif outputs[i][j] > 0.5 and labels[i][j] == 0:
FP += 1
precision = TP / float(TP + FP) if (TP + FP) != 0 else 0
recall = TP / float(TP + FN) if (TP + FN) != 0 else 0
F1 += 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0
P += precision
R += recall
p = (torch.where(outputs[i]>0.5)[0])
r = (torch.where(labels[i]==1)[0])
if len(p) == len(r) and (p == r).all():
acc += 1
P /= outputs.shape[0]
R /= outputs.shape[0]
F1 /= outputs.shape[0]
return P, R, F1, acc
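# Minimal worked example (not part of the original module): one sample whose sigmoid
# scores exceed 0.5 exactly on the gold intents, so P = R = F1 = 1 and the exact-match
# accuracy count is 1. The numbers are made up.
def _f1_score_intents_example():
    logits = torch.tensor([[3.0, -3.0, 2.0]])  # sigmoid > 0.5 on dims 0 and 2
    gold = torch.tensor([[1.0, 0.0, 1.0]])
    P, R, F1, acc = f1_score_intents(logits, gold)
    return P, R, F1, acc  # 1.0, 1.0, 1.0, 1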
############################################
def to_spans(l_ids, voc):
"""Convert a list of BIO labels, coded as integers, into spans identified by a beginning, an end, and a label.
To allow easy comparison later, we store them in a dictionary indexed by the start position.
@param l_ids: a list of predicted label indices
@param voc: label vocabulary dictionary: index to label ex. 0: B-C
"""
spans = {}
current_lbl = None
current_start = None
for i, l_id in enumerate(l_ids):
l = voc[l_id]
if l[0] == 'B':
# Beginning of a named entity: B-something.
if current_lbl:
# If we're working on an entity, close it.
spans[current_start] = (current_lbl, i)
# Create a new entity that starts here.
current_lbl = l[2:]
current_start = i
elif l[0] == 'I':
# Continuation of an entity: I-something.
if current_lbl:
# If we have an open entity, but its label does not
# correspond to the predicted I-tag, then we close
# the open entity and create a new one.
if current_lbl != l[2:]:
spans[current_start] = (current_lbl, i)
current_lbl = l[2:]
current_start = i
else:
# If we don't have an open entity but predict an I tag,
# we create a new entity starting here even though we're
# not following the format strictly.
current_lbl = l[2:]
current_start = i
else:
# Outside: O.
if current_lbl:
# If we have an open entity, we close it.
spans[current_start] = (current_lbl, i)
current_lbl = None
current_start = None
if current_lbl != None:
spans[current_start] = (current_lbl, i+1)
return spans
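# Worked example (not part of the original module) with a hypothetical BIO vocabulary:
# ids [0, 1, 2, 0, 3] decode to O, B-time, I-time, O, B-loc and collapse into two
# spans keyed by their start positions.
def _to_spans_example():
    voc = {0: 'O', 1: 'B-time', 2: 'I-time', 3: 'B-loc'}
    spans = to_spans([0, 1, 2, 0, 3], voc)
    return spans  # {1: ('time', 3), 4: ('loc', 5)}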
def compare(gold, pred, stats, mode='strict'):
"""Compares two sets of spans and records the results for future aggregation.
@param gold: ground truth
@param pred: predictions
@param stats: the final dictionary with keys of different counts including total and specific labels
ex. {'total': {'gold': 5, 'pred': 5},
'Cause': {'gold': 5, 'pred': 5}}
"""
for start, (lbl, end) in gold.items():
stats['total']['gold'] += 1
stats[lbl]['gold'] += 1
for start, (lbl, end) in pred.items():
stats['total']['pred'] += 1
stats[lbl]['pred'] += 1
if mode == 'strict':
for start, (glbl, gend) in gold.items():
if start in pred:
plbl, pend = pred[start]
if glbl == plbl and gend == pend:
stats['total']['corr'] += 1
stats[glbl]['corr'] += 1
elif mode == 'partial':
for gstart, (glbl, gend) in gold.items():
for pstart, (plbl, pend) in pred.items():
if glbl == plbl:
g = set(range(gstart, gend+1))
p = set(range(pstart, pend+1))
if len(g & p) / max(len(g), len(p)) >= opt.token_percent:
stats['total']['corr'] += 1
stats[glbl]['corr'] += 1
break
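# Worked example (not part of the original module): one exact span match out of two
# gold / two predicted spans, so prf(stats['total']) would give P = R = F1 = 0.5.
# The span dictionaries are made up and follow the format produced by to_spans.
def _compare_example():
    gold = {1: ('time', 3), 4: ('loc', 5)}
    pred = {1: ('time', 3), 4: ('loc', 6)}  # second span has the wrong end
    stats = defaultdict(collections.Counter)
    compare(gold, pred, stats, mode='strict')
    return stats['total']  # Counter({'gold': 2, 'pred': 2, 'corr': 1})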
def evaluate_iob(predicted, gold, label_field, stats):
"""This function will evaluate the model from bert dataloader pipeline.
"""
gold_cpu = gold.cpu().numpy()
pred_cpu = predicted.cpu().numpy()
gold_cpu = list(gold_cpu.reshape(-1))
pred_cpu = list(pred_cpu.reshape(-1))
# pred_cpu = [l for sen in predicted for l in sen]
id2label = {v:k for k,v in label_field.items()}
# Compute spans for the gold standard and prediction.
gold_spans = to_spans(gold_cpu, id2label)
pred_spans = to_spans(pred_cpu, id2label)
# Finally, update the counts for correct, predicted and gold-standard spans.
compare(gold_spans, pred_spans, stats, 'strict')
def prf(stats):
"""
Computes precision, recall and F-score, given a dictionary that contains
the counts of correct, predicted and gold-standard items.
@params stats: the final statistics
"""
if stats['pred'] == 0:
return 0, 0, 0
p = stats['corr']/stats['pred']
r = stats['corr']/stats['gold']
if p > 0 and r > 0:
f = 2*p*r/(p+r)
else:
f = 0
    return p, r, f
 | 6,421 | 34.877095 | 115 | py |
CaBERT-SLU | CaBERT-SLU-main/bert_context.py | """For model training and inference (multi dialogue act & slot detection)
"""
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam, RMSprop
from transformers import BertTokenizer, BertModel, BertConfig, AdamW
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pickle
import copy
import numpy as np
import collections
from tqdm import tqdm
from collections import defaultdict, Counter
from model import BertContextNLU, ECA
from all_data_context import get_dataloader_context
from config import opt
from utils import *
def train(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path_with_tokens)
print('Data Type: ', opt.datatype)
print('Use pretrained weights: ', opt.retrain)
# dataset
with open(opt.dic_path_with_tokens, 'rb') as f:
dic = pickle.load(f)
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
# Microsoft Dialogue Dataset / SGD Dataset
indices = np.random.permutation(len(train_data))
train = np.array(train_data)[indices[:int(len(train_data)*0.7)]]#[:1000]
test = np.array(train_data)[indices[int(len(train_data)*0.7):]]#[:100]
train_loader = get_dataloader_context(train, dic, slot_dic, opt)
val_loader = get_dataloader_context(test, dic, slot_dic, opt)
# label tokens
intent_tokens = [intent for name, (tag, intent) in dic.items()]
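    # dic maps each dialogue-act name to (tag id, tokenized natural-language intent
    # name); the token id sequences are padded to length 10 below so the model can
    # attend over the label descriptions themselves.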
intent_tok, mask_tok = load_data(intent_tokens, 10)
intent_tokens = torch.zeros(len(intent_tok), 10).long().to(device)
mask_tokens = torch.zeros(len(mask_tok), 10).long().to(device)
for i in range(len(intent_tok)):
intent_tokens[i] = torch.tensor(intent_tok[i])
for i in range(len(mask_tok)):
mask_tokens[i] = torch.tensor(mask_tok[i])
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertContextNLU(config, opt, len(dic), len(slot_dic))
# model = ECA(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
else:
print("Train from scratch...")
model = model.to(device)
optimizer = AdamW(model.parameters(), weight_decay=0.01, lr=opt.learning_rate_bert)
criterion = nn.BCEWithLogitsLoss(reduction='sum').to(device)
criterion2 = nn.CrossEntropyLoss(reduction='sum').to(device)
best_loss = 100
best_accuracy = 0
best_f1 = 0
#################################### Start training ####################################
for epoch in range(opt.epochs):
print("====== epoch %d / %d: ======"% (epoch+1, opt.epochs))
# Training Phase
total_train_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.train()
ccounter = 0
for (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in tqdm(train_loader):
result_ids = result_ids.to(device)
result_token_masks = result_token_masks.to(device)
result_masks = result_masks.to(device)
lengths = lengths.to(device)
result_slot_labels = result_slot_labels.to(device)
result_slot_labels = result_slot_labels.reshape(-1)
result_labels = result_labels.to(device)
optimizer.zero_grad()
outputs, labels, slot_out = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens)
train_loss = criterion(outputs, labels)
slot_loss = criterion2(slot_out, result_slot_labels)
total_loss = train_loss + slot_loss
total_loss.backward()
optimizer.step()
total_train_loss += total_loss
P, R, F1, acc = f1_score_intents(outputs, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
print('Average train loss: {:.4f} '.format(total_train_loss / train_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/train_loader.dataset.num_data)
# Validation Phase
total_val_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.eval()
ccounter = 0
stats = defaultdict(Counter)
for (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in val_loader:
result_ids = result_ids.to(device)
result_token_masks = result_token_masks.to(device)
result_masks = result_masks.to(device)
lengths = lengths.to(device)
result_slot_labels = result_slot_labels.to(device)
result_slot_labels = result_slot_labels.reshape(-1)
result_labels = result_labels.to(device)
with torch.no_grad():
outputs, labels, predicted_slot_outputs = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens)
val_loss = criterion(outputs, labels)
total_val_loss += val_loss
P, R, F1, acc = f1_score_intents(outputs, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
_, index = torch.topk(predicted_slot_outputs, k=1, dim=-1)
evaluate_iob(index, result_slot_labels, slot_dic, stats)
print('========= Validation =========')
print('Average val loss: {:.4f} '.format(total_val_loss / val_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/val_loader.dataset.num_data)
val_acc = total_acc/val_loader.dataset.num_data
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print('========= Slot =========')
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
if f1 > best_f1:
print('saving with loss of {}'.format(total_val_loss),
'improved over previous {}'.format(best_loss))
best_loss = total_val_loss
best_accuracy = val_acc
best_f1 = f1
best_stats = copy.deepcopy(stats)
torch.save(model.state_dict(), 'checkpoints/best_{}_{}.pth'.format(opt.datatype, opt.data_mode))
print()
print('Best total val loss: {:.4f}'.format(total_val_loss))
print('Best Test Accuracy: {:.4f}'.format(best_accuracy))
print('Best F1 Score: {:.4f}'.format(best_f1))
p_slot, r_slot, f1_slot = prf(best_stats['total'])
print('Final evaluation on slot filling of the validation set:')
print(f'Overall: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
#####################################################################
def test(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path_with_tokens)
# dataset
with open(opt.dic_path_with_tokens, 'rb') as f:
dic = pickle.load(f)
print(dic)
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
reverse_dic = {v[0]: k for k,v in dic.items()}
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
with open(opt.test_path, 'rb') as f:
test_data = pickle.load(f)
# Microsoft Dialogue Dataset / SGD Dataset
indices = np.random.permutation(len(train_data))
train = np.array(train_data)[indices[:int(len(train_data)*0.7)]]
test = np.array(train_data)[indices[int(len(train_data)*0.7):]][:1000]
train_loader = get_dataloader_context(train, dic, slot_dic, opt)
test_loader = get_dataloader_context(test, dic, slot_dic, opt)
# label tokens
intent_tokens = [intent for name, (tag, intent) in dic.items()]
intent_tok, mask_tok = load_data(intent_tokens, 10)
intent_tokens = torch.zeros(len(intent_tok), 10).long().to(device)
mask_tokens = torch.zeros(len(mask_tok), 10).long().to(device)
for i in range(len(intent_tok)):
intent_tokens[i] = torch.tensor(intent_tok[i])
for i in range(len(mask_tok)):
mask_tokens[i] = torch.tensor(mask_tok[i])
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertContextNLU(config, opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model {} has been loaded.".format(opt.model_path))
model = model.to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Run multi-intent validation
if opt.test_mode == "validation":
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.eval()
ccounter = 0
stats = defaultdict(Counter)
for (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in tqdm(test_loader):
result_ids = result_ids.to(device)
result_token_masks = result_token_masks.to(device)
result_masks = result_masks.to(device)
lengths = lengths.to(device)
result_slot_labels = result_slot_labels.to(device)
result_slot_labels = result_slot_labels.reshape(-1)
result_labels = result_labels.to(device)
with torch.no_grad():
outputs, labels, predicted_slot_outputs = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens)
P, R, F1, acc = f1_score_intents(outputs, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
_, index = torch.topk(predicted_slot_outputs, k=1, dim=-1)
evaluate_iob(index, result_slot_labels, slot_dic, stats)
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/test_loader.dataset.num_data)
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print('========= Slot =========')
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
# Run test classification
elif opt.test_mode == "data":
# Validation Phase
pred_labels = []
real_labels = []
error_ids = []
total_P, total_R, total_F1, total_acc = 0, 0, 0, 0
ccounter = 0
stats = defaultdict(Counter)
model.eval()
print(len(test_loader.dataset))
for num, (result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels) in enumerate(test_loader):
print('predict batches: ', num)
result_ids = result_ids.to(device)
result_token_masks = result_token_masks.to(device)
result_masks = result_masks.to(device)
lengths = lengths.to(device)
result_slot_labels = result_slot_labels.to(device)
result_slot_labels = result_slot_labels.reshape(-1)
result_labels = result_labels.to(device)
# Remove padding
texts_no_pad = []
for i in range(len(result_ids)):
texts_no_pad.append(result_ids[i,:lengths[i],:])
texts_no_pad = torch.vstack(texts_no_pad)
with torch.no_grad():
outputs, labels, predicted_slot_outputs, ffscores = model(result_ids, result_token_masks, result_masks, lengths, result_slot_labels, result_labels, intent_tokens, mask_tokens)
# total
P, R, F1, acc = f1_score_intents(outputs, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
_, index = torch.topk(predicted_slot_outputs, k=1, dim=-1)
evaluate_iob(index, result_slot_labels, slot_dic, stats)
for i, logits in enumerate(outputs):
log = torch.sigmoid(logits)
correct = (labels[i][torch.where(log>0.5)[0]]).sum()
total = len(torch.where(labels[i]==1)[0])
wrong_caption = tokenizer.convert_ids_to_tokens(texts_no_pad[i], skip_special_tokens=True)
error_ids.append(wrong_caption)
pred_ls = [p for p in torch.where(log>0.5)[0].detach().cpu().numpy()]
real_ls = [i for i, r in enumerate(labels[i].detach().cpu().numpy()) if r == 1]
pred_labels.append(pred_ls)
real_labels.append(real_ls)
with open('error_analysis/{}_{}_context_slots.txt'.format(opt.datatype, opt.data_mode), 'w') as f:
f.write('----------- Examples ------------\n')
for i, (caption, pred, real) in enumerate(zip(error_ids, pred_labels, real_labels)):
f.write(str(i)+'\n')
f.write(' '.join(caption)+'\n')
p_r = [reverse_dic[p] for p in pred]
r_r = [reverse_dic[r] for r in real]
f.write('Predicted label: {}\n'.format(p_r))
f.write('Real label: {}\n'.format(r_r))
f.write('------\n')
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/test_loader.dataset.num_data)
print(len(ffscores))
with open('ffscores.pkl', 'wb') as f:
pickle.dump(ffscores, f)
if __name__ == '__main__':
import fire
fire.Fire()
| 15,293 | 36.211679 | 192 | py |
CaBERT-SLU | CaBERT-SLU-main/baseline_stackprop/utils_bert.py | import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam, RMSprop
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pickle
import copy
import numpy as np
import collections
from tqdm import tqdm
from more_itertools import collapse
from collections import defaultdict
def load_data(X, maxlen):
input_ids = pad_sequences(X, maxlen=maxlen, dtype="long", truncating="post", padding="post")
attention_masks = []
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
return (input_ids, attention_masks)
def f1_score_intents(outputs, labels):
P, R, F1, acc = 0, 0, 0, 0
outputs = torch.sigmoid(outputs)
for i in range(outputs.shape[0]):
TP, FP, FN = 0, 0, 0
for j in range(outputs.shape[1]):
if outputs[i][j] > 0.5 and labels[i][j] == 1:
TP += 1
elif outputs[i][j] <= 0.5 and labels[i][j] == 1:
FN += 1
elif outputs[i][j] > 0.5 and labels[i][j] == 0:
FP += 1
precision = TP / float(TP + FP) if (TP + FP) != 0 else 0
recall = TP / float(TP + FN) if (TP + FN) != 0 else 0
F1 += 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0
P += precision
R += recall
p = (torch.where(outputs[i]>0.5)[0])
r = (torch.where(labels[i]==1)[0])
if len(p) == len(r) and (p == r).all():
acc += 1
P /= outputs.shape[0]
R /= outputs.shape[0]
F1 /= outputs.shape[0]
return P, R, F1, acc
############################################3
def to_spans(l_ids, voc):
"""Convert a list of BIO labels, coded as integers, into spans identified by a beginning, an end, and a label.
To allow easy comparison later, we store them in a dictionary indexed by the start position.
@param l_ids: a list of predicted label indices
@param voc: label vocabulary dictionary: index to label ex. 0: B-C
"""
spans = {}
current_lbl = None
current_start = None
for i, l_id in enumerate(l_ids):
#l = voc[l_id]
l = l_id
if l[0] == 'B':
# Beginning of a named entity: B-something.
if current_lbl:
# If we're working on an entity, close it.
spans[current_start] = (current_lbl, i)
# Create a new entity that starts here.
current_lbl = l[2:]
current_start = i
elif l[0] == 'I':
# Continuation of an entity: I-something.
if current_lbl:
# If we have an open entity, but its label does not
# correspond to the predicted I-tag, then we close
# the open entity and create a new one.
if current_lbl != l[2:]:
spans[current_start] = (current_lbl, i)
current_lbl = l[2:]
current_start = i
else:
# If we don't have an open entity but predict an I tag,
# we create a new entity starting here even though we're
# not following the format strictly.
current_lbl = l[2:]
current_start = i
else:
# Outside: O.
if current_lbl:
# If we have an open entity, we close it.
spans[current_start] = (current_lbl, i)
current_lbl = None
current_start = None
if current_lbl != None:
spans[current_start] = (current_lbl, i+1)
return spans
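# A minimal usage sketch (illustrative, not part of the original repo): labels
# are passed as BIO strings, so voc can be None here; the 'Cause'/'Effect'
# label names are made up for the example.
def _example_to_spans():
    labels = ['O', 'B-Cause', 'I-Cause', 'O', 'B-Effect']
    spans = to_spans(labels, voc=None)
    # spans -> {1: ('Cause', 3), 4: ('Effect', 5)}
    return spans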
def compare(gold, pred, stats, mode='strict'):
"""Compares two sets of spans and records the results for future aggregation.
@param gold: ground truth
@param pred: predictions
@param stats: the final dictionary with keys of different counts including total and specific labels
ex. {'total': {'gold': 5, 'pred': 5},
'Cause': {'gold': 5, 'pred': 5}}
"""
for start, (lbl, end) in gold.items():
stats['total']['gold'] += 1
stats[lbl]['gold'] += 1
for start, (lbl, end) in pred.items():
stats['total']['pred'] += 1
stats[lbl]['pred'] += 1
if mode == 'strict':
for start, (glbl, gend) in gold.items():
if start in pred:
plbl, pend = pred[start]
if glbl == plbl and gend == pend:
stats['total']['corr'] += 1
stats[glbl]['corr'] += 1
def evaluate_iob(predicted, gold, label_field, stats):
"""This function will evaluate the model from bert dataloader pipeline.
"""
#gold_cpu = gold.cpu().numpy()
#pred_cpu = predicted.cpu().numpy()
#gold_cpu = list(gold_cpu.reshape(-1))
#pred_cpu = list(pred_cpu.reshape(-1))
gold_cpu = gold
pred_cpu = predicted
# pred_cpu = [l for sen in predicted for l in sen]
id2label = {v:k for k,v in label_field.items()}
# Compute spans for the gold standard and prediction.
gold_spans = to_spans(gold_cpu, id2label)
pred_spans = to_spans(pred_cpu, id2label)
# Finally, update the counts for correct, predicted and gold-standard spans.
compare(gold_spans, pred_spans, stats, 'strict')
def prf(stats):
"""
Computes precision, recall and F-score, given a dictionary that contains
the counts of correct, predicted and gold-standard items.
@params stats: the final statistics
"""
if stats['pred'] == 0:
return 0, 0, 0
p = stats['corr']/stats['pred']
r = stats['corr']/stats['gold']
if p > 0 and r > 0:
f = 2*p*r/(p+r)
else:
f = 0
return p, r, f | 5,829 | 34.120482 | 115 | py |
CaBERT-SLU | CaBERT-SLU-main/baseline_stackprop/train.py | """
@Author : Lee, Qin
@StartTime : 2018/08/13
@Filename : train.py
@Software : Pycharm
@Framework : Pytorch
@LastModify : 2019/05/07
"""
from utils.module import ModelManager
from utils.loader import DatasetManager ######
from utils.process import Processor #####
import torch
import os
import json
import random
import argparse
import numpy as np
parser = argparse.ArgumentParser()
# Training parameters.
parser.add_argument('--data_dir', '-dd', type=str, default='data_with_slots/e2e')
parser.add_argument('--save_dir', '-sd', type=str, default='save/e2e')
parser.add_argument("--random_state", '-rs', type=int, default=0)
parser.add_argument('--gpu', '-g', action='store_true', help='use gpu', required=False, default=False)
parser.add_argument('--num_epoch', '-ne', type=int, default=20)
parser.add_argument('--batch_size', '-bs', type=int, default=16)
parser.add_argument('--l2_penalty', '-lp', type=float, default=1e-6)
parser.add_argument("--learning_rate", '-lr', type=float, default=0.001)
parser.add_argument('--dropout_rate', '-dr', type=float, default=0.4)
parser.add_argument('--intent_forcing_rate', '-ifr', type=float, default=0.9)
parser.add_argument("--differentiable", "-d", action="store_true", default=False)
parser.add_argument('--slot_forcing_rate', '-sfr', type=float, default=0.9)
# model parameters.
parser.add_argument('--word_embedding_dim', '-wed', type=int, default=64)
parser.add_argument('--encoder_hidden_dim', '-ehd', type=int, default=256)
parser.add_argument('--intent_embedding_dim', '-ied', type=int, default=8)
parser.add_argument('--slot_embedding_dim', '-sed', type=int, default=32)
parser.add_argument('--slot_decoder_hidden_dim', '-sdhd', type=int, default=64)
parser.add_argument('--intent_decoder_hidden_dim', '-idhd', type=int, default=64)
parser.add_argument('--attention_hidden_dim', '-ahd', type=int, default=1024)
parser.add_argument('--attention_output_dim', '-aod', type=int, default=128)
if __name__ == "__main__":
args = parser.parse_args()
#args.gpu = args.gpu and torch.cuda.is_available()
# Save training and model parameters.
if not os.path.exists(args.save_dir):
os.system("mkdir -p " + args.save_dir)
log_path = os.path.join(args.save_dir, "param.json")
with open(log_path, "w") as fw:
fw.write(json.dumps(args.__dict__, indent=True))
# Fix the random seed of package random.
random.seed(args.random_state)
np.random.seed(args.random_state)
# Fix the random seed of Pytorch when using GPU.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.random_state)
torch.cuda.manual_seed(args.random_state)
# Fix the random seed of Pytorch when using CPU.
torch.manual_seed(args.random_state)
torch.random.manual_seed(args.random_state)
# Instantiate a dataset object.
dataset = DatasetManager(args)
dataset.quick_build()
dataset.show_summary()
# Instantiate a network model object.
model = ModelManager(
args, len(dataset.word_alphabet),
len(dataset.slot_alphabet),
len(dataset.intent_alphabet))
model.show_summary()
# To train and evaluate the models.
process = Processor(dataset, model, args.batch_size)
process.train()
print('\nAccepted performance slot_f1, intent_f1, intent_acc, sent_acc: ' + str(Processor.validate(
os.path.join(args.save_dir, "model/model.pkl"),
os.path.join(args.save_dir, "model/dataset.pkl"),
args.batch_size)) + " at test dataset;\n")
| 3,585 | 36.747368 | 103 | py |
CaBERT-SLU | CaBERT-SLU-main/baseline_stackprop/utils/module.py | """
@Author : Lee, Qin
@StartTime : 2018/08/13
@Filename : module.py
@Software : Pycharm
@Framework : Pytorch
@LastModify : 2019/05/07
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
class ModelManager(nn.Module):
def __init__(self, args, num_word, num_slot, num_intent):
super(ModelManager, self).__init__()
self.__num_word = num_word
self.__num_slot = num_slot
self.__num_intent = num_intent
self.__args = args
# Initialize an embedding object.
self.__embedding = EmbeddingCollection(
self.__num_word,
self.__args.word_embedding_dim
)
# Initialize an LSTM Encoder object.
self.__encoder = LSTMEncoder(
self.__args.word_embedding_dim,
self.__args.encoder_hidden_dim,
self.__args.dropout_rate
)
# Initialize an self-attention layer.
self.__attention = SelfAttention(
self.__args.word_embedding_dim,
self.__args.attention_hidden_dim,
self.__args.attention_output_dim,
self.__args.dropout_rate
)
# Initialize an Decoder object for intent.
self.__intent_decoder = LSTMDecoder(
self.__args.encoder_hidden_dim + self.__args.attention_output_dim,
self.__args.intent_decoder_hidden_dim,
self.__num_intent, self.__args.dropout_rate,
embedding_dim=self.__args.intent_embedding_dim
)
# Initialize an Decoder object for slot.
self.__slot_decoder = LSTMDecoder(
self.__args.encoder_hidden_dim + self.__args.attention_output_dim,
self.__args.slot_decoder_hidden_dim,
self.__num_slot, self.__args.dropout_rate,
embedding_dim=self.__args.slot_embedding_dim,
extra_dim=self.__num_intent
)
# One-hot encoding for augment data feed.
self.__intent_embedding = nn.Embedding(
self.__num_intent, self.__num_intent
)
self.__intent_embedding.weight.data = torch.eye(self.__num_intent)
self.__intent_embedding.weight.requires_grad = False
def show_summary(self):
"""
print the abstract of the defined model.
"""
print('Model parameters are listed as follows:\n')
print('\tnumber of word: {};'.format(self.__num_word))
print('\tnumber of slot: {};'.format(self.__num_slot))
print('\tnumber of intent: {};'.format(self.__num_intent))
print('\tword embedding dimension: {};'.format(self.__args.word_embedding_dim))
print('\tencoder hidden dimension: {};'.format(self.__args.encoder_hidden_dim))
print('\tdimension of intent embedding: {};'.format(self.__args.intent_embedding_dim))
print('\tdimension of slot embedding: {};'.format(self.__args.slot_embedding_dim))
print('\tdimension of slot decoder hidden: {};'.format(self.__args.slot_decoder_hidden_dim))
print('\tdimension of intent decoder hidden: {};'.format(self.__args.intent_decoder_hidden_dim))
print('\thidden dimension of self-attention: {};'.format(self.__args.attention_hidden_dim))
print('\toutput dimension of self-attention: {};'.format(self.__args.attention_output_dim))
print('\nEnd of parameters show. Now training begins.\n\n')
def forward(self, text, seq_lens, n_predicts=None, forced_slot=None, forced_intent=None):
word_tensor, _ = self.__embedding(text)
lstm_hiddens = self.__encoder(word_tensor, seq_lens)
# transformer_hiddens = self.__transformer(pos_tensor, seq_lens)
attention_hiddens = self.__attention(word_tensor, seq_lens)
hiddens = torch.cat([attention_hiddens, lstm_hiddens], dim=1)
pred_intent = self.__intent_decoder(
hiddens, seq_lens,
forced_input=forced_intent
)
if not self.__args.differentiable:
_, idx_intent = pred_intent.topk(1, dim=-1)
feed_intent = self.__intent_embedding(idx_intent.squeeze(1))
else:
feed_intent = pred_intent
pred_slot = self.__slot_decoder(
hiddens, seq_lens,
forced_input=forced_slot,
extra_input=feed_intent ######
)
if n_predicts is None:
return F.log_softmax(pred_slot, dim=1), F.log_softmax(pred_intent, dim=1)
else:
_, slot_index = pred_slot.topk(n_predicts, dim=1)
_, intent_index = pred_intent.topk(n_predicts, dim=1)
return slot_index.cpu().data.numpy().tolist(), intent_index.cpu().data.numpy().tolist()
def golden_intent_predict_slot(self, text, seq_lens, golden_intent, n_predicts=1):
word_tensor, _ = self.__embedding(text)
embed_intent = self.__intent_embedding(golden_intent)
lstm_hiddens = self.__encoder(word_tensor, seq_lens)
attention_hiddens = self.__attention(word_tensor, seq_lens)
hiddens = torch.cat([attention_hiddens, lstm_hiddens], dim=1)
pred_slot = self.__slot_decoder(
hiddens, seq_lens, extra_input=embed_intent
)
_, slot_index = pred_slot.topk(n_predicts, dim=-1)
# Just predict single slot value.
return slot_index.cpu().data.numpy().tolist()
class EmbeddingCollection(nn.Module):
"""
Provide word vector and position vector encoding.
"""
def __init__(self, input_dim, embedding_dim, max_len=5000):
super(EmbeddingCollection, self).__init__()
self.__input_dim = input_dim
        # Here embedding_dim must be an even number.
self.__embedding_dim = embedding_dim
self.__max_len = max_len
# Word vector encoder.
self.__embedding_layer = nn.Embedding(
self.__input_dim, self.__embedding_dim
)
# Position vector encoder.
# self.__position_layer = torch.zeros(self.__max_len, self.__embedding_dim)
# position = torch.arange(0, self.__max_len).unsqueeze(1)
# div_term = torch.exp(torch.arange(0, self.__embedding_dim, 2) *
# (-math.log(10000.0) / self.__embedding_dim))
# Sine wave curve design.
# self.__position_layer[:, 0::2] = torch.sin(position * div_term)
# self.__position_layer[:, 1::2] = torch.cos(position * div_term)
#
# self.__position_layer = self.__position_layer.unsqueeze(0)
# self.register_buffer('pe', self.__position_layer)
def forward(self, input_x):
# Get word vector encoding.
embedding_x = self.__embedding_layer(input_x)
        # Position encoding is currently disabled (see the commented-out code in __init__).
        # position_x = Variable(self.pe[:, :input_x.size(1)], requires_grad=False)
        # The word embedding is therefore returned twice, so callers that expect
        # a (word_tensor, pos_tensor) pair keep working.
        return embedding_x, embedding_x
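# Shape sketch (illustrative, not part of the original repo): since position
# encoding is disabled, both returned tensors are the very same embedding.
def _example_embedding_collection():
    emb = EmbeddingCollection(input_dim=100, embedding_dim=8)
    tokens = torch.randint(0, 100, (2, 5))      # (batch, max_len)
    word_vec, pos_vec = emb(tokens)
    assert word_vec.shape == (2, 5, 8) and word_vec is pos_vec
    return word_vec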
class LSTMEncoder(nn.Module):
"""
Encoder structure based on bidirectional LSTM.
"""
def __init__(self, embedding_dim, hidden_dim, dropout_rate):
super(LSTMEncoder, self).__init__()
# Parameter recording.
self.__embedding_dim = embedding_dim
self.__hidden_dim = hidden_dim // 2
self.__dropout_rate = dropout_rate
# Network attributes.
self.__dropout_layer = nn.Dropout(self.__dropout_rate)
self.__lstm_layer = nn.LSTM(
input_size=self.__embedding_dim,
hidden_size=self.__hidden_dim,
batch_first=True,
bidirectional=True,
dropout=self.__dropout_rate,
num_layers=1
)
def forward(self, embedded_text, seq_lens):
""" Forward process for LSTM Encoder.
(batch_size, max_sent_len)
-> (batch_size, max_sent_len, word_dim)
-> (batch_size, max_sent_len, hidden_dim)
-> (total_word_num, hidden_dim)
:param embedded_text: padded and embedded input text.
:param seq_lens: is the length of original input text.
:return: is encoded word hidden vectors.
"""
# Padded_text should be instance of LongTensor.
dropout_text = self.__dropout_layer(embedded_text)
# Pack and Pad process for input of variable length.
packed_text = pack_padded_sequence(dropout_text, seq_lens, batch_first=True)
lstm_hiddens, (h_last, c_last) = self.__lstm_layer(packed_text)
padded_hiddens, _ = pad_packed_sequence(lstm_hiddens, batch_first=True)
return torch.cat([padded_hiddens[i][:seq_lens[i], :] for i in range(0, len(seq_lens))], dim=0)
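# Shape sanity check (illustrative, not part of the original repo): a padded
# batch of 2 sentences with lengths [4, 2] yields 4 + 2 = 6 word-level hidden
# vectors of size hidden_dim, matching the docstring above.
def _example_lstm_encoder_shapes():
    encoder = LSTMEncoder(embedding_dim=8, hidden_dim=16, dropout_rate=0.0)
    embedded = torch.randn(2, 4, 8)             # (batch, max_sent_len, word_dim)
    hiddens = encoder(embedded, seq_lens=[4, 2])
    assert hiddens.shape == (6, 16)             # (total_word_num, hidden_dim)
    return hiddens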
class LSTMDecoder(nn.Module):
"""
Decoder structure based on unidirectional LSTM.
"""
def __init__(self, input_dim, hidden_dim, output_dim, dropout_rate, embedding_dim=None, extra_dim=None):
""" Construction function for Decoder.
:param input_dim: input dimension of Decoder. In fact, it's encoder hidden size.
:param hidden_dim: hidden dimension of iterative LSTM.
:param output_dim: output dimension of Decoder. In fact, it's total number of intent or slot.
:param dropout_rate: dropout rate of network which is only useful for embedding.
:param embedding_dim: if it's not None, the input and output are relevant.
:param extra_dim: if it's not None, the decoder receives information tensors.
"""
super(LSTMDecoder, self).__init__()
self.__input_dim = input_dim
self.__hidden_dim = hidden_dim
self.__output_dim = output_dim
self.__dropout_rate = dropout_rate
self.__embedding_dim = embedding_dim
self.__extra_dim = extra_dim
# If embedding_dim is not None, the output and input
# of this structure is relevant.
if self.__embedding_dim is not None:
self.__embedding_layer = nn.Embedding(output_dim, embedding_dim)
self.__init_tensor = nn.Parameter(
torch.randn(1, self.__embedding_dim),
requires_grad=True
)
# Make sure the input dimension of iterative LSTM.
if self.__extra_dim is not None and self.__embedding_dim is not None:
lstm_input_dim = self.__input_dim + self.__extra_dim + self.__embedding_dim
elif self.__extra_dim is not None:
lstm_input_dim = self.__input_dim + self.__extra_dim
elif self.__embedding_dim is not None:
lstm_input_dim = self.__input_dim + self.__embedding_dim
else:
lstm_input_dim = self.__input_dim
# Network parameter definition.
self.__dropout_layer = nn.Dropout(self.__dropout_rate)
self.__lstm_layer = nn.LSTM(
input_size=lstm_input_dim,
hidden_size=self.__hidden_dim,
batch_first=True,
bidirectional=False,
dropout=self.__dropout_rate,
num_layers=1
)
self.__linear_layer = nn.Linear(
self.__hidden_dim,
self.__output_dim
)
def forward(self, encoded_hiddens, seq_lens, forced_input=None, extra_input=None):
""" Forward process for decoder.
:param encoded_hiddens: is encoded hidden tensors produced by encoder.
:param seq_lens: is a list containing lengths of sentence.
:param forced_input: is truth values of label, provided by teacher forcing.
:param extra_input: comes from another decoder as information tensor.
:return: is distribution of prediction labels.
"""
# Concatenate information tensor if possible.
if extra_input is not None:
input_tensor = torch.cat([encoded_hiddens, extra_input], dim=1)
else:
input_tensor = encoded_hiddens
output_tensor_list, sent_start_pos = [], 0
if self.__embedding_dim is None or forced_input is not None:
for sent_i in range(0, len(seq_lens)):
sent_end_pos = sent_start_pos + seq_lens[sent_i]
# Segment input hidden tensors.
seg_hiddens = input_tensor[sent_start_pos: sent_end_pos, :]
if self.__embedding_dim is not None and forced_input is not None:
if seq_lens[sent_i] > 1:
seg_forced_input = forced_input[sent_start_pos: sent_end_pos]
seg_forced_tensor = self.__embedding_layer(seg_forced_input).view(seq_lens[sent_i], -1)
seg_prev_tensor = torch.cat([self.__init_tensor, seg_forced_tensor[:-1, :]], dim=0)
else:
seg_prev_tensor = self.__init_tensor
# Concatenate forced target tensor.
combined_input = torch.cat([seg_hiddens, seg_prev_tensor], dim=1)
else:
combined_input = seg_hiddens
dropout_input = self.__dropout_layer(combined_input)
lstm_out, _ = self.__lstm_layer(dropout_input.view(1, seq_lens[sent_i], -1))
linear_out = self.__linear_layer(lstm_out.view(seq_lens[sent_i], -1))
output_tensor_list.append(linear_out)
sent_start_pos = sent_end_pos
else:
for sent_i in range(0, len(seq_lens)):
prev_tensor = self.__init_tensor
# It's necessary to remember h and c state
# when output prediction every single step.
last_h, last_c = None, None
sent_end_pos = sent_start_pos + seq_lens[sent_i]
for word_i in range(sent_start_pos, sent_end_pos):
seg_input = input_tensor[[word_i], :]
combined_input = torch.cat([seg_input, prev_tensor], dim=1)
dropout_input = self.__dropout_layer(combined_input).view(1, 1, -1)
if last_h is None and last_c is None:
lstm_out, (last_h, last_c) = self.__lstm_layer(dropout_input)
else:
lstm_out, (last_h, last_c) = self.__lstm_layer(dropout_input, (last_h, last_c))
lstm_out = self.__linear_layer(lstm_out.view(1, -1))
output_tensor_list.append(lstm_out)
_, index = lstm_out.topk(1, dim=1)
prev_tensor = self.__embedding_layer(index).view(1, -1)
sent_start_pos = sent_end_pos
return torch.cat(output_tensor_list, dim=0)
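# A hedged sketch (illustrative, not part of the original repo) of the
# autoregressive path: no forced_input is given, so the decoder feeds its own
# previous prediction back in; the dimensions are arbitrary toy values.
def _example_lstm_decoder():
    decoder = LSTMDecoder(input_dim=16, hidden_dim=8, output_dim=5,
                          dropout_rate=0.0, embedding_dim=4)
    encoded = torch.randn(6, 16)                # word-level hiddens for lengths [4, 2]
    logits = decoder(encoded, seq_lens=[4, 2])
    assert logits.shape == (6, 5)               # one label distribution per word
    return logits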
class QKVAttention(nn.Module):
"""
Attention mechanism based on Query-Key-Value architecture. And
especially, when query == key == value, it's self-attention.
"""
def __init__(self, query_dim, key_dim, value_dim, hidden_dim, output_dim, dropout_rate):
super(QKVAttention, self).__init__()
# Record hyper-parameters.
self.__query_dim = query_dim
self.__key_dim = key_dim
self.__value_dim = value_dim
self.__hidden_dim = hidden_dim
self.__output_dim = output_dim
self.__dropout_rate = dropout_rate
# Declare network structures.
self.__query_layer = nn.Linear(self.__query_dim, self.__hidden_dim)
self.__key_layer = nn.Linear(self.__key_dim, self.__hidden_dim)
self.__value_layer = nn.Linear(self.__value_dim, self.__output_dim)
self.__dropout_layer = nn.Dropout(p=self.__dropout_rate)
def forward(self, input_query, input_key, input_value):
""" The forward propagation of attention.
Here we require the first dimension of input key
and value are equal.
:param input_query: is query tensor, (n, d_q)
:param input_key: is key tensor, (m, d_k)
:param input_value: is value tensor, (m, d_v)
:return: attention based tensor, (n, d_h)
"""
# Linear transform to fine-tune dimension.
linear_query = self.__query_layer(input_query)
linear_key = self.__key_layer(input_key)
linear_value = self.__value_layer(input_value)
score_tensor = F.softmax(torch.matmul(
linear_query,
linear_key.transpose(-2, -1)
), dim=-1) / math.sqrt(self.__hidden_dim)
forced_tensor = torch.matmul(score_tensor, linear_value)
forced_tensor = self.__dropout_layer(forced_tensor)
return forced_tensor
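# Shape sketch (illustrative, not part of the original repo): n query vectors
# attend over m key/value vectors and come back with output_dim features each.
def _example_qkv_attention_shapes():
    attn = QKVAttention(query_dim=8, key_dim=8, value_dim=8,
                        hidden_dim=16, output_dim=32, dropout_rate=0.0)
    query, key, value = torch.randn(5, 8), torch.randn(7, 8), torch.randn(7, 8)
    out = attn(query, key, value)
    assert out.shape == (5, 32)                 # (n, output_dim)
    return out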
class SelfAttention(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, dropout_rate):
super(SelfAttention, self).__init__()
# Record parameters.
self.__input_dim = input_dim
self.__hidden_dim = hidden_dim
self.__output_dim = output_dim
self.__dropout_rate = dropout_rate
# Record network parameters.
self.__dropout_layer = nn.Dropout(self.__dropout_rate)
self.__attention_layer = QKVAttention(
self.__input_dim, self.__input_dim, self.__input_dim,
self.__hidden_dim, self.__output_dim, self.__dropout_rate
)
def forward(self, input_x, seq_lens):
dropout_x = self.__dropout_layer(input_x)
attention_x = self.__attention_layer(
dropout_x, dropout_x, dropout_x
)
flat_x = torch.cat(
[attention_x[i][:seq_lens[i], :] for
i in range(0, len(seq_lens))], dim=0
)
return flat_x
| 17,569 | 38.483146 | 111 | py |
CaBERT-SLU | CaBERT-SLU-main/baseline_stackprop/utils/process.py | """
@Author : Lee, Qin
@StartTime : 2018/08/13
@Filename : process.py
@Software : Pycharm
@Framework : Pytorch
@LastModify : 2019/05/07
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import pickle
import os
import time
import random
import numpy as np
from tqdm import tqdm
from collections import Counter, defaultdict
from sklearn.metrics import f1_score
from sklearn.preprocessing import MultiLabelBinarizer
# Utils functions copied from Slot-gated model, origin url:
# https://github.com/MiuLab/SlotGated-SLU/blob/master/utils.py
from utils import miulab
from utils_bert import evaluate_iob, prf
def multilabel2one_hot(labels, nums):
res = [0.] * nums
if len(labels) == 0:
return res
if isinstance(labels[0], list):
for label in labels[0]:
res[label] = 1.
return res
for label in labels:
res[label] = 1.
return res
def instance2onehot(func, num_intent, data):
res = []
for intents in func(data):
#res.append(multilabel2one_hot(intents, num_intent))
res.append(intents)
return np.array(res)
class Processor(object):
def __init__(self, dataset, model, batch_size):
self.__dataset = dataset
self.__model = model
self.__batch_size = batch_size
if torch.cuda.is_available():
time_start = time.time()
self.__model = self.__model.cuda()
time_con = time.time() - time_start
print("The model has been loaded into GPU and cost {:.6f} seconds.\n".format(time_con))
self.__criterion = nn.NLLLoss()
self.__optimizer = optim.Adam(
self.__model.parameters(), lr=self.__dataset.learning_rate,
weight_decay=self.__dataset.l2_penalty
)
with open("data_with_slots/e2e/slot2id.pkl", 'rb') as f:
self.slot_dic = pickle.load(f)
def train(self):
best_dev_slot = 0.0
best_dev_intent = 0.0
best_dev_sent = 0.0
dataloader = self.__dataset.batch_delivery('train')
for epoch in range(0, self.__dataset.num_epoch):
total_slot_loss, total_intent_loss = 0.0, 0.0
time_start = time.time()
self.__model.train()
for text_batch, slot_batch, intent_batch in tqdm(dataloader, ncols=50):
padded_text, [sorted_slot, sorted_intent], seq_lens, _ = self.__dataset.add_padding(
text_batch, [(slot_batch, False), (intent_batch, False)]
)
sorted_intent = [item * num for item, num in zip(sorted_intent, seq_lens)]
sorted_intent = list(Evaluator.expand_list(sorted_intent))
text_var = Variable(torch.LongTensor(padded_text))
slot_var = Variable(torch.LongTensor(list(Evaluator.expand_list(sorted_slot))))
intent_var = Variable(torch.LongTensor(sorted_intent))
if torch.cuda.is_available():
text_var = text_var.cuda()
slot_var = slot_var.cuda()
intent_var = intent_var.cuda()
random_slot, random_intent = random.random(), random.random()
if random_slot < self.__dataset.slot_forcing_rate and \
random_intent < self.__dataset.intent_forcing_rate:
slot_out, intent_out = self.__model(
text_var, seq_lens, forced_slot=slot_var, forced_intent=intent_var
)
elif random_slot < self.__dataset.slot_forcing_rate:
slot_out, intent_out = self.__model(
text_var, seq_lens, forced_slot=slot_var
)
elif random_intent < self.__dataset.intent_forcing_rate:
slot_out, intent_out = self.__model(
text_var, seq_lens, forced_intent=intent_var
)
else:
slot_out, intent_out = self.__model(text_var, seq_lens)
slot_loss = self.__criterion(slot_out, slot_var)
intent_loss = self.__criterion(intent_out, intent_var)
batch_loss = slot_loss + intent_loss
self.__optimizer.zero_grad()
batch_loss.backward()
self.__optimizer.step()
try:
total_slot_loss += slot_loss.cpu().item()
total_intent_loss += intent_loss.cpu().item()
except AttributeError:
total_slot_loss += slot_loss.cpu().data.numpy()[0]
total_intent_loss += intent_loss.cpu().data.numpy()[0]
time_con = time.time() - time_start
            print('[Epoch {:2d}]: The total slot loss on train data is {:2.6f}, the total intent loss is {:2.6f}, '
                  'costing about {:2.6} seconds.'.format(epoch, total_slot_loss, total_intent_loss, time_con))
change, time_start = False, time.time()
dev_f1_score, dev_intent_f1,dev_acc, dev_sent_acc = self.estimate(if_dev=True, test_batch=self.__batch_size)
if dev_f1_score > best_dev_slot or dev_acc > best_dev_intent or dev_sent_acc > best_dev_sent:
test_f1, test_intent_f1,test_acc, test_sent_acc = self.estimate(if_dev=False, test_batch=self.__batch_size)
if dev_f1_score > best_dev_slot:
best_dev_slot = dev_f1_score
if dev_acc > best_dev_intent:
best_dev_intent = dev_acc
if dev_sent_acc > best_dev_sent:
best_dev_sent = dev_sent_acc
                print('\nTest result: slot f1 score: {:.6f}, intent f1 score: {:.6f}, intent acc score: {:.6f}, semantic '
                      'accuracy score: {:.6f}.'.format(test_f1, test_intent_f1, test_acc, test_sent_acc))
model_save_dir = os.path.join(self.__dataset.save_dir, "model")
if not os.path.exists(model_save_dir):
os.mkdir(model_save_dir)
torch.save(self.__model, os.path.join(model_save_dir, "model.pkl"))
torch.save(self.__dataset, os.path.join(model_save_dir, 'dataset.pkl'))
time_con = time.time() - time_start
print('[Epoch {:2d}]: In validation process, the slot f1 score is {:2.6f}, ' \
'the intent f1 is {:2.6f}, the intent acc is {:2.6f}, the semantic acc is {:.2f}, cost about ' \
'{:2.6f} seconds.\n'.format(epoch, dev_f1_score, dev_intent_f1,dev_acc, dev_sent_acc, time_con))
def estimate(self, if_dev, test_batch=100):
"""
Estimate the performance of model on dev or test dataset.
"""
if if_dev:
pred_slot, real_slot, pred_intent, real_intent, _ = self.prediction(
self.__model, self.__dataset, "dev", test_batch
)
else:
pred_slot, real_slot, pred_intent, real_intent, _ = self.prediction(
self.__model, self.__dataset, "test", test_batch
)
# evaluate IOB
stats = defaultdict(Counter)
for pred, real in zip(pred_slot, real_slot):
evaluate_iob(pred, real, self.slot_dic, stats)
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
        # slot_f1_score = f1_score(bi_pred_slot, bi_real_slot, average='micro')
        intent_f1_score = f1_score(pred_intent, real_intent, average='micro')
        intent_acc = Evaluator.accuracy(pred_intent, real_intent)
        sent_acc = Evaluator.semantic_acc(pred_slot, real_slot, pred_intent, real_intent)
        return f1_slot, intent_f1_score, intent_acc, sent_acc #######
@staticmethod
def validate(model_path, dataset_path, batch_size):
"""
validation will write mistaken samples to files and make scores.
"""
model = torch.load(model_path)
dataset = torch.load(dataset_path)
# Get the sentence list in test dataset.
sent_list = dataset.test_sentence
pred_slot, real_slot, exp_pred_intent, real_intent, pred_intent = Processor.prediction(
model, dataset, "test", batch_size
)
# To make sure the directory for save error prediction.
mistake_dir = os.path.join(dataset.save_dir, "error")
if not os.path.exists(mistake_dir):
os.mkdir(mistake_dir)
slot_file_path = os.path.join(mistake_dir, "slot.txt")
intent_file_path = os.path.join(mistake_dir, "intent.txt")
both_file_path = os.path.join(mistake_dir, "both.txt")
# Write those sample with mistaken slot prediction.
with open(slot_file_path, 'w') as fw:
for w_list, r_slot_list, p_slot_list in zip(sent_list, real_slot, pred_slot):
if r_slot_list != p_slot_list:
for w, r, p in zip(w_list, r_slot_list, p_slot_list):
fw.write(w + '\t' + r + '\t' + p + '\n')
fw.write('\n')
# Write those sample with mistaken intent prediction.
with open(intent_file_path, 'w') as fw:
for w_list, p_intent_list, r_intent, p_intent in zip(sent_list, pred_intent, real_intent, exp_pred_intent):
if p_intent != r_intent:
for w, p in zip(w_list, p_intent_list):
fw.write(w + '\t' + p + '\n')
fw.write(r_intent + '\t' + p_intent + '\n\n')
# Write those sample both have intent and slot errors.
with open(both_file_path, 'w') as fw:
for w_list, r_slot_list, p_slot_list, p_intent_list, r_intent, p_intent in \
zip(sent_list, real_slot, pred_slot, pred_intent, real_intent, exp_pred_intent):
if r_slot_list != p_slot_list or r_intent != p_intent:
for w, r_slot, p_slot, p_intent_ in zip(w_list, r_slot_list, p_slot_list, p_intent_list):
fw.write(w + '\t' + r_slot + '\t' + p_slot + '\t' + p_intent_ + '\n')
fw.write(r_intent + '\t' + p_intent + '\n\n')
# evaluate IOB
with open("data_with_slots/e2e/slot2id.pkl", 'rb') as f:
slot_dic = pickle.load(f)
stats = defaultdict(Counter)
for pred, real in zip(pred_slot, real_slot):
evaluate_iob(pred, real, slot_dic, stats)
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
slot_f1 = miulab.computeF1Score(pred_slot, real_slot)[0]
intent_f1 = f1_score(pred_intent, real_intent, average='micro')
intent_acc = Evaluator.accuracy(exp_pred_intent, real_intent)
sent_acc = Evaluator.semantic_acc(pred_slot, real_slot, exp_pred_intent, real_intent)
return slot_f1, intent_f1, intent_acc, sent_acc
@staticmethod
def prediction(model, dataset, mode, batch_size):
model.eval()
if mode == "dev":
dataloader = dataset.batch_delivery('dev', batch_size=batch_size, shuffle=False, is_digital=False)
elif mode == "test":
dataloader = dataset.batch_delivery('test', batch_size=batch_size, shuffle=False, is_digital=False)
else:
raise Exception("Argument error! mode belongs to {\"dev\", \"test\"}.")
pred_slot, real_slot = [], []
pred_intent, real_intent = [], []
for text_batch, slot_batch, intent_batch in tqdm(dataloader, ncols=50):
padded_text, [sorted_slot, sorted_intent], seq_lens, sorted_index = dataset.add_padding(
text_batch, [(slot_batch, False), (intent_batch, False)], digital=False
)
            # Samples were sorted by length for padding; the original order only
            # matters for the error analysis written at test time, so it is
            # restored here for 'test' and left as-is during validation.
if mode == 'test':
tmp_r_slot = [[] for _ in range(len(sorted_index))]
for i in range(len(sorted_index)):
tmp_r_slot[sorted_index[i]] = sorted_slot[i]
sorted_slot = tmp_r_slot
tmp_intent = [[] for _ in range(len(sorted_index))]
for i in range(len(sorted_index)):
tmp_intent[sorted_index[i]] = sorted_intent[i]
sorted_intent = tmp_intent
real_slot.extend(sorted_slot)
real_intent.extend(list(Evaluator.expand_list(sorted_intent)))
digit_text = dataset.word_alphabet.get_index(padded_text)
var_text = Variable(torch.LongTensor(digit_text))
if torch.cuda.is_available():
var_text = var_text.cuda()
slot_idx, intent_idx = model(var_text, seq_lens, n_predicts=1)
nested_slot = Evaluator.nested_list([list(Evaluator.expand_list(slot_idx))], seq_lens)[0]
if mode == 'test':
tmp_r_slot = [[] for _ in range(len(sorted_index))]
for i in range(len(sorted_index)):
tmp_r_slot[sorted_index[i]] = nested_slot[i]
nested_slot = tmp_r_slot
pred_slot.extend(dataset.slot_alphabet.get_instance(nested_slot))
nested_intent = Evaluator.nested_list([list(Evaluator.expand_list(intent_idx))], seq_lens)[0]
if mode == 'test':
tmp_intent = [[] for _ in range(len(sorted_index))]
for i in range(len(sorted_index)):
tmp_intent[sorted_index[i]] = nested_intent[i]
nested_intent = tmp_intent
pred_intent.extend(dataset.intent_alphabet.get_instance(nested_intent))
exp_pred_intent = Evaluator.max_freq_predict(pred_intent)
return pred_slot, real_slot, exp_pred_intent, real_intent, pred_intent
class Evaluator(object):
@staticmethod
def semantic_acc(pred_slot, real_slot, pred_intent, real_intent):
"""
Compute the accuracy based on the whole predictions of
given sentence, including slot and intent.
"""
total_count, correct_count = 0.0, 0.0
for p_slot, r_slot, p_intent, r_intent in zip(pred_slot, real_slot, pred_intent, real_intent):
if p_slot == r_slot and p_intent == r_intent:
correct_count += 1.0
total_count += 1.0
return 1.0 * correct_count / total_count
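    # Hedged usage sketch (illustrative, not part of the original repo):
    #   Evaluator.semantic_acc([['O', 'B-date'], ['O']],       # pred_slot
    #                          [['O', 'B-date'], ['B-time']],  # real_slot
    #                          ['inform', 'request'],          # pred_intent
    #                          ['inform', 'request'])          # real_intent
    # returns 0.5: only the first utterance has both slots and intent correct.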
@staticmethod
def accuracy(pred_list, real_list):
"""
Get accuracy measured by predictions and ground-trues.
"""
pred_array = np.array(list(Evaluator.expand_list(pred_list)))
real_array = np.array(list(Evaluator.expand_list(real_list)))
return (pred_array == real_array).sum() * 1.0 / len(pred_array)
@staticmethod
def f1_score(pred_list, real_list):
"""
Get F1 score measured by predictions and ground-trues.
"""
tp, fp, fn = 0.0, 0.0, 0.0
for i in range(len(pred_list)):
seg = set()
result = [elem.strip() for elem in pred_list[i]]
target = [elem.strip() for elem in real_list[i]]
j = 0
while j < len(target):
cur = target[j]
if cur[0] == 'B':
k = j + 1
while k < len(target):
str_ = target[k]
if not (str_[0] == 'I' and cur[1:] == str_[1:]):
break
k = k + 1
seg.add((cur, j, k - 1))
j = k - 1
j = j + 1
tp_ = 0
j = 0
while j < len(result):
cur = result[j]
if cur[0] == 'B':
k = j + 1
while k < len(result):
str_ = result[k]
if not (str_[0] == 'I' and cur[1:] == str_[1:]):
break
k = k + 1
if (cur, j, k - 1) in seg:
tp_ += 1
else:
fp += 1
j = k - 1
j = j + 1
fn += len(seg) - tp_
tp += tp_
p = tp / (tp + fp) if tp + fp != 0 else 0
r = tp / (tp + fn) if tp + fn != 0 else 0
return 2 * p * r / (p + r) if p + r != 0 else 0
"""
Max frequency prediction.
"""
@staticmethod
def max_freq_predict(sample):
predict = []
for items in sample:
predict.append(Counter(items).most_common(1)[0][0])
return predict
@staticmethod
def exp_decay_predict(sample, decay_rate=0.8):
predict = []
for items in sample:
item_dict = {}
curr_weight = 1.0
for item in items[::-1]:
item_dict[item] = item_dict.get(item, 0) + curr_weight
curr_weight *= decay_rate
predict.append(sorted(item_dict.items(), key=lambda x_: x_[1])[-1][0])
return predict
@staticmethod
def expand_list(nested_list):
for item in nested_list:
if isinstance(item, (list, tuple)):
for sub_item in Evaluator.expand_list(item):
yield sub_item
else:
yield item
@staticmethod
def nested_list(items, seq_lens):
num_items = len(items)
trans_items = [[] for _ in range(0, num_items)]
count = 0
for jdx in range(0, len(seq_lens)):
for idx in range(0, num_items):
trans_items[idx].append(items[idx][count:count + seq_lens[jdx]])
count += seq_lens[jdx]
return trans_items
| 18,042 | 39.364653 | 123 | py |
CaBERT-SLU | CaBERT-SLU-main/baseline_stackprop/utils/loader.py | """
@Author : Lee, Qin
@StartTime : 2018/08/13
@Filename : loader.py
@Software : Pycharm
@Framework : Pytorch
@LastModify : 2019/05/07
"""
import os
import numpy as np
from copy import deepcopy
from collections import Counter
from collections import OrderedDict
from ordered_set import OrderedSet
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
class Alphabet(object):
"""
Storage and serialization a set of elements.
"""
def __init__(self, name, if_use_pad, if_use_unk):
self.__name = name
self.__if_use_pad = if_use_pad
self.__if_use_unk = if_use_unk
self.__index2instance = OrderedSet()
self.__instance2index = OrderedDict()
# Counter Object record the frequency
# of element occurs in raw text.
self.__counter = Counter()
if if_use_pad:
self.__sign_pad = "<PAD>"
self.add_instance(self.__sign_pad)
if if_use_unk:
self.__sign_unk = "<UNK>"
self.add_instance(self.__sign_unk)
@property
def name(self):
return self.__name
def add_instance(self, instance):
""" Add instances to alphabet.
1, We support any iterative data structure which
contains elements of str type.
2, We will count added instances that will influence
the serialization of unknown instance.
:param instance: is given instance or a list of it.
"""
if isinstance(instance, (list, tuple)):
for element in instance:
self.add_instance(element)
return
# We only support elements of str type.
assert isinstance(instance, str)
# count the frequency of instances.
self.__counter[instance] += 1
if instance not in self.__index2instance:
self.__instance2index[instance] = len(self.__index2instance)
self.__index2instance.append(instance)
def get_index(self, instance):
""" Serialize given instance and return.
For unknown words, the return index of alphabet
depends on variable self.__use_unk:
1, If True, then return the index of "<UNK>";
2, If False, then return the index of the
element that hold max frequency in training data.
:param instance: is given instance or a list of it.
:return: is the serialization of query instance.
"""
if isinstance(instance, (list, tuple)):
return [self.get_index(elem) for elem in instance]
assert isinstance(instance, str)
try:
return self.__instance2index[instance]
except KeyError:
if self.__if_use_unk:
return self.__instance2index[self.__sign_unk]
else:
max_freq_item = self.__counter.most_common(1)[0][0]
return self.__instance2index[max_freq_item]
def get_instance(self, index):
""" Get corresponding instance of query index.
if index is invalid, then throws exception.
:param index: is query index, possibly iterable.
:return: is corresponding instance.
"""
if isinstance(index, list):
return [self.get_instance(elem) for elem in index]
return self.__index2instance[index]
def save_content(self, dir_path):
""" Save the content of alphabet to files.
There are two kinds of saved files:
1, The first is a list file, elements are
sorted by the frequency of occurrence.
2, The second is a dictionary file, elements
are sorted by it serialized index.
:param dir_path: is the directory path to save object.
"""
# Check if dir_path exists.
if not os.path.exists(dir_path):
os.mkdir(dir_path)
list_path = os.path.join(dir_path, self.__name + "_list.txt")
with open(list_path, 'w') as fw:
for element, frequency in self.__counter.most_common():
fw.write(element + '\t' + str(frequency) + '\n')
dict_path = os.path.join(dir_path, self.__name + "_dict.txt")
with open(dict_path, 'w') as fw:
for index, element in enumerate(self.__index2instance):
fw.write(element + '\t' + str(index) + '\n')
def __len__(self):
return len(self.__index2instance)
def __str__(self):
return 'Alphabet {} contains about {} words: \n\t{}'.format(self.name, len(self), self.__index2instance)
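# A minimal usage sketch (illustrative, not part of the original repo): with
# if_use_unk=True, unseen words fall back to the index of '<UNK>'.
def _example_alphabet():
    alphabet = Alphabet('word', if_use_pad=True, if_use_unk=True)
    alphabet.add_instance(['find', 'me', 'a', 'movie'])
    assert alphabet.get_index('movie') == alphabet.get_index(['movie'])[0]
    assert alphabet.get_instance(alphabet.get_index('find')) == 'find'
    assert alphabet.get_index('unseen-word') == alphabet.get_index('<UNK>')
    return len(alphabet)   # <PAD>, <UNK>, find, me, a, movie -> 6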
class TorchDataset(Dataset):
"""
Helper class implementing torch.utils.data.Dataset to
instantiate DataLoader which deliveries data batch.
"""
def __init__(self, text, slot, intent):
self.__text = text
self.__slot = slot
self.__intent = intent
def __getitem__(self, index):
return self.__text[index], self.__slot[index], self.__intent[index]
def __len__(self):
# Pre-check to avoid bug.
assert len(self.__text) == len(self.__slot)
assert len(self.__text) == len(self.__intent)
return len(self.__text)
class DatasetManager(object):
def __init__(self, args):
# Instantiate alphabet objects.
self.__word_alphabet = Alphabet('word', if_use_pad=True, if_use_unk=True)
self.__slot_alphabet = Alphabet('slot', if_use_pad=False, if_use_unk=False)
self.__intent_alphabet = Alphabet('intent', if_use_pad=False, if_use_unk=False)
# Record the raw text of dataset.
self.__text_word_data = {}
self.__text_slot_data = {}
self.__text_intent_data = {}
# Record the serialization of dataset.
self.__digit_word_data = {}
self.__digit_slot_data = {}
self.__digit_intent_data = {}
self.__args = args
@property
def test_sentence(self):
return deepcopy(self.__text_word_data['test'])
@property
def word_alphabet(self):
return deepcopy(self.__word_alphabet)
@property
def slot_alphabet(self):
return deepcopy(self.__slot_alphabet)
@property
def intent_alphabet(self):
return deepcopy(self.__intent_alphabet)
@property
def num_epoch(self):
return self.__args.num_epoch
@property
def batch_size(self):
return self.__args.batch_size
@property
def learning_rate(self):
return self.__args.learning_rate
@property
def l2_penalty(self):
return self.__args.l2_penalty
@property
def save_dir(self):
return self.__args.save_dir
@property
def intent_forcing_rate(self):
return self.__args.intent_forcing_rate
@property
def slot_forcing_rate(self):
return self.__args.slot_forcing_rate
def show_summary(self):
"""
:return: show summary of dataset, training parameters.
"""
print("Training parameters are listed as follows:\n")
print('\tnumber of train sample: {};'.format(len(self.__text_word_data['train'])))
print('\tnumber of dev sample: {};'.format(len(self.__text_word_data['dev'])))
print('\tnumber of test sample: {};'.format(len(self.__text_word_data['test'])))
print('\tnumber of epoch: {};'.format(self.num_epoch))
print('\tbatch size: {};'.format(self.batch_size))
print('\tlearning rate: {};'.format(self.learning_rate))
print('\trandom seed: {};'.format(self.__args.random_state))
print('\trate of l2 penalty: {};'.format(self.l2_penalty))
print('\trate of dropout in network: {};'.format(self.__args.dropout_rate))
print('\tteacher forcing rate(slot) {};'.format(self.slot_forcing_rate))
print('\tteacher forcing rate(intent): {};'.format(self.intent_forcing_rate))
print("\nEnd of parameters show. Save dir: {}.\n\n".format(self.save_dir))
def quick_build(self):
"""
Convenient function to instantiate a dataset object.
"""
train_path = os.path.join(self.__args.data_dir, 'train.txt')
dev_path = os.path.join(self.__args.data_dir, 'dev.txt')
test_path = os.path.join(self.__args.data_dir, 'test.txt')
self.add_file(train_path, 'train', if_train_file=True)
self.add_file(dev_path, 'dev', if_train_file=False)
self.add_file(test_path, 'test', if_train_file=False)
# Check if save path exists.
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
alphabet_dir = os.path.join(self.__args.save_dir, "alphabet")
self.__word_alphabet.save_content(alphabet_dir)
self.__slot_alphabet.save_content(alphabet_dir)
self.__intent_alphabet.save_content(alphabet_dir)
def get_dataset(self, data_name, is_digital):
""" Get dataset of given unique name.
:param data_name: is name of stored dataset.
:param is_digital: make sure if want serialized data.
:return: the required dataset.
"""
if is_digital:
return self.__digit_word_data[data_name], \
self.__digit_slot_data[data_name], \
self.__digit_intent_data[data_name]
else:
return self.__text_word_data[data_name], \
self.__text_slot_data[data_name], \
self.__text_intent_data[data_name]
def add_file(self, file_path, data_name, if_train_file):
text, slot, intent = self.__read_file(file_path)
if if_train_file:
self.__word_alphabet.add_instance(text)
self.__slot_alphabet.add_instance(slot)
self.__intent_alphabet.add_instance(intent)
# Record the raw text of dataset.
self.__text_word_data[data_name] = text
self.__text_slot_data[data_name] = slot
self.__text_intent_data[data_name] = intent
# Serialize raw text and stored it.
self.__digit_word_data[data_name] = self.__word_alphabet.get_index(text)
if if_train_file:
self.__digit_slot_data[data_name] = self.__slot_alphabet.get_index(slot)
self.__digit_intent_data[data_name] = self.__intent_alphabet.get_index(intent)
@staticmethod
def __read_file(file_path):
""" Read data file of given path.
:param file_path: path of data file.
:return: list of sentence, list of slot and list of intent.
"""
texts, slots, intents = [], [], []
text, slot = [], []
with open(file_path, 'r') as fr:
for line in fr.readlines():
items = line.strip().split()
if len(items) == 1:
texts.append(text)
slots.append(slot)
intents.append(items)
# clear buffer lists.
text, slot = [], []
elif len(items) == 2:
text.append(items[0].strip())
slot.append(items[1].strip())
return texts, slots, intents
def batch_delivery(self, data_name, batch_size=None, is_digital=True, shuffle=True):
if batch_size is None:
batch_size = self.batch_size
if is_digital:
text = self.__digit_word_data[data_name]
slot = self.__digit_slot_data[data_name]
intent = self.__digit_intent_data[data_name]
else:
text = self.__text_word_data[data_name]
slot = self.__text_slot_data[data_name]
intent = self.__text_intent_data[data_name]
dataset = TorchDataset(text, slot, intent)
return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=self.__collate_fn)
@staticmethod
    def add_padding(texts, items=None, digital=True):
        """ Sort texts (and optional aligned items) by descending length and pad
        them to the maximum length, as required by pack_padded_sequence.
        :param texts: list of token-id lists (or token lists if digital is False).
        :param items: optional list of (item_list, need_padding) pairs aligned with texts.
        :param digital: pad with 0 if True, with '<PAD>' otherwise.
        :return: padded texts, [padded items,] sequence lengths and the sort index.
        """
len_list = [len(text) for text in texts]
max_len = max(len_list)
# Get sorted index of len_list.
sorted_index = np.argsort(len_list)[::-1]
trans_texts, seq_lens, trans_items = [], [], None
if items is not None:
trans_items = [[] for _ in range(0, len(items))]
for index in sorted_index:
seq_lens.append(deepcopy(len_list[index]))
trans_texts.append(deepcopy(texts[index]))
if digital:
trans_texts[-1].extend([0] * (max_len - len_list[index]))
else:
trans_texts[-1].extend(['<PAD>'] * (max_len - len_list[index]))
            # Pad the auxiliary items (when required) in the same sorted order as the texts.
if items is not None:
for item, (o_item, required) in zip(trans_items, items):
item.append(deepcopy(o_item[index]))
if required:
if digital:
item[-1].extend([0] * (max_len - len_list[index]))
else:
item[-1].extend(['<PAD>'] * (max_len - len_list[index]))
if items is not None:
return trans_texts, trans_items, seq_lens, sorted_index
else:
return trans_texts, seq_lens, sorted_index
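    # Hedged sketch (illustrative, not part of the original repo):
    #   DatasetManager.add_padding([[4, 5], [1, 2, 3]])
    # returns trans_texts [[1, 2, 3], [4, 5, 0]], seq_lens [3, 2] and
    # sorted_index [1, 0]: texts are re-ordered by descending length and
    # zero-padded to the longest one, as pack_padded_sequence expects.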
@staticmethod
def __collate_fn(batch):
"""
helper function to instantiate a DataLoader Object.
"""
n_entity = len(batch[0])
modified_batch = [[] for _ in range(0, n_entity)]
for idx in range(0, len(batch)):
for jdx in range(0, n_entity):
modified_batch[jdx].append(batch[idx][jdx])
return modified_batch
| 13,758 | 32.31477 | 112 | py |
CaBERT-SLU | CaBERT-SLU-main/data/dialogue_data.py | import torch as t
from torch.autograd import Variable
import numpy as np
import pandas as pd
import re
import pickle
import h5py
import json
import os
import csv
import spacy
from nltk.tokenize import word_tokenize
from transformers import BertTokenizer, BertModel, BertForMaskedLM
import time
class Data:
def __init__(self, data_path, rawdata_path, intent2id_path):
self.data_path = data_path
self.rawdata_path = rawdata_path
self.intent2id_path = intent2id_path
self.REPLACE_BY_SPACE_RE = re.compile(r'[/(){}\[\]\|@,;]')
self.BAD_SYMBOLS_RE = re.compile(r'[^0-9a-z #+_]')
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
#==================================================#
# Text Prepare #
#==================================================#
#pure virtual function
def prepare_text(self):
raise NotImplementedError("Please define virtual function!!")
# prepare text
def text_prepare(self, text, mode):
"""
text: a string
return: modified string
"""
text = text.lower() # lowercase text
text = re.sub(self.REPLACE_BY_SPACE_RE, ' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text
text = re.sub(self.BAD_SYMBOLS_RE, '', text) # delete symbols which are in BAD_SYMBOLS_RE from text
text = re.sub(r"[ ]+", " ", text)
text = re.sub(r"\!+", "!", text)
text = re.sub(r"\,+", ",", text)
text = re.sub(r"\?+", "?", text)
if mode == "Bert":
text = "[CLS] " + text + " [SEP]"
tokenized_text = self.tokenizer.tokenize(text)
tokenized_ids = self.tokenizer.convert_tokens_to_ids(tokenized_text)
text = tokenized_ids
return text
##################################
class E2EData(Data):
def __init__(self, data_path, rawdata_path, intent2id_path, slot2id_path, done=True):
super(E2EData, self).__init__(data_path, rawdata_path, intent2id_path)
self.slot2id_path = slot2id_path
self.train_data, self.intent2id, self.slot2id = self.prepare_dialogue(done)
self.num_labels = len(self.intent2id)
def get_tags(self, slot_name, string):
tags = []
slot_words = word_tokenize(string.lower())
for i, slot in enumerate(slot_words):
if i == 0:
tags.append('B-'+slot_name)
else:
tags.append('I-'+slot_name)
if len(slot_words) > 0:
return slot_words[0], (tags, ' '.join(slot_words))
else:
return None, None
def modify_slots(self, slots):
slot_dic = {}
for slot_pair in slots:
slot_n = slot_pair[0].strip()
if slot_n != 'other' and slot_n != 'description':
if slot_pair[1].find('{') == -1:
# only one slot value
key, value = self.get_tags(slot_n, slot_pair[1])
if key:
slot_dic[key] = value
else:
# more than one slot value
strings = slot_pair[1][1:-1].split('#')
for string in strings:
key, value = self.get_tags(slot_n, string)
if key:
slot_dic[key] = value
return slot_dic
def text_prepare_tag(self, tokens, text_labels):
"""Auxiliary function for parsing tokens.
@param tokens: raw tokens
@param text_labels: raw_labels
"""
tokenized_sentence = []
labels = []
# Reparse the labels in parallel with the results after Bert tokenization
for word, label in zip(tokens, text_labels):
tokenized_word = self.tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
tokenized_sentence.extend(tokenized_word)
if label.find('B-') != -1:
labels.extend([label])
labels.extend(['I-'+label[2:]] * (n_subwords-1))
else:
labels.extend([label] * n_subwords)
tokenized_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]']+tokenized_sentence+['[SEP]'])
labels = ['[PAD]']+labels+['[PAD]']
return tokenized_sentence, tokenized_ids, labels
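    # Illustrative walk-through (an assumption, not from the original repo): if
    # the WordPiece tokenizer splits 'coldplay' into ['cold', '##play'], then
    #   tokens = ['play', 'coldplay'], text_labels = ['O', 'B-artist']
    # comes back as
    #   tokenized_sentence = ['play', 'cold', '##play']
    #   labels             = ['[PAD]', 'O', 'B-artist', 'I-artist', '[PAD]']
    # with tokenized_ids holding the [CLS] ... [SEP] ids aligned with the labels.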
def prepare(self, data_path, intent2id, counter, slot2id, scounter):
print('Parsing file: ', data_path)
all_data = []
data = []
prev_id = '1'
with open(self.data_path+data_path, 'r') as f:
for i, line in enumerate(f):
if i == 0:
continue
infos = line.split('\t')
dialogue_id = infos[0]
message_id = infos[1]
speaker = infos[3]
text = infos[4]
intents = []
slots = []
for act in infos[5:]:
if act[:act.find('(')] != '':
intents.append(act[:act.find('(')])
s = re.findall('\((.*)\)', act)
if s:
slots.append(s[0].split(';'))
############################### single intent ###############################
# intents = "@".join(sorted(intents))
# if intents not in intent2id:
# intent2id[intents] = counter
# counter += 1
# intents = intent2id[intents]
############################### multi intents ###############################
for intent in intents:
if intent not in intent2id:
intent2id[intent] = (counter, self.text_prepare(intent, 'Bert')) # counter
counter += 1
intents = [intent2id[intent][0] for intent in intents]
intents = list(set(intents))
#################################### slots ###################################
text = word_tokenize(text.lower())
if len(slots) == 0:
final_tags = ['O']*len(text)
else:
if len(slots) == 1:
slots_split = [slot.split('=') for slot in slots[0] if len(slot.split('=')) == 2]
else:
news = []
for slot in slots:
news.extend(slot)
slots_split = [slot.split('=') for slot in news if len(slot.split('=')) == 2]
slot_dic = self.modify_slots(slots_split)
final_tags = []
cc = 0
for i, word in enumerate(text):
if i < cc:
continue
if word in slot_dic and ' '.join(text[i:i+len(slot_dic[word][0])]) == slot_dic[word][1]:
final_tags.extend(slot_dic[word][0])
cc += len(slot_dic[word][0])
else:
final_tags.append('O')
cc += 1
if data and prev_id != dialogue_id:
all_data.append(data)
data = []
prev_id = dialogue_id
utt, utt_ids, final_tags = self.text_prepare_tag(text, final_tags)
                ############################ slots convert to ids ###################################
for slot in final_tags:
if slot not in slot2id:
slot2id[slot] = scounter # counter
scounter += 1
slots_ids = [slot2id[slot] for slot in final_tags]
data.append((utt_ids, slots_ids, intents))
# data.append((utt, utt_ids, final_tags, slots_ids, intents))
# data.append((text, intents, slots))
return all_data, counter, scounter
def prepare_dialogue(self, done):
"""
train_data:
a list of dialogues
for each dialogue:
[(sent1, [label1, label2], [slot1, slot2]),
(sent2, [label2], [slot2]),...]
"""
if done:
with open(self.rawdata_path, "rb") as f:
train_data = pickle.load(f)
with open(self.intent2id_path, "rb") as f:
intent2id = pickle.load(f)
with open(self.slot2id_path, "rb") as f:
slot2id = pickle.load(f)
return train_data, intent2id, slot2id
ptime = time.time()
# if os.path.exists(self.intent2id_path):
# with open(self.intent2id_path, "rb") as f:
# intent2id = pickle.load(f)
# counter = len(intent2id)
# else:
intent2id = {}
counter = 0
slot2id = {}
scounter = 0
all_data = []
for data_path in os.listdir(self.data_path):
data, counter, scounter = self.prepare(data_path, intent2id, counter, slot2id, scounter)
all_data += data
with open(self.rawdata_path, "wb") as f:
pickle.dump(all_data, f)
with open(self.intent2id_path, "wb") as f:
pickle.dump(intent2id, f)
with open(self.slot2id_path, "wb") as f:
pickle.dump(slot2id, f)
print("Process time: ", time.time()-ptime)
return all_data, intent2id, slot2id
############################################################################
class SGDData(Data):
def __init__(self, data_path, rawdata_path, intent2id_path, slot2id_path, turn_path, done=True):
super(SGDData, self).__init__(data_path, rawdata_path, intent2id_path)
self.slot2id_path = slot2id_path
self.turn_path = turn_path
self.train_data, self.intent2id, self.slot2id, self.turn_data_all = self.prepare_dialogue(done)
self.num_labels = len(self.intent2id)
self.num_slot_labels = len(self.slot2id)
def build_ids(self, items, item2id, counter):
for item in items:
if item not in item2id:
item2id[item] = (counter, self.text_prepare(item, 'Bert')) # counter
counter += 1
items = [item2id[item][0] for item in items]
return items, item2id, counter
def get_tags(self, slot_name, string):
tags = []
slot_words = word_tokenize(string.lower())
for i, slot in enumerate(slot_words):
if i == 0:
tags.append('B-'+slot_name)
else:
tags.append('I-'+slot_name)
if len(slot_words) > 0:
return slot_words[0], (tags, ' '.join(slot_words))
else:
return None, None
def text_prepare_tag(self, tokens, text_labels):
"""Auxiliary function for parsing tokens.
@param tokens: raw tokens
@param text_labels: raw_labels
"""
tokenized_sentence = []
labels = []
# Reparse the labels in parallel with the results after Bert tokenization
for word, label in zip(tokens, text_labels):
tokenized_word = self.tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
tokenized_sentence.extend(tokenized_word)
if label.find('B-') != -1:
labels.extend([label])
labels.extend(['I-'+label[2:]] * (n_subwords-1))
else:
labels.extend([label] * n_subwords)
tokenized_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]']+tokenized_sentence+['[SEP]'])
labels = ['[PAD]']+labels+['[PAD]']
return tokenized_sentence, tokenized_ids, labels
def prepare_dialogue(self, done):
"""
train_data:
a list of dialogues (utterance-level)
for each dialogue:
[(sent1, [label1, label2], [slot1, slot2]),
(sent2, [label2], [slot2]),...]
a list of dialogues (turn-level)
for each dialogue:
[(turn1, intents1, requested_slots1, slots1, values1),...
(turn2, intents2, requested_slots2, slots2, values2),...]
"""
if done:
with open(self.rawdata_path, "rb") as f:
train_data = pickle.load(f)
with open(self.intent2id_path, "rb") as f:
intent2id = pickle.load(f)
with open(self.slot2id_path, "rb") as f:
slot2id = pickle.load(f)
with open(self.turn_path, "rb") as f:
turn_data_all = pickle.load(f)
return train_data, intent2id, slot2id, turn_data_all
ptime = time.time()
# if os.path.exists(self.intent2id_path):
# with open(self.intent2id_path, "rb") as f:
# intent2id = pickle.load(f)
# counter = len(intent2id)
# else:
intent2id = {}
counter = 0
aintent2id = {}
acounter = 0
request2id = {}
rcounter = 0
slot2id = {}
scounter = 0
all_data = []
all_data_turn = []
services = []
for file in sorted(os.listdir(self.data_path))[:-1]:
with open(os.path.join(self.data_path, file), 'r') as f:
print('Parsing file: ', file)
raw_data = json.load(f)
for dialogue in raw_data:
# if len(dialogue['services']) == 1:
# continue
# utterance data
data = []
# turn data
prev_text = 'this is a dummy sentence'
prev_data = ('', '', '')
data_turn = []
for turns in dialogue['turns']:
###################### utterance ##########################
intents = []
slots = []
for action in turns['frames'][0]['actions']:
intents.append(action['act'])
slots.append((action['slot'], action['values']))
intents = list(set(intents))
# single intent
# intents = "@".join(intents)
# if intents not in intent2id:
# intent2id[intents] = counter
# counter += 1
# intents = intent2id[intents]
###################### multi intents ######################
for intent in intents:
if intent not in intent2id:
intent2id[intent] = (counter, self.text_prepare(intent, 'Bert')) # counter
counter += 1
intents = [intent2id[intent][0] for intent in intents]
# slot values number
if 'slots' in turns['frames'][0]:
slot_nums = turns['frames'][0]['slots']
else:
slot_nums = []
###################### slots ######################
utt = turns['utterance']
utt_token = word_tokenize(utt.lower())
slot_dic = {}
if len(slot_nums) == 0:
final_tags = ['O']*len(utt_token)
else:
for slot_dic_example in slot_nums:
start = slot_dic_example['start']
end = slot_dic_example['exclusive_end']
slot_name = slot_dic_example['slot']
slot_words = utt[start:end]
key, value = self.get_tags(slot_name, slot_words)
if key:
slot_dic[key] = value
final_tags = []
rc = 0
for i, word in enumerate(utt_token):
if i < rc:
continue
if word in slot_dic and ' '.join(utt_token[i:i+len(slot_dic[word][0])]) == slot_dic[word][1]:
final_tags.extend(slot_dic[word][0])
rc += len(slot_dic[word][0])
else:
final_tags.append('O')
rc += 1
utt, utt_ids, final_tags = self.text_prepare_tag(utt_token, final_tags)
                        ############################ slots convert to ids ###################################
for slot in final_tags:
if slot not in slot2id:
slot2id[slot] = scounter # counter
scounter += 1
slots_ids = [slot2id[slot] for slot in final_tags]
# data.append((self.text_prepare(turns['utterance'], 'Bert'), intents, slots))
data.append((utt_ids, slots_ids, intents))
# data.append((utt_token, utt_ids, slot_nums, slots_ids, intents))
###################### turn ##########################
if 'state' in turns['frames'][0]:
slot_values = turns['frames'][0]['state']['slot_values']
if not slot_values:
s_turn = []
v_turn = []
else:
s_turn, v_turn = zip(*[(k,v[0]) for k, v in slot_values.items()])
encoded = self.tokenizer.encode_plus(prev_text, text_pair=turns['utterance'], return_tensors='pt')
aintents, aintent2id, acounter = self.build_ids([turns['frames'][0]['state']['active_intent']], aintent2id, acounter)
requests, request2id, rcounter = self.build_ids(turns['frames'][0]['state']['requested_slots'], request2id, rcounter)
data_turn.append((encoded['input_ids'], aintents, requests, s_turn, v_turn, (prev_data, data[-1])))
prev_text = turns['utterance']
else:
prev_text = turns['utterance']
prev_data = data[-1]
all_data.append(data)
all_data_turn.append(data_turn)
services.append(dialogue['services'])
with open(self.rawdata_path, "wb") as f:
pickle.dump(all_data, f)
with open(self.intent2id_path, "wb") as f:
pickle.dump(intent2id, f)
with open(self.slot2id_path, "wb") as f:
pickle.dump(slot2id, f)
with open("sgd_dialogue/services.pkl", "wb") as f:
pickle.dump(services, f)
turn_data_all = {'turns': all_data_turn,
'aintent2id': aintent2id,
'request2id': request2id}
with open(self.turn_path, "wb") as f:
pickle.dump(turn_data_all, f)
print("Process time: ", time.time()-ptime)
return all_data, intent2id, slot2id, turn_data_all
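# --- Hedged usage sketch (not part of the original repo code) ---
# Illustrates the structures documented in prepare_dialogue's docstring; the
# helper below is purely illustrative and assumes the data has already been parsed.
def _inspect_first_dialogue(all_data, turn_data_all):
    """Print a few fields from the first parsed dialogue for sanity checking."""
    for utt_ids, slot_ids, intents in all_data[0]:
        print(len(utt_ids), len(slot_ids), intents)
    if turn_data_all['turns'] and turn_data_all['turns'][0]:
        input_ids, aintents, requests, s_turn, v_turn, _ = turn_data_all['turns'][0][0]
        print(input_ids.shape, aintents, requests, s_turn, v_turn)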
if __name__ == "__main__":
if not os.path.exists('e2e_dialogue/'):
os.mkdir('e2e_dialogue/')
if not os.path.exists('sgd_dialogue/'):
os.mkdir('sgd_dialogue/')
# e2e dataset
data_path = "../raw_datasets/e2e_dialogue/"
rawdata_path = "e2e_dialogue/dialogue_data_multi_with_slots.pkl"
intent2id_path = "e2e_dialogue/intent2id_multi_with_tokens.pkl"
slot2id_path = "e2e_dialogue/slot2id.pkl"
data = E2EData(data_path, rawdata_path, intent2id_path, slot2id_path, done=False)
# print(data.intent2id)
# print(data.slot2id)
# for utt, utt_ids, slot, slot_ids, intents in data.train_data[10]:
# print(utt)
# print(utt_ids)
# print(slot)
# print(slot_ids)
# print(intents)
# print('--------------')
# for utt_ids, slot_ids, intents in data.train_data[10]:
# print(utt_ids)
# print(slot_ids)
# print(intents)
# print('--------------')
# sgd dataset
data_path = "../raw_datasets/dstc8-schema-guided-dialogue/train"
rawdata_path = "sgd_dialogue/dialogue_data_multi_with_slots.pkl"
intent2id_path = "sgd_dialogue/intent2id_multi_with_tokens.pkl"
slot2id_path = "sgd_dialogue/slot2id.pkl"
turn_path = "sgd_dialogue/turns.pkl"
data = SGDData(data_path, rawdata_path, intent2id_path, slot2id_path, turn_path, done=False)
# print(data.turn_data_all['turns'][0])
# print(data.train_data[100])
# print(data.intent2id)
# print(data.slot2id)
# for utt_token, utt_ids, slot_nums, slots_ids, intents in data.train_data[10]:
# print(utt_token)
# print(utt_ids)
# print(slot_nums)
# print(slots_ids)
# print(intents)
# print('--------------')
# for utt_ids, slot_ids, intents in data.train_data[10]:
# print(utt_ids)
# print(slot_ids)
# print(intents)
# print('--------------') | 22,069 | 38.837545 | 145 | py |
CaBERT-SLU | CaBERT-SLU-main/model/transformer_new.py | """Transformer module with masks"""
import torch
import torch.nn as nn
import numpy as np
class ScaledDotProductAttention(nn.Module):
"""Scaled dot-product attention mechanism.
"""
def __init__(self, attention_dropout=0.0):
super(ScaledDotProductAttention, self).__init__()
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, cross1, cross2, scale=None, attn_mask=None):
"""
Args:
q: Queries [B, L_q, D_q]
k: Keys [B, L_k, D_k]
v: Values [B, L_v, D_v]
scale: 1/sqrt(dk)
attn_mask: [B, L_q, L_k]
Returns:
context, attention
"""
attention = torch.bmm(q, k.transpose(1, 2))
if scale:
attention = attention * scale
attention = attention.masked_fill_(attn_mask, -np.inf)
attention = self.softmax(attention)
attention = self.dropout(attention)
context = torch.bmm(attention, v)
attention_score = torch.bmm(cross1, cross2.transpose(1,2))
return context, attention_score
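# --- Hedged usage sketch (not part of the original file) ---
# Shapes only; the sizes below are illustrative and not taken from the repo.
def _example_scaled_dot_product_attention():
    b, t, d = 2, 5, 16
    attn = ScaledDotProductAttention(attention_dropout=0.0)
    q, k, v = torch.randn(b, t, d), torch.randn(b, t, d), torch.randn(b, t, d)
    cross1, cross2 = torch.randn(b, t, 8), torch.randn(b, t, 8)
    attn_mask = torch.zeros(b, t, t, dtype=torch.bool)  # nothing masked out
    context, score = attn(q, k, v, cross1, cross2, scale=d ** -0.5, attn_mask=attn_mask)
    return context.shape, score.shape  # (b, t, d) and (b, t, t)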
class Transformer(nn.Module):
"""Transformer module.
"""
def __init__(self, hidden_dim, model_dim=512, num_heads=8, dropout=0.0):
super(Transformer, self).__init__()
self.dim_per_head = model_dim // num_heads
self.num_heads = num_heads
self.linear_k = nn.Linear(hidden_dim, self.dim_per_head * num_heads)
self.linear_v = nn.Linear(hidden_dim, self.dim_per_head * num_heads)
self.linear_q = nn.Linear(hidden_dim, self.dim_per_head * num_heads)
self.dot_product_attention = ScaledDotProductAttention(dropout)
self.linear_final = nn.Linear(model_dim, model_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(model_dim)
# cross attention mechanism
self.embed_k = nn.Linear(hidden_dim, 200)
self.embed_q = nn.Linear(hidden_dim, 200)
def forward(self, key, value, query, masks=None):
# Padding mask: Input size: (B, T)
len_q = masks.size(1)
pad_mask = masks.eq(0)
attn_mask = pad_mask.unsqueeze(1).expand(-1, len_q, -1)
attn_mask1 = masks.unsqueeze(1).expand(-1, len_q, -1)
attn_mask2 = masks.unsqueeze(2).expand(-1, -1, len_q)
attn_mask3 = (attn_mask1*attn_mask2).eq(0)
residual = query
dim_per_head = self.dim_per_head
num_heads = self.num_heads
batch_size = key.size(0)
# cross attention
cross1 = self.embed_k(key)
cross2 = self.embed_q(query)
# linear projection
key = self.linear_k(key)
value = self.linear_v(value)
query = self.linear_q(query)
# split by heads
key = key.view(batch_size * num_heads, -1, dim_per_head)
value = value.view(batch_size * num_heads, -1, dim_per_head)
query = query.view(batch_size * num_heads, -1, dim_per_head)
attn_mask = attn_mask.repeat(num_heads, 1, 1)
# scaled dot product attention
scale = (key.size(-1) // num_heads) ** -0.5
context, attention = self.dot_product_attention(
query, key, value, cross1, cross2, scale, attn_mask)
# concat heads
context = context.view(batch_size, -1, dim_per_head * num_heads)
output = torch.cat([residual, context], dim=2)
# average attention over head
# attention = attention.view(batch_size, num_heads, len_q, len_q)
# attention = torch.mean(attention, dim=1)
attention = attention.masked_fill(attn_mask3, 0.)
attention = nn.Softmax(dim=2)(attention)
#print(attn_mask3[0])
# attention = attention*attn_mask3
return output, attention | 3,804 | 32.672566 | 76 | py |
CaBERT-SLU | CaBERT-SLU-main/model/baseline_multi.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from transformers import BertTokenizer, BertModel
class MULTI(nn.Module):
def __init__(self, opt, num_labels=2, num_slot_labels=10):
super(MULTI, self).__init__()
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.embedding = nn.Embedding(len(self.tokenizer.vocab), 64)
self.rnn_sentence = nn.LSTM(input_size=64,
hidden_size=64,
bidirectional=True,
batch_first=True,
num_layers=1)
self.decoder = AttnDecoderRNN(64, opt)
self.slot_decoder = AttnDecoderRNN(64, opt)
self.classifier1 = nn.Linear(128, num_labels)
nn.init.xavier_normal_(self.classifier1.weight)
self.classifier2 = nn.Linear(128, num_labels)
nn.init.xavier_normal_(self.classifier2.weight)
self.classifier_slot = nn.Linear(128, num_slot_labels)
nn.init.xavier_normal_(self.classifier_slot.weight)
#self.dropout = nn.Dropout(0.1)
self.num_labels = num_labels
self.num_slot_labels = num_slot_labels
self.opt = opt
def forward(self, x_inputs):
# Encoder
X = self.embedding(x_inputs)
rnn_out, encoder_hidden = self.rnn_sentence(X)
#rnn_out = self.dropout(rnn_out)
logits = self.classifier1(rnn_out[:,-1,:])
encoder_logits = logits
# Decoder
decoder_hidden = encoder_hidden
decoder_outputs = torch.zeros(*rnn_out.shape, device=self.device)
for di in range(x_inputs.shape[1]):
decoder_output, decoder_hidden = self.decoder(decoder_hidden, rnn_out, di)
decoder_outputs[:,di,:] = decoder_output.squeeze(1)
#decoder_outputs = self.dropout(decoder_outputs)
decoder_logits = self.classifier2(decoder_outputs)
# Slot Decoder
decoder_hidden = encoder_hidden
slot_outputs = torch.zeros(*rnn_out.shape, device=self.device)
for di in range(x_inputs.shape[1]):
decoder_output, decoder_hidden = self.slot_decoder(decoder_hidden, rnn_out, di)
slot_outputs[:,di,:] = decoder_output.squeeze(1)
#decoder_outputs = self.dropout(decoder_outputs)
slot_logits = self.classifier_slot(slot_outputs)
slot_logits = slot_logits.view(-1, self.num_slot_labels)
return encoder_logits, decoder_logits, slot_logits
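# --- Hedged usage sketch (not part of the original file) ---
# `opt` only needs a `maxlen` attribute here; that is an assumption about the
# training config object, and all sizes are illustrative.
def _example_multi_forward():
    from types import SimpleNamespace
    opt = SimpleNamespace(maxlen=20)
    model = MULTI(opt, num_labels=5, num_slot_labels=10)
    model = model.to(model.device)
    x = torch.randint(0, len(model.tokenizer.vocab), (2, opt.maxlen), device=model.device)
    enc_logits, dec_logits, slot_logits = model(x)
    return enc_logits.shape, dec_logits.shape, slot_logits.shape  # (2, 5), (2, 20, 5), (40, 10)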
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, opt):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = 64
self.max_length = opt.maxlen
self.attn = nn.Linear(self.hidden_size * 4, 1)
self.attn_combine = nn.Linear(self.hidden_size * 4, self.hidden_size)
self.rnn_token = nn.LSTM(input_size=self.hidden_size,
hidden_size=self.hidden_size,
bidirectional=True,
batch_first=True,
num_layers=1)
self.W = nn.Parameter(torch.zeros(self.hidden_size*2,1))
self.v = nn.Parameter(torch.zeros(1))
def forward(self, hidden, encoder_outputs, di):
b, t, h = encoder_outputs.shape
# repeat decoder hidden
decoder_hidden = hidden[0].view(-1, 128) # (b,2h)
hidden_repeat = decoder_hidden.unsqueeze(1) # (b,1,2h)
hidden_repeat = hidden_repeat.repeat(1,t,1) # (b,t,2h)
# attention
attn_weights = self.attn(torch.cat((encoder_outputs, hidden_repeat), 2)) # (b,t,1)
attn_weights = F.softmax(attn_weights, dim=1) # (b,t,1)
attn_applied = torch.bmm(encoder_outputs.transpose(2,1), attn_weights).squeeze(2) # (b,2h)
# # slot-gated:
# print(attn_applied.shape)
# print(encoder_outputs[:,-1,:].shape)
# print(self.W.shape)
# print(self.v.shape)
# g = torch.sum(self.v * torch.tanh(attn_applied + encoder_outputs[:,-1,:] * self.W), dim=1) # (b,)
# g = g.expand(dim=1).repeat(1,1,2*h) # (b,1)
output = torch.cat((encoder_outputs[:,di,:], attn_applied), dim=1) # (b,4h)
# linear layer
output = self.attn_combine(output) # (b,h)
output = F.relu(output)
output = output.unsqueeze(1) # (b,1,h)
output, hidden = self.rnn_token(output, hidden)
return output, hidden
| 4,648 | 39.426087 | 107 | py |
CaBERT-SLU | CaBERT-SLU-main/model/CHAN.py | import os.path
import math
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn import CosineEmbeddingLoss
ffscores = []
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
self.scores = None
def attention(self, q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
self.scores = scores
ffscores.append(self.scores.cpu())
output = torch.matmul(scores, v)
return output
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = self.attention(q, k, v, self.d_k, mask, self.dropout)
# concatenate heads and put through final linear layer
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
def get_scores(self):
return self.scores
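# --- Hedged usage sketch (not part of the original file) ---
# 768 matches the BERT hidden size used elsewhere in this repo; the dialogue
# and turn counts are illustrative. Note that every call appends its score
# matrix to the module-level `ffscores` list.
def _example_multi_head_attention():
    mha = MultiHeadAttention(heads=4, d_model=768, dropout=0.0)
    x = torch.randn(2, 7, 768)       # (dialogues, turns, hidden)
    mask = torch.ones(2, 7, 7)       # 1 = may attend, 0 = masked out
    out = mha(x, x, x, mask=mask)    # (2, 7, 768)
    return out.shape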
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class Encoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = nn.LayerNorm(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
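# --- Hedged illustration (not part of the original file) ---
# subsequent_mask(3) yields a lower-triangular boolean matrix, so turn i may
# attend only to turns 0..i once it is combined with the padding mask.
def _example_subsequent_mask():
    m = subsequent_mask(3)  # shape (1, 3, 3)
    # tensor([[[ True, False, False],
    #          [ True,  True, False],
    #          [ True,  True,  True]]])
    return m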
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model, requires_grad=False)
position = torch.arange(0., max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0., d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
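# --- Hedged usage sketch (not part of the original file) ---
# Adds sinusoidal encodings along the turn axis of a (dialogues, turns, hidden)
# tensor; the sizes are illustrative, 768 being the BERT hidden size.
def _example_positional_encoding():
    pe = PositionalEncoding(d_model=768, dropout=0.0)
    x = torch.zeros(2, 7, 768)
    return pe(x).shape  # (2, 7, 768)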
class ContextAttention(nn.Module):
def __init__(self, device):
super(ContextAttention, self).__init__()
self.hidden_dim = 100
self.attn_head = 4
self.device = device
### Attention layer
self.attn = MultiHeadAttention(self.attn_head, 768, dropout=0.)
self.attn2 = MultiHeadAttention(self.attn_head, 768, dropout=0.)
self.add_pe = PositionalEncoding(768, 0.)
### Belief Tracker
self.nbt = Encoder(EncoderLayer(768,
MultiHeadAttention(self.attn_head, 768, dropout=0.),
PositionwiseFeedForward(768, self.hidden_dim, 0.),
0.1),
N=6)
def _make_aux_tensors(self, ids, len):
token_type_ids = torch.zeros(ids.size(), dtype=torch.long).to(self.device)
for i in range(len.size(0)):
for j in range(len.size(1)):
if len[i,j,0] == 0: # padding
break
elif len[i,j,1] > 0: # escape only text_a case
start = len[i,j,0]
ending = len[i,j,0] + len[i,j,1]
token_type_ids[i, j, start:ending] = 1
attention_mask = ids > 0
return token_type_ids, attention_mask
def forward(self, input_ids, result_masks):
ds = input_ids.size(0) # dialog size
ts = input_ids.size(1) # turn size
hidden = self.add_pe(input_ids)
# NBT
turn_mask = torch.Tensor(ds, ts, ts).byte().to(self.device)
for d in range(ds):
padding_utter = (result_masks[d,:].sum(-1) != 0)
turn_mask[d] = padding_utter.unsqueeze(0).repeat(ts,1) & subsequent_mask(ts).to(self.device)
hidden = self.nbt(hidden, turn_mask)
return hidden, ffscores | 6,881 | 33.238806 | 104 | py |
CaBERT-SLU | CaBERT-SLU-main/model/baseline_eca.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from transformers import BertTokenizer, BertModel
class ECA(nn.Module):
def __init__(self, opt, num_labels=2, num_slot_labels=10):
super(ECA, self).__init__()
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.embedding = nn.Embedding(len(self.tokenizer.vocab), 256)
self.utterance_encoder = nn.LSTM(input_size=256,
hidden_size=256,
bidirectional=True,
batch_first=True,
num_layers=1)
self.conversation_layer = nn.LSTM(input_size=512,
hidden_size=256,
bidirectional=True,
batch_first=True,
num_layers=1)
self.dense1 = nn.Linear(512, 256)
self.dense2 = nn.Linear(256, num_labels)
self.slot_decoder = AttnDecoderRNN(256, opt)
self.classifier_slot = nn.Linear(512, num_slot_labels)
nn.init.xavier_normal_(self.classifier_slot.weight)
#self.dropout = nn.Dropout(0.1)
self.num_labels = num_labels
self.num_slot_labels = num_slot_labels
self.dropout = nn.Dropout(0.1)
self.opt = opt
def forward(self, result_ids, result_token_masks, result_masks, lengths, result_slot_labels, labels, y_caps, y_masks):
# Utterance Encoder
b,d,t = result_ids.shape
result_ids = result_ids.view(-1, t)
X = self.embedding(result_ids)
rnn_out, encoder_hidden = self.utterance_encoder(X)
# pooling & conversation
pooled = rnn_out[:,-1,:].view(b,d,2*256)
out, hidden = self.conversation_layer(pooled)
out = self.dense1(out)
logits = self.dense2(out)
# Remove padding
logits_no_pad = []
labels_no_pad = []
for i in range(b):
logits_no_pad.append(logits[i,:lengths[i],:])
labels_no_pad.append(labels[i,:lengths[i],:])
logits = torch.cat(logits_no_pad, dim=0)
labels = torch.cat(labels_no_pad, dim=0)
# Slot Decoder
decoder_hidden = encoder_hidden
slot_outputs = torch.zeros(*rnn_out.shape, device=self.device)
for di in range(t):
decoder_output, decoder_hidden = self.slot_decoder(decoder_hidden, rnn_out, di)
slot_outputs[:,di,:] = decoder_output.squeeze(1)
#decoder_outputs = self.dropout(decoder_outputs)
slot_outputs = self.dropout(slot_outputs)
slot_logits = self.classifier_slot(slot_outputs)
slot_logits = slot_logits.view(-1, self.num_slot_labels)
return logits, labels, slot_logits
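# --- Hedged usage sketch (not part of the original file) ---
# Shapes only; `opt` needs just a `maxlen` attribute, and the dialogue sizes
# are illustrative assumptions rather than values from the repo. Arguments
# that this forward pass never reads are passed as None.
def _example_eca_shapes():
    from types import SimpleNamespace
    b, d, t, n_int, n_slot = 2, 3, 12, 5, 10
    model = ECA(SimpleNamespace(maxlen=t), num_labels=n_int, num_slot_labels=n_slot)
    model = model.to(model.device)
    ids = torch.randint(0, len(model.tokenizer.vocab), (b, d, t), device=model.device)
    lengths = torch.tensor([d, d])
    labels = torch.zeros(b, d, n_int, device=model.device)
    logits, labels_out, slot_logits = model(
        ids, None, None, lengths, None, labels, None, None)
    return logits.shape, slot_logits.shape  # (b*d, n_int), (b*d*t, n_slot)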
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, opt):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = 256
self.max_length = opt.maxlen
self.attn = nn.Linear(self.hidden_size * 4, 1)
self.attn_combine = nn.Linear(self.hidden_size * 4, self.hidden_size)
self.rnn_token = nn.LSTM(input_size=self.hidden_size,
hidden_size=self.hidden_size,
bidirectional=True,
batch_first=True,
num_layers=1)
self.W = nn.Parameter(torch.zeros(self.hidden_size*2,1))
self.v = nn.Parameter(torch.zeros(1))
def forward(self, hidden, encoder_outputs, di):
b, t, h = encoder_outputs.shape
# repeat decoder hidden
decoder_hidden = hidden[0].view(-1, 2*self.hidden_size) # (b,2h)
hidden_repeat = decoder_hidden.unsqueeze(1) # (b,1,2h)
hidden_repeat = hidden_repeat.repeat(1,t,1) # (b,t,2h)
# attention
attn_weights = self.attn(torch.cat((encoder_outputs, hidden_repeat), 2)) # (b,t,1)
attn_weights = F.softmax(attn_weights, dim=1) # (b,t,1)
attn_applied = torch.bmm(encoder_outputs.transpose(2,1), attn_weights).squeeze(2) # (b,2h)
output = torch.cat((encoder_outputs[:,di,:], attn_applied), dim=1) # (b,4h)
# linear layer
output = self.attn_combine(output) # (b,h)
output = F.relu(output)
output = output.unsqueeze(1) # (b,1,h)
output, hidden = self.rnn_token(output, hidden)
return output, hidden
| 4,601 | 37.033058 | 122 | py |
CaBERT-SLU | CaBERT-SLU-main/model/transformer.py | """Transformer module with masks"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncoderLayer
class TransformerModel(nn.Module):
def __init__(self, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
self.model_type = 'Transformer'
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.ninp = ninp
self.decoder = nn.Linear(ninp, 256)
self.init_weights()
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src, src_mask):
src = self.pos_encoder(src)
output = self.transformer_encoder(src, src_mask)
# output = self.decoder(output)
return output
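# --- Hedged usage sketch (not part of the original file) ---
# The encoder expects (sequence, batch, ninp) input; the causal mask keeps
# each position from attending to later ones. All sizes are illustrative.
def _example_transformer_model():
    model = TransformerModel(ninp=64, nhead=4, nhid=128, nlayers=2, dropout=0.1)
    src = torch.randn(5, 2, 64)                      # (turns, batch, ninp)
    mask = model.generate_square_subsequent_mask(5)  # (5, 5) causal mask
    return model(src, mask).shape                    # (5, 2, 64)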
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x) | 1,979 | 36.358491 | 100 | py |
CaBERT-SLU | CaBERT-SLU-main/model/mia.py | import os.path
import math
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn import CosineEmbeddingLoss
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
self.scores = None
def attention(self, q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1e9)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
self.scores = scores
output = torch.matmul(scores, v)
return output
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = self.attention(q, k, v, self.d_k, mask, self.dropout)
# concatenate heads and put through final linear layer
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
def get_scores(self):
return self.scores
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
    In this variant the norm is applied after the residual sum (post-norm).
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return self.norm(x[0] + self.dropout(sublayer(x)))
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, *args):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](args, lambda x: self.self_attn(x[0], x[1], x[1]))
return self.sublayer[1](x, self.feed_forward)
class MutualIterativeAttention(nn.Module):
def __init__(self, device):
super(MutualIterativeAttention, self).__init__()
self.hidden_dim = 100
self.attn_head = 4
self.N = 2
self.device = device
self.layer_refine = EncoderLayer(768,MultiHeadAttention(self.attn_head, 768, dropout=0.),
PositionwiseFeedForward(768, self.hidden_dim, 0.),
0.1)
def forward(self, enc_intent, enc_slot):
for i in range(self.N):
# Refining intent
enc_intent = self.layer_refine( enc_slot, enc_intent )
# Refining slot
enc_slot = self.layer_refine( enc_intent, enc_slot )
# SGIR = self.layer_norm( Refine_T + enc_slot ) # SGIR: Semantic-Grounded Image Representations
return enc_slot | 4,445 | 32.428571 | 103 | py |
CaBERT-SLU | CaBERT-SLU-main/model/torchcrf.py | """SOURCE CODE FROM PYTORCH-CRF
"""
from typing import List, Optional
import torch
import torch.nn as nn
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
if num_tags <= 0:
raise ValueError(f'invalid number of tags: {num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(
self,
emissions: torch.Tensor,
tags: torch.LongTensor,
mask: Optional[torch.ByteTensor] = None,
reduction: str = 'sum',
) -> torch.Tensor:
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
self._validate(emissions, tags=tags, mask=mask)
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise ValueError(f'invalid reduction: {reduction}')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.float().sum()
def decode(self, emissions: torch.Tensor,
mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
Returns:
List of list containing the best tag sequence for each batch.
"""
self._validate(emissions, mask=mask)
if mask is None:
mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
return self._viterbi_decode(emissions, mask)
def _validate(
self,
emissions: torch.Tensor,
tags: Optional[torch.LongTensor] = None,
mask: Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise ValueError(
f'expected last dimension of emissions is {self.num_tags}, '
f'got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise ValueError(
'the first two dimensions of emissions and tags must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def _compute_score(
self, emissions: torch.Tensor, tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and tags.dim() == 2
assert emissions.shape[:2] == tags.shape
assert emissions.size(2) == self.num_tags
assert mask.shape == tags.shape
assert mask[0].all()
seq_length, batch_size = tags.shape
mask = mask.float()
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score
def _compute_normalizer(
self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length = emissions.size(0)
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)
def _viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor) -> List[List[int]]:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history = []
# score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# history saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
# Find the maximum score over all possible current tag
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
history.append(indices)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Now, compute the best path for each sample
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
best_tags_list = []
for idx in range(batch_size):
# Find the tag which maximizes the score at the last timestep; this is our best tag
# for the last timestep
_, best_last_tag = score[idx].max(dim=0)
best_tags = [best_last_tag.item()]
# We trace back where the best last tag comes from, append that to our best tag
# sequence, and trace it back again, and so on
for hist in reversed(history[:seq_ends[idx]]):
best_last_tag = hist[idx][best_tags[-1]]
best_tags.append(best_last_tag.item())
# Reverse the order because we start from the last timestep
best_tags.reverse()
best_tags_list.append(best_tags)
return best_tags_list | 14,331 | 43.098462 | 95 | py |
CaBERT-SLU | CaBERT-SLU-main/model/bert_model_context.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from transformers import BertTokenizer, BertModel
from model.transformer import TransformerModel
from model.transformer_new import Transformer
from model.CHAN import ContextAttention
from model.torchcrf import CRF
from model.mia import MutualIterativeAttention
class BertContextNLU(nn.Module):
def __init__(self, config, opt, num_labels=2, num_slot_labels=144):
super(BertContextNLU, self).__init__()
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.num_labels = num_labels
self.num_slot_labels = num_slot_labels
self.bert = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True, output_attentions=True)
self.dropout = nn.Dropout(0.1)
self.hidden_size = config.hidden_size
self.rnn_hidden = opt.rnn_hidden
#########################################
# transformer
self.transformer_model = TransformerModel(ninp=self.hidden_size, nhead=4, nhid=64, nlayers=2, dropout=0.1)
self.transformer_encoder = Transformer(hidden_dim=self.hidden_size,
model_dim=256,
num_heads=2,
dropout=0.1)
# DiSAN
self.conv1 = nn.Conv1d(self.hidden_size, self.hidden_size, 3, padding=1)
self.conv2 = nn.Conv1d(self.hidden_size, self.hidden_size, 3, padding=1)
self.fc1 = nn.Linear(2*self.hidden_size, self.rnn_hidden)
# CHAN
self.context_encoder = ContextAttention(self.device)
# rnn
self.rnn = nn.LSTM(input_size=self.hidden_size,
hidden_size=self.rnn_hidden,
batch_first=True,
num_layers=1)
# classifier
self.classifier_rnn = nn.Linear(self.rnn_hidden, num_labels)
nn.init.xavier_normal_(self.classifier_rnn.weight)
self.classifier_bert = nn.Linear(self.hidden_size, num_labels)
nn.init.xavier_normal_(self.classifier_bert.weight)
self.classifier_transformer = nn.Linear(self.rnn_hidden*4, num_labels)
nn.init.xavier_normal_(self.classifier_transformer.weight)
# label embedding
self.clusters = nn.Parameter(torch.randn(num_labels, config.hidden_size).float(), requires_grad=True)
self.mapping = nn.Linear(config.hidden_size, self.rnn_hidden)
# slot prediction
self.slot_rnn = nn.LSTM(input_size=self.hidden_size+self.rnn_hidden,
hidden_size=self.rnn_hidden,
batch_first=True,
bidirectional=True,
num_layers=1)
self.slot_classifier = nn.Linear(2*self.rnn_hidden, num_slot_labels)
self.crf = CRF(self.num_slot_labels)
# mutual iterative attention
self.mia_encoder = MutualIterativeAttention(self.device)
# self attentive
self.linear1 = nn.Linear(config.hidden_size, 256)
self.linear2 = nn.Linear(4*256, config.hidden_size)
self.tanh = nn.Tanh()
self.context_vector = nn.Parameter(torch.randn(256, 4), requires_grad=True)
def self_attentive(self, last_hidden_states, d, b):
# input should be (b,d,h)
vectors = self.context_vector.unsqueeze(0).repeat(b*d, 1, 1)
h = self.linear1(last_hidden_states) # (b*d, t, h)
scores = torch.bmm(h, vectors) # (b*d, t, 4)
scores = nn.Softmax(dim=1)(scores) # (b*d, t, 4)
outputs = torch.bmm(scores.permute(0, 2, 1), h).view(b*d, -1) # (b*d, 4h)
pooled_output = self.linear2(outputs) # (b*d, h)
pooled_output = pooled_output.view(b,d,self.hidden_size) # (b,d,h)
return pooled_output
def mha(self, pooled_output, d, b):
# input should be (d,b,h)
pooled_output = pooled_output.view(d,b,self.hidden_size)
# src_mask = self.transformer_model.generate_square_subsequent_mask(d).to(self.device)
pooled_output = self.transformer_model(pooled_output, src_mask=None)
pooled_output = pooled_output.view(b,d,self.hidden_size)
return pooled_output
def label_embed(self, y_caps, y_masks, rnn_out, d, b):
last_hidden, clusters, hidden, att = self.bert(y_caps, attention_mask=y_masks)
# clusters = self.mapping(clusters) # (n, 256)
gram = torch.mm(clusters, clusters.permute(1,0)) # (n, n)
rnn_out = rnn_out.reshape(b*d, self.hidden_size) # (b*d, 768)
weights = torch.mm(rnn_out, clusters.permute(1,0)) # (b*d, n)
logits = torch.mm(weights, torch.inverse(gram))
logits = logits.view(b,d,self.num_labels)
return logits
def DiSAN(self, pooled_output, d, b):
# input should be (b,h,d)
pooled_score = pooled_output.view(b,self.hidden_size,d)
pooled_score = torch.sigmoid(self.conv1(pooled_score))
pooled_score = self.conv2(pooled_score)
pooled_score = F.softmax(pooled_score, dim=-1)
pooled_score = pooled_score.view(b,d,self.hidden_size)
pooled_output = pooled_score * pooled_output
return pooled_output
def forward(self, result_ids, result_token_masks, result_masks, lengths, result_slot_labels, labels, y_caps, y_masks):
"""
Inputs:
result_ids: (b, d, t)
result_token_masks: (b, d, t)
result_masks: (b, d)
lengths: (b)
result_slot_labels: (b, d, t)
labels: (b, d, l)
BERT outputs:
last_hidden_states: (bxd, t, h)
pooled_output: (bxd, h), from output of a linear classifier + tanh
hidden_states: 13 x (bxd, t, h), embed to last layer embedding
attentions: 12 x (bxd, num_heads, t, t)
"""
# BERT encoding
b,d,t = result_ids.shape
result_ids = result_ids.view(-1, t)
result_token_masks = result_token_masks.view(-1, t)
last_hidden_states, pooled_output, hidden_states, attentions = self.bert(result_ids, attention_mask=result_token_masks)
pooled_output = pooled_output.view(b,d,self.hidden_size)
## Token: Self-attentive
pooled_output = self.self_attentive(last_hidden_states, d, b) # (b,d,l)
# logits = self.classifier_bert(pooled_output)
## Turn: MHA
# pooled_output = self.mha(pooled_output, d, b) # (b,d,l)
## Turn: DiSAN
# context_vector = self.DiSAN(pooled_output, d, b)
# final_hidden = torch.cat([pooled_output, context_vector], dim=-1)
# final_hidden = self.fc1(final_hidden)
# logits = self.classifier_rnn(final_hidden)
## Turn: CHAN
pooled_output, ffscores = self.context_encoder(pooled_output, result_masks)
# logits = self.classifier_bert(pooled_output) # (b,d,l)
## Turn: transformer
# transformer_out, attention = self.transformer_encoder(pooled_output, pooled_output, pooled_output, result_masks)
# transformer_out = self.dropout(transformer_out)
# logits = self.classifier_transformer(transformer_out) # (b,d,l)
## Prediction: RNN
rnn_out, _ = self.rnn(pooled_output)
rnn_out = self.dropout(rnn_out)
logits = self.classifier_rnn(rnn_out) # (b,d,l)
## Prediction: Label Embedding
# logits = self.label_embed(y_caps, y_masks, pooled_output, d, b)
# Remove padding
logits_no_pad = []
labels_no_pad = []
for i in range(b):
logits_no_pad.append(logits[i,:lengths[i],:])
labels_no_pad.append(labels[i,:lengths[i],:])
logits = torch.cat(logits_no_pad, dim=0)
labels = torch.cat(labels_no_pad, dim=0)
#######
# slot prediction
slot_vectors = last_hidden_states # (b*d,t,h)
intent_context = rnn_out.unsqueeze(2).repeat(1,1,t,1).reshape(-1,t,self.rnn_hidden) # (b*d,t,hr)
# comia
# intent_context = pooled_output.unsqueeze(2)
# slot_refined = self.mia_encoder(intent_context, slot_vectors)
slot_inputs = torch.cat([slot_vectors, intent_context], dim=-1) # (b*d,t,h+hr)
slot_rnn_out, _ = self.slot_rnn(slot_inputs)
slot_rnn_out = self.dropout(slot_rnn_out)
slot_out = self.slot_classifier(slot_rnn_out)
slot_out = slot_out.view(-1, self.num_slot_labels) # (b*d*t, num_slots)
# slot_loss = -self.crf(slot_out, result_slot_labels)
return logits, labels, slot_out#, ffscores
| 8,750 | 41.072115 | 127 | py |
LayerAct | LayerAct-main/ResNet.py | from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import numpy as np
import random
import os
import torch
import torch.nn as nn
from torch import Tensor
def random_seed_set(rs) :
torch.manual_seed(rs)
torch.cuda.manual_seed(rs)
torch.cuda.manual_seed_all(rs)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
np.random.seed(rs)
random.seed(rs)
os.environ["PYTHONHASHSEED"] = str(rs)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
activation,
activation_params,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.act1 = activation(**activation_params)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.act2 = activation(**activation_params)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.act2(out)
return out
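# --- Hedged usage sketch (not part of the original file) ---
# A stride-1 BasicBlock keeps the spatial size and channel count, so no
# downsample module is needed; nn.ReLU stands in for any activation here.
def _example_basic_block():
    block = BasicBlock(nn.ReLU, {}, inplanes=64, planes=64)
    x = torch.randn(1, 64, 8, 8)
    return block(x).shape  # torch.Size([1, 64, 8, 8])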
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
activation,
activation_params,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.act1 = activation(**activation_params)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.act2 = activation(**activation_params)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.act3 = activation(**activation_params)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.act3(out)
return out
class ResNet(nn.Module):
def __init__(
self,
activation,
activation_params,
rs,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
random_seed_set(rs)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {replace_stride_with_dilation}"
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.act1 = activation(**activation_params)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(activation, activation_params, block, 64, layers[0])
self.layer2 = self._make_layer(activation, activation_params, block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(activation, activation_params, block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(activation, activation_params, block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck) and m.bn3.weight is not None:
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(
self,
activation,
activation_params,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
activation, activation_params, self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
activation,
activation_params,
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
activation,
activation_params,
rs,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
    **kwargs: Any,
) -> ResNet:
    model = ResNet(activation, activation_params, rs, block, layers, **kwargs)
return model
def resnet18(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, BasicBlock, [2, 2, 2, 2], num_classes=num_classes)
def resnet32(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, BasicBlock, [3, 4, 6, 3], num_classes=num_classes)
def resnet50(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
def resnet101(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet32' : return resnet32
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
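# --- Hedged usage sketch (not part of the original file) ---
# Builds a ResNet-18 with a plain activation; activation_params stays empty
# because nn.ReLU takes no constructor arguments. Sizes are illustrative.
def _example_build_resnet18():
    model = resnet18(nn.ReLU, {}, rs=11, num_classes=10)
    x = torch.randn(2, 3, 32, 32)
    return model(x).shape  # torch.Size([2, 10])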
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
| 11,184 | 33.953125 | 149 | py |
LayerAct | LayerAct-main/test.py | import argparse
import os
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from collections import OrderedDict as OD
from LayerAct import LA_HardSiLU, LA_SiLU
import data_augmentation
from train_validate import validate, validate_10crop
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
elif name == 'resnet20' : return resnet20
elif name == 'resnet32' : return resnet32
elif name == 'resnet44' : return resnet44
def activation_set(name) :
if name == 'relu' : return nn.ReLU
elif name == 'leakyrelu' : return nn.LeakyReLU
elif name == 'prelu' : return nn.PReLU
elif name == 'mish' : return nn.Mish
elif name == 'silu' : return nn.SiLU
elif name == 'hardsilu' : return nn.Hardswish
elif name == 'la_silu' : return LA_SiLU
elif name == 'la_hardsilu' : return LA_HardSiLU
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
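# --- Hedged usage sketch (not part of the original file) ---
# Mirrors how the main loop below assembles a model: pick an activation by
# name and pass alpha only to LayerAct functions. The alpha value is just the
# script's default and the model/class sizes are illustrative.
def _example_assemble_model():
    name = 'la_silu'
    activation = activation_set(name)
    activation_params = {'alpha': 1e-5} if 'la_' in name else {}
    return model_loader('resnet20', activation, activation_params, rs=11, out_num=10)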
def folder_check(path, data_name, model_name) :
path_f = path + data_name + '/'
path_m = path_f + model_name + '/'
if data_name not in os.listdir(path) :
os.makedirs(path_f)
if model_name not in os.listdir(path_f) :
os.makedirs(path_m)
return path_m
random_seed = [11*i for i in range(1, 21)]
#######################################################################################################
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', '-d', default='CIFAR10')
parser.add_argument('--model', '-m', default='resnet20')
parser.add_argument('--activations', '-a', default='relu,leakyrelu,prelu,mish,silu,hardsilu,la_silu,la_hardsilu')
parser.add_argument('--noise', '-n', default='None')
parser.add_argument('--noise_param1', '-np1', default='')
parser.add_argument('--noise_param2', '-np2', default='')
parser.add_argument('--device', default=0, type=int)
parser.add_argument('--crop', default='center')
parser.add_argument('--start_trial', default=1, type=int)
parser.add_argument('--end_trial', default=5, type=int)
parser.add_argument('--alpha', default=1e-5)
parser.add_argument('--batch_size', '-bs', default=128)
parser.add_argument('--num_workers', '-nw', default=16)
parser.add_argument('--data_path', '-dp', default='')
parser.add_argument('--model_path', default='trained_models/')
parser.add_argument('--save_path', default='result/')
parser.add_argument('--resume', default=True, type=bool)
parser.add_argument('--duplicate', default=True, type=bool)
parser.add_argument('--save', default=True, type=bool)
args = parser.parse_args()
activation_list = [a for a in args.activations.split(',')]
device = torch.device('cuda:{}'.format(args.device))
model_path = folder_check(args.model_path, args.data, args.model)
save_path = folder_check(args.save_path, args.data, args.model)
if args.noise == 'gaussian' :
param1, param2 = float(args.noise_param1), float(args.noise_param2)
elif args.noise == 'blur' :
param1 = (int(args.noise_param1.split(',')[0]), int(args.noise_param1.split(',')[1]))
param2 = (int(args.noise_param2.split(',')[0]), int(args.noise_param2.split(',')[1]))
else :
param1, param2 = 0, 0
for activation_name in activation_list :
activation = activation_set(activation_name)
activation_params = {'alpha' : args.alpha} if 'la_' in activation_name else {} # parameter alpha of LayerAct functions for stable training
for trial in range(args.start_trial, args.end_trial+1) :
rs = random_seed[trial-1]
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
cudnn.deterministic = True
cudnn.benchmark = False
file_name = '{}_{}'.format(activation_name, trial)
if args.data == 'CIFAR10' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR10(
args.data_path, args.noise, param1, param2, args.batch_size, args.num_workers, rs
)
in_channel, H, W, out_num = 3, 32, 32, 10
elif args.data == 'CIFAR100' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(
args.data_path, args.noise, param1, param2, args.batch_size, args.num_workers, rs
)
in_channel, H, W, out_num = 3, 32, 32, 100
elif args.data == 'ImageNet' :
            train_loader, val_loader, test_loader = data_augmentation.load_ImageNet(
args.data_path, args.noise, param1, param2, args.batch_size, args.num_workers, rs, args.crop
)
in_channel, H, W, out_num = 3, 224, 224, 1000
else :
            raise Exception('Dataset should be "CIFAR10", "CIFAR100", or "ImageNet"')
model = model_loader(args.model, activation, activation_params, rs, out_num)
model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
trained = torch.load(model_path + file_name + '.pth.tar', map_location=device)
try :
model.load_state_dict(trained)
except :
trained_ = OD([(k.split('module.')[-1], trained[k]) for k in trained.keys()])
model.load_state_dict(trained_)
if args.crop == '10crop' :
test_loss, test_acc1, test_acc5 = validate_10crop(test_loader, model, criterion, device)
else :
test_loss, test_acc1, test_acc5 = validate(test_loader, model, criterion, device)
print("{} | {} | {} | Test | acc1 {} | acc5 {}".format(args.model, trial, activation_name, test_acc1, test_acc5), end = '\n') | 6,329 | 42.356164 | 149 | py |
LayerAct | LayerAct-main/train_validate.py | import time
from enum import Enum
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
import shutil
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
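# Usage note (added for clarity): accuracy() returns one tensor per requested k, each holding the
# percentage of targets found among the top-k predictions for the batch. Illustrative call
# (variable names are hypothetical): top1, top5 = accuracy(logits, labels, topk=(1, 5))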
class Summary(Enum):
NONE = 0
AVERAGE = 1
SUM = 2
COUNT = 3
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
self.name = name
self.fmt = fmt
self.summary_type = summary_type
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def summary(self):
fmtstr = ''
if self.summary_type is Summary.NONE:
fmtstr = ''
elif self.summary_type is Summary.AVERAGE:
fmtstr = '{name} {avg:.3f}'
elif self.summary_type is Summary.SUM:
fmtstr = '{name} {sum:.3f}'
elif self.summary_type is Summary.COUNT:
fmtstr = '{name} {count:.3f}'
else:
raise ValueError('invalid summary type %r' % self.summary_type)
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def display_summary(self):
entries = [" *"]
entries += [meter.summary() for meter in self.meters]
print(' '.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
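# Note (added): train() below performs a single pass over train_loader, stepping the optimizer and
# lr_scheduler once per batch and incrementing the global `iter` counter once per batch; it returns
# the updated (iter, lr_scheduler) so the caller can keep looping until max_iter is reached.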
def train(train_loader, model, criterion, optimizer, lr_scheduler, device, iter, output_device=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
if output_device is None :
output_device = device
else :
output_device = torch.device('cuda:{}'.format(output_device))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
data_time.update(time.time() - end)
images = images.to(device, non_blocking=True)
target = target.to(output_device, non_blocking=True)
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
batch_time.update(time.time() - end)
end = time.time()
iter += 1
return iter, lr_scheduler
def validate(val_loader, model, criterion, device, output_device=None):
if output_device is None :
output_device = device
    elif type(output_device) == int :
        output_device = torch.device('cuda:{}'.format(output_device))
def run_validate(loader, base_progress=0, topk=(1,5)):
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
i = base_progress + i
images = images.to(device, non_blocking=True)
target = target.to(output_device, non_blocking=True)
output = model(images)
                try :
                    loss = criterion(output, target)
                except :
                    print('i : ', i, ' | output : ', output.device, ' | target : ', target.device)
                    raise
acc1, acc5 = accuracy(output, target, topk=topk)
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
losses = AverageMeter('Loss', ':.4e', Summary.NONE)
top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
# switch to evaluate mode
model.eval()
run_validate(val_loader)
return losses.avg, top1.avg, top5.avg
def validate_10crop(val_loader, model, criterion, device):
def run_validate(loader, base_progress=0, topk=(1,5)):
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
i = base_progress + i
if device is not None and torch.cuda.is_available():
images = images.cuda(device, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(device, non_blocking=True)
bs, ncrops, c, h, w = images.size()
images = images.view(-1, c, h, w)
# compute output
output = model(images)
output = output.view(bs, ncrops, -1).mean(1)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=topk)
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
losses = AverageMeter('Loss', ':.4e', Summary.NONE)
top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
# switch to evaluate mode
model.eval()
run_validate(val_loader)
return losses.avg, top1.avg, top5.avg | 7,326 | 32.153846 | 101 | py |
LayerAct | LayerAct-main/train_parallel.py | import argparse
import time
import os
import sys
import numpy as np
import random
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from LayerAct import LA_HardSiLU, LA_SiLU
import data_augmentation
from train_validate import train, validate
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
elif name == 'resnet20' : return resnet20
elif name == 'resnet32' : return resnet32
elif name == 'resnet44' : return resnet44
def activation_set(name) :
if name == 'relu' : return nn.ReLU
elif name == 'leakyrelu' : return nn.LeakyReLU
elif name == 'prelu' : return nn.PReLU
elif name == 'mish' : return nn.Mish
elif name == 'silu' : return nn.SiLU
elif name == 'hardsilu' : return nn.Hardswish
elif name == 'la_silu' : return LA_SiLU
elif name == 'la_hardsilu' : return LA_HardSiLU
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
def folder_check(path, data_name, model_name) :
path_f = path + data_name + '/'
path_m = path_f + model_name + '/'
if data_name not in os.listdir(path) :
os.makedirs(path_f)
if model_name not in os.listdir(path_f) :
os.makedirs(path_m)
return path_m
random_seed = [11*i for i in range(1, 21)]
#######################################################################################################
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', '-d', default='CIFAR10')
parser.add_argument('--model', '-m', default='resnet20')
parser.add_argument('--activation', '-a', default='relu')
parser.add_argument('--device_ids', default='0')
parser.add_argument('--output_device', default=0, type=int)
parser.add_argument('--crop', default='center')
parser.add_argument('--start_trial', default=1, type=int)
parser.add_argument('--end_trial', default=5, type=int)
    parser.add_argument('--alpha', default=1e-1, type=float)
    parser.add_argument('--batch_size', '-bs', default=256, type=int)
    parser.add_argument('--num_workers', '-nw', default=16, type=int)
    parser.add_argument('--learning_rate', '-lr', default=0.1, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', '-wd', default=0.0001, type=float)
    parser.add_argument('--max_iter', default=600000, type=int)
parser.add_argument('--milestones', default='180000,360000,540000')
parser.add_argument('--data_path', '-dp', default='')
parser.add_argument('--save_path', default='trained_models/')
parser.add_argument('--resume', default="True", type=str)
parser.add_argument('--duplicate', default="False", type=str)
parser.add_argument('--save', default="True", type=str)
args = parser.parse_args()
activation = activation_set(args.activation)
activation_params = {'alpha' : args.alpha} if 'la_' in args.activation else {} # parameter alpha of LayerAct functions for stable training
milestones = [int(m) for m in args.milestones.split(',')]
device_ids = [int(d) for d in args.device_ids.split(',')]
output_device = torch.device('cuda:{}'.format(args.output_device))
save_path = folder_check(args.save_path, args.data, args.model)
resume = True if args.resume == 'True' else False
duplicate = True if args.duplicate == 'True' else False
save = True if args.save == 'True' else False
for trial in range(args.start_trial, args.end_trial+1) :
rs = random_seed[trial-1]
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
cudnn.deterministic = True
cudnn.benchmark = False
file_name = '{}_{}'.format(args.activation, trial)
if not duplicate and '{}.pth.tar'.format(file_name) in os.listdir(save_path) :
sys.exit('Model ({} | {} | {}) exists'.format(args.data, args.model, args.activation))
if args.data == 'CIFAR10' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR10(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 10
elif args.data == 'CIFAR100' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 100
elif args.data == 'ImageNet' :
            train_loader, val_loader, test_loader = data_augmentation.load_ImageNet(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs, args.crop)
in_channel, H, W, out_num = 3, 224, 224, 1000
else :
            raise Exception('Dataset should be "CIFAR10", "CIFAR100", or "ImageNet"')
model = model_loader(args.model, activation, activation_params, rs, out_num)
model.to(torch.device('cuda'))
model = nn.DataParallel(model, device_ids=device_ids, output_device=output_device)
criterion = nn.CrossEntropyLoss().to(torch.device('cuda'))
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, last_epoch=0-1)
print('model make', end='\n')
best_model = None
best_acc1 = 0
start_time = time.time()
start_iter = 0
if resume and os.path.isfile(save_path + file_name + '_checkpoint.pth.tar') :
print('Resume', end='\r')
checkpoint = torch.load(save_path + file_name + '_checkpoint.pth.tar', map_location=torch.device('cuda'))
start_iter = checkpoint['iter']
best_acc1 = checkpoint['best_acc1']
best_model = checkpoint['best_model']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['scheduler'])
iter = start_iter
while iter < args.max_iter :
iter, lr_scheduler = train(train_loader, model, criterion, optimizer, lr_scheduler, torch.device('cuda'), iter, output_device=output_device)
val_loss, val_acc1, val_acc5 = validate(val_loader, model, criterion, torch.device('cuda'), output_device=output_device)
train_loss, train_acc1, train_acc5 = validate(train_loader, model, criterion, torch.device('cuda'), output_device=output_device)
t = time.time()
is_best = val_acc1 > best_acc1
best_acc1 = max(val_acc1, best_acc1)
if is_best :
best_model = model.state_dict()
best_iter = iter
print(
'Updated | Iter {}/{} | {}% | {} min | {} min left | Train loss {} | top1 {} | top5 {} | val loss {} | top1 {} | top5 {}'.format(
iter, args.max_iter, round(100*(iter+1)/args.max_iter), round((t-start_time)/60), round((t-start_time)/60*((args.max_iter-iter-1)/(iter+1))),
round(train_loss, 3), round(train_acc1.item(), 3), round(train_acc5.item(), 3),
round(val_loss, 3), round(val_acc1.item(), 3), round(val_acc5.item(), 3)
) + ' '*10, end='\r'
)
save_checkpoint(
{
'iter' : iter + 1,
'time' : t,
'state_dict' : model.state_dict(),
'best_model' : best_model,
'best_acc1' : best_acc1,
'optimizer' : optimizer.state_dict(),
'scheduler' : lr_scheduler.state_dict(),
}, is_best, save_path + file_name + '_checkpoint.pth.tar'
)
if iter > args.max_iter :
break
if save :
torch.save(best_model, '{}.pth.tar'.format(save_path + file_name))
model.load_state_dict(best_model)
test_loss, test_acc1, test_acc5 = validate(test_loader, model, criterion, torch.device('cuda'), output_device=output_device)
print("{} | {} | {} | Test | acc1 {} | acc5 {}".format(args.model, trial, args.activation, test_acc1, test_acc5), end = '\n')
| 8,871 | 42.920792 | 166 | py |
LayerAct | LayerAct-main/ResNet_small.py | import torch.nn as nn
import torch.nn.functional as F
class ResNet(nn.Module):
def __init__(self, activation, activation_params, rs, layers, num_classes):
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
self.norm1 = nn.BatchNorm2d(16)
self.act1 = activation(**activation_params)
self.layers1 = self._make_layer(activation, activation_params, layers[0], 16, 16, 1)
self.layers2 = self._make_layer(activation, activation_params, layers[1], 32, 16, 2)
self.layers3 = self._make_layer(activation, activation_params, layers[2], 64, 32, 2)
self.avgpool = nn.AvgPool2d(8)
self.linear = nn.Linear(64, num_classes)
def _make_layer(self, activation, activation_params, layer_count, channels, channels_in, stride):
return nn.Sequential(
ResBlock(activation, activation_params, channels, channels_in, stride),
*[ResBlock(activation, activation_params, channels) for _ in range(layer_count-1)]
)
def forward(self, x):
out = self.conv1(x)
out = self.norm1(out)
out = self.act1(out)
out = self.layers1(out)
out = self.layers2(out)
out = self.layers3(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResBlock(nn.Module):
def __init__(self, activation, activation_params, num_filters, channels_in=None, stride=1):
super(ResBlock, self).__init__()
        # projection shortcut when the channel count changes (option A: zero-padded identity, see IdentityPadding below)
if not channels_in or channels_in == num_filters:
channels_in = num_filters
self.projection = None
else :
self.projection = IdentityPadding(num_filters, channels_in, stride)
self.conv1 = nn.Conv2d(channels_in, num_filters, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm2d(num_filters)
self.act1 = activation(**activation_params)
self.conv2 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(num_filters)
self.act2 = activation(**activation_params)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.projection:
residual = self.projection(x)
out += residual
out = self.act2(out)
return out
# various projection options to change number of filters in residual connection
# option A from paper
class IdentityPadding(nn.Module):
def __init__(self, num_filters, channels_in, stride):
super(IdentityPadding, self).__init__()
# with kernel_size=1, max pooling is equivalent to identity mapping with stride
self.identity = nn.MaxPool2d(1, stride=stride)
self.num_zeros = num_filters - channels_in
def forward(self, x):
out = F.pad(x, (0, 0, 0, 0, 0, self.num_zeros))
out = self.identity(out)
return out
def resnet20(activation, activation_params, rs, num_classes) :
return ResNet(activation, activation_params, rs, [3, 3, 3], num_classes=num_classes)
def resnet32(activation, activation_params, rs, num_classes) :
return ResNet(activation, activation_params, rs, [5, 5, 5], num_classes=num_classes)
def resnet44(activation, activation_params, rs, num_classes) :
return ResNet(activation, activation_params, rs, [7, 7, 7], num_classes=num_classes)
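# Note (added): with per-stage block counts [n, n, n] these builders follow the 6n+2 depth rule of
# the CIFAR-style ResNets (2 convs per block, 3 stages, plus the stem conv and the final linear),
# giving ResNet-20/32/44 for n = 3/5/7.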
| 3,640 | 36.536082 | 101 | py |
LayerAct | LayerAct-main/LayerAct.py | # importing
import torch
import torch.nn as nn
import warnings
warnings.filterwarnings('ignore')
# function to calculate the layer-direction mean and variance.
def calculate_mean_std_for_forward(inputs, std = True) :
if len(inputs.shape) < 4 :
cal_dim = [1]
else :
cal_dim = [1, 2, 3]
mean = inputs.mean(dim=cal_dim, keepdim=True)
if std :
var = inputs.var(dim=cal_dim, keepdim=True)
return mean, var, cal_dim
else :
return mean, cal_dim
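# Shape note (added): for a 4D input (N, C, H, W) the statistics are reduced over (C, H, W)
# separately for each sample (keepdim=True -> shape (N, 1, 1, 1)), i.e. the same axes LayerNorm
# uses; for 2D/3D inputs only dim 1 is reduced.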
#############################################################
class LA_SiLU(nn.Module) :
"""
# alpha
- float
- the parameter for stability of activation
# save_less
- bool
- if true, do not save mean, variance, standard deviation, and normalized input for "backward" by ctx.save_for_backward()
- if false, save mean, variance, standard deviation, and normalized input for "backward" by ctx.save_for_backward()
"""
def __init__(self, alpha=1e-5, save_less=False) :
super(LA_SiLU, self).__init__()
self.alpha = alpha
self.save_less = save_less
def forward(self, inputs) :
        return la_silu.apply(inputs, self.alpha, self.save_less, self.training)
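# Minimal usage sketch (added; tensor shape is illustrative, not from the original code):
#   act = LA_SiLU(alpha=1e-5)
#   y = act(torch.randn(8, 16, 32, 32))  # each element is scaled by sigmoid of its layer-normalized value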
class la_silu(torch.autograd.Function) :
@staticmethod
def forward(ctx, inputs, alpha, save_less, training=True) :
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
if save_less or not training :
z = torch.mul(torch.sigmoid(torch.div(torch.sub(inputs, mean), torch.sqrt(var+alpha))), inputs)
else :
var_ = var+alpha
std = torch.sqrt(var_)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.sigmoid(n)
z = torch.mul(s, inputs)
if training :
ctx.save_less = save_less
ctx.alpha = alpha
if save_less :
ctx.save_for_backward(inputs)
else :
ctx.save_for_backward(inputs, mean, var, std, n, s)
ctx.cal_dim = cal_dim
return z
@staticmethod
def backward(ctx, output_grad):
alpha = ctx.alpha
if ctx.save_less :
inputs, = ctx.saved_tensors
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
std = torch.sqrt(var+alpha)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.sigmoid(n)
else :
inputs, mean, var, std, n, s = ctx.saved_tensors
cal_dim = ctx.cal_dim
inputs_grad = torch.mul(output_grad.clone(), s)
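        # Added comment: the input gradient has two parts. The line above is the direct term
        # (d z / d x with n held fixed, i.e. just s). The block below backpropagates through
        # n = (x - mean) / std; the two subtracted mean terms are the layer-norm-style corrections
        # for mean and std depending on every element of the layer.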
dn = torch.div(
torch.mul(
torch.mul(output_grad.clone(), inputs.clone()),
torch.mul(s, 1-s)
),
std
)
dn = torch.sub(
dn,
torch.add(
torch.mean(dn, dim=cal_dim, keepdim=True),
torch.mul(torch.mean(torch.mul(dn, n), dim=cal_dim, keepdim=True), n)
)
)
inputs_grad = torch.add(inputs_grad, dn)
return inputs_grad, None, None, None
#############################################################
class LA_HardSiLU(nn.Module) :
def __init__(self, alpha=1e-5, save_less=False) :
super(LA_HardSiLU, self).__init__()
self.alpha = alpha
self.save_less = save_less
def forward(self, inputs) :
return la_hardsilu.apply(inputs, self.alpha, self.save_less, self.training)
class la_hardsilu(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, alpha, save_less, training=True):
shape = inputs.shape
device = inputs.device
ones = torch.ones(shape, device=device)
zeros = torch.zeros(shape, device=device)
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
if save_less or not training :
n = torch.div(torch.sub(inputs, mean), torch.sqrt(var+alpha))
z = torch.mul(inputs, torch.where(n<=3, torch.where(n<=-3, zeros.clone(), n/6+0.5), ones.clone()))
else :
var_ = var+alpha
std = torch.sqrt(var_)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.where(n<=-3, zeros.clone(), n/6+0.5)
s = torch.where(n<=3, s, ones.clone())
z = torch.mul(inputs, s)
if training :
ctx.save_less = save_less
if save_less :
ctx.save_for_backward(inputs)
ctx.alpha = alpha
else :
ctx.save_for_backward(inputs, mean, std, n, s)
ctx.cal_dim = cal_dim
return z
@staticmethod
def backward(ctx, output_grad):
if ctx.save_less :
inputs, = ctx.saved_tensors
shape = inputs.shape
device = inputs.device
ones = torch.ones(shape, device=device)
zeros = torch.zeros(shape, device=device)
alpha = ctx.alpha
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
std = torch.sqrt(var+alpha)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.where(
n<=3,
torch.where(n<=-3, zeros.clone(), n/6+0.5),
ones.clone()
)
else :
cal_dim = ctx.cal_dim
inputs, mean, std, n, s = ctx.saved_tensors
shape = inputs.shape
device = inputs.device
ones = torch.ones(shape, device=device)
zeros = torch.zeros(shape, device=device)
inputs_grad = torch.mul(output_grad.clone(), s)
ds = torch.where(
n<=3,
torch.where(n<=-3, zeros.clone(), ones.clone()/6),
zeros.clone()
)
da = torch.mul(output_grad.clone(), inputs.clone())
dn = torch.div(torch.mul(da, ds), std)
dn = torch.sub(
dn,
torch.add(
torch.mean(dn, dim=cal_dim, keepdim=True),
torch.mul(torch.mean(torch.mul(dn, n), dim=cal_dim, keepdim=True), n)
)
)
inputs_grad = torch.add(inputs_grad, dn)
return inputs_grad, None, None, None
#############################################################
| 6,523 | 32.803109 | 125 | py |
LayerAct | LayerAct-main/data_augmentation.py | import os
import numpy as np
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.model_selection import StratifiedShuffleSplit
import random
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
class AddGaussianNoise(object):
def __init__(self, mean=0, std=1, random_seed=0):
self.std = std
self.mean = mean
self.random_seed = random_seed
def __call__(self, tensor):
random.seed(self.random_seed)
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed(self.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
return tensor + torch.randn(tensor.size()) * self.std + self.mean
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class AddPoissonNoise(object):
def __init__(self, random_seed=0):
self.random_seed=random_seed
def __call__(self, tensor):
random.seed(self.random_seed)
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed(self.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
vals = len(torch.unique(tensor))
vals = 2**np.ceil(np.log2(vals))
return tensor + torch.poisson(tensor*vals)/float(vals)
def __repr__(self):
return self.__class__.__name__
def CIFAR_transforms(noise, normalize, test, param1, param2, random_seed) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
compose_list = [transforms.ToTensor()]
if noise == 'blur' :
compose_list.append(transforms.GaussianBlur(param1, param2))
elif noise == 'gaussian' :
compose_list.append(AddGaussianNoise(param1, param2, random_seed))
elif noise == 'poisson' :
compose_list.append(AddPoissonNoise(random_seed))
if not test :
compose_list += [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4)]
compose_list.append(normalize)
return transforms.Compose(compose_list)
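# Note (added): the pipeline order is ToTensor -> optional noise -> (train only: flip + random crop)
# -> Normalize, so any test-time noise is injected in [0, 1] pixel space before normalization.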
def load_CIFAR10(data_path, test_noise, param1, param2, batch_size, num_workers, random_seed) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_dataset = torchvision.datasets.CIFAR10(root = data_path, train=True, transform=transforms.ToTensor(), download=False)
test_dataset = torchvision.datasets.CIFAR10(root = data_path, train=False, transform=transforms.ToTensor(), download=False)
imgs = torch.stack([d[0] for d in train_dataset], dim=0).numpy()
mean = [imgs[:, 0, :, :].mean(), imgs[:, 1, :, :].mean(), imgs[:, 2, :, :].mean()]
std = [imgs[:, 0, :, :].std(), imgs[:, 1, :, :].std(), imgs[:, 2, :, :].std()]
normalize = transforms.Normalize(mean=mean, std=std)
train_transforms = CIFAR_transforms('None', normalize, False, param1, param2, random_seed)
test_transforms = CIFAR_transforms(test_noise, normalize, True, param1, param2, random_seed)
train_dataset = torchvision.datasets.CIFAR10(root = data_path, train=True, transform=train_transforms, download=False)
test_dataset = torchvision.datasets.CIFAR10(root = data_path, train=False, transform=test_transforms, download=False)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=random_seed)
indices = list(range(len(train_dataset)))
train_list = [t for _, t in train_dataset]
for train_index, val_index in sss.split(indices, train_list):
train_index = train_index
val_index = val_index
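    # Added comment: with n_splits=1 the loop above runs exactly once, yielding a stratified
    # 90/10 train/validation split of the training set (test_size=0.1).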
train_sampler = SubsetRandomSampler(train_index)
val_sampler = SubsetRandomSampler(val_index)
pin_memory = True
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
val_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=val_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory = pin_memory
)
return train_loader, val_loader, test_loader
def load_CIFAR100(data_path, test_noise, param1, param2, batch_size, num_workers, random_seed) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_dataset = torchvision.datasets.CIFAR100(root = data_path, train=True, transform=transforms.ToTensor(), download=False)
test_dataset = torchvision.datasets.CIFAR100(root = data_path, train=False, transform=transforms.ToTensor(), download=False)
imgs = torch.stack([d[0] for d in train_dataset], dim=0).numpy()
mean = [imgs[:, 0, :, :].mean(), imgs[:, 1, :, :].mean(), imgs[:, 2, :, :].mean()]
std = [imgs[:, 0, :, :].std(), imgs[:, 1, :, :].std(), imgs[:, 2, :, :].std()]
normalize = transforms.Normalize(mean=mean, std=std)
train_transforms = CIFAR_transforms('None', normalize, False, param1, param2, random_seed)
test_transforms = CIFAR_transforms(test_noise, normalize, True, param1, param2, random_seed)
train_dataset = torchvision.datasets.CIFAR100(root = data_path, train=True, transform=train_transforms, download=False)
test_dataset = torchvision.datasets.CIFAR100(root = data_path, train=False, transform=test_transforms, download=False)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=random_seed)
indices = list(range(len(train_dataset)))
train_list = [t for _, t in train_dataset]
for train_index, val_index in sss.split(indices, train_list):
train_index = train_index
val_index = val_index
train_sampler = SubsetRandomSampler(train_index)
val_sampler = SubsetRandomSampler(val_index)
pin_memory = True
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
val_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=val_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory = pin_memory
)
return train_loader, val_loader, test_loader
def imagenet_transforms(noise, normalize, test, param1, param2, crop='center', random_seed=0) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
compose_list = [transforms.ToTensor()]
if noise == 'blur' :
compose_list.append(transforms.GaussianBlur(param1, param2))
elif noise == 'gaussian' :
compose_list.append(AddGaussianNoise(param1, param2))
elif noise == 'poisson' :
compose_list.append(AddPoissonNoise())
if not test :
compose_list += [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), normalize]
else :
if crop == 'random' :
compose_list += [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), normalize]
elif crop == 'center' :
compose_list += [transforms.Resize(256), transforms.CenterCrop(224), normalize]
elif crop == '10crop' :
compose_list = [
transforms.Resize(256), transforms.TenCrop(224),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops])),
]
return transforms.Compose(compose_list)
def load_ImageNet(data_path, test_noise, param1, param2, batch_size, num_workers, random_seed, crop) :
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
train_transforms = imagenet_transforms('None', normalize, False, param1, param2, random_seed=random_seed)
test_transforms = imagenet_transforms(test_noise, normalize, True, param1, param2, crop, random_seed=random_seed)
if crop == '10crop' :
batch_size = 32
else :
batch_size = 256
pin_memory = True
train_dataset = torchvision.datasets.ImageFolder(root = data_path + 'train', transform=train_transforms)
val_dataset = torchvision.datasets.ImageFolder(root = data_path + 'val', transform=test_transforms)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers, pin_memory = pin_memory
)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory
)
return train_loader, val_loader, val_loader
| 9,943 | 39.422764 | 128 | py |
LayerAct | LayerAct-main/train.py | import argparse
import time
import os
import sys
import numpy as np
import random
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from LayerAct import LA_HardSiLU, LA_SiLU
import data_augmentation
from train_validate import train, validate
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
elif name == 'resnet20' : return resnet20
elif name == 'resnet32' : return resnet32
elif name == 'resnet44' : return resnet44
def activation_set(name) :
if name == 'relu' : return nn.ReLU
elif name == 'leakyrelu' : return nn.LeakyReLU
elif name == 'prelu' : return nn.PReLU
elif name == 'mish' : return nn.Mish
elif name == 'silu' : return nn.SiLU
elif name == 'hardsilu' : return nn.Hardswish
elif name == 'la_silu' : return LA_SiLU
elif name == 'la_hardsilu' : return LA_HardSiLU
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
def folder_check(path, data_name, model_name) :
path_f = path + data_name + '/'
path_m = path_f + model_name + '/'
if data_name not in os.listdir(path) :
os.makedirs(path_f)
if model_name not in os.listdir(path_f) :
os.makedirs(path_m)
return path_m
random_seed = [11*i for i in range(1, 21)]
#######################################################################################################
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', '-d', default='CIFAR10', type=str)
parser.add_argument('--model', '-m', default='resnet20', type=str)
parser.add_argument('--activation', '-a', default='relu', type=str)
parser.add_argument('--device', default=0, type=int)
parser.add_argument('--crop', default='center', type=str)
parser.add_argument('--start_trial', default=1, type=int)
parser.add_argument('--end_trial', default=5, type=int)
parser.add_argument('--alpha', default=1e-5, type=float)
parser.add_argument('--batch_size', '-bs', default=128, type=int)
parser.add_argument('--num_workers', '-nw', default=16, type=int)
parser.add_argument('--learning_rate', '-lr', default=0.1, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--weight_decay', '-wd', default=0.0001, type=float)
parser.add_argument('--max_iter', default=64000, type=int)
parser.add_argument('--milestones', default='32000,48000', type=str)
parser.add_argument('--data_path', '-dp', default='', type=str)
parser.add_argument('--save_path', default='trained_models/', type=str)
parser.add_argument('--resume', default="True", type=str)
parser.add_argument('--duplicate', default="False", type=str)
parser.add_argument('--save', default="True", type=str)
args = parser.parse_args()
activation = activation_set(args.activation)
activation_params = {'alpha' : args.alpha} if 'la_' in args.activation else {} # parameter alpha of LayerAct functions for stable training
milestones = [int(m) for m in args.milestones.split(',')]
device = torch.device('cuda:{}'.format(args.device))
save_path = folder_check(args.save_path, args.data, args.model)
resume = True if args.resume == 'True' else False
duplicate = True if args.duplicate == 'True' else False
save = True if args.save == 'True' else False
for trial in range(args.start_trial, args.end_trial+1) :
rs = random_seed[trial-1]
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
cudnn.deterministic = True
cudnn.benchmark = False
file_name = '{}_{}'.format(args.activation, trial)
if not duplicate and '{}.pth.tar'.format(file_name) in os.listdir(save_path) :
sys.exit('Model ({} | {} | {}) exists'.format(args.data, args.model, args.activation))
if args.data == 'CIFAR10' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR10(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 10
elif args.data == 'CIFAR100' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 100
elif args.data == 'ImageNet' :
            train_loader, val_loader, test_loader = data_augmentation.load_ImageNet(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs, args.crop)
in_channel, H, W, out_num = 3, 224, 224, 1000
else :
            raise Exception('Dataset should be "CIFAR10", "CIFAR100", or "ImageNet"')
model = model_loader(args.model, activation, activation_params, rs, out_num)
model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, last_epoch=0-1)
print('model make', end='\n')
best_model = None
best_acc1 = 0
start_time = time.time()
start_iter = 0
if resume and os.path.isfile(save_path + file_name + '_checkpoint.pth.tar') :
print('model resume', end='\n')
checkpoint = torch.load(save_path + file_name + '_checkpoint.pth.tar', map_location=device)
start_iter = checkpoint['iter']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['scheduler'])
iter = start_iter
while iter < args.max_iter :
iter, lr_scheduler = train(train_loader, model, criterion, optimizer, lr_scheduler, device, iter)
val_loss, val_acc1, val_acc5 = validate(val_loader, model, criterion, device)
train_loss, train_acc1, train_acc5 = validate(train_loader, model, criterion, device)
t = time.time()
is_best = val_acc1 > best_acc1
best_acc1 = max(val_acc1, best_acc1)
if is_best :
best_model = model.state_dict()
best_iter = iter
print(
'Updated | Iter {}/{} | {}% | {} min | {} min left | Train loss {} | top1 {} | top5 {} | val loss {} | top1 {} | top5 {}'.format(
iter, args.max_iter, round(100*(iter+1)/(args.max_iter)),
round((t-start_time)/60), round((t-start_time)/60*((args.max_iter-iter-1)/(iter+1))),
round(train_loss, 3), round(train_acc1.item(), 3), round(train_acc5.item(), 3),
round(val_loss, 3), round(val_acc1.item(), 3), round(val_acc5.item(), 3)
) + ' '*10, end='\r'
)
save_checkpoint(
{
'iter' : iter + 1,
'time' : t,
'state_dict' : model.state_dict(),
'best_model' : best_model,
'best_acc1' : best_acc1,
'optimizer' : optimizer.state_dict(),
'scheduler' : lr_scheduler.state_dict(),
}, is_best, save_path + file_name + '_checkpoint.pth.tar'
)
if iter > args.max_iter :
break
torch.save(best_model, '{}.pth.tar'.format(save_path + file_name))
model.load_state_dict(best_model)
test_loss, test_acc1, test_acc5 = validate(test_loader, model, criterion, device)
print("{} | {} | {} | Test | acc1 {} | acc5 {} ".format(args.model, trial, args.activation, test_acc1, test_acc5) + ' '*20, end = '\n')
| 8,519 | 42.469388 | 165 | py |
mkbe | mkbe-master/DesGAN/generate.py | import argparse
import numpy as np
import random
import torch
from torch.autograd import Variable
from models import load_models, generate
###############################################################################
# Generation methods
###############################################################################
def interpolate(ae, gg, z1, z2, vocab,
steps=5, sample=None, maxlen=None):
"""
Interpolating in z space
Assumes that type(z1) == type(z2)
"""
if type(z1) == Variable:
noise1 = z1
noise2 = z2
elif type(z1) == torch.FloatTensor or type(z1) == torch.cuda.FloatTensor:
noise1 = Variable(z1, volatile=True)
noise2 = Variable(z2, volatile=True)
elif type(z1) == np.ndarray:
noise1 = Variable(torch.from_numpy(z1).float(), volatile=True)
noise2 = Variable(torch.from_numpy(z2).float(), volatile=True)
else:
raise ValueError("Unsupported input type (noise): {}".format(type(z1)))
# interpolation weights
lambdas = [x*1.0/(steps-1) for x in range(steps)]
gens = []
for L in lambdas:
gens.append(generate(ae, gg, (1-L)*noise1 + L*noise2,
vocab, sample, maxlen))
interpolations = []
for i in range(len(gens[0])):
interpolations.append([s[i] for s in gens])
return interpolations
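# Usage sketch (added; shapes are illustrative): for z1, z2 of shape (n, z_size), interpolate()
# returns a list of n items, each a list of `steps` sentences decoded along the line segment
# between the two noise vectors.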
def main(args):
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
else:
print("Note that our pre-trained models require CUDA to evaluate.")
###########################################################################
# Load the models
###########################################################################
model_args, idx2word, autoencoder, gan_gen, gan_disc \
= load_models(args.load_path)
###########################################################################
# Generation code
###########################################################################
# Generate sentences
if args.ngenerations > 0:
noise = torch.ones(args.ngenerations, model_args['z_size'])
noise.normal_()
sentences = generate(autoencoder, gan_gen, z=noise,
vocab=idx2word, sample=args.sample,
maxlen=model_args['maxlen'])
if not args.noprint:
print("\nSentence generations:\n")
for sent in sentences:
print(sent)
with open(args.outf, "w") as f:
f.write("Sentence generations:\n\n")
for sent in sentences:
f.write(sent+"\n")
# Generate interpolations
if args.ninterpolations > 0:
noise1 = torch.ones(args.ninterpolations, model_args['z_size'])
noise1.normal_()
noise2 = torch.ones(args.ninterpolations, model_args['z_size'])
noise2.normal_()
interps = interpolate(autoencoder, gan_gen,
z1=noise1,
z2=noise2,
vocab=idx2word,
steps=args.steps,
sample=args.sample,
maxlen=model_args['maxlen'])
if not args.noprint:
print("\nSentence interpolations:\n")
for interp in interps:
for sent in interp:
print(sent)
print("")
with open(args.outf, "a") as f:
f.write("\nSentence interpolations:\n\n")
for interp in interps:
for sent in interp:
f.write(sent+"\n")
f.write('\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch ARAE for Text Eval')
parser.add_argument('--load_path', type=str, required=True,
help='directory to load models from')
parser.add_argument('--temp', type=float, default=1,
help='softmax temperature (lower --> more discrete)')
parser.add_argument('--ngenerations', type=int, default=10,
help='Number of sentences to generate')
parser.add_argument('--ninterpolations', type=int, default=5,
help='Number z-space sentence interpolation examples')
parser.add_argument('--steps', type=int, default=5,
help='Number of steps in each interpolation')
parser.add_argument('--outf', type=str, default='./generated.txt',
help='filename and path to write to')
parser.add_argument('--noprint', action='store_true',
help='prevents examples from printing')
parser.add_argument('--sample', action='store_true',
help='sample when decoding for generation')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
args = parser.parse_args()
print(vars(args))
main(args)
| 5,149 | 36.867647 | 79 | py |
mkbe | mkbe-master/DesGAN/utils.py | import os
import torch
import numpy as np
import random
def load_kenlm():
global kenlm
import kenlm
def to_gpu(gpu, var):
if gpu:
return var.cuda()
return var
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.word2idx['<pad>'] = 0
self.word2idx['<sos>'] = 1
self.word2idx['<eos>'] = 2
self.word2idx['<oov>'] = 3
self.wordcounts = {}
# to track word counts
def add_word(self, word):
if word not in self.wordcounts:
self.wordcounts[word] = 1
else:
self.wordcounts[word] += 1
# prune vocab based on count k cutoff or most frequently seen k words
def prune_vocab(self, k=5, cnt=False):
# get all words and their respective counts
vocab_list = [(word, count) for word, count in self.wordcounts.items()]
if cnt:
# prune by count
self.pruned_vocab = \
{pair[0]: pair[1] for pair in vocab_list if pair[1] > k}
else:
# prune by most frequently seen words
vocab_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
k = min(k, len(vocab_list))
self.pruned_vocab = [pair[0] for pair in vocab_list[:k]]
# sort to make vocabulary determistic
self.pruned_vocab.sort()
# add all chosen words to new vocabulary/dict
for word in self.pruned_vocab:
if word not in self.word2idx:
self.word2idx[word] = len(self.word2idx)
print("original vocab {}; pruned to {}".
format(len(self.wordcounts), len(self.word2idx)))
self.idx2word = {v: k for k, v in self.word2idx.items()}
def __len__(self):
return len(self.word2idx)
class Corpus(object):
def __init__(self, path, maxlen, vocab_size=11000, lowercase=False):
self.dictionary = Dictionary()
self.maxlen = maxlen
self.lowercase = lowercase
self.vocab_size = vocab_size
self.train_path = os.path.join(path, 'train.txt')
self.test_path = os.path.join(path, 'test.txt')
# make the vocabulary from training set
self.make_vocab()
self.train = self.tokenize(self.train_path)
self.test = self.tokenize(self.test_path)
def make_vocab(self):
assert os.path.exists(self.train_path)
# Add words to the dictionary
with open(self.train_path, 'r') as f:
for line in f:
if self.lowercase:
# -1 to get rid of \n character
words = line[:-1].lower().split(" ")
else:
words = line[:-1].split(" ")
for word in words:
self.dictionary.add_word(word)
# prune the vocabulary
self.dictionary.prune_vocab(k=self.vocab_size, cnt=False)
def tokenize(self, path):
"""Tokenizes a text file."""
dropped = 0
with open(path, 'r') as f:
linecount = 0
lines = []
for line in f:
linecount += 1
if self.lowercase:
words = line[:-1].lower().strip().split(" ")
else:
words = line[:-1].strip().split(" ")
if len(words) > self.maxlen:
dropped += 1
continue
words = ['<sos>'] + words
words += ['<eos>']
# vectorize
vocab = self.dictionary.word2idx
unk_idx = vocab['<oov>']
indices = [vocab[w] if w in vocab else unk_idx for w in words]
lines.append(indices)
print("Number of sentences dropped from {}: {} out of {} total".
format(path, dropped, linecount))
return lines
def batchify(data, bsz, shuffle=False, gpu=False):
#if shuffle:
# random.shuffle(data)
nbatch = len(data) // bsz
batches = []
for i in range(nbatch):
# Pad batches to maximum sequence length in batch
batch = data[i*bsz:(i+1)*bsz]
# subtract 1 from lengths b/c includes BOTH starts & end symbols
lengths = [len(x)-1 for x in batch]
# sort items by length (decreasing)
batch, lengths = length_sort(batch, lengths)
# source has no end symbol
source = [x[:-1] for x in batch]
# target has no start symbol
target = [x[1:] for x in batch]
# find length to pad to
maxlen = max(lengths)
for x, y in zip(source, target):
zeros = (maxlen-len(x))*[0]
x += zeros
y += zeros
source = torch.LongTensor(np.array(source))
target = torch.LongTensor(np.array(target)).view(-1)
if gpu:
source = source.cuda()
target = target.cuda()
batches.append((source, target, lengths))
return batches
def batchify_C(data, condition, bsz, shuffle=False, gpu=False):
#if shuffle:
# random.shuffle(data)
nbatch = len(data) // bsz
batches = []
cond_batch = []
for i in range(nbatch):
# Pad batches to maximum sequence length in batch
batch = data[i*bsz:(i+1)*bsz]
cond = condition[i*bsz:(i+1)*bsz]
# subtract 1 from lengths b/c includes BOTH starts & end symbols
lengths = [len(x)-1 for x in batch]
lengths_cond = [len(x) for x in cond]
# sort items by length (decreasing)
batch, lengths, cond = length_sort_c(batch, lengths, cond)
# source has no end symbol
source = [x[1:-1] for x in batch]
# target has no start symbol
target = [x[1:-1] for x in batch]
# source has no end symbol
source_cond = [x[1:-1] for x in cond]
# target has no start symbol
target_cond = [x[1:-1] for x in cond]
# find length to pad to
maxlen = max(lengths)
for x, y in zip(source, target):
zeros = (maxlen-len(x))*[0]
x += zeros
y += zeros
source_cond = torch.LongTensor(np.array(source_cond))
target_cond = torch.LongTensor(np.array(target_cond)).view(-1)
source = torch.LongTensor(np.array(source))
target = torch.LongTensor(np.array(target)).view(-1)
if gpu:
source_cond = source_cond.cuda()
target_cond = target_cond.cuda()
source = source.cuda()
target = target.cuda()
cond_batch.append((source_cond, target_cond, lengths_cond))
batches.append((source, target, lengths))
return batches, cond_batch
def length_sort(items, lengths, descending=True):
"""In order to use pytorch variable length sequence package"""
items = list(zip(items, lengths))
items.sort(key=lambda x: x[1], reverse=True)
items, lengths = zip(*items)
return list(items), list(lengths)
def length_sort_c(items, lengths, cond, descending=True):
"""In order to use pytorch variable length sequence package"""
items = list(zip(items, lengths, cond))
items.sort(key=lambda x: x[1], reverse=True)
items, lengths, cond = zip(*items)
return list(items), list(lengths), list(cond)
def train_ngram_lm(kenlm_path, data_path, output_path, N):
"""
Trains a modified Kneser-Ney n-gram KenLM from a text file.
Creates a .arpa file to store n-grams.
"""
# create .arpa file of n-grams
curdir = os.path.abspath(os.path.curdir)
#
command = "bin/lmplz -o "+str(N)+" <"+os.path.join(curdir, data_path) + \
" >"+os.path.join(curdir, output_path)
os.system("cd "+os.path.join(kenlm_path, 'build')+" && "+command)
load_kenlm()
# create language model
model = kenlm.Model(output_path)
return model
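# Usage sketch (added; paths are hypothetical and assume KenLM was compiled so that
# build/bin/lmplz exists under kenlm_path):
#   lm = train_ngram_lm("/path/to/kenlm", "data/train.txt", "output/train.arpa", N=5)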
def get_ppl(lm, sentences):
"""
Assume sentences is a list of strings (space delimited sentences)
"""
total_nll = 0
total_wc = 0
for sent in sentences:
words = sent.strip().split()
score = lm.score(sent, bos=True, eos=False)
word_count = len(words)
total_wc += word_count
total_nll += score
ppl = 10**-(total_nll/total_wc)
return ppl
| 8,267 | 30.557252 | 79 | py |
mkbe | mkbe-master/DesGAN/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from utils import to_gpu
import json
import os
import numpy as np
class MLP_D(nn.Module):
def __init__(self, ninput, noutput, layers,
activation=nn.LeakyReLU(0.2), gpu=False):
super(MLP_D, self).__init__()
self.ninput = ninput
self.noutput = noutput
layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
self.layers = []
for i in range(len(layer_sizes)-1):
if i==0:
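                # Added comment: the first layer also consumes the conditioning vector c
                # (assumed to be 199-dimensional here), which forward() concatenates onto x.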
layer = nn.Linear(layer_sizes[i]+199, layer_sizes[i+1])
else:
layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
self.layers.append(layer)
self.add_module("layer"+str(i+1), layer)
# No batch normalization after first layer
if i != 0:
bn = nn.BatchNorm1d(layer_sizes[i+1], eps=1e-05, momentum=0.1)
self.layers.append(bn)
self.add_module("bn"+str(i+1), bn)
self.layers.append(activation)
self.add_module("activation"+str(i+1), activation)
layer = nn.Linear(layer_sizes[-1], noutput)
self.layers.append(layer)
self.add_module("layer"+str(len(self.layers)), layer)
self.init_weights()
def forward(self, x, c):
for i, layer in enumerate(self.layers):
if i==0:
x = torch.cat((x, c), 1)
x = layer(x)
y = torch.mean(torch.sigmoid(x))
x = torch.mean(x)
return x, y
def init_weights(self):
init_std = 0.02
for layer in self.layers:
try:
layer.weight.data.normal_(0, init_std)
layer.bias.data.fill_(0)
except:
pass
class MLP_G(nn.Module):
def __init__(self, ninput, noutput, layers,
activation=nn.ReLU(), gpu=False):
super(MLP_G, self).__init__()
self.ninput = ninput
self.noutput = noutput
layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
self.layers = []
for i in range(len(layer_sizes)-1):
layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
self.layers.append(layer)
self.add_module("layer"+str(i+1), layer)
bn = nn.BatchNorm1d(layer_sizes[i+1], eps=1e-05, momentum=0.1)
self.layers.append(bn)
self.add_module("bn"+str(i+1), bn)
self.layers.append(activation)
self.add_module("activation"+str(i+1), activation)
layer = nn.Linear(layer_sizes[-1], noutput)
self.layers.append(layer)
self.add_module("layer"+str(len(self.layers)), layer)
self.init_weights()
def forward(self, x):
for i, layer in enumerate(self.layers):
x = layer(x)
return x
def init_weights(self):
init_std = 0.02
for layer in self.layers:
try:
layer.weight.data.normal_(0, init_std)
layer.bias.data.fill_(0)
except:
pass
class Seq2Seq(nn.Module):
def __init__(self, emsize, nhidden, ntokens, nlayers, noise_radius=0.2,
hidden_init=False, dropout=0, gpu=False):
super(Seq2Seq, self).__init__()
self.nhidden = nhidden
self.emsize = emsize
self.ntokens = ntokens
self.nlayers = nlayers
self.noise_radius = noise_radius
self.hidden_init = hidden_init
self.dropout = dropout
self.gpu = gpu
self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))
# Vocabulary embedding
self.embedding = nn.Embedding(ntokens, emsize)
self.embedding_decoder = nn.Embedding(ntokens, emsize)
# RNN Encoder and Decoder
self.encoder = nn.LSTM(input_size=emsize,
hidden_size=nhidden,
num_layers=nlayers,
dropout=dropout,
batch_first=True)
decoder_input_size = emsize+nhidden
self.decoder = nn.LSTM(input_size=decoder_input_size,
hidden_size=nhidden,
num_layers=1,
dropout=dropout,
batch_first=True)
# Initialize Linear Transformation
self.linear = nn.Linear(nhidden, ntokens)
self.init_weights()
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return (to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2))
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False):
batch_size, maxlen = indices.size()
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
def encode(self, indices, lengths, noise):
embeddings = self.embedding(indices)
packed_embeddings = pack_padded_sequence(input=embeddings,
lengths=lengths,
batch_first=True)
# Encode
packed_output, state = self.encoder(packed_embeddings)
hidden, cell = state
# batch_size x nhidden
hidden = hidden[-1] # get hidden state of last layer of encoder
# normalize to unit ball (l2 norm of 1) - p=2, dim=1
norms = torch.norm(hidden, 2, 1)
# For older versions of PyTorch use:
#hidden = torch.div(hidden, norms.expand_as(hidden))
# For newest version of PyTorch (as of 8/25) use this:
hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))
if noise and self.noise_radius > 0:
gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
std=self.noise_radius)
hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
return hidden
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
# batch x hidden
all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
embeddings = self.embedding_decoder(indices)
augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
packed_embeddings = pack_padded_sequence(input=augmented_embeddings,
lengths=lengths,
batch_first=True)
packed_output, state = self.decoder(packed_embeddings, state)
output, lengths = pad_packed_sequence(packed_output, batch_first=True)
# reshape to batch_size*maxlen x nhidden before linear over vocab
decoded = self.linear(output.contiguous().view(-1, self.nhidden))
decoded = decoded.view(batch_size, maxlen, self.ntokens)
return decoded
def generate(self, hidden, maxlen, sample=True, temp=1.0):###changed
"""Generate through decoder; no backprop"""
batch_size = hidden.size(0)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
# <sos>
self.start_symbols.data.resize_(batch_size, 1)
self.start_symbols.data.fill_(1)
embedding = self.embedding_decoder(self.start_symbols)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2) ###
# unroll
all_indices = []
for i in range(maxlen):
change = 0
output, state = self.decoder(inputs, state)
overvocab = self.linear(output.squeeze(1))
if not sample:
vals, indices = torch.max(overvocab, 1)
else:
# sampling
change = 1
probs = F.softmax(overvocab/temp)
indices = torch.multinomial(probs, 1)
if change ==0:
indices = indices.unsqueeze(1)
all_indices.append(indices) ### indices -> indices.unsqueeze(1)
embedding = self.embedding_decoder(indices)
#embedding = embedding.unsqueeze(1) ### I added this for the initial generation step
#print (embedding.shape, hidden.unsqueeze(1).shape)
####print (hidden.shape)
####print (hidden.unsqueeze(1).shape)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2) ###
max_indices = torch.cat(all_indices, 1)##
return max_indices
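# Shape sketch of the autoregressive unroll above (assuming batch_size=B,
# maxlen=T): at every step the decoder input is the embedding of the previous
# token concatenated with the fixed sentence code, i.e. (B, 1, emsize+nhidden);
# the decoder output (B, 1, nhidden) is mapped by self.linear to logits
# (B, ntokens). With sample=False the argmax token is taken, with sample=True a
# token is drawn from softmax(logits / temp), and the returned max_indices has
# shape (B, T).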
def load_models(load_path):
model_args = json.load(open("{}/args.json".format(load_path), "r"))
word2idx = json.load(open("{}/vocab.json".format(load_path), "r"))
idx2word = {v: k for k, v in word2idx.items()}
autoencoder = Seq2Seq(emsize=model_args['emsize'],
nhidden=model_args['nhidden'],
ntokens=model_args['ntokens'],
nlayers=model_args['nlayers'],
hidden_init=model_args['hidden_init'])
gan_gen = MLP_G(ninput=model_args['z_size'],
noutput=model_args['nhidden'],
layers=model_args['arch_g'])
gan_disc = MLP_D(ninput=model_args['nhidden'],
noutput=1,
layers=model_args['arch_d'])
print('Loading models from ' + load_path)
ae_path = os.path.join(load_path, "autoencoder_model.pt")
gen_path = os.path.join(load_path, "gan_gen_model.pt")
disc_path = os.path.join(load_path, "gan_disc_model.pt")
autoencoder.load_state_dict(torch.load(ae_path))
gan_gen.load_state_dict(torch.load(gen_path))
gan_disc.load_state_dict(torch.load(disc_path))
return model_args, idx2word, autoencoder, gan_gen, gan_disc
def generate(autoencoder, gan_gen, z, vocab, sample, maxlen):### changed
"""
Assume noise is batch_size x z_size
"""
if type(z) == Variable:
noise = z
elif type(z) == torch.FloatTensor or type(z) == torch.cuda.FloatTensor:
noise = Variable(z, volatile=True)
elif type(z) == np.ndarray:
noise = Variable(torch.from_numpy(z).float(), volatile=True)
else:
raise ValueError("Unsupported input type (noise): {}".format(type(z)))
gan_gen.eval()
autoencoder.eval()
# generate from random noise
fake_hidden = gan_gen(noise)
max_indices = autoencoder.generate(hidden=fake_hidden,
maxlen=maxlen,
sample=sample)
max_indices = max_indices.data.cpu().numpy()
sentences = []
for idx in max_indices:
# generated sentence
words = [vocab[x] for x in idx]
# truncate sentences to first occurrence of <eos>
truncated_sent = []
for w in words:
if w != '<eos>':
truncated_sent.append(w)
else:
break
sent = " ".join(truncated_sent)
sentences.append(sent)
return sentences
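# Illustrative usage sketch (paths and sizes are examples only): load a trained
# run, sample Gaussian noise of size (batch, z_size), and decode it to text.
# model_args, idx2word, autoencoder, gan_gen, gan_disc = load_models("./output/example")
# z = np.random.normal(size=(10, model_args['z_size']))
# sentences = generate(autoencoder, gan_gen, z, vocab=idx2word,
# sample=True, maxlen=model_args['maxlen'])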
| 12,561 | 33.991643 | 82 | py |
mkbe | mkbe-master/DesGAN/train.py | import argparse
import os
import time
import math
import numpy as np
import random
import sys
import json
from sklearn import preprocessing
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from utils import to_gpu, Corpus, batchify, train_ngram_lm, get_ppl, batchify_C
from models import Seq2Seq, MLP_D, MLP_G
parser = argparse.ArgumentParser(description='PyTorch ARAE for Text')
# Path Arguments
parser.add_argument('--data_path', type=str, required=True,
help='location of the data corpus')
parser.add_argument('--kenlm_path', type=str, default='./kenlm',
help='path to kenlm directory')
parser.add_argument('--outf', type=str, default='example',
help='output directory name')
# Data Processing Arguments
parser.add_argument('--vocab_size', type=int, default=11000,
help='cut vocabulary down to this size '
'(most frequently seen words in train)')
parser.add_argument('--maxlen', type=int, default=30, ### 30 -> 7
help='maximum sentence length')
parser.add_argument('--lowercase', action='store_true',
help='lowercase all text')
# Model Arguments
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhidden', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--noise_radius', type=float, default=0.2,
help='stdev of noise for autoencoder (regularizer)')
parser.add_argument('--noise_anneal', type=float, default=0.995,
help='anneal noise_radius exponentially by this'
'every 100 iterations')
parser.add_argument('--hidden_init', action='store_true',
help="initialize decoder hidden state with encoder's")
parser.add_argument('--arch_g', type=str, default='300-300',
help='generator architecture (MLP)')
parser.add_argument('--arch_d', type=str, default='300-300',
help='critic/discriminator architecture (MLP)')
parser.add_argument('--z_size', type=int, default=199,
help='dimension of random noise z to feed into generator')
parser.add_argument('--temp', type=float, default=1,
help='softmax temperature (lower --> more discrete)')
parser.add_argument('--enc_grad_norm', type=bool, default=True,
help='norm code gradient from critic->encoder')
parser.add_argument('--gan_toenc', type=float, default=-0.01,
help='weight factor passing gradient from gan to encoder')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
# Training Arguments
parser.add_argument('--epochs', type=int, default=15,
help='maximum number of epochs')
parser.add_argument('--min_epochs', type=int, default=6,
help="minimum number of epochs to train for")
parser.add_argument('--no_earlystopping', action='store_true',
help="won't use KenLM for early stopping")
parser.add_argument('--patience', type=int, default=5,
help="number of language model evaluations without ppl "
"improvement to wait before early stopping")
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='batch size')
parser.add_argument('--niters_ae', type=int, default=1,
help='number of autoencoder iterations in training')
parser.add_argument('--niters_gan_d', type=int, default=5,
help='number of discriminator iterations in training')
parser.add_argument('--niters_gan_g', type=int, default=1,
help='number of generator iterations in training')
parser.add_argument('--niters_gan_schedule', type=str, default='2-4-6',
help='epoch counts to increase number of GAN training '
' iterations (increment by 1 each time)')
parser.add_argument('--lr_ae', type=float, default=1,
help='autoencoder learning rate')
parser.add_argument('--lr_gan_g', type=float, default=5e-05,
help='generator learning rate')
parser.add_argument('--lr_gan_d', type=float, default=1e-05,
help='critic/discriminator learning rate')
parser.add_argument('--lr_ae_l', type=float, default=5e-03,
help='autoencoder l1 rate')
parser.add_argument('--lr_gan_l', type=float, default=5e-03,
help='l1 learning rate')
parser.add_argument('--beta1', type=float, default=0.9,
help='beta1 for adam. default=0.9')
parser.add_argument('--clip', type=float, default=1,
help='gradient clipping, max norm')
parser.add_argument('--gan_clamp', type=float, default=0.01,
help='WGAN clamp')
# Evaluation Arguments
parser.add_argument('--sample', action='store_true',
help='sample when decoding for generation')
parser.add_argument('--N', type=int, default=5,
help='N-gram order for training n-gram language model')
parser.add_argument('--log_interval', type=int, default=200,
help='interval to log autoencoder training results')
# Other
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
args = parser.parse_args()
print(vars(args))
# make output directory if it doesn't already exist
if not os.path.isdir('./output'):
os.makedirs('./output')
if not os.path.isdir('./output/{}'.format(args.outf)):
os.makedirs('./output/{}'.format(args.outf))
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, "
"so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
# create corpus
corpus = Corpus(args.data_path,
maxlen=args.maxlen,
vocab_size=args.vocab_size,
lowercase=args.lowercase)
# dumping vocabulary
with open('./output/{}/vocab.json'.format(args.outf), 'w') as f:
json.dump(corpus.dictionary.word2idx, f)
# save arguments
ntokens = len(corpus.dictionary.word2idx)
print("Vocabulary Size: {}".format(ntokens))
args.ntokens = ntokens
with open('./output/{}/args.json'.format(args.outf), 'w') as f:
json.dump(vars(args), f)
with open("./output/{}/logs.txt".format(args.outf), 'w') as f:
f.write(str(vars(args)))
f.write("\n\n")
eval_batch_size = 10
# load conditional information
test_C = np.load('data/test_weight-YAGO.npy')
train_C = np.load('data/train_weight-YAGO.npy')
test_C = preprocessing.normalize(test_C, norm='l2')
train_C = preprocessing.normalize(train_C, norm='l2')
test_data, test_c = batchify_C(corpus.test, test_C, eval_batch_size, shuffle=False)
train_data, train_c = batchify_C(corpus.train, train_C, args.batch_size, shuffle=False)
test_final = batchify(test_C, len(test_C), shuffle=False)
print("Loaded data!")
###############################################################################
# Build the models
###############################################################################
ntokens = len(corpus.dictionary.word2idx)
autoencoder = Seq2Seq(emsize=args.emsize,
nhidden=args.nhidden,
ntokens=ntokens,
nlayers=args.nlayers,
noise_radius=args.noise_radius,
hidden_init=args.hidden_init,
dropout=args.dropout,
gpu=args.cuda)
gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)
print(autoencoder)
print(gan_gen)
print(gan_disc)
optimizer_ae = optim.SGD(autoencoder.parameters(), lr=args.lr_ae)
optimizer_gan_g = optim.Adam(gan_gen.parameters(),
lr=args.lr_gan_g,
betas=(args.beta1, 0.999))
optimizer_gan_d = optim.Adam(gan_disc.parameters(),
lr=args.lr_gan_d,
betas=(args.beta1, 0.999))
#optimizer_gan_l = optim.Adam(gan_gen.parameters(),
# lr=args.lr_gan_l,
# betas=(args.beta1, 0.999))
#optimizer_ae_l = optim.Adam(autoencoder.parameters(), lr=args.lr_ae_l)
criterion_ce = nn.CrossEntropyLoss()
if args.cuda:
autoencoder = autoencoder.cuda()
gan_gen = gan_gen.cuda()
gan_disc = gan_disc.cuda()
criterion_ce = criterion_ce.cuda()
###############################################################################
# Training code
###############################################################################
def save_model():
print("Saving models")
with open('./output/{}/autoencoder_model.pt'.format(args.outf), 'wb') as f:
torch.save(autoencoder.state_dict(), f)
with open('./output/{}/gan_gen_model.pt'.format(args.outf), 'wb') as f:
torch.save(gan_gen.state_dict(), f)
with open('./output/{}/gan_disc_model.pt'.format(args.outf), 'wb') as f:
torch.save(gan_disc.state_dict(), f)
def evaluate_autoencoder(data_source, epoch):
# Turn on evaluation mode which disables dropout.
autoencoder.eval()
total_loss = 0
ntokens = len(corpus.dictionary.word2idx)
all_accuracies = 0
bcnt = 0
for i, batch in enumerate(data_source):
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source, volatile=True))
target = to_gpu(args.cuda, Variable(target, volatile=True))
mask = target.gt(0)
masked_target = target.masked_select(mask)
# examples x ntokens
output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
# output: batch x seq_len x ntokens
output = autoencoder(source, lengths, noise=True)
flattened_output = output.view(-1, ntokens)
masked_output = \
flattened_output.masked_select(output_mask).view(-1, ntokens)
total_loss += criterion_ce(masked_output/args.temp, masked_target).data
# accuracy
max_vals, max_indices = torch.max(masked_output, 1)
all_accuracies += \
torch.mean(max_indices.eq(masked_target).float()).data[0]
bcnt += 1
aeoutf = "./output/%s/%d_autoencoder.txt" % (args.outf, epoch)
# with open(aeoutf, "a") as f:
# max_values, max_indices = torch.max(output, 2)
# max_indices = \
# max_indices.view(output.size(0), -1).data.cpu().numpy()
# target = target.view(output.size(0), -1).data.cpu().numpy()
# for t, idx in zip(target, max_indices):
# # real sentence
# chars = " ".join([corpus.dictionary.idx2word[x] for x in t])
# f.write(chars)
# f.write("\n")
# autoencoder output sentence
# chars = " ".join([corpus.dictionary.idx2word[x] for x in idx])
# f.write(chars)
# f.write("\n\n")
return total_loss[0] / len(data_source), all_accuracies/bcnt
def evaluate_generator(noise, epoch):
gan_gen.eval()
autoencoder.eval()
# generate from fixed random noise
fake_hidden = gan_gen(noise)
max_indices = \
autoencoder.generate(fake_hidden, args.maxlen, sample=args.sample)
# with open("./output/%s/%s_generated.txt" % (args.outf, epoch), "w") as f:
# max_indices = max_indices.data.cpu().numpy()
# for idx in max_indices:
# generated sentence
# words = [corpus.dictionary.idx2word[x] for x in idx]
# truncate sentences to first occurrence of <eos>
# truncated_sent = []
# for w in words:
# if w != '<eos>':
# truncated_sent.append(w)
# else:
# break
# chars = " ".join(truncated_sent)
# f.write(chars)
# f.write("\n")
def train_lm(test, eval_path, save_path):#####(test, eval_path, save_path) or (eval_path, save_path)
# generate examples
indices = []
noise = to_gpu(args.cuda, Variable(torch.ones(100, args.z_size)))
test = to_gpu(args.cuda, Variable(test[0][0]))
for i in range(1):
noise.data.normal_(0, 1)
fake_hidden = gan_gen(test)
max_indices = autoencoder.generate(fake_hidden, args.maxlen)
indices.append(max_indices.data.cpu().numpy())
indices = np.concatenate(indices, axis=0)
# write generated sentences to text file
with open(save_path+".txt", "w") as f:
# laplacian smoothing
#for word in corpus.dictionary.word2idx.keys():
# f.write(word+"\n")
for idx in indices:
# generated sentence
words = [corpus.dictionary.idx2word[x] for x in idx]
# truncate sentences to first occurrence of <eos>
truncated_sent = []
for w in words:
if w != '<eos>':
truncated_sent.append(w)
else:
break
chars = " ".join(truncated_sent)
f.write(chars+"\n")
#save_path = "./snli_lm/dev_gen"
# train language model on generated examples
lm = train_ngram_lm(kenlm_path=args.kenlm_path,
data_path=save_path+".txt",
output_path=save_path+".arpa",
N=args.N)
# load sentences to evaluate on
with open(eval_path, 'r') as f:
lines = f.readlines()
sentences = [l.replace('\n', '') for l in lines]
ppl = get_ppl(lm, sentences)
return ppl
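# Note on the protocol above: an N-gram language model is trained with KenLM on
# sentences generated from the conditional embeddings, and its perplexity is
# then measured on the reference sentences in eval_path, so a lower value
# indicates that the generations cover the reference distribution better.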
def train_ae(batch, total_loss_ae, start_time, i):
autoencoder.train()
autoencoder.zero_grad()
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source))
target = to_gpu(args.cuda, Variable(target))
# Create sentence length mask over padding
mask = target.gt(0)
masked_target = target.masked_select(mask)
# examples x ntokens
output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
# output: batch x seq_len x ntokens
output = autoencoder(source, lengths, noise=True)
# output_size: batch_size, maxlen, self.ntokens
flattened_output = output.view(-1, ntokens)
masked_output = \
flattened_output.masked_select(output_mask).view(-1, ntokens)
loss = criterion_ce(masked_output/args.temp, masked_target)
loss.backward()
# `clip_grad_norm` to prevent exploding gradient in RNNs / LSTMs
torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)
optimizer_ae.step()
total_loss_ae += loss.data
accuracy = None
if i % args.log_interval == 0 and i > 0:
# accuracy
probs = F.softmax(masked_output)
max_vals, max_indices = torch.max(probs, 1)
accuracy = torch.mean(max_indices.eq(masked_target).float()).data[0]
cur_loss = total_loss_ae[0] / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f} | acc {:8.2f}'
.format(epoch, i, len(train_data),
elapsed * 1000 / args.log_interval,
cur_loss, math.exp(cur_loss), accuracy))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f} | acc {:8.2f}\n'.
format(epoch, i, len(train_data),
elapsed * 1000 / args.log_interval,
cur_loss, math.exp(cur_loss), accuracy))
total_loss_ae = 0
start_time = time.time()
return total_loss_ae, start_time
def train_gan_l(batch, batch2):##(batch2) or ()
gan_gen.train()
gan_gen.zero_grad()
noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
noise.data.normal_(0, 1)
batch_C = to_gpu(args.cuda, Variable(batch2[0]))
fake_hidden = gan_gen(batch_C)
########## l1 loss
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source))
target = to_gpu(args.cuda, Variable(target))
# batch_size x nhidden
real_hidden = autoencoder(source, lengths, noise=False, encode_only=True)
err_l = torch.mean(torch.abs(fake_hidden - real_hidden))
err_l.backward( )
##########
optimizer_gan_l.step() # note: optimizer_gan_l is only defined in the commented-out block above; it must be uncommented before train_gan_l is used
return err_l
def train_gan_g(batch, batch2):##(batch2) or ()
gan_gen.train()
gan_gen.zero_grad()
noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
noise.data.normal_(0, 1)
batch_C = to_gpu(args.cuda, Variable(batch2[0]))
fake_hidden = gan_gen(batch_C)
errG, y = gan_disc(fake_hidden, batch_C)
# loss / backprop
errG.backward(one)
optimizer_gan_g.step()
return errG
def grad_hook(grad):
# Gradient norm: regularize to be same
# code_grad_gan * code_grad_ae / norm(code_grad_gan)
if args.enc_grad_norm:
gan_norm = torch.norm(grad, 2, 1).detach().data.mean()
normed_grad = grad * autoencoder.grad_norm / gan_norm
else:
normed_grad = grad
# weight factor and sign flip
normed_grad *= -math.fabs(args.gan_toenc)
return normed_grad
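# grad_hook rescales the critic gradient flowing back into the encoder so that
# its norm matches the autoencoder gradient norm recorded by store_grad_norm,
# then flips the sign and scales by |gan_toenc|:
# normed_grad = grad * (||grad_ae|| / ||grad_gan||) * (-|gan_toenc|)
# which keeps the GAN signal from overwhelming the reconstruction gradient.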
def train_gan_d(batch, batch2):###(batch, batch2) or (batch)
# clamp parameters to a cube
for p in gan_disc.parameters():
p.data.clamp_(-args.gan_clamp, args.gan_clamp)
autoencoder.train()
autoencoder.zero_grad()
gan_disc.train()
gan_disc.zero_grad()
# positive samples ----------------------------
# generate real codes
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source))
target = to_gpu(args.cuda, Variable(target))
# batch_size x nhidden
real_hidden = autoencoder(source, lengths, noise=False, encode_only=True)
real_hidden_l = real_hidden
real_hidden.register_hook(grad_hook)
batch_C = to_gpu(args.cuda, Variable(batch2[0]))
# loss / backprop
errD_real, y1 = gan_disc(real_hidden, batch_C)
errD_real.backward(one)
# negative samples ----------------------------
# generate fake codes
noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
noise.data.normal_(0, 1)
fake_hidden = gan_gen(batch_C)
errD_fake, y2 = gan_disc(fake_hidden.detach(), batch_C)
errD_fake.backward(mone)
# `clip_grad_norm` to prevent exploding gradient problem in RNNs / LSTMs
torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)
optimizer_gan_d.step()
optimizer_ae.step()
errD = -(errD_real - errD_fake)
return errD, errD_real, errD_fake
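# train_gan_d is a WGAN-style critic update: parameters are clamped to
# [-gan_clamp, gan_clamp] as a Lipschitz surrogate, real codes come from the
# autoencoder encoder and fake codes from the generator conditioned on batch_C,
# the two scores are backpropagated with opposite signs (one / mone), and
# errD = -(errD_real - errD_fake) is returned as the critic loss estimate.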
print("Training...")
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('Training...\n')
# schedule of increasing GAN training loops
if args.niters_gan_schedule != "":
gan_schedule = [int(x) for x in args.niters_gan_schedule.split("-")]
else:
gan_schedule = []
niter_gan = 1
print(gan_schedule)
fixed_noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
fixed_noise.data.normal_(0, 1)
print (len(fixed_noise))
one = to_gpu(args.cuda, torch.FloatTensor([1]))
mone = one * -1
best_ppl = None
impatience = 0
all_ppl = []
for epoch in range(1, args.epochs+1):
# update gan training schedule
if epoch in gan_schedule:
niter_gan += 1
print("GAN training loop schedule increased to {}".format(niter_gan))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write("GAN training loop schedule increased to {}\n".
format(niter_gan))
total_loss_ae = 0
epoch_start_time = time.time()
start_time = time.time()
niter = 0
niter_global = 1
# loop through all batches in training data
while niter < len(train_data):
# train autoencoder ----------------------------
for i in range(args.niters_ae):
if niter == len(train_data):
break # end of epoch
print(train_data[niter][0].shape)
total_loss_ae, start_time = \
train_ae(train_data[niter], total_loss_ae, start_time, niter)
niter += 1
# train gan ----------------------------------
for k in range(niter_gan):
# train discriminator/critic
for i in range(args.niters_gan_d):
# feed a seen sample within this epoch; good for early training
point = random.randint(0, len(train_data)-1)
errD, errD_real, errD_fake = \
train_gan_d(train_data[point], train_c[point])
# train generator
for i in range(args.niters_gan_g):
point = random.randint(0, len(train_data)-1)
errG = train_gan_g(train_data[point], train_c[point])
niter_global += 1
if niter_global % 100 == 0:
print('[%d/%d][%d/%d] Loss_D: %.8f (Loss_D_real: %.8f '
'Loss_D_fake: %.8f) Loss_G: %.8f'
% (epoch, args.epochs, niter, len(train_data),
errD.data[0], errD_real.data[0],
errD_fake.data[0], errG.data[0]))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('[%d/%d][%d/%d] Loss_D: %.8f (Loss_D_real: %.8f '
'Loss_D_fake: %.8f) Loss_G: %.8f\n'
% (epoch, args.epochs, niter, len(train_data),
errD.data[0], errD_real.data[0],
errD_fake.data[0], errG.data[0]))
# exponentially decaying noise on autoencoder
autoencoder.noise_radius = \
autoencoder.noise_radius*args.noise_anneal
if niter_global % 3000 == 0:
evaluate_generator(fixed_noise, "epoch{}_step{}".
format(epoch, niter_global))
# evaluate with lm
if not args.no_earlystopping and epoch > args.min_epochs:
# train_lm expects the conditional embeddings as its first argument,
# matching the end-of-epoch call further below
ppl = train_lm(test_final,
eval_path=os.path.join(args.data_path,
"test.txt"),
save_path="output/{}/"
"epoch{}_step{}_lm_generations".
format(args.outf, epoch,
niter_global))
print("Perplexity {}".format(ppl))
all_ppl.append(ppl)
print(all_ppl)
with open("./output/{}/logs.txt".
format(args.outf), 'a') as f:
f.write("\n\nPerplexity {}\n".format(ppl))
f.write(str(all_ppl)+"\n\n")
if best_ppl is None or ppl < best_ppl:
impatience = 0
best_ppl = ppl
print("New best ppl {}\n".format(best_ppl))
with open("./output/{}/logs.txt".
format(args.outf), 'a') as f:
f.write("New best ppl {}\n".format(best_ppl))
save_model()
else:
impatience += 1
# end training
if impatience > args.patience:
print("Ending training")
with open("./output/{}/logs.txt".
format(args.outf), 'a') as f:
f.write("\nEnding Training\n")
sys.exit()
# end of epoch ----------------------------
# evaluation
test_loss, accuracy = evaluate_autoencoder(test_data, epoch)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
'test ppl {:5.2f} | acc {:3.3f}'.
format(epoch, (time.time() - epoch_start_time),
test_loss, math.exp(test_loss), accuracy))
print('-' * 89)
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('-' * 89)
f.write('\n| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} |'
' test ppl {:5.2f} | acc {:3.3f}\n'.
format(epoch, (time.time() - epoch_start_time),
test_loss, math.exp(test_loss), accuracy))
f.write('-' * 89)
f.write('\n')
evaluate_generator(fixed_noise, "end_of_epoch_{}".format(epoch))
if not args.no_earlystopping and epoch >= args.min_epochs:
ppl = train_lm(test_final, eval_path=os.path.join(args.data_path, "test.txt"),
save_path="./output/{}/end_of_epoch{}_lm_generations".
format(args.outf, epoch))
print("Perplexity {}".format(ppl))
all_ppl.append(ppl)
print(all_ppl)
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write("\n\nPerplexity {}\n".format(ppl))
f.write(str(all_ppl)+"\n\n")
if best_ppl is None or ppl < best_ppl:
impatience = 0
best_ppl = ppl
print("New best ppl {}\n".format(best_ppl))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write("New best ppl {}\n".format(best_ppl))
save_model()
else:
pass # patience counting is disabled here (see the commented-out lines below)
# impatience += 1
# end training
# if impatience > args.patience:
# print("Ending training")
# with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
# f.write("\nEnding Training\n")
# sys.exit()
# shuffle between epochs
train_data = batchify(corpus.train, args.batch_size, shuffle=False)
| 26,814 | 37.472023 | 100 | py |
mkbe | mkbe-master/MKBE/models/yago_convE_kb_model.py | import tensorflow as tf
from tensorpack import *
from tensorflow.contrib.keras import backend as K
class YAGOConveMultimodel(ModelDesc):
def __init__(self, hyperparams):
super(YAGOConveMultimodel, self).__init__()
self.hyperparams = hyperparams
def _get_inputs(self):
return [InputDesc(tf.int32, (None,), "e1"),
InputDesc(tf.int32, (None,), "r"),
InputDesc(tf.int8, (None, self.hyperparams["entity_size"]), "e2_multihot"),
InputDesc(tf.int32, (None,), "e2_ind")]
def generate_onehot(self, indices):
entity_size = self.hyperparams["entity_size"]
return tf.one_hot(
indices, entity_size, dtype=tf.float32, on_value=1.0 - self.hyperparams["label_smoothing"],
off_value=self.hyperparams["label_smoothing"] / (entity_size - 1.0))
def label_smoothing(self, onehots, lambd):
e2 = tf.cast(onehots, tf.float32)
e2_multi = (1.0 - lambd) * e2 + (10 * lambd / self.hyperparams["entity_size"])
return e2_multi
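# Worked example of the smoothing above (illustrative numbers): with
# lambd = 0.1 and entity_size = 10000, a positive label 1.0 becomes
# 0.9 + 10 * 0.1 / 10000 = 0.9001 and a negative label 0.0 becomes
# 10 * 0.1 / 10000 = 0.0001, i.e. the multi-hot targets are softened before the
# sigmoid cross-entropy loss in _build_graph.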
def _build_graph(self, inputs):
hyperparams = self.hyperparams
dtype = tf.float32
id_dtype = tf.int32
e1, r, e2_multihot, e2_ind = inputs
label_smooth = tf.placeholder(tf.float32, name="label_smoothing", shape=())
mlp_keepprob = tf.placeholder(tf.float32, name="mlp_keepprob")
enc_keepprob = tf.placeholder(tf.float32, name="enc_keepprob")
emb_keepprob = tf.placeholder(tf.float32, name="emb_keepprob")
fm_keepprob = tf.placeholder(tf.float32, name="fm_keepprob")
is_training = tf.placeholder(tf.bool, name="is_training")
# Weights for embeddings
if hyperparams["emb_dim"] > 3:
self.entity_weights = tf.get_variable(
"entity_weights", shape=[hyperparams["entity_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
self.rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
self.word_weights = tf.get_variable(
"word_weights", shape=[hyperparams["word_size"], hyperparams["emb_dim"] // 2],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
else:
self.entity_weights = tf.get_variable(
"entity_weights", shape=[hyperparams["entity_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
self.rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
self.word_weights = tf.get_variable(
"word_weights", shape=[hyperparams["word_size"], hyperparams["emb_dim"] // 2],
dtype=dtype, initializer=tf.truncated_normal_initializer(dtype=dtype)
)
# Encode e1 and r
e1_emb = tf.nn.embedding_lookup(self.entity_weights, e1)
r_emb = tf.nn.embedding_lookup(self.rel_weights, r)
# Collect Regularization variables
regularized_variables = []
# Aggregate and normalize e1
e1_list = [e1_emb]
if hyperparams["normalize_e1"]:
Pos_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in e1_list], axis=0),
dim=1)
else:
Pos_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in e1_list], axis=0)
regularized_variables += [self.entity_weights]
# Aggregate r
r_list = [r_emb]
if hyperparams["normalize_relation"]:
Pos_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in r_list], axis=0),
dim=1)
else:
Pos_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in r_list], axis=0)
regularized_variables += [self.rel_weights]
# ConvE link prediction
with tf.variable_scope("convE"):
emb_dim = hyperparams["emb_dim"]
pose1_img = tf.reshape(Pos_e1, (-1, emb_dim // 10, 10, 1))
posr_img = tf.reshape(Pos_r, (-1, emb_dim // 10, 10, 1))
pos_stack = tf.layers.batch_normalization(tf.concat(
[pose1_img, posr_img], 2), training=is_training, epsilon=1e-5, momentum=0.1)
pos_indrop = tf.nn.dropout(pos_stack, emb_keepprob)
convE_ker = tf.get_variable("convE_ker", shape=[3, 3, 1, 32], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
convE_bias = tf.get_variable("convE_bias", shape=[32], dtype=dtype, initializer=tf.zeros_initializer)
pos_convE_conv = tf.nn.relu(tf.layers.batch_normalization(tf.nn.bias_add(tf.nn.convolution(
pos_indrop, convE_ker, "VALID"), convE_bias), training=is_training, epsilon=1e-5, momentum=0.1))
fm_dropout = tf.contrib.keras.layers.SpatialDropout2D(1.0 - fm_keepprob)
pos_flat = tf.reshape(fm_dropout(pos_convE_conv, training=is_training), (-1, 10368))
self.convE_fc_w = tf.get_variable(
"convE_fc_w", shape=[10368, hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
self.convE_fc_b = tf.get_variable(
"convE_fc_b", shape=[hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.constant_initializer(value=0.0))
pos_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.nn.bias_add(tf.matmul(
pos_flat, self.convE_fc_w), self.convE_fc_b), mlp_keepprob), training=is_training, epsilon=1e-5,
momentum=0.1))
self.pred = tf.matmul(pos_fc, self.entity_weights, transpose_b=True)
regularized_variables += [convE_ker, convE_bias, self.convE_fc_w, self.convE_fc_b]
# Generate e2 labels
e2_label = self.label_smoothing(e2_multihot, label_smooth)
# Sigmoid BCE loss/ sigmoid cross entropy
#self.ll_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=e2_label, logits=self.pred))
self.ll_loss = tf.reduce_mean(
tf.losses.sigmoid_cross_entropy(e2_label, self.pred, reduction=tf.losses.Reduction.NONE))
# Regularization term
regularizer = tf.contrib.layers.l2_regularizer(hyperparams["regularization_coefficient"])
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularized_variables)
# Aggregate loss
self.loss = tf.add(self.ll_loss, regularization_term, name="loss")
self.cost = self.loss
# Learning rate decay
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.maximum(tf.train.exponential_decay(
hyperparams["learning_rate"], global_step / 15000, 1, hyperparams["lr_decay"]), 1e-7, name="lr")
# Training op
self.train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss, global_step=global_step)
# Testing Graph
self.test_graph(e2_ind)
# Summaries
self.summaries()
return self.cost
def test_graph(self, pos_e2):
self.likelihood = tf.nn.sigmoid(self.pred)
pos_score = tf.diag_part(tf.nn.embedding_lookup(tf.transpose(self.likelihood), pos_e2))
cmp = tf.expand_dims(pos_score, axis=1) > self.likelihood
self.rank = tf.reduce_sum(tf.cast(cmp, tf.int32), axis=1) + 1
mrr = tf.reduce_mean(1.0 / tf.cast(self.rank, tf.float32), name="mrr")
hits_10 = tf.reduce_mean(tf.cast(self.rank <= 10, tf.float32), name="hits_10")
hits_3 = tf.reduce_mean(tf.cast(self.rank <= 3, tf.float32), name="hits_3")
hits_1 = tf.reduce_mean(tf.cast(self.rank <= 1, tf.float32), name="hits_1")
invalid_e2 = tf.reduce_mean(tf.cast(tf.equal(pos_e2, 0), tf.float32), name="inv_e2")
return mrr, hits_1, hits_3, hits_10
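# Metric bookkeeping above: for every query the sigmoid score of the annotated
# tail entity is compared against the scores of all entities to produce a rank,
# and the summaries report MRR = mean(1 / rank) together with
# Hits@{1,3,10} = fraction of queries whose rank does not exceed the threshold.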
def summaries(self):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("bce_loss", self.ll_loss)
tf.summary.histogram("logits", self.pred)
tf.summary.histogram("rank", self.rank)
tf.summary.histogram("probability", self.likelihood)
tf.summary.histogram("entity weights", self.entity_weights)
tf.summary.histogram("relation weights", self.rel_weights)
tf.summary.histogram("dense weights", self.convE_fc_w)
def _get_optimizer(self):
return self.train_op, self.loss, self.ll_loss
| 9,042 | 45.137755 | 120 | py |
UString | UString-master/main.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import os, time
import argparse
import shutil
from torch.utils.data import DataLoader
from src.Models import UString
from src.eval_tools import evaluation, print_results, vis_results
import ipdb
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
from tqdm import tqdm
from sklearn.metrics import average_precision_score
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
ROOT_PATH = os.path.dirname(__file__)
def average_losses(losses_all):
total_loss, cross_entropy, log_posterior, log_prior, aux_loss, rank_loss = 0, 0, 0, 0, 0, 0
losses_mean = {}
for losses in losses_all:
total_loss += losses['total_loss']
cross_entropy += losses['cross_entropy']
log_posterior += losses['log_posterior']
log_prior += losses['log_prior']
aux_loss += losses['auxloss']
rank_loss += losses['ranking']
losses_mean['total_loss'] = total_loss / len(losses_all)
losses_mean['cross_entropy'] = cross_entropy / len(losses_all)
losses_mean['log_posterior'] = log_posterior / len(losses_all)
losses_mean['log_prior'] = log_prior / len(losses_all)
losses_mean['auxloss'] = aux_loss / len(losses_all)
losses_mean['ranking'] = rank_loss / len(losses_all)
return losses_mean
def test_all(testdata_loader, model):
all_pred = []
all_labels = []
all_toas = []
losses_all = []
with torch.no_grad():
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas) in enumerate(testdata_loader):
# run forward inference
losses, all_outputs, hiddens = model(batch_xs, batch_ys, batch_toas, graph_edges,
hidden_in=None, edge_weights=edge_weights, npass=10, nbatch=len(testdata_loader), testing=False)
# make total loss
losses['total_loss'] = p.loss_alpha * (losses['log_posterior'] - losses['log_prior']) + losses['cross_entropy']
losses['total_loss'] += p.loss_beta * losses['auxloss']
losses['total_loss'] += p.loss_yita * losses['ranking']
losses_all.append(losses)
num_frames = batch_xs.size()[1]
batch_size = batch_xs.size()[0]
pred_frames = np.zeros((batch_size, num_frames), dtype=np.float32)
# run inference
for t in range(num_frames):
pred = all_outputs[t]['pred_mean']
pred = pred.cpu().numpy() if pred.is_cuda else pred.detach().numpy()
pred_frames[:, t] = np.exp(pred[:, 1]) / np.sum(np.exp(pred), axis=1)
# gather results and ground truth
all_pred.append(pred_frames)
label_onehot = batch_ys.cpu().numpy()
label = np.reshape(label_onehot[:, 1], [batch_size,])
all_labels.append(label)
toas = np.squeeze(batch_toas.cpu().numpy()).astype(np.int)
all_toas.append(toas)
all_pred = np.vstack((np.vstack(all_pred[:-1]), all_pred[-1]))
all_labels = np.hstack((np.hstack(all_labels[:-1]), all_labels[-1]))
all_toas = np.hstack((np.hstack(all_toas[:-1]), all_toas[-1]))
return all_pred, all_labels, all_toas, losses_all
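# Per-frame accident probability above is the softmax of the two output logits,
# p_t = exp(z_1) / (exp(z_0) + exp(z_1)), taken from all_outputs[t]['pred_mean'];
# stacking over t yields a (batch, num_frames) score curve that evaluation()
# later converts into AP and time-to-accident metrics.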
def test_all_vis(testdata_loader, model, vis=True, multiGPU=False, device=torch.device('cuda')):
if multiGPU:
model = torch.nn.DataParallel(model)
model = model.to(device=device)
model.eval()
all_pred = []
all_labels = []
all_toas = []
vis_data = []
all_uncertains = []
with torch.no_grad():
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas, detections, video_ids) in tqdm(enumerate(testdata_loader), desc="batch progress", total=len(testdata_loader)):
# run forward inference
losses, all_outputs, hiddens = model(batch_xs, batch_ys, batch_toas, graph_edges,
hidden_in=None, edge_weights=edge_weights, npass=10, nbatch=len(testdata_loader), testing=False, eval_uncertain=True)
num_frames = batch_xs.size()[1]
batch_size = batch_xs.size()[0]
pred_frames = np.zeros((batch_size, num_frames), dtype=np.float32)
pred_uncertains = np.zeros((batch_size, num_frames, 2), dtype=np.float32)
# run inference
for t in range(num_frames):
# prediction
pred = all_outputs[t]['pred_mean'] # B x 2
pred = pred.cpu().numpy() if pred.is_cuda else pred.detach().numpy()
pred_frames[:, t] = np.exp(pred[:, 1]) / np.sum(np.exp(pred), axis=1)
# uncertainties
aleatoric = all_outputs[t]['aleatoric'] # B x 2 x 2
aleatoric = aleatoric.cpu().numpy() if aleatoric.is_cuda else aleatoric.detach().numpy()
epistemic = all_outputs[t]['epistemic'] # B x 2 x 2
epistemic = epistemic.cpu().numpy() if epistemic.is_cuda else epistemic.detach().numpy()
pred_uncertains[:, t, 0] = aleatoric[:, 0, 0] + aleatoric[:, 1, 1]
pred_uncertains[:, t, 1] = epistemic[:, 0, 0] + epistemic[:, 1, 1]
# gather results and ground truth
all_pred.append(pred_frames)
label_onehot = batch_ys.cpu().numpy()
label = np.reshape(label_onehot[:, 1], [batch_size,])
all_labels.append(label)
toas = np.squeeze(batch_toas.cpu().numpy()).astype(np.int)
all_toas.append(toas)
all_uncertains.append(pred_uncertains)
if vis:
# gather data for visualization
vis_data.append({'pred_frames': pred_frames, 'label': label, 'pred_uncertain': pred_uncertains,
'toa': toas, 'detections': detections, 'video_ids': video_ids})
all_pred = np.vstack((np.vstack(all_pred[:-1]), all_pred[-1]))
all_labels = np.hstack((np.hstack(all_labels[:-1]), all_labels[-1]))
all_toas = np.hstack((np.hstack(all_toas[:-1]), all_toas[-1]))
all_uncertains = np.vstack((np.vstack(all_uncertains[:-1]), all_uncertains[-1]))
return all_pred, all_labels, all_toas, all_uncertains, vis_data
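# Uncertainty bookkeeping in the loop above: 'aleatoric' and 'epistemic' are
# 2x2 covariance matrices per sample, and the scalar stored per frame is their
# trace (element [0,0] plus element [1,1]), giving one aleatoric and one
# epistemic value per frame next to the accident probability.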
def write_scalars(logger, cur_epoch, cur_iter, losses, lr):
# fetch results
total_loss = losses['total_loss'].mean().item()
cross_entropy = losses['cross_entropy'].mean()
log_prior = losses['log_prior'].mean().item()
log_posterior = losses['log_posterior'].mean().item()
aux_loss = losses['auxloss'].mean().item()
rank_loss = losses['ranking'].mean().item()
# print info
print('----------------------------------')
print('epoch: %d, iter: %d' % (cur_epoch, cur_iter))
print('total loss = %.6f' % (total_loss))
print('cross_entropy = %.6f' % (cross_entropy))
print('log_posterior = %.6f' % (log_posterior))
print('log_prior = %.6f' % (log_prior))
print('aux_loss = %.6f' % (aux_loss))
print('rank_loss = %.6f' % (rank_loss))
# write to tensorboard
logger.add_scalars("train/losses/total_loss", {'total_loss': total_loss}, cur_iter)
logger.add_scalars("train/losses/cross_entropy", {'cross_entropy': cross_entropy}, cur_iter)
logger.add_scalars("train/losses/log_posterior", {'log_posterior': log_posterior}, cur_iter)
logger.add_scalars("train/losses/log_prior", {'log_prior': log_prior}, cur_iter)
logger.add_scalars("train/losses/complexity_cost", {'complexity_cost': log_posterior-log_prior}, cur_iter)
logger.add_scalars("train/losses/aux_loss", {'aux_loss': aux_loss}, cur_iter)
logger.add_scalars("train/losses/rank_loss", {'rank_loss': rank_loss}, cur_iter)
# write learning rate
logger.add_scalars("train/learning_rate/lr", {'lr': lr}, cur_iter)
def write_test_scalars(logger, cur_epoch, cur_iter, losses, metrics):
# fetch results
total_loss = losses['total_loss'].mean().item()
cross_entropy = losses['cross_entropy'].mean()
# write to tensorboard
loss_info = {'total_loss': total_loss, 'cross_entropy': cross_entropy}
aux_loss = losses['auxloss'].mean().item()
loss_info.update({'aux_loss': aux_loss})
logger.add_scalars("test/losses/total_loss", loss_info, cur_iter)
logger.add_scalars("test/accuracy/AP", {'AP': metrics['AP']}, cur_iter)
logger.add_scalars("test/accuracy/time-to-accident", {'mTTA': metrics['mTTA'],
'TTA_R80': metrics['TTA_R80']}, cur_iter)
def write_weight_histograms(writer, net, epoch):
writer.add_histogram('histogram/w1_mu', net.predictor.l1.weight_mu, epoch)
writer.add_histogram('histogram/w1_rho', net.predictor.l1.weight_rho, epoch)
writer.add_histogram('histogram/w2_mu', net.predictor.l2.weight_mu, epoch)
writer.add_histogram('histogram/w2_rho', net.predictor.l2.weight_rho, epoch)
writer.add_histogram('histogram/b1_mu', net.predictor.l1.bias_mu, epoch)
writer.add_histogram('histogram/b1_rho', net.predictor.l1.bias_rho, epoch)
writer.add_histogram('histogram/b2_mu', net.predictor.l2.bias_mu, epoch)
writer.add_histogram('histogram/b2_rho', net.predictor.l2.bias_rho, epoch)
def load_checkpoint(model, optimizer=None, filename='checkpoint.pth.tar', isTraining=True):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
if isTraining:
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model, optimizer, start_epoch
def train_eval():
### --- CONFIG PATH ---
data_path = os.path.join(ROOT_PATH, p.data_path, p.dataset)
# model snapshots
model_dir = os.path.join(p.output_dir, p.dataset, 'snapshot')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# tensorboard logging
logs_dir = os.path.join(p.output_dir, p.dataset, 'logs')
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
logger = SummaryWriter(logs_dir)
# gpu options
gpu_ids = [int(id) for id in p.gpus.split(',')]
print("Using GPU devices: ", gpu_ids)
os.environ['CUDA_VISIBLE_DEVICES'] = p.gpus
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# create data loader
if p.dataset == 'dad':
from src.DataLoader import DADDataset
train_data = DADDataset(data_path, p.feature_name, 'training', toTensor=True, device=device)
test_data = DADDataset(data_path, p.feature_name, 'testing', toTensor=True, device=device)
elif p.dataset == 'a3d':
from src.DataLoader import A3DDataset
train_data = A3DDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = A3DDataset(data_path, p.feature_name, 'test', toTensor=True, device=device)
elif p.dataset == 'crash':
from src.DataLoader import CrashDataset
train_data = CrashDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = CrashDataset(data_path, p.feature_name, 'test', toTensor=True, device=device)
else:
raise NotImplementedError
traindata_loader = DataLoader(dataset=train_data, batch_size=p.batch_size, shuffle=True, drop_last=True)
testdata_loader = DataLoader(dataset=test_data, batch_size=p.batch_size, shuffle=False, drop_last=True)
# building model
model = UString(train_data.dim_feature, p.hidden_dim, p.latent_dim,
n_layers=p.num_rnn, n_obj=train_data.n_obj, n_frames=train_data.n_frames, fps=train_data.fps,
with_saa=True, uncertain_ranking=True)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=p.base_lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)
if len(gpu_ids) > 1:
model = torch.nn.DataParallel(model)
model = model.to(device=device)
model.train() # set the model into training status
# resume training
start_epoch = -1
if p.resume:
model, optimizer, start_epoch = load_checkpoint(model, optimizer=optimizer, filename=p.model_file)
# write histograms
write_weight_histograms(logger, model, 0)
iter_cur = 0
best_metric = 0
for k in range(p.epoch):
if k <= start_epoch:
iter_cur += len(traindata_loader)
continue
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas) in enumerate(traindata_loader):
# ipdb.set_trace()
optimizer.zero_grad()
losses, all_outputs, hidden_st = model(batch_xs, batch_ys, batch_toas, graph_edges, edge_weights=edge_weights, npass=2, nbatch=len(traindata_loader), eval_uncertain=True)
complexity_loss = losses['log_posterior'] - losses['log_prior']
losses['total_loss'] = p.loss_alpha * complexity_loss + losses['cross_entropy']
losses['total_loss'] += p.loss_beta * losses['auxloss']
losses['total_loss'] += p.loss_yita * losses['ranking']
# backward
losses['total_loss'].mean().backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
optimizer.step()
# write the losses info
lr = optimizer.param_groups[0]['lr']
write_scalars(logger, k, iter_cur, losses, lr)
iter_cur += 1
# test and evaluate the model
if iter_cur % p.test_iter == 0:
model.eval()
all_pred, all_labels, all_toas, losses_all = test_all(testdata_loader, model)
model.train()
loss_val = average_losses(losses_all)
print('----------------------------------')
print("Starting evaluation...")
metrics = {}
metrics['AP'], metrics['mTTA'], metrics['TTA_R80'] = evaluation(all_pred, all_labels, all_toas, fps=test_data.fps)
print('----------------------------------')
# keep track of validation losses
write_test_scalars(logger, k, iter_cur, loss_val, metrics)
# save model
model_file = os.path.join(model_dir, 'bayesian_gcrnn_model_%02d.pth'%(k))
torch.save({'epoch': k,
'model': model.module.state_dict() if len(gpu_ids)>1 else model.state_dict(),
'optimizer': optimizer.state_dict()}, model_file)
if metrics['AP'] > best_metric:
best_metric = metrics['AP']
# update best model file
update_final_model(model_file, os.path.join(model_dir, 'final_model.pth'))
print('Model has been saved as: %s'%(model_file))
scheduler.step(losses['log_posterior'])
# write histograms
write_weight_histograms(logger, model, k+1)
logger.close()
def update_final_model(src_file, dest_file):
# source file must exist
assert os.path.exists(src_file), "src file does not exist!"
# destinate file should be removed first if exists
if os.path.exists(dest_file):
if not os.path.samefile(src_file, dest_file):
os.remove(dest_file)
# copy file
shutil.copyfile(src_file, dest_file)
def test_eval():
### --- CONFIG PATH ---
data_path = os.path.join(ROOT_PATH, p.data_path, p.dataset)
# result path
result_dir = os.path.join(p.output_dir, p.dataset, 'test')
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# visualization results
p.visualize = False if p.evaluate_all else p.visualize
vis_dir = None
if p.visualize:
vis_dir = os.path.join(result_dir, 'vis')
if not os.path.exists(vis_dir):
os.makedirs(vis_dir)
# gpu options
gpu_ids = [int(id) for id in p.gpus.split(',')]
os.environ['CUDA_VISIBLE_DEVICES'] = p.gpus
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# create data loader
if p.dataset == 'dad':
from src.DataLoader import DADDataset
test_data = DADDataset(data_path, p.feature_name, 'testing', toTensor=True, device=device, vis=True)
elif p.dataset == 'a3d':
from src.DataLoader import A3DDataset
test_data = A3DDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
elif p.dataset == 'crash':
from src.DataLoader import CrashDataset
test_data = CrashDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
else:
raise NotImplementedError
testdata_loader = DataLoader(dataset=test_data, batch_size=p.batch_size, shuffle=False, drop_last=True)
num_samples = len(test_data)
print("Number of testing samples: %d"%(num_samples))
# building model
model = UString(test_data.dim_feature, p.hidden_dim, p.latent_dim,
n_layers=p.num_rnn, n_obj=test_data.n_obj, n_frames=test_data.n_frames, fps=test_data.fps,
with_saa=True, uncertain_ranking=True)
# start to evaluate
if p.evaluate_all:
model_dir = os.path.join(p.output_dir, p.dataset, 'snapshot')
assert os.path.exists(model_dir)
Epochs, APvid_all, AP_all, mTTA_all, TTA_R80_all, Unc_all = [], [], [], [], [], []
modelfiles = sorted(os.listdir(model_dir))
for filename in modelfiles:
epoch_str = filename.split("_")[-1].split(".pth")[0]
print("Evaluation for epoch: " + epoch_str)
model_file = os.path.join(model_dir, filename)
model, _, _ = load_checkpoint(model, filename=model_file, isTraining=False)
# run model inference
all_pred, all_labels, all_toas, all_uncertains, _ = test_all_vis(testdata_loader, model, vis=False, device=device)
# evaluate results
AP, mTTA, TTA_R80 = evaluation(all_pred, all_labels, all_toas, fps=test_data.fps)
mUncertains = np.mean(all_uncertains, axis=(0, 1))
all_vid_scores = [max(pred[:int(toa)]) for toa, pred in zip(all_toas, all_pred)]
AP_video = average_precision_score(all_labels, all_vid_scores)
APvid_all.append(AP_video)
# save
Epochs.append(epoch_str)
AP_all.append(AP)
mTTA_all.append(mTTA)
TTA_R80_all.append(TTA_R80)
Unc_all.append(mUncertains)
# print results to file
print_results(Epochs, APvid_all, AP_all, mTTA_all, TTA_R80_all, Unc_all, result_dir)
else:
result_file = os.path.join(vis_dir, "..", "pred_res.npz")
if not os.path.exists(result_file):
model, _, _ = load_checkpoint(model, filename=p.model_file, isTraining=False)
# run model inference
all_pred, all_labels, all_toas, all_uncertains, vis_data = test_all_vis(testdata_loader, model, vis=True, device=device)
# save predictions
np.savez(result_file[:-4], pred=all_pred, label=all_labels, toas=all_toas, uncertainties=all_uncertains, vis_data=vis_data)
else:
print("Result file exists. Loaded from cache.")
all_results = np.load(result_file, allow_pickle=True)
all_pred, all_labels, all_toas, all_uncertains, vis_data = \
all_results['pred'], all_results['label'], all_results['toas'], all_results['uncertainties'], all_results['vis_data']
# evaluate results
all_vid_scores = [max(pred[:int(toa)]) for toa, pred in zip(all_toas, all_pred)]
AP_video = average_precision_score(all_labels, all_vid_scores)
print("video-level AP=%.5f"%(AP_video))
AP, mTTA, TTA_R80 = evaluation(all_pred, all_labels, all_toas, fps=test_data.fps)
# evaluate uncertainties
mUncertains = np.mean(all_uncertains, axis=(0, 1))
print("Mean aleatoric uncertainty: %.6f"%(mUncertains[0]))
print("Mean epistemic uncertainty: %.6f"%(mUncertains[1]))
# visualize
vis_results(vis_data, p.batch_size, vis_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default='./data',
help='The relative path of dataset.')
parser.add_argument('--dataset', type=str, default='dad', choices=['a3d', 'dad', 'crash'],
help='The name of dataset. Default: dad')
parser.add_argument('--base_lr', type=float, default=1e-3,
help='The base learning rate. Default: 1e-3')
parser.add_argument('--epoch', type=int, default=30,
help='The number of training epoches. Default: 30')
parser.add_argument('--batch_size', type=int, default=10,
help='The batch size in training process. Default: 10')
parser.add_argument('--num_rnn', type=int, default=1,
help='The number of RNN cells for each timestamp. Default: 1')
parser.add_argument('--feature_name', type=str, default='vgg16', choices=['vgg16', 'res101'],
help='The name of feature embedding methods. Default: vgg16')
parser.add_argument('--test_iter', type=int, default=64,
help='The number of iteration to perform a evaluation process. Default: 64')
parser.add_argument('--hidden_dim', type=int, default=256,
help='The dimension of hidden states in RNN. Default: 256')
parser.add_argument('--latent_dim', type=int, default=256,
help='The dimension of latent space. Default: 256')
parser.add_argument('--loss_alpha', type=float, default=0.001,
help='The weighting factor of posterior and prior losses. Default: 1e-3')
parser.add_argument('--loss_beta', type=float, default=10,
help='The weighting factor of auxiliary loss. Default: 10')
parser.add_argument('--loss_yita', type=float, default=10,
help='The weighting factor of uncertainty ranking loss. Default: 10')
parser.add_argument('--gpus', type=str, default="0",
help="The delimited list of GPU IDs separated with comma. Default: '0'.")
parser.add_argument('--phase', type=str, choices=['train', 'test'],
help='The state of running the model. Default: train')
parser.add_argument('--evaluate_all', action='store_true',
help='Whether to evaluate models of all epoches. Default: False')
parser.add_argument('--visualize', action='store_true',
help='The visualization flag. Default: False')
parser.add_argument('--resume', action='store_true',
help='If to resume the training. Default: False')
parser.add_argument('--model_file', type=str, default='./output_debug/bayes_gcrnn/vgg16/dad/snapshot/gcrnn_model_90.pth',
help='The trained GCRNN model file for demo test only.')
parser.add_argument('--output_dir', type=str, default='./output_debug/bayes_gcrnn/vgg16',
help='The directory to save model snapshots and logs during training.')
p = parser.parse_args()
if p.phase == 'test':
test_eval()
else:
train_eval()
| 23,703 | 48.280665 | 185 | py |
UString | UString-master/demo.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import os, sys
import os.path as osp
import argparse
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
import matplotlib.pyplot as plt
# the helpers below use mmcv for video reading, mmdetection's inference API for
# object detection, and this repo's UString model; these imports assume the
# standard mmdet API and the repository's src package layout
import mmcv
from mmdet.apis import inference_detector
from src.Models import UString
class VGG16(nn.Module):
def __init__(self):
super(VGG16, self).__init__()
VGG = models.vgg16(pretrained=True)
self.feature = VGG.features
self.classifier = nn.Sequential(*list(VGG.classifier.children())[:-3])
pretrained_dict = VGG.state_dict()
model_dict = self.classifier.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.classifier.load_state_dict(model_dict)
self.dim_feat = 4096
def forward(self, x):
output = self.feature(x)
output = output.view(output.size(0), -1)
output = self.classifier(output)
return output
def init_feature_extractor(backbone='vgg16', device=torch.device('cuda')):
feat_extractor = None
if backbone == 'vgg16':
feat_extractor = VGG16()
feat_extractor = feat_extractor.to(device=device)
feat_extractor.eval()
else:
raise NotImplementedError
return feat_extractor
def bbox_sampling(bbox_result, nbox=19, imsize=None, topN=5):
"""
imsize[0]: height
imsize[1]: width
"""
assert not isinstance(bbox_result, tuple)
bboxes = np.vstack(bbox_result) # n x 5
labels = [np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)]
labels = np.concatenate(labels) # n
ndet = bboxes.shape[0]
# fix bbox
new_boxes = []
for box, label in zip(bboxes, labels):
x1 = min(max(0, int(box[0])), imsize[1])
y1 = min(max(0, int(box[1])), imsize[0])
x2 = min(max(x1 + 1, int(box[2])), imsize[1])
y2 = min(max(y1 + 1, int(box[3])), imsize[0])
if (y2 - y1 + 1 > 2) and (x2 - x1 + 1 > 2):
new_boxes.append([x1, y1, x2, y2, box[4], label])
if len(new_boxes) == 0: # no bboxes
new_boxes.append([0, 0, imsize[1]-1, imsize[0]-1, 1.0, 0])
new_boxes = np.array(new_boxes, dtype=int)
# sampling
n_candidate = min(topN, len(new_boxes))
if len(new_boxes) <= nbox - n_candidate:
indices = np.random.choice(n_candidate, nbox - len(new_boxes), replace=True)
sampled_boxes = np.vstack((new_boxes, new_boxes[indices]))
elif len(new_boxes) > nbox - n_candidate and len(new_boxes) <= nbox:
indices = np.random.choice(n_candidate, nbox - len(new_boxes), replace=False)
sampled_boxes = np.vstack((new_boxes, new_boxes[indices]))
else:
sampled_boxes = new_boxes[:nbox]
return sampled_boxes
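# Sampling behaviour above: detections from all classes are stacked, clipped to
# the image and filtered to boxes more than 2 pixels wide and tall; if fewer
# than nbox remain, extra boxes are re-drawn from the first
# min(topN, len(new_boxes)) candidates to pad up to nbox, otherwise the first
# nbox boxes are kept, so the result is always (nbox, 6) rows of
# [x1, y1, x2, y2, score, label].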
def bbox_to_imroi(transform, bboxes, image):
imroi_data = []
for bbox in bboxes:
imroi = image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
imroi = transform(Image.fromarray(imroi)) # (3, 224, 224), torch.Tensor
imroi_data.append(imroi)
imroi_data = torch.stack(imroi_data)
return imroi_data
def extract_features(detector, feat_extractor, video_file, n_frames=100, n_boxes=19):
assert os.path.exists(video_file), video_file
# prepare video reader and data transformer
videoReader = mmcv.VideoReader(video_file)
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()]
)
features = np.zeros((n_frames, n_boxes + 1, feat_extractor.dim_feat), dtype=np.float32)
detections = np.zeros((n_frames, n_boxes, 6)) # (50 x 19 x 6)
frame_prev = None
for idx in range(n_frames):
if idx >= len(videoReader):
print("Copy frame from previous time step.")
frame = frame_prev.copy()
else:
frame = videoReader.get_frame(idx)
# run object detection inference
bbox_result = inference_detector(detector, frame)
# sampling a fixed number of bboxes
bboxes = bbox_sampling(bbox_result, nbox=n_boxes, imsize=frame.shape[:2])
detections[idx, :, :] = bboxes
# prepare frame data
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
with torch.no_grad():
# bboxes to roi feature
ims_roi = bbox_to_imroi(transform, bboxes, frame)
ims_roi = ims_roi.float().to(device=device)
feature_roi = feat_extractor(ims_roi)
# extract image feature
ims_frame = transform(Image.fromarray(frame))
ims_frame = torch.unsqueeze(ims_frame, dim=0).float().to(device=device)
feature_frame = feat_extractor(ims_frame)
# obtain feature matrix
features[idx, 0, :] = np.squeeze(feature_frame.cpu().numpy()) if feature_frame.is_cuda else np.squeeze(feature_frame.detach().numpy())
features[idx, 1:, :] = np.squeeze(feature_roi.cpu().numpy()) if feature_roi.is_cuda else np.squeeze(feature_roi.detach().numpy())
frame_prev = frame
return detections, features
def init_accident_model(model_file, dim_feature=4096, hidden_dim=256, latent_dim=256, n_obj=19, n_frames=50, fps=10.0):
# building model
model = UString(dim_feature, hidden_dim, latent_dim,
n_layers=1, n_obj=n_obj, n_frames=n_frames, fps=fps, with_saa=False, uncertain_ranking=True)
model = model.to(device=device)
model.eval()
# load check point
model, _, _ = load_checkpoint(model, filename=model_file, isTraining=False)
return model
def load_input_data(feature_file, device=torch.device('cuda')):
# load feature file and return the transformed data
data = np.load(feature_file)
features = data['data'] # 50 x 20 x 4096
labels = [0, 1]
detections = data['det'] # 50 x 19 x 6
toa = [45] # [useless]
def generate_st_graph(detections):
# create graph edges
num_frames, num_boxes = detections.shape[:2]
num_edges = int(num_boxes * (num_boxes - 1) / 2)
graph_edges = []
edge_weights = np.zeros((num_frames, num_edges), dtype=np.float32)
for i in range(num_frames):
# generate graph edges (fully-connected)
edge = generate_graph_from_list(range(num_boxes))
graph_edges.append(np.transpose(np.stack(edge).astype(np.int32))) # 2 x 171
# compute the edge weights by distance
edge_weights[i] = compute_graph_edge_weights(detections[i, :, :4], edge) # 171,
return graph_edges, edge_weights
def generate_graph_from_list(L, create_using=None):
import networkx, itertools
G = networkx.empty_graph(len(L),create_using)
if len(L)>1:
if G.is_directed():
edges = itertools.permutations(L,2)
else:
edges = itertools.combinations(L,2)
G.add_edges_from(edges)
graph_edges = list(G.edges())
return graph_edges
def compute_graph_edge_weights(boxes, edges):
"""
:param: boxes: (19, 4)
:param: edges: (171, 2)
:return: weights: (171,)
"""
N = boxes.shape[0]
assert len(edges) == N * (N-1) / 2
weights = np.ones((len(edges),), dtype=np.float32)
for i, edge in enumerate(edges):
c1 = [0.5 * (boxes[edge[0], 0] + boxes[edge[0], 2]),
0.5 * (boxes[edge[0], 1] + boxes[edge[0], 3])]
c2 = [0.5 * (boxes[edge[1], 0] + boxes[edge[1], 2]),
0.5 * (boxes[edge[1], 1] + boxes[edge[1], 3])]
d = (c1[0] - c2[0])**2 + (c1[1] - c2[1])**2
weights[i] = np.exp(-d)
# normalize weights
if np.sum(weights) > 0:
weights = weights / np.sum(weights) # N*(N-1)/2,
else:
weights = np.ones((len(edges),), dtype=np.float32)
return weights
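    # Worked example (comment only, toy numbers): two boxes with centers (5, 5) and (6, 6)
    # give d = 2 and an un-normalized weight exp(-2) ~ 0.135, while a far-away pair
    # contributes ~0, so after normalization nearby object pairs dominate the edge weights.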
graph_edges, edge_weights = generate_st_graph(detections)
# transform to torch.Tensor
features = torch.Tensor(np.expand_dims(features, axis=0)).to(device) # 50 x 20 x 4096
labels = torch.Tensor(np.expand_dims(labels, axis=0)).to(device)
graph_edges = torch.Tensor(np.expand_dims(graph_edges, axis=0)).long().to(device)
edge_weights = torch.Tensor(np.expand_dims(edge_weights, axis=0)).to(device)
toa = torch.Tensor(np.expand_dims(toa, axis=0)).to(device)
detections = np.expand_dims(detections, axis=0)
vid = feature_file.split('/')[-1].split('.')[0]
return features, labels, graph_edges, edge_weights, toa, detections, vid
def load_checkpoint(model, optimizer=None, filename='checkpoint.pth.tar', isTraining=True):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch']
# filter out modules only used in training
pretrained_dict = {k: v for k, v in checkpoint['model'].items() if not any(filtered in k for filtered in ['self_aggregation', 'predictor_aux'])}
model.load_state_dict(pretrained_dict)
# model.load_state_dict(checkpoint['model'])
if isTraining:
optimizer.load_state_dict(checkpoint['optimizer'])
# print("=> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model, optimizer, start_epoch
def parse_results(all_outputs, batch_size=1, n_frames=50):
# parse inference results
pred_score = np.zeros((batch_size, n_frames), dtype=np.float32)
pred_au = np.zeros((batch_size, n_frames), dtype=np.float32)
pred_eu = np.zeros((batch_size, n_frames), dtype=np.float32)
# run inference
for t in range(n_frames):
# prediction
pred = all_outputs[t]['pred_mean'] # B x 2
pred = pred.cpu().numpy() if pred.is_cuda else pred.detach().numpy()
pred_score[:, t] = np.exp(pred[:, 1]) / np.sum(np.exp(pred), axis=1)
# uncertainties
aleatoric = all_outputs[t]['aleatoric'] # B x 2 x 2
aleatoric = aleatoric.cpu().numpy() if aleatoric.is_cuda else aleatoric.detach().numpy()
epistemic = all_outputs[t]['epistemic'] # B x 2 x 2
epistemic = epistemic.cpu().numpy() if epistemic.is_cuda else epistemic.detach().numpy()
pred_au[:, t] = aleatoric[:, 0, 0] + aleatoric[:, 1, 1]
pred_eu[:, t] = epistemic[:, 0, 0] + epistemic[:, 1, 1]
return pred_score, pred_au, pred_eu
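# Editorial note (comment only): pred_score[:, t] above is the softmax probability of the
# accident class, exp(pred[:, 1]) / (exp(pred[:, 0]) + exp(pred[:, 1])), and the two
# uncertainty curves are the traces of the 2x2 aleatoric/epistemic covariance matrices.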
def get_video_frames(video_file, n_frames=50):
# get the video data
cap = cv2.VideoCapture(video_file)
ret, frame = cap.read()
video_data = []
counter = 0
while (ret):
video_data.append(frame)
ret, frame = cap.read()
counter += 1
assert len(video_data) >= n_frames, video_file
video_data = video_data[:n_frames]
return video_data
def preprocess_results(pred_score, aleatoric, epistemic, cumsum=False):
from scipy.interpolate import make_interp_spline
std_alea = 1.0 * np.sqrt(aleatoric)
std_epis = 1.0 * np.sqrt(epistemic)
# sampling
xvals = np.linspace(0,len(pred_score)-1,10)
    pred_mean_reduce = pred_score[xvals.astype(int)]
    pred_std_alea_reduce = std_alea[xvals.astype(int)]
    pred_std_epis_reduce = std_epis[xvals.astype(int)]
# smoothing
xvals_new = np.linspace(1,len(pred_score)+1, p.n_frames)
pred_score = make_interp_spline(xvals, pred_mean_reduce)(xvals_new)
std_alea = make_interp_spline(xvals, pred_std_alea_reduce)(xvals_new)
std_epis = make_interp_spline(xvals, pred_std_epis_reduce)(xvals_new)
pred_score[pred_score >= 1.0] = 1.0-1e-3
xvals = np.copy(xvals_new)
# copy the first value into x=0
xvals = np.insert(xvals_new, 0, 0)
pred_score = np.insert(pred_score, 0, pred_score[0])
std_alea = np.insert(std_alea, 0, std_alea[0])
std_epis = np.insert(std_epis, 0, std_epis[0])
# take cummulative sum of results
if cumsum:
pred_score = np.cumsum(pred_score)
pred_score = pred_score / np.max(pred_score)
return xvals, pred_score, std_alea, std_epis
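# Editorial note (comment only): the function above (1) subsamples the frame-level curve at
# 10 points, (2) spline-interpolates it back to p.n_frames points, (3) clips probabilities
# below 1.0, and (4) prepends the first value at x = 0 so the plotted curve starts at frame 0.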
def draw_curve(xvals, pred_score, std_alea, std_epis):
ax.fill_between(xvals, pred_score - std_alea, pred_score + std_alea, facecolor='wheat', alpha=0.5)
ax.fill_between(xvals, pred_score - std_epis, pred_score + std_epis, facecolor='yellow', alpha=0.5)
plt.plot(xvals, pred_score, linewidth=3.0)
plt.axhline(y=0.5, xmin=0, xmax=max(xvals)/(p.n_frames + 2), linewidth=3.0, color='g', linestyle='--')
# plt.grid(True)
plt.tight_layout()
def set_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
    torch.backends.cudnn.benchmark = False  # benchmarking selects non-deterministic kernels
    torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='visualize', choices=['extract_feature', 'inference', 'visualize'])
parser.add_argument('--gpu_id', help='GPU ID', type=int, default=0)
parser.add_argument('--n_frames', type=int, help='The number of input video frames.', default=50)
parser.add_argument('--seed', type=int, help='The random seed.', default=123)
parser.add_argument('--fps', type=float, help='The fps of input video.', default=10.0)
parser.add_argument('--fps_display', type=float, help='The fps of output video.', default=2.0)
# feature extraction
parser.add_argument('--video_file', type=str, default='demo/000821.mp4')
parser.add_argument('--mmdetection', type=str, help="the path to the mmdetection.", default="lib/mmdetection")
# inference
parser.add_argument('--feature_file', type=str, help="the path to the feature file.", default="demo/000821_feature.npz")
parser.add_argument('--ckpt_file', type=str, help="the path to the model file.", default="demo/final_model_ccd.pth")
# visualize
parser.add_argument('--result_file', type=str, help="the path to the result file.", default="demo/000821_result.npz")
parser.add_argument('--vis_file', type=str, help="the path to the visualization file.", default="demo/000821_vis.avi")
p = parser.parse_args()
set_random_seed(p.seed)
device = torch.device('cuda:'+str(p.gpu_id)) if torch.cuda.is_available() else torch.device('cpu')
if p.task == 'extract_feature':
from mmdet.apis import init_detector, inference_detector, show_result
import mmcv
# init object detector
cfg_file = osp.join(p.mmdetection, "configs/cascade_rcnn_x101_64x4d_fpn_1x_kitti2d.py")
model_file = osp.join(p.mmdetection, "work_dirs/cascade_rcnn_x101_64x4d_fpn_1x_kitti2d/latest.pth")
detector = init_detector(cfg_file, model_file, device=device)
# init feature extractor
feat_extractor = init_feature_extractor(backbone='vgg16', device=device)
# object detection & feature extraction
detections, features = extract_features(detector, feat_extractor, p.video_file, n_frames=p.n_frames)
feat_file = p.video_file[:-4] + '_feature.npz'
np.savez_compressed(feat_file, data=features, det=detections)
elif p.task == 'inference':
from src.Models import UString
# load feature file
features, labels, graph_edges, edge_weights, toa, detections, vid = load_input_data(p.feature_file, device=device)
# prepare model
model = init_accident_model(p.ckpt_file, dim_feature=features.shape[-1], n_frames=p.n_frames, fps=p.fps)
with torch.no_grad():
# run inference
_, all_outputs, _ = model(features, labels, toa, graph_edges, hidden_in=None, edge_weights=edge_weights, npass=10, eval_uncertain=True)
# parse and save results
pred_score, pred_au, pred_eu = parse_results(all_outputs, n_frames=p.n_frames)
result_file = osp.join(osp.dirname(p.feature_file), p.feature_file.split('/')[-1].split('_')[0] + '_result.npz')
np.savez_compressed(result_file, score=pred_score[0], aleatoric=pred_au[0], epistemic=pred_eu[0], det=detections[0])
elif p.task == 'visualize':
video_data = get_video_frames(p.video_file, n_frames=p.n_frames)
all_results = np.load(p.result_file, allow_pickle=True)
pred_score, aleatoric, epistemic, detections = all_results['score'], all_results['aleatoric'], all_results['epistemic'], all_results['det']
xvals, pred_score, std_alea, std_epis = preprocess_results(pred_score, aleatoric, epistemic, cumsum=False)
fig, ax = plt.subplots(1, figsize=(24, 3.5))
fontsize = 25
plt.ylim(0, 1.1)
plt.xlim(0, len(xvals)+1)
plt.ylabel('Probability', fontsize=fontsize)
plt.xlabel('Frame (FPS=%d)'%(p.fps), fontsize=fontsize)
plt.xticks(range(0, len(xvals)+1, int(p.n_frames / p.fps_display)), fontsize=fontsize)
plt.yticks(fontsize=fontsize)
from matplotlib.animation import FFMpegWriter
curve_writer = FFMpegWriter(fps=p.fps_display, metadata=dict(title='Movie Test', artist='Matplotlib',comment='Movie support!'))
with curve_writer.saving(fig, "demo/curve_video.mp4", 100):
for t in range(len(xvals)):
draw_curve(xvals[:(t+1)], pred_score[:(t+1)], std_alea[:(t+1)], std_epis[:(t+1)])
curve_writer.grab_frame()
curve_frames = get_video_frames("demo/curve_video.mp4", n_frames=p.n_frames)
# create video writer
video_writer = cv2.VideoWriter(p.vis_file, cv2.VideoWriter_fourcc(*'DIVX'), p.fps_display, (video_data[0].shape[1], video_data[0].shape[0]))
for t, frame in enumerate(video_data):
det_boxes = detections[t] # 19 x 6
for box in det_boxes:
if box[4] > 0:
print(box[4])
cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 3)
img = curve_frames[t]
width = frame.shape[1]
height = int(img.shape[0] * (width / img.shape[1]))
img = cv2.resize(img, (width, height), interpolation = cv2.INTER_AREA)
frame[frame.shape[0]-height:frame.shape[0]] = cv2.addWeighted(frame[frame.shape[0]-height:frame.shape[0]], 0.3, img, 0.7, 0)
video_writer.write(frame)
else:
print("invalid task.")
| 18,597 | 44.920988 | 152 | py |
UString | UString-master/src/DataLoader.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pickle
import torch
from torch.utils.data import Dataset
import networkx
import itertools
class DADDataset(Dataset):
def __init__(self, data_path, feature, phase='training', toTensor=False, device=torch.device('cuda'), vis=False):
self.data_path = os.path.join(data_path, feature + '_features')
self.feature = feature
self.phase = phase
self.toTensor = toTensor
self.device = device
self.vis = vis
self.n_frames = 100
self.n_obj = 19
self.fps = 20.0
self.dim_feature = self.get_feature_dim(feature)
filepath = os.path.join(self.data_path, phase)
self.files_list = self.get_filelist(filepath)
def __len__(self):
data_len = len(self.files_list)
return data_len
def get_feature_dim(self, feature_name):
if feature_name == 'vgg16':
return 4096
elif feature_name == 'res101':
return 2048
else:
raise ValueError
def get_filelist(self, filepath):
assert os.path.exists(filepath), "Directory does not exist: %s"%(filepath)
file_list = []
for filename in sorted(os.listdir(filepath)):
file_list.append(filename)
return file_list
def __getitem__(self, index):
data_file = os.path.join(self.data_path, self.phase, self.files_list[index])
assert os.path.exists(data_file)
try:
data = np.load(data_file)
features = data['data'] # 100 x 20 x 4096
labels = data['labels'] # 2
detections = data['det'] # 100 x 19 x 6
        except Exception:
raise IOError('Load data error! File: %s'%(data_file))
if labels[1] > 0:
toa = [90.0]
else:
toa = [self.n_frames + 1]
graph_edges, edge_weights = generate_st_graph(detections)
if self.toTensor:
features = torch.Tensor(features).to(self.device) # 100 x 20 x 4096
labels = torch.Tensor(labels).to(self.device)
graph_edges = torch.Tensor(graph_edges).long().to(self.device)
edge_weights = torch.Tensor(edge_weights).to(self.device)
toa = torch.Tensor(toa).to(self.device)
if self.vis:
video_id = str(data['ID'])[5:11] # e.g.: b001_000490_*
return features, labels, graph_edges, edge_weights, toa, detections, video_id
else:
return features, labels, graph_edges, edge_weights, toa
class A3DDataset(Dataset):
def __init__(self, data_path, feature, phase='train', toTensor=False, device=torch.device('cuda'), vis=False):
self.data_path = data_path
self.feature = feature
self.phase = phase
self.toTensor = toTensor
self.device = device
self.vis = vis
self.n_frames = 100
self.n_obj = 19
self.fps = 20.0
self.dim_feature = self.get_feature_dim(feature)
self.files_list, self.labels_list = self.read_datalist(data_path, phase)
def __len__(self):
data_len = len(self.files_list)
return data_len
def get_feature_dim(self, feature_name):
if feature_name == 'vgg16':
return 4096
elif feature_name == 'res101':
return 2048
else:
raise ValueError
def read_datalist(self, data_path, phase):
# load training set
list_file = os.path.join(data_path, self.feature + '_features', '%s.txt' % (phase))
assert os.path.exists(list_file), "file not exists: %s"%(list_file)
fid = open(list_file, 'r')
data_files, data_labels = [], []
for line in fid.readlines():
filename, label = line.rstrip().split(' ')
data_files.append(filename)
data_labels.append(int(label))
fid.close()
return data_files, data_labels
def get_toa(self, clip_id):
# handle clip id like "uXXC8uQHCoc_000011_0" which should be "uXXC8uQHCoc_000011"
clip_id = clip_id if len(clip_id.split('_')[-1]) > 1 else clip_id[:-2]
label_file = os.path.join(self.data_path, 'frame_labels', clip_id + '.txt')
assert os.path.exists(label_file)
f = open(label_file, 'r')
label_all = []
for line in f.readlines():
label = int(line.rstrip().split(' ')[1])
label_all.append(label)
f.close()
label_all = np.array(label_all, dtype=np.int32)
toa = np.where(label_all == 1)[0][0]
toa = max(1, toa) # time-of-accident should not be equal to zero
return toa
def __getitem__(self, index):
data_file = os.path.join(self.data_path, self.feature + '_features', self.files_list[index])
assert os.path.exists(data_file), "file not exists: %s"%(data_file)
data = np.load(data_file)
features = data['features']
label = self.labels_list[index]
label_onehot = np.array([0, 1]) if label > 0 else np.array([1, 0])
# get time of accident
file_id = self.files_list[index].split('/')[1].split('.npz')[0]
if label > 0:
toa = [self.get_toa(file_id)]
else:
toa = [self.n_frames + 1]
# construct graph
attr = 'positive' if label > 0 else 'negative'
dets_file = os.path.join(self.data_path, 'detections', attr, file_id + '.pkl')
assert os.path.exists(dets_file), "file not exists: %s"%(dets_file)
with open(dets_file, 'rb') as f:
detections = pickle.load(f)
detections = np.array(detections) # 100 x 19 x 6
graph_edges, edge_weights = generate_st_graph(detections)
f.close()
if self.toTensor:
features = torch.Tensor(features).to(self.device) # 100 x 20 x 4096
label_onehot = torch.Tensor(label_onehot).to(self.device) # 2
graph_edges = torch.Tensor(graph_edges).long().to(self.device)
edge_weights = torch.Tensor(edge_weights).to(self.device)
toa = torch.Tensor(toa).to(self.device)
if self.vis:
# file_id = file_id if len(file_id.split('_')[-1]) > 1 else file_id[:-2]
# video_path = os.path.join(self.data_path, 'video_frames', file_id, 'images')
# assert os.path.exists(video_path), video_path
return features, label_onehot, graph_edges, edge_weights, toa, detections, file_id
else:
return features, label_onehot, graph_edges, edge_weights, toa
class CrashDataset(Dataset):
def __init__(self, data_path, feature, phase='train', toTensor=False, device=torch.device('cuda'), vis=False):
self.data_path = data_path
self.feature = feature
self.phase = phase
self.toTensor = toTensor
self.device = device
self.vis = vis
self.n_frames = 50
self.n_obj = 19
self.fps = 10.0
self.dim_feature = self.get_feature_dim(feature)
self.files_list, self.labels_list = self.read_datalist(data_path, phase)
self.toa_dict = self.get_toa_all(data_path)
def __len__(self):
data_len = len(self.files_list)
return data_len
def get_feature_dim(self, feature_name):
if feature_name == 'vgg16':
return 4096
elif feature_name == 'res101':
return 2048
else:
raise ValueError
def read_datalist(self, data_path, phase):
# load training set
list_file = os.path.join(data_path, self.feature + '_features', '%s.txt' % (phase))
assert os.path.exists(list_file), "file not exists: %s"%(list_file)
fid = open(list_file, 'r')
data_files, data_labels = [], []
for line in fid.readlines():
filename, label = line.rstrip().split(' ')
data_files.append(filename)
data_labels.append(int(label))
fid.close()
return data_files, data_labels
def get_toa_all(self, data_path):
toa_dict = {}
annofile = os.path.join(data_path, 'videos', 'Crash-1500.txt')
annoData = self.read_anno_file(annofile)
for anno in annoData:
            labels = np.array(anno['label'], dtype=int)
toa = np.where(labels == 1)[0][0]
toa = min(max(1, toa), self.n_frames-1)
toa_dict[anno['vid']] = toa
return toa_dict
def read_anno_file(self, anno_file):
assert os.path.exists(anno_file), "Annotation file does not exist! %s"%(anno_file)
result = []
with open(anno_file, 'r') as f:
for line in f.readlines():
items = {}
items['vid'] = line.strip().split(',[')[0]
labels = line.strip().split(',[')[1].split('],')[0]
items['label'] = [int(val) for val in labels.split(',')]
assert sum(items['label']) > 0, 'invalid accident annotation!'
others = line.strip().split(',[')[1].split('],')[1].split(',')
items['startframe'], items['vid_ytb'], items['lighting'], items['weather'], items['ego_involve'] = others
result.append(items)
f.close()
return result
def __getitem__(self, index):
data_file = os.path.join(self.data_path, self.feature + '_features', self.files_list[index])
assert os.path.exists(data_file), "file not exists: %s"%(data_file)
try:
data = np.load(data_file)
features = data['data'] # 50 x 20 x 4096
labels = data['labels'] # 2
detections = data['det'] # 50 x 19 x 6
vid = str(data['ID'])
        except Exception:
raise IOError('Load data error! File: %s'%(data_file))
if labels[1] > 0:
toa = [self.toa_dict[vid]]
else:
toa = [self.n_frames + 1]
graph_edges, edge_weights = generate_st_graph(detections)
if self.toTensor:
features = torch.Tensor(features).to(self.device) # 50 x 20 x 4096
labels = torch.Tensor(labels).to(self.device)
graph_edges = torch.Tensor(graph_edges).long().to(self.device)
edge_weights = torch.Tensor(edge_weights).to(self.device)
toa = torch.Tensor(toa).to(self.device)
if self.vis:
return features, labels, graph_edges, edge_weights, toa, detections, vid
else:
return features, labels, graph_edges, edge_weights, toa
def generate_st_graph(detections):
# create graph edges
num_frames, num_boxes = detections.shape[:2]
num_edges = int(num_boxes * (num_boxes - 1) / 2)
graph_edges = []
edge_weights = np.zeros((num_frames, num_edges), dtype=np.float32)
for i in range(num_frames):
# generate graph edges (fully-connected)
edge = generate_graph_from_list(range(num_boxes))
graph_edges.append(np.transpose(np.stack(edge).astype(np.int32))) # 2 x 171
# compute the edge weights by distance
edge_weights[i] = compute_graph_edge_weights(detections[i, :, :4], edge) # 171,
return graph_edges, edge_weights
def generate_graph_from_list(L, create_using=None):
G = networkx.empty_graph(len(L),create_using)
if len(L)>1:
if G.is_directed():
edges = itertools.permutations(L,2)
else:
edges = itertools.combinations(L,2)
G.add_edges_from(edges)
graph_edges = list(G.edges())
return graph_edges
def compute_graph_edge_weights(boxes, edges):
"""
:param: boxes: (19, 4)
:param: edges: (171, 2)
:return: weights: (171,)
"""
N = boxes.shape[0]
assert len(edges) == N * (N-1) / 2
weights = np.ones((len(edges),), dtype=np.float32)
for i, edge in enumerate(edges):
c1 = [0.5 * (boxes[edge[0], 0] + boxes[edge[0], 2]),
0.5 * (boxes[edge[0], 1] + boxes[edge[0], 3])]
c2 = [0.5 * (boxes[edge[1], 0] + boxes[edge[1], 2]),
0.5 * (boxes[edge[1], 1] + boxes[edge[1], 3])]
d = (c1[0] - c2[0])**2 + (c1[1] - c2[1])**2
weights[i] = np.exp(-d)
# normalize weights
if np.sum(weights) > 0:
weights = weights / np.sum(weights) # N*(N-1)/2,
else:
weights = np.ones((len(edges),), dtype=np.float32)
return weights
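# Illustrative sketch (not part of the original pipeline): a tiny helper that exercises the
# two functions above on toy data. The box coordinates are hypothetical and chosen only to
# show that nearby boxes receive a larger normalized weight than distant ones.
def _demo_edge_weights():
    boxes = np.array([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [100., 100., 110., 110.]], dtype=np.float32)
    edges = generate_graph_from_list(range(3))  # [(0, 1), (0, 2), (1, 2)]
    return compute_graph_edge_weights(boxes, edges)  # sums to 1; edge (0, 1) dominates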
if __name__ == '__main__':
from torch.utils.data import DataLoader
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default='./data',
help='The relative path of dataset.')
parser.add_argument('--dataset', type=str, default='dad', choices=['a3d', 'dad', 'crash'],
help='The name of dataset. Default: dad')
parser.add_argument('--batch_size', type=int, default=10,
help='The batch size in training process. Default: 10')
parser.add_argument('--feature_name', type=str, default='vgg16', choices=['vgg16', 'res101'],
help='The name of feature embedding methods. Default: vgg16')
p = parser.parse_args()
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
data_path = os.path.join(ROOT_PATH, p.data_path, p.dataset)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# create data loader
if p.dataset == 'dad':
train_data = DADDataset(data_path, p.feature_name, 'training', toTensor=True, device=device)
test_data = DADDataset(data_path, p.feature_name, 'testing', toTensor=True, device=device, vis=True)
elif p.dataset == 'a3d':
train_data = A3DDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = A3DDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
elif p.dataset == 'crash':
train_data = CrashDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = CrashDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
else:
raise NotImplementedError
traindata_loader = DataLoader(dataset=train_data, batch_size=p.batch_size, shuffle=True, drop_last=True)
testdata_loader = DataLoader(dataset=test_data, batch_size=p.batch_size, shuffle=False, drop_last=True)
for e in range(2):
print('Epoch: %d'%(e))
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas) in tqdm(enumerate(traindata_loader), total=len(traindata_loader)):
if i == 0:
print('feature dim:', batch_xs.size())
print('label dim:', batch_ys.size())
print('graph edges dim:', graph_edges.size())
print('edge weights dim:', edge_weights.size())
print('time of accidents dim:', batch_toas.size())
for e in range(2):
print('Epoch: %d'%(e))
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas, detections, video_ids) in \
tqdm(enumerate(testdata_loader), desc="batch progress", total=len(testdata_loader)):
if i == 0:
print('feature dim:', batch_xs.size())
print('label dim:', batch_ys.size())
print('graph edges dim:', graph_edges.size())
print('edge weights dim:', edge_weights.size())
print('time of accidents dim:', batch_toas.size())
| 15,669 | 39.386598 | 141 | py |
UString | UString-master/src/BayesModels.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class Gaussian(object):
def __init__(self, mu, rho):
super().__init__()
self.mu = mu
self.rho = rho
self.normal = torch.distributions.Normal(0,1)
@property
def sigma(self):
return torch.log1p(torch.exp(self.rho))
def sample(self):
epsilon = self.normal.sample(self.rho.size()).to(self.mu.device)
return self.mu + self.sigma * epsilon
def log_prob(self, input):
return (-math.log(math.sqrt(2 * math.pi))
- torch.log(self.sigma)
- ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()
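# Editorial sketch (comment only): Gaussian implements the reparameterization trick,
# w = mu + sigma * eps with eps ~ N(0, 1) and sigma = log(1 + exp(rho)) (softplus), so
# gradients can flow to mu and rho through sampled weights; e.g. rho = 0 gives sigma ~ 0.693.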
class ScaleMixtureGaussian(object):
def __init__(self, pi, sigma1, sigma2):
super().__init__()
self.pi = pi
self.sigma1 = sigma1
self.sigma2 = sigma2
def log_prob(self, input):
gaussian1 = torch.distributions.Normal(0, self.sigma1.to(input.device))
gaussian2 = torch.distributions.Normal(0, self.sigma2.to(input.device))
prob1 = torch.exp(gaussian1.log_prob(input))
prob2 = torch.exp(gaussian2.log_prob(input))
return (torch.log(self.pi * prob1 + (1-self.pi) * prob2)).sum()
class BayesianLinear(nn.Module):
def __init__(self, in_features, out_features, pi=0.5, sigma_1=None, sigma_2=None):
super().__init__()
self.in_features = in_features
self.out_features = out_features
if sigma_1 is None or sigma_2 is None:
sigma_1 = torch.FloatTensor([math.exp(-0)])
sigma_2 = torch.FloatTensor([math.exp(-6)])
# Weight parameters
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2))
self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-5,-4))
self.weight = Gaussian(self.weight_mu, self.weight_rho)
# Bias parameters
self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2))
self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-5,-4))
self.bias = Gaussian(self.bias_mu, self.bias_rho)
# Prior distributions
self.weight_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2)
self.bias_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2)
self.log_prior = 0
self.log_variational_posterior = 0
def forward(self, input, sample=False, calculate_log_probs=False):
if self.training or sample:
weight = self.weight.sample()
bias = self.bias.sample()
else:
weight = self.weight.mu
bias = self.bias.mu
if self.training or calculate_log_probs:
self.log_prior = self.weight_prior.log_prob(weight) + self.bias_prior.log_prob(bias)
self.log_variational_posterior = self.weight.log_prob(weight) + self.bias.log_prob(bias)
else:
self.log_prior, self.log_variational_posterior = 0, 0
return F.linear(input, weight, bias)
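# Illustrative sketch (assumed example, not part of the original code): build a small
# BayesianLinear layer and draw two stochastic forward passes; with sample=True each call
# uses freshly sampled weights, so the two outputs generally differ.
def _demo_bayesian_linear():
    layer = BayesianLinear(in_features=8, out_features=2)
    x = torch.randn(4, 8)
    out1 = layer(x, sample=True)
    out2 = layer(x, sample=True)
    return out1, out2  # both of shape (4, 2)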
| 3,130 | 38.632911 | 100 | py |
UString | UString-master/src/Models.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from torch.nn.parameter import Parameter
import torch
import torch.nn as nn
from src.utils import glorot, zeros, uniform, reset
from torch_geometric.utils import remove_self_loops, add_self_loops
import torch_scatter
from torch_scatter import scatter_mean, scatter_max, scatter_add
from torch.autograd import Variable
import torch.nn.functional as F
from src.BayesModels import BayesianLinear
class MessagePassing(torch.nn.Module):
r"""Base class for creating message passing layers
.. math::
\mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i,
\square_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}}
\left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{i,j}\right) \right),
where :math:`\square` denotes a differentiable, permutation invariant
function, *e.g.*, sum, mean or max, and :math:`\gamma_{\mathbf{\Theta}}`
and :math:`\phi_{\mathbf{\Theta}}` denote differentiable functions such as
MLPs.
See `here <https://rusty1s.github.io/pytorch_geometric/build/html/notes/
create_gnn.html>`__ for the accompanying tutorial.
"""
def __init__(self, aggr='add'):
super(MessagePassing, self).__init__()
        self.message_args = inspect.getfullargspec(self.message)[0][1:]
        self.update_args = inspect.getfullargspec(self.update)[0][2:]
def propagate(self, aggr, edge_index, **kwargs):
r"""The initial call to start propagating messages.
Takes in an aggregation scheme (:obj:`"add"`, :obj:`"mean"` or
:obj:`"max"`), the edge indices, and all additional data which is
needed to construct messages and to update node embeddings."""
assert aggr in ['add', 'mean', 'max']
kwargs['edge_index'] = edge_index
size = None
message_args = []
for arg in self.message_args:
if arg[-2:] == '_i':
tmp = kwargs[arg[:-2]]
size = tmp.size(0)
message_args.append(tmp[edge_index[0]])
elif arg[-2:] == '_j':
tmp = kwargs[arg[:-2]]
size = tmp.size(0)
message_args.append(tmp[edge_index[1]])
else:
message_args.append(kwargs[arg])
update_args = [kwargs[arg] for arg in self.update_args]
out = self.message(*message_args)
out = scatter_(aggr, out, edge_index[0], dim_size=size)
out = self.update(out, *update_args)
return out
def message(self, x_j): # pragma: no cover
r"""Constructs messages in analogy to :math:`\phi_{\mathbf{\Theta}}`
for each edge in :math:`(i,j) \in \mathcal{E}`.
Can take any argument which was initially passed to :meth:`propagate`.
In addition, features can be lifted to the source node :math:`i` and
target node :math:`j` by appending :obj:`_i` or :obj:`_j` to the
variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`."""
return x_j
def update(self, aggr_out): # pragma: no cover
r"""Updates node embeddings in analogy to
:math:`\gamma_{\mathbf{\Theta}}` for each node
:math:`i \in \mathcal{V}`.
Takes in the output of aggregation as first argument and any argument
which was initially passed to :meth:`propagate`."""
return aggr_out
def scatter_(name, src, index, dim_size=None):
r"""Aggregates all values from the :attr:`src` tensor at the indices
specified in the :attr:`index` tensor along the first dimension.
If multiple indices reference the same location, their contributions
are aggregated according to :attr:`name` (either :obj:`"add"`,
:obj:`"mean"` or :obj:`"max"`).
Args:
name (string): The aggregation to use (:obj:`"add"`, :obj:`"mean"`,
:obj:`"max"`).
src (Tensor): The source tensor.
index (LongTensor): The indices of elements to scatter.
dim_size (int, optional): Automatically create output tensor with size
:attr:`dim_size` in the first dimension. If set to :attr:`None`, a
minimal sized output tensor is returned. (default: :obj:`None`)
:rtype: :class:`Tensor`
"""
assert name in ['add', 'mean', 'max']
op = getattr(torch_scatter, 'scatter_{}'.format(name))
    fill_value = -1e38 if name == 'max' else 0
out = op(src, index, 0, None, dim_size, fill_value)
if isinstance(out, tuple):
out = out[0]
    if name == 'max':
out[out == fill_value] = 0
return out
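# Worked example (comment only, assumed values): with src = [[1.], [2.], [3.]],
# index = [0, 0, 1] and name='add', scatter_ returns [[3.], [3.]] because the first two rows
# share destination index 0 and are summed; for name='max' the fill value -1e38 marks
# untouched slots and is reset to 0 afterwards.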
# layers
class GCNConv(MessagePassing):
def __init__(self, in_channels, out_channels, act=F.relu, improved=True, bias=False):
super(GCNConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.act = act
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
def add_self_loops(self, edge_index, edge_weight=None, fill_value=1, num_nodes=None):
"""
:param edge_index: 10 x 2 x 171
:param edge_weight: 10 x 171
:param fill_value: 1
:param num_nodes: 20
:return:
"""
batch_size = edge_index.size(0)
num_nodes = edge_index.max().item() + 1 if num_nodes is None else num_nodes
loop_index = torch.arange(0, num_nodes, dtype=torch.long,
device=edge_index.device)
loop_index = loop_index.unsqueeze(0).repeat(2, 1)
loop_index = loop_index.unsqueeze(0).repeat(batch_size, 1, 1) # 10 x 2 x 20
if edge_weight is not None:
assert edge_weight.size(-1) == edge_index.size(-1)
loop_weight = edge_weight.new_full((num_nodes,), fill_value)
loop_weight = loop_weight.unsqueeze(0).repeat(batch_size, 1)
edge_weight = torch.cat([edge_weight, loop_weight], dim=-1)
edge_index = torch.cat([edge_index, loop_index], dim=-1)
return edge_index, edge_weight
def forward(self, x, edge_index, edge_weight=None):
if edge_weight is None:
edge_weight = torch.ones(
(edge_index.size(0), edge_index.size(-1), ), dtype=x.dtype, device=x.device)
# edge_weight = edge_weight.view(-1)
assert edge_weight.size(-1) == edge_index.size(-1)
# for pytorch 1.4, there are two outputs
edge_index, edge_weight = self.add_self_loops(edge_index, edge_weight=edge_weight, num_nodes=x.size(1))
out_batch = []
for i in range(edge_index.size(0)):
row, col = edge_index[i]
deg = scatter_add(edge_weight[i], row, dim=0, dim_size=x.size(1))
deg_inv = deg.pow(-0.5)
deg_inv[deg_inv == float('inf')] = 0
norm = deg_inv[row] * edge_weight[i] * deg_inv[col]
weight = self.weight.to(x.device)
x_w = torch.matmul(x[i], weight)
out = self.propagate('add', edge_index[i], x=x_w, norm=norm)
out_batch.append(self.act(out))
out_batch = torch.stack(out_batch)
return out_batch
def message(self, x_j, norm):
return norm.view(-1, 1) * x_j
def update(self, aggr_out):
if self.bias is not None:
bias = self.bias.to(aggr_out.device)
aggr_out = aggr_out + bias
return aggr_out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
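# Hedged usage sketch (hypothetical shapes): for a batch of 10 graphs with 20 nodes and
# 512-dim features, x is (10, 20, 512), edge_index is (10, 2, E) and edge_weight is (10, E);
# conv = GCNConv(512, 256) then conv(x, edge_index, edge_weight) yields (10, 20, 256).
# Self-loops are appended internally and symmetric normalization D^-1/2 A D^-1/2 is applied
# per graph inside the batch loop.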
class Graph_GRU_GCN(nn.Module):
def __init__(self, input_size, hidden_size, n_layer, bias=True):
super(Graph_GRU_GCN, self).__init__()
self.hidden_size = hidden_size
self.n_layer = n_layer
# gru weights
self.weight_xz = []
self.weight_hz = []
self.weight_xr = []
self.weight_hr = []
self.weight_xh = []
self.weight_hh = []
for i in range(self.n_layer):
if i == 0:
self.weight_xz.append(GCNConv(input_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_hz.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_xr.append(GCNConv(input_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_hr.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_xh.append(GCNConv(input_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_hh.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
else:
self.weight_xz.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_hz.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_xr.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_hr.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_xh.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
self.weight_hh.append(GCNConv(hidden_size, hidden_size, act=lambda x: x, bias=bias))
def forward(self, inp, edgidx, h, edge_weight=None):
h_out = torch.zeros(h.size())
h_out = h_out.to(inp.device)
for i in range(self.n_layer):
if i == 0:
z_g = torch.sigmoid(self.weight_xz[i](inp, edgidx, edge_weight) + self.weight_hz[i](h[i], edgidx, edge_weight))
r_g = torch.sigmoid(self.weight_xr[i](inp, edgidx, edge_weight) + self.weight_hr[i](h[i], edgidx, edge_weight))
h_tilde_g = torch.tanh(self.weight_xh[i](inp, edgidx, edge_weight) + self.weight_hh[i](r_g * h[i], edgidx, edge_weight))
h_out[i] = z_g * h[i] + (1 - z_g) * h_tilde_g
else:
z_g = torch.sigmoid(self.weight_xz[i](h_out[i - 1], edgidx, edge_weight) + self.weight_hz[i](h[i], edgidx, edge_weight))
r_g = torch.sigmoid(self.weight_xr[i](h_out[i - 1], edgidx, edge_weight) + self.weight_hr[i](h[i], edgidx, edge_weight))
h_tilde_g = torch.tanh(self.weight_xh[i](h_out[i - 1], edgidx, edge_weight) + self.weight_hh[i](r_g * h[i], edgidx, edge_weight))
h_out[i] = z_g * h[i] + (1 - z_g) * h_tilde_g
return h_out
class AccidentPredictor(nn.Module):
def __init__(self, input_dim, output_dim=2, act=torch.relu, dropout=[0, 0]):
super(AccidentPredictor, self).__init__()
self.act = act
self.dropout = dropout
self.dense1 = torch.nn.Linear(input_dim, 64)
self.dense2 = torch.nn.Linear(64, output_dim)
def forward(self, x):
x = F.dropout(x, self.dropout[0], training=self.training)
x = self.act(self.dense1(x))
x = F.dropout(x, self.dropout[1], training=self.training)
x = self.dense2(x)
return x
class SelfAttAggregate(torch.nn.Module):
def __init__(self, agg_dim):
super(SelfAttAggregate, self).__init__()
self.agg_dim = agg_dim
self.weight = nn.Parameter(torch.Tensor(agg_dim, 1)) # (100, 1)
self.softmax = nn.Softmax(dim=-1)
# initialize parameters
import math
torch.nn.init.kaiming_normal_(self.weight, a=math.sqrt(5))
def forward(self, hiddens, avgsum='sum'):
"""
hiddens: (10, 19, 256, 100)
"""
maxpool = torch.max(hiddens, dim=1)[0] # (10, 256, 100)
if avgsum=='sum':
avgpool = torch.sum(hiddens, dim=1)
else:
avgpool = torch.mean(hiddens, dim=1) # (10, 256, 100)
agg_spatial = torch.cat((avgpool, maxpool), dim=1) # (10, 512, 100)
# soft-attention
energy = torch.bmm(agg_spatial.permute([0, 2, 1]), agg_spatial) # (10, 100, 100)
attention = self.softmax(energy)
weighted_feat = torch.bmm(attention, agg_spatial.permute([0, 2, 1])) # (10, 100, 512)
weight = self.weight.unsqueeze(0).repeat([hiddens.size(0), 1, 1])
agg_feature = torch.bmm(weighted_feat.permute([0, 2, 1]), weight) # (10, 512, 1)
return agg_feature.squeeze(dim=-1) # (10, 512)
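# Editorial note (comment only): for hiddens of shape (B, n_obj, D, T) the module above
# max/avg-pools over objects, builds a (B, 2D, T) spatio-temporal map, computes a T x T
# soft-attention energy X^T X, re-weights the frames, and projects them with a learned
# (T, 1) weight to obtain a single (B, 2D) video-level embedding.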
class BayesianPredictor(nn.Module):
def __init__(self, input_dim, output_dim=2, act=torch.relu, pi=0.5, sigma_1=None, sigma_2=None):
super(BayesianPredictor, self).__init__()
self.act = act
self.l1 = BayesianLinear(input_dim, 64, pi=pi, sigma_1=sigma_1, sigma_2=sigma_2)
self.l2 = BayesianLinear(64, output_dim, pi=pi, sigma_1=sigma_1, sigma_2=sigma_2)
def forward(self, x, sample=False):
x = self.act(self.l1(x, sample))
x = self.l2(x, sample)
return x
def log_prior(self):
return self.l1.log_prior + self.l2.log_prior
def log_variational_posterior(self):
return self.l1.log_variational_posterior + self.l2.log_variational_posterior
def sample_elbo(self, input, out_dim=2, npass=2, testing=False, eval_uncertain=False):
npass = npass + 1 if testing else npass
outputs = torch.zeros(npass, input.size(0), out_dim).to(input.device)
log_priors = torch.zeros(npass).to(input.device)
log_variational_posteriors = torch.zeros(npass).to(input.device)
for i in range(npass):
outputs[i] = self(input, sample=True)
log_priors[i] = self.log_prior()
log_variational_posteriors[i] = self.log_variational_posterior()
if testing:
outputs[npass] = self(input, sample=False)
output = outputs.mean(0)
log_prior = log_priors.mean()
log_variational_posterior = log_variational_posteriors.mean()
# predict the aleatoric and epistemic uncertainties
uncertain_alea = torch.zeros(input.size(0), out_dim, out_dim).to(input.device)
uncertain_epis = torch.zeros(input.size(0), out_dim, out_dim).to(input.device)
if eval_uncertain:
p = F.softmax(outputs, dim=-1) # N x B x C
# compute aleatoric uncertainty
p_diag = torch.diag_embed(p, offset=0, dim1=-2, dim2=-1) # N x B x C x C
p_cov = torch.matmul(p.unsqueeze(-1), p.unsqueeze(-1).permute(0, 1, 3, 2)) # N x B x C x C
uncertain_alea = torch.mean(p_diag - p_cov, dim=0) # B x C x C
# compute epistemic uncertainty
p_bar= torch.mean(p, dim=0) # B x C
p_diff_var = torch.matmul((p-p_bar).unsqueeze(-1), (p-p_bar).unsqueeze(-1).permute(0, 1, 3, 2)) # N x B x C x C
uncertain_epis = torch.mean(p_diff_var, dim=0) # B x C x C
output_dict = {'pred_mean': output,
'log_prior': log_prior,
'log_posterior': log_variational_posterior,
'aleatoric': uncertain_alea,
'epistemic': uncertain_epis}
return output_dict
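# Editorial note on the decomposition above (comment only): with p_n the softmax of the
# n-th stochastic pass, aleatoric = E_n[diag(p_n) - p_n p_n^T] and
# epistemic = E_n[(p_n - p_bar)(p_n - p_bar)^T]; downstream code reports the traces of
# these 2x2 matrices as the aleatoric and epistemic uncertainty scores.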
class UString(nn.Module):
def __init__(self, x_dim, h_dim, z_dim, n_layers=1, n_obj=19, n_frames=100, fps=20.0, with_saa=True, uncertain_ranking=False):
super(UString, self).__init__()
self.x_dim = x_dim
self.h_dim = h_dim # 512 (-->256)
self.z_dim = z_dim # 256 (-->128)
self.n_layers = n_layers
self.n_obj = n_obj
self.n_frames = n_frames
self.fps = fps
self.with_saa = with_saa
self.uncertain_ranking = uncertain_ranking
self.phi_x = nn.Sequential(nn.Linear(x_dim, h_dim), nn.ReLU())
# GCN encoder
self.enc_gcn1 = GCNConv(h_dim + h_dim, h_dim)
self.enc_gcn2 = GCNConv(h_dim + h_dim, z_dim, act=lambda x: x)
# rnn layer
self.rnn = Graph_GRU_GCN(h_dim + h_dim + z_dim, h_dim, n_layers, bias=True)
# BNN decoder
self.predictor = BayesianPredictor(n_obj * z_dim, 2)
if self.with_saa:
# auxiliary branch
self.predictor_aux = AccidentPredictor(h_dim + h_dim, 2, dropout=[0.5, 0.0])
self.self_aggregation = SelfAttAggregate(self.n_frames)
# loss function
self.ce_loss = torch.nn.CrossEntropyLoss(reduction='none')
def forward(self, x, y, toa, graph, hidden_in=None, edge_weights=None, npass=2, nbatch=80, testing=False, eval_uncertain=False):
"""
:param x, (batchsize, nFrames, nBoxes, Xdim) = (10 x 100 x 20 x 4096)
:param y, (10 x 2)
:param toa, (10,)
"""
losses = {'cross_entropy': 0,
'log_posterior': 0,
'log_prior': 0,
'total_loss': 0}
if self.with_saa:
losses.update({'auxloss': 0})
if self.uncertain_ranking:
losses.update({'ranking': 0})
Ut = torch.zeros(x.size(0)).to(x.device) # B
all_outputs, all_hidden = [], []
# import ipdb; ipdb.set_trace()
if hidden_in is None:
h = Variable(torch.zeros(self.n_layers, x.size(0), self.n_obj, self.h_dim)) # 1 x 10 x 19 x 256
else:
h = Variable(hidden_in)
h = h.to(x.device)
for t in range(x.size(1)):
# reduce the dim of node feature (FC layer)
x_t = self.phi_x(x[:, t]) # 10 x 20 x 256
img_embed = x_t[:, 0, :].unsqueeze(1).repeat(1, self.n_obj, 1).contiguous() # 10 x 1 x 256
obj_embed = x_t[:, 1:, :] # 10 x 19 x 256
x_t = torch.cat([obj_embed, img_embed], dim=-1) # 10 x 19 x 512
# GCN encoder
enc = self.enc_gcn1(x_t, graph[:, t], edge_weight=edge_weights[:, t]) # 10 x 19 x 256 (512-->256)
z_t = self.enc_gcn2(torch.cat([enc, h[-1]], -1), graph[:, t], edge_weight=edge_weights[:, t]) # 10 x 19 x 128 (512-->128)
# BNN decoder
embed = z_t.view(z_t.size(0), -1) # 10 x (19 x 128)
output_dict = self.predictor.sample_elbo(embed, npass=npass, testing=testing, eval_uncertain=eval_uncertain) # B x 2
dec_t = output_dict['pred_mean']
# recurrence
h = self.rnn(torch.cat([x_t, z_t], -1), graph[:, t], h, edge_weight=edge_weights[:, t]) # rnn latent (640)-->256
# computing losses
L1 = output_dict['log_posterior'] / nbatch
L2 = output_dict['log_prior'] / nbatch
L3 = self._exp_loss(dec_t, y, t, toa=toa, fps=self.fps)
losses['log_posterior'] += L1
losses['log_prior'] += L2
losses['cross_entropy'] += L3
# uncertainty ranking loss
if self.uncertain_ranking:
L5, Ut = self._uncertainty_ranking(output_dict, Ut)
losses['ranking'] += L5
all_outputs.append(output_dict)
all_hidden.append(h[-1])
if self.with_saa:
# soft attention to aggregate hidden states of all frames
embed_video = self.self_aggregation(torch.stack(all_hidden, dim=-1), 'avg')
dec = self.predictor_aux(embed_video)
L4 = torch.mean(self.ce_loss(dec, y[:, 1].to(torch.long)))
losses['auxloss'] = L4
return losses, all_outputs, all_hidden
def _exp_loss(self, pred, target, time, toa, fps=20.0):
'''
:param pred:
:param target: onehot codings for binary classification
:param time:
:param toa:
:param fps:
:return:
'''
# positive example (exp_loss)
target_cls = target[:, 1]
target_cls = target_cls.to(torch.long)
penalty = -torch.max(torch.zeros_like(toa).to(toa.device, pred.dtype), (toa.to(pred.dtype) - time - 1) / fps)
pos_loss = -torch.mul(torch.exp(penalty), -self.ce_loss(pred, target_cls))
# negative example
neg_loss = self.ce_loss(pred, target_cls)
loss = torch.mean(torch.add(torch.mul(pos_loss, target[:, 1]), torch.mul(neg_loss, target[:, 0])))
return loss
    def _uncertainty_ranking(self, output_dict, Ut, eU_only=True):
        """
        :param output_dict: dict holding 'aleatoric'/'epistemic' covariance matrices, each B x 2 x 2
        :param Ut: uncertainty values carried over from the previous time step, shape (B,)
        """
aleatoric = output_dict['aleatoric'] # B x 2 x 2
epistemic = output_dict['epistemic'] # B x 2 x 2
if eU_only:
# here we use the trace of matrix to quantify uncertainty
uncertainty = epistemic[:, 0, 0] + epistemic[:, 1, 1]
else:
uncertainty = aleatoric[:, 1, 1] + epistemic[:, 1, 1] # B
loss = torch.mean(torch.max(torch.zeros_like(Ut).to(Ut.device), uncertainty - Ut))
return loss, uncertainty
| 20,930 | 41.03012 | 145 | py |