Dataset columns: repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: py)

repo: GaitForeMer | file: GaitForeMer-main/training/transformer_model_fn.py
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implments the model function for the POTR model."""
import numpy as np
import os
import sys
import argparse
import json
import time
# from potr.data.Gait17JointsDataset import Gait17JointsDataset
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import classification_report
from numpyencoder import NumpyEncoder
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import training.seq2seq_model_fn as seq2seq_model_fn
import models.PoseTransformer as PoseTransformer
import models.PoseEncoderDecoder as PoseEncoderDecoder
import data.NTURGDDataset as NTURGDDataset
import data.GaitJointsDataset as GaitJointsDataset
import utils.utils as utils
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_WEIGHT_DECAY = 0.00001
_NSEEDS = 8
class POTRModelFn(seq2seq_model_fn.ModelFn):
def __init__(self,
params,
train_dataset_fn,
eval_dataset_fn,
pose_encoder_fn=None,
pose_decoder_fn=None):
super(POTRModelFn, self).__init__(
params, train_dataset_fn, eval_dataset_fn, pose_encoder_fn, pose_decoder_fn)
self._loss_fn = self.layerwise_loss_fn
self.task = params['task']
if self.task == 'downstream':
      # Per-class sample counts for the four gait impairment scores.
      weights = torch.tensor([9., 28., 13., 4.])
      weights = weights / weights.sum()  # class frequencies
      weights = 1.0 / weights  # inverse frequency
      weights = weights / weights.sum()  # normalize weights to sum to 1
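      # Worked example from the counts above: the frequencies [9, 28, 13, 4]/54,
      # inverted and renormalized, give approximately
      # [0.235, 0.075, 0.162, 0.528], so the rarest class (4 samples)
      # receives the largest weight.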
self._loss_weights = weights.to(_DEVICE)
self._weighted_ce_loss = nn.CrossEntropyLoss(weight=self._loss_weights)
print('Using a weighted CE loss for gait impairment score prediction.')
else:
print('Using a standard CE loss for activity prediction.')
def smooth_l1(self, decoder_pred, decoder_gt):
l1loss = nn.SmoothL1Loss(reduction='mean')
return l1loss(decoder_pred, decoder_gt)
def loss_l1(self, decoder_pred, decoder_gt):
return nn.L1Loss(reduction='mean')(decoder_pred, decoder_gt)
def loss_activity(self, logits, class_gt):
"""Computes entropy loss from logits between predictions and class."""
if self.task == 'downstream':
return self._weighted_ce_loss(logits, class_gt)
else:
return nn.functional.cross_entropy(logits, class_gt, reduction='mean')
def compute_class_loss(self, class_logits, class_gt):
"""Computes the class loss for each of the decoder layers predictions or memory."""
class_loss = 0.0
for l in range(len(class_logits)):
class_loss += self.loss_activity(class_logits[l], class_gt)
return class_loss/len(class_logits)
def select_loss_fn(self):
if self._params['loss_fn'] == 'mse':
return self.loss_mse
elif self._params['loss_fn'] == 'smoothl1':
return self.smooth_l1
elif self._params['loss_fn'] == 'l1':
return self.loss_l1
else:
raise ValueError('Unknown loss name {}.'.format(self._params['loss_fn']))
def layerwise_loss_fn(self, decoder_pred, decoder_gt, class_logits=None, class_gt=None):
"""Computes layerwise loss between predictions and ground truth."""
pose_loss = 0.0
loss_fn = self.select_loss_fn()
for l in range(len(decoder_pred)):
pose_loss += loss_fn(decoder_pred[l], decoder_gt)
pose_loss = pose_loss/len(decoder_pred)
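    # Predictions from every decoder layer are supervised; averaging the
    # per-layer losses implements layer-wise supervision across the decoder.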
if class_logits is not None:
return pose_loss, self.compute_class_loss(class_logits, class_gt)
return pose_loss, None
def init_model(self, pose_encoder_fn=None, pose_decoder_fn=None):
self._model = PoseTransformer.model_factory(
self._params,
pose_encoder_fn,
pose_decoder_fn
)
def select_optimizer(self):
optimizer = optim.AdamW(
self._model.parameters(), lr=self._params['learning_rate'],
betas=(0.9, 0.999),
weight_decay=_WEIGHT_DECAY
)
return optimizer
def dataset_factory(params, fold, model_prefix):
if params['dataset'] == 'ntu_rgbd':
return NTURGDDataset.dataset_factory(params)
elif params['dataset'] == 'pd_gait':
return GaitJointsDataset.dataset_factory(params, fold)
else:
raise ValueError('Unknown dataset {}'.format(params['dataset']))
def single_vote(pred):
  """
  Get the majority vote of the predicted classes for the clips in one video.
  :param pred: list of predicted classes, one per clip of the video
  :return: majority-vote predicted class for the video
  """
p = np.array(pred)
counts = np.bincount(p)
max_count = 0
max_index = 0
for i in range(len(counts)):
if max_count < counts[i]:
max_index = i
max_count = counts[i]
return max_index
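# Illustrative example (made-up clip predictions): single_vote([2, 2, 1, 2, 0])
# returns 2. Ties are broken in favor of the smaller class index, since the
# scan above only updates on a strictly larger count.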
def save_json(filename, attributes, names):
"""
Save training parameters and evaluation results to json file.
:param filename: save filename
:param attributes: attributes to save
:param names: name of attributes to save in json file
"""
with open(filename, "w", encoding="utf8") as outfile:
d = {}
for i in range(len(attributes)):
name = names[i]
attribute = attributes[i]
d[name] = attribute
json.dump(d, outfile, indent=4, cls=NumpyEncoder)
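# Illustrative call (hypothetical values):
#   save_json('results.json', [[0, 1, 2], [0, 1]], ['preds', 'gts'])
# writes {"preds": [0, 1, 2], "gts": [0, 1]}; NumpyEncoder makes numpy
# arrays and scalars JSON-serializable.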
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_prefix', type=str, default='')
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--data_path', type=str)
parser.add_argument('--learning_rate', type=float, default=1e-5)
parser.add_argument('--max_epochs', type=int, default=500)
parser.add_argument('--steps_per_epoch', type=int, default=200)
parser.add_argument('--action', nargs='*', type=str, default=None)
parser.add_argument('--use_one_hot', action='store_true')
parser.add_argument('--init_fn', type=str, default='xavier_init')
parser.add_argument('--include_last_obs', action='store_true')
parser.add_argument('--task', type=str, default='downstream', choices=['pretext', 'downstream'])
parser.add_argument('--downstream_strategy', default='both_then_class', choices=['both', 'class', 'both_then_class'])
# pose transformers related parameters
parser.add_argument('--model_dim', type=int, default=256)
parser.add_argument('--num_encoder_layers', type=int, default=4)
parser.add_argument('--num_decoder_layers', type=int, default=4)
parser.add_argument('--num_heads', type=int, default=4)
parser.add_argument('--dim_ffn', type=int, default=2048)
parser.add_argument('--dropout', type=float, default=0.3)
parser.add_argument('--source_seq_len', type=int, default=50)
parser.add_argument('--target_seq_len', type=int, default=25)
parser.add_argument('--max_gradient_norm', type=float, default=0.1)
parser.add_argument('--lr_step_size',type=int, default=400)
parser.add_argument('--learning_rate_fn',type=str, default='step')
parser.add_argument('--warmup_epochs', type=int, default=100)
parser.add_argument('--pose_format', type=str, default='rotmat')
parser.add_argument('--remove_low_std', action='store_true')
parser.add_argument('--remove_global_trans', action='store_true')
parser.add_argument('--loss_fn', type=str, default='l1')
parser.add_argument('--pad_decoder_inputs', action='store_true')
parser.add_argument('--pad_decoder_inputs_mean', action='store_true')
parser.add_argument('--use_wao_amass_joints', action='store_true')
parser.add_argument('--non_autoregressive', action='store_true')
parser.add_argument('--pre_normalization', action='store_true')
parser.add_argument('--use_query_embedding', action='store_true')
parser.add_argument('--predict_activity', action='store_true')
parser.add_argument('--use_memory', action='store_true')
parser.add_argument('--query_selection',action='store_true')
parser.add_argument('--activity_weight', type=float, default=1.0)
parser.add_argument('--pose_embedding_type', type=str, default='gcn_enc')
parser.add_argument('--encoder_ckpt', type=str, default=None)
parser.add_argument('--dataset', type=str, default='h36m_v2')
parser.add_argument('--skip_rate', type=int, default=5)
parser.add_argument('--eval_num_seeds', type=int, default=_NSEEDS)
parser.add_argument('--copy_method', type=str, default=None)
parser.add_argument('--finetuning_ckpt', type=str, default=None)
parser.add_argument('--pos_enc_alpha', type=float, default=10)
parser.add_argument('--pos_enc_beta', type=float, default=500)
args = parser.parse_args()
params = vars(args)
if params['task'] == 'downstream':
num_folds = 54
else:
num_folds = 1
total_preds = []
total_gts = []
preds_votes = []
preds_probs = []
  all_folds = range(1, num_folds + 1)
for fold in all_folds:
print(f'Fold {fold} out of {num_folds}')
    utils.create_dir_tree(params['model_prefix'])  # create first: the dataset mean and std are stored under this directory
train_dataset_fn, eval_dataset_fn = dataset_factory(params, fold, params['model_prefix'])
params['input_dim'] = train_dataset_fn.dataset._data_dim
params['pose_dim'] = train_dataset_fn.dataset._pose_dim
pose_encoder_fn, pose_decoder_fn = \
PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
config_path = os.path.join(params['model_prefix'], 'config', 'config.json')
with open(config_path, 'w') as file_:
json.dump(params, file_, indent=4)
model_fn = POTRModelFn(
params, train_dataset_fn,
eval_dataset_fn,
pose_encoder_fn, pose_decoder_fn
)
if params['task'] == 'downstream':
predictions, gts, pred_probs = model_fn.train()
      print('predictions:', predictions)
# save predicted classes
preds_votes.append(predictions.tolist())
# save predicted probabilities
preds_probs.append(pred_probs.tolist())
# save final predictions and true labels
if np.shape(gts)[0] == 1: # only 1 clip
pred = int(predictions)
else:
pred = single_vote(predictions)
gt = gts[0]
total_preds.append(pred)
total_gts.append(int(gt))
del model_fn, pose_encoder_fn, pose_decoder_fn
attributes = [preds_votes, total_preds, preds_probs, total_gts]
names = ['predicted_classes', 'predicted_final_classes', 'prediction_list', 'true_labels']
jsonfilename = os.path.join(params['model_prefix'], 'results.json')
save_json(jsonfilename, attributes, names)
else:
model_fn.train()
if params['task'] == 'downstream':
print(classification_report(total_gts, total_preds))
repo: GaitForeMer | file: GaitForeMer-main/training/seq2seq_model_fn.py
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implements a model function estimator for training, evaluation and predict.
Taken and adapted from the code presented in [4].
[1] https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-249404882
[2] https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/motionGenerationError.m#L40-L54
[3] https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-247769197
[4] https://arxiv.org/pdf/1705.02445.pdf
"""
import sys
import numpy as np
import json
import sys
import os
import argparse
import time
from abc import abstractmethod
import tqdm
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
# import wandb
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import utils.WarmUpScheduler as warm_up_scheduler
import visualize.viz as viz
import models.seq2seq_model as seq2seq_model
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Minimum distance threshold for mean average precision, in meters.
# Set to 10 cm.
_MAP_TRESH = 0.10
class ModelFn(object):
"""Implements the model functionalities: training, evaliation and prediction."""
def __init__(
self,
params,
train_dataset_fn=None,
eval_dataset_fn=None,
pose_encoder_fn=None,
pose_decoder_fn=None):
"""Initialization of model function."""
self._params = params
self._train_dataset_fn = train_dataset_fn
self._eval_dataset_fn = eval_dataset_fn
self._visualize = False
thisname = self.__class__.__name__
# self._norm_stats = train_dataset_fn.dataset._norm_stats
self._norm_stats = None
self._ms_range = [80, 160, 320, 400, 560, 1000]
self.init_model(pose_encoder_fn, pose_decoder_fn)
self._loss_fn = self.loss_mse
self._model.to(_DEVICE)
self._optimizer_fn = self.select_optimizer()
self.select_lr_fn()
self.finetune_init()
self._lr_db_curve = []
lr_type = 'stepwise' if self._params['learning_rate_fn'] == 'beatles' \
else 'epochwise'
self._params['lr_schedule_type'] = lr_type
self.evaluate_fn = self.evaluate_nturgbd
self._writer = SummaryWriter(
os.path.join(self._params['model_prefix'], 'tf_logs'))
self._time_range_eval = []
m_params = filter(lambda p: p.requires_grad, self._model.parameters())
nparams = sum([np.prod(p.size()) for p in m_params])
#print arguments
# print('[INFO] ({}) This module has {} parameters!'.format(thisname, nparams))
# print('[INFO] ({}) Intializing ModelFn with params'.format(thisname))
# for k,v in self._params.items():
# print('[INFO] ({}) {}: {}'.format(thisname, k, v))
def finetune_init(self):
if self._params['finetuning_ckpt'] is not None:
print('[INFO] (finetune_model) Finetuning from:',
self._params['finetuning_ckpt'])
# edits made here to exclude activity prediction head
model_state_dict = torch.load(self._params['finetuning_ckpt'], map_location=_DEVICE)
if 'gait' in self._params['dataset']: # exclude prediction head
del model_state_dict['_action_head.0.weight']
del model_state_dict['_action_head.0.bias']
self._model.load_state_dict(model_state_dict, strict=False)
else:
self._model.load_state_dict(model_state_dict)
def select_lr_fn(self):
"""Calls the selection of learning rate function."""
self._lr_scheduler = self.get_lr_fn()
lr_fn = self._params['learning_rate_fn']
if self._params['warmup_epochs'] > 0 and lr_fn != 'beatles':
self._lr_scheduler = warm_up_scheduler.GradualWarmupScheduler(
self._optimizer_fn, multiplier=1,
total_epoch=self._params['warmup_epochs'],
after_scheduler=self._lr_scheduler
)
def get_lr_fn(self):
"""Creates the function to be used to generate the learning rate."""
if self._params['learning_rate_fn'] == 'step':
return torch.optim.lr_scheduler.StepLR(
self._optimizer_fn, step_size=self._params['lr_step_size'], gamma=0.1
)
elif self._params['learning_rate_fn'] == 'exponential':
return torch.optim.lr_scheduler.ExponentialLR(
self._optimizer_fn, gamma=0.95
)
elif self._params['learning_rate_fn'] == 'linear':
      # sets the learning rate by multiplying the initial learning rate by a
      # linearly decaying factor (from 1.0 down to 0.5 over max_epochs)
      lr0, T = self._params['learning_rate'], self._params['max_epochs']
      lrT = lr0*0.5
      m = (lrT/lr0 - 1) / T  # slope so the multiplier reaches 0.5 at epoch T
      lambda_fn = lambda epoch: m*epoch + 1.0
return torch.optim.lr_scheduler.LambdaLR(
self._optimizer_fn, lr_lambda=lambda_fn
)
elif self._params['learning_rate_fn'] == 'beatles':
# D^(-0.5)*min(i^(-0.5), i*warmup_steps^(-1.5))
D = float(self._params['model_dim'])
warmup = self._params['warmup_epochs']
lambda_fn = lambda e: (D**(-0.5))*min((e+1.0)**(-0.5), (e+1.0)*warmup**(-1.5))
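      # For intuition (derived from the formula above): with model_dim D=256
      # and warmup_epochs=100, the multiplier rises linearly from ~6.3e-5 at
      # e=0 to its peak ~6.3e-3 at e=99, then decays as (e+1)^(-0.5).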
return torch.optim.lr_scheduler.LambdaLR(
self._optimizer_fn, lr_lambda=lambda_fn
)
else:
raise ValueError('Unknown learning rate function: {}'.format(
self._params['learning_rate_fn']))
@abstractmethod
def init_model(self, pose_encoder_fn, pose_decoder_fn):
pass
@abstractmethod
def select_optimizer(self):
pass
def loss_mse(self, decoder_pred, decoder_gt):
"""Computes the L2 loss between predictions and ground truth."""
step_loss = (decoder_pred - decoder_gt)**2
step_loss = step_loss.mean()
return step_loss
@abstractmethod
def compute_loss(self, inputs=None, target=None, preds=None, class_logits=None, class_gt=None):
return self._loss_fn(preds, target, class_logits, class_gt)
def print_logs(self, step_loss, current_step, pose_loss, activity_loss, selection_loss):
selection_logs = ''
if self._params['query_selection']:
selection_logs = 'selection loss {:.4f}'.format(selection_loss)
if self._params['predict_activity']:
print("[INFO] global {:06d}; step {:04d}; pose_loss {:4f} - class_loss {:4f}; step_loss: {:.4f}; lr: {:.2e} {:s}".\
format(self._global_step, current_step, pose_loss, activity_loss,
step_loss, self._params['learning_rate'], selection_logs)
)
else:
print("[INFO] global {3:06d}; step {0:04d}; step_loss: {1:.4f}; lr: {2:.2e} {4:s}".\
format(current_step, step_loss, self._params['learning_rate'],
self._global_step, selection_logs)
)
def compute_selection_loss(self, inputs, target, cols_softmax=False):
"""Compute the query entry selection loss.
Args:
inputs: [batch_size, src_len, tgt_len]
target: [batch_size, src_len, tgt_len]
"""
axis_ = 2 if cols_softmax else 1
target = F.softmax(-target, dim=axis_)
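    # Illustrative example: a distance target [[0.0, 2.0], [2.0, 0.0]] becomes,
    # after softmax(-target, dim=1), a per-target-column distribution that
    # concentrates probability on the closest source element; the MSE below
    # pulls the predicted probability matrix toward that distribution.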
return torch.nn.MSELoss(reduction='mean')(inputs, target)
def train_one_epoch(self, epoch):
"""Trains for a number of steps before evaluation."""
epoch_loss = 0
act_loss = 0
sel_loss = 0
N = len(self._train_dataset_fn)
for current_step, sample in enumerate(self._train_dataset_fn):
self._optimizer_fn.zero_grad()
for k in sample.keys():
if k == 'actions' or k == 'decoder_outputs_euler' or k=='action_str':
continue
sample[k] = sample[k].to(_DEVICE)
decoder_pred = self._model(
sample['encoder_inputs'], sample['decoder_inputs'])
selection_loss = 0
if self._params['query_selection']:
prob_mat = decoder_pred[-1][-1]
selection_loss = self.compute_selection_loss(
inputs=prob_mat,
target=sample['src_tgt_distance']
)
sel_loss += selection_loss
pred_class, gt_class = None, None
if self._params['predict_activity']:
gt_class = sample['action_ids'] # one label for the sequence
pred_class = decoder_pred[1]
pose_loss, activity_loss = self.compute_loss(
inputs=sample['encoder_inputs'],
target=sample['decoder_outputs'],
preds=decoder_pred[0],
class_logits=pred_class,
class_gt=gt_class
)
step_loss = pose_loss + selection_loss
if self._params['predict_activity']:
if self._params['task'] == 'pretext':
step_loss += self._params['activity_weight']*activity_loss
else:
if self._params['downstream_strategy'] == 'both':
step_loss += self._params['activity_weight']*activity_loss
elif self._params['downstream_strategy'] == 'class':
step_loss = activity_loss
elif self._params['downstream_strategy'] == 'both_then_class':
if epoch >= 50:
step_loss = activity_loss
else:
step_loss += self._params['activity_weight']*activity_loss
act_loss += activity_loss
epoch_loss += step_loss.item()
step_loss.backward()
if self._params['max_gradient_norm'] is not None:
torch.nn.utils.clip_grad_norm_(
self._model.parameters(), self._params['max_gradient_norm'])
self._optimizer_fn.step()
if current_step % 10 == 0:
step_loss = step_loss.cpu().data.numpy()
# self.print_logs(step_loss, current_step, pose_loss, activity_loss,
# selection_loss)
self.update_learning_rate(self._global_step, mode='stepwise')
self._global_step += 1
if self._params['query_selection']:
      self._scalars['train_selection_loss'] = sel_loss/N
if self._params['predict_activity']:
return epoch_loss/N, act_loss/N
return epoch_loss/N
def train(self):
"""Main training loop."""
self._params['learning_rate'] = self._lr_scheduler.get_lr()[0]
self._global_step = 1
thisname = self.__class__.__name__
# wandb.init(name='training', project='GaitForeMer')
for e in range(self._params['max_epochs']):
self._scalars = {}
self._model.train()
start_time = time.time()
epoch_loss = self.train_one_epoch(e)
act_log = ''
if self._params['predict_activity']:
act_loss = epoch_loss[1]
epoch_loss = epoch_loss[0]
act_log = '; activity_loss: {}'.format(act_loss)
self._scalars['act_loss_train'] = act_loss
self._scalars['epoch_loss'] = epoch_loss
print("epoch {0:04d}; epoch_loss: {1:.4f}".format(e, epoch_loss)+act_log)
self.flush_extras(e, 'train')
_time = time.time() - start_time
self._model.eval()
eval_loss = self.evaluate_fn(e, _time)
act_log = ''
if self._params['predict_activity']:
self._scalars['act_loss_eval'] = eval_loss[1]
self._scalars['accuracy'] = eval_loss[2]
act_log = '; act_eval_loss {}; accuracy {}'.format(eval_loss[1], eval_loss[2])
eval_activity_loss = eval_loss[1]
eval_accuracy = eval_loss[2]
# eval_loss = eval_loss[0]
self._scalars['eval_loss'] = eval_loss[0]
print("[INFO] ({}) Epoch {:04d}; eval_loss: {:.4f}; lr: {:.2e}".format(
thisname, e, eval_loss[0], self._params['learning_rate'])+act_log)
self.write_summary(e)
# wandb_logs = {"train loss": epoch_loss, "train activity loss": act_loss, "eval loss": eval_loss, "eval activity loss": eval_activity_loss, "eval accuracy": eval_accuracy}
# wandb.log(wandb_logs)
model_path = os.path.join(
self._params['model_prefix'], 'models', 'ckpt_epoch_%04d.pt'%e)
if (e+1)%100 == 0:
torch.save(self._model.state_dict(), model_path)
self.update_learning_rate(e, mode='epochwise')
self.flush_extras(e, 'eval')
# return predictions and real ones
predictions = eval_loss[3]
gt = eval_loss[4]
pred_probs = eval_loss[5]
return predictions, gt, pred_probs
# save the last one
# model_path = os.path.join(
# self._params['model_prefix'], 'models', 'ckpt_epoch_%04d.pt'%e)
    # torch.save(self._model.state_dict(), model_path)
# self.flush_curves()
def write_summary(self, epoch):
# for action_, ms_errors_ in ms_eval_loss.items():
self._writer.add_scalars(
'loss/recon_loss',
{'train':self._scalars['epoch_loss'], 'eval': self._scalars['eval_loss']},
epoch
)
# write scalars for H36M dataset prediction style
action_ = self._train_dataset_fn.dataset._monitor_action
if 'ms_eval_loss' in self._scalars.keys():
range_len = len(self._scalars['ms_eval_loss'][action_])
# range_len = len(self._ms_range)
ms_dict = {str(self._ms_range[i]): self._scalars['ms_eval_loss'][action_][i]
for i in range(range_len)}
ms_e = np.concatenate([np.array(v).reshape(1,range_len)
for k,v in self._scalars['ms_eval_loss'].items()], axis=0)
self._writer.add_scalars('ms_loss/eval-'+action_, ms_dict, epoch)
ms_e = np.mean(ms_e, axis=0) # (n_actions)
self._time_range_eval.append(np.expand_dims(ms_e, axis=0)) # (1, n_actions)
all_ms = {str(self._ms_range[i]): ms_e[i] for i in range(len(ms_e))}
self._writer.add_scalars('ms_loss/eval-all', all_ms, epoch)
self._writer.add_scalar('MSRE/msre_eval', self._scalars['msre'], epoch)
self._writer.add_scalars('time_range/eval',
{'short-term':np.mean(ms_e[:4]), 'long-term':np.mean(ms_e)}, epoch)
if self._params['predict_activity']:
self._writer.add_scalars(
'loss/class_loss',
{'train': self._scalars['act_loss_train'], 'eval': self._scalars['act_loss_eval']},
epoch
)
self._writer.add_scalar('class/accuracy', self._scalars['accuracy'], epoch)
if self._params['query_selection']:
self._writer.add_scalars(
'selection/query_selection',
{'eval': self._scalars['eval_selection_loss'],
         'train': self._scalars['train_selection_loss']},
epoch
)
if 'mAP' in self._scalars.keys():
self._writer.add_scalar('mAP/mAP', self._scalars['mAP'], epoch)
if 'MPJPE' in self._scalars.keys():
self._writer.add_scalar('MPJPE/MPJPE', self._scalars['MPJPE'], epoch)
def print_range_summary(self, action, mean_mean_errors):
mean_eval_error = []
# Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms
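    # At 40 ms per frame (25 fps), the zero-based frame indices 1, 3, 7, 9,
    # 13 and 24 used below correspond to 80, 160, 320, 400, 560 and 1000 ms.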
print("{0: <16} |".format(action), end="")
for ms in [1,3,7,9,13,24]:
if self._params['target_seq_len'] >= ms + 1:
print(" {0:.3f} |".format(mean_mean_errors[ms]), end="")
mean_eval_error.append(mean_mean_errors[ms])
else:
print(" n/a |", end="")
print()
return mean_eval_error
def print_table_header(self):
print()
print("{0: <16} |".format("milliseconds"), end="")
for ms in [80, 160, 320, 400, 560, 1000]:
print(" {0:5d} |".format(ms), end="")
print()
def flush_curves(self):
path_ = os.path.join(self._params['model_prefix'], 'loss_info')
os.makedirs(path_, exist_ok=True)
path_ = os.path.join(path_, 'eval_time_range.npy')
np.save(path_, np.concatenate(self._time_range_eval, axis=0))
path_ = os.path.join(path_, 'lr_schedule.npy')
np.save(path_, np.array(self._lr_db_curve))
def update_learning_rate(self, epoch_step, mode='stepwise'):
"""Update learning rate handler updating only when the mode matches."""
if self._params['lr_schedule_type'] == mode:
self._lr_scheduler.step(epoch_step)
self._writer.add_scalar(
'learning_rate/lr', self._params['learning_rate'], epoch_step)
self._lr_db_curve.append([self._params['learning_rate'], epoch_step])
# self._params['learning_rate'] = self._lr_scheduler.get_last_lr()[0]
self._params['learning_rate'] = self._lr_scheduler.get_lr()[0]
@abstractmethod
def flush_extras(self, epoch, phase):
pass
def compute_class_accurracy_sequence(self, class_logits, class_gt):
# softmax on last dimension and get max on last dimension
class_pred = torch.argmax(class_logits.softmax(-1), -1)
accuracy = (class_pred == class_gt).float().sum()
accuracy = accuracy / class_logits.size()[0]
return accuracy.item()
def compute_class_accurracy_instance(self, class_logits, class_gt):
# softmax on last dimension and get max on last dimension
tar_seq_len = self._params['target_seq_len']
class_pred = torch.argmax(class_logits.softmax(-1), -1)
accuracy = (class_pred == class_gt).float().sum()
accuracy = accuracy / (class_logits.size()[0]*tar_seq_len)
return accuracy.item()
def validation_srnn_ms(self, sample, decoder_pred):
    # the data was flattened from a sequence of size
# [n_actions, n_seeds, target_length, pose_size]
n_actions = len(self._params['action_subset'])
seq_shape = (n_actions, self._params['eval_num_seeds'],
self._params['target_seq_len'], self._params['pose_dim'])
srnn_gts_euler = sample['decoder_outputs_euler']
decoder_pred_ = decoder_pred.cpu().numpy()
decoder_pred_ = decoder_pred_.reshape(seq_shape)
do_remove = self._params['remove_low_std']
mean_eval_error_dict = {}
self.print_table_header()
eval_ms_mean = []
for ai, action in enumerate(sample['actions']):
action = action[0]
decoder_pred = decoder_pred_[ai, :, :, :]
if self._params['dataset'] == 'h36m':
# seq_len x n_seeds x pose_dim
decoder_pred = decoder_pred.transpose([1, 0, 2])
# a list or a vector of length n_seeds
# each entry of: shape seq_len x complete_pose_dim (H36M == 99)
srnn_pred_euler = self._eval_dataset_fn.dataset.post_process_to_euler(decoder_pred)
# n_seeds x seq_len
mean_errors = np.zeros((self._params['eval_num_seeds'],
self._params['target_seq_len']))
# Training is done in exponential map or rotation matrix or quaternion
# but the error is reported in Euler angles, as in previous work [3,4,5]
for i in np.arange(self._params['eval_num_seeds']):
# seq_len x complete_pose_dim (H36M==99)
eulerchannels_pred = srnn_pred_euler[i]
# n_seeds x seq_len x complete_pose_dim (H36M==96)
action_gt = srnn_gts_euler[action]
# seq_len x complete_pose_dim (H36M==96)
gt_i = np.copy(action_gt.squeeze()[i].numpy())
# Only remove global rotation. Global translation was removed before
gt_i[:, 0:3] = 0
# here [2,4,5] remove data based on the std of the batch THIS IS WEIRD!
# (seq_len, 96) - (seq_len, 96)
idx_to_use = np.where(np.std(gt_i, 0) > 1e-4)[0]
euc_error = np.power(gt_i[:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
# shape: seq_len
euc_error = np.sum(euc_error, 1)
# shape: seq_len
euc_error = np.sqrt(euc_error)
mean_errors[i,:] = euc_error
# This is simply the mean error over the eval_num_seeds examples
# with shape [eval_num_seeds]
mean_mean_errors = np.mean(mean_errors, 0)
mean_eval_error_dict[action] = self.print_range_summary(action, mean_mean_errors)
return mean_eval_error_dict
@torch.no_grad()
def evaluate_nturgbd(self, current_step, dummy_entry=None):
eval_loss = 0.0
mAP_all = 0.0
class_loss = 0.0
mean_accuracy = 0.0
N = len(self._eval_dataset_fn)
gt_class_ = []
pred_class_ = []
num_joints = self._params['pose_dim'] // 3
TP = np.zeros((num_joints,))
FN = np.zeros((num_joints,))
MPJPE = np.zeros((num_joints,))
for (i, sample) in tqdm.tqdm(enumerate(self._eval_dataset_fn)):
for k in sample.keys():
if k=='action_str':
continue
sample[k] = sample[k].to(_DEVICE)
decoder_pred = self._model(
sample['encoder_inputs'], sample['decoder_inputs'])
pred_class, gt_class = None, None
if self._params['predict_activity']:
gt_class = sample['action_ids'] # one label for the sequence
pred_class = decoder_pred[1]
decoder_pred = decoder_pred[0]
gt_class_.append(gt_class.item())
pred_class_.append(pred_class[-1].cpu().numpy())
pose_loss, activity_loss = self.compute_loss(
inputs=sample['encoder_inputs'],
target=sample['decoder_outputs'],
preds=decoder_pred,
class_logits=pred_class,
class_gt=gt_class
)
# Can save predicted outputs for visualization here
# if i == 2:
# predicted_pose = decoder_pred[-1].squeeze().reshape(20, 17, 3).cpu().numpy()
# input_pose = sample['encoder_inputs'].squeeze().reshape(39, 17, 3).cpu().numpy()
# gt_pose = sample['decoder_outputs'].squeeze().reshape(20, 17, 3).cpu().numpy()
# np.save('output_poses/v37_pred.npy', predicted_pose)
# np.save('output_poses/v37_gt.npy', gt_pose)
# np.save('output_poses/v37_input.npy', input_pose)
# # break
      eval_loss += pose_loss
eval_loss /= N
if self._params['predict_activity']:
class_loss /= N
pred_class_ = torch.squeeze(torch.from_numpy(np.stack(pred_class_)))
gt_class_ = torch.from_numpy(np.array(gt_class_))
# print(pred_class_.size(), gt_class_.size())
accuracy = self.compute_class_accurracy_sequence(pred_class_, gt_class_)
return (eval_loss, class_loss, accuracy, torch.argmax(pred_class_.softmax(-1), -1), gt_class_, pred_class_.softmax(-1))
# return (eval_loss, class_loss, accuracy, torch.argmax(pred_class_.softmax(-1), -1), gt_class_)
# return (eval_loss, class_loss, accuracy)
return eval_loss
def compute_mean_average_precision(self, prediction, target):
pred = np.squeeze(prediction)
tgt = np.squeeze(target)
T, D = pred.shape
pred = self._eval_dataset_fn.dataset.unormalize_sequence(pred)
tgt = self._eval_dataset_fn.dataset.unormalize_sequence(tgt)
# num_frames x num_joints x 3
pred = pred.reshape((T, -1, 3))
tgt = tgt.reshape((T, -1, 3))
# compute the norm for the last axis: (x,y,z) coordinates
# num_frames x num_joints
TP = np.linalg.norm(pred-tgt, axis=-1) <= _MAP_TRESH
TP = TP.astype(int)
FN = np.logical_not(TP).astype(int)
# num_joints
TP = np.sum(TP, axis=0)
FN = np.sum(FN, axis=0)
# compute recall for each joint
recall = TP / (TP+FN)
# average over joints
mAP = np.mean(recall)
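    # Illustrative example: with _MAP_TRESH = 0.10 m, a joint predicted
    # within 10 cm of the ground truth in 8 of 10 frames gives TP=8, FN=2,
    # recall 0.8; mAP is the mean of these per-joint recalls.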
return mAP, TP, FN
def compute_MPJPE(self, prediction, target):
pred = np.squeeze(prediction)
tgt = np.squeeze(target)
T, D = pred.shape
pred = self._eval_dataset_fn.dataset.unormalize_sequence(pred)
tgt = self._eval_dataset_fn.dataset.unormalize_sequence(tgt)
# num_frames x num_joints x 3
pred = pred.reshape((T, -1, 3))
tgt = tgt.reshape((T, -1, 3))
# compute the norm for the last axis: (x,y,z) coordinates
# num_frames x num_joints
norm = np.linalg.norm(pred-tgt, axis=-1)
# num_joints
MPJPE = np.mean(norm, axis=0)
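    # MPJPE here is the per-joint mean Euclidean error in the unnormalized
    # coordinate space, averaged over frames: one value per joint.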
return MPJPE
repo: GaitForeMer | file: GaitForeMer-main/training/pose_classifier_fn.py
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
[1] https://arxiv.org/abs/1312.6114
"""
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
import numpy as np
import os
import sys
import argparse
import tqdm
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import models.PoseActionClassifier as ActionClass
import data.H36MDatasetPose as H36MDataset
import utils.utils as utils
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class PoseActionFn(object):
def __init__(self, params, train_dataset, val_dataset=None):
self._params = params
self._train_dataset = train_dataset
self._val_dataset = val_dataset
self._writer = SummaryWriter(
os.path.join(self._params['model_prefix'], 'tf_logs'))
self.load_constants()
self.init_model()
self.select_optimizer()
thisname = self.__class__.__name__
self._lr_scheduler = utils.get_lr_fn(self._params, self._optimizer_fn)
for k, v in self._params.items():
print('[INFO] ({}) {}: {}'.format(thisname, k, v))
def load_constants(self):
self._params['use_one_hot'] = False
self._params['parent'], self._params['offset'], \
self._params['rot_ind'], self._params['exp_map_ind'] = \
utils.load_constants(self._params['data_path'])
def init_model(self):
self._model = ActionClass.ActionClassifier(
dim=self._params['model_dim'],
n_classes=len(self._params['action_subset'])
)
self._model.to(_DEVICE)
n_params = filter(lambda p: p.requires_grad, self._model.parameters())
n_params = sum([np.prod(p.size()) for p in n_params])
print('++++++++ Total Parameters:', n_params)
def select_optimizer(self):
self._optimizer_fn = optim.Adam(
self._model.parameters(),
lr=self._params['learning_rate']
)
def compute_accuracy(self, class_logits, class_gt):
class_pred = torch.argmax(class_logits.softmax(-1), -1)
accuracy = (class_pred == class_gt).float().sum()
accuracy = accuracy / (class_logits.size()[0])
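    # Illustrative example: argmax predictions [0, 1, 1, 2] against ground
    # truth [0, 1, 2, 2] give an accuracy of 3/4 = 0.75.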
return accuracy
def forward_model(self, sample):
pose_gt = sample['pose'].to(_DEVICE)
class_gt = sample['action'].to(_DEVICE)
class_logits = self._model(pose_gt)
loss = nn.functional.cross_entropy(class_logits, class_gt, reduction='mean')
accuracy = self.compute_accuracy(class_logits, class_gt)
return loss, accuracy
def train_one_epoch(self, epoch):
epoch_loss, epoch_accuracy = 0, 0
N = len(self._train_dataset)
self._model.train()
for i, sample in enumerate(self._train_dataset):
self._optimizer_fn.zero_grad()
loss, accuracy = self.forward_model(sample)
if i%1000 == 0:
print('[INFO] epoch: {:04d}; it: {:04d} loss: {:.4f}; acc: {:.4f}'.format(
epoch, i, loss, accuracy))
loss.backward()
self._optimizer_fn.step()
      # accumulate python floats so the autograd graph is not retained
      epoch_loss += loss.item()
      epoch_accuracy += accuracy.item()
return epoch_loss/N, epoch_accuracy/N
@torch.no_grad()
def validation(self, epoch):
epoch_loss, epoch_accuracy = 0, 0
N = len(self._val_dataset)
self._model.eval()
for i, sample in tqdm.tqdm(enumerate(self._val_dataset)):
loss, accuracy = self.forward_model(sample)
epoch_loss += loss
epoch_accuracy += accuracy
return epoch_loss/N, epoch_accuracy/N
def train(self):
thisname = self.__class__.__name__
self._params['learning_rate'] = self._lr_scheduler.get_last_lr()[0]
for e in range(self._params['max_epochs']):
self._model.train()
epoch_loss, epoch_accuracy = self.train_one_epoch(e)
val_loss, val_accuracy = self.validation(e)
# save models
model_path = os.path.join(
self._params['model_prefix'], 'models', 'ckpt_epoch_%04d.pt'%e)
torch.save(self._model.state_dict(), model_path)
# verbose and write the scalars
print('[INFO] Epoch: {:04d}; epoch_loss: {:.4f}; epoch_accuracy: {:.4f}; val_loss: {:.4f}; val_accuracy: {:.4f}; lr: {:2.2e}'.format(
e, epoch_loss, epoch_accuracy, val_loss, val_accuracy, self._params['learning_rate']))
self._writer.add_scalars(
'loss/loss', {'train': epoch_loss, 'val': val_loss}, e)
self._writer.add_scalars(
'accurracy/accurracy', {'train': epoch_accuracy, 'val': val_accuracy}, e)
self._writer.add_scalar(
'learning_rate/lr', self._params['learning_rate'], e)
self._lr_scheduler.step(e)
self._params['learning_rate'] = self._lr_scheduler.get_last_lr()[0]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default=None)
parser.add_argument('--action', type=str, nargs='*', default=None)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--pose_format', type=str, default='expmap')
parser.add_argument('--remove_low_std', action='store_true')
parser.add_argument('--model_dim', type=int, default=128)
parser.add_argument('--max_epochs', type=int, default=500)
parser.add_argument('--model_prefix', type=str, default=None)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--learning_rate_fn', type=str, default='linear')
args = parser.parse_args()
params = vars(args)
  if args.action is None or 'all' in args.action:
    args.action = H36MDataset._ALL_ACTIONS
params['action_subset'] = args.action
dataset_t = H36MDataset.H36MDataset(params, mode='train')
dataset_v = H36MDataset.H36MDataset(
params, mode='eval', norm_stats=dataset_t._norm_stats)
train_dataset_fn= torch.utils.data.DataLoader(
dataset_t,
batch_size=params['batch_size'],
shuffle=True,
num_workers=4,
collate_fn=H36MDataset.collate_fn,
drop_last=True
)
val_dataset_fn = torch.utils.data.DataLoader(
dataset_v,
batch_size=1,
shuffle=True,
num_workers=1,
collate_fn=H36MDataset.collate_fn,
drop_last=True
)
params['input_dim'] = train_dataset_fn.dataset._data_dim
params['pose_dim'] = train_dataset_fn.dataset._pose_dim
vae_trainer = PoseActionFn(params, train_dataset_fn, val_dataset_fn)
vae_trainer.train()
repo: GaitForeMer | file: GaitForeMer-main/models/Conv1DEncoder.py
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Model of 1D convolutions for encoding pose sequences."""
import numpy as np
import os
import sys
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import torch
import torch.nn as nn
class Pose1DEncoder(nn.Module):
def __init__(self, input_channels=3, output_channels=128, n_joints=21):
super(Pose1DEncoder, self).__init__()
self._input_channels = input_channels
self._output_channels = output_channels
self._n_joints = n_joints
self.init_model()
def init_model(self):
self._model = nn.Sequential(
nn.Conv1d(in_channels=self._input_channels, out_channels=32, kernel_size=7),
nn.BatchNorm1d(32),
nn.ReLU(True),
nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3),
nn.BatchNorm1d(32),
nn.ReLU(True),
nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3),
nn.BatchNorm1d(64),
nn.ReLU(True),
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
nn.BatchNorm1d(64),
nn.ReLU(True),
nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3),
nn.BatchNorm1d(128),
nn.ReLU(True),
nn.Conv1d(in_channels=128, out_channels=128, kernel_size=3),
nn.BatchNorm1d(128),
nn.ReLU(True),
nn.Conv1d(in_channels=128, out_channels=self._output_channels, kernel_size=3),
nn.BatchNorm1d(self._output_channels),
nn.ReLU(True),
nn.Conv1d(in_channels=self._output_channels, out_channels=self._output_channels, kernel_size=3)
)
def forward(self, x):
"""
Args:
x: [batch_size, seq_len, skeleton_dim].
"""
    # inputs to the conv model are [batch_size*seq_len, dof, n_joints]
    # reshape the batch from [batch_size, seq_len, dim] accordingly
bs, seq_len, dim = x.size()
dof = dim//self._n_joints
x = x.view(bs*seq_len, dof, self._n_joints)
# [batch_size*seq_len, dof, n_joints]
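    # With the default n_joints=21, the stack of unpadded convolutions in
    # init_model (one kernel-7 layer followed by seven kernel-3 layers)
    # shrinks the joint axis 21 -> 15 -> 13 -> 11 -> 9 -> 7 -> 5 -> 3 -> 1,
    # so the view below is valid exactly when the final length is 1.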
x = self._model(x)
# [batch_size, seq_len, output_channels]
x = x.view(bs, seq_len, self._output_channels)
return x
class Pose1DTemporalEncoder(nn.Module):
def __init__(self, input_channels, output_channels):
super(Pose1DTemporalEncoder, self).__init__()
self._input_channels = input_channels
self._output_channels = output_channels
self.init_model()
def init_model(self):
self._model = nn.Sequential(
nn.Conv1d(
in_channels=self._input_channels, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm1d(32),
nn.ReLU(True),
nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm1d(32),
nn.ReLU(True),
nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.BatchNorm1d(64),
nn.ReLU(True),
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
nn.BatchNorm1d(64),
nn.ReLU(True),
nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm1d(128),
nn.ReLU(True),
nn.Conv1d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm1d(128),
nn.ReLU(True),
nn.Conv1d(in_channels=128, out_channels=self._output_channels, kernel_size=3, padding=1),
nn.BatchNorm1d(self._output_channels),
nn.ReLU(True),
nn.Conv1d(in_channels=self._output_channels, out_channels=self._output_channels, kernel_size=3, padding=1)
)
def forward(self, x):
"""
Args:
x: [batch_size, seq_len, skeleton_dim].
"""
# batch_size, skeleton_dim, seq_len
x = torch.transpose(x, 1,2)
x = self._model(x)
# batch_size, seq_len, skeleton_dim
x = torch.transpose(x, 1, 2)
return x
if __name__ == '__main__':
dof = 9
output_channels = 128
n_joints = 21
seq_len = 49
model = Pose1DTemporalEncoder(input_channels=dof*n_joints, output_channels=output_channels)
inputs = torch.FloatTensor(10, seq_len, dof*n_joints)
X = model(inputs)
print(X.size())
# model = Pose1DEncoder(input_channels=dof, output_channels=output_channels)
# inputs = torch.FloatTensor(10, seq_len, dof*n_joints)
# X = model(inputs)
# print(X.size())
repo: GaitForeMer | file: GaitForeMer-main/models/Transformer.py
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of the Transformer for sequence-to-sequence decoding.
Implementation of the transformer for sequence to sequence prediction as in
[1] and [2].
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import os
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import utils.PositionEncodings as PositionEncodings
import models.TransformerEncoder as Encoder
import models.TransformerDecoder as Decoder
class Transformer(nn.Module):
def __init__(self,
num_encoder_layers=6,
num_decoder_layers=6,
model_dim=256,
num_heads=8,
dim_ffn=2048,
dropout=0.1,
init_fn=utils.normal_init_,
use_query_embedding=False,
pre_normalization=False,
query_selection=False,
target_seq_len=25):
"""Implements the Transformer model for sequence-to-sequence modeling."""
super(Transformer, self).__init__()
self._model_dim = model_dim
self._num_heads = num_heads
self._dim_ffn = dim_ffn
self._dropout = dropout
self._use_query_embedding = use_query_embedding
self._query_selection = query_selection
self._tgt_seq_len = target_seq_len
self._encoder = Encoder.TransformerEncoder(
num_layers=num_encoder_layers,
model_dim=model_dim,
num_heads=num_heads,
dim_ffn=dim_ffn,
dropout=dropout,
init_fn=init_fn,
pre_normalization=pre_normalization
)
self._decoder = Decoder.TransformerDecoder(
num_layers=num_decoder_layers,
model_dim=model_dim,
num_heads=num_heads,
dim_ffn=dim_ffn,
dropout=dropout,
init_fn=init_fn,
use_query_embedding=use_query_embedding,
pre_normalization=pre_normalization
)
if self._query_selection:
self._position_predictor = nn.Linear(self._model_dim, self._tgt_seq_len)
def process_index_selection(self, self_attn, one_to_one_selection=False):
"""Selection of query elments using position predictor from encoder memory.
After prediction a maximum assignement problem is solved to get indices for
each element in the query sequence.
Args:
self_attn: Encoder memory with shape [src_len, batch_size, model_dim]
Returns:
A tuple with two list of i and j matrix entries of m
"""
batch_size = self_attn.size()[1]
# batch_size x src_seq_len x model_dim
in_pos = torch.transpose(self_attn, 0, 1)
    # predict the matrix of similarities
    # batch_size x src_seq_len x tgt_seq_len
    prob_matrix = self._position_predictor(in_pos)
    # apply softmax over the similarities to get probabilities over positions
    # batch_size x src_seq_len x tgt_seq_len
if one_to_one_selection:
soft_matrix = F.softmax(prob_matrix, dim=2)
      # predict assignments in a one-to-one fashion, maximizing the sum of probabilities
indices = [linear_sum_assignment(soft_matrix[i].cpu().detach(), maximize=True)
for i in range(batch_size)
]
else:
      # perform softmax over rows to allow many-targets-to-one-input assignments
soft_matrix = F.softmax(prob_matrix, dim=1)
indices_rows = torch.argmax(soft_matrix, 1)
indices = [(indices_rows[i], list(range(prob_matrix.size()[2])))
for i in range(batch_size)
]
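    # Illustrative example (one_to_one_selection=True): for one batch entry
    # with src_len=3 and tgt_len=2, linear_sum_assignment(..., maximize=True)
    # returns index arrays such as ([0, 2], [1, 0]), i.e. source element 0
    # fills target position 1 and source element 2 fills target position 0.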
return indices, soft_matrix
def forward(self,
source_seq,
target_seq,
encoder_position_encodings=None,
decoder_position_encodings=None,
query_embedding=None,
mask_target_padding=None,
mask_look_ahead=None,
get_attn_weights=False,
query_selection_fn=None,
fold=None,
eval_step=None):
if self._use_query_embedding:
bs = source_seq.size()[1]
query_embedding = query_embedding.unsqueeze(1).repeat(1, bs, 1)
decoder_position_encodings = encoder_position_encodings
memory, enc_weights = self._encoder(source_seq, encoder_position_encodings)
# Save encoder outputs
# if fold is not None:
# encoder_output_dir = 'encoder_outputs'
# if not os.path.exists(f'{encoder_output_dir}f{fold}/'):
# os.makedirs(f'{encoder_output_dir}f{fold}/')
# outpath = f'{encoder_output_dir}f{fold}/{eval_step}.npy'
# encoder_output = memory.detach().cpu().numpy()
# np.save(outpath, encoder_output)
tgt_plain = None
# perform selection from input sequence
if self._query_selection:
indices, prob_matrix = self.process_index_selection(memory)
tgt_plain, target_seq = query_selection_fn(indices)
out_attn, out_weights = self._decoder(
target_seq,
memory,
decoder_position_encodings,
query_embedding=query_embedding,
mask_target_padding=mask_target_padding,
mask_look_ahead=mask_look_ahead,
get_attn_weights=get_attn_weights
)
out_weights_ = None
enc_weights_ = None
prob_matrix_ = None
if get_attn_weights:
out_weights_, enc_weights_ = out_weights, enc_weights
if self._query_selection:
prob_matrix_ = prob_matrix
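    # Returned tuple: per-layer decoder outputs, encoder memory, decoder and
    # encoder attention weights (None unless get_attn_weights), and the
    # (plain target skeletons, probability matrix) pair from query selection.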
return out_attn, memory, out_weights_, enc_weights_, (tgt_plain, prob_matrix_)
repo: GaitForeMer | file: GaitForeMer-main/models/PoseTransformer.py
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of the Transformer for sequence-to-sequence decoding.
Implementation of the transformer for sequence to sequence prediction as in
[1] and [2].
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import os
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import utils.PositionEncodings as PositionEncodings
import models.TransformerEncoder as Encoder
import models.TransformerDecoder as Decoder
from models.Transformer import Transformer
_SOURCE_LENGTH = 110
_TARGET_LENGTH = 55
_POSE_DIM = 54
_PAD_LENGTH = _SOURCE_LENGTH
class PoseTransformer(nn.Module):
"""Implements the sequence-to-sequence Transformer .model for pose prediction."""
def __init__(self,
pose_dim=_POSE_DIM,
model_dim=256,
num_encoder_layers=6,
num_decoder_layers=6,
num_heads=8,
dim_ffn=2048,
dropout=0.1,
target_seq_length=_TARGET_LENGTH,
source_seq_length=_SOURCE_LENGTH,
input_dim=None,
init_fn=utils.xavier_init_,
non_autoregressive=False,
use_query_embedding=False,
pre_normalization=False,
predict_activity=False,
use_memory=False,
num_activities=None,
pose_embedding=None,
pose_decoder=None,
copy_method='uniform_scan',
query_selection=False,
pos_encoding_params=(10000, 1)):
"""Initialization of pose transformers."""
super(PoseTransformer, self).__init__()
self._target_seq_length = target_seq_length
self._source_seq_length = source_seq_length
self._pose_dim = pose_dim
self._input_dim = pose_dim if input_dim is None else input_dim
self._model_dim = model_dim
self._use_query_embedding = use_query_embedding
self._predict_activity = predict_activity
self._num_activities = num_activities
self._num_decoder_layers = num_decoder_layers
self._mlp_dim = model_dim
self._non_autoregressive = non_autoregressive
self._pose_embedding = pose_embedding
self._pose_decoder = pose_decoder
self._query_selection = query_selection
thisname = self.__class__.__name__
self._copy_method = copy_method
self._pos_encoding_params = pos_encoding_params
self._transformer = Transformer(
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
model_dim=model_dim,
num_heads=num_heads,
dim_ffn=dim_ffn,
dropout=dropout,
init_fn=init_fn,
use_query_embedding=use_query_embedding,
pre_normalization=pre_normalization,
query_selection=query_selection,
target_seq_len=target_seq_length
)
t_params = filter(lambda p: p.requires_grad, self._transformer.parameters())
nparams = sum([np.prod(p.size()) for p in t_params])
print('[INFO] ({}) Transformer has {} parameters!'.format(thisname, nparams))
self._pos_encoder = PositionEncodings.PositionEncodings1D(
num_pos_feats=self._model_dim,
temperature=self._pos_encoding_params[0],
alpha=self._pos_encoding_params[1]
)
self._pos_decoder = PositionEncodings.PositionEncodings1D(
num_pos_feats=self._model_dim,
temperature=self._pos_encoding_params[0],
alpha=self._pos_encoding_params[1]
)
# self.init_pose_encoder_decoders(init_fn)
self._use_class_token = False
self.init_position_encodings()
self.init_query_embedding()
if self._use_class_token:
self.init_class_token()
if self._predict_activity:
self._action_head_size = self._model_dim if self._use_class_token \
else self._model_dim*(self._source_seq_length-1)
self._action_head = nn.Sequential(
nn.Linear(self._action_head_size, self._num_activities),
)
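      # The head consumes either the class-token embedding (model_dim) or the
      # flattened encoder memory (model_dim * (source_seq_length - 1)),
      # depending on _use_class_token above.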
def init_query_embedding(self):
"""Initialization of query sequence embedding."""
self._query_embed = nn.Embedding(self._target_seq_length, self._model_dim)
print('[INFO] ({}) Init query embedding!'.format(self.__class__.__name__))
nn.init.xavier_uniform_(self._query_embed.weight.data)
# self._query_embed.weight.data.normal_(0.0, 0.004)
def init_class_token(self):
token = torch.FloatTensor(1, self._model_dim)
print('[INFO] ({}) Init class token!'.format(self.__class__.__name__))
self._class_token = nn.Parameter(token, requires_grad=True)
nn.init.xavier_uniform_(self._class_token.data)
def init_position_encodings(self):
src_len = self._source_seq_length-1
# when using a token we need an extra element in the sequence
if self._use_class_token:
src_len = src_len + 1
encoder_pos_encodings = self._pos_encoder(src_len).view(
src_len, 1, self._model_dim)
decoder_pos_encodings = self._pos_decoder(self._target_seq_length).view(
self._target_seq_length, 1, self._model_dim)
mask_look_ahead = torch.from_numpy(
utils.create_look_ahead_mask(
self._target_seq_length, self._non_autoregressive))
self._encoder_pos_encodings = nn.Parameter(
encoder_pos_encodings, requires_grad=False)
self._decoder_pos_encodings = nn.Parameter(
decoder_pos_encodings, requires_grad=False)
self._mask_look_ahead = nn.Parameter(
mask_look_ahead, requires_grad=False)
def forward(self,
input_pose_seq,
target_pose_seq=None,
mask_target_padding=None,
get_attn_weights=False,
fold=None,
eval_step=None):
"""Performs the forward pass of the pose transformers.
Args:
input_pose_seq: Shape [batch_size, src_sequence_length, dim_pose].
target_pose_seq: Shape [batch_size, tgt_sequence_length, dim_pose].
Returns:
A tensor of the predicted sequence with shape [batch_size,
tgt_sequence_length, dim_pose].
"""
if self.training:
return self.forward_training(
        input_pose_seq, target_pose_seq, mask_target_padding, get_attn_weights)  # no fold passed here; encoder outputs are only saved during eval
# eval forward for non auto regressive type of model
if self._non_autoregressive:
return self.forward_training(
input_pose_seq, target_pose_seq, mask_target_padding, get_attn_weights, fold=fold, eval_step=eval_step)
return self.forward_autoregressive(
input_pose_seq, target_pose_seq, mask_target_padding, get_attn_weights)
def handle_class_token(self, input_pose_seq):
"""
Args:
input_pose_seq: [src_len, batch_size, model_dim]
"""
    # concatenate an extra token for activity prediction as an extra
    # element of the input sequence;
    # the specialized token is not a skeleton
_, B, _ = input_pose_seq.size()
token = self._class_token.squeeze().repeat(1, B, 1)
input_pose_seq = torch.cat([token, input_pose_seq], axis=0)
return input_pose_seq
def handle_copy_query(self, indices, input_pose_seq_):
"""Handles the way queries are generated copying items from the inputs.
Args:
      indices: A list of tuples of length `batch_size`. Each tuple has the
        form (input_list, target_list), where input_list contains indices of
        elements in the input to be copied to the elements in the target
        specified by target_list.
input_pose_seq_: Source skeleton sequence [batch_size, src_len, pose_dim].
Returns:
A tuple with first elements the decoder input skeletons with shape
[tgt_len, batch_size, skeleton_dim], and the skeleton embeddings of the
input sequence with shape [tgt_len, batch_size, pose_dim].
"""
batch_size = input_pose_seq_.size()[0]
decoder_inputs = torch.FloatTensor(
batch_size,
self._target_seq_length,
self._pose_dim
).to(self._decoder_pos_encodings.device)
for i in range(batch_size):
for j in range(self._target_seq_length):
src_idx, tgt_idx = indices[i][0][j], indices[i][1][j]
decoder_inputs[i, tgt_idx] = input_pose_seq_[i, src_idx]
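    # Note (illustrative): the nested loops above could typically be replaced
    # by a batched torch.gather over the source indices; they are kept explicit
    # here since target_seq_length is small.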
dec_inputs_encode = self._pose_embedding(decoder_inputs)
return torch.transpose(decoder_inputs, 0, 1), \
torch.transpose(dec_inputs_encode, 0, 1)
def forward_training(self,
input_pose_seq_,
target_pose_seq_,
mask_target_padding,
get_attn_weights=False,
fold=None,
eval_step=None):
"""Compute forward pass for training and non recursive inference.
Args:
input_pose_seq_: Source sequence [batch_size, src_len, skeleton_dim].
target_pose_seq_: Query target sequence [batch_size, tgt_len, skeleton_dim].
mask_target_padding: Mask for target masking with ones where elements
belong to the padding elements of shape [batch_size, tgt_len, skeleton_dim].
get_attn_weights: Boolean to indicate if attention weights should be returned.
Returns:
"""
# 1) Encode the sequence with given pose encoder
# [batch_size, sequence_length, model_dim]
input_pose_seq = input_pose_seq_
target_pose_seq = target_pose_seq_
if self._pose_embedding is not None:
input_pose_seq = self._pose_embedding(input_pose_seq)
target_pose_seq = self._pose_embedding(target_pose_seq)
# 2) compute the look-ahead mask and the positional encodings
# [sequence_length, batch_size, model_dim]
input_pose_seq = torch.transpose(input_pose_seq, 0, 1)
target_pose_seq = torch.transpose(target_pose_seq, 0, 1)
def query_copy_fn(indices):
return self.handle_copy_query(indices, input_pose_seq_)
    # concatenate extra token for activity prediction as an extra element of
    # the input sequence, i.e. the specialized token is not a skeleton
if self._use_class_token:
input_pose_seq = self.handle_class_token(input_pose_seq)
# 3) compute the attention weights using the transformer
# [target_sequence_length, batch_size, model_dim]
attn_output, memory, attn_weights, enc_weights, mat = self._transformer(
input_pose_seq,
target_pose_seq,
query_embedding=self._query_embed.weight,
encoder_position_encodings=self._encoder_pos_encodings,
decoder_position_encodings=self._decoder_pos_encodings,
mask_look_ahead=self._mask_look_ahead,
mask_target_padding=mask_target_padding,
get_attn_weights=get_attn_weights,
query_selection_fn=query_copy_fn,
fold=fold,
eval_step=eval_step
)
end = self._input_dim if self._input_dim == self._pose_dim else self._pose_dim
out_sequence = []
target_pose_seq_ = mat[0] if self._query_selection else \
torch.transpose(target_pose_seq_, 0, 1)
# 4) decode sequence with pose decoder. The decoding process is time
# independent. It means non-autoregressive or parallel decoding.
# [batch_size, target_sequence_length, pose_dim]
for l in range(self._num_decoder_layers):
# [target_seq_length*batch_size, pose_dim]
out_sequence_ = self._pose_decoder(
attn_output[l].view(-1, self._model_dim))
# [target_seq_length, batch_size, pose_dim]
out_sequence_ = out_sequence_.view(
self._target_seq_length, -1, self._pose_dim)
# apply residual connection between target query and predicted pose
# [tgt_seq_len, batch_size, pose_dim]
out_sequence_ = out_sequence_ + target_pose_seq_[:, :, 0:end]
# [batch_size, tgt_seq_len, pose_dim]
out_sequence_ = torch.transpose(out_sequence_, 0, 1)
out_sequence.append(out_sequence_)
if self._predict_activity:
out_class = self.predict_activity(attn_output, memory)
return out_sequence, out_class, attn_weights, enc_weights, mat
return out_sequence, attn_weights, enc_weights, mat
def predict_activity(self, attn_output, memory):
"""Performs activity prediction either from memory or class token.
attn_output: Encoder memory. Shape [src_seq_len, batch_size, model_dim].
"""
# [batch_size, src_len, model_dim]
in_act = torch.transpose(memory, 0, 1)
# use a single specialized token for predicting activity
# the specialized token is in the first element of the sequence
if self._use_class_token:
# [batch_size, model_dim]
token = in_act[:, 0]
actions = self._action_head(token)
return [actions]
# use all the input sequence attention to predict activity
# [batch_size, src_len*model_dim]
in_act = torch.reshape(in_act, (-1, self._action_head_size))
actions = self._action_head(in_act)
return [actions]
#out_class = []
#for l in range(self._num_decoder_layers):
# in_act = torch.transpose(attn_output[l], 0, 1)
# in_act = torch.reshape(in_act, (-1, self._action_head_size))
# actions = self._action_head(in_act)
# out_class.append(actions)
#return out_class
def forward_autoregressive(self,
input_pose_seq,
target_pose_seq=None,
mask_target_padding=None,
get_attn_weights=False):
"""Compute forward pass for auto-regressive inferece in test time."""
thisdevice = self._encoder_pos_encodings.device
# the first query pose is the first in the target
prev_target = target_pose_seq[:, 0, :]
    # 1) Encode using the pose embedding
if self._pose_embedding is not None:
input_pose_seq = self._pose_embedding(input_pose_seq)
target_pose_seq = self._pose_embedding(target_pose_seq)
# [batch_size, 1, model_dim]
target_seq = target_pose_seq[:, 0:1, :]
# 2) compute the look-ahead mask and the positional encodings
# [sequence_length, batch_size, model_dim]
input_pose_seq = torch.transpose(input_pose_seq, 0, 1)
target_seq = torch.transpose(target_seq, 0, 1)
# concatenate extra token for activity prediction as an extra
if self._use_class_token:
input_pose_seq = self.handle_class_token(input_pose_seq)
    # 3) use auto-regression to compute the predicted set of tokens
memory, enc_attn_weights = self._transformer._encoder(
input_pose_seq, self._encoder_pos_encodings)
    # get only the first target pose; in theory there should be a single target pose at test time
batch_size = memory.size()[1]
out_pred_seq = torch.FloatTensor(
batch_size, self._target_seq_length, self._pose_dim).to(thisdevice)
for t in range(self._target_seq_length):
position_encodings = self._pos_decoder(t+1).view(
t+1, 1, self._model_dim).to(thisdevice)
mask_look_ahead = torch.from_numpy(
utils.create_look_ahead_mask(t+1)).to(thisdevice)
# a list of length n_decoder_layers with elements of
# shape [t, batch_size, model_dim]
out_attn, out_weights = self._transformer._decoder(
target_seq,
memory,
position_encodings,
mask_look_ahead=mask_look_ahead
)
      # take only the last predicted token, decode it to get the pose and
# then encode the pose. shape [1*batch_size, pose_dim]
# for 8 seeds of evaluation (batch_size)
pred_pose = self._pose_decoder(
out_attn[-1][t:(t+1), :, :].view(-1, self._model_dim))
# apply residual between last target pose and recently generated pose
if self._pose_dim == self._input_dim:
pred_pose = pred_pose + prev_target
else:
prev_target[:, 0:self._pose_dim] = pred_pose + prev_target[:,0:self._pose_dim]
pred_pose = prev_target
prev_target = pred_pose
out_pred_seq[:, t, :] = pred_pose.view(-1, self._input_dim)[:, 0:self._pose_dim]
if self._pose_embedding is not None:
pose_code = self._pose_embedding(pred_pose)
# [1, batch_size, model_dim]
pose_code = pose_code.view(-1, batch_size, self._model_dim)
# [t+1, batch_size, model_dim]
target_seq = torch.cat([target_seq, pose_code], axis=0)
    # predict the activity either 1) from the last attention output, which
    # contains the whole decoded sequence, or 2) from the encoder memory
    if self._predict_activity:
      actions = self.predict_activity(out_attn, memory)
      return [out_pred_seq], [actions[-1]], None, None
    return [out_pred_seq]
def model_factory(params, pose_embedding_fn, pose_decoder_fn):
init_fn = utils.normal_init_ \
if params['init_fn'] == 'normal_init' else utils.xavier_init_
return PoseTransformer(
pose_dim=params['pose_dim'],
input_dim=params['input_dim'],
model_dim=params['model_dim'],
num_encoder_layers=params['num_encoder_layers'],
num_decoder_layers=params['num_decoder_layers'],
num_heads=params['num_heads'],
dim_ffn=params['dim_ffn'],
dropout=params['dropout'],
target_seq_length=params['target_seq_len'],
source_seq_length=params['source_seq_len'],
init_fn=init_fn,
non_autoregressive=params['non_autoregressive'],
use_query_embedding=params['use_query_embedding'],
pre_normalization=params['pre_normalization'],
predict_activity=params['predict_activity'],
num_activities=params['num_activities'],
use_memory=params['use_memory'],
pose_embedding=pose_embedding_fn(params),
pose_decoder=pose_decoder_fn(params),
query_selection=params['query_selection'],
pos_encoding_params=(params['pos_enc_beta'], params['pos_enc_alpha'])
)
if __name__ == '__main__':
transformer = PoseTransformer(model_dim=_POSE_DIM, num_heads=6)
transformer.eval()
batch_size = 8
model_dim = 256
tgt_seq = torch.FloatTensor(batch_size, _TARGET_LENGTH, _POSE_DIM).fill_(1)
src_seq = torch.FloatTensor(batch_size, _SOURCE_LENGTH-1, _POSE_DIM).fill_(1)
outputs = transformer(src_seq, tgt_seq)
print(outputs[-1].size())
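  # Illustrative extension (not in the original file): count the trainable
  # parameters of the model built above using standard torch APIs.
  n_params = sum(p.numel() for p in transformer.parameters() if p.requires_grad)
  print('[INFO] PoseTransformer has {} trainable params.'.format(n_params))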
| 19,154 | 38.658385 | 120 |
py
|
GaitForeMer
|
GaitForeMer-main/models/seq2seq_model.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Sequence to sequence model for human motion prediction.
The model has been implemented according to [1] and adapted from its pytorch
version [2]. The reimplementation aims to reduce code clutter and serves
learning purposes.
[1]
[2]
"""
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Seq2SeqModel(nn.Module):
"""Sequence to sequence model."""
def __init__(
self,
architecture='tied',
source_seq_len=50,
target_seq_len=25,
rnn_size=1024, # hidden recurrent layer size
num_layers=1,
max_gradient_norm=5,
batch_size=16,
learning_rate=0.005,
learning_rate_decay_factor=0.95,
loss_to_use='sampling_based',
number_of_actions=1,
one_hot=True,
residual_velocities=False,
dropout=0.0,
dtype=torch.float32,
device=None):
"""
Args:
      architecture: [basic, tied] whether to tie the encoder and decoder.
      source_seq_len: length of the input sequence.
      target_seq_len: length of the target sequence.
rnn_size: number of units in the rnn.
num_layers: number of rnns to stack.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
loss_to_use: [supervised, sampling_based]. Whether to use ground truth in
each timestep to compute the loss after decoding, or to feed back the
prediction from the previous time-step.
number_of_actions: number of classes we have.
one_hot: whether to use one_hot encoding during train/test (sup models).
residual_velocities: whether to use a residual connection that models velocities.
dtype: the data type to use to store internal variables.
"""
super(Seq2SeqModel, self).__init__()
self.HUMAN_SIZE = 54
self.input_size = self.HUMAN_SIZE + number_of_actions if one_hot else self.HUMAN_SIZE
print( "One hot is ", one_hot )
print( "Input size is %d" % self.input_size )
# Summary writers for train and test runs
self.source_seq_len = source_seq_len
self.target_seq_len = target_seq_len
self.rnn_size = rnn_size
self.batch_size = batch_size
self.dropout = dropout
# === Create the RNN that will keep the state ===
print('rnn_size = {0}'.format( rnn_size ))
self.cell = torch.nn.GRUCell(self.input_size, self.rnn_size)
# self.cell2 = torch.nn.GRUCell(self.rnn_size, self.rnn_size)
self.fc1 = nn.Linear(self.rnn_size, self.input_size)
def forward(self, encoder_inputs, decoder_inputs):
def loop_function(prev, i):
return prev
batchsize = encoder_inputs.size()[0]
encoder_inputs = torch.transpose(encoder_inputs, 0, 1)
decoder_inputs = torch.transpose(decoder_inputs, 0, 1)
state = torch.zeros(batchsize, self.rnn_size).\
to(encoder_inputs.get_device())
# state2 = torch.zeros(batchsize, self.rnn_size)
# if use_cuda:
# state = state.cuda()
# # state2 = state2.cuda()
for i in range(self.source_seq_len-1):
state = self.cell(encoder_inputs[i], state)
# state2 = self.cell2(state, state2)
state = F.dropout(state, self.dropout, training=self.training)
# if use_cuda:
# state = state.cuda()
## state2 = state2.cuda()
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
# loop function is trained as in auto regressive
if loop_function is not None and prev is not None:
inp = loop_function(prev, i)
inp = inp.detach()
state = self.cell(inp, state)
# state2 = self.cell2(state, state2)
# output = inp + self.fc1(state2)
# state = F.dropout(state, self.dropout, training=self.training)
output = inp + self.fc1(F.dropout(state, self.dropout, training=self.training))
outputs.append(output.view([1, batchsize, self.input_size]))
if loop_function is not None:
prev = output
# return outputs, state
outputs = torch.cat(outputs, 0)
return torch.transpose(outputs, 0, 1)
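if __name__ == '__main__':
  # Minimal smoke test (illustrative sketch, not part of the original file).
  # The forward pass calls encoder_inputs.get_device(), so CUDA inputs are
  # assumed here.
  if torch.cuda.is_available():
    model = Seq2SeqModel(one_hot=False).cuda()
    enc = torch.ones(4, model.source_seq_len - 1, model.input_size).cuda()
    dec = torch.ones(4, model.target_seq_len, model.input_size).cuda()
    # expected output shape: [4, target_seq_len, 54]
    print(model(enc, dec).size())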
| 5,425 | 34.697368 | 89 |
py
|
GaitForeMer
|
GaitForeMer-main/models/PoseEncoderDecoder.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Definition of pose encoder and encoder embeddings and model factory."""
import numpy as np
import os
import sys
import torch
import torch.nn as nn
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import models.PoseGCN as GCN
import models.Conv1DEncoder as Conv1DEncoder
def pose_encoder_mlp(params):
# These two encoders should be experimented with a graph NN and
# a prior based pose decoder using also the graph
init_fn = utils.normal_init_ \
if params['init_fn'] == 'normal_init' else utils.xavier_init_
pose_embedding = nn.Sequential(
nn.Linear(params['input_dim'], params['model_dim']),
nn.Dropout(0.1)
)
utils.weight_init(pose_embedding, init_fn_=init_fn)
return pose_embedding
def pose_decoder_mlp(params):
init_fn = utils.normal_init_ \
if params['init_fn'] == 'normal_init' else utils.xavier_init_
pose_decoder = nn.Linear(params['model_dim'], params['pose_dim'])
utils.weight_init(pose_decoder, init_fn_=init_fn)
return pose_decoder
def pose_decoder_gcn(params):
decoder = GCN.PoseGCN(
input_features=params['model_dim'],
output_features = 9 if params['pose_format'] == 'rotmat' else 3,
model_dim=params['model_dim'],
output_nodes=params['n_joints'],
p_dropout=params['dropout'],
num_stage=1
)
return decoder
def pose_encoder_gcn(params):
encoder = GCN.SimpleEncoder(
n_nodes=params['n_joints'],
input_features=9 if params['pose_format'] == 'rotmat' else 3,
#n_nodes=params['pose_dim'],
#input_features=1,
model_dim=params['model_dim'],
p_dropout=params['dropout']
)
return encoder
def pose_encoder_conv1d(params):
encoder = Conv1DEncoder.Pose1DEncoder(
input_channels=9 if params['pose_format'] == 'rotmat' else 3,
output_channels=params['model_dim'],
n_joints=params['n_joints']
)
return encoder
def pose_encoder_conv1dtemporal(params):
dof = 9 if params['pose_format'] == 'rotmat' else 3
encoder = Conv1DEncoder.Pose1DTemporalEncoder(
input_channels=dof*params['n_joints'],
output_channels=params['model_dim']
)
return encoder
def select_pose_encoder_decoder_fn(params):
if params['pose_embedding_type'].lower() == 'simple':
return pose_encoder_mlp, pose_decoder_mlp
if params['pose_embedding_type'].lower() == 'conv1d_enc':
return pose_encoder_conv1d, pose_decoder_mlp
if params['pose_embedding_type'].lower() == 'convtemp_enc':
return pose_encoder_conv1dtemporal, pose_decoder_mlp
if params['pose_embedding_type'].lower() == 'gcn_dec':
return pose_encoder_mlp, pose_decoder_gcn
if params['pose_embedding_type'].lower() == 'gcn_enc':
return pose_encoder_gcn, pose_decoder_mlp
if params['pose_embedding_type'].lower() == 'gcn_full':
return pose_encoder_gcn, pose_decoder_gcn
  if params['pose_embedding_type'].lower() == 'vae':
    # NOTE: pose_encoder_vae is not defined in this module; selecting 'vae'
    # requires that encoder to be provided elsewhere.
    return pose_encoder_vae, pose_decoder_mlp
  raise ValueError('Unknown pose embedding {}'.format(params['pose_embedding_type']))
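if __name__ == '__main__':
  # Illustrative usage (not in the original file): build the simple MLP
  # embedding/decoder pair from a minimal params dict whose keys mirror the
  # ones read by the factories above.
  params = {
      'init_fn': 'xavier_init',
      'input_dim': 54,
      'model_dim': 128,
      'pose_dim': 54,
      'pose_embedding_type': 'simple',
  }
  embedding_fn, decoder_fn = select_pose_encoder_decoder_fn(params)
  embedding, decoder = embedding_fn(params), decoder_fn(params)
  x = torch.zeros(2, 10, params['input_dim'])
  # expected output shape: [2, 10, 54]
  print(decoder(embedding(x)).size())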
| 4,120 | 32.504065 | 87 |
py
|
GaitForeMer
|
GaitForeMer-main/models/TransformerEncoder.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of Transformer encoder and encoder layer with self attention.
Implementation of the encoder layer as in [1] and [2] for sequence to
sequence modeling.
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import sys
import os
import torch
import torch.nn as nn
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
class EncoderLayer(nn.Module):
"""Implements the transformer encoder Layer."""
def __init__(self,
model_dim=256,
num_heads=8,
dim_ffn=2048,
dropout=0.1,
init_fn=utils.normal_init_,
pre_normalization=False):
"""Encoder layer initialization.
Args:
      model_dim: Dimension of the model embeddings.
      num_heads: Number of attention heads.
      dim_ffn: Hidden dimension of the point-wise feed-forward network.
      dropout: Dropout probability.
"""
super(EncoderLayer, self).__init__()
self._model_dim = model_dim
self._num_heads = num_heads
self._dim_ffn = dim_ffn
self._dropout = dropout
self._pre_normalization = pre_normalization
self._self_attn = nn.MultiheadAttention(model_dim, num_heads, dropout)
self._relu = nn.ReLU()
self._dropout_layer = nn.Dropout(self._dropout)
self._linear1 = nn.Linear(model_dim, self._dim_ffn)
self._linear2 = nn.Linear(self._dim_ffn, self._model_dim)
self._norm1 = nn.LayerNorm(model_dim, eps=1e-5)
self._norm2 = nn.LayerNorm(model_dim, eps=1e-5)
utils.weight_init(self._linear1, init_fn_=init_fn)
utils.weight_init(self._linear2, init_fn_=init_fn)
def forward(self, source_seq, pos_encodings):
"""Computes forward pass according.
Args:
source_seq: [sequence_length, batch_size, model_dim].
pos_encodings: [sequence_length, model_dim].
Returns:
Tensor of shape [sequence_length, batch_size, model_dim].
"""
if self._pre_normalization:
return self.forward_pre(source_seq, pos_encodings)
return self.forward_post(source_seq, pos_encodings)
def forward_post(self, source_seq, pos_encodings):
"""Computes decoder layer forward pass with pre normalization.
Args:
source_seq: [sequence_length, batch_size, model_dim].
pos_encodings: [sequence_length, model_dim].
Returns:
Tensor of shape [sequence_length, batch_size, model_dim].
"""
# add positional encodings to the input sequence
# for self attention query is the same as key
query = source_seq + pos_encodings
key = query
value = source_seq
attn_output, attn_weights = self._self_attn(
query,
key,
value,
need_weights=True
)
norm_attn = self._dropout_layer(attn_output) + source_seq
norm_attn = self._norm1(norm_attn)
output = self._linear1(norm_attn)
output = self._relu(output)
output = self._dropout_layer(output)
output = self._linear2(output)
output = self._dropout_layer(output) + norm_attn
output = self._norm2(output)
return output, attn_weights
def forward_pre(self, source_seq_, pos_encodings):
"""Computes decoder layer forward pass with pre normalization.
Args:
source_seq: [sequence_length, batch_size, model_dim].
pos_encodings: [sequence_length, model_dim].
Returns:
Tensor of shape [sequence_length, batch_size, model_dim].
"""
# add positional encodings to the input sequence
# for self attention query is the same as key
source_seq = self._norm1(source_seq_)
query = source_seq + pos_encodings
key = query
value = source_seq
attn_output, attn_weights = self._self_attn(
query,
key,
value,
need_weights=True
)
norm_attn_ = self._dropout_layer(attn_output) + source_seq_
norm_attn = self._norm2(norm_attn_)
output = self._linear1(norm_attn)
output = self._relu(output)
output = self._dropout_layer(output)
output = self._linear2(output)
output = self._dropout_layer(output) + norm_attn_
return output, attn_weights
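# Illustrative note (not in the original file): the two code paths differ only
# in where LayerNorm is applied. Schematically, for each sub-block with input x:
#   post-norm: y = LayerNorm(x + Sublayer(x))
#   pre-norm:  y = x + Sublayer(LayerNorm(x))
# Pre-normalization is commonly reported to stabilize training of deeper
# transformer stacks.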
class TransformerEncoder(nn.Module):
def __init__(self,
num_layers=6,
model_dim=256,
num_heads=8,
dim_ffn=2048,
dropout=0.1,
init_fn=utils.normal_init_,
pre_normalization=False):
super(TransformerEncoder, self).__init__()
"""Transforme encoder initialization."""
self._num_layers = num_layers
self._model_dim = model_dim
self._num_heads = num_heads
self._dim_ffn = dim_ffn
self._dropout = dropout
# self._norm = norm
self._pre_normalization = pre_normalization
self._encoder_stack = self.init_encoder_stack(init_fn)
def init_encoder_stack(self, init_fn):
"""Create the stack of encoder layers."""
stack = nn.ModuleList()
for s in range(self._num_layers):
layer = EncoderLayer(
model_dim=self._model_dim,
num_heads=self._num_heads,
dim_ffn=self._dim_ffn,
dropout=self._dropout,
init_fn=init_fn,
pre_normalization=self._pre_normalization
)
stack.append(layer)
return stack
def forward(self, input_sequence, pos_encodings):
"""Computes decoder forward pass.
Args:
source_seq: [sequence_length, batch_size, model_dim].
pos_encodings: [sequence_length, model_dim].
Returns:
Tensor of shape [sequence_length, batch_size, model_dim].
"""
outputs = input_sequence
for l in range(self._num_layers):
outputs, attn_weights = self._encoder_stack[l](outputs, pos_encodings)
# if self._norm:
# outputs = self._norm(outputs)
return outputs, attn_weights
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
seq_length = 50
pos_encodings = torch.FloatTensor(seq_length, 1, 256).uniform_(0,1)
seq = torch.FloatTensor(seq_length, 8, 256).fill_(1.0)
pos_encodings = pos_encodings.to(device)
seq = seq.to(device)
encoder = TransformerEncoder(num_layers=6)
encoder.to(device)
encoder.eval()
  # the encoder returns a (outputs, attention_weights) tuple
  out, attn_weights = encoder(seq, pos_encodings)
  print(out.size())
| 7,169 | 28.628099 | 79 |
py
|
GaitForeMer
|
GaitForeMer-main/models/TransformerDecoder.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of Transformer decoder and decoder layer with self attention.
Implementation of the decoder layer as in [1] and [2] for sequence to
sequence modeling.
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import sys
import os
import torch
import torch.nn as nn
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
class DecoderLayer(nn.Module):
"""Implements the transformer decoder Layer."""
def __init__(self,
model_dim=256,
num_heads=8,
dim_ffn=2048,
dropout=0.1,
init_fn=utils.normal_init_,
pre_normalization=False,
use_query_embedding=False):
"""Decoder layer initialization.
Args:
      model_dim: Dimension of the model embeddings.
      num_heads: Number of attention heads.
      dim_ffn: Hidden dimension of the point-wise feed-forward network.
      dropout: Dropout probability.
"""
super(DecoderLayer, self).__init__()
self._model_dim = model_dim
self._num_heads = num_heads
self._dim_ffn = dim_ffn
self._dropout = dropout
self._pre_normalization = pre_normalization
self._use_query_embedding = use_query_embedding
self._self_attn = nn.MultiheadAttention(
model_dim, num_heads, dropout=dropout
)
self._multihead_attn = nn.MultiheadAttention(
model_dim, num_heads, dropout=dropout
)
# the so-called point-wise network
self._linear1 = nn.Linear(model_dim, dim_ffn)
self._linear2 = nn.Linear(dim_ffn, model_dim)
self._relu = nn.ReLU()
self._norm1 = nn.LayerNorm(model_dim)
self._norm2 = nn.LayerNorm(model_dim)
self._norm3 = nn.LayerNorm(model_dim)
self._dropout1 = nn.Dropout(dropout)
self._dropout2 = nn.Dropout(dropout)
self._dropout3 = nn.Dropout(dropout)
self._dropout4 = nn.Dropout(dropout)
utils.weight_init(self._linear1, init_fn_=init_fn)
utils.weight_init(self._linear2, init_fn_=init_fn)
self._forward_fn = self.forward_pre if pre_normalization else self.forward_post
def forward(self,
target_seq,
memory,
pos_encodings,
query_embedding=None,
mask_look_ahead=None,
mask_target_padding=None):
"""Forward pass of the layer.
Args:
target_seq: [target_seq_length, batch_size, model_dim]
memory: [source_seq_length, batch_size, model_dim]
      mask_look_ahead: Look-ahead mask [target_seq_length, target_seq_length].
      mask_target_padding: Padding mask [batch_size, target_seq_length].
"""
return self._forward_fn(
target_seq,
memory,
pos_encodings,
query_embedding=query_embedding,
mask_look_ahead=mask_look_ahead,
mask_target_padding=mask_target_padding
)
def handle_query_embedding(self, sequence, embedding):
"""Handle """
if self._use_query_embedding:
return sequence + embedding
return sequence
def forward_post(self,
target_seq,
memory,
pos_encodings,
query_embedding=None,
mask_look_ahead=None,
mask_target_padding=None):
"""Forward pass of the layer with post normalization.
Args:
target_seq: [target_seq_length, batch_size, model_dim]
memory: [source_seq_length, batch_size, model_dim]
      mask_look_ahead: Look-ahead mask [target_seq_length, target_seq_length].
      mask_target_padding: Padding mask [batch_size, target_seq_length].
"""
# 1) Compute self attention with current sequence of inferred tokens
# query is the same as key for self attention
# [batch_size, seq_length, model_dim]
if self._use_query_embedding:
q = k = v = target_seq + query_embedding
else:
q = k = v = target_seq + pos_encodings
self_attn, self_attn_weights = self._self_attn(
query=q, key=k, value=v, #target_seq,
attn_mask=mask_look_ahead,
key_padding_mask=mask_target_padding
)
self_attn = self._dropout1(self_attn)
out_self_attn = self._norm1(self_attn + target_seq)
    # 2) Attend the encoder's memory given the computed self attention
# [batch_size, seq_length, model_dim]
attn, attn_weights = self._multihead_attn(
query=self.handle_query_embedding(out_self_attn, query_embedding),
key=self.handle_query_embedding(memory, pos_encodings),
value=memory)
attn = self._dropout2(attn)
out_attn = self._norm2(attn + out_self_attn)
    # 3) Compute pointwise embedding by expanding and projecting + dropout
ffn_output = self._linear1(out_attn)
ffn_output = self._relu(ffn_output)
ffn_output = self._dropout4(ffn_output)
ffn_output = self._linear2(ffn_output)
# 4) Compute residual connection as final output
ffn_output = self._dropout3(ffn_output)
outputs = self._norm3(ffn_output + out_attn)
return outputs, self_attn_weights, attn_weights
def forward_pre(self,
target_seq_,
memory,
pos_encodings,
query_embedding=None,
mask_look_ahead=None,
mask_target_padding=None):
"""Forward pass of the layer with pre normalization.
Args:
target_seq: [target_seq_length, batch_size, model_dim]
memory: [source_seq_length, batch_size, model_dim]
      mask_look_ahead: Look-ahead mask [target_seq_length, target_seq_length].
      mask_target_padding: Padding mask [batch_size, target_seq_length].
"""
target_seq = self._norm1(target_seq_)
# 1) Compute self attention with current sequence of inferred tokens
# query is the same as key for self attention
# [batch_size, seq_length, model_dim]
if self._use_query_embedding:
      # when using only the query embedding, follow DETR [2]: the target is
      # initialized with zeros so attention relies on the query embeddings alone
q = k = target_seq + query_embedding
v = target_seq
else:
q = k = v = target_seq + pos_encodings
self_attn, self_attn_weights = self._self_attn(
query=q, key=k, value=v,
attn_mask=mask_look_ahead,
key_padding_mask=mask_target_padding
)
self_attn = self._dropout1(self_attn)
out_self_attn = self._norm2(self_attn + target_seq_)
    # 2) Attend the encoder's memory given the computed self attention
# [batch_size, seq_length, model_dim]
attn, attn_weights = self._multihead_attn(
query=self.handle_query_embedding(out_self_attn, query_embedding),
key=self.handle_query_embedding(memory, pos_encodings),
value=memory)
attn = self._dropout2(attn)
out_attn = self._norm3(attn + out_self_attn)
    # 3) Compute pointwise embedding by expanding and projecting + dropout
ffn_output = self._linear1(out_attn)
ffn_output = self._relu(ffn_output)
ffn_output = self._dropout4(ffn_output)
ffn_output = self._linear2(ffn_output)
# 4) Compute residual connection as final output
ffn_output = self._dropout3(ffn_output)
return ffn_output, self_attn_weights, attn_weights
class TransformerDecoder(nn.Module):
"""Transformer decoder module."""
def __init__(self,
num_layers=6,
model_dim=256,
num_heads=8,
dim_ffn=2048,
dropout=0.1,
init_fn=utils.normal_init_,
pre_normalization=False,
use_query_embedding=False):
super(TransformerDecoder, self).__init__()
self._model_dim = model_dim
self._num_heads = num_heads
self._dim_ffn = dim_ffn
self._dropout = dropout
self._num_layers = num_layers
self._use_query_embedding = use_query_embedding
self._pre_normalization = pre_normalization
self._decoder_stack = self.init_decoder_stack(init_fn)
def init_decoder_stack(self, init_fn):
stack = nn.ModuleList()
for s in range(self._num_layers):
layer = DecoderLayer(
model_dim=self._model_dim,
num_heads=self._num_heads,
dim_ffn=self._dim_ffn,
dropout=self._dropout,
init_fn=init_fn,
pre_normalization=self._pre_normalization,
use_query_embedding=self._use_query_embedding
)
stack.append(layer)
return stack
def forward(self,
target_seq,
memory,
pos_encodings,
query_embedding=None,
mask_target_padding=None,
mask_look_ahead=None,
get_attn_weights=False):
"""Computes forward pass of decoder.
Args:
target_seq: [target_sequence_length, batch_size, model_dim].
memory: [source_sequence_length, batch_size, model_dim].
pos_encodings: [target_seq_length, model_dim].
      mask_look_ahead: [target_seq_length, target_seq_length].
Returns:
      A list with the decoded attention of each layer, each entry of shape
      [target_sequence_length, batch_size, model_dim], plus the list of
      per-layer attention weights when get_attn_weights is True.
"""
seq_length = target_seq.size()[0]
output_list = []
attn_weights_list = [] if get_attn_weights else None
outputs = torch.zeros_like(target_seq) if self._use_query_embedding else target_seq
for l in range(self._num_layers):
outputs, self_attn_weights, attn_weights = self._decoder_stack[l](
outputs, memory,
pos_encodings=pos_encodings,
query_embedding=query_embedding,
mask_target_padding=mask_target_padding,
mask_look_ahead=mask_look_ahead
)
if get_attn_weights:
attn_weights_list.append(attn_weights)
output_list.append(outputs)
return output_list, attn_weights_list
if __name__ == '__main__':
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
seq_length = 55
batch_size = 8
model_dim = 256
tgt_seq = torch.FloatTensor(seq_length, batch_size, model_dim).fill_(1)
memory = torch.FloatTensor(seq_length, batch_size, model_dim).uniform_(0, 1)
mask_look_ahead = utils.create_look_ahead_mask(seq_length)
mask_look_ahead = torch.from_numpy(mask_look_ahead)
encodings = torch.FloatTensor(seq_length, 1, model_dim).uniform_(0,1)
decoder = TransformerDecoder()
  # the decoder returns (per-layer outputs, per-layer attention weights)
  outputs, attn_weights = decoder(tgt_seq, memory, encodings, mask_look_ahead=mask_look_ahead)
  print(outputs[-1].size())
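  # Illustrative extension (not in the original file): request the per-layer
  # cross-attention maps as well; nn.MultiheadAttention averages them over heads.
  outputs, attn_weights = decoder(
      tgt_seq, memory, encodings,
      mask_look_ahead=mask_look_ahead, get_attn_weights=True)
  # expected: 6 layers; attention of shape [batch, tgt_len, src_len]
  print(len(outputs), attn_weights[-1].size())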
| 11,059 | 31.818991 | 87 |
py
|
GaitForeMer
|
GaitForeMer-main/models/__init__.py
| 0 | 0 | 0 |
py
|
|
GaitForeMer
|
GaitForeMer-main/models/PoseGCN.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Graph Convolutional Neural Network implementation.
Code adapted from [1].
[1] https://github.com/wei-mao-2019/HisRepItself
[2] https://github.com/tkipf/gcn/blob/92600c39797c2bfb61a508e52b88fb554df30177/gcn/layers.py#L132
"""
import os
import sys
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
import math
import numpy as np
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
class GraphConvolution(nn.Module):
"""Implements graph convolutions."""
def __init__(self, in_features, out_features, output_nodes=48, bias=False):
"""Constructor.
    The graph convolutions can be defined as \sigma(AHW), where A is the
    adjacency matrix, H is the feature representation from the previous layer
    and W is the weight matrix of the current layer. The dimensions of these
    matrices are A\in R^{NxN}, H\in R^{NxM} and W\in R^{MxO}, where
- N is the number of nodes
- M is the number of input features per node
- O is the number of output features per node
Args:
in_features: Number of input features per node.
out_features: Number of output features per node.
output_nodes: Number of nodes in the graph.
"""
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self._output_nodes = output_nodes
# W\in R^{MxO}
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
# A\in R^{NxN}
self.att = Parameter(torch.FloatTensor(output_nodes, output_nodes))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.att.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
"""Forward pass.
Args:
x: [batch_size, n_nodes, input_features]
Returns:
Feature representation computed from inputs.
Shape is [batch_size, n_nodes, output_features].
"""
# [batch_size, input_dim, output_features]
# HxW = {NxM}x{MxO} = {NxO}
support = torch.matmul(x, self.weight)
# [batch_size, n_nodes, output_features]
# = {NxN}x{NxO} = {NxO}
output = torch.matmul(self.att, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
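# Shape walk-through (illustrative, not in the original file): with N nodes,
# M input features and O output features per node,
#   support = x @ weight      # [B, N, M] x [M, O] -> [B, N, O]
#   output  = att @ support   # [N, N] x [B, N, O] -> [B, N, O]
# i.e. the learned adjacency `att` mixes node features after the per-node
# linear projection, matching the \sigma(AHW) form in the docstring above.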
class GC_Block(nn.Module):
"""Residual block with graph convolutions.
The implementation uses the same number of input features for outputs.
"""
def __init__(self, in_features, p_dropout, output_nodes=48, bias=False):
"""Constructor.
Args:
in_features: Number of input and output features.
p_dropout: Dropout used in the layers.
output_nodes: Number of output nodes in the graph.
"""
super(GC_Block, self).__init__()
self.in_features = in_features
self.out_features = in_features
self.gc1 = GraphConvolution(
in_features, in_features,
output_nodes=output_nodes,
bias=bias
)
self.bn1 = nn.BatchNorm1d(output_nodes * in_features)
self.gc2 = GraphConvolution(
in_features, in_features,
output_nodes=output_nodes,
bias=bias
)
self.bn2 = nn.BatchNorm1d(output_nodes * in_features)
self.do = nn.Dropout(p_dropout)
self.act_f = nn.Tanh()
def forward(self, x):
"""Forward pass of the residual module"""
y = self.gc1(x)
b, n, f = y.shape
y = self.bn1(y.view(b, -1)).view(b, n, f)
y = self.act_f(y)
y = self.do(y)
y = self.gc2(y)
b, n, f = y.shape
y = self.bn2(y.view(b, -1)).view(b, n, f)
y = self.act_f(y)
y = self.do(y)
return y + x
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class PoseGCN(nn.Module):
def __init__(self,
input_features=128,
output_features=3,
model_dim=128,
output_nodes=21,
p_dropout=0.1,
num_stage=1):
"""Constructor.
Args:
input_feature: num of input feature of the graph nodes.
model_dim: num of hidden features of the generated embeddings.
p_dropout: dropout probability
num_stage: number of residual blocks in the network.
output_nodes: number of nodes in graph.
"""
super(PoseGCN, self).__init__()
self.num_stage = num_stage
self._n_nodes = output_nodes
self._model_dim = model_dim
self._output_features = output_features
self._hidden_dim = 512
self._front = nn.Sequential(
nn.Linear(model_dim, output_nodes*self._hidden_dim),
nn.Dropout(p_dropout)
)
utils.weight_init(self._front, init_fn_=utils.xavier_init_)
self.gc1 = GraphConvolution(
self._hidden_dim,
self._hidden_dim,
output_nodes=output_nodes
)
self.bn1 = nn.BatchNorm1d(output_nodes * self._hidden_dim)
self.gcbs = []
for i in range(num_stage):
self.gcbs.append(GC_Block(
self._hidden_dim,
p_dropout=p_dropout,
output_nodes=output_nodes)
)
self.gcbs = nn.ModuleList(self.gcbs)
self.gc7 = GraphConvolution(
self._hidden_dim,
output_features,
output_nodes=output_nodes
)
self.do = nn.Dropout(p_dropout)
self.act_f = nn.Tanh()
gcn_params = filter(lambda p: p.requires_grad, self.parameters())
nparams = sum([np.prod(p.size()) for p in gcn_params])
print('[INFO] ({}) GCN has {} params!'.format(self.__class__.__name__, nparams))
def preprocess(self, x):
if len(x.size()) < 3:
_, D = x.size()
      # seq_len, batch_size, input_dim (note: relies on self._seq_len being
      # set externally; it is not initialized in __init__)
x = x.view(self._seq_len, -1, D)
# [batch_size, seq_len, input_dim]
x = torch.transpose(x, 0, 1)
# [batch_size, input_dim, seq_len]
x = torch.transpose(x, 1, 2)
return x
return x
def postprocess(self, y):
"""Flattents the input tensor.
Args:
y: Input tensor of shape [batch_size, n_nodes, output_features].
"""
y = y.view(-1, self._n_nodes*self._output_features)
return y
def forward(self, x):
"""Forward pass of network.
Args:
x: [batch_size, model_dim].
"""
# [batch_size, model_dim*n_nodes]
x = self._front(x)
x = x.view(-1, self._n_nodes, self._hidden_dim)
# [batch_size, n_joints, model_dim]
y = self.gc1(x)
b, n, f = y.shape
y = self.bn1(y.view(b, -1)).view(b, n, f)
y = self.act_f(y)
y = self.do(y)
for i in range(self.num_stage):
y = self.gcbs[i](y)
# [batch_size, n_joints, output_features]
y = self.gc7(y)
# y = y + x
# [seq_len*batch_size, input_dim]
y = self.postprocess(y)
return y
class SimpleEncoder(nn.Module):
def __init__(self,
n_nodes=63,
input_features=1,
model_dim=128,
p_dropout=0.1):
"""Constructor.
Args:
input_dim: Dimension of the input vector. This will be equivalent to
the number of nodes in the graph, each node with 1 feature each.
model_dim: Dimension of the output vector to produce.
p_dropout: Dropout to be applied for regularization.
"""
super(SimpleEncoder, self).__init__()
    # The graph convolutions can be defined as \sigma(AHW), with
    # A\in R^{NxN}, H\in R^{NxM} and W\in R^{MxO}
self._input_features = input_features
self._output_nodes = n_nodes
self._hidden_dim = 512
self._model_dim = model_dim
self._num_stage = 1
print('[INFO] ({}) Hidden dimension: {}!'.format(
self.__class__.__name__, self._hidden_dim))
self.gc1 = GraphConvolution(
in_features=self._input_features,
out_features=self._hidden_dim,
output_nodes=self._output_nodes
)
self.bn1 = nn.BatchNorm1d(self._output_nodes*self._hidden_dim)
self.gc2 = GraphConvolution(
in_features=self._hidden_dim,
out_features=model_dim,
output_nodes=self._output_nodes
)
self.gcbs = []
for i in range(self._num_stage):
self.gcbs.append(GC_Block(
self._hidden_dim,
p_dropout=p_dropout,
output_nodes=self._output_nodes)
)
self.gcbs = nn.ModuleList(self.gcbs)
self.do = nn.Dropout(p_dropout)
self.act_f = nn.Tanh()
self._back = nn.Sequential(
nn.Linear(model_dim*self._output_nodes, model_dim),
nn.Dropout(p_dropout)
)
utils.weight_init(self._back, init_fn_=utils.xavier_init_)
gcn_params = filter(lambda p: p.requires_grad, self.parameters())
nparams = sum([np.prod(p.size()) for p in gcn_params])
print('[INFO] ({}) GCN has {} params!'.format(self.__class__.__name__, nparams))
def forward(self, x):
"""Forward pass of network.
Args:
x: [batch_size, n_poses, pose_dim/input_dim].
"""
B, S, D = x.size()
# [batch_size, n_joints, model_dim]
y = self.gc1(x.view(-1, self._output_nodes, self._input_features))
b, n, f = y.shape
y = self.bn1(y.view(b, -1)).view(b, n, f)
y = self.act_f(y)
y = self.do(y)
for i in range(self._num_stage):
y = self.gcbs[i](y)
# [batch_size, n_joints, model_dim]
y = self.gc2(y)
# [batch_size, model_dim]
y = self._back(y.view(-1, self._model_dim*self._output_nodes))
# [batch_size, n_poses, model_dim]
y = y.view(B, S, self._model_dim)
return y
def test_decoder():
seq_len = 25
input_size = 63
model_dim = 128
dropout = 0.3
n_stages = 2
output_nodes = 21
joint_dof = 1
n_joints = model_dim
layer = GraphConvolution(
in_features=joint_dof,
out_features=model_dim,
output_nodes=n_joints
)
X = torch.FloatTensor(10, n_joints, joint_dof)
print(layer(X).size())
gcn = PoseGCN(
input_features=model_dim,
output_features=3,
model_dim=model_dim,
output_nodes=output_nodes,
p_dropout=0.1,
num_stage=2
)
X = torch.FloatTensor(10*seq_len, model_dim)
print(gcn(X).size())
def test_encoder():
input_size = 63
model_dim = 128
dropout = 0.3
n_stages = 2
output_nodes = 21
dof = 9
encoder = SimpleEncoder(
n_nodes=output_nodes,
model_dim=model_dim,
input_features=dof,
p_dropout=0.1
)
X = torch.FloatTensor(10, 25, output_nodes*dof)
print(encoder(X).size())
if __name__ == '__main__':
#test_decoder()
test_encoder()
| 11,945 | 26.976581 | 97 |
py
|
GaitForeMer
|
GaitForeMer-main/models/potr_fn.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Model function to deploy POTR models for visualization and generation."""
import numpy as np
import os
import sys
import argparse
import json
import time
import cv2
from matplotlib import image
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import training.seq2seq_model_fn as seq2seq_model_fn
import models.PoseTransformer as PoseTransformer
import models.PoseEncoderDecoder as PoseEncoderDecoder
import data.H36MDataset_v2 as H36MDataset_v2
import data.AMASSDataset as AMASSDataset
import utils.utils as utils
import radam.radam as radam
import training.transformer_model_fn as tr_fn
import tqdm
# _DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_DEVICE = torch.device('cpu')
def plot_conf_mat(matrix):
import matplotlib.pyplot as plt
import matplotlib
fig, ax = plt.subplots(figsize=(30,30))
#im = ax.imshow(matrix, cmap='Wistia')
im = ax.imshow(matrix, cmap='Blues')
action_labels = ['A%02d'%i for i in range(1, 61, 1)]
ax.set_xticks(np.arange(len(action_labels)))
ax.set_yticks(np.arange(len(action_labels)))
ax.set_xticklabels(action_labels, fontdict={'fontsize':15})#, rotation=90)
ax.set_yticklabels(action_labels, fontdict={'fontsize':15})
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
for i in range(len(action_labels)):
for j in range(len(action_labels)):
# color= "w" if round(matrix[i, j],2) < nmax else "black"
text = ax.text(j, i, round(matrix[i, j], 2),
ha="center", va="center", color="black", fontsize=10)
plt.ylabel("")
plt.xlabel("")
# ax.set_title("Small plot")
fig.tight_layout()
#plt.show()
plt.savefig('confusion_matrix.png')
plt.close()
def crop_image(img):
size = max(img.shape[0], img.shape[1])
h = int(size*0.30)
w = int(size*0.30)
cy = img.shape[0]//2
cx = img.shape[1]//2
crop = img[cy-h//2:cy+h//2, cx-w//2:cx+w//2]
return crop
def visualize_h36mdataset():
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str)
parser.add_argument('--model_file', type=str)
parser.add_argument('--data_path', type=str, default=None)
args = parser.parse_args()
params = json.load(open(args.config_file))
if args.data_path is not None:
params['data_path'] = args.data_path
args.data_path = params['data_path']
train_dataset_fn, eval_dataset_fn = tr_fn.dataset_factory(params)
pose_encoder_fn, pose_decoder_fn = \
PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
for k,v in params.items():
print('[INFO] (POTRFn@main) {}: {}'.format(k, v))
# ids of most common actions in H36M
actions = [('walking', 12), ('eating', 2), ('smoking', 9),
('discussion', 1), ('directions', 0)]
with torch.no_grad():
for i in range(len(actions)):
action, acidx = actions[i]
sample = next(iter(eval_dataset_fn))
enc_inputs = sample['encoder_inputs'].to(_DEVICE)
dec_inputs = sample['decoder_inputs'].to(_DEVICE)
gts = np.squeeze(sample['decoder_outputs'].cpu().numpy())[8*acidx:8*acidx+8]
ins = np.squeeze(sample['encoder_inputs'].cpu().numpy())[8*acidx:8*acidx+8]
ins = eval_dataset_fn.dataset.unnormalize_pad_data_to_expmap(ins)
H36MDataset_v2.visualize_sequence(
ins[0:1], args.data_path, prefix='skeletons/%s/gt_in'%action, colors=['gray', 'gray'])
#print(gts.shape)
gts = eval_dataset_fn.dataset.unnormalize_pad_data_to_expmap(gts)
H36MDataset_v2.visualize_sequence(
gts[0:1], args.data_path, prefix='skeletons/%s/gt'%action, colors=['gray', 'gray'])
enc_inputs = torch.squeeze(enc_inputs)
dec_inputs = torch.squeeze(dec_inputs)
model = PoseTransformer.model_factory(
params,
pose_encoder_fn,
pose_decoder_fn
)
model.load_state_dict(torch.load(args.model_file, map_location=_DEVICE))
model.to(_DEVICE)
model.eval()
prediction, attn_weights, memory = model(
enc_inputs,
dec_inputs,
get_attn_weights=True
)
prediction = prediction[-1][8*acidx:8*acidx+8].cpu().numpy()[0:1]
preds = eval_dataset_fn.dataset.unnormalize_pad_data_to_expmap(prediction)
H36MDataset_v2.visualize_sequence(preds, args.data_path,
prefix='skeletons/%s/pred'%action, colors=['red', 'red'])
def compute_mean_average_precision(prediction, target, dataset_fn):
pred = prediction.cpu().numpy().squeeze()
tgt = target.cpu().numpy().squeeze()
T, D = pred.shape
pred = dataset_fn.dataset.unormalize_sequence(pred)
tgt = dataset_fn.dataset.unormalize_sequence(tgt)
pred = pred.reshape((T, -1, 3))
tgt = tgt.reshape((T, -1, 3))
mAP, _, _, (TP, FN) = utils.compute_mean_average_precision(
pred, tgt, seq2seq_model_fn._MAP_TRESH, per_frame=True
)
return mAP, TP, FN
def compute_mpjpe(prediction, target, dataset_fn):
pred = prediction.cpu().numpy().squeeze()
tgt = target.cpu().numpy().squeeze()
T, D = pred.shape
pred = dataset_fn.dataset.unormalize_sequence(pred)
tgt = dataset_fn.dataset.unormalize_sequence(tgt)
pred = pred.reshape((T, -1, 3))
tgt = tgt.reshape((T, -1, 3))
# seq_len x n_joints
norm = np.squeeze(np.linalg.norm(pred-tgt, axis=-1))
return norm
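# Worked note (illustrative, not in the original file): for a frame with
# predicted joints p_j and ground-truth joints t_j, compute_mpjpe returns the
# per-joint Euclidean errors ||p_j - t_j||_2; averaging them over joints,
# frames and sequences yields the MPJPE reported by the test loop below.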
def compute_test_mAP_nturgbd():
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str)
parser.add_argument('--model_file', type=str)
parser.add_argument('--data_path', type=str, default=None)
args = parser.parse_args()
params = json.load(open(args.config_file))
if args.data_path is not None:
params['data_path'] = args.data_path
args.data_path = params['data_path']
params['test_phase'] = True
_, test_dataset_fn = tr_fn.dataset_factory(params)
pose_encoder_fn, pose_decoder_fn = \
PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
for k,v in params.items():
print('[INFO] (POTRFn@main) {}: {}'.format(k, v))
model = PoseTransformer.model_factory(
params,
pose_encoder_fn,
pose_decoder_fn
)
model.load_state_dict(torch.load(args.model_file, map_location=_DEVICE))
model.to(_DEVICE)
model.eval()
FN = np.zeros((params['target_seq_len'],), dtype=np.float32)
TP = np.zeros((params['target_seq_len'],), dtype=np.float32)
FN_joint = np.zeros((params['n_joints'],), dtype=np.float32)
TP_joint = np.zeros((params['n_joints'],), dtype=np.float32)
MPJPE = np.zeros((params['n_joints'],), dtype=np.float32)
pred_activity = []
gt_activity = []
with torch.no_grad():
print('Running testing...')
for n, sample in tqdm.tqdm(enumerate(test_dataset_fn)):
enc_inputs = sample['encoder_inputs'].to(_DEVICE)
dec_inputs = sample['decoder_inputs'].to(_DEVICE)
gts = sample['decoder_outputs'].to(_DEVICE)
# ins = np.squeeze(sample['encoder_inputs'].cpu().numpy())
outputs = model(
enc_inputs,
dec_inputs,
get_attn_weights=True
)
if params['predict_activity']:
a_ids = sample['action_ids']
prediction, out_logits, attn_weights, memory = outputs
out_class = torch.argmax(out_logits[-1].softmax(-1), -1)
else:
prediction, attn_weights, memory = outputs
mAP, TP_, FN_ = compute_mean_average_precision(prediction[-1], gts, test_dataset_fn)
MPJPE_ = compute_mpjpe(prediction[-1], gts, test_dataset_fn)
# reduce by frame to get per joint MPJPE
MPJPE = MPJPE + np.sum(MPJPE_, axis=0)
# reduce by frame to get per joint AP
TP_joint = TP_joint + np.sum(TP_, axis=0)
FN_joint = FN_joint + np.sum(FN_, axis=0)
# reduce by joint to get per frame AP
TP_ = np.sum(TP_, axis=-1)
FN_ = np.sum(FN_, axis=-1)
TP = TP + TP_
FN = FN + FN_
# print(n, ':', prediction[-1].size(), out_class.item(), a_ids.item(), mAP, TP.shape, FN.shape)
if params['predict_activity']:
pred_activity.append(out_class.item())
gt_activity.append(a_ids.item())
    #accuracy = (np.array(gt_activity) == np.array(pred_activity)).astype(np.float32).sum()
    #accuracy = accuracy / len(gt_activity)
    accuracy = -1
    if params['predict_activity']:
      # accuracy_score expects a boolean `normalize`; confusion_matrix accepts
      # the string 'true' to normalize over the true (row) labels
      accuracy = accuracy_score(gt_activity, pred_activity, normalize=True)
      conf_matrix = confusion_matrix(gt_activity, pred_activity, normalize='true')
      plot_conf_mat(conf_matrix)
    AP = TP / (TP + FN)
    AP_joints = TP_joint / (TP_joint + FN_joint)
    # n holds the last enumerate index, so the sample count is n+1
    MPJPE = MPJPE / ((n + 1) * params['target_seq_len'])
    print('[INFO] The mAP per joint\n', np.around(AP_joints, 2))
    print('[INFO] The MPJPE\n', np.around(MPJPE, 4) * 100.0)
    print('[INFO] The accuracy: {} mAP: {}'.format(
        round(accuracy, 2), round(np.mean(AP), 2)))
ms_range = [0.08, 0.160, 0.320, 0.400, 0.5, 0.66]
FPS = 30.0
ms_map = []
for ms in ms_range:
nf = int(round(ms*FPS))
ms_map.append(np.mean(AP[0:nf]))
print()
print("{0: <16} |".format("milliseconds"), end="")
for ms in ms_range:
print(" {0:5d} |".format(int(ms*1000)), end="")
print()
print("{0: <16} |".format("global mAP"), end="")
for mAP in ms_map:
print(" {0:.3f} |".format(mAP), end="")
print()
def visualize_attn_weights():
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str)
parser.add_argument('--model_file', type=str)
parser.add_argument('--data_path', type=str, default=None)
args = parser.parse_args()
params = json.load(open(args.config_file))
if args.data_path is not None:
params['data_path'] = args.data_path
args.data_path = params['data_path']
train_dataset_fn, eval_dataset_fn = tr_fn.dataset_factory(params)
pose_encoder_fn, pose_decoder_fn = \
PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
model = PoseTransformer.model_factory(
params,
pose_encoder_fn,
pose_decoder_fn
)
model.load_state_dict(torch.load(args.model_file, map_location=_DEVICE))
model.to(_DEVICE)
model.eval()
for k,v in params.items():
print('[INFO] (POTRFn@main) {}: {}'.format(k, v))
# ids of most common actions in H36M
actions = [('walking', 12)]
#[('walking', 12), ('eating', 2), ('smoking', 9),
# ('discussion', 1), ('directions', 0)]
with torch.no_grad():
for i in range(len(actions)):
action, acidx = actions[i]
sample = next(iter(eval_dataset_fn))
enc_inputs = sample['encoder_inputs'].to(_DEVICE)
dec_inputs = sample['decoder_inputs'].to(_DEVICE)
enc_inputs = torch.squeeze(enc_inputs)
dec_inputs = torch.squeeze(dec_inputs)
prediction, attn_weights, enc_weights = model(
enc_inputs,
dec_inputs,
get_attn_weights=True
)
attn_weights= attn_weights[-1][8*acidx:8*acidx+8].cpu().numpy()[0:1]
attn_weights = np.squeeze(attn_weights)
print(attn_weights.shape)
path = 'skeletons/%s'%action
in_imgs_ = [crop_image(cv2.imread(os.path.join(path, x)) )
for x in os.listdir(path) if 'gt_in' in x]
in_imgs = [in_imgs_[i] for i in range(0, len(in_imgs_), 2)]
pred_imgs = [crop_image(cv2.imread(os.path.join(path, x)))
for x in os.listdir(path) if 'pred_0' in x]
the_shape = in_imgs[0].shape
cx = the_shape[1]//2
cy = the_shape[0]//2
in_imgs = np.concatenate(in_imgs, axis=1)
pred_imgs = np.concatenate(pred_imgs, axis=1)
#cv2.imshow('In IMG', in_imgs)
#cv2.imshow('pred IMG', pred_imgs)
#cv2.waitKey()
spaces_between = 5
print(in_imgs.shape, pred_imgs.shape, the_shape)
canvas = np.ones(
(in_imgs.shape[0]*spaces_between, in_imgs.shape[1], 3),
dtype=in_imgs.dtype)*255
canvas[0:the_shape[0], :] = in_imgs
canvas[the_shape[0]*(spaces_between-1):, 0:pred_imgs.shape[1]] = pred_imgs
#cx_pred = cx + the_shape[1]*(spaces_between-1) - cx//2
cy_pred = cy + the_shape[0]*(spaces_between-1) - cy//3*2
print(attn_weights.min(), attn_weights.max())
mean = attn_weights.mean()
#plt.imshow(canvas, origin='lower')
pil_canvas = Image.fromarray(canvas)
d_canvas = ImageDraw.Draw(pil_canvas)
for pred_idx in range(attn_weights.shape[0]):
# cy_pred = cy + pred_idx*the_shape[0]
cx_pred = cx + pred_idx*the_shape[1]
#cv2.circle(canvas, (cx_pred, cy_pred), 5, [0,255,0], -1)
for ii, in_idx in enumerate(range(0, attn_weights.shape[1], 2)):
# cy_in = cy + ii*the_shape[0]
cx_in = cx + ii*the_shape[1]
this_weight = attn_weights[pred_idx, in_idx]
if this_weight > mean:
#d_canvas.line([(cx+cx//2, cy_in), (cx_pred, cy_pred)], fill=(255,0,0, 25), width=this_weight/mean)
            # PIL expects an integer line width
            d_canvas.line([(cx_in, cy+cy//3*2), (cx_pred, cy_pred)],
                          fill=(255, 0, 0, 25), width=int(this_weight/mean))
name = 'the_canvas.png'
#cv2.imwrite('the_canvas.jpg', canvas)
# plt.show()
#plt.imsave(name, canvas)
pil_canvas.save(name)
print(pil_canvas.info)
fig, ax = plt.subplots(figsize=(20,10))
ax.matshow(attn_weights)
plt.ylabel("")
plt.xlabel("")
fig.tight_layout()
#plt.show()
name = 'attn_map.png'
plt.savefig(name)
plt.close()
if __name__ == '__main__':
# visualize_h36mdataset()
visualize_attn_weights()
#compute_test_mAP_nturgbd()
| 14,622 | 30.245726 | 110 |
py
|
GaitForeMer
|
GaitForeMer-main/visualize/forward_kinematics.py
|
"""Visualize predictions as a sequence of skeletons."""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import json
import argparse
import viz
import os
import sys
import h5py
sys.path.append('../')
import utils.utils as utils
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str)
parser.add_argument('--input_sample', type=str)
args = parser.parse_args()
parent, offset, rot_ind, exp_map_ind = utils.load_constants(args.dataset_path)
print(parent.shape)
print(rot_ind)
print(offset)
# expmap = np.load(args.input_sample)
with h5py.File(args.input_sample, 'r') as h5f:
expmap_gt = h5f['expmap/gt/walking_0'][:]
expmap_pred = h5f['expmap/preds/walking_0'][:]
nframes_gt, nframes_pred = expmap_gt.shape[0], expmap_pred.shape[0]
input_pose = np.vstack((expmap_gt, expmap_pred))
print(input_pose.shape)
expmap_all = utils.revert_coordinate_space(input_pose, np.eye(3), np.zeros(3))
print(expmap_gt.shape, expmap_pred.shape)
expmap_gt = expmap_all[:nframes_gt,:]
  expmap_pred = expmap_all[nframes_gt:, :]
# compute 3d points for each frame
xyz_gt, xyz_pred = np.zeros((nframes_gt, 96)), np.zeros((nframes_pred, 96))
print(xyz_gt.shape, xyz_pred.shape)
for i in range(nframes_gt):
xyz_gt[i, :] = utils.compute_forward_kinematics(
expmap_gt[i, :], parent, offset, rot_ind, exp_map_ind)
#for i in range(nframes_pred):
# xyz_pred[i, :] = compute_forward_kinematics(
# expmap_pred[i, :], parent, offset, rot_ind, exp_map_ind)
# plot and animate the poses
fig = plt.figure()
  ax = fig.add_subplot(projection='3d')  # plt.gca(projection=...) was removed in matplotlib 3.6
ob = viz.Ax3DPose(ax)
for i in range(nframes_gt):
ob.update(xyz_gt[i, :])
plt.show(block=False)
fig.canvas.draw()
plt.pause(0.01)
| 1,867 | 23.578947 | 80 |
py
|
GaitForeMer
|
GaitForeMer-main/visualize/viz.py
|
"""Functions to visualize human poses"""
import matplotlib.pyplot as plt
import numpy as np
import h5py
import os
from mpl_toolkits.mplot3d import Axes3D
# red color "#e74c3c"
# blue color "#3498db"
class Ax3DPose(object):
def __init__(self, ax, lcolor="#3498db", rcolor="#e74c3c"):
"""
Create a 3d pose visualizer that can be updated with new poses.
Args
ax: 3d axis to plot the 3d pose on
lcolor: String. Colour for the left part of the body
rcolor: String. Colour for the right part of the body
"""
# Start and endpoints of our representation
self.I = np.array([1,2,3,1,7,8,1, 13,14,15,14,18,19,14,26,27])-1
self.J = np.array([2,3,4,7,8,9,13,14,15,16,18,19,20,26,27,28])-1
# Left / right indicator
self.LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)
self.ax = ax
vals = np.zeros((32, 3))
# Make connection matrix
self.plots = []
for i in np.arange( len(self.I) ):
x = np.array( [vals[self.I[i], 0], vals[self.J[i], 0]] )
y = np.array( [vals[self.I[i], 1], vals[self.J[i], 1]] )
z = np.array( [vals[self.I[i], 2], vals[self.J[i], 2]] )
self.plots.append(
self.ax.plot(x, y, z, lw=2, c=lcolor if self.LR[i] else rcolor))
# self.ax.set_xlabel("x")
# self.ax.set_ylabel("y")
# self.ax.set_zlabel("z")
def update(self, channels, lcolor="#3498db", rcolor="#e74c3c"):
"""
Update the plotted 3d pose.
Args
channels: 96-dim long np array. The pose to plot.
lcolor: String. Colour for the left part of the body.
rcolor: String. Colour for the right part of the body.
Returns
Nothing. Simply updates the axis with the new pose.
"""
assert channels.size == 96, \
"channels should have 96 entries, it has %d instead" % channels.size
vals = np.reshape( channels, (32, -1) )
for i in np.arange( len(self.I) ):
x = np.array( [vals[self.I[i], 0], vals[self.J[i], 0]] )
y = np.array( [vals[self.I[i], 1], vals[self.J[i], 1]] )
z = np.array( [vals[self.I[i], 2], vals[self.J[i], 2]] )
self.plots[i][0].set_xdata(x)
self.plots[i][0].set_ydata(y)
self.plots[i][0].set_3d_properties(z)
self.plots[i][0].set_color(lcolor if self.LR[i] else rcolor)
r = 750;
xroot, yroot, zroot = vals[0,0], vals[0,1], vals[0,2]
self.ax.set_xlim3d([-r+xroot, r+xroot])
self.ax.set_zlim3d([-r+zroot, r+zroot])
self.ax.set_ylim3d([-r+yroot, r+yroot])
self.ax.set_axis_off()
#self.ax.set_xticks([])
#self.ax.set_yticks([])
#self.ax.set_zticks([])
# self.ax.set_aspect('equal')
| 2,647 | 30.903614 | 79 |
py
|
GaitForeMer
|
GaitForeMer-main/utils/WarmUpScheduler.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Warm up scheduler implementation.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Adapted from https://github.com/ildoonet/pytorch-gradual-warmup-lr/blob/master/warmup_scheduler/scheduler.py
"""
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer."""
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
"""Constructor.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier if
multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with
the base_lr.
      total_epoch: epoch at which the target learning rate is reached;
        the rate increases gradually until then.
      after_scheduler: scheduler to use after total_epoch
        (e.g. ReduceLROnPlateau).
"""
self.multiplier = multiplier
if self.multiplier < 1.:
raise ValueError('multiplier should be greater than or equal to 1.')
self.total_epoch = total_epoch
self.after_scheduler = after_scheduler
self.finished = False
super(GradualWarmupScheduler, self).__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [
base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
# return self.after_scheduler.get_last_lr()
return self.after_scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
if self.multiplier == 1.0:
return [base_lr * (float(self.last_epoch) / self.total_epoch)
for base_lr in self.base_lrs]
else:
return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
for base_lr in self.base_lrs]
def step_ReduceLROnPlateau(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
# ReduceLROnPlateau is called at the end of epoch, whereas others
# are called at beginning
self.last_epoch = epoch if epoch != 0 else 1
if self.last_epoch <= self.total_epoch:
warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
for base_lr in self.base_lrs]
for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
param_group['lr'] = lr
else:
if epoch is None:
self.after_scheduler.step(metrics, None)
else:
self.after_scheduler.step(metrics, epoch - self.total_epoch)
def step(self, epoch=None, metrics=None):
if type(self.after_scheduler) != ReduceLROnPlateau:
if self.finished and self.after_scheduler:
if epoch is None:
self.after_scheduler.step(None)
else:
self.after_scheduler.step(epoch - self.total_epoch)
# self._last_lr = self.after_scheduler.get_last_lr()
self._last_lr = self.after_scheduler.get_lr()
else:
return super(GradualWarmupScheduler, self).step(epoch)
else:
self.step_ReduceLROnPlateau(metrics, epoch)
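if __name__ == '__main__':
  # Minimal usage sketch (illustrative only; the model, optimizer and
  # hyperparameters below are placeholders, not the values used for POTR):
  # warm up linearly for 5 epochs, then hand over to a StepLR decay.
  import torch
  model = torch.nn.Linear(8, 8)
  optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
  decay = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
  scheduler = GradualWarmupScheduler(
      optimizer, multiplier=1.0, total_epoch=5, after_scheduler=decay)
  for epoch in range(10):
    optimizer.step()  # dummy update; real training code would go here
    scheduler.step(epoch)
    print(epoch, optimizer.param_groups[0]['lr'])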
| 4,280 | 39.771429 | 108 |
py
|
GaitForeMer
|
GaitForeMer-main/utils/utils.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Set of utility functions."""
import torch
import numpy as np
import copy
import json
import os
import cv2
import torch.nn as nn
def expmap_to_euler(action_sequence):
rotmats = expmap_to_rotmat(action_sequence)
eulers = rotmat_to_euler(rotmats)
return eulers
def expmap_to_rotmat(action_sequence):
"""Convert exponential maps to rotmats.
Args:
action_sequence: [n_samples, n_joints, 3]
Returns:
    Rotation matrices for exponential maps [n_samples, n_joints, 9].
"""
n_samples, n_joints, _ = action_sequence.shape
expmap = np.reshape(action_sequence, [n_samples*n_joints, 1, 3])
  # the first three values are positions, so technically it's meaningless to
  # convert them, but we do it anyway because these values are discarded later
rotmats = np.zeros([n_samples*n_joints, 3, 3])
for i in range(rotmats.shape[0]):
rotmats[i] = cv2.Rodrigues(expmap[i])[0]
rotmats = np.reshape(rotmats, [n_samples, n_joints, 3*3])
return rotmats
def rotmat_to_expmap(action_sequence):
"""Convert rotmats to expmap.
Args:
action_sequence: [n_samples, n_joints, 9]
Returns:
    Rotation exponential maps [n_samples, n_joints, 3].
"""
n_samples, n_joints, _ = action_sequence.shape
rotmats = np.reshape(action_sequence, [n_samples*n_joints, 3, 3])
  # the first three values are positions, so technically it's meaningless to
  # convert them, but we do it anyway because these values are discarded later
expmaps = np.zeros([n_samples*n_joints, 3, 1])
for i in range(rotmats.shape[0]):
expmaps[i] = cv2.Rodrigues(rotmats[i])[0]
expmaps = np.reshape(expmaps, [n_samples, n_joints, 3])
return expmaps
def rotmat_to_euler(action_sequence):
"""Convert exponential maps to rotmats.
Args:
action_sequence: [n_samples, n_joints, 9]
Returns:
    Euler angles for the given rotation matrices [n_samples, n_joints, 3].
"""
n_samples, n_joints, _ = action_sequence.shape
rotmats = np.reshape(action_sequence, [n_samples*n_joints, 3, 3])
eulers = np.zeros([n_samples*n_joints, 3])
for i in range(eulers.shape[0]):
eulers[i] = rotmat2euler(rotmats[i])
eulers = np.reshape(eulers, [n_samples, n_joints, 3])
return eulers
def rotmat2euler(R):
"""Converts a rotation matrix to Euler angles.
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/RotMat2Euler.m#L1
Args:
R: a 3x3 rotation matrix
Returns:
eul: a 3x1 Euler angle representation of R
"""
if R[0,2] >= 1 or R[0,2] <= -1:
    # special case: values are out of bounds for arcsin
E3 = 0 # set arbitrarily
dlta = np.arctan2( R[0,1], R[0,2] );
if R[0,2] == -1:
E2 = np.pi/2;
E1 = E3 + dlta;
else:
E2 = -np.pi/2;
E1 = -E3 + dlta;
else:
E2 = -np.arcsin(R[0,2])
E1 = np.arctan2(R[1,2]/np.cos(E2), R[2,2]/np.cos(E2) )
E3 = np.arctan2(R[0,1]/np.cos(E2), R[0,0]/np.cos(E2) )
eul = np.array([E1, E2, E3]);
return eul
def load_constants(data_path):
offset = json.load(open(os.path.join(data_path, 'offset.json')))
parent = json.load(open(os.path.join(data_path, 'parent.json')))
rot_ind = json.load(open(os.path.join(data_path, 'rot_ind.json')))
parent = np.array(parent)-1
offset = np.array(offset).reshape(-1, 3)
exp_map_ind = np.split(np.arange(4, 100)-1, 32)
return parent, offset, rot_ind, exp_map_ind
def compute_forward_kinematics(angles, parent, offset, rotInd, expmapInd):
"""Computes forward kinematics from angles to 3d points.
  Convert joint angles and bone lengths into the 3d points of a person.
Based on expmap2xyz.m, available at
https://github.com/asheshjain399/RNNexp/blob/7fc5a53292dc0f232867beb66c3a9ef845d705cb/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/exp2xyz.m
Args
angles: 99-long vector with 3d position and 3d joint angles in expmap format
parent: 32-long vector with parent-child relationships in the kinematic tree
    offset: 96-long vector with bone lengths
rotInd: 32-long list with indices into angles
expmapInd: 32-long list with indices into expmap angles
Returns
xyz: 32x3 3d points that represent a person in 3d space
"""
assert len(angles) == 99, 'Incorrect number of angles.'
# Structure that indicates parents for each joint
njoints = 32
xyzStruct = [dict() for x in range(njoints)]
for i in np.arange(njoints):
if not rotInd[i] : # If the list is empty
xangle, yangle, zangle = 0, 0, 0
else:
xangle = angles[rotInd[i][0]-1]
yangle = angles[rotInd[i][1]-1]
zangle = angles[rotInd[i][2]-1]
r = angles[expmapInd[i]]
thisRotation = expmap2rotmat(r)
thisPosition = np.array([xangle, yangle, zangle])
if parent[i] == -1: # Root node
xyzStruct[i]['rotation'] = thisRotation
xyzStruct[i]['xyz'] = np.reshape(offset[i,:], (1,3)) + thisPosition
else:
xyzStruct[i]['xyz'] = (offset[i,:] + thisPosition).dot(
xyzStruct[parent[i]]['rotation']) + xyzStruct[parent[i]]['xyz']
xyzStruct[i]['rotation'] = thisRotation.dot(
xyzStruct[parent[i]]['rotation'])
xyz = [xyzStruct[i]['xyz'] for i in range(njoints)]
xyz = np.array(xyz).squeeze()
xyz = xyz[:,[0,2,1]]
# xyz = xyz[:,[2,0,1]]
return np.reshape( xyz, [-1] )
def revert_coordinate_space(channels, R0, T0):
"""Arrange poses to a canonical form to face the camera.
Bring a series of poses to a canonical form so they are facing the camera
when they start. Adapted from
https://github.com/asheshjain399/RNNexp/blob/7fc5a53292dc0f232867beb66c3a9ef845d705cb/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/revertCoordinateSpace.m
Args:
channels: n-by-99 matrix of poses
R0: 3x3 rotation for the first frame
T0: 1x3 position for the first frame
Returns:
channels_rec: The passed poses, but the first has T0 and R0, and the
rest of the sequence is modified accordingly.
"""
n, d = channels.shape
channels_rec = copy.copy(channels)
R_prev = R0
T_prev = T0
rootRotInd = np.arange(3,6)
for ii in range(n):
R_diff = expmap2rotmat(channels[ii, rootRotInd])
R = R_diff.dot(R_prev)
channels_rec[ii, rootRotInd] = rotmat2expmap(R)
T = T_prev + (R_prev.T).dot(np.reshape(channels[ii,:3],[3,1])).reshape(-1)
channels_rec[ii,:3] = T
T_prev = T
R_prev = R
return channels_rec
def rotmat2quat(R):
"""Converts a rotation matrix to a quaternion.
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/rotmat2quat.m#L4
Args:
R: 3x3 rotation matrix
Returns:
q: 1x4 quaternion
"""
rotdiff = R - R.T;
r = np.zeros(3)
r[0] = -rotdiff[1,2]
r[1] = rotdiff[0,2]
r[2] = -rotdiff[0,1]
sintheta = np.linalg.norm(r) / 2;
r0 = np.divide(r, np.linalg.norm(r) + np.finfo(np.float32).eps );
costheta = (np.trace(R)-1) / 2;
theta = np.arctan2( sintheta, costheta );
q = np.zeros(4)
q[0] = np.cos(theta/2)
q[1:] = r0*np.sin(theta/2)
return q
def quat2expmap(q):
"""Convert quaternions to an exponential map.
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1
Args:
q: 1x4 quaternion
Returns:
r: 1x3 exponential map
Raises:
ValueError if the l2 norm of the quaternion is not close to 1
"""
if (np.abs(np.linalg.norm(q)-1)>1e-3):
    raise ValueError("quat2expmap: input quaternion is not norm 1")
sinhalftheta = np.linalg.norm(q[1:])
coshalftheta = q[0]
r0 = np.divide( q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps));
theta = 2 * np.arctan2( sinhalftheta, coshalftheta )
theta = np.mod( theta + 2*np.pi, 2*np.pi )
if theta > np.pi:
theta = 2 * np.pi - theta
r0 = -r0
r = r0 * theta
return r
def rotmat2expmap(R):
return quat2expmap( rotmat2quat(R) )
def expmap2rotmat(r):
"""Converts an exponential map (axis angle number) to rotation matrix.
Converts an exponential map angle to a rotation matrix Matlab port to python
for evaluation purposes. This is also called Rodrigues' formula and can be
found also implemented in opencv as cv2.Rodrigues.
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/expmap2rotmat.m
Args:
r: 1x3 exponential map
Returns:
R: 3x3 rotation matrix
"""
theta = np.linalg.norm( r )
r0 = np.divide( r, theta + np.finfo(np.float32).eps )
r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3,3)
r0x = r0x - r0x.T
R = np.eye(3,3) + np.sin(theta)*r0x + (1-np.cos(theta))*(r0x).dot(r0x);
return R
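# Quick round-trip sanity check (illustrative): an exponential map of
# magnitude pi/2 about the z axis maps to a 90-degree rotation matrix and
# back to the same vector.
# >>> r = np.array([0.0, 0.0, np.pi/2])
# >>> R = expmap2rotmat(r)
# >>> np.allclose(rotmat2expmap(R), r)
# True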
def revert_output_format(
poses,
data_mean,
data_std,
dim_to_ignore,
actions,
use_one_hot):
"""Transforms pose predictions to a more interpretable format.
Converts the output of the neural network to a format that is more easy to
manipulate for, e.g. conversion to other format or visualization
Args:
poses: Sequence of pose predictions. A list with (seq_length) entries,
each with a (batch_size, dim) output
Returns:
poses_out: List of tensors each of size (batch_size, seq_length, dim).
"""
seq_len = len(poses)
if seq_len == 0:
return []
batch_size, dim = poses[0].shape
poses_out = np.concatenate(poses)
poses_out = np.reshape(poses_out, (seq_len, batch_size, dim))
poses_out = np.transpose(poses_out, [1, 0, 2])
poses_out_list = []
for i in range(poses_out.shape[0]):
poses_out_list.append(
unnormalize_data(poses_out[i, :, :], data_mean, data_std,
dim_to_ignore, actions, use_one_hot))
return poses_out_list
def unnormalize_data(
normalizedData,
data_mean,
data_std,
dimensions_to_ignore=None,
actions=[],
use_one_hot=False):
"""
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12
Args
normalizedData: nxd matrix with normalized data
data_mean: vector of mean used to normalize the data
data_std: vector of standard deviation used to normalize the data
dimensions_to_ignore: vector with dimensions not used by the model
actions: list of strings with the encoded actions
use_one_hot: whether the data comes with one-hot encoding
Returns
    origData: data in the original, unnormalized representation
"""
T = normalizedData.shape[0]
D = data_mean.shape[0]
origData = np.zeros((T, D), dtype=np.float32)
  dimensions_to_ignore = [] if dimensions_to_ignore is None else dimensions_to_ignore
dimensions_to_use = [i for i in range(D) if i not in dimensions_to_ignore]
dimensions_to_use = np.array(dimensions_to_use)
#print('Size of the normalized data', normalizedData.shape)
#print('Size of the mean data', data_mean.shape[0])
#print('Lenght of the dimensions to use', len(dimensions_to_use))
if use_one_hot:
origData[:, dimensions_to_use] = normalizedData[:, :-len(actions)]
else:
# print('++++++++++++++++++++',origData.shape, normalizedData.shape, len(dimensions_to_use))
origData[:, dimensions_to_use] = normalizedData
  # potentially inefficient, but only done once per experiment
stdMat = data_std.reshape((1, D))
stdMat = np.repeat(stdMat, T, axis=0)
meanMat = data_mean.reshape((1, D))
meanMat = np.repeat(meanMat, T, axis=0)
origData = np.multiply(origData, stdMat) + meanMat
return origData
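# In short, the inverse transform above is x_orig = x_norm * std + mean,
# applied on the dimensions the model actually predicts; ignored dimensions
# are zero-filled and therefore recover their training-set mean once the
# statistics are reapplied.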
def get_srnn_gts(
actions,
model,
test_set,
data_mean,
data_std,
dim_to_ignore,
one_hot,
to_euler=True):
"""
Get the ground truths for srnn's sequences, and convert to Euler angles.
(the error is always computed in Euler angles).
Args
actions: a list of actions to get ground truths for.
model: training model we are using (we only use the "get_batch" method).
test_set: dictionary with normalized training data.
data_mean: d-long vector with the mean of the training data.
data_std: d-long vector with the standard deviation of the training data.
dim_to_ignore: dimensions that we are not using to train/predict.
one_hot: whether the data comes with one-hot encoding indicating action.
    to_euler: whether to convert the angles to Euler format or keep them in exponential map format
Returns
srnn_gts_euler: a dictionary where the keys are actions, and the values
are the ground_truth, denormalized expected outputs of srnns's seeds.
"""
srnn_gts_euler = {}
for action in actions:
srnn_gt_euler = []
_, _, srnn_expmap = model.get_batch_srnn( test_set, action )
# expmap -> rotmat -> euler
for i in np.arange( srnn_expmap.shape[0] ):
      denormed = unnormalize_data(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot)
if to_euler:
for j in np.arange( denormed.shape[0] ):
for k in np.arange(3,97,3):
            denormed[j,k:k+3] = rotmat2euler(expmap2rotmat(denormed[j,k:k+3]))
srnn_gt_euler.append( denormed );
# Put back in the dictionary
srnn_gts_euler[action] = srnn_gt_euler
return srnn_gts_euler
def normal_init_(layer, mean_, sd_, bias, norm_bias=True):
"""Intialization of layers with normal distribution with mean and bias"""
classname = layer.__class__.__name__
# Only use the convolutional layers of the module
#if (classname.find('Conv') != -1 ) or (classname.find('Linear')!=-1):
if classname.find('Linear') != -1:
print('[INFO] (normal_init) Initializing layer {}'.format(classname))
layer.weight.data.normal_(mean_, sd_)
if norm_bias:
layer.bias.data.normal_(bias, 0.05)
else:
layer.bias.data.fill_(bias)
def weight_init(
module,
mean_=0,
sd_=0.004,
bias=0.0,
norm_bias=False,
init_fn_=normal_init_):
"""Initialization of layers with normal distribution"""
moduleclass = module.__class__.__name__
try:
for layer in module:
if layer.__class__.__name__ == 'Sequential':
for l in layer:
init_fn_(l, mean_, sd_, bias, norm_bias)
else:
init_fn_(layer, mean_, sd_, bias, norm_bias)
except TypeError:
init_fn_(module, mean_, sd_, bias, norm_bias)
def xavier_init_(layer, mean_, sd_, bias, norm_bias=True):
classname = layer.__class__.__name__
if classname.find('Linear')!=-1:
print('[INFO] (xavier_init) Initializing layer {}'.format(classname))
nn.init.xavier_uniform_(layer.weight.data)
# nninit.xavier_normal(layer.bias.data)
if norm_bias:
layer.bias.data.normal_(0, 0.05)
else:
layer.bias.data.zero_()
def create_dir_tree(base_dir):
dir_tree = ['models', 'tf_logs', 'config', 'std_log']
for dir_ in dir_tree:
os.makedirs(os.path.join(base_dir, dir_), exist_ok=True)
def create_look_ahead_mask(seq_length, is_nonautoregressive=False):
"""Generates a binary mask to prevent to use future context in a sequence."""
if is_nonautoregressive:
return np.zeros((seq_length, seq_length), dtype=np.float32)
x = np.ones((seq_length, seq_length), dtype=np.float32)
mask = np.triu(x, 1).astype(np.float32)
return mask # (seq_len, seq_len)
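# Example (illustrative): for seq_length=4 the autoregressive mask is
# >>> create_look_ahead_mask(4)
# array([[0., 1., 1., 1.],
#        [0., 0., 1., 1.],
#        [0., 0., 0., 1.],
#        [0., 0., 0., 0.]], dtype=float32)
# where ones mark the future positions each query is blocked from attending
# to; with is_nonautoregressive=True the mask is all zeros.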
def pose_expmap2rotmat(input_pose):
"""Convert exponential map pose format to rotation matrix pose format."""
pose_rotmat = []
for j in np.arange(input_pose.shape[0]):
rot_mat = [expmap2rotmat(input_pose[j, k:k+3]) for k in range(3, 97, 3)]
pose_rotmat.append(np.stack(rot_mat).flatten())
pose_rotmat = np.stack(pose_rotmat)
return pose_rotmat
def expmap23d_sequence(sequence, norm_stats, params):
viz_poses = revert_output_format(
[sequence], norm_stats['mean'], norm_stats['std'],
norm_stats['dim_to_ignore'], params['action_subset'],
params['use_one_hot'])
nframes = sequence.shape[0]
expmap = revert_coordinate_space(
viz_poses[0], np.eye(3), np.zeros(3))
xyz_data = np.zeros((nframes, 96))
for i in range(nframes):
xyz_data[i, :] = compute_forward_kinematics(
expmap[i, :],
params['parent'],
params['offset'],
params['rot_ind'],
params['exp_map_ind']
)
return xyz_data
def get_lr_fn(params, optimizer_fn):
"""Creates the function to be used to generate the learning rate."""
if params['learning_rate_fn'] == 'step':
return torch.optim.lr_scheduler.StepLR(
optimizer_fn, step_size=params['lr_step_size'], gamma=0.1
)
elif params['learning_rate_fn'] == 'exponential':
return torch.optim.lr_scheduler.ExponentialLR(
optimizer_fn, gamma=0.95
)
elif params['learning_rate_fn'] == 'linear':
# sets learning rate by multipliying initial learning rate times a function
    # the lambda is a *multiplier* on the initial learning rate, so it should
    # decay from 1.0 to 0.1 for the final lr to be learning_rate*0.1
    T = params['max_epochs']
    m = (0.1 - 1.0) / T
    lambda_fn = lambda epoch: m*epoch + 1.0
return torch.optim.lr_scheduler.LambdaLR(
optimizer_fn, lr_lambda=lambda_fn
)
elif params['learning_rate_fn'] == 'beatles':
# D^(-0.5)*min(i^(-0.5), i*warmup_steps^(-1.5))
D = float(params['model_dim'])
warmup = params['warmup_epochs']
lambda_fn = lambda e: (D**(-0.5))*min((e+1.0)**(-0.5), (e+1.0)*warmup**(-1.5))
return torch.optim.lr_scheduler.LambdaLR(
optimizer_fn, lr_lambda=lambda_fn
)
else:
raise ValueError('Unknown learning rate function: {}'.format(
params['learning_rate_fn']))
def compute_mean_average_precision(prediction, target, threshold, per_frame=False):
"""
Args:
    prediction: unnormalized sequence of shape [seq_len, num_joints, 3]
    target: unnormalized sequence of shape [seq_len, num_joints, 3]
threshold: float
"""
# compute the norm for the last axis: (x,y,z) coordinates
# [num_frames x num_joints]
TP = np.linalg.norm(prediction-target, axis=-1) <= threshold
TP_ = TP.astype(int)
FN_ = np.logical_not(TP).astype(int)
# [num_joints]
TP = np.sum(TP_, axis=0)
FN = np.sum(FN_, axis=0)
# compute recall for each joint
recall = TP / (TP+FN)
# average over joints
mAP = np.mean(recall)
if per_frame:
return mAP, TP, FN, (TP_, FN_)
return mAP, TP, FN
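# Worked example (illustrative): for a single joint whose per-frame error
# norms are [0.02, 0.08, 0.12] and a threshold of 0.1, the first two frames
# are true positives, so the per-joint recall is 2/3 and the returned mAP
# is ~0.667.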
| 19,278 | 30.708882 | 161 |
py
|
GaitForeMer
|
GaitForeMer-main/utils/visualize_attention_weights.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Visualization of the attention weights."""
import torch
import torch.nn as nn
import numpy as np
import argparse
import sys
import os
import tqdm
import json
from sklearn.metrics import confusion_matrix
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import data.H36MDataset_v2 as H36M_v2
import models.PoseTransformer as PoseTransformer
import models.PoseEncoderDecoder as PoseEncoderDecoder
import matplotlib.pyplot as plt
import matplotlib
def plot_conf_mat(matrix, action_labels):
fig, ax = plt.subplots()
im = ax.imshow(matrix, cmap='Wistia')
ax.set_xticks(np.arange(len(action_labels)))
ax.set_yticks(np.arange(len(action_labels)))
ax.set_xticklabels(action_labels, fontdict={'fontsize':10}, rotation=90)
ax.set_yticklabels(action_labels, fontdict={'fontsize':10})
# cbar_kw={}
# if set_colorbar:
# cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
# cbar.ax.set_ylabel("", rotation=-90, va="bottom")
# nmax= np.max(matrix)/2.
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
for i in range(len(action_labels)):
for j in range(len(action_labels)):
# color= "w" if round(matrix[i, j],2) < nmax else "black"
text = ax.text(j, i, round(matrix[i, j], 2),
ha="center", va="center", color="black", fontsize=5)
plt.ylabel("")
plt.xlabel("")
# ax.set_title("Small plot")
fig.tight_layout()
plt.show()
#plt.savefig(name)
plt.close()
the_keys = [(5, 'directions', 1), (5, 'directions', 2), (5, 'discussion', 1), (5, 'discussion', 2), (5, 'eating', 1), (5, 'eating', 2), (5, 'greeting', 1), (5, 'greeting', 2), (5, 'phoning', 1), (5, 'phoning', 2), (5, 'posing', 1), (5, 'posing', 2), (5, 'purchases', 1), (5, 'purchases', 2), (5, 'sitting', 1), (5, 'sitting', 2), (5, 'sittingdown', 1), (5, 'sittingdown', 2), (5, 'smoking', 1), (5, 'smoking', 2), (5, 'takingphoto', 1), (5, 'takingphoto', 2), (5, 'waiting', 1), (5, 'waiting', 2), (5, 'walking', 1), (5, 'walking', 2), (5, 'walkingdog', 1), (5, 'walkingdog', 2), (5, 'walkingtogether', 1), (5, 'walkingtogether', 2)
]
def get_windows(
data,
source_seq_len,
target_seq_len,
pad_decoder_inputs,
input_size, n_windows):
N, _ = data.shape
src_seq_len = source_seq_len - 1
encoder_inputs_ = []
decoder_inputs_ = []
decoder_outputs_ = []
start_frame = 0
for n in range(n_windows):
encoder_inputs = np.zeros((src_seq_len, input_size), dtype=np.float32)
decoder_inputs = np.zeros((target_seq_len, input_size), dtype=np.float32)
decoder_outputs = np.zeros((target_seq_len, input_size), dtype=np.float32)
# total_frames x n_joints*joint_dim
total_frames = source_seq_len + target_seq_len
data_sel = data[start_frame:(start_frame+total_frames), :]
encoder_inputs[:, 0:input_size] = data_sel[0:src_seq_len,:]
decoder_inputs[:, 0:input_size] = data_sel[src_seq_len:src_seq_len+target_seq_len, :]
decoder_outputs[:, 0:input_size] = data_sel[source_seq_len:, 0:input_size]
if pad_decoder_inputs:
query = decoder_inputs[0:1, :]
decoder_inputs = np.repeat(query, target_seq_len, axis=0)
encoder_inputs_.append(encoder_inputs)
decoder_inputs_.append(decoder_inputs)
decoder_outputs_.append(decoder_outputs)
start_frame = start_frame + src_seq_len
return (
torch.from_numpy(np.stack(encoder_inputs_)),
torch.from_numpy(np.stack(decoder_inputs_)),
torch.from_numpy(np.stack(decoder_outputs_))
)
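# Example (illustrative): with source_seq_len=50, target_seq_len=25 and
# n_windows=8, the returned tensors stack 8 consecutive windows, e.g. the
# encoder inputs have shape [8, 49, input_size], and successive windows
# start source_seq_len-1 frames apart.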
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--params_json', type=str, default=None)
parser.add_argument('--model', type=str, default= None)
args = parser.parse_args()
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
params = json.load(open(args.params_json))
train_dataset_fn, eval_dataset_fn = H36M_v2.dataset_factory(params)
pose_encoder_fn, pose_decoder_fn = \
PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
potr = PoseTransformer.model_factory(
params, pose_encoder_fn, pose_decoder_fn)
potr.load_state_dict(torch.load(args.model, map_location=_DEVICE))
potr.to(_DEVICE)
potr.eval()
all_pred, all_gt = [], []
n_windows = 8
the_keys_ = [the_keys[i] for i in range(1, len(the_keys), 2)]
with torch.no_grad():
for i in range(len(the_keys_)):
entry_key = the_keys_[i] # (5, 'walking', 1)
data = eval_dataset_fn.dataset._data[entry_key]
encoder_inputs, decoder_inputs, decoder_outputs = get_windows(
data,
params['source_seq_len'],
params['target_seq_len'],
params['pad_decoder_inputs'],
params['input_dim'],
n_windows
)
      pred_sequence, attn_weights, enc_weights = potr(
encoder_inputs.to(_DEVICE),
decoder_inputs.to(_DEVICE),
get_attn_weights=True
)
enc_weights = enc_weights.cpu().numpy()
attn_weights = attn_weights[-1].cpu().numpy()
attn_weights = [attn_weights[j] for j in range(n_windows)]
      # the decoder-attention concat here was dead code (it was immediately
      # overwritten); only the encoder self-attention maps are plotted
      mat = np.concatenate([enc_weights[j] for j in range(n_windows)], axis=-1)
print(enc_weights.shape)
fig, ax = plt.subplots(figsize=(20,10))
ax.matshow(mat)
plt.ylabel("")
plt.xlabel("")
fig.tight_layout()
#plt.show()
name = 'vis_attn/%s_.png'%(entry_key[1])
plt.savefig(name)
plt.close()
| 6,648 | 33.630208 | 632 |
py
|
GaitForeMer
|
GaitForeMer-main/utils/PositionEncodings.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of the 2D positional encodings used in [1].
Position encodings give a signature to each pixel in the image via a set
of sine frequencies computed with a 2D sine function.
[1] https://arxiv.org/abs/2005.12872
[2] https://arxiv.org/pdf/1706.03762.pdf
"""
import numpy as np
import math
import torch
from torch import nn
class PositionEncodings2D(object):
"""Implementation of 2d masked position encodings as a NN layer.
This is a more general version of the position embedding, very similar
to the one used by the Attention is all you need paper, but generalized
to work on images as used in [1].
"""
def __init__(
self,
num_pos_feats=64,
temperature=10000,
normalize=False,
scale=None):
"""Constructs position embeding layer.
Args:
num_pos_feats: An integer for the depth of the encoding signature per
pixel for each axis `x` and `y`.
temperature: Value of the exponential temperature.
      normalize: Bool indicating whether the encodings should be normalized
        by the number of pixels in each image row.
      scale: Scaling factor; if None, a 2*pi scaling is used.
"""
super().__init__()
self._num_pos_feats = num_pos_feats
self._temperature = temperature
self._normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self._scale = scale
def __call__(self, mask):
"""Generates the positional encoding given image boolean mask.
Args:
mask: Boolean tensor of shape [batch_size, width, height] with ones
in pixels that belong to the padding and zero in valid pixels.
Returns:
Sine position encodings. Shape [batch_size, num_pos_feats*2, width, height]
"""
# the positional encodings are generated for valid pixels hence
# we need to take the negation of the boolean mask
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self._normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self._scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self._scale
dim_t = torch.arange(
self._num_pos_feats, dtype=torch.float32)
    dim_t = self._temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self._num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
class PositionEncodings1D(object):
"""Positional encodings for `1D` sequences.
Implements the following equations:
PE_{(pos, 2i)} = sin(pos/10000^{2i/d_model})
PE_{(pos, 2i+1)} = cos(pos/10000^{2i/d_model})
Where d_model is the number of positional features. Also known as the
depth of the positional encodings. These are the positional encodings
proposed in [2].
"""
def __init__(self, num_pos_feats=512, temperature=10000, alpha=1):
self._num_pos_feats = num_pos_feats
self._temperature = temperature
self._alpha = alpha
def __call__(self, seq_length):
angle_rads = self.get_angles(
np.arange(seq_length)[:, np.newaxis],
np.arange(self._num_pos_feats)[np.newaxis, :]
)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
pos_encoding = pos_encoding.astype(np.float32)
return torch.from_numpy(pos_encoding)
def get_angles(self, pos, i):
angle_rates = 1 / np.power(
self._temperature, (2 * (i//2)) / np.float32(self._num_pos_feats))
return self._alpha*pos * angle_rates
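# Usage sketch (illustrative): a 50-step sequence encoded with a 128-deep
# signature per position.
# >>> pe_gen = PositionEncodings1D(num_pos_feats=128)
# >>> pe_gen(50).shape
# torch.Size([1, 50, 128])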
def visualize_2d_encodings():
import cv2
import numpy as np
import matplotlib.pyplot as pplt
# Create a mask where pixels are all valid
mask = torch.BoolTensor(1, 32, 32).fill_(False)
# position encodigns with a signature of depth per pixel
  # the effective pixel signature is num_pos_feats*2 (128 for each axis)
pos_encodings_gen = PositionEncodings2D(num_pos_feats=128, normalize=True)
encodings = pos_encodings_gen(mask).numpy()
print('Shape of encodings', encodings.shape)
# visualize the first frequency channel for x and y
y_encodings = encodings[0,0, :, :]
x_encodings = encodings[0,128, : ,:]
pplt.matshow(x_encodings, cmap=pplt.get_cmap('jet'))
pplt.matshow(y_encodings, cmap=pplt.get_cmap('jet'))
pplt.show()
def visualize_1d_encodings():
import matplotlib.pyplot as plt
pos_encoder_gen = PositionEncodings1D()
pos_encoding = pos_encoder_gen(50).numpy()
print(pos_encoding.shape)
plt.pcolormesh(pos_encoding[0], cmap='RdBu')
plt.xlabel('Depth')
plt.xlim((0, 512))
plt.ylabel('position in sequence')
plt.colorbar()
plt.show()
if __name__ == "__main__":
visualize_2d_encodings()
# visualize_1d_encodings()
| 6,337 | 32.712766 | 81 |
py
|
GaitForeMer
|
GaitForeMer-main/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
GaitForeMer
|
GaitForeMer-main/data/GaitJointsDataset.py
|
import os
import sys
import numpy as np
import torch
import argparse
import tqdm
import pickle
import random
_TOTAL_ACTIONS = 4
# Mapping from 1-base of NTU to vibe 49 joints
# hip, thorax,
_MAJOR_JOINTS = [39, 41, 37, 43, 34, 35, 36, 33, 32, 31, 28, 29, 30, 27, 26, 25, 40]
# 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 21
_NMAJOR_JOINTS = len(_MAJOR_JOINTS)
_MIN_STD = 1e-4
_SPINE_ROOT = 0 # after only taking major joints (ie index in _MAJOR_JOINTS)
def collate_fn(batch):
"""Collate function for data loaders."""
e_inp = torch.from_numpy(np.stack([e['encoder_inputs'] for e in batch]))
d_inp = torch.from_numpy(np.stack([e['decoder_inputs'] for e in batch]))
d_out = torch.from_numpy(np.stack([e['decoder_outputs'] for e in batch]))
action_id = torch.from_numpy(np.stack([e['action_id'] for e in batch]))
action = [e['action_str'] for e in batch]
batch_ = {
'encoder_inputs': e_inp,
'decoder_inputs': d_inp,
'decoder_outputs': d_out,
'action_str': action,
'action_ids': action_id
}
return batch_
class GaitJointsDataset(torch.utils.data.Dataset):
def __init__(self, params=None, mode='train', fold=1):
super(GaitJointsDataset, self).__init__()
self._params = params
self._mode = mode
thisname = self.__class__.__name__
self._monitor_action = 'normal'
self._action_str = ['normal', 'slight', 'moderate', 'severe']
self.data_dir = self._params['data_path']
self.fold = fold
self.load_data()
def load_data(self):
train_data = pickle.load(open(self.data_dir+"EPG_train_" + str(self.fold) + ".pkl", "rb"))
test_data = pickle.load(open(self.data_dir+"EPG_test_" + str(self.fold) + ".pkl", "rb"))
if self._mode == 'train':
X_1, Y = self.data_generator(train_data, mode='train', fold_number=self.fold)
else:
X_1, Y = self.data_generator(test_data)
self.X_1 = X_1
self.Y = Y
self._action_str = ['none', 'mild', 'moderate', 'severe']
self._pose_dim = 3 * _NMAJOR_JOINTS
self._data_dim = self._pose_dim
def data_generator(self, T, mode='test', fold_number=1):
X_1 = []
Y = []
# bootstrap_number = 3
# num_samples = 39
total_num_clips = 0
for i in range(len(T['pose'])):
total_num_clips += 1
p = np.copy(T['pose'][i])
# print(np.shape(p))
y_label_index = T['label'][i]
label = y_label_index
X_1.append(p)
Y.append(label)
# can't stack X_1 because not all have equal frames
Y = np.stack(Y)
# For using a subset of the dataset (few-shot)
# if mode == 'train':
# sampling_dir = 'PATH/TO/BOOTSTRAP_SAMPLING_DIR'
# all_clip_video_names = pickle.load(open(sampling_dir + "all_clip_video_names.pkl", "rb"))
# clip_video_names = all_clip_video_names[fold_number - 1]
# all_bootstrap_samples = pickle.load(open(sampling_dir + f'{num_samples}_samples/bootstrap_{bootstrap_number}_samples.pkl', 'rb'))
# bootstrap_samples = all_bootstrap_samples[fold_number - 1]
# mask_list = [1 if video_name in bootstrap_samples else 0 for video_name in clip_video_names]
# train_indices = [train_idx for train_idx, mask_value in enumerate(mask_list) if mask_value == 1]
# X_1 = [X_1[train_idx] for train_idx in train_indices]
# Y = Y[train_indices]
return X_1, Y
def __len__(self):
return len(self.Y)
def __getitem__(self, idx):
return self._get_item_train(idx)
def _get_item_train(self, idx):
"""Get item for the training mode."""
x = self.X_1[idx]
y = self.Y[idx]
# adjust for mapping/subset of joints from vibe to ntu
x = x[:,_MAJOR_JOINTS,:]
action_id = y
source_seq_len = self._params['source_seq_len']
target_seq_len = self._params['target_seq_len']
    input_size = 3 * _NMAJOR_JOINTS  # not sure if this is right
    pose_size = 3 * _NMAJOR_JOINTS  # not sure if this is right
total_frames = source_seq_len + target_seq_len
src_seq_len = source_seq_len - 1
encoder_inputs = np.zeros((src_seq_len, input_size), dtype=np.float32)
decoder_inputs = np.zeros((target_seq_len, input_size), dtype=np.float32)
decoder_outputs = np.zeros((target_seq_len, pose_size), dtype=np.float32)
# total_framesxn_joints*joint_dim
N = np.shape(x)[0]
x = x.reshape(N, -1)
    # the original code did not change the start frame between epochs
    start_frame = random.randint(0, N - total_frames)  # high value inclusive
data_sel = x[start_frame:(start_frame + total_frames), :]
encoder_inputs[:, 0:input_size] = data_sel[0:src_seq_len,:]
decoder_inputs[:, 0:input_size] = \
data_sel[src_seq_len:src_seq_len+target_seq_len, :]
decoder_outputs[:, 0:pose_size] = data_sel[source_seq_len:, 0:pose_size]
if self._params['pad_decoder_inputs']:
query = decoder_inputs[0:1, :]
decoder_inputs = np.repeat(query, target_seq_len, axis=0)
return {
'encoder_inputs': encoder_inputs,
'decoder_inputs': decoder_inputs,
'decoder_outputs': decoder_outputs,
'action_id': action_id,
'action_str': self._action_str[action_id],
}
def dataset_factory(params, fold):
"""Defines the datasets that will be used for training and validation."""
params['num_activities'] = _TOTAL_ACTIONS
params['virtual_dataset_size'] = params['steps_per_epoch']*params['batch_size']
params['n_joints'] = _NMAJOR_JOINTS
eval_mode = 'test' if 'test_phase' in params.keys() else 'eval'
if eval_mode == 'test':
train_dataset_fn = None
else:
train_dataset = GaitJointsDataset(params, mode='train', fold=fold)
train_dataset_fn = torch.utils.data.DataLoader(
train_dataset,
batch_size=params['batch_size'],
shuffle=True,
num_workers=4,
collate_fn=collate_fn,
)
eval_dataset = GaitJointsDataset(
params,
mode=eval_mode,
fold=fold,
)
eval_dataset_fn = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=collate_fn,
)
return train_dataset_fn, eval_dataset_fn
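# Usage sketch (illustrative): `params` is assumed to provide at least
# data_path, source_seq_len, target_seq_len, pad_decoder_inputs, batch_size
# and steps_per_epoch.
# >>> train_fn, eval_fn = dataset_factory(params, fold=1)
# >>> batch = next(iter(eval_fn))
# >>> batch['encoder_inputs'].shape  # torch.Size([1, source_seq_len-1, 51])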
| 6,232 | 31.128866 | 137 |
py
|
GaitForeMer
|
GaitForeMer-main/data/NTURGDDataset.py
|
###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <[email protected]>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Pytorch dataset of skeletons for the NTU-RGB+D [1] dataset.
[1] http://rose1.ntu.edu.sg/Datasets/actionRecognition.asp
[2] https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shahroudy_NTU_RGBD_A_CVPR_2016_paper.pdf
"""
import os
import sys
import numpy as np
import torch
import argparse
import tqdm
# train subject ids can be found in [2]
_TRAIN_SUBJECTS = [
1, 2, 4, 5, 8, 9, 13, 14, 15,16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38
]
_TEST_SUBJECTS = [x for x in range(1, 40) if x not in _TRAIN_SUBJECTS]
# the joints according to [2] in 1-base
# 1-base of the spine 2-middle of the spine 3-neck 4-head 5-left shoulder
# 6-left elbow 7-left wrist 8-left hand 9-right shoulder 10-right elbow
# 11-right wrist 12-right hand 13-left hip 14-left knee 15-left ankle
# 16-left foot 17-right hip 18-right knee 19-right ankle 20-right foot
# 21-spine 22-tip of the left hand 23-left thumb 24-tip of the right
# hand 25-right thumb
# here set the joint indices in base 0
_MAJOR_JOINTS = [x-1 for x in
[1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 21]
]
_NMAJOR_JOINTS = len(_MAJOR_JOINTS)
_SPINE_ROOT = 0
_MIN_STD = 1e-4
# NTURGB+D contains 60 actions
_TOTAL_ACTIONS = 60
_MIN_REQ_FRAMES = 65
def collate_fn(batch):
"""Collate function for data loaders."""
e_inp = torch.from_numpy(np.stack([e['encoder_inputs'] for e in batch]))
d_inp = torch.from_numpy(np.stack([e['decoder_inputs'] for e in batch]))
d_out = torch.from_numpy(np.stack([e['decoder_outputs'] for e in batch]))
action_id = torch.from_numpy(np.stack([e['action_id'] for e in batch]))
action = [e['action_str'] for e in batch]
batch_ = {
'encoder_inputs': e_inp,
'decoder_inputs': d_inp,
'decoder_outputs': d_out,
'action_str': action,
'action_ids': action_id
}
return batch_
def load_action_labels(data_path):
data_labels = []
with open(os.path.join(data_path, 'action_labels.txt')) as file_:
for line in file_:
data_labels.append(line.strip())
return data_labels
def get_activity_from_file(path_file):
# The pattern is SsssCcccPpppRrrrAaaa.skeleton
pattern = path_file.split('/')[-1].split('.')[0]
setup_id= int(pattern[1:4])
camera_id = int(pattern[5:8])
subject_id = int(pattern[9:12])
replication_id = int(pattern[13:16])
activity_id = int(pattern[17:])
return (setup_id, camera_id, subject_id, replication_id, activity_id)
def select_fold_files(path_to_data, skip_files_path):
all_files = [x for x in os.listdir(path_to_data) if x.endswith('skeleton')]
with open(skip_files_path) as file_:
skip_files = [line.strip() for line in file_]
training_files = []
test_files = []
for path_file in all_files:
if path_file.split('.')[0] in skip_files:
      print('Skipping file:', path_file)
continue
seq_info = get_activity_from_file(path_file)
if seq_info[2] in _TRAIN_SUBJECTS:
training_files.append(path_file)
else:
test_files.append(path_file)
return training_files, test_files
def save_fold_files(path_to_data, output_path, skip_files_path):
training_files, test_files = select_fold_files(path_to_data, skip_files_path)
val_idx = np.random.choice(
len(training_files), int(len(training_files)*0.05), replace=False)
  # pick the validation files before filtering the training list, otherwise
  # the indices in val_idx would refer to the already-filtered list
  val_files = [training_files[i]
    for i in range(len(training_files)) if i in val_idx]
  training_files = [training_files[i]
    for i in range(len(training_files)) if i not in val_idx]
with open(os.path.join(output_path, 'training_files.txt'), 'w') as file_:
for f in training_files:
print(f, file=file_)
with open(os.path.join(output_path, 'testing_files.txt'), 'w') as file_:
for f in test_files:
print(f, file=file_)
with open(os.path.join(output_path, 'validation_files.txt'), 'w') as file_:
for f in val_files:
print(f, file=file_)
def read_sequence_kinect_skeletons(path_file):
"""Reads the text file provided in the
"""
fid = open(path_file, 'r')
seq_info = get_activity_from_file(path_file)
# first line is the number of frames
framecount = int(fid.readline().strip())
bodies = {}
for i in range(framecount):
bodycount = int(fid.readline().strip())
for b in range(bodycount):
      # tracking ID of the skeleton
line = fid.readline().strip().split(' ')
body_id = int(line[0])
arrayint = [int(x) for x in line[1:7]]
lean = [float(x) for x in line[7:9]]
tracking_state = int(line[-1])
      # number of joints
joint_count = int(fid.readline().strip())
joints = []
for j in range(joint_count):
line = fid.readline().strip().split(' ')
# 3D location of the joint
joint_3d = [float(x) for x in line[0:3]]
# 2D depth location of joints
joint_2d_depth = [float(x) for x in line[3:5]]
# 2D color location of joints
joint_2d_color = [float(x) for x in line[5:7]]
# orientation of joints (?)
joint_orientation = [float(x) for x in line[7:11]]
# tracking state
joint_track_state = int(line[-1])
joints.append(joint_3d)
if body_id in list(bodies.keys()):
bodies[body_id].append(np.array(joints, dtype=np.float32))
else:
bodies[body_id] = [np.array(joints, dtype=np.float32)]
for k, v in bodies.items():
bodies[k] = np.stack(v)
return bodies, seq_info
def select_sequence_based_var(action_sequence_dict):
"""Selects the actor in sequence based on the sum of variance of X, Y, Z."""
larger_var = -1
selected_key = None
for k, v in action_sequence_dict.items():
var = np.var(v, axis=-1)
sum_var = np.sum(var)
if sum_var > larger_var:
larger_var = sum_var
selected_key = k
return action_sequence_dict[selected_key]
class NTURGDDatasetSkeleton(torch.utils.data.Dataset):
def __init__(self, params=None, mode='train'):
super(NTURGDDatasetSkeleton, self).__init__()
self._params = params
self._mode = mode
thisname = self.__class__.__name__
self._monitor_action = 'walking'
for k, v in params.items():
print('[INFO] ({}) {}: {}'.format(thisname, k, v))
data_path = self._params['data_path']
self._action_str = load_action_labels(data_path)
self._fold_file = ''
if self._mode.lower() == 'train':
self._fold_file = os.path.join(data_path, 'training_files.txt')
elif self._mode.lower() == 'eval':
self._fold_file = os.path.join(data_path, 'validation_files.txt')
elif self._mode.lower() == 'test':
self._fold_file = os.path.join(data_path, 'testing_files.txt')
else:
raise ValueError('Unknown launching mode: {}'.format(self._mode))
self.load_data()
def read_fold_file(self, fold_file):
files = []
with open(fold_file) as file_:
for line in file_:
files.append(line.strip())
return files
def compute_norm_stats(self, data):
self._norm_stats = {}
mean = np.mean(data, axis=0)
    std = np.std(data, axis=0)
std[np.where(std<_MIN_STD)] = 1
self._norm_stats['mean'] = mean.ravel()
self._norm_stats['std'] = std.ravel()
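  # The stats above feed a per-dimension z-score in normalize_data:
  # x_norm = (x - mean) / std, where near-constant dimensions are clamped to
  # std = 1 (see _MIN_STD) to avoid division blow-ups.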
def load_compute_norm_stats(self, data):
mean_path = os.path.join(self._params['data_path'], 'mean.npy')
std_path = os.path.join(self._params['data_path'], 'std.npy')
thisname = self.__class__.__name__
self._norm_stats = {}
if os.path.exists(mean_path):
print('[INFO] ({}) Loading normalization stats!'.format(thisname))
self._norm_stats['mean'] = np.load(mean_path)
self._norm_stats['std'] = np.load(std_path)
elif self._mode == 'train':
print('[INFO] ({}) Computing normalization stats!'.format(thisname))
self.compute_norm_stats(data)
np.save(mean_path, self._norm_stats['mean'])
np.save(std_path, self._norm_stats['std'])
else:
      raise ValueError('Cannot compute statistics outside of training mode!')
def normalize_data(self):
for k in self._data.keys():
tmp_data = self._data[k]
tmp_data = tmp_data - self._norm_stats['mean']
tmp_data = np.divide(tmp_data, self._norm_stats['std'])
self._data[k] = tmp_data
def load_data(self):
seq_files = self.read_fold_file(self._fold_file)
self._data = {}
all_dataset = []
seq_lens = []
for sequence_file in tqdm.tqdm(seq_files):
sequence_file = os.path.join(self._params['data_path'],
'nturgb+d_skeletons', sequence_file)
# the sequence key contains
# (setup_id, camera_id, subject_id, replication_id, activity_id)
# sequence shape [num_frames, 25, 3]
action_sequence, seq_key = read_sequence_kinect_skeletons(sequence_file)
      # added code: skip sequences that contain no actors
if len(action_sequence) == 0:
continue
# FOR TESTING PURPOSES, EXIT LOADING CODE EARLY
# if len(all_dataset) > 100:
# break
action_sequence = select_sequence_based_var(action_sequence)
      # sequence shape [num_frames, 17, 3]
action_sequence = action_sequence[:, _MAJOR_JOINTS, :]
# Only consider sequences with more than _MIN_REQ_FRAMES frames
if action_sequence.shape[0]<_MIN_REQ_FRAMES:
continue
# center joints in the spine of the skeleton
root_sequence = np.expand_dims(action_sequence[:, _SPINE_ROOT, :], axis=1)
action_sequence = action_sequence - root_sequence
T, N, D = action_sequence.shape
seq_lens.append(T)
# total_frames x n_joints*3
self._data[seq_key] = action_sequence.reshape((T, -1))
all_dataset.append(action_sequence)
all_dataset = np.concatenate(all_dataset, axis=0)
self.load_compute_norm_stats(all_dataset)
self.normalize_data()
self._pose_dim = self._norm_stats['std'].shape[-1]
self._data_dim = self._pose_dim
self._data_keys = list(self._data.keys())
thisname = self.__class__.__name__
print('[INFO] ({}) The min seq len for mode: {} is: {}'.format(
thisname, self._mode, min(seq_lens)))
print('[INFO] ({}) Pose dim: {} Data dim: {}'.format(
thisname, self._pose_dim, self._data_dim))
def __len__(self):
if self._mode == 'train':
return max(len(self._data_keys), self._params['virtual_dataset_size'])
return len(self._data_keys)
def __getitem__(self, idx):
return self._get_item_train(idx)
def _get_item_train(self, idx):
"""Get item for the training mode."""
if self._mode == 'train':
# idx = np.random.choice(len(self._data_keys), 1)[0]
idx = np.random.choice(len(self._data_keys))
the_key = self._data_keys[idx]
# the action id in the files come in 1 based index
action_id = the_key[-1] - 1
source_seq_len = self._params['source_seq_len']
target_seq_len = self._params['target_seq_len']
input_size = self._pose_dim
pose_size = self._pose_dim
total_frames = source_seq_len + target_seq_len
src_seq_len = source_seq_len - 1
encoder_inputs = np.zeros((src_seq_len, input_size), dtype=np.float32)
decoder_inputs = np.zeros((target_seq_len, input_size), dtype=np.float32)
decoder_outputs = np.zeros((target_seq_len, pose_size), dtype=np.float32)
N, _ = self._data[the_key].shape
start_frame = np.random.randint(0, N-total_frames)
# total_framesxn_joints*joint_dim
data_sel = self._data[the_key][start_frame:(start_frame+total_frames), :]
encoder_inputs[:, 0:input_size] = data_sel[0:src_seq_len,:]
decoder_inputs[:, 0:input_size] = \
data_sel[src_seq_len:src_seq_len+target_seq_len, :]
decoder_outputs[:, 0:pose_size] = data_sel[source_seq_len:, 0:pose_size]
if self._params['pad_decoder_inputs']:
query = decoder_inputs[0:1, :]
decoder_inputs = np.repeat(query, target_seq_len, axis=0)
return {
'encoder_inputs': encoder_inputs,
'decoder_inputs': decoder_inputs,
'decoder_outputs': decoder_outputs,
'action_id': action_id,
'action_str': self._action_str[action_id],
}
def unormalize_sequence(self, action_sequence):
sequence = action_sequence*self._norm_stats['std']
sequence = sequence + self._norm_stats['mean']
return sequence
def dataset_factory(params):
"""Defines the datasets that will be used for training and validation."""
params['num_activities'] = _TOTAL_ACTIONS
params['virtual_dataset_size'] = params['steps_per_epoch']*params['batch_size']
params['n_joints'] = _NMAJOR_JOINTS
eval_mode = 'test' if 'test_phase' in params.keys() else 'eval'
if eval_mode == 'test':
train_dataset_fn = None
else:
train_dataset = NTURGDDatasetSkeleton(params, mode='train')
train_dataset_fn = torch.utils.data.DataLoader(
train_dataset,
batch_size=params['batch_size'],
shuffle=True,
num_workers=4,
collate_fn=collate_fn,
drop_last=True
)
eval_dataset = NTURGDDatasetSkeleton(
params,
mode=eval_mode
)
eval_dataset_fn = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=True,
num_workers=1,
drop_last=True,
collate_fn=collate_fn,
)
return train_dataset_fn, eval_dataset_fn
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default=None)
parser.add_argument('--pad_decoder_inputs', action='store_true')
parser.add_argument('--source_seq_len', type=int, default=40)
parser.add_argument('--target_seq_len', type=int, default=20)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--steps_per_epoch', type=int, default=200)
args = parser.parse_args()
params = vars(args)
print('Creating fold files')
save_fold_files(
'data/nturgb+d_data/nturgb+d_skeletons', # path_to_data
'data/nturgb+d_data', # output_path
'data/nturgb+d_data/missing_skeletons.txt' # skip_files_path
)
train_dataset_load, val_dataset_load = dataset_factory(params)
for n, sample in enumerate(val_dataset_load):
print(n,
sample['encoder_inputs'].size(),
sample['decoder_inputs'].size(),
sample['decoder_outputs'].size(),
sample['action_ids'].size())
| 15,284 | 32.084416 | 110 |
py
|
CPFN
|
CPFN-master/training_PatchSelection.py
|
# Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
# Importing the Dataset file
from Dataset import dataloaders
# Importing the Network file
from PointNet2 import pn2_network
# Importing the Utils files
from Utils import config_loader, training_utils, training_visualisation
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_patchSelec.yml')
parser.add_argument('--lowres_dataset', help='Directory of the Lowres Dataset', default=os.path.expanduser('data/TraceParts_v2_LowRes/'))
parser.add_argument('--highres_dataset', help='Directory of the Highres Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
parser.add_argument('--scale', help='Scale of the Primitives', type=float, default=0.05)
parser.add_argument('--patchselec_weigths', help='Filename of the model weights to load', default='')
args = parser.parse_args()
# Loading the config file
conf = config_loader.Patch_SelecConfig(args.config_file)
# Selecting the visible GPUs
visible_GPUs = conf.get_CUDA_visible_GPUs()
device = torch.device('cuda')
if visible_GPUs is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
# Training Parameters
nb_epochs = conf.get_n_epochs()
init_learning_rate = conf.get_init_learning_rate()
val_interval = conf.get_val_interval()
snapshot_interval = conf.get_snapshot_interval()
# Training Dataset
csv_path_train = os.path.join('Dataset', conf.get_train_data_file())
    noisy_train = conf.is_train_data_noisy()
    first_n_train = conf.get_train_data_first_n()
num_workers_train = conf.get_nb_train_workers()
if not os.path.isdir(conf.get_weights_folder()):
os.mkdir(conf.get_weights_folder())
# Validation Dataset
csv_path_val = os.path.join('Dataset', conf.get_val_data_file())
    noisy_val = conf.is_val_data_noisy()
    first_n_val = conf.get_val_data_first_n()
num_workers_val = conf.get_nb_val_workers()
# Launching the Network
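    # The patch selection module is a per-point binary classifier
    # (output_sizes=[2]: logits for selecting / not selecting a point as a patch seed)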
patchselec_weights_filename = 'patchselec_%s_module'%str(round(args.scale, 2))
patchselec_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[2]).to(device)
if os.path.isfile(os.path.join(conf.get_weights_folder(), args.patchselec_weigths)):
        state_dict = torch.load(os.path.join(conf.get_weights_folder(), args.patchselec_weigths))
        patchselec_module.load_state_dict(state_dict, strict=True)
train_dataset = dataloaders.Dataset_PatchSelection(csv_path_train, args.lowres_dataset, args.highres_dataset, args.scale, n_points=8192, normalisation=True)
train_datasampler = dataloaders.RandomSampler(data_source=train_dataset, seed=12345, identical_epochs=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=conf.get_batch_size(), num_workers=conf.get_nb_train_workers(), pin_memory=True)
val_dataset = dataloaders.Dataset_PatchSelection(csv_path_val, args.lowres_dataset, args.highres_dataset, args.scale, n_points=8192, normalisation=True)
val_datasampler = dataloaders.RandomSampler(data_source=val_dataset, seed=12345, identical_epochs=False)
val_dataloader = torch.utils.data.DataLoader(val_dataset, sampler=val_datasampler, batch_size=conf.get_batch_size(), num_workers=conf.get_nb_val_workers(), pin_memory=True)
# Optimizer
optimizer = torch.optim.Adam(patchselec_module.parameters(), lr=init_learning_rate)
# Visualisation
visualiser = training_visualisation.Visualiser(conf.get_visualisation_interval())
# Initialisation
global_step = 0
best_loss = np.inf
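    # Train every epoch; keep the weights with the best validation loss and
    # save periodic snapshots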
for epoch in range(nb_epochs):
global_step, _ = training_utils.patch_selection_train_val_epoch(train_dataloader, patchselec_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train')
if (epoch % conf.get_val_interval() == 0) and (epoch > 0):
with torch.no_grad():
_, loss = training_utils.patch_selection_train_val_epoch(val_dataloader, patchselec_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='val')
if loss < best_loss:
torch.save(patchselec_module.state_dict(), os.path.join(conf.get_weights_folder(), patchselec_weights_filename + '.pth'))
best_loss = loss
if (epoch % conf.get_snapshot_interval() == 0) and (epoch > 0):
torch.save(patchselec_module.state_dict(), os.path.join(conf.get_weights_folder(), patchselec_weights_filename + '%d.pth'%epoch))
torch.save(patchselec_module.state_dict(), os.path.join(conf.get_weights_folder(), patchselec_weights_filename + '%d.pth' % epoch))
| 4,812 | 54.321839 | 193 |
py
|
CPFN
|
CPFN-master/evaluation_globalSPFN.py
|
# Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, metric_implementation, losses_implementation
from PointNet2 import pn2_network
# Importing Utils files
from Utils import config_loader
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_globalSPFN.yml')
parser.add_argument('--lowres_dataset', help='Directory of the Lowres Input Dataset', default=os.path.expanduser('data/TraceParts_v2_lowres/'))
parser.add_argument('--highres_dataset', help='Directory of the Highres Input Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
parser.add_argument('--path_patches', help='Path to Sampled Patches h5 files', default=os.path.expanduser('data/TraceParts_v2_patches/'))
parser.add_argument('--scale', help='Scale to select the smallest primitive', default=0.05, type=float)
parser.add_argument('--output_folder', help='Directory of the output folder', default=os.path.expanduser('data/TraceParts_v2_globalspfn/'))
parser.add_argument('--evaluation_set', help='Whether to evaluate on the train or test set', default='test')
args = parser.parse_args()
path_patches = os.path.join(args.path_patches, str(round(args.scale,2)))
if not os.path.isdir(args.output_folder):
os.mkdir(args.output_folder)
# Loading the config file
conf = config_loader.SPFNConfig(args.config_file)
# Selecting the visible GPUs
visible_GPUs = conf.get_CUDA_visible_GPUs()
device = torch.device('cuda')
if visible_GPUs is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primitive Types and Numbers
fitter_factory.register_primitives(conf.get_list_of_primitives())
n_registered_primitives = fitter_factory.get_n_registered_primitives()
n_max_global_instances = conf.get_n_max_global_instances()
# Test Dataset
if args.evaluation_set == 'train':
csv_path_test = os.path.join('Dataset', conf.get_train_data_file())
else:
csv_path_test = os.path.join('Dataset', conf.get_test_data_file())
noisy_test = conf.is_test_data_noisy()
first_n_test = conf.get_test_data_first_n()
# Launching the Network
spfn_module_filename = 'globalspfn_module.pth'
spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_global_instances]).to(device)
    state_dict = torch.load(os.path.join(conf.get_weights_folder(), spfn_module_filename))
    spfn_module.load_state_dict(state_dict, strict=True)
spfn_module.eval()
test_dataset = dataloaders.Dataset_GlobalSPFN(n_max_global_instances, csv_path_test, args.lowres_dataset, args.highres_dataset, path_patches, noisy_test, test=True, n_points=None, first_n=first_n_test, fixed_order=True)
test_datasampler = dataloaders.Sampler(data_source=test_dataset)
test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
dataframe_results = pd.DataFrame(columns=['Filename', 'mIoU', 'Type', 'Normal', 'Axis', 'MeanRes', 'StdRes', 'SkCoverage0.01', 'SkCoverage0.02', 'PCoverage0.01', 'PCoverage0.02'])
list_mIoU = []
for batch_id, data in enumerate(test_dataloader, 0):
if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
P = data[0].type(torch.FloatTensor).to(device)
X_gt = data[1].type(torch.FloatTensor).to(device)
points_per_instance = data[2].type(torch.FloatTensor).to(device)
I_gt = data[3].type(torch.LongTensor).to(device)
T_gt = data[4].type(torch.LongTensor).to(device)
plane_n_gt = data[5].type(torch.FloatTensor).to(device)
cylinder_axis_gt = data[6].type(torch.FloatTensor).to(device)
cone_axis_gt = data[7].type(torch.FloatTensor).to(device)
patch_centers = data[8].type(torch.LongTensor).to(device)
gt_parameters = {'plane_normal': plane_n_gt, 'cylinder_axis': cylinder_axis_gt, 'cone_axis': cone_axis_gt}
glob_features = None
loc_features = None
if not os.path.isdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''))):
os.mkdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5','')))
with torch.no_grad():
X, T, W, global_feat, local_feat = spfn_module(P, glob_features=glob_features, loc_features=loc_features)
if args.evaluation_set == 'test':
np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''), 'local_feat_full.npy'), local_feat[0].cpu().numpy())
local_feat = local_feat[:,:,patch_centers[0]]
X = X / torch.norm(X, dim=2, keepdim=True)
W = torch.softmax(W, dim=2)
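        # Discretize the soft memberships and align predicted instances with the
        # ground-truth instances via Hungarian matching before computing the
        # segmentation mIoU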
with torch.no_grad():
W = metric_implementation.hard_W_encoding(W)
matching_indices, mask = metric_implementation.hungarian_matching(W, I_gt)
mask = mask.float()
mIoU = metric_implementation.compute_segmentation_iou(W, I_gt, matching_indices, mask)
if not os.path.isdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''))):
os.mkdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', '')))
if args.evaluation_set == 'test':
np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''), 'object_seg.npy'), W[0].cpu().numpy())
np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''), 'object_normals.npy'), X[0].cpu().numpy())
np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''), 'object_type.npy'), T[0].cpu().numpy())
mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T = metric_implementation.compute_all_metrics(P, X, X_gt, W, I_gt, T, T_gt, points_per_instance, gt_parameters, list_epsilon=[0.01, 0.02], classes=conf.get_list_of_primitives())
list_mIoU.append(mIoU.item())
if batch_id%100==0: print('mIoU: ', np.mean(list_mIoU))
dataframe_results.loc[batch_id] = [test_dataset.hdf5_file_list[batch_id].replace('.h5',''), mIoU.item(), type_accuracy.item(), normal_difference.item(), axis_difference.item(), mean_residual.item(), std_residual.item(), Sk_coverage[0].item(), Sk_coverage[1].item(), P_coverage[0].item(), P_coverage[1].item()]
np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''), 'global_feat.npy'), global_feat[0,:,0].cpu().numpy())
np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''), 'local_feat.npy'), local_feat[0].cpu().numpy())
dataframe_results.to_csv(os.path.join(args.output_folder, 'Results.csv'))
| 7,235 | 63.607143 | 321 |
py
|
CPFN
|
CPFN-master/evaluation_baselineSPFN.py
|
# Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, metric_implementation, losses_implementation
from PointNet2 import pn2_network
# Importing Utils files
from Utils import config_loader, merging_utils
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_localSPFN.yml')
parser.add_argument('--lowres_dataset', help='Directory of the Lowres Input Dataset', default=os.path.expanduser('data/TraceParts_v2_LowRes/'))
parser.add_argument('--highres_dataset', help='Directory of the Highres Input Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
parser.add_argument('--dir_spfn', help='Directory of the global SPFN output', default=os.path.expanduser('data/GlobalSPFN_Results/'))
parser.add_argument('--dir_indices', help='Directory of the indices', default=os.path.expanduser('data/Heatmap/'))
parser.add_argument('--output_folder', help='Directory of the output folder', default=os.path.expanduser('data/LocalSPFN_Results/'))
parser.add_argument('--scale', help='Scale of the primitives', default=0.05)
args = parser.parse_args()
dir_indices = os.path.join(args.dir_indices, str(round(args.scale,2)))
if not os.path.isdir(args.output_folder):
os.mkdir(args.output_folder)
# Loading the config file
conf = config_loader.Local_SPFNConfig(args.config_file)
# Selecting the visible GPUs
visible_GPUs = conf.get_CUDA_visible_GPUs()
device = torch.device('cuda')
if visible_GPUs is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primitive Types and Numbers
fitter_factory.register_primitives(conf.get_list_of_primitives())
n_registered_primitives = fitter_factory.get_n_registered_primitives()
n_max_global_instances = conf.get_n_max_global_instances()
n_max_local_instances = conf.get_n_max_local_instances()
# Test Dataset
csv_path_test = os.path.join(args.lowres_dataset, conf.get_test_data_file())
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
test_dataset = dataloaders.Dataset_TestLocalSPFN(n_max_global_instances, n_max_local_instances, csv_path_test, args.dir_spfn, args.lowres_dataset, args.highres_dataset,
dir_indices, noisy_test, first_n=first_n_test, fixed_order=True)
test_datasampler = dataloaders.Sampler(data_source=test_dataset)
test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
dataframe_results = pd.DataFrame(columns=['Filename', 'mIoU', 'Type', 'Normal', 'Axis', 'MeanRes', 'StdRes', 'SkCoverage0.01', 'SkCoverage0.02', 'PCoverage0.01', 'PCoverage0.02'])
cpt_df_stats = 0
    dataframe_results_stats = pd.DataFrame(columns=['Filename', 'Primitive Id', 'Mask', 'Nb Points', 'mIoU'])
list_mIoU = []
for batch_id, data in enumerate(test_dataloader, 0):
if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
P = data[0].type(torch.FloatTensor).squeeze(0).to(device)
nb_patches, num_points, _ = P.size()
P_gt = data[2].type(torch.FloatTensor).squeeze(0).to(device)
I_gt = data[3].type(torch.LongTensor).squeeze(0).to(device)
T_gt = data[4].type(torch.LongTensor).squeeze(0).to(device)
patch_indices = data[5].type(torch.LongTensor).squeeze(0).to(device)
spfn_labels = data[6].type(torch.LongTensor).squeeze(0).to(device)
num_global_points = spfn_labels.size(0)
spfn_normals = data[7].type(torch.FloatTensor).squeeze(0).to(device)
spfn_type = data[8].type(torch.FloatTensor).squeeze(0).to(device)
glob_features = data[9].type(torch.FloatTensor).squeeze(0).to(device)
loc_features = data[10].type(torch.FloatTensor).squeeze(0).to(device)
P_global = data[11].type(torch.FloatTensor).squeeze(0).to(device)
X_gt_global = data[12].type(torch.FloatTensor).squeeze(0).to(device)
I_gt_global = data[13].type(torch.LongTensor).squeeze(0).to(device)
plane_n_gt = data[14].type(torch.FloatTensor).to(device)
cylinder_axis_gt = data[15].type(torch.FloatTensor).to(device)
cone_axis_gt = data[16].type(torch.FloatTensor).to(device)
gt_parameters = {'plane_normal': plane_n_gt,
'cylinder_axis': cylinder_axis_gt,
'cone_axis': cone_axis_gt}
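        # Hard one-hot memberships from the global SPFN labels; indexing
        # eye(K+1) with argmax+1 and then dropping the first column reserves a
        # slot so unassigned points end up with an all-zero membership row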
W_fusion = torch.eye(n_max_global_instances + 1).to(spfn_labels.device)[torch.argmax(spfn_labels, dim=1) + 1]
W_fusion = W_fusion[:, 1:]
X_global = spfn_normals
T_global = spfn_type
with torch.no_grad():
W_fusion = metric_implementation.hard_W_encoding(W_fusion.unsqueeze(0))
matching_indices_fusion, mask_fusion = metric_implementation.hungarian_matching(W_fusion, I_gt_global.unsqueeze(0))
mask_fusion = mask_fusion.float()
mIoU_fusion = metric_implementation.compute_segmentation_iou(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion, mask_fusion)
mIoU_fusion_per_primitive = 1 - losses_implementation.compute_miou_loss(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion)[0]
_, unique_counts_primitives_fusion = np.unique(I_gt_global.cpu().numpy(), return_counts=True)
for j in range(len(unique_counts_primitives_fusion)):
dataframe_results_stats.loc[cpt_df_stats] = [test_dataset.hdf5_file_list[batch_id], j, mask_fusion[0, j].item(), unique_counts_primitives_fusion[j], mIoU_fusion_per_primitive[0, j].item()]
cpt_df_stats += 1
        # Compute the full set of fitting metrics on the fused segmentation
mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T = metric_implementation.compute_all_metrics(
P_global.unsqueeze(0), X_global.unsqueeze(0), X_gt_global.unsqueeze(0), W_fusion, I_gt_global.unsqueeze(0),
T_global.unsqueeze(0), T_gt.unsqueeze(0), P_gt.unsqueeze(0), gt_parameters,
list_epsilon=[0.01, 0.02], classes=['plane', 'sphere', 'cylinder', 'cone'])
list_mIoU.append(mIoU.item())
if batch_id%100==0: print('mIoU: ', np.mean(list_mIoU))
dataframe_results.loc[batch_id] = [test_dataset.hdf5_file_list[batch_id], mIoU.item(), type_accuracy.item(),
normal_difference.item(), axis_difference.item(), mean_residual.item(),
std_residual.item(), Sk_coverage[0].item(), Sk_coverage[1].item(), P_coverage[0].item(), P_coverage[1].item()]
dataframe_results.to_csv(os.path.join(args.output_folder, 'Results_baseline.csv'), index=False)
dataframe_results_stats.to_csv(os.path.join(args.output_folder, 'Results_Stats_baseline.csv'), index=False)
| 7,131 | 60.482759 | 197 |
py
|
CPFN
|
CPFN-master/evaluation_PatchSelection.py
|
# Importation of packages
import os
import sys
import h5py
import torch
import argparse
import numpy as np
# Importing the Dataset file
from Dataset import dataloaders
# Importing the Network file
from PointNet2 import pn2_network
# Importing the Utils files
from Utils import config_loader, sampling_utils
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_patchSelec.yml')
parser.add_argument('--lowres_dataset', help='Directory of the Lowres Dataset', default=os.path.expanduser('data/TraceParts_v2_lowres/'))
parser.add_argument('--highres_dataset', help='Directory of the Highres Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
parser.add_argument('--heatmap_folder', help='Directory to save the heatmaps in', default=os.path.expanduser('data/TraceParts_v2_heatmaps/'))
parser.add_argument('--scale', help='Scale of the Primitives', type=float, default=0.05)
args = parser.parse_args()
heatmap_folder = os.path.join(args.heatmap_folder, str(args.scale))
os.makedirs(heatmap_folder, exist_ok=True)
# Loading the config file
conf = config_loader.Patch_SelecConfig(args.config_file)
# Selecting the visible GPUs
visible_GPUs = conf.get_CUDA_visible_GPUs()
device = torch.device('cuda')
if visible_GPUs is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
# Test Dataset
csv_path_test = os.path.join('Dataset', conf.get_test_data_file())
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
# Launching the Network
if args.scale<1:
patchselec_module_filename = 'patchselec_%s_module'%str(round(args.scale, 2)) + '.pth'
patchselec_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[2]).to(device)
        state_dict = torch.load(os.path.join(conf.get_weights_folder(), patchselec_module_filename))
        patchselec_module.load_state_dict(state_dict, strict=True)
patchselec_module.eval()
test_dataset = dataloaders.Dataset_PatchSelection(csv_path_test, args.lowres_dataset, args.highres_dataset, args.scale, n_points=8192, normalisation=True)
test_datasampler = dataloaders.Sampler(data_source=test_dataset)
test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
# Initialisation
if args.scale<1:
confusion_matrix = np.zeros([2, 2])
for batch_id, data in enumerate(test_dataloader, 0):
if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
# Computing the prediction
points = data[0].type(torch.FloatTensor).to(device)
batch_size_current, num_points, _ = points.size()
output_labels = data[1].type(torch.LongTensor).to(device)
shuffled_indices = data[2].type(torch.LongTensor).to(device)
if args.scale<1:
predicted_labels = patchselec_module(points)[0]
predicted_labels = torch.argmax(predicted_labels, dim=2)
else:
predicted_labels = output_labels[0]
if not os.path.isdir(os.path.join(heatmap_folder, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1].replace('.h5',''))):
os.mkdir(os.path.join(heatmap_folder, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1].replace('.h5', '')))
# Computing the confusion matrix
if args.scale<1:
confusion_matrix[0, 0] += torch.sum((predicted_labels == 0) * (output_labels == 0)).item()
confusion_matrix[0, 1] += torch.sum((predicted_labels == 0) * (output_labels == 1)).item()
confusion_matrix[1, 0] += torch.sum((predicted_labels == 1) * (output_labels == 0)).item()
confusion_matrix[1, 1] += torch.sum((predicted_labels == 1) * (output_labels == 1)).item()
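        # Map the predictions back to the original point order (the dataloader
        # shuffles the points)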
predicted_labels = torch.gather(predicted_labels[0], 0, shuffled_indices[0])
# Selecting the indices
with h5py.File(os.path.join(args.highres_dataset, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1]), 'r') as f:
gt_points_hr = f['gt_points'][()]
gt_labels_hr = f['gt_labels'][()]
with h5py.File(os.path.join(os.path.join(args.lowres_dataset, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1])), 'r') as f:
gt_points_lr = f['gt_points'][()]
gt_labels_lr = f['gt_labels'][()]
pool_indices = np.where(predicted_labels.detach().cpu().numpy())[0]
if len(pool_indices) > 0:
patch_indices = sampling_utils.sample(gt_points_lr, gt_points_hr, pool_indices, max_number_patches=len(pool_indices))
np.save(os.path.join(heatmap_folder, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1].replace('.h5', '_indices.npy')), patch_indices)
if args.scale<1:
confusion_matrix = confusion_matrix / np.sum(confusion_matrix)
print('Confusion Matrix', confusion_matrix)
np.save(os.path.join(heatmap_folder, 'confusion_matrix.npy'), confusion_matrix)
| 5,095 | 54.391304 | 158 |
py
|
CPFN
|
CPFN-master/evaluation_localSPFN.py
|
# Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, metric_implementation, losses_implementation
from PointNet2 import pn2_network
# Importing utils files
from Utils import config_loader, merging_utils
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_localSPFN.yml')
parser.add_argument('--lowres_dataset', help='Directory of the Lowres Input Dataset', default=os.path.expanduser('data/TraceParts_v2_lowres/'))
parser.add_argument('--highres_dataset', help='Directory of the Highres Input Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
parser.add_argument('--dir_spfn', help='Directory of the global SPFN output', default=os.path.expanduser('data/TraceParts_v2_globalspfn/'))
parser.add_argument('--dir_indices', help='Directory of the indices', default=os.path.expanduser('data/TraceParts_v2_heatmaps/'))
parser.add_argument('--output_folder', help='Directory of the output folder', default=os.path.expanduser('data/TraceParts_v2_localspfn/'))
parser.add_argument('--scale', help='Scale of the primitives', default=0.05, type=float)
args = parser.parse_args()
dir_indices = os.path.join(args.dir_indices, str(round(args.scale,2)))
if not os.path.isdir(args.output_folder):
os.mkdir(args.output_folder)
# Loading the config file
conf = config_loader.Local_SPFNConfig(args.config_file)
# Selecting the visible GPUs
visible_GPUs = conf.get_CUDA_visible_GPUs()
device = torch.device('cuda')
if visible_GPUs is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primitive Types and Numbers
fitter_factory.register_primitives(conf.get_list_of_primitives())
n_registered_primitives = fitter_factory.get_n_registered_primitives()
n_max_global_instances = conf.get_n_max_global_instances()
n_max_local_instances = conf.get_n_max_local_instances()
# Test Dataset
csv_path_test = os.path.join('Dataset', conf.get_test_data_file())
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
# Launching the Network
spfn_module_filename = 'localspfn_%s_module.pth'%str(round(args.scale, 2))
spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_local_instances], use_glob_features=False, use_loc_features=False).to(device)
    state_dict = torch.load(os.path.join(conf.get_weights_folder(), spfn_module_filename))
    spfn_module.load_state_dict(state_dict, strict=True)
spfn_module.eval()
test_dataset = dataloaders.Dataset_TestLocalSPFN(n_max_global_instances, n_max_local_instances, csv_path_test, args.dir_spfn, args.lowres_dataset, args.highres_dataset,
dir_indices, noisy_test, first_n=first_n_test, fixed_order=True)
test_datasampler = dataloaders.Sampler(data_source=test_dataset)
test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
dataframe_results = pd.DataFrame(columns=['Filename', 'mIoU', 'Type', 'Normal', 'Axis', 'MeanRes', 'StdRes', 'SkCoverage0.01', 'SkCoverage0.02', 'PCoverage0.01', 'PCoverage0.02'])
cpt_df_stats = 0
dataframe_results_stats = pd.DataFrame(columns=['Filename', 'Primitive Id', 'Mask', 'Nb Points', 'mIoU'])
list_mIoU = []
for batch_id, data in enumerate(test_dataloader, 0):
if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
P = data[0].type(torch.FloatTensor).squeeze(0).to(device)
nb_patches, num_points, _ = P.size()
P_gt = data[2].type(torch.FloatTensor).squeeze(0).to(device)
I_gt = data[3].type(torch.LongTensor).squeeze(0).to(device)
T_gt = data[4].type(torch.LongTensor).squeeze(0).to(device)
patch_indices = data[5].type(torch.LongTensor).squeeze(0).to(device)
spfn_labels = data[6].type(torch.LongTensor).squeeze(0).to(device)
num_global_points = spfn_labels.size(0)
spfn_normals = data[7].type(torch.FloatTensor).squeeze(0).to(device)
spfn_type = data[8].type(torch.FloatTensor).squeeze(0).to(device)
glob_features = data[9].type(torch.FloatTensor).squeeze(0).to(device)
loc_features = data[10].type(torch.FloatTensor).squeeze(0).to(device)
P_global = data[11].type(torch.FloatTensor).squeeze(0).to(device)
X_gt_global = data[12].type(torch.FloatTensor).squeeze(0).to(device)
I_gt_global = data[13].type(torch.LongTensor).squeeze(0).to(device)
plane_n_gt = data[14].type(torch.FloatTensor).to(device)
cylinder_axis_gt = data[15].type(torch.FloatTensor).to(device)
cone_axis_gt = data[16].type(torch.FloatTensor).to(device)
gt_parameters = {'plane_normal': plane_n_gt,
'cylinder_axis': cylinder_axis_gt,
'cone_axis': cone_axis_gt}
if nb_patches > 0:
X, T, W, _, _ = spfn_module(P, glob_features=glob_features, loc_features=loc_features)
X = X / torch.norm(X, dim=2, keepdim=True)
W = torch.softmax(W, dim=2)
with torch.no_grad():
W_fusion = W
similarity_fusion = merging_utils.similarity_soft(spfn_labels, W_fusion, patch_indices)
labels_fusion = merging_utils.run_heuristic_solver(similarity_fusion.cpu().numpy(), nb_patches, n_max_global_instances, n_max_local_instances)
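                # Fused membership matrix: one block of n_max_local_instances
                # columns per patch, followed by the n_max_global_instances
                # columns of the global SPFN prediction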
point2primitive_fusion = torch.zeros([num_global_points, nb_patches * n_max_local_instances + n_max_global_instances]).float().to(device)
for b in range(nb_patches):
point2primitive_fusion[patch_indices[b], b * n_max_local_instances:(b + 1) * n_max_local_instances] = W_fusion[b]
point2primitive_fusion[:, (b+1)*n_max_local_instances:] = spfn_labels
                    # Zero out the global SPFN columns for points already covered by a patch
flag = torch.sum(point2primitive_fusion[:,:(b+1)*n_max_local_instances], dim=1)>0
point2primitive_fusion[flag,(b+1)*n_max_local_instances:] = 0
W_fusion = merging_utils.get_point_final(point2primitive_fusion, torch.from_numpy(labels_fusion).to(device))
with torch.no_grad():
patch_indices = patch_indices.contiguous()
X = X.contiguous()
T = T.contiguous()
# Normal estimation
X_global = torch.zeros_like(X_gt_global)
X_global = X_global.scatter_add_(0, patch_indices.view(-1).unsqueeze(1).expand(-1, 3), X.view(-1, 3))
empty_indices = torch.all(X_global==0, axis=1)
X_global[empty_indices] = spfn_normals[empty_indices]
X_global = torch.nn.functional.normalize(X_global, p=2, dim=1, eps=1e-12)
# Type estimation
T_gt_perpoint = torch.gather(T_gt, 0, I_gt_global)
patch_indices = patch_indices.view(-1).unsqueeze(1).expand(-1, len(conf.get_list_of_primitives()))
num = torch.zeros_like(T_gt_perpoint).float().unsqueeze(1).expand(-1, len(conf.get_list_of_primitives()))
num = num.scatter_add(0, patch_indices, T.view(-1, len(conf.get_list_of_primitives())))
den = torch.zeros_like(T_gt_perpoint).float().unsqueeze(1).expand(-1, len(conf.get_list_of_primitives()))
den = den.scatter_add(0, patch_indices, torch.ones_like(patch_indices).float())
T_global = num / den.clamp(min=1)
T_global[empty_indices] = spfn_type[empty_indices]
else:
W_fusion = torch.eye(n_max_global_instances + 1).to(spfn_labels.device)[torch.argmax(spfn_labels, dim=1) + 1]
W_fusion = W_fusion[:, 1:]
X_global = spfn_normals
T_global = spfn_type
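        # Keep only fused instances supported by more than one point, then
        # zero-pad back to n_max_global_instances columns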
W_fusion = W_fusion[:,torch.sum(W_fusion, dim=0)>1]
if W_fusion.shape[1] < n_max_global_instances:
W_fusion = torch.cat((W_fusion, torch.zeros([W_fusion.shape[0], n_max_global_instances-W_fusion.shape[1]]).to(device)), dim=1)
with torch.no_grad():
W_fusion = metric_implementation.hard_W_encoding(W_fusion.unsqueeze(0))
matching_indices_fusion, mask_fusion = metric_implementation.hungarian_matching(W_fusion, I_gt_global.unsqueeze(0))
mask_fusion = mask_fusion.float()
mIoU_fusion = metric_implementation.compute_segmentation_iou(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion, mask_fusion)
mIoU_fusion_per_primitive = 1 - losses_implementation.compute_miou_loss(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion)[0]
_, unique_counts_primitives_fusion = np.unique(I_gt_global.cpu().numpy(), return_counts=True)
for j in range(len(unique_counts_primitives_fusion)):
dataframe_results_stats.loc[cpt_df_stats] = [test_dataset.hdf5_file_list[batch_id], j, mask_fusion[0, j].item(), unique_counts_primitives_fusion[j], mIoU_fusion_per_primitive[0, j].item()]
cpt_df_stats += 1
with torch.no_grad():
mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T = metric_implementation.compute_all_metrics(
P_global.unsqueeze(0), X_global.unsqueeze(0), X_gt_global.unsqueeze(0), W_fusion, I_gt_global.unsqueeze(0),
T_global.unsqueeze(0), T_gt.unsqueeze(0), P_gt.unsqueeze(0), gt_parameters,
list_epsilon=[0.01, 0.02], classes=['plane', 'sphere', 'cylinder', 'cone'])
list_mIoU.append(mIoU.item())
if batch_id%100==0: print('mIoU: ', np.mean(list_mIoU))
dataframe_results.loc[batch_id] = [test_dataset.hdf5_file_list[batch_id], mIoU.item(), type_accuracy.item(),
normal_difference.item(), axis_difference.item(), mean_residual.item(),
std_residual.item(), Sk_coverage[0].item(), Sk_coverage[1].item(), P_coverage[0].item(), P_coverage[1].item()]
dataframe_results.to_csv(os.path.join(args.output_folder, 'Results.csv'), index=False)
dataframe_results_stats.to_csv(os.path.join(args.output_folder, 'Results_Stats.csv'), index=False)
| 10,606 | 63.284848 | 197 |
py
|
CPFN
|
CPFN-master/training_SPFN.py
|
# Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, losses_implementation
from PointNet2 import pn2_network
# Importing Utils files
from Utils import config_loader, training_utils, training_visualisation
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', help='YAML configuration file', type=str, default='Configs/config_globalSPFN.yml')
parser.add_argument('--lowres_dataset', help='Directory of the Input Dataset', type=str, default=os.path.expanduser('data/TraceParts_v2_lowres/'))
parser.add_argument('--network', help='Network to train: GlobalSPFN, LocalSPFN', type=str, default='GlobalSPFN')
parser.add_argument('--path_patches', help='Path to Sampled Patches h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2_patches'))
parser.add_argument('--scale', help='Scale to select the smallest primitive', type=float, default=0.05)
parser.add_argument('--spfn_weigths', help='Filename of the model weights to load', type=str, default='')
args = parser.parse_args()
# Loading the config file
assert (args.network in ['GlobalSPFN', 'LocalSPFN'])
if args.network == 'GlobalSPFN':
conf = config_loader.Global_SPFNConfig(args.config_file)
elif args.network == 'LocalSPFN':
conf = config_loader.Local_SPFNConfig(args.config_file)
# Selecting the visible GPUs
visible_GPUs = conf.get_CUDA_visible_GPUs()
device = torch.device('cuda')
if visible_GPUs is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primitive Types and Numbers
fitter_factory.register_primitives(conf.get_list_of_primitives())
n_registered_primitives = fitter_factory.get_n_registered_primitives()
n_max_global_instances = conf.get_n_max_global_instances()
if args.network == 'LocalSPFN':
n_max_local_instances = conf.get_n_max_local_instances()
# Training Parameters
nb_epochs = conf.get_n_epochs()
init_learning_rate = conf.get_init_learning_rate()
val_interval = conf.get_val_interval()
snapshot_interval = conf.get_snapshot_interval()
# Training Dataset
csv_path_train = os.path.join('Dataset', conf.get_train_data_file())
noisy_train = conf.is_train_data_noisy()
first_n_train = conf.get_train_data_first_n()
num_workers_train = conf.get_nb_train_workers()
path_patches = os.path.join(args.path_patches, str(round(args.scale, 2)))
# Validation Dataset
csv_path_val = os.path.join('Dataset', conf.get_val_data_file())
noisy_val = conf.is_val_data_noisy()
first_n_val = conf.get_val_data_first_n()
num_workers_val = conf.get_nb_val_workers()
# Launching the Network
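    # SPFN heads: per-point normals (3), per-point primitive-type logits and
    # per-point instance-membership logits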
if args.network == 'GlobalSPFN':
spfn_weights_filename = 'globalspfn_module'
spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_global_instances]).to(device)
elif args.network == 'LocalSPFN':
spfn_weights_filename = 'localspfn_%s_module'%str(round(args.scale, 2))
spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_local_instances]).to(device)
if os.path.isfile(os.path.join(conf.get_weights_folder(), args.spfn_weigths)):
        state_dict = torch.load(os.path.join(conf.get_weights_folder(), args.spfn_weigths))
        spfn_module.load_state_dict(state_dict, strict=True)
# Loading the dataset
if args.network == 'GlobalSPFN':
train_dataset = dataloaders.Dataset_GlobalSPFN(n_max_global_instances, csv_path_train, args.lowres_dataset, None, None, noisy_train, n_points=8192, first_n=first_n_train, fixed_order=False)
train_datasampler = dataloaders.RandomSampler(data_source=train_dataset, seed=12345, identical_epochs=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=conf.get_batch_size(), num_workers=num_workers_train, pin_memory=True)
val_dataset = dataloaders.Dataset_GlobalSPFN(n_max_global_instances, csv_path_val, args.lowres_dataset, None, None, noisy_val, n_points=8192, first_n=first_n_val, fixed_order=False)
val_datasampler = dataloaders.RandomSampler(data_source=val_dataset, seed=12345, identical_epochs=False)
val_dataloader = torch.utils.data.DataLoader(val_dataset, sampler=val_datasampler, batch_size=conf.get_batch_size(), num_workers=conf.get_nb_val_workers(), pin_memory=True)
elif args.network == 'LocalSPFN':
train_dataset = dataloaders.Dataset_TrainLocalSPFN(n_max_local_instances, csv_path_train, path_patches, noisy_train, first_n=first_n_train, fixed_order=False, lean=True)
train_datasampler = dataloaders.RandomSampler(data_source=train_dataset, seed=12345, identical_epochs=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=conf.get_batch_size(), num_workers=num_workers_train, pin_memory=True)
val_dataset = dataloaders.Dataset_TrainLocalSPFN(n_max_local_instances, csv_path_val, path_patches, noisy_val, first_n=first_n_val, fixed_order=False, lean=True)
val_datasampler = dataloaders.RandomSampler(data_source=val_dataset, seed=12345, identical_epochs=False)
val_dataloader = torch.utils.data.DataLoader(val_dataset, sampler=val_datasampler, batch_size=conf.get_batch_size(), num_workers=conf.get_nb_val_workers(), pin_memory=True)
# Optimizer
optimizer = torch.optim.Adam(spfn_module.parameters(), lr=init_learning_rate)
# Visualisation
visualiser = training_visualisation.Visualiser(conf.get_visualisation_interval())
# Initialisation
global_step = 0
old_learning_rate = init_learning_rate
best_loss = np.inf
for epoch in range(nb_epochs):
global_step, _ = training_utils.spfn_train_val_epoch(train_dataloader, spfn_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train')
if (epoch % conf.get_val_interval() == 0) and (epoch > 0):
with torch.no_grad():
_, loss = training_utils.spfn_train_val_epoch(val_dataloader, spfn_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='val')
if loss < best_loss:
torch.save(spfn_module.state_dict(), os.path.join(conf.get_weights_folder(), spfn_weights_filename + '.pth'))
best_loss = loss
if (epoch % conf.get_snapshot_interval() == 0) and (epoch > 0):
torch.save(spfn_module.state_dict(), os.path.join(conf.get_weights_folder(), spfn_weights_filename + '%d.pth' % epoch))
torch.save(spfn_module.state_dict(), os.path.join(conf.get_weights_folder(), spfn_weights_filename + '%d.pth' % epoch))
| 6,960 | 59.530435 | 198 |
py
|
CPFN
|
CPFN-master/Preprocessing/preprocessing_sampling_patch.py
|
# Importation of packages
import os
import h5py
import numba
import argparse
import numpy as np
import pandas as pd
import multiprocessing as mp
from joblib import Parallel, delayed
def get_small_primitives(gt_labels_hr, max_nb_points):
unique_labels, unique_counts = np.unique(gt_labels_hr, return_counts=True)
small_primitives_pool = np.where(unique_counts < max_nb_points)[0]
small_primitives_id = unique_labels[small_primitives_pool]
return small_primitives_id
def extract_pool_indices(gt_points_lr, gt_labels_lr, small_primitives_id):
pool_indices = np.where(np.isin(gt_labels_lr, small_primitives_id))[0]
pool_labels = gt_labels_lr[pool_indices]
return pool_indices, pool_labels
def sample(gt_points_lr, gt_points_hr, pool_indices, pool_labels, num_points_patch=8192, max_number_patches=32):
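    # Round-robin over the remaining small-primitive labels: seed a patch on a
    # random pool point of each label, take its num_points_patch nearest
    # high-res neighbours, then drop the covered low-res points from the pool
    # until max_number_patches is reached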
list_patch_indices = []
while (len(list_patch_indices) < max_number_patches) and (len(pool_indices) != 0):
# Selecting the remaining labels
unique_pool_labels = np.unique(pool_labels)
for label in unique_pool_labels:
# Checking if the maximum number of patches have been reached
if len(list_patch_indices) >= max_number_patches:
break
# Selecting a random pool index for label l
ind_pool_indices = np.where(pool_labels==label)[0]
if len(ind_pool_indices) == 0:
continue
i = pool_indices[np.random.choice(ind_pool_indices)]
# Getting the patch indices for that query points
distances = np.linalg.norm(np.expand_dims(gt_points_lr[i], axis=0) - gt_points_hr, axis=1)
patch_indices = np.argsort(distances)[:num_points_patch]
list_patch_indices.append(patch_indices)
patch_distances = np.sort(distances)[:num_points_patch]
# Deleting the neighbours in the pool of indices
distances = np.linalg.norm(np.expand_dims(gt_points_lr[i], axis=0) - gt_points_lr[pool_indices], axis=1)
pool_indices_selected = np.where(distances <= np.max(patch_distances))[0]
pool_indices = np.delete(pool_indices, pool_indices_selected)
pool_labels = np.delete(pool_labels, pool_indices_selected)
patch_indices = np.stack(list_patch_indices, axis=0)
return patch_indices
def multiprocessing(tuple):
i, n, file_, max_number_patches, num_points_patch, scale, path_lowres, path_highres, path_patches = tuple
if i%100==0: print('Processing File (%d / %d): '%(i, n), file_)
with h5py.File(os.path.join(path_highres, file_), 'r') as f:
gt_points_hr = f['gt_points'][()]
gt_labels_hr = f['gt_labels'][()]
with h5py.File(os.path.join(path_lowres, file_), 'r') as f:
gt_points_lr = f['gt_points'][()]
gt_labels_lr = f['gt_labels'][()]
nb_points, _ = gt_points_hr.shape
max_nb_points = int(scale * nb_points)
small_primitives_id = get_small_primitives(gt_labels_hr, max_nb_points=max_nb_points)
pool_indices, pool_labels = extract_pool_indices(gt_points_lr, gt_labels_lr, small_primitives_id)
if len(pool_indices) == 0:
return
patch_indices = sample(gt_points_lr, gt_points_hr, pool_indices, pool_labels, num_points_patch=num_points_patch, max_number_patches=max_number_patches)
np.save(os.path.join(path_patches, file_.replace('.h5', '_indices.npy')), patch_indices)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path_highres', help='Path to Highres h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2/'))
    parser.add_argument('--path_lowres', help='Path to Lowres h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2_lowres/'))
parser.add_argument('--path_patches', help='Path to Sampled Patches h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2_patches/'))
parser.add_argument('--path_split_file', help='Path to the csv file for the corresponding split', type=str, default='Dataset/train_models.csv')
parser.add_argument('--scale', help='Scale to select the smallest primitive', type=float, default=0.05)
parser.add_argument('--max_number_patches', help='Maximum number of patch', type=int, default=32)
parser.add_argument('--num_points_patch', help='Number of points per patch', type=int, default=8192)
parser.add_argument('--ratio_cpu_touse', help='Ratio of the Total number of CPUs to use', type=float, default=0.70)
args = parser.parse_args()
path_patches = os.path.join(args.path_patches, str(round(args.scale,2)))
if not os.path.isdir(path_patches):
os.makedirs(path_patches, exist_ok=True)
nb_cores = int(args.ratio_cpu_touse * mp.cpu_count())
list_files = pd.read_csv(args.path_split_file, header=None).values[:,0]
n_files = len(list_files)
results = Parallel(n_jobs=nb_cores)(delayed(multiprocessing)((i, n_files, file_, args.max_number_patches, args.num_points_patch, args.scale, args.path_lowres, args.path_highres, path_patches)) for i, file_ in enumerate(list_files))
| 5,085 | 58.139535 | 235 |
py
|
CPFN
|
CPFN-master/Preprocessing/preprocessing_creation_patch.py
|
# Importation of packages
import os
import re
import sys
import h5py
import pickle
import argparse
import numpy as np
import pandas as pd
import multiprocessing as mp
from joblib import Parallel, delayed
def multiprocessing(tuple):
ind_file, n_file, file_, path_lowres, path_highres, path_features, path_patches, num_points = tuple
if ind_file%100==0: print('%d / %d'%(ind_file, n_file))
if not os.path.isfile(os.path.join(path_patches, file_.replace('.h5', '_indices.npy'))):
return
patch_indices = np.load(os.path.join(path_patches, file_.replace('.h5', '_indices.npy')))
nb_patches, _ = patch_indices.shape
with h5py.File(os.path.join(path_highres, file_), 'r') as f:
P = f['gt_points'][()].astype(np.float32)
P_noisy = f['noisy_points'][()].astype(np.float32)
normal_gt = f['gt_normals'][()].astype(np.float32)
I_gt = f['gt_labels'][()].astype(np.int64)
with h5py.File(os.path.join(path_lowres, file_), 'r') as f:
index_query_points = f['index_query_points'][()]
# Primitive keys
found_soup_ids = []
soup_id_to_key = {}
soup_prog = re.compile('(.*)_soup_([0-9]+)$')
for key in list(f.keys()):
m = soup_prog.match(key)
if m is not None:
soup_id = int(m.group(2))
found_soup_ids.append(soup_id)
soup_id_to_key[soup_id] = key
found_soup_ids.sort()
n_instances = len(found_soup_ids)
instances = []
P_gt = []
N_gt = []
metas = []
for i in range(n_instances):
g = f[soup_id_to_key[i]]
P_gt_cur = g['gt_points'][()]
P_gt.append(P_gt_cur)
N_gt_cur = g['gt_normals'][()]
N_gt.append(N_gt_cur)
meta = pickle.loads(g.attrs['meta'])
metas.append(meta)
P_gt = np.array(P_gt)
N_gt = np.array(N_gt)
# Patch Selection
P = np.reshape(P[patch_indices.flatten()], [nb_patches, num_points, 3])
P_noisy = np.reshape(P_noisy[patch_indices.flatten()], [nb_patches, num_points, 3])
normal_gt = np.reshape(normal_gt[patch_indices.flatten()], [nb_patches, num_points, 3])
I_gt = np.reshape(I_gt[patch_indices.flatten()], [nb_patches, num_points])
# Normalisation
mean = np.mean(P, axis=1, keepdims=True)
P = P - mean
norm = np.linalg.norm(P, axis=2, keepdims=True).max(axis=1, keepdims=True)
P = P / norm
P_noisy = P_noisy - mean
P_noisy = P_noisy / norm
P_gt = P_gt - np.expand_dims(mean, axis=1)
P_gt = P_gt / np.expand_dims(norm, axis=1)
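    # Zero out padded primitives: rows that were all zeros before
    # normalisation are now exactly -mean/norm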
flag = ~np.all(P_gt == - np.expand_dims(mean, axis=1) / np.expand_dims(norm, axis=1), axis=3, keepdims=True).all(axis=2, keepdims=True)
P_gt = P_gt * flag.astype(np.float32)
# SPFN Feature
glob_features = np.load(os.path.join(path_features, file_.replace('.h5', ''), 'global_feat.npy'))
loc_features = np.load(os.path.join(path_features, file_.replace('.h5', ''), 'local_feat.npy'))
# Export
if not os.path.isdir(os.path.join(path_patches, file_.replace('.h5', ''))):
os.mkdir(os.path.join(path_patches, file_.replace('.h5', '')))
for i in range(nb_patches):
flag = -1 in I_gt[i]
unique_values, inverse_values = np.unique(I_gt[i], return_inverse=True)
unique_values = unique_values[unique_values != -1]
if flag: inverse_values = inverse_values - 1
with h5py.File(os.path.join(path_patches, file_.replace('.h5', ''), file_.replace('.h5','_patch%d.h5'%i)), 'w') as f:
f.create_dataset('gt_points', data=P[i].astype(np.float32))
f.create_dataset('gt_normals', data=normal_gt[i].astype(np.float32))
f.create_dataset('gt_labels', data=inverse_values.astype(np.int64))
f.create_dataset('noisy_points', data=P_noisy[i].astype(np.float32))
f.create_dataset('glob_features', data=glob_features.astype(np.float32))
f.create_dataset('loc_features', data=loc_features[:,i].astype(np.float32))
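            # Re-express each primitive's parameters in the patch's normalised
            # frame (shift by the patch mean, divide by the patch norm)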
for j, value in enumerate(unique_values):
key = file_.replace('.h5','_soup_%d'%value)
new_key = file_.replace('.h5','_soup_%d'%j)
grp = f.create_group(new_key)
grp['gt_points'] = P_gt[i][value].astype(np.float32)
grp['gt_normals'] = N_gt[value].astype(np.float32)
if metas[value]['type'] == 'plane':
metas[value]['location_x'] = str((float(metas[value]['location_x']) - mean[i,0,0]) / norm[i,0,0])
metas[value]['location_y'] = str((float(metas[value]['location_y']) - mean[i,0,1]) / norm[i,0,0])
metas[value]['location_z'] = str((float(metas[value]['location_z']) - mean[i,0,2]) / norm[i,0,0])
elif metas[value]['type'] == 'sphere':
metas[value]['location_x'] = str((float(metas[value]['location_x']) - mean[i,0,0]) / norm[i,0,0])
metas[value]['location_y'] = str((float(metas[value]['location_y']) - mean[i,0,1]) / norm[i,0,0])
metas[value]['location_z'] = str((float(metas[value]['location_z']) - mean[i,0,2]) / norm[i,0,0])
metas[value]['radius'] = str(float(metas[value]['radius'])/ norm[i, 0, 0])
elif metas[value]['type'] == 'cylinder':
metas[value]['location_x'] = str((float(metas[value]['location_x']) - mean[i,0,0]) / norm[i,0,0])
metas[value]['location_y'] = str((float(metas[value]['location_y']) - mean[i,0,1]) / norm[i,0,0])
metas[value]['location_z'] = str((float(metas[value]['location_z']) - mean[i,0,2]) / norm[i,0,0])
metas[value]['radius'] = str(float(metas[value]['radius'])/ norm[i, 0, 0])
elif metas[value]['type'] == 'cone':
metas[value]['apex_x'] = str((float(metas[value]['apex_x']) - mean[i,0,0]) / norm[i,0,0])
metas[value]['apex_y'] = str((float(metas[value]['apex_y']) - mean[i,0,1]) / norm[i,0,0])
metas[value]['apex_z'] = str((float(metas[value]['apex_z']) - mean[i,0,2]) / norm[i,0,0])
grp.attrs['meta'] = str(metas[value])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path_highres', help='Path to Highres h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2/'))
parser.add_argument('--path_lowres', help='Path to Lowres h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2_lowres/'))
parser.add_argument('--path_features', help='Path to SPFN Local and Global Features', type=str, default=os.path.expanduser('data/TraceParts_v2_globalspfn/'))
parser.add_argument('--path_patches', help='Path to Sampled Patches h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2_patches/'))
parser.add_argument('--path_split_file', help='Path to the csv file for the corresponding split', type=str, default='Dataset/train_models.csv')
parser.add_argument('--scale', help='Scale to select the smallest primitive', type=float, default=0.05)
parser.add_argument('--max_number_patches', help='Maximum number of patch', type=int, default=32)
parser.add_argument('--num_points_patch', help='Number of points per patch', type=int, default=8192)
parser.add_argument('--ratio_cpu_touse', help='Ratio of the Total number of CPUs to use', type=float, default=0.70)
args = parser.parse_args()
path_patches = os.path.join(args.path_patches, str(round(args.scale,2)))
files = np.sort(pd.read_csv(args.path_split_file, delimiter=',', header=None)[0])
nfiles = len(files)
nb_cores = int(args.ratio_cpu_touse * mp.cpu_count())
results = Parallel(n_jobs=nb_cores)(delayed(multiprocessing)((i, nfiles, file_, args.path_lowres, args.path_highres, args.path_features, path_patches, args.num_points_patch)) for i, file_ in enumerate(files))
| 7,998 | 59.598485 | 212 |
py
|
CPFN
|
CPFN-master/Preprocessing/preprocessing_sampling_lowres.py
|
# Importation of packages
import os
import h5py
import time
import numba
import shutil
import argparse
import numpy as np
import pandas as pd
import multiprocessing as mp
from joblib import Parallel, delayed
# Furthest point sampling code
@numba.jit(numba.int32[:](numba.float32[:, :], numba.int32[:], numba.int32), nopython=True)
def furthest_point_sampling(input_points, index_query_points1, nb_query_points):
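    # Standard farthest point sampling, seeded with index_query_points1
    # (their distances start at zero so sampling spreads away from the
    # already selected per-label points)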
num_points, _ = input_points.shape
index_query_points2 = np.zeros(nb_query_points, dtype=numba.int32)
min_distances = 10 ** 6 * np.ones(num_points, dtype=numba.float64)
min_distances[index_query_points1] = 0
index = np.argmax(min_distances)
for i in range(nb_query_points):
index_query_points2[i] = index
additional_distances = np.sqrt(np.sum((input_points - input_points[index]) ** 2, axis=1))
min_distances = np.minimum(min_distances, additional_distances)
index = np.argmax(min_distances)
return index_query_points2
@numba.jit(numba.int32[:](numba.float32[:, :], numba.int32[:]), nopython=True)
def furthest_point_sampling_per_label(input_points, labels):
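    # Pick one (farthest) point per label: once a label is visited, all of
    # its points are suppressed from future selection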
num_points, _ = input_points.shape
unique_labels = np.unique(labels)
index_query_points = np.zeros(len(unique_labels), dtype=numba.int32)
min_distances = 10 ** 6 * np.ones(num_points, dtype=numba.float64)
index = np.random.randint(0, num_points)
for i in range(len(unique_labels)):
label = labels[index]
index_query_points[i] = index
additional_distances = np.sqrt(np.sum((input_points - input_points[index]) ** 2, axis=1))
min_distances = np.minimum(min_distances, additional_distances)
min_distances[labels==label] = 0
index = np.argmax(min_distances)
return index_query_points
# Furthest point sampling per labels code
def multiprocessing_sampling(input_tuple):
ind_file, file_, nb_query_points, path_lowres, path_highres = input_tuple
object_filename = file_.replace('.h5', '')
if ind_file%100==0: print('%d / %d' % (ind_file, nfiles))
# Loading the GT data
try:
with h5py.File(os.path.join(path_highres, object_filename + '.h5'), 'r') as f:
gt_points = f['gt_points'][()].astype(np.float32)
noisy_points = f['noisy_points'][()].astype(np.float32)
gt_labels = f['gt_labels'][()].astype(np.int32)
gt_normals = f['gt_normals'][()].astype(np.float32)
primitives = {}
nb_labels = gt_labels.max() + 1
for i in range(nb_labels):
key = object_filename + '_soup_' + str(i)
primitives[key] = {'gt_points': f[key]['gt_points'][()],
'gt_normals': f[key]['gt_normals'][()],
'meta': f[key].attrs['meta'].copy()}
    except Exception:  # skip files that are missing or malformed
        return
index_query_points1 = furthest_point_sampling_per_label(gt_points, gt_labels)
index_query_points2 = furthest_point_sampling(gt_points, index_query_points1, nb_query_points)
index_query_points = np.concatenate((index_query_points1, index_query_points2))
assert(len(np.unique(gt_labels)) == len(np.unique(gt_labels[index_query_points])))
with h5py.File(os.path.join(path_lowres, object_filename + '.h5'), 'w') as f:
f.create_dataset('gt_points', data=gt_points[index_query_points])
f.create_dataset('gt_normals', data=gt_normals[index_query_points])
f.create_dataset('gt_labels', data=gt_labels[index_query_points])
f.create_dataset('noisy_points', data=noisy_points[index_query_points])
f.create_dataset('index_query_points', data=index_query_points)
for key in primitives.keys():
grp = f.create_group(key)
grp['gt_points'] = primitives[key]['gt_points']
grp['gt_normals'] = primitives[key]['gt_normals']
grp.attrs['meta'] = primitives[key]['meta']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path_highres', help='Path to Highres h5 files', type=str, default='data/TraceParts_v2/')
    parser.add_argument('--path_lowres', help='Path to Lowres h5 files', type=str, default='data/TraceParts_v2_lowres/')
parser.add_argument('--path_split_file', help='Path to the csv file for the corresponding split', type=str, default='Dataset/train_models.csv')
parser.add_argument('--nb_query_points', help='Number of Query Points', type=int, default=8192)
parser.add_argument('--ratio_cpu_touse', help='Ratio of the Total number of CPUs to use', type=float, default=0.70)
args = parser.parse_args()
# Path
files = pd.read_csv(args.path_split_file, header=None).values[:,0]
nfiles = len(files)
if not os.path.isdir(args.path_lowres):
os.mkdir(args.path_lowres)
# Multiprocessing
num_cores = int(args.ratio_cpu_touse * mp.cpu_count())
results = Parallel(n_jobs=num_cores)(delayed(multiprocessing_sampling)((i, file_, args.nb_query_points, args.path_lowres, args.path_highres)) for i, file_ in enumerate(files))
| 5,072 | 50.765306 | 179 |
py
|
CPFN
|
CPFN-master/SPFN/sphere_fitter.py
|
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
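    # TensorFlow is only used by the debugging comparisons in the
    # __main__ blocks below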
import tensorflow as tf
from SPFN.primitives import Sphere
from SPFN.geometry_utils import weighted_sphere_fitting, weighted_sphere_fitting_tensorflow
def compute_parameters(P, W):
batch_size, n_points, _ = P.size()
_, _, n_max_primitives = W.size()
P = P.unsqueeze(1).expand(batch_size, n_max_primitives, n_points, 3).contiguous()
W = W.transpose(1, 2).contiguous()
P = P.view(batch_size * n_max_primitives, n_points, 3)
W = W.view(batch_size * n_max_primitives, n_points)
center, radius_squared = weighted_sphere_fitting(P, W)
center = center.view(batch_size, n_max_primitives, 3)
radius_squared = radius_squared.view(batch_size, n_max_primitives)
return center, radius_squared
def compute_parameters_tensorflow(P, W):
batch_size = tf.shape(P)[0]
n_points = tf.shape(P)[1]
n_max_primitives = tf.shape(W)[2]
P = tf.tile(tf.expand_dims(P, axis=1), [1, n_max_primitives, 1, 1]) # BxKxNx3
W = tf.transpose(W, perm=[0, 2, 1]) # BxKxN
P = tf.reshape(P, [batch_size * n_max_primitives, n_points, 3]) # BKxNx3
W = tf.reshape(W, [batch_size * n_max_primitives, n_points]) # BKxN
center, radius_squared = weighted_sphere_fitting_tensorflow(P, W)
center = tf.reshape(center, [batch_size, n_max_primitives, 3])
radius_squared = tf.reshape(radius_squared, [batch_size, n_max_primitives])
return center, radius_squared
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
center_torch, radius_squared_torch = compute_parameters(P_torch, W_torch)
center_torch = center_torch.detach().cpu().numpy()
radius_squared_torch = radius_squared_torch.detach().cpu().numpy()
print('center_torch', center_torch)
print('radius_squared_torch', radius_squared_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
center_tensorflow, radius_squared_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow)
sess = tf.Session()
center_tensorflow, radius_squared_tensorflow = sess.run([center_tensorflow, radius_squared_tensorflow])
print(np.abs(center_tensorflow-center_torch).max())
print(np.abs(radius_squared_tensorflow-radius_squared_torch).max())
def sqrt_safe(x):
return torch.sqrt(torch.abs(x) + 1e-10)
def compute_residue_single(center, radius_squared, p):
return (sqrt_safe(torch.sum((p - center)**2, dim=-1)) - sqrt_safe(radius_squared))**2
def sqrt_safe_tensorflow(x):
return tf.sqrt(tf.abs(x) + 1e-10)
def compute_residue_single_tensorflow(center, radius_squared, p):
return tf.square(sqrt_safe_tensorflow(tf.reduce_sum(tf.square(p - center), axis=-1)) - sqrt_safe_tensorflow(radius_squared))
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
center = np.random.randn(batch_size, num_points, 3)
radius_squared = np.random.rand(batch_size, num_points)
p = np.random.rand(batch_size, num_points, 3)
center_torch = torch.from_numpy(center).float().to(device)
radius_squared_torch = torch.from_numpy(radius_squared).float().to(device)
p_torch = torch.from_numpy(p).float().to(device)
residue_loss_torch = compute_residue_single(center_torch, radius_squared_torch, p_torch)
residue_loss_torch = residue_loss_torch.detach().cpu().numpy()
print('residue_loss_torch', residue_loss_torch)
# Debugging with Tensorflow
center_tensorflow = tf.constant(center, dtype=tf.float32)
radius_squared_tensorflow = tf.constant(radius_squared, dtype=tf.float32)
p_tensorflow = tf.constant(p, dtype=tf.float32)
residue_loss_torch_tensorflow = compute_residue_single_tensorflow(center_tensorflow, radius_squared_tensorflow, p_tensorflow)
sess = tf.Session()
residue_loss_torch_tensorflow = sess.run(residue_loss_torch_tensorflow)
print(np.abs(residue_loss_torch_tensorflow - residue_loss_torch).max())
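if __name__ == '__main__':
    # Quick sanity check (illustrative addition, not in the original file): a
    # point lying exactly on the sphere has ||p - center|| equal to the
    # radius, so compute_residue_single should return ~0 for it.
    center_chk = torch.zeros(1, 3)
    radius_squared_chk = torch.tensor([4.0])  # radius 2
    p_chk = torch.tensor([[2.0, 0.0, 0.0]])   # lies on the sphere
    print('on-sphere residue:', compute_residue_single(center_chk, radius_squared_chk, p_chk))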
def create_primitive_from_dict(d):
assert d['type'] == 'sphere'
location = np.array([d['location_x'], d['location_y'], d['location_z']], dtype=float)
radius = float(d['radius'])
return Sphere(center=location, radius=radius)
def extract_parameter_data_as_dict(primitives, n_max_primitives):
return {}
def extract_predicted_parameters_as_json(sphere_center, sphere_radius_squared, k):
sphere = Sphere(sphere_center, np.sqrt(sphere_radius_squared))
return {
'type': 'sphere',
'center_x': float(sphere.center[0]),
'center_y': float(sphere.center[1]),
'center_z': float(sphere.center[2]),
'radius': float(sphere.radius),
'label': k,
}
| 5,084 | 44.810811 | 129 |
py
|
CPFN
|
CPFN-master/SPFN/fitter_factory.py
|
import numpy as np
from SPFN import plane_fitter, sphere_fitter, cylinder_fitter, cone_fitter
primitive_name_to_id_dict = {}
def primitive_name_to_id(name):
return primitive_name_to_id_dict[name]
def get_n_registered_primitives():
return len(primitive_name_to_id_dict)
def register_primitives(primitive_name_list):
# Must be called once before everything
global primitive_name_to_id_dict
primitive_name_to_id_dict = {}
for idx, name in enumerate(primitive_name_list):
primitive_name_to_id_dict[name] = idx
print('Registered ' + ','.join(primitive_name_list))
def create_primitive_from_dict(d):
if d['type'] == 'plane':
return plane_fitter.create_primitive_from_dict(d)
elif d['type'] == 'sphere':
return sphere_fitter.create_primitive_from_dict(d)
elif d['type'] == 'cylinder':
return cylinder_fitter.create_primitive_from_dict(d)
elif d['type'] == 'cone':
return cone_fitter.create_primitive_from_dict(d)
else:
raise NotImplementedError
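if __name__ == '__main__':
    # Illustrative usage sketch (an addition, not from the original file):
    # register the four primitive types once, then build a primitive from a
    # parsed dict. The dict keys follow the per-fitter
    # create_primitive_from_dict functions above (sphere_fitter expects
    # location_x/y/z and radius entries).
    register_primitives(['plane', 'sphere', 'cylinder', 'cone'])
    print('sphere id:', primitive_name_to_id('sphere'))
    sphere_dict = {'type': 'sphere', 'location_x': 0.0, 'location_y': 0.0, 'location_z': 0.0, 'radius': 1.0}
    print(create_primitive_from_dict(sphere_dict))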
| 1,039 | 32.548387 | 74 |
py
|
CPFN
|
CPFN-master/SPFN/plane_fitter.py
|
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Plane
from SPFN.geometry_utils import weighted_plane_fitting, weighted_plane_fitting_tensorflow
def compute_parameters(P, W):
batch_size, n_points, _ = P.size()
_, _, n_max_instances = W.size()
W_reshaped = W.transpose(1, 2).contiguous().view(batch_size * n_max_instances, n_points)
P_tiled = P.unsqueeze(1).expand(batch_size, n_max_instances, n_points, 3).contiguous().view(batch_size * n_max_instances, n_points, 3)
n, c = weighted_plane_fitting(P_tiled, W_reshaped) # BKx3
n = n.view(batch_size, n_max_instances, 3)
c = c.view(batch_size, n_max_instances)
return n, c
def compute_parameters_tensorflow(P, W):
batch_size = tf.shape(P)[0]
n_points = tf.shape(P)[1]
n_max_instances = tf.shape(W)[2]
W_reshaped = tf.reshape(tf.transpose(W, [0, 2, 1]), [batch_size * n_max_instances, n_points]) # BKxN
P_tiled = tf.reshape(tf.tile(tf.expand_dims(P, axis=1), [1, n_max_instances, 1, 1]), [batch_size * n_max_instances, n_points, 3]) # BKxNx3, important there to match indices in W_reshaped!!!
n, c = weighted_plane_fitting_tensorflow(P_tiled, W_reshaped) # BKx3
n = tf.reshape(n, [batch_size, n_max_instances, 3]) # BxKx3
c = tf.reshape(c, [batch_size, n_max_instances]) # BxK
return n, c
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
n_torch, c_torch = compute_parameters(P_torch, W_torch)
n_torch = n_torch.detach().cpu().numpy()
c_torch = c_torch.detach().cpu().numpy()
print('n_torch', n_torch)
print('c_torch', c_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
n_tensorflow, c_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow)
sess = tf.Session()
n_tensorflow, c_tensorflow = sess.run([n_tensorflow, c_tensorflow])
print(np.minimum(np.abs(n_tensorflow - n_torch), np.abs(n_tensorflow + n_torch)).max())
print(np.minimum(np.abs(c_tensorflow - c_torch), np.abs(c_tensorflow + c_torch)).max())
def compute_residue_single(n, c, p):
return (torch.sum(p * n, dim=-1) - c)**2
def compute_residue_single_tensorflow(n, c, p):
# n: ...x3, c: ..., p: ...x3
return tf.square(tf.reduce_sum(p * n, axis=-1) - c)
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
n = np.random.randn(batch_size, num_points, 3)
c = np.random.rand(batch_size, num_points)
p = np.random.rand(batch_size, num_points, 3)
n_torch = torch.from_numpy(n).float().to(device)
c_torch = torch.from_numpy(c).float().to(device)
p_torch = torch.from_numpy(p).float().to(device)
residue_loss_torch = compute_residue_single(n_torch, c_torch, p_torch)
residue_loss_torch = residue_loss_torch.detach().cpu().numpy()
print('residue_loss_torch', residue_loss_torch)
# Debugging with Tensorflow
n_tensorflow = tf.constant(n, dtype=tf.float32)
c_tensorflow = tf.constant(c, dtype=tf.float32)
p_tensorflow = tf.constant(p, dtype=tf.float32)
residue_loss_torch_tensorflow = compute_residue_single_tensorflow(n_tensorflow, c_tensorflow, p_tensorflow)
sess = tf.Session()
residue_loss_torch_tensorflow = sess.run(residue_loss_torch_tensorflow)
print(np.abs(residue_loss_torch_tensorflow-residue_loss_torch).max())
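if __name__ == '__main__':
    # Quick sanity check (illustrative addition, not in the original file): a
    # point p with <p, n> = c lies on the plane, so its residue is exactly 0.
    n_chk = torch.tensor([[0.0, 0.0, 1.0]])   # plane z = 2
    c_chk = torch.tensor([2.0])
    p_chk = torch.tensor([[5.0, -3.0, 2.0]])  # on the plane
    print('on-plane residue:', compute_residue_single(n_chk, c_chk, p_chk))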
def acos_safe(x):
return torch.acos(torch.clamp(x, min=-1.0+1e-6, max=1.0-1e-6))
def compute_parameter_loss(predicted_n, gt_n, matching_indices, angle_diff):
# predicted_axis: BxK1x3
# gt_axis: BXK2x3
# matching indices: BxK2
batch_size, nb_primitives, _ = gt_n.size()
predicted_n = torch.gather(predicted_n, 1, matching_indices.unsqueeze(2).expand(batch_size, nb_primitives, 3))
dot_abs = torch.abs(torch.sum(predicted_n * gt_n, axis=2))
if angle_diff:
return acos_safe(dot_abs) # BxK
else:
return 1.0 - dot_abs # BxK
def batched_gather(data, indices, axis):
# data - Bx...xKx..., axis is where dimension K is
# indices - BxK
# output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
assert axis >= 1
ndims = data.get_shape().ndims # allow dynamic rank
if axis > 1:
        # transpose data to BxKx...
perm = np.arange(ndims)
perm[axis] = 1
perm[1] = axis
data = tf.transpose(data, perm=perm)
batch_size = tf.shape(data)[0]
batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
gathered_data = tf.gather_nd(data, indices=indices)
if axis > 1:
gathered_data = tf.transpose(gathered_data, perm=perm)
return gathered_data
def acos_safe_tensorflow(x):
return tf.math.acos(tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6))
def compute_parameter_loss_tensorflow(predicted_n, gt_n, matching_indices, angle_diff):
n = batched_gather(predicted_n, matching_indices, axis=1)
dot_abs = tf.abs(tf.reduce_sum(n * gt_n, axis=2))
if angle_diff:
return acos_safe_tensorflow(dot_abs) # BxK
else:
return 1.0 - dot_abs # BxK
if __name__ == '__main__':
batch_size = 100
num_primitives1 = 15
num_primitives2 = 5
device = torch.device('cuda:0')
np.random.seed(0)
predicted_axis = np.random.randn(batch_size, num_primitives1, 3)
gt_axis = np.random.randn(batch_size, num_primitives2, 3)
matching_indices = np.random.randint(0, 15, (batch_size, num_primitives2))
angle_diff = True
predicted_axis_torch = torch.from_numpy(predicted_axis).float().to(device)
gt_axis_torch = torch.from_numpy(gt_axis).float().to(device)
matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
loss_torch = compute_parameter_loss(predicted_axis_torch, gt_axis_torch, matching_indices_torch, angle_diff)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
predicted_axis_tensorflow = tf.constant(predicted_axis, dtype=tf.float32)
gt_axis_tensorflow = tf.constant(gt_axis, dtype=tf.float32)
matching_indices_tensorflow = tf.constant(matching_indices, dtype=tf.int32)
loss_tensorflow = compute_parameter_loss_tensorflow(predicted_axis_tensorflow, gt_axis_tensorflow,
matching_indices_tensorflow, angle_diff)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
def create_primitive_from_dict(d):
assert d['type'] == 'plane'
location = np.array([d['location_x'], d['location_y'], d['location_z']], dtype=float)
axis = np.array([d['axis_x'], d['axis_y'], d['axis_z']], dtype=float)
return Plane(n=axis, c=np.dot(location, axis))
def extract_parameter_data_as_dict(primitives, n_max_instances):
n = np.zeros(dtype=float, shape=[n_max_instances, 3])
for i, primitive in enumerate(primitives):
if isinstance(primitive, Plane):
n[i] = primitive.n
return {
'plane_n_gt': n
}
def extract_predicted_parameters_as_json(plane_normal, plane_center, k):
# This is only for a single plane
plane = Plane(plane_normal, plane_center)
json_info = {
'type': 'plane',
'center_x': float(plane.center[0]),
'center_y': float(plane.center[1]),
'center_z': float(plane.center[2]),
'normal_x': float(plane.n[0]),
'normal_y': float(plane.n[1]),
'normal_z': float(plane.n[2]),
'x_size': float(plane.x_range[1] - plane.x_range[0]),
'y_size': float(plane.y_range[1] - plane.y_range[0]),
'x_axis_x': float(plane.x_axis[0]),
'x_axis_y': float(plane.x_axis[1]),
'x_axis_z': float(plane.x_axis[2]),
'y_axis_x': float(plane.y_axis[0]),
'y_axis_y': float(plane.y_axis[1]),
'y_axis_z': float(plane.y_axis[2]),
'label': k,
}
return json_info
| 8,518 | 43.369792 | 194 |
py
|
CPFN
|
CPFN-master/SPFN/metric_implementation.py
|
# Importation of packages
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from SPFN import plane_fitter, sphere_fitter, cylinder_fitter, cone_fitter
from SPFN import losses_implementation
def hungarian_matching(W_pred, I_gt):
    # This function does not backprop gradients; it only outputs the matching indices
# W_pred - BxNxK
# I_gt - BxN, may contain -1's
# Output: matching_indices - BxK, where (b,k)th ground truth primitive is matched with (b, matching_indices[b, k])
# where only n_gt_labels entries on each row have meaning. The matching does not include gt background instance
batch_size, n_points, n_max_labels = W_pred.size()
matching_indices = torch.zeros([batch_size, n_max_labels], dtype=torch.long).to(W_pred.device)
mask = torch.zeros([batch_size, n_max_labels], dtype=torch.bool).to(W_pred.device)
for b in range(batch_size):
# assuming I_gt[b] does not have gap
n_gt_labels = torch.max(I_gt[b]).item() + 1 # this is K'
W_gt = torch.eye(n_gt_labels+1).to(I_gt.device)[I_gt[b]]
dot = torch.mm(W_gt.transpose(0,1), W_pred[b])
denominator = torch.sum(W_gt, dim=0).unsqueeze(1) + torch.sum(W_pred[b], dim=0).unsqueeze(0) - dot
cost = dot / torch.clamp(denominator, min=1e-10, max=None) # K'xK
cost = cost[:n_gt_labels, :] # remove last row, corresponding to matching gt background instance
_, col_ind = linear_sum_assignment(-cost.detach().cpu().numpy()) # want max solution
col_ind = torch.from_numpy(col_ind).long().to(matching_indices.device)
matching_indices[b, :n_gt_labels] = col_ind
mask[b, :n_gt_labels] = True
return matching_indices, mask
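if __name__ == '__main__' and 1:
    # Tiny worked example (illustrative addition, not in the original file):
    # one batch element, 3 points, gt labels [0, 1, 1]. W_pred puts point 0 on
    # predicted column 1 and points 1, 2 on column 0, so the matching should
    # map gt primitive 0 -> column 1 and gt primitive 1 -> column 0.
    W_pred_chk = torch.tensor([[[0.1, 0.9], [0.8, 0.2], [0.7, 0.3]]])
    I_gt_chk = torch.tensor([[0, 1, 1]])
    matching_chk, mask_chk = hungarian_matching(W_pred_chk, I_gt_chk)
    print('matching:', matching_chk, 'mask:', mask_chk)  # expected matching [[1, 0]]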
# Converting W to hard encoding
def hard_W_encoding(W):
# W - BxNxK
_, _, num_labels = W.size()
hardW = torch.eye(num_labels).to(W.device)[torch.argmax(W, dim=2)]
return hardW
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
W_torch = torch.from_numpy(W).float().to(device)
hardW = hard_W_encoding(W_torch)
print('hardW', hardW.size())
# Getting the per instance type
def get_instance_type(T, W):
instance_type = torch.bmm(W.transpose(1,2), T)
instance_type = torch.argmax(instance_type, dim=2)
return instance_type
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
n_max_instances = 12
n_type = 4
device = torch.device('cuda:0')
np.random.seed(0)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, n_type)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
instance_type = get_instance_type(T_torch, W_torch)
print('instance_type', instance_type.size())
def sqrt_safe(x):
return torch.sqrt(torch.abs(x) + 1e-10)
# Getting the residual loss
def get_residual_loss(parameters, matching_indices, points_per_instance, T, classes=['plane','sphere','cylinder','cone']):
batch_size, num_primitives, num_primitive_points, _ = points_per_instance.shape
_, residue_per_point_array = losses_implementation.compute_residue_loss(parameters, matching_indices, points_per_instance, torch.gather(T, 1, matching_indices), classes=classes)
residue_per_point_array = torch.gather(residue_per_point_array, 3, T.view(batch_size, num_primitives, 1, 1).expand(batch_size, num_primitives, num_primitive_points, 1)).squeeze(3)
residual_loss = sqrt_safe(residue_per_point_array)
return residual_loss
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
W_torch = hard_W_encoding(W_torch)
parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch, _ = hungarian_matching(W_torch, I_gt_torch)
T_torch = get_instance_type(T_torch, W_torch)
residual_loss = get_residual_loss(parameters_torch, matching_indices_torch, points_per_instance_torch, T_torch, classes=['plane','sphere','cylinder','cone'])
print('residual_loss', residual_loss.size())
# Arccos safe
def acos_safe(x):
return torch.acos(torch.clamp(x, min=-1.0+1e-6, max=1.0-1e-6))
# Segmentation mIoU
def compute_segmentation_iou(W, I_gt, matching_indices, mask):
    # W - BxNxK
mIoU = 1 - losses_implementation.compute_miou_loss(W, I_gt, matching_indices)[0]
mIoU = torch.sum(mask * mIoU, dim=1) / torch.sum(mask, dim=1)
return mIoU
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
W_torch = torch.from_numpy(W).float().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
W_torch = hard_W_encoding(W_torch)
matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
mIou = compute_segmentation_iou(W_torch, I_gt_torch, matching_indices_torch, mask_torch)
print('mIou', mIou.size())
# Mean primitive type accuracy
def compute_type_accuracy(T, T_gt, matching_indices, mask):
    T_reordered = torch.gather(T, 1, matching_indices) # BxK
    type_accuracy = torch.sum(mask * (T_reordered == T_gt).float(), dim=1) / torch.sum(mask, dim=1)
return type_accuracy
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
mask = np.random.randint(0, 2, (batch_size, n_max_instances))
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
mask_torch = torch.from_numpy(mask).float().to(device)
W_torch = hard_W_encoding(W_torch)
T_torch = get_instance_type(T_torch, W_torch)
    matching_indices = np.random.randint(0, n_max_instances, (batch_size, n_max_instances))
    matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
    type_accuracy = compute_type_accuracy(T_torch, T_gt_torch, matching_indices_torch, mask_torch)
print('type_accuracy', type_accuracy.size())
# Mean point normal difference
def compute_normal_difference(X, X_gt):
normal_difference = torch.mean(acos_safe(torch.abs(torch.sum(X*X_gt, dim=2))), dim=1)
return normal_difference
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
X_gt = np.random.randn(batch_size, num_points, 3)
X_gt = X_gt / np.linalg.norm(X_gt, axis=2, keepdims=True)
X_torch = torch.from_numpy(X).float().to(device)
X_gt_torch = torch.from_numpy(X_gt).float().to(device)
    normal_difference = compute_normal_difference(X_torch, X_gt_torch)
print('normal_difference', normal_difference.size())
# Mean primitive axis difference
def compute_axis_difference(predicted_parameters, gt_parameters, matching_indices, T, T_gt, mask, classes=['plane','sphere','cylinder','cone'], div_eps=1e-10):
mask = mask * (T == T_gt).float()
parameter_loss = losses_implementation.compute_parameter_loss(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval=True, classes=classes)
    axis_difference = torch.sum(mask * parameter_loss, dim=1) / torch.clamp(torch.sum(mask, dim=1), min=div_eps, max=None)
return axis_difference
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
W_torch = hard_W_encoding(W_torch)
predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
T_torch = get_instance_type(T_torch, W_torch)
axis_difference = compute_axis_difference(predicted_parameters_torch, gt_parameters_torch, matching_indices_torch, T_torch, T_gt_torch, mask_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
print('axis_difference', axis_difference.size())
# Mean/Std Sk residual
def compute_meanstd_Sk_residual(residue_loss, mask):
mean_residual = torch.sum(mask * torch.mean(residue_loss, dim=2), dim=1) / torch.sum(mask, dim=1)
std_residual = torch.sum(mask * torch.std(residue_loss, dim=2), dim=1) / torch.sum(mask, dim=1)
return mean_residual, std_residual
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(
device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(
gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
W_torch = hard_W_encoding(W_torch)
predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
T_torch = get_instance_type(T_torch, W_torch)
residue_loss_torch = get_residual_loss(predicted_parameters_torch, matching_indices_torch, points_per_instance_torch, T_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
mean_residual, std_residual = compute_meanstd_Sk_residual(residue_loss_torch, mask_torch)
print('Mean Sk Residual Loss: ', mean_residual)
print('Std Sk Residual Loss: ', std_residual)
# Sk coverage
def compute_Sk_coverage(residue_loss, epsilon, mask):
residue_loss = torch.mean((residue_loss < epsilon).float(), dim=2)
Sk_coverage = torch.sum(mask * residue_loss, dim=1) / torch.sum(mask, dim=1)
return Sk_coverage
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
epsilon = 0.01
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(
device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(
gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
W_torch = hard_W_encoding(W_torch)
predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
T_torch = get_instance_type(T_torch, W_torch)
residue_loss_torch = get_residual_loss(predicted_parameters_torch, matching_indices_torch,
points_per_instance_torch, T_torch,
classes=['plane', 'sphere', 'cylinder', 'cone'])
Sk_coverage = compute_Sk_coverage(residue_loss_torch, epsilon, mask_torch)
print('Sk Coverage : ', Sk_coverage)
# P coverage
def compute_P_coverage(P, T, matching_indices, predicted_parameters, epsilon, classes=['plane', 'sphere', 'cylinder', 'cone']):
batch_size, num_points, _ = P.size()
_, num_primitives = T.size()
residue_loss = get_residual_loss(predicted_parameters, matching_indices, P.unsqueeze(1).expand(batch_size, num_primitives, num_points, 3), torch.gather(T, 1, matching_indices), classes=classes)
residue_loss, _ = torch.min(residue_loss, dim=1)
P_coverage = torch.mean((residue_loss < epsilon).float(), dim=1)
return P_coverage
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
epsilon = 0.01
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(
device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(
gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
W_torch = hard_W_encoding(W_torch)
predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
T_torch = get_instance_type(T_torch, W_torch)
    P_coverage = compute_P_coverage(P_torch, T_torch, matching_indices_torch, predicted_parameters_torch, epsilon, classes=['plane', 'sphere', 'cylinder', 'cone'])
print('P Coverage : ', P_coverage)
def compute_all_metrics(P, X, X_gt, W, I_gt, T, T_gt, points_per_instance, gt_parameters, list_epsilon=[0.01, 0.02], classes=['plane', 'sphere', 'cylinder', 'cone']):
W = hard_W_encoding(W)
T = get_instance_type(T, W)
diff = T.size(1) - T_gt.size(1)
if diff>0:
T_gt = torch.cat((T_gt, torch.zeros_like(T_gt[:, 0:1]).expand(-1, diff)), dim=1)
elif diff < 0:
W = torch.cat((W, torch.zeros_like(W[:,:,0:1]).expand(-1, -1, -diff)), dim=2)
T = torch.cat((T, torch.zeros_like(T[:, 0:1]).expand(-1, -diff)), dim=1)
matching_indices, mask = hungarian_matching(W, I_gt)
mask = mask.float()
mIoU = compute_segmentation_iou(W, I_gt, matching_indices, mask)
type_accuracy = compute_type_accuracy(T, T_gt, matching_indices, mask)
normal_difference = compute_normal_difference(X, X_gt)
predicted_parameters = losses_implementation.compute_parameters(P, W, X)
if diff > 0:
gt_parameters['plane_normal'] = torch.cat((gt_parameters['plane_normal'], torch.zeros_like(gt_parameters['plane_normal'][:, 0:1]).expand(-1, diff, 3)), dim=1)
gt_parameters['cylinder_axis'] = torch.cat((gt_parameters['cylinder_axis'], torch.zeros_like(gt_parameters['cylinder_axis'][:, 0:1]).expand(-1, diff, 3)), dim=1)
gt_parameters['cone_axis'] = torch.cat((gt_parameters['cone_axis'], torch.zeros_like(gt_parameters['cone_axis'][:, 0:1]).expand(-1, diff, 3)), dim=1)
points_per_instance = torch.cat((points_per_instance, torch.zeros_like(points_per_instance[:,0:1]).expand(-1, diff, 512, 3)), dim=1)
axis_difference = compute_axis_difference(predicted_parameters, gt_parameters, matching_indices, T, T_gt, mask, classes=classes)
residue_loss = get_residual_loss(predicted_parameters, matching_indices, points_per_instance, T_gt, classes=classes)
mean_residual, std_residual = compute_meanstd_Sk_residual(residue_loss, mask)
Sk_coverage = []
for epsilon in list_epsilon:
Sk_coverage.append(compute_Sk_coverage(residue_loss, epsilon, mask))
P_coverage = []
for epsilon in list_epsilon:
P_coverage.append(compute_P_coverage(P, T, matching_indices, predicted_parameters, epsilon, classes=classes))
return mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T
if __name__ == '__main__' and 1:
batch_size = 8
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
W = W / np.linalg.norm(W, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T = T / np.linalg.norm(T, axis=2, keepdims=True)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
X_gt = np.random.randn(batch_size, num_points, 3)
X_gt = X_gt / np.linalg.norm(X_gt, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
X_gt_torch = torch.from_numpy(X_gt).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(
device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(
gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W_hard, predicted_parameters, T_instance = compute_all_metrics(P_torch, X_torch, X_gt_torch, W_torch, I_gt_torch, T_torch, T_gt_torch, points_per_instance_torch, gt_parameters_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
print('mIoU', mIoU.size())
print('type_accuracy', type_accuracy.size())
print('normal_difference', normal_difference.size())
print('axis_difference', axis_difference.size())
print('mean_residual', mean_residual.size())
print('std_residual', std_residual.size())
for i in range(len(Sk_coverage)):
print('Sk_coverage_%d'%i, Sk_coverage[i].size())
for i in range(len(P_coverage)):
print('P_coverage_%d'%i, P_coverage[i].size())
def creates_json(T, predicted_parameters):
list_json = []
for i, type_id in enumerate(T):
if type_id == 0:
json = plane_fitter.extract_predicted_parameters_as_json(predicted_parameters['plane_normal'][0,i].cpu().numpy(), predicted_parameters['plane_center'][0,i].cpu().numpy(), i)
elif type_id == 1:
json = sphere_fitter.extract_predicted_parameters_as_json(predicted_parameters['sphere_center'][0,i].cpu().numpy(), predicted_parameters['sphere_radius_squared'][0,i].cpu().numpy(), i)
elif type_id == 2:
json = cylinder_fitter.extract_predicted_parameters_as_json(predicted_parameters['cylinder_center'][0,i].cpu().numpy(), predicted_parameters['cylinder_radius_squared'][0,i].cpu().numpy(), predicted_parameters['cylinder_axis'][0,i].cpu().numpy(), i)
elif type_id == 3:
json = cone_fitter.extract_predicted_parameters_as_json(predicted_parameters['cone_apex'][0,i].cpu().numpy(), predicted_parameters['cone_axis'][0,i].cpu().numpy(), predicted_parameters['cone_half_angle'][0,i].cpu().numpy(), i)
list_json.append(json)
return list_json
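if __name__ == '__main__' and 1:
    # Illustrative addition (not in the original file): creates_json returns
    # plain JSON-serializable dicts, one per primitive, so predictions can be
    # dumped straight to disk. Two fake spheres are used here; the 1 x K
    # tensor shapes mirror what losses_implementation.compute_parameters
    # produces for a single batch element.
    import json
    fake_parameters = {'sphere_center': torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]]),
                       'sphere_radius_squared': torch.tensor([[4.0, 1.0]])}
    print(json.dumps(creates_json([1, 1], fake_parameters), indent=2))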
| 36,132 | 59.121464 | 304 |
py
|
CPFN
|
CPFN-master/SPFN/geometry_utils.py
|
# Importing packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.differentiable_tls import solve_weighted_tls, solve_weighted_tls_tensorflow
def compute_consistent_plane_frame(normal):
# Input: normal is Bx3
# Returns: x_axis, y_axis, both of dimension Bx3
device = normal.get_device()
batch_size, _ = normal.size()
candidate_axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] # Actually, 2 should be enough. This may still cause singularity TODO!!!
y_axes = []
for tmp_axis in candidate_axes:
torch_axis = torch.FloatTensor(tmp_axis).to(device).unsqueeze(0)
y_axes.append(torch.cross(normal, torch_axis.expand(batch_size, 3)))
y_axes = torch.stack(y_axes, dim=0) # QxBx3
y_axes_norm = torch.norm(y_axes, dim=2) # QxB
# choose the axis with largest norm
y_axes_chosen_idx = torch.argmax(y_axes_norm, dim=0) # B
y_axes_chosen_idx = y_axes_chosen_idx.view(1, batch_size, 1).expand(1, batch_size, 3)
# y_axes_chosen[b, :] = y_axes[y_axes_chosen_idx[b], b, :]
y_axes = torch.gather(y_axes, 0, y_axes_chosen_idx).squeeze(0)
y_axes = torch.nn.functional.normalize(y_axes, p=2, dim=1, eps=1e-12)
x_axes = torch.cross(y_axes, normal) # Bx3
return x_axes, y_axes
def compute_consistent_plane_frame_tensorflow(normal):
# Input: normal is Bx3
# Returns: x_axis, y_axis, both of dimension Bx3
batch_size = tf.shape(normal)[0]
candidate_axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] # Actually, 2 should be enough. This may still cause singularity TODO!!!
y_axes = []
for tmp_axis in candidate_axes:
tf_axis = tf.tile(tf.expand_dims(tf.constant(dtype=tf.float32, value=tmp_axis), axis=0), [batch_size, 1]) # Bx3
y_axes.append(tf.cross(normal, tf_axis))
y_axes = tf.stack(y_axes, axis=0) # QxBx3
y_axes_norm = tf.norm(y_axes, axis=2) # QxB
# choose the axis with largest norm
y_axes_chosen_idx = tf.argmax(y_axes_norm, axis=0) # B
# y_axes_chosen[b, :] = y_axes[y_axes_chosen_idx[b], b, :]
indices_0 = tf.tile(tf.expand_dims(y_axes_chosen_idx, axis=1), [1, 3]) # Bx3
indices_1 = tf.tile(tf.expand_dims(tf.range(batch_size), axis=1), [1, 3]) # Bx3
indices_2 = tf.tile(tf.expand_dims(tf.range(3), axis=0), [batch_size, 1]) # Bx3
indices = tf.stack([tf.cast(indices_0, tf.int32), indices_1, indices_2], axis=2) # Bx3x3
y_axes = tf.gather_nd(y_axes, indices=indices) # Bx3
if tf.VERSION == '1.4.1':
y_axes = tf.nn.l2_normalize(y_axes, dim=1)
else:
y_axes = tf.nn.l2_normalize(y_axes, axis=1)
x_axes = tf.cross(y_axes, normal) # Bx3
return x_axes, y_axes
if __name__ == '__main__':
batch_size = 100
device = torch.device('cuda:0')
np.random.seed(0)
normal = np.random.randn(batch_size, 3)
normal = normal / np.linalg.norm(normal, axis=1, keepdims=True)
normal_torch = torch.from_numpy(normal).float().to(device)
x_axes_torch, y_axes_torch = compute_consistent_plane_frame(normal_torch)
x_axes_torch = x_axes_torch.detach().cpu().numpy()
y_axes_torch = y_axes_torch.detach().cpu().numpy()
print('x_axes_torch', x_axes_torch)
print('y_axes_torch', y_axes_torch)
# Debugging with Tensorflow
normal_tensorflow = tf.constant(normal, dtype=tf.float32)
x_axes_tensorflow, y_axes_tensorflow = compute_consistent_plane_frame_tensorflow(normal_tensorflow)
sess = tf.Session()
x_axes_tensorflow, y_axes_tensorflow = sess.run([x_axes_tensorflow, y_axes_tensorflow])
print(np.max(np.abs(x_axes_tensorflow-x_axes_torch)))
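if __name__ == '__main__':
    # Quick sanity check (illustrative addition, not in the original file):
    # the returned axes should be unit length and orthogonal both to each
    # other and to the input normal, i.e. all the dot products below ~0.
    device = torch.device('cuda:0')
    n_chk = torch.nn.functional.normalize(torch.randn(4, 3), p=2, dim=1).to(device)
    x_chk, y_chk = compute_consistent_plane_frame(n_chk)
    print('x.n', torch.sum(x_chk * n_chk, dim=1))
    print('x.y', torch.sum(x_chk * y_chk, dim=1))
    print('|x|', torch.norm(x_chk, dim=1))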
def weighted_plane_fitting(P, W, division_eps=1e-10):
# P - BxNx3
# W - BxN
# Returns n, c, with n - Bx3, c - B
WP = P * W.unsqueeze(2) # BxNx3
W_sum = torch.sum(W, dim=1, keepdim=True) # Bx1
P_weighted_mean = torch.sum(WP, dim=1) / torch.clamp(W_sum, min=division_eps, max=None) # Bx3
A = P - P_weighted_mean.unsqueeze(1) # BxNx3
n = solve_weighted_tls(A, W) # Bx3
c = torch.sum(n * P_weighted_mean, dim=1)
return n, c
def weighted_plane_fitting_tensorflow(P, W, division_eps=1e-10):
# P - BxNx3
# W - BxN
# Returns n, c, with n - Bx3, c - B
WP = P * tf.expand_dims(W, axis=2) # BxNx3
W_sum = tf.reduce_sum(W, axis=1) # B
P_weighted_mean = tf.reduce_sum(WP, axis=1) / tf.maximum(tf.expand_dims(W_sum, 1), division_eps) # Bx3
A = P - tf.expand_dims(P_weighted_mean, axis=1) # BxNx3
n = solve_weighted_tls_tensorflow(A, W) # Bx3
c = tf.reduce_sum(n * P_weighted_mean, axis=1)
return n, c
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
n_torch, c_torch = weighted_plane_fitting(P_torch, W_torch)
n_torch = n_torch.detach().cpu().numpy()
c_torch = c_torch.detach().cpu().numpy()
print('n_torch', n_torch)
print('c_torch', c_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
n_tensorflow, c_tensorflow = weighted_plane_fitting_tensorflow(P_tensorflow, W_tensorflow)
sess = tf.Session()
n_tensorflow, c_tensorflow = sess.run([n_tensorflow, c_tensorflow])
print(np.minimum(np.abs(n_tensorflow - n_torch), np.abs(n_tensorflow + n_torch)).max())
print(np.minimum(np.abs(c_tensorflow - c_torch), np.abs(c_tensorflow + c_torch)).max())
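if __name__ == '__main__':
    # Quick sanity check (illustrative addition, not in the original file):
    # 256 points on the plane z = 1 with uniform weights should recover a
    # normal of +/-(0, 0, 1) and |c| ~= 1.
    device = torch.device('cuda:0')
    P_chk = torch.rand(1, 256, 3).to(device)
    P_chk[:, :, 2] = 1.0
    W_chk = torch.ones(1, 256).to(device)
    n_chk, c_chk = weighted_plane_fitting(P_chk, W_chk)
    print('recovered n:', n_chk, 'recovered c:', c_chk)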
def guarded_matrix_solve_ls(A, b, W, condition_number_cap=1e5, sqrt_eps=1e-10, ls_l2_regularizer=1e-8):
# Solve weighted least square ||\sqrt(W)(Ax-b)||^2
# A - BxNxD
# b - BxNx1
# W - BxN
batch_size, _, dim = A.size()
    sqrt_W = torch.sqrt(torch.clamp(W, min=sqrt_eps, max=None)).unsqueeze(2) # BxNx1
A = A * sqrt_W # BxNxD
b = b * sqrt_W # BxNx1
# Compute singular value, trivializing the problem when condition number is too large
AtA = torch.bmm(A.transpose(1,2), A)
_, s, _ = torch.svd(AtA, compute_uv=False) # s will be BxD
s = s.detach()
mask = s[:,0] / s[:,-1] < condition_number_cap # B
AtA = AtA * mask.float().view(batch_size, 1, 1) + ls_l2_regularizer * torch.eye(dim).unsqueeze(0).to(A.device) # zero out badly conditioned data
Atb = torch.bmm(A.transpose(1, 2) * mask.float().view(batch_size, 1, 1), b)
x, _ = torch.solve(Atb, AtA)
x = x.squeeze(2)
return x # BxD
if __name__ == '__main__':
sqrt_eps = 1e-10
ls_l2_regularizer = 1e-8
batch_size = 100
num_points = 1024
dimension = 3
device = torch.device('cuda:0')
np.random.seed(0)
A = np.random.randn(batch_size, num_points, dimension)
b = np.random.randn(batch_size, num_points, 1)
W = np.random.rand(batch_size, num_points)
A = torch.from_numpy(A).float().to(device)
b = torch.from_numpy(b).float().to(device)
W = torch.from_numpy(W).float().to(device)
sqrt_W = torch.sqrt(torch.clamp(W, sqrt_eps)).unsqueeze(2) # BxN
A = A * sqrt_W # BxNxD
b = b * sqrt_W # BxNx1
AtA = torch.bmm(A.transpose(1, 2), A)
mask = torch.zeros([batch_size]).float().to(A.device) # B
AtA = AtA * mask.view(batch_size, 1, 1) + ls_l2_regularizer * torch.eye(dimension).unsqueeze(0).to(device) # zero out badly conditioned data
Atb = torch.bmm(A.transpose(1, 2) * mask.view(batch_size, 1, 1), b)
    x, _ = torch.solve(Atb, AtA)
def guarded_matrix_solve_ls_tensorflow(A, b, W, condition_number_cap=1e5, sqrt_eps=1e-10, ls_l2_regularizer=1e-8):
# Solve weighted least square ||\sqrt(W)(Ax-b)||^2
# A - BxNxD
# b - BxNx1
# W - BxN
sqrt_W = tf.sqrt(tf.maximum(W, sqrt_eps)) # BxN
A *= tf.expand_dims(sqrt_W, axis=2) # BxNxD
b *= tf.expand_dims(sqrt_W, axis=2) # BxNx1
# Compute singular value, trivializing the problem when condition number is too large
AtA = tf.matmul(a=A, b=A, transpose_a=True)
s, _, _ = [tf.stop_gradient(u) for u in tf.svd(AtA)] # s will be BxD
mask = tf.less(s[:, 0] / s[:, -1], condition_number_cap) # B
A *= tf.to_float(tf.expand_dims(tf.expand_dims(mask, axis=1), axis=2)) # zero out badly conditioned data
x = tf.matrix_solve_ls(A, b, l2_regularizer=ls_l2_regularizer, fast=True) # BxDx1
return tf.squeeze(x, axis=2) # BxD
if __name__ == '__main__':
batch_size = 100
num_points = 1024
dimension = 3
device = torch.device('cuda:0')
np.random.seed(0)
A = np.random.randn(batch_size, num_points, dimension)
b = np.random.randn(batch_size, num_points, 1)
W = np.random.rand(batch_size, num_points)
A_torch = torch.from_numpy(A).float().to(device)
b_torch = torch.from_numpy(b).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
x_torch = guarded_matrix_solve_ls(A_torch, b_torch, W_torch)
x_torch = x_torch.detach().cpu().numpy()
print('x_torch', x_torch)
# Debugging with Tensorflow
A_tensorflow = tf.constant(A, dtype=tf.float32)
b_tensorflow = tf.constant(b, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
x_tensorflow = guarded_matrix_solve_ls_tensorflow(A_tensorflow, b_tensorflow, W_tensorflow)
sess = tf.Session()
x_tensorflow = sess.run(x_tensorflow)
print(np.max(np.abs(x_tensorflow - x_torch)))
def weighted_sphere_fitting(P, W, division_eps=1e-10):
# P - BxNxD
# W - BxN
W_sum = torch.sum(W, axis=1) # B
WP_sqr_sum = torch.sum(W * torch.sum(P**2, axis=2), axis=1) # B
P_sqr = torch.sum(P**2, axis=2) # BxN
b = ((WP_sqr_sum / torch.clamp(W_sum, min=division_eps, max=None)).unsqueeze(1) - P_sqr).unsqueeze(2) # BxNx1
WP_sum = torch.sum(W.unsqueeze(2) * P, dim=1) # BxD
A = 2 * ((WP_sum / torch.clamp(W_sum, min=division_eps, max=None).unsqueeze(1)).unsqueeze(1) - P) # BxNxD
# Seek least norm solution to the least square
center = guarded_matrix_solve_ls(A, b, W) # BxD
W_P_minus_C_sqr_sum = P - center.unsqueeze(1) # BxNxD
W_P_minus_C_sqr_sum = W * torch.sum(W_P_minus_C_sqr_sum**2, dim=2) # BxN
r_sqr = torch.sum(W_P_minus_C_sqr_sum, dim=1) / torch.clamp(W_sum, min=division_eps, max=None) # B
return center, r_sqr
def weighted_sphere_fitting_tensorflow(P, W, division_eps=1e-10):
# P - BxNxD
# W - BxN
W_sum = tf.reduce_sum(W, axis=1) # B
WP_sqr_sum = tf.reduce_sum(W * tf.reduce_sum(tf.square(P), axis=2), axis=1) # B
P_sqr = tf.reduce_sum(tf.square(P), axis=2) # BxN
b = tf.expand_dims(tf.expand_dims(WP_sqr_sum / tf.maximum(W_sum, division_eps), axis=1) - P_sqr, axis=2) # BxNx1
WP_sum = tf.reduce_sum(tf.expand_dims(W, axis=2) * P, axis=1) # BxD
A = 2 * (tf.expand_dims(WP_sum / tf.expand_dims(tf.maximum(W_sum, division_eps), axis=1), axis=1) - P) # BxNxD
# Seek least norm solution to the least square
center = guarded_matrix_solve_ls_tensorflow(A, b, W) # BxD
W_P_minus_C_sqr_sum = P - tf.expand_dims(center, axis=1) # BxNxD
W_P_minus_C_sqr_sum = W * tf.reduce_sum(tf.square(W_P_minus_C_sqr_sum), axis=2) # BxN
r_sqr = tf.reduce_sum(W_P_minus_C_sqr_sum, axis=1) / tf.maximum(W_sum, division_eps) # B
return center, r_sqr
if __name__ == '__main__':
batch_size = 100
num_points = 1024
dimension = 3
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, dimension)
W = np.random.rand(batch_size, num_points)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
center_torch, r_sqr_torch = weighted_sphere_fitting(P_torch, W_torch)
center_torch = center_torch.detach().cpu().numpy()
r_sqr_torch = r_sqr_torch.detach().cpu().numpy()
print('center_torch', center_torch)
print('r_sqr_torch', r_sqr_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
center_tensorflow, r_sqr_tensorflow = weighted_sphere_fitting_tensorflow(P_tensorflow, W_tensorflow)
sess = tf.Session()
center_tensorflow, r_sqr_tensorflow = sess.run([center_tensorflow, r_sqr_tensorflow])
print(np.max(np.abs(center_tensorflow - center_torch)))
print(np.max(np.abs(r_sqr_tensorflow - r_sqr_torch)))
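if __name__ == '__main__':
    # Quick sanity check (illustrative addition, not in the original file):
    # points on the unit sphere centered at (1, 2, 3) with uniform weights
    # should recover that center and r_sqr ~= 1.
    device = torch.device('cuda:0')
    dirs_chk = torch.nn.functional.normalize(torch.randn(1, 512, 3), p=2, dim=2)
    P_chk = (dirs_chk + torch.tensor([1.0, 2.0, 3.0])).to(device)
    W_chk = torch.ones(1, 512).to(device)
    center_chk, r_sqr_chk = weighted_sphere_fitting(P_chk, W_chk)
    print('recovered center:', center_chk, 'recovered r_sqr:', r_sqr_chk)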
| 12,490 | 46.494297 | 148 |
py
|
CPFN
|
CPFN-master/SPFN/differentiable_tls.py
|
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from torch.autograd import gradcheck
def guard_one_over_matrix(M, min_abs_value=1e-10):
_, row, _ = M.size()
device = M.get_device()
up = torch.triu(torch.clamp(M, min=min_abs_value, max=None), diagonal=0)
low = torch.tril(torch.clamp(M, min=None, max=-min_abs_value), diagonal=0)
M = up + low
M = M + torch.eye(row).to(device)
M = 1 / M
M = M - torch.eye(row).to(device)
return M
def guard_one_over_matrix_tensorflow(M, min_abs_value=1e-10):
up = tf.matrix_band_part(tf.maximum(min_abs_value, M), 0, -1)
low = tf.matrix_band_part(tf.minimum(-min_abs_value, M), -1, 0)
M = up + low
M += tf.eye(tf.shape(M)[1])
M = 1 / M
M -= tf.eye(tf.shape(M)[1])
return M
if __name__ == '__main__':
batch_size = 100
P = 5
device = torch.device('cuda:0')
np.random.seed(0)
M = np.random.randn(batch_size, P, P)
M_torch = torch.from_numpy(M).float().to(device)
M_torch = guard_one_over_matrix(M_torch)
M_torch = M_torch.detach().cpu().numpy()
print('M_torch', M_torch)
# Debugging with Tensorflow
M_tensorflow = tf.constant(M, dtype=tf.float32)
M_tensorflow_ = guard_one_over_matrix_tensorflow(M_tensorflow)
sess = tf.Session()
M_tensorflow = sess.run(M_tensorflow_)
print(np.max(np.abs(M_tensorflow - M_torch)))
def compute_svd_K(s):
# s should be BxP
# res[b,i,j] = 1/(s[b,i]^2 - s[b,j]^2) if i != j, 0 otherwise
# res will be BxPxP
s = s**2
res = s.unsqueeze(2) - s.unsqueeze(1)
# making absolute value in res is at least 1e-10
res = guard_one_over_matrix(res)
return res
def compute_svd_K_tensorflow(s):
# s should be BxP
# res[b,i,j] = 1/(s[b,i]^2 - s[b,j]^2) if i != j, 0 otherwise
# res will be BxPxP
s = tf.square(s)
res = tf.expand_dims(s, 2) - tf.expand_dims(s, 1)
# making absolute value in res is at least 1e-10
res = guard_one_over_matrix_tensorflow(res)
return res
if __name__ == '__main__':
batch_size = 100
P = 5
device = torch.device('cuda:0')
np.random.seed(0)
s = np.random.randn(batch_size, P)
s_torch = torch.from_numpy(s).float().to(device)
res_torch = compute_svd_K(s_torch)
res_torch = res_torch.detach().cpu().numpy()
print('res_torch', res_torch)
# Debugging with Tensorflow
s_tensorflow = tf.constant(s, dtype=tf.float32)
res_tensorflow = compute_svd_K_tensorflow(s_tensorflow)
sess = tf.Session()
res_tensorflow = sess.run(res_tensorflow)
print(np.max(np.abs(res_tensorflow - res_torch)))
def custom_svd_v_column_tensorflow(M, col_index=-1):
# Must make sure M is finite. Otherwise cudaSolver might fail.
assert_op = tf.Assert(tf.logical_not(tf.reduce_any(tf.logical_not(tf.is_finite(M)))), [M], summarize=10)
with tf.control_dependencies([assert_op]):
with tf.get_default_graph().gradient_override_map({'Svd': 'CustomSvd'}):
s, u, v = tf.svd(M, name='Svd') # M = usv^T
return v[:, :, col_index]
def register_custom_svd_gradient_tensorflow():
tf.RegisterGradient('CustomSvd')(custom_gradient_svd_tensorflow)
def custom_gradient_svd_tensorflow(op, grad_s, grad_u, grad_v):
s, u, v = op.outputs
# s - BxP
# u - BxNxP, N >= P
# v - BxPxP
v_t = tf.transpose(v, [0, 2, 1])
K = compute_svd_K_tensorflow(s)
inner = tf.transpose(K, [0, 2, 1]) * tf.matmul(v_t, grad_v)
inner = (inner + tf.transpose(inner, [0, 2, 1])) / 2
# ignoring gradient coming from grad_s and grad_u for our purpose
res = tf.matmul(u, tf.matmul(2 * tf.matmul(tf.matrix_diag(s), inner), v_t))
return res
if __name__ == '__main__' and 1:
register_custom_svd_gradient_tensorflow()
batch_size = 100
P = 5
device = torch.device('cuda:0')
np.random.seed(0)
M = np.random.randn(batch_size, P, P)
M_tensorflow = tf.constant(M, dtype=tf.float32)
M_input = tf.placeholder(dtype=tf.float32, shape=[None, P, P])
with tf.get_default_graph().gradient_override_map({'Svd': 'CustomSvd'}):
s, u, v = tf.svd(M_input, name='Svd') # M = usv^T
with tf.Session() as sess:
error = tf.test.compute_gradient_error(M_input, [batch_size, P, P], v, [batch_size, P, P])
print('Error: ', error)
class Custom_svd_v_colum(torch.autograd.Function):
@staticmethod
def forward(ctx, M, col_index=-1):
u, s, v = torch.svd(M, some=True)
out = v[:,:,col_index]
ctx.save_for_backward(u, s, v)
ctx.col_index = col_index
return out
@staticmethod
def backward(ctx, grad_out):
u, s, v = ctx.saved_tensors
col_index = ctx.col_index
grad_v = torch.zeros_like(v)
grad_v[:,:,col_index] = grad_out
v_t = v.transpose(1, 2)
K = compute_svd_K(s)
inner = K.transpose(1,2) * torch.bmm(v_t, grad_v)
inner = (inner + inner.transpose(1, 2)) / 2
# ignoring gradient coming from grad_s and grad_u for our purpose
res = torch.bmm(u, torch.bmm(2 * torch.bmm(torch.diag_embed(s, offset=0, dim1=-2, dim2=-1), inner), v_t))
return res, None
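# Background (explanatory sketch): this backward pass implements the standard
# rule for differentiating an SVD M = U S V^T when gradients flow only through
# V (see e.g. J. Townsend, "Differentiating the Singular Value Decomposition"):
#   dL/dM = U (2 S sym(K^T o (V^T dL/dV))) V^T,
# where o is the elementwise product, sym(X) = (X + X^T) / 2, and
# K_ij = 1 / (s_i^2 - s_j^2) for i != j (0 on the diagonal) is compute_svd_K.
# The grad_s and grad_u contributions are dropped on purpose, since only one
# column of V is consumed downstream.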
if __name__ == '__main__':
batch_size = 100
P = 5
device = torch.device('cuda:0')
np.random.seed(0)
M = np.random.randn(batch_size, P, P)
M_torch = torch.from_numpy(M).float().to(device)
out_torch = Custom_svd_v_colum.apply(M_torch)
out_torch = out_torch.detach().cpu().numpy()
print('out_torch', out_torch)
# Debugging with Tensorflow
M_tensorflow = tf.constant(M, dtype=tf.float32)
out_tensorflow = custom_svd_v_column_tensorflow(M_tensorflow)
sess = tf.Session()
out_tensorflow = sess.run(out_tensorflow)
print(np.minimum(np.abs(out_tensorflow-out_torch), np.abs(out_tensorflow+out_torch)).max())
if __name__ == '__main__' and 1:
batch_size = 4
P = 5
device = torch.device('cuda:0')
np.random.seed(0)
M = np.random.randn(batch_size, P, P)
M_torch = torch.from_numpy(M).float().to(device)
M_torch = torch.nn.Parameter(M_torch, requires_grad=True)
try:
custom_svd_v_colum = Custom_svd_v_colum.apply
torch.autograd.gradcheck(custom_svd_v_colum, (M_torch, -1), raise_exception=True)
print('Test on Custom_svd_v_colum: Success')
except:
print('Test on Custom_svd_v_colum: Failure')
raise
if __name__ == '__main__' and 1:
register_custom_svd_gradient_tensorflow()
batch_size = 100
P = 5
device = torch.device('cuda:0')
np.random.seed(0)
M = np.random.randn(batch_size, P, P)
M_torch = torch.from_numpy(M).float().to(device)
M_torch = torch.nn.Parameter(M_torch, requires_grad=True)
out = Custom_svd_v_colum.apply(M_torch)
out.backward(torch.ones_like(out))
M_grad_torch = M_torch.grad.detach().cpu().numpy()
M_tensorflow = tf.constant(M, dtype=tf.float32)
out = custom_svd_v_column_tensorflow(M_tensorflow)
M_grad_tensorflow = tf.gradients(out, [M_tensorflow])[0]
sess = tf.Session()
M_grad_tensorflow = sess.run(M_grad_tensorflow)
print(np.minimum(np.abs(M_grad_tensorflow - M_grad_torch), np.abs(M_grad_tensorflow + M_grad_torch)).max())
def solve_weighted_tls(A, W):
# A - BxNx3
# W - BxN, positive weights
# Find solution to min x^T A^T diag(W) A x = min ||\sqrt{diag(W)} A x||^2, subject to ||x|| = 1
batch_size, num_points, _ = A.size()
A_p = A.unsqueeze(2) * A.unsqueeze(3) # BxNx3x3
W_p = W.view(batch_size, num_points, 1, 1)
M = torch.sum(W_p * A_p, dim=1) # Bx3x3
x = Custom_svd_v_colum.apply(M) # Bx3
return x
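# Illustrative cross-check (not from the original file; assumes W >= 0 so that
# M = sum_i W_i a_i a_i^T is symmetric PSD): the minimizer of x^T M x over
# unit x is the eigenvector of the smallest eigenvalue, so the last
# right-singular vector returned above should match it up to sign.
if __name__ == '__main__':
    dev = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    A_chk = np.random.randn(4, 16, 3)
    W_chk = np.random.rand(4, 16)
    x_chk = solve_weighted_tls(torch.from_numpy(A_chk).float().to(dev), torch.from_numpy(W_chk).float().to(dev))
    x_chk = x_chk.detach().cpu().numpy()
    M_chk = np.einsum('bn,bni,bnj->bij', W_chk, A_chk, A_chk)  # sum_i W_i a_i a_i^T
    _, eigvecs = np.linalg.eigh(M_chk)  # eigenvalues in ascending order
    x_ref = eigvecs[:, :, 0]  # eigenvector of the smallest eigenvalue
    print(np.minimum(np.abs(x_chk - x_ref), np.abs(x_chk + x_ref)).max())  # expected ~1e-6 or smaller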
def solve_weighted_tls_tensorflow(A, W):
# A - BxNx3
# W - BxN, positive weights
# Find solution to min x^T A^T diag(W) A x = min ||\sqrt{diag(W)} A x||^2, subject to ||x|| = 1
A_p = tf.expand_dims(A, axis=2) * tf.expand_dims(A, axis=3) # BxNx3x3
W_p = tf.expand_dims(tf.expand_dims(W, axis=2), axis=3) # BxNx1x1
M = tf.reduce_sum(W_p * A_p, axis=1) # Bx3x3
x = custom_svd_v_column_tensorflow(M) # Bx3
return x
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
A = np.random.randn(batch_size, num_points, 3)
W = np.random.randn(batch_size, num_points)
A_torch = torch.from_numpy(A).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
x_torch = solve_weighted_tls(A_torch, W_torch)
x_torch = x_torch.detach().cpu().numpy()
print('x_torch', x_torch)
# Debugging with Tensorflow
A_tensorflow = tf.constant(A, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
x_tensorflow = solve_weighted_tls_tensorflow(A_tensorflow, W_tensorflow)
sess = tf.Session()
x_tensorflow = sess.run(x_tensorflow)
print(np.minimum(np.abs(x_tensorflow-x_torch), np.abs(x_tensorflow+x_torch)).max())
| 8,960 | 36.493724 | 113 |
py
|
CPFN
|
CPFN-master/SPFN/primitives.py
|
import math
import random
import numpy as np
def normalized(v, epsilon=1e-12):
return v / (np.linalg.norm(v) + epsilon)
def make_rand_unit_vector(dims=3):
vec = np.array([random.gauss(0, 1) for i in range(dims)])
return normalized(vec)
class Plane: # A finite plane patch spanned by x_axis and y_axis
@staticmethod
def get_primitive_name():
return 'plane'
def __init__(self, n, c, center=None, x_axis=None, y_axis=None, x_range=[-1, 1], y_range=[-1, 1], epsilon=1e-12):
if type(n) is not np.ndarray:
    raise TypeError('Normal {} needs to be a numpy array!'.format(n))
# Plane is defined by {p: n^T p = c}, where the bound is determined by xy_range w.r.t. center
if center is None:
center = n * c
self.n = n / (np.linalg.norm(n) + epsilon)
self.c = c
self.center = center
self.x_range = x_range
self.y_range = y_range
# parameterize the plane by picking axes
if x_axis is None or y_axis is None:
ax_tmp = make_rand_unit_vector()
self.x_axis = normalized(np.cross(ax_tmp, self.n))
self.y_axis = normalized(np.cross(self.n, self.x_axis))
else:
self.x_axis = x_axis
self.y_axis = y_axis
def get_area(self):
return (self.x_range[1]-self.x_range[0])*(self.y_range[1]-self.y_range[0])*np.linalg.norm(np.cross(self.x_axis, self.y_axis))
def distance_to(self, p): # p should be point as a numpy array
return abs(np.dot(self.n, p) - self.c)
def sample_single_point(self, noise_radius=0.0):
origin = self.center
x = random.uniform(*self.x_range)
y = random.uniform(*self.y_range)
p = origin + x * self.x_axis + y * self.y_axis
if noise_radius > 0:
p += random.uniform(0, noise_radius) * make_rand_unit_vector()
return (p, self.n)
@classmethod
def create_random(cls, intercept_range=[-1, 1]):
return cls(make_rand_unit_vector(), random.uniform(*intercept_range))
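# Quick illustrative check (explanatory, not part of the original file):
# noiseless samples from a random plane should lie on it up to float error.
if __name__ == '__main__':
    plane = Plane.create_random()
    samples = [plane.sample_single_point()[0] for _ in range(100)]
    print(max(plane.distance_to(p) for p in samples))  # expected near 0 (~1e-12 or smaller)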
class Sphere:
@staticmethod
def get_primitive_name():
return 'sphere'
def __init__(self, center, radius):
self.center = center
self.radius = radius
def get_area(self):
return 4 * np.pi * self.radius * self.radius
def sample_single_point(self):
n = make_rand_unit_vector()
p = self.center + self.radius * n
return (p, n)
class Cylinder:
@staticmethod
def get_primitive_name():
return 'cylinder'
def __init__(self, center, radius, axis, height=10.0):
self.center = center
self.radius = radius
self.axis = axis
self.height = height
tmp_axis = make_rand_unit_vector()
self.x_axis = normalized(np.cross(tmp_axis, self.axis))
self.y_axis = normalized(np.cross(self.axis, self.x_axis))
def get_area(self):
return 2 * np.pi * self.radius * self.height
def sample_single_point(self):
kx, ky = make_rand_unit_vector(dims=2)
n = kx * self.x_axis + ky * self.y_axis
p = random.uniform(-self.height/2, self.height/2) * self.axis + self.radius * n + self.center
return (p, n)
class Cone:
@staticmethod
def get_primitive_name():
return 'cone'
def __init__(self, apex, axis, half_angle, z_min=0.0, z_max=10.0):
self.apex = apex
self.axis = axis
self.half_angle = half_angle
self.z_min = z_min
self.z_max = z_max
class Box:
def __init__(self, center, axes, halflengths):
# axes is 3x3, representing an orthogonal frame
# halflengths is a length-3 array
self.center = center
self.axes = axes
self.halflengths = halflengths
def get_six_planes(self):
result = []
for i, axis in enumerate(self.axes):
for sgn in range(-1, 2, 2):
n = sgn * axis
center = self.center + self.halflengths[i] * n
c = np.dot(n, center)
j = (i + 1) % 3
k = (j + 1) % 3
x_range = [-self.halflengths[j], self.halflengths[j]]
y_range = [-self.halflengths[k], self.halflengths[k]]
plane = Plane(n, c, center=center, x_axis=self.axes[j], y_axis=self.axes[k], x_range=x_range, y_range=y_range)
result.append(plane)
return result
@classmethod
def create_random(cls, center_range=[-1, 1], halflength_range=[0.5,2]):
center = np.array([random.uniform(*center_range) for _ in range(3)])
x_axis = make_rand_unit_vector()
ax_tmp = make_rand_unit_vector()
y_axis = normalized(np.cross(ax_tmp, x_axis))
z_axis = normalized(np.cross(x_axis, y_axis))
axes = [x_axis, y_axis, z_axis]
halflengths = [random.uniform(*halflength_range) for _ in range(3)]
return Box(center, axes, halflengths)
| 4,985 | 33.625 | 133 |
py
|
CPFN
|
CPFN-master/SPFN/cylinder_fitter.py
|
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Cylinder
from SPFN.differentiable_tls import solve_weighted_tls, solve_weighted_tls_tensorflow
from SPFN.geometry_utils import compute_consistent_plane_frame, compute_consistent_plane_frame_tensorflow, weighted_sphere_fitting, weighted_sphere_fitting_tensorflow
def compute_parameters(P, W, X):
# First determine n as the solution to \min \sum W_i (X_i \cdot n)^2
batch_size, n_points, _ = P.size()
_, _, n_max_primitives = W.size()
W_reshaped = W.transpose(1,2).contiguous().view(batch_size * n_max_primitives, n_points) # BKxN
X_reshaped = X.unsqueeze(1).expand(batch_size, n_max_primitives, n_points, 3).contiguous().view(batch_size * n_max_primitives, n_points, 3)
n = solve_weighted_tls(X_reshaped, W_reshaped).view(batch_size, n_max_primitives, 3) # BxKx3
x_axes, y_axes = compute_consistent_plane_frame(n.view(batch_size * n_max_primitives, 3))
x_axes = x_axes.view(batch_size, n_max_primitives, 3) # BxKx3
y_axes = y_axes.view(batch_size, n_max_primitives, 3) # BxKx3
x_coord = torch.sum(P.unsqueeze(1) * x_axes.unsqueeze(2), dim=3) # BxKxN
y_coord = torch.sum(P.unsqueeze(1) * y_axes.unsqueeze(2), dim=3) # BxKxN
P_proj = torch.stack([x_coord, y_coord], dim=3) # BxKxNx2, 2D projection point
P_proj_reshaped = P_proj.view(batch_size * n_max_primitives, n_points, 2) # BKxNx2
circle_center, circle_radius_squared = weighted_sphere_fitting(P_proj_reshaped, W_reshaped)
circle_center = circle_center.view(batch_size, n_max_primitives, 2) # BxKx2
center = circle_center[:,:,0].unsqueeze(2) * x_axes + circle_center[:,:,1].unsqueeze(2) * y_axes # BxKx3
radius_square = circle_radius_squared.view(batch_size, n_max_primitives) # BxK
return n, center, radius_square
def compute_parameters_tensorflow(P, W, X):
# First determine n as the solution to \min \sum W_i (X_i \cdot n)^2
batch_size = tf.shape(P)[0]
n_points = tf.shape(P)[1]
n_max_primitives = tf.shape(W)[2]
W_reshaped = tf.reshape(tf.transpose(W, [0, 2, 1]), [batch_size * n_max_primitives, n_points]) # BKxN
X_reshaped = tf.reshape(tf.tile(tf.expand_dims(X, axis=1), [1, n_max_primitives, 1, 1]), [batch_size * n_max_primitives, n_points, 3]) # BKxNx3
n = tf.reshape(solve_weighted_tls_tensorflow(X_reshaped, W_reshaped), [batch_size, n_max_primitives, 3]) # BxKx3
x_axes, y_axes = compute_consistent_plane_frame_tensorflow(tf.reshape(n, [batch_size * n_max_primitives, 3]))
x_axes = tf.reshape(x_axes, [batch_size, n_max_primitives, 3]) # BxKx3
y_axes = tf.reshape(y_axes, [batch_size, n_max_primitives, 3]) # BxKx3
x_coord = tf.reduce_sum(tf.expand_dims(P, axis=1) * tf.expand_dims(x_axes, axis=2), axis=3) # BxKxN
y_coord = tf.reduce_sum(tf.expand_dims(P, axis=1) * tf.expand_dims(y_axes, axis=2), axis=3) # BxKxN
P_proj = tf.stack([x_coord, y_coord], axis=3) # BxKxNx2, 2D projection point
P_proj_reshaped = tf.reshape(P_proj, [batch_size * n_max_primitives, n_points, 2]) # BKxNx2
circle_center, circle_radius_squared = weighted_sphere_fitting_tensorflow(P_proj_reshaped, W_reshaped)
circle_center = tf.reshape(circle_center, [batch_size, n_max_primitives, 2]) # BxKx2
center = tf.expand_dims(circle_center[:, :, 0], axis=2) * x_axes + tf.expand_dims(circle_center[:, :, 1], axis=2) * y_axes # BxKx3
radius_square = tf.reshape(circle_radius_squared, [batch_size, n_max_primitives]) # BxK
return n, center, radius_square
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
n_torch, center_torch, radius_square_torch = compute_parameters(P_torch, W_torch, X_torch)
n_torch = n_torch.detach().cpu().numpy()
center_torch = center_torch.detach().cpu().numpy()
radius_square_torch = radius_square_torch.detach().cpu().numpy()
print('n_torch', n_torch)
print('center_torch', center_torch)
print('radius_square_torch', radius_square_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
X_tensorflow = tf.constant(X, dtype=tf.float32)
n_tensorflow, center_tensorflow, radius_square_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
sess = tf.Session()
n_tensorflow, center_tensorflow, radius_square_tensorflow = sess.run([n_tensorflow, center_tensorflow, radius_square_tensorflow])
print(np.minimum(np.abs(n_tensorflow - n_torch), np.abs(n_tensorflow + n_torch)).max())
print(np.abs(center_tensorflow - center_torch).max())
print(np.abs(radius_square_tensorflow - radius_square_torch).max())
def sqrt_safe(x):
return torch.sqrt(torch.abs(x) + 1e-10)
def compute_residue_single(axis, center, radius_squared, p):
p_minus_c = p - center
p_minus_c_sqr = torch.sum(p_minus_c**2, dim=-1)
p_minus_c_dot_n = torch.sum(p_minus_c * axis, dim=-1)
return (sqrt_safe(p_minus_c_sqr - p_minus_c_dot_n**2) - sqrt_safe(radius_squared))**2
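# Geometry note (explanatory): the distance from a point p to an infinite
# cylinder with unit axis direction n, axis point c and radius r is
#   | sqrt(||p - c||^2 - ((p - c) . n)^2) - r |,
# because ||p - c||^2 - ((p - c) . n)^2 is the squared distance from p to the
# axis line. compute_residue_single returns the square of that distance;
# sqrt_safe guards against tiny negative arguments caused by floating point
# cancellation.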
def sqrt_safe_tensorflow(x):
return tf.sqrt(tf.abs(x) + 1e-10)
def compute_residue_single_tensorflow(axis, center, radius_squared, p):
p_minus_c = p - center
p_minus_c_sqr = tf.reduce_sum(tf.square(p_minus_c), axis=-1)
p_minus_c_dot_n = tf.reduce_sum(p_minus_c * axis, axis=-1)
return tf.square(sqrt_safe_tensorflow(p_minus_c_sqr - tf.square(p_minus_c_dot_n)) - sqrt_safe_tensorflow(radius_squared))
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
axis = np.random.randn(batch_size, num_points, 3)
center = np.random.randn(batch_size, num_points, 3)
radius_squared = np.random.randn(batch_size, num_points)
p = np.random.randn(batch_size, num_points, 3)
axis_torch = torch.from_numpy(axis).float().to(device)
center_torch = torch.from_numpy(center).float().to(device)
radius_squared_torch = torch.from_numpy(radius_squared).float().to(device)
p_torch = torch.from_numpy(p).float().to(device)
loss_torch = compute_residue_single(axis_torch, center_torch, radius_squared_torch, p_torch)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
axis_tensorflow = tf.constant(axis, dtype=tf.float32)
center_tensorflow = tf.constant(center, dtype=tf.float32)
radius_squared_tensorflow = tf.constant(radius_squared, dtype=tf.float32)
p_tensorflow = tf.constant(p, dtype=tf.float32)
loss_tensorflow = compute_residue_single_tensorflow(axis_tensorflow, center_tensorflow, radius_squared_tensorflow, p_tensorflow)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
def acos_safe(x):
return torch.acos(torch.clamp(x, min=-1.0+1e-6, max=1.0-1e-6))
def compute_parameter_loss(predicted_axis, gt_axis, matching_indices, angle_diff):
# predicted_axis: BxK1x3
# gt_axis: BXK2x3
# matching indices: BxK2
batch_size, nb_primitives, _ = gt_axis.size()
predicted_axis = torch.gather(predicted_axis, 1, matching_indices.unsqueeze(2).expand(batch_size, nb_primitives, 3))
dot_abs = torch.abs(torch.sum(predicted_axis * gt_axis, axis=2))
if angle_diff:
return acos_safe(dot_abs) # BxK
else:
return 1.0 - dot_abs # BxK
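# Note (explanatory): the absolute dot product makes the loss invariant to the
# sign of the predicted axis, which is only defined up to orientation. With
# angle_diff the result is the unsigned angle in [0, pi/2]; otherwise it is
# 1 - |cos(angle)|.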
def batched_gather(data, indices, axis):
# data - Bx...xKx..., axis is where dimension K is
# indices - BxK
# output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
assert axis >= 1
ndims = data.get_shape().ndims # allow dynamic rank
if axis > 1:
# transpose data to BxKx...
perm = np.arange(ndims)
perm[axis] = 1
perm[1] = axis
data = tf.transpose(data, perm=perm)
batch_size = tf.shape(data)[0]
batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
gathered_data = tf.gather_nd(data, indices=indices)
if axis > 1:
gathered_data = tf.transpose(gathered_data, perm=perm)
return gathered_data
def acos_safe_tensorflow(x):
return tf.math.acos(tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6))
def compute_parameter_loss_tensorflow(predicted_axis, gt_axis, matching_indices, angle_diff):
n = batched_gather(predicted_axis, matching_indices, axis=1)
dot_abs = tf.abs(tf.reduce_sum(n * gt_axis, axis=2))
if angle_diff:
return acos_safe_tensorflow(dot_abs) # BxK
else:
return 1.0 - dot_abs # BxK
if __name__ == '__main__':
batch_size = 100
num_primitives1 = 15
num_primitives2 = 5
device = torch.device('cuda:0')
np.random.seed(0)
predicted_axis = np.random.randn(batch_size, num_primitives1, 3)
gt_axis = np.random.randn(batch_size, num_primitives2, 3)
matching_indices = np.random.randint(0, 15, (batch_size, num_primitives2))
angle_diff = True
predicted_axis_torch = torch.from_numpy(predicted_axis).float().to(device)
gt_axis_torch = torch.from_numpy(gt_axis).float().to(device)
matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
loss_torch = compute_parameter_loss(predicted_axis_torch, gt_axis_torch, matching_indices_torch, angle_diff)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
predicted_axis_tensorflow = tf.constant(predicted_axis, dtype=tf.float32)
gt_axis_tensorflow = tf.constant(gt_axis, dtype=tf.float32)
matching_indices_tensorflow = tf.constant(matching_indices, dtype=tf.int32)
loss_tensorflow = compute_parameter_loss_tensorflow(predicted_axis_tensorflow, gt_axis_tensorflow, matching_indices_tensorflow, angle_diff)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
def create_primitive_from_dict(d):
assert d['type'] == 'cylinder'
location = np.array([d['location_x'], d['location_y'], d['location_z']], dtype=float)
axis = np.array([d['axis_x'], d['axis_y'], d['axis_z']], dtype=float)
radius = float(d['radius'])
return Cylinder(center=location, radius=radius, axis=axis)
def extract_parameter_data_as_dict(primitives, n_max_primitives):
n = np.zeros(dtype=float, shape=[n_max_primitives, 3])
for i, primitive in enumerate(primitives):
if isinstance(primitive, Cylinder):
n[i] = primitive.axis
return {
'cylinder_axis_gt': n
}
def extract_predicted_parameters_as_json(cylinder_center, cylinder_radius_squared, cylinder_axis, k):
cylinder = Cylinder(cylinder_center, np.sqrt(cylinder_radius_squared), cylinder_axis, height=5)
return {
'type': 'cylinder',
'center_x': float(cylinder.center[0]),
'center_y': float(cylinder.center[1]),
'center_z': float(cylinder.center[2]),
'radius': float(cylinder.radius),
'axis_x': float(cylinder.axis[0]),
'axis_y': float(cylinder.axis[1]),
'axis_z': float(cylinder.axis[2]),
'height': float(cylinder.height),
'label': k,
}
| 11,804 | 51.234513 | 166 |
py
|
CPFN
|
CPFN-master/SPFN/losses_implementation.py
|
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from scipy.optimize import linear_sum_assignment
from SPFN import plane_fitter, sphere_fitter, cylinder_fitter, cone_fitter
# Segmentation Loss
def hungarian_matching(W_pred, I_gt):
# This function does not backprop gradients; it only outputs matching indices
# W_pred - BxNxK
# I_gt - BxN, may contain -1's
# Output: matching_indices - BxK, where (b,k)th ground truth primitive is matched with (b, matching_indices[b, k])
# where only n_gt_labels entries on each row have meaning. The matching does not include gt background instance
batch_size, n_points, n_max_labels = W_pred.size()
matching_indices = torch.zeros([batch_size, n_max_labels], dtype=torch.long).to(W_pred.device)
for b in range(batch_size):
# assuming I_gt[b] does not have gap
n_gt_labels = torch.max(I_gt[b]).item() + 1 # this is K'
W_gt = torch.eye(n_gt_labels+1).to(I_gt.device)[I_gt[b]]
dot = torch.mm(W_gt.transpose(0,1), W_pred[b])
denominator = torch.sum(W_gt, dim=0).unsqueeze(1) + torch.sum(W_pred[b], dim=0).unsqueeze(0) - dot
cost = dot / torch.clamp(denominator, min=1e-10, max=None) # K'xK
cost = cost[:n_gt_labels, :] # remove last row, corresponding to matching gt background instance
_, col_ind = linear_sum_assignment(-cost.detach().cpu().numpy()) # want max solution
col_ind = torch.from_numpy(col_ind).long().to(matching_indices.device)
matching_indices[b, :n_gt_labels] = col_ind
return matching_indices
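# Note (explanatory): cost[k', k] above is a soft IoU between the one-hot
# column of ground-truth label k' and predicted membership column k,
#   IoU = <w_gt, w_pred> / (|w_gt| + |w_pred| - <w_gt, w_pred>),
# and linear_sum_assignment on -cost returns the one-to-one matching that
# maximizes the total IoU.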
def hungarian_matching_tensorflow(W_pred, I_gt):
# This non-TF function does not backprop gradients; it only outputs matching indices (call it through tf.py_func)
# W_pred - BxNxK
# I_gt - BxN, may contain -1's
# Output: matching_indices - BxK, where (b,k)th ground truth primitive is matched with (b, matching_indices[b, k])
# where only n_gt_labels entries on each row have meaning. The matching does not include gt background instance
batch_size = I_gt.shape[0]
n_points = I_gt.shape[1]
n_max_labels = W_pred.shape[2]
matching_indices = np.zeros([batch_size, n_max_labels], dtype=np.int32)
for b in range(batch_size):
# assuming I_gt[b] does not have gap
n_gt_labels = np.max(I_gt[b]) + 1 # this is K'
W_gt = np.zeros([n_points, n_gt_labels + 1]) # HACK: add an extra column to contain -1's
W_gt[np.arange(n_points), I_gt[b]] = 1.0 # NxK'
dot = np.sum(np.expand_dims(W_gt, axis=2) * np.expand_dims(W_pred[b], axis=1), axis=0) # K'xK
denominator = np.expand_dims(np.sum(W_gt, axis=0), axis=1) + np.expand_dims(np.sum(W_pred[b], axis=0), axis=0) - dot
cost = dot / np.maximum(denominator, 1e-10) # K'xK
cost = cost[:n_gt_labels, :] # remove last row, corresponding to matching gt background instance
_, col_ind = linear_sum_assignment(-cost) # want max solution
matching_indices[b, :n_gt_labels] = col_ind
return matching_indices
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
W_pred = np.random.rand(batch_size, num_points, n_max_instances)
I_gt = np.random.randint(-1, n_max_instances, (batch_size, num_points))
W_pred = W_pred / np.linalg.norm(W_pred, axis=2, keepdims=True)
W_pred_torch = torch.from_numpy(W_pred).float().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
matching_indices_torch = hungarian_matching(W_pred_torch, I_gt_torch)
matching_indices_torch = matching_indices_torch.detach().cpu().numpy()
print('matching_indices_torch', matching_indices_torch)
# Debugging with Tensorflow
W_pred_tensorflow = tf.constant(W_pred, dtype=tf.float32)
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
matching_indices_tensorflow = tf.py_func(hungarian_matching_tensorflow, [W_pred_tensorflow, I_gt_tensorflow], Tout=tf.int32)
sess = tf.Session()
matching_indices_tensorflow = sess.run(matching_indices_tensorflow)
print(np.abs(matching_indices_torch - matching_indices_tensorflow).max())
def compute_miou_loss(W, I_gt, matching_indices, div_eps=1e-10):
# W - BxNxK
# I_gt - BxN
batch_size, n_points, n_max_labels = W.size()
_, n_labels = matching_indices.size()
W_reordered = torch.gather(W, 2, matching_indices.unsqueeze(1).expand(batch_size, n_points, n_labels)) # BxNxK
# notice in tf.one_hot, -1 will result in a zero row, which is what we want
W_gt = torch.eye(n_labels+2).to(I_gt.device)[I_gt]
W_gt = W_gt[:,:,:n_labels]
dot = torch.sum(W_gt * W_reordered, axis=1) # BxK
denominator = torch.sum(W_gt, dim=1) + torch.sum(W_reordered, dim=1) - dot
mIoU = dot / (denominator + div_eps) # BxK
return 1.0 - mIoU, 1 - dot / n_points
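# Note (explanatory): the first return value is the per-instance 1 - mIoU used
# for mode_seg='mIoU'; the second, 1 - <w_gt, w_reordered> / N, penalizes only
# the missing intersection mass and is selected by mode_seg='intersection' in
# compute_all_losses below.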
def batched_gather_tensorflow(data, indices, axis):
# data - Bx...xKx..., axis is where dimension K is
# indices - BxK
# output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
assert axis >= 1
ndims = data.get_shape().ndims # allow dynamic rank
if axis > 1:
# transpose data to BxKx...
perm = np.arange(ndims)
perm[axis] = 1
perm[1] = axis
data = tf.transpose(data, perm=perm)
batch_size = tf.shape(data)[0]
batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
gathered_data = tf.gather_nd(data, indices=indices)
if axis > 1:
gathered_data = tf.transpose(gathered_data, perm=perm)
return gathered_data
def compute_miou_loss_tensorflow(W, I_gt, matching_indices):
# W - BxNxK
# I_gt - BxN
W_reordered = batched_gather_tensorflow(W, indices=matching_indices, axis=2) # BxNxK
depth = tf.shape(W)[2]
# notice in tf.one_hot, -1 will result in a zero row, which is what we want
W_gt = tf.one_hot(I_gt, depth=depth, dtype=tf.float32) # BxNxK
dot = tf.reduce_sum(W_gt * W_reordered, axis=1) # BxK
denominator = tf.reduce_sum(W_gt, axis=1) + tf.reduce_sum(W_reordered, axis=1) - dot
mIoU = dot / (denominator + 1e-10) # BxK
return 1.0 - mIoU
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
W = np.random.rand(batch_size, num_points, n_max_instances)
I_gt = np.random.randint(-1, n_max_instances, (batch_size, num_points))
W = W / np.linalg.norm(W, axis=2, keepdims=True)
W_torch = torch.from_numpy(W).float().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
with torch.no_grad():
matching_indices_torch = hungarian_matching(W_torch, I_gt_torch)
loss_torch, _ = compute_miou_loss(W_torch, I_gt_torch, matching_indices_torch)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
W_tensorflow = tf.constant(W, dtype=tf.float32)
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
matching_indices_tensorflow = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W_tensorflow, I_gt_tensorflow], Tout=tf.int32))
loss_tensorflow = compute_miou_loss_tensorflow(W_tensorflow, I_gt_tensorflow, matching_indices_tensorflow)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
# Normal Loss
def acos_safe(x):
return torch.acos(torch.clamp(x, min=-1.0+1e-6, max=1.0-1e-6))
def compute_normal_loss(normal, normal_gt, angle_diff):
# normal, normal_gt: BxNx3
# Assume normals are unoriented
dot_abs = torch.abs(torch.sum(normal * normal_gt, dim=2)) # BxN
if angle_diff:
return torch.mean(acos_safe(dot_abs), dim=1)
else:
return torch.mean(1.0 - dot_abs, dim=1)
def acos_safe_tensorflow(x):
return tf.math.acos(tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6))
def compute_normal_loss_tensorflow(normal, normal_gt, angle_diff):
# normal, normal_gt: BxNx3
# Assume normals are unoriented
dot_abs = tf.abs(tf.reduce_sum(normal * normal_gt, axis=2)) # BxN
if angle_diff:
return tf.reduce_mean(acos_safe_tensorflow(dot_abs), axis=1)
else:
return tf.reduce_mean(1.0 - dot_abs, axis=1)
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
normal = np.random.randn(batch_size, num_points, 3)
normal_gt = np.random.randn(batch_size, num_points, 3)
angle_diff = True
normal_torch = torch.from_numpy(normal).float().to(device)
normal_gt_torch = torch.from_numpy(normal_gt).float().to(device)
loss_torch = compute_normal_loss(normal_torch, normal_gt_torch, angle_diff)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
normal_tensorflow = tf.constant(normal, dtype=tf.float32)
normal_gt_tensorflow = tf.constant(normal_gt, dtype=tf.float32)
loss_tensorflow = compute_normal_loss_tensorflow(normal_tensorflow, normal_gt_tensorflow, angle_diff)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
# Type Loss
def compute_per_point_type_loss(per_point_type, I_gt, T_gt, is_eval):
# For training, per_point_type is BxNxQ, where Q = n_registered_primitives
# For test, per_point_type is BxN
# I_gt - BxN, allow -1
# T_gt - BxK
batch_size, n_points = I_gt.size()
per_point_type_gt = torch.gather(T_gt, 1, torch.clamp(I_gt, min=0, max=None))
if is_eval:
type_loss = 1.0 - (per_point_type == per_point_type_gt).float()
else:
type_loss = torch.nn.functional.cross_entropy(per_point_type.contiguous().view(batch_size*n_points, -1), per_point_type_gt.view(batch_size*n_points), reduction='none') # BxN
type_loss = type_loss.view(batch_size, n_points)
# do not add loss to background points in gt
type_loss = torch.where(I_gt == -1, torch.zeros_like(type_loss), type_loss)
return torch.sum(type_loss, dim=1) / torch.sum((I_gt != -1).float(), dim=1) # B
def compute_per_point_type_loss_tensorflow(per_point_type, I_gt, T_gt, is_eval):
# For training, per_point_type is BxNxQ, where Q = n_registered_primitives
# For test, per_point_type is BxN
# I_gt - BxN, allow -1
# T_gt - BxK
batch_size = tf.shape(I_gt)[0]
n_points = tf.shape(I_gt)[1]
indices_0 = tf.tile(tf.expand_dims(tf.range(batch_size), axis=1), [1, n_points]) # BxN
indices = tf.stack([indices_0, tf.maximum(0, I_gt)], axis=2)
per_point_type_gt = tf.gather_nd(T_gt, indices=indices) # BxN
if is_eval:
type_loss = 1.0 - tf.to_float(tf.equal(per_point_type, per_point_type_gt))
else:
type_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=per_point_type, labels=per_point_type_gt) # BxN
# do not add loss to background points in gt
type_loss = tf.where(tf.equal(I_gt, -1), tf.zeros_like(type_loss), type_loss)
return tf.reduce_sum(type_loss, axis=1) / tf.to_float(tf.count_nonzero(tf.not_equal(I_gt, -1), axis=1)) # B
if __name__ == '__main__':
batch_size = 100
num_points = 1024
Q = 4
K = 10
device = torch.device('cuda:0')
np.random.seed(0)
per_point_type = np.random.randn(batch_size, num_points, Q)
I_gt = np.random.randint(-1, K, (batch_size, num_points))
T_gt = np.random.randint(0, Q, (batch_size, K))
is_eval = False
per_point_type_torch = torch.from_numpy(per_point_type).float().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
loss_torch = compute_per_point_type_loss(per_point_type_torch, I_gt_torch, T_gt_torch, is_eval)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
per_point_type_tensorflow = tf.constant(per_point_type, dtype=tf.float32)
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
loss_tensorflow = compute_per_point_type_loss_tensorflow(per_point_type_tensorflow, I_gt_tensorflow, T_gt_tensorflow, is_eval)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
def compute_parameters(P, W, X, classes=['plane','sphere','cylinder','cone']):
parameters = {}
for class_ in classes:
if class_ == 'plane':
plane_normal, plane_center = plane_fitter.compute_parameters(P, W)
parameters['plane_normal'] = plane_normal
parameters['plane_center'] = plane_center
elif class_ == 'sphere':
sphere_center, sphere_radius_squared = sphere_fitter.compute_parameters(P, W)
parameters['sphere_center'] = sphere_center
parameters['sphere_radius_squared'] = sphere_radius_squared
elif class_ == 'cylinder':
cylinder_axis, cylinder_center, cylinder_radius_squared = cylinder_fitter.compute_parameters(P, W, X)
parameters['cylinder_axis'] = cylinder_axis
parameters['cylinder_center'] = cylinder_center
parameters['cylinder_radius_squared'] = cylinder_radius_squared
elif class_ == 'cone':
cone_apex, cone_axis, cone_half_angle = cone_fitter.compute_parameters(P, W, X)
parameters['cone_apex'] = cone_apex
parameters['cone_axis'] = cone_axis
parameters['cone_half_angle'] = cone_half_angle
else:
raise NotImplementedError
return parameters
def compute_parameters_tensorflow(P, W, X, classes=['plane','sphere','cylinder','cone']):
parameters = {}
for class_ in classes:
if class_ == 'plane':
plane_normal, plane_center = plane_fitter.compute_parameters_tensorflow(P, W)
parameters['plane_normal'] = plane_normal
parameters['plane_center'] = plane_center
elif class_ == 'sphere':
sphere_center, sphere_radius_squared = sphere_fitter.compute_parameters_tensorflow(P, W)
parameters['sphere_center'] = sphere_center
parameters['sphere_radius_squared'] = sphere_radius_squared
elif class_ == 'cylinder':
cylinder_axis, cylinder_center, cylinder_radius_squared = cylinder_fitter.compute_parameters_tensorflow(P, W, X)
parameters['cylinder_axis'] = cylinder_axis
parameters['cylinder_center'] = cylinder_center
parameters['cylinder_radius_squared'] = cylinder_radius_squared
elif class_ == 'cone':
cone_apex, cone_axis, cone_half_angle = cone_fitter.compute_parameters_tensorflow(P, W, X)
parameters['cone_apex'] = cone_apex
parameters['cone_axis'] = cone_axis
parameters['cone_half_angle'] = cone_half_angle
else:
raise NotImplementedError
return parameters
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
parameters = compute_parameters(P_torch, W_torch, X_torch)
plane_normal_torch, plane_center_torch, sphere_center_torch, sphere_radius_squared_torch, cylinder_axis_torch, cylinder_center_torch, cylinder_radius_square_torch, cone_apex_torch, cone_axis_torch, cone_half_angle_torch = \
parameters['plane_normal'], parameters['plane_center'], parameters['sphere_center'], parameters['sphere_radius_squared'], parameters['cylinder_axis'], parameters['cylinder_center'], parameters['cylinder_radius_squared'], parameters['cone_apex'], parameters['cone_axis'], parameters['cone_half_angle']
plane_normal_torch = plane_normal_torch.detach().cpu().numpy()
plane_center_torch = plane_center_torch.detach().cpu().numpy()
sphere_center_torch = sphere_center_torch.detach().cpu().numpy()
sphere_radius_squared_torch = sphere_radius_squared_torch.detach().cpu().numpy()
cylinder_axis_torch = cylinder_axis_torch.detach().cpu().numpy()
cylinder_center_torch = cylinder_center_torch.detach().cpu().numpy()
cylinder_radius_square_torch = cylinder_radius_square_torch.detach().cpu().numpy()
cone_apex_torch = cone_apex_torch.detach().cpu().numpy()
cone_axis_torch = cone_axis_torch.detach().cpu().numpy()
cone_half_angle_torch = cone_half_angle_torch.detach().cpu().numpy()
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
X_tensorflow = tf.constant(X, dtype=tf.float32)
parameters = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
sess = tf.Session()
plane_normal_tensorflow, plane_center_tensorflow, sphere_center_tensorflow, sphere_radius_squared_tensorflow, cylinder_axis_tensorflow, cylinder_center_tensorflow, cylinder_radius_square_tensorflow, cone_apex_tensorflow, cone_axis_tensorflow, cone_half_angle_tensorflow = \
sess.run([parameters['plane_normal'], parameters['plane_center'], parameters['sphere_center'], parameters['sphere_radius_squared'], parameters['cylinder_axis'], parameters['cylinder_center'], parameters['cylinder_radius_squared'], parameters['cone_apex'], parameters['cone_axis'], parameters['cone_half_angle']])
print(np.minimum(np.abs(plane_normal_tensorflow - plane_normal_torch), np.abs(plane_normal_tensorflow + plane_normal_torch)).max())
print(np.minimum(np.abs(plane_center_tensorflow - plane_center_torch), np.abs(plane_center_tensorflow + plane_center_torch)).max())
print(np.abs(sphere_center_tensorflow - sphere_center_torch).max())
print(np.abs(sphere_radius_squared_tensorflow - sphere_radius_squared_torch).max())
print(np.minimum(np.abs(cylinder_axis_tensorflow - cylinder_axis_torch), np.abs(cylinder_axis_tensorflow + cylinder_axis_torch)).max())
print(np.abs(cylinder_center_tensorflow - cylinder_center_torch).max())
print(np.abs(cylinder_radius_square_tensorflow - cylinder_radius_square_torch).max())
print(np.abs(cone_apex_tensorflow - cone_apex_torch).max())
print(np.minimum(np.abs(cone_axis_tensorflow - cone_axis_torch), np.abs(cone_axis_tensorflow + cone_axis_torch)).max())
print(np.abs(cone_half_angle_tensorflow - cone_half_angle_torch).max())
# Residue Loss
def compute_residue_loss(parameters, matching_indices, points_per_instance, T_gt, classes=['plane','sphere','cylinder','cone']):
# parameters is a dictionary where each key represents a different parameter
# points_per_instance of size BxKxN'x3
residue_losses = [] # a length T array of BxK tensors
residue_per_point_array = [] # a length T array of BxKxN' tensors
#residue_per_class = []
batch_size, n_labels = matching_indices.size()
for class_ in classes:
if class_ == 'plane':
residue_per_point = plane_fitter.compute_residue_single(torch.gather(parameters['plane_normal'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
torch.gather(parameters['plane_center'], 1, matching_indices).unsqueeze(2),
points_per_instance)
elif class_ == 'sphere':
residue_per_point = sphere_fitter.compute_residue_single(torch.gather(parameters['sphere_center'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
torch.gather(parameters['sphere_radius_squared'], 1, matching_indices.expand(batch_size, n_labels)).unsqueeze(2),
points_per_instance)
elif class_ == 'cylinder':
residue_per_point = cylinder_fitter.compute_residue_single(torch.gather(parameters['cylinder_axis'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
torch.gather(parameters['cylinder_center'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
torch.gather(parameters['cylinder_radius_squared'], 1, matching_indices.expand(batch_size, n_labels)).unsqueeze(2),
points_per_instance)
elif class_ == 'cone':
residue_per_point = cone_fitter.compute_residue_single(torch.gather(parameters['cone_apex'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
torch.gather(parameters['cone_axis'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
torch.gather(parameters['cone_half_angle'], 1, matching_indices.expand(batch_size, n_labels)).unsqueeze(2),
points_per_instance)
else:
raise NotImplementedError
#residue_per_class.append(residue_per_point)
residue_per_point_array.append(residue_per_point)
residue_losses.append(torch.mean(residue_per_point, dim=2))
residue_losses = torch.stack(residue_losses, dim=2)
residue_loss = torch.gather(residue_losses, 2, T_gt.unsqueeze(2)).squeeze(2)
residue_per_point_array = torch.stack(residue_per_point_array, dim=3) # BxKxN'xT
return residue_loss, residue_per_point_array#, residue_per_class
def aggregate_loss_from_stacked_tensorflow(loss_stacked, T_gt):
# loss_stacked - BxKxT, T_gt - BxK
# out[b, k] = loss_stacked[b, k, T_gt[b, k]]
B = tf.shape(loss_stacked)[0]
K = tf.shape(loss_stacked)[1]
indices_0 = tf.tile(tf.expand_dims(tf.range(B), axis=1), multiples=[1, K]) # BxK
indices_1 = tf.tile(tf.expand_dims(tf.range(K), axis=0), multiples=[B, 1]) # BxK
indices = tf.stack([indices_0, indices_1, T_gt], axis=2) # BxKx3
return tf.gather_nd(loss_stacked, indices=indices)
def compute_residue_loss_tensorflow(parameters, matching_indices, points_per_instance, T_gt, classes=['plane','sphere','cylinder','cone']):
residue_losses = [] # a length T array of BxK tensors
residue_per_point_array = [] # a length T array of BxKxN' tensors
#residue_per_class = []
for class_ in classes:
if class_ == 'plane':
residue_per_point = plane_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['plane_normal'], matching_indices, axis=1), axis=2),
tf.expand_dims(batched_gather_tensorflow(parameters['plane_center'], matching_indices, axis=1), axis=2),
points_per_instance)
elif class_ == 'sphere':
residue_per_point = sphere_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['sphere_center'], matching_indices, axis=1), axis=2),
tf.expand_dims(batched_gather_tensorflow(parameters['sphere_radius_squared'], matching_indices, axis=1), axis=2),
points_per_instance)
elif class_ == 'cylinder':
residue_per_point = cylinder_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['cylinder_axis'], matching_indices, axis=1), axis=2),
tf.expand_dims(batched_gather_tensorflow(parameters['cylinder_center'], matching_indices, axis=1), axis=2),
tf.expand_dims(batched_gather_tensorflow(parameters['cylinder_radius_squared'], matching_indices, axis=1), axis=2),
points_per_instance)
elif class_ == 'cone':
residue_per_point = cone_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['cone_apex'], matching_indices, axis=1), axis=2),
tf.expand_dims(batched_gather_tensorflow(parameters['cone_axis'], matching_indices, axis=1), axis=2),
tf.expand_dims(batched_gather_tensorflow(parameters['cone_half_angle'], matching_indices, axis=1), axis=2),
points_per_instance)
else:
raise NotImplementedError
#residue_per_class.append(residue_per_point)
residue_per_point_array.append(residue_per_point)
residue_losses.append(tf.reduce_mean(residue_per_point, axis=2))
residue_losses = tf.stack(residue_losses, axis=2)
residue_per_point_array = tf.stack(residue_per_point_array, axis=3) # BxKxN'xT
# Aggregate losses across fitters
residue_loss = aggregate_loss_from_stacked_tensorflow(residue_losses, T_gt) # BxK
return residue_loss, residue_per_point_array#, residue_per_class
if __name__ == '__main__':
batch_size = 100
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
parameters_torch = compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch = hungarian_matching(W_torch, I_gt_torch)
residue_loss_torch, residue_per_point_array_torch = compute_residue_loss(parameters_torch, matching_indices_torch, points_per_instance_torch, T_gt_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
residue_loss_torch = residue_loss_torch.detach().cpu().numpy()
residue_per_point_array_torch = residue_per_point_array_torch.detach().cpu().numpy()
print('residue_loss_torch', residue_loss_torch)
print('residue_per_point_array_torch', residue_per_point_array_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
X_tensorflow = tf.constant(X, dtype=tf.float32)
points_per_instance_tensorflow = tf.constant(points_per_instance, dtype=tf.float32)
T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
parameters_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
matching_indices_tensorflow = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W_tensorflow, I_gt_tensorflow], Tout=tf.int32))
residue_loss_tensorflow, residue_per_point_array_tensorflow = compute_residue_loss_tensorflow(parameters_tensorflow, matching_indices_tensorflow, points_per_instance_tensorflow, T_gt_tensorflow, classes=['plane', 'sphere', 'cylinder', 'cone'])
sess = tf.Session()
residue_loss_tensorflow, residue_per_point_array_tensorflow = sess.run([residue_loss_tensorflow, residue_per_point_array_tensorflow])
print(np.abs(residue_loss_tensorflow - residue_loss_torch).max())
print(np.abs(residue_per_point_array_tensorflow - residue_per_point_array_torch).max())
def compute_parameter_loss(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval=False, classes=['plane','sphere','cylinder','cone']):
parameter_losses = [] # a length T array of BxK tensors
batch_size, n_max_instances = predicted_parameters[list(predicted_parameters.keys())[0]].size()[0:2]
for class_ in classes:
if class_ == 'plane':
parameter_loss = plane_fitter.compute_parameter_loss(predicted_parameters['plane_normal'], gt_parameters['plane_normal'], matching_indices, angle_diff=is_eval)
elif class_ == 'sphere':
parameter_loss = torch.zeros([batch_size, n_max_instances], dtype=torch.float).to(T_gt.device)
elif class_ == 'cylinder':
parameter_loss = cylinder_fitter.compute_parameter_loss(predicted_parameters['cylinder_axis'], gt_parameters['cylinder_axis'], matching_indices, angle_diff=is_eval)
elif class_ == 'cone':
parameter_loss = cone_fitter.compute_parameter_loss(predicted_parameters['cone_axis'], gt_parameters['cone_axis'], matching_indices, angle_diff=is_eval)
else:
raise NotImplementedError
parameter_losses.append(parameter_loss)
parameter_losses = torch.stack(parameter_losses, dim=2)
parameter_loss = torch.gather(parameter_losses, 2, T_gt.unsqueeze(2)).squeeze(2) # BxK
return parameter_loss
def compute_parameter_loss_tensorflow(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval=False, classes=['plane','sphere','cylinder','cone']):
parameter_losses = [] # a length T array of BxK tensors
for class_ in classes:
if class_ == 'plane':
parameter_loss = plane_fitter.compute_parameter_loss_tensorflow(predicted_parameters['plane_normal'], gt_parameters['plane_normal'], matching_indices, angle_diff=is_eval)
elif class_ == 'sphere':
parameter_loss = tf.zeros_like(T_gt, dtype=tf.float32) # BxK
elif class_ == 'cylinder':
parameter_loss = cylinder_fitter.compute_parameter_loss_tensorflow(predicted_parameters['cylinder_axis'], gt_parameters['cylinder_axis'], matching_indices, angle_diff=is_eval)
elif class_ == 'cone':
parameter_loss = cone_fitter.compute_parameter_loss_tensorflow(predicted_parameters['cone_axis'], gt_parameters['cone_axis'], matching_indices, angle_diff=is_eval)
else:
raise NotImplementedError
parameter_losses.append(parameter_loss)
parameter_losses = tf.stack(parameter_losses, axis=2)
parameter_loss = aggregate_loss_from_stacked_tensorflow(parameter_losses, T_gt) # BxK
return parameter_loss
if __name__ == '__main__':
batch_size = 100
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
predicted_parameters_torch = compute_parameters(P_torch, W_torch, X_torch)
matching_indices_torch = hungarian_matching(W_torch, I_gt_torch)
parameter_loss_torch = compute_parameter_loss(predicted_parameters_torch, gt_parameters_torch, matching_indices_torch, T_gt_torch, is_eval=False, classes=['plane','sphere','cylinder','cone'])
parameter_loss_torch = parameter_loss_torch.detach().cpu().numpy()
print('parameter_loss_torch', parameter_loss_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
X_tensorflow = tf.constant(X, dtype=tf.float32)
T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
gt_parameters_tensorflow = {'plane_normal': tf.constant(gt_parameters['plane_normal'], dtype=tf.float32),
'plane_center': tf.constant(gt_parameters['plane_center'], dtype=tf.float32),
'sphere_center': tf.constant(gt_parameters['sphere_center'], dtype=tf.float32),
'sphere_radius_squared': tf.constant(gt_parameters['sphere_radius_squared'], dtype=tf.float32),
'cylinder_axis': tf.constant(gt_parameters['cylinder_axis'], dtype=tf.float32),
'cylinder_center': tf.constant(gt_parameters['cylinder_center'], dtype=tf.float32),
'cylinder_radius_square': tf.constant(gt_parameters['cylinder_radius_square'], dtype=tf.float32),
'cone_apex': tf.constant(gt_parameters['cone_apex'], dtype=tf.float32),
'cone_axis': tf.constant(gt_parameters['cone_axis'], dtype=tf.float32),
'cone_half_angle': tf.constant(gt_parameters['cone_half_angle'], dtype=tf.float32)}
predicted_parameters_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
matching_indices_tensorflow = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W_tensorflow, I_gt_tensorflow], Tout=tf.int32))
parameter_loss_tensorflow = compute_parameter_loss_tensorflow(predicted_parameters_tensorflow, gt_parameters_tensorflow, matching_indices_tensorflow, T_gt_tensorflow, is_eval=False, classes=['plane', 'sphere', 'cylinder', 'cone'])
sess = tf.Session()
parameter_loss_tensorflow = sess.run(parameter_loss_tensorflow)
print(np.abs(parameter_loss_tensorflow - parameter_loss_torch).max())
def sequence_mask(lengths, maxlen=None):
if maxlen is None:
maxlen = lengths.max()
row_vector = torch.arange(0, maxlen, 1).to(lengths.device)
matrix = lengths.unsqueeze(dim=-1)
mask = row_vector < matrix
return mask
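# Example (illustrative): sequence_mask(torch.tensor([2, 3]), maxlen=4) gives
# [[True, True, False, False],
#  [True, True, True,  False]],
# i.e. row b is True for the first lengths[b] positions.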
def get_mask_gt(I_gt, n_max_instances):
n_instances_gt = torch.max(I_gt, dim=1)[0] + 1 # only count known primitive type instances, as -1 will be ignored
mask_gt = sequence_mask(n_instances_gt, maxlen=n_max_instances)
return mask_gt
def get_mask_gt_tensorflow(I_gt, n_max_instances):
n_instances_gt = tf.reduce_max(I_gt, axis=1) + 1 # only count known primitive type instances, as -1 will be ignored
mask_gt = tf.sequence_mask(n_instances_gt, maxlen=n_max_instances)
return mask_gt
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
I_gt1 = np.random.randint(0, n_max_instances, (batch_size-batch_size//2, num_points))
I_gt2 = np.random.randint(0, n_max_instances//2, (batch_size//2, num_points))
I_gt = np.concatenate((I_gt1, I_gt2), axis=0)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
mask_gt_torch = get_mask_gt(I_gt_torch, n_max_instances)
mask_gt_torch = mask_gt_torch.detach().cpu().numpy()
print('mask_gt_torch', mask_gt_torch)
# Debugging with Tensorflow
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
mask_gt_tensorflow = get_mask_gt_tensorflow(I_gt_tensorflow, n_max_instances)
sess = tf.Session()
mask_gt_tensorflow = sess.run(mask_gt_tensorflow)
print(np.all(mask_gt_torch == mask_gt_tensorflow))
def reduce_mean_masked_instance(loss, mask_gt):
# loss: BxK
loss = torch.where(mask_gt, loss, torch.zeros_like(loss))
    reduced_loss = torch.sum(loss, dim=1) # B
denom = torch.sum(mask_gt.float(), dim=1) # B
return torch.where(denom > 0, reduced_loss / denom, torch.zeros_like(reduced_loss)) # B
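# Added illustrative check (not part of the original file): masked-out
# instances must not contribute to the per-batch mean, and a fully masked row
# yields 0 instead of NaN thanks to the torch.where guard above.
if __name__ == '__main__':
    _loss = torch.tensor([[1.0, 3.0], [5.0, 7.0]])
    _mask = torch.tensor([[True, True], [False, False]])
    print('reduce_mean_masked_instance demo', reduce_mean_masked_instance(_loss, _mask))  # tensor([2., 0.])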
def collect_losses(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
total_loss_multiplier):
total_loss = 0
# Normal Loss
normal_loss_per_data = normal_loss
total_normal_loss = torch.mean(normal_loss_per_data)
if normal_loss_multiplier > 0:
total_loss = total_loss + normal_loss_multiplier * total_normal_loss
    # Type Loss
type_loss_per_data = type_loss
total_type_loss = torch.mean(type_loss_per_data)
if type_loss_multiplier > 0:
total_loss = total_loss + type_loss_multiplier * total_type_loss
# mIoU Loss
miou_loss_per_data = avg_miou_loss
miou_loss_per_instance = miou_loss
total_miou_loss = torch.mean(miou_loss_per_data)
if miou_loss_multiplier > 0:
total_loss = total_loss + miou_loss_multiplier * total_miou_loss
# Residue Loss
residue_loss_per_data = avg_residue_loss
residue_loss_per_instance = residue_loss
total_residue_loss = torch.mean(residue_loss_per_data)
if residue_loss_multiplier > 0:
total_loss = total_loss + residue_loss_multiplier * total_residue_loss
    # Parameter Loss
parameter_loss_per_data = avg_parameter_loss
parameter_loss_per_instance = parameter_loss
total_parameter_loss = torch.mean(parameter_loss_per_data)
if parameter_loss_multiplier > 0:
total_loss = total_loss + parameter_loss_multiplier * total_parameter_loss
total_loss = total_loss * total_loss_multiplier
return total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss
def compute_all_losses(P, W, I_gt, X, X_gt, T, T_gt, gt_parameters, points_per_instance,
normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier, total_loss_multiplier, is_eval,
mode_seg='mIoU', classes=['plane','sphere','cylinder','cone']):
assert(mode_seg in ['mIoU', 'intersection'])
batch_size, _, n_max_instances = W.size()
matching_indices = hungarian_matching(W, I_gt)
if (residue_loss_multiplier>0) or (parameter_loss_multiplier>0):
predicted_parameters = compute_parameters(P, W, X)
mask_gt = get_mask_gt(I_gt, n_max_instances)
if normal_loss_multiplier>0:
normal_loss = compute_normal_loss(X, X_gt, angle_diff=is_eval)
else:
normal_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
if type_loss_multiplier>0:
type_loss = compute_per_point_type_loss(T, I_gt, T_gt, is_eval)
else:
type_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
if (mode_seg == 'mIoU') and (miou_loss_multiplier>0):
miou_loss, _ = compute_miou_loss(W, I_gt, matching_indices)
avg_miou_loss = reduce_mean_masked_instance(miou_loss, mask_gt)
elif (mode_seg == 'intersection') and (miou_loss_multiplier>0):
_, miou_loss = compute_miou_loss(W, I_gt, matching_indices)
avg_miou_loss = reduce_mean_masked_instance(miou_loss, mask_gt)
else:
miou_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
avg_miou_loss = torch.zeros([batch_size]).to(P.device)
if residue_loss_multiplier>0:
residue_loss, residue_per_point_array = compute_residue_loss(predicted_parameters, matching_indices, points_per_instance, T_gt, classes=classes)
avg_residue_loss = reduce_mean_masked_instance(residue_loss, mask_gt)
else:
residue_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
avg_residue_loss = torch.zeros([batch_size]).to(P.device)
if parameter_loss_multiplier>0:
parameter_loss = compute_parameter_loss(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval, classes=classes)
avg_parameter_loss = reduce_mean_masked_instance(parameter_loss, mask_gt)
else:
parameter_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
avg_parameter_loss = torch.zeros([batch_size]).to(P.device)
total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss = \
collect_losses(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
total_loss_multiplier)
if (residue_loss_multiplier > 0) or (parameter_loss_multiplier > 0):
return total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss, predicted_parameters['plane_normal'], predicted_parameters['cylinder_axis'], predicted_parameters['cone_axis']
else:
return total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss, None, None, None
def reduce_mean_masked_instance_tensorflow(loss, mask_gt):
# loss: BxK
loss = tf.where(mask_gt, loss, tf.zeros_like(loss))
reduced_loss = tf.reduce_sum(loss, axis=1) # B
denom = tf.reduce_sum(tf.to_float(mask_gt), axis=1) # B
return tf.where(denom > 0, reduced_loss / denom, tf.zeros_like(reduced_loss)) # B
def collect_losses_tensorflow(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
total_loss_multiplier):
total_loss = tf.zeros(shape=[], dtype=tf.float32)
normal_loss_per_data = normal_loss
total_normal_loss = tf.reduce_mean(normal_loss_per_data)
if normal_loss_multiplier > 0:
total_loss += normal_loss_multiplier * total_normal_loss
type_loss_per_data = type_loss
total_type_loss = tf.reduce_mean(type_loss_per_data)
if type_loss_multiplier > 0:
total_loss += type_loss_multiplier * total_type_loss
miou_loss_per_data = avg_miou_loss
miou_loss_per_instance = miou_loss
total_miou_loss = tf.reduce_mean(miou_loss_per_data)
if miou_loss_multiplier > 0:
total_loss += miou_loss_multiplier * total_miou_loss
residue_loss_per_data = avg_residue_loss
residue_loss_per_instance = residue_loss
total_residue_loss = tf.reduce_mean(residue_loss_per_data)
if residue_loss_multiplier > 0:
total_loss += residue_loss_multiplier * total_residue_loss
parameter_loss_per_data = avg_parameter_loss
parameter_loss_per_instance = parameter_loss
total_parameter_loss = tf.reduce_mean(parameter_loss_per_data)
if parameter_loss_multiplier > 0:
total_loss += parameter_loss_multiplier * total_parameter_loss
total_loss *= total_loss_multiplier
return total_loss
def compute_all_losses_tensorflow(P, W, I_gt, X, X_gt, T, T_gt, gt_parameters, points_per_instance,
normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier, total_loss_multiplier, is_eval,
classes=['plane','sphere','cylinder','cone']):
    n_max_instances = W.shape[2]
matching_indices = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W, I_gt], Tout=tf.int32))
predicted_parameters = compute_parameters_tensorflow(P, W, X)
mask_gt = get_mask_gt_tensorflow(I_gt, n_max_instances)
normal_loss = compute_normal_loss_tensorflow(X, X_gt, angle_diff=is_eval)
type_loss = compute_per_point_type_loss_tensorflow(T, I_gt, T_gt, is_eval)
miou_loss = compute_miou_loss_tensorflow(W, I_gt, matching_indices)
avg_miou_loss = reduce_mean_masked_instance_tensorflow(miou_loss, mask_gt)
residue_loss, residue_per_point_array = compute_residue_loss_tensorflow(predicted_parameters, matching_indices, points_per_instance, T_gt, classes=classes)
avg_residue_loss = reduce_mean_masked_instance_tensorflow(residue_loss, mask_gt)
parameter_loss = compute_parameter_loss_tensorflow(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval, classes=classes)
avg_parameter_loss = reduce_mean_masked_instance_tensorflow(parameter_loss, mask_gt)
total_loss = collect_losses_tensorflow(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
total_loss_multiplier)
return total_loss
if __name__ == '__main__':
batch_size = 100
num_points = 1024
num_points_instance = 512
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
X_gt = np.random.randn(batch_size, num_points, 3)
X_gt = X_gt / np.linalg.norm(X_gt, axis=2, keepdims=True)
T = np.random.rand(batch_size, num_points, 4)
T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
plane_normal = np.random.randn(batch_size, n_max_instances, 3)
plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
plane_center = np.random.randn(batch_size, n_max_instances)
sphere_center = np.random.randn(batch_size, n_max_instances, 3)
sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
cone_apex = np.random.randn(batch_size, n_max_instances, 3)
cone_axis = np.random.randn(batch_size, n_max_instances, 3)
cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
normal_loss_multiplier = 1.0
type_loss_multiplier = 1.0
miou_loss_multiplier = 1.0
residue_loss_multiplier = 1.0
parameter_loss_multiplier = 1.0
total_loss_multiplier = 1.0
is_eval = False
gt_parameters = {'plane_normal': plane_normal,
'plane_center': plane_center,
'sphere_center': sphere_center,
'sphere_radius_squared': sphere_radius_squared,
'cylinder_axis': cylinder_axis,
'cylinder_center': cylinder_center,
'cylinder_radius_square': cylinder_radius_square,
'cone_apex': cone_apex,
'cone_axis': cone_axis,
'cone_half_angle': cone_half_angle}
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
X_gt_torch = torch.from_numpy(X_gt).float().to(device)
T_torch = torch.from_numpy(T).float().to(device)
T_gt_torch = torch.from_numpy(T_gt).long().to(device)
I_gt_torch = torch.from_numpy(I_gt).long().to(device)
gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(device),
'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
'cylinder_radius_square': torch.from_numpy(gt_parameters['cylinder_radius_square']).float().to(device),
'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
total_loss_torch = compute_all_losses(P_torch, W_torch, I_gt_torch, X_torch, X_gt_torch, T_torch, T_gt_torch, gt_parameters_torch, points_per_instance_torch,
normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier,
total_loss_multiplier, is_eval)[0]
total_loss_torch = total_loss_torch.detach().cpu().numpy()
print('total_loss_torch', total_loss_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
X_tensorflow = tf.constant(X, dtype=tf.float32)
X_gt_tensorflow = tf.constant(X_gt, dtype=tf.float32)
T_tensorflow = tf.constant(T, dtype=tf.float32)
T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
gt_parameters_tensorflow = {'plane_normal': tf.constant(gt_parameters['plane_normal'], dtype=tf.float32),
'plane_center': tf.constant(gt_parameters['plane_center'], dtype=tf.float32),
'sphere_center': tf.constant(gt_parameters['sphere_center'], dtype=tf.float32),
'sphere_radius_squared': tf.constant(gt_parameters['sphere_radius_squared'], dtype=tf.float32),
'cylinder_axis': tf.constant(gt_parameters['cylinder_axis'], dtype=tf.float32),
'cylinder_center': tf.constant(gt_parameters['cylinder_center'], dtype=tf.float32),
'cylinder_radius_square': tf.constant(gt_parameters['cylinder_radius_square'], dtype=tf.float32),
'cone_apex': tf.constant(gt_parameters['cone_apex'], dtype=tf.float32),
'cone_axis': tf.constant(gt_parameters['cone_axis'], dtype=tf.float32),
'cone_half_angle': tf.constant(gt_parameters['cone_half_angle'], dtype=tf.float32)}
points_per_instance_tensorflow = tf.constant(points_per_instance, dtype=tf.float32)
total_loss_tensorflow = compute_all_losses_tensorflow(P_tensorflow, W_tensorflow, I_gt_tensorflow, X_tensorflow, X_gt_tensorflow, T_tensorflow, T_gt_tensorflow, gt_parameters_tensorflow, points_per_instance_tensorflow,
normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier,
total_loss_multiplier, is_eval)
sess = tf.Session()
total_loss_tensorflow = sess.run(total_loss_tensorflow)
print(np.abs(total_loss_tensorflow - total_loss_torch).max())
| 55,650 | 62.819954 | 319 |
py
|
CPFN
|
CPFN-master/SPFN/cone_fitter.py
|
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Cone
from SPFN.geometry_utils import guarded_matrix_solve_ls, guarded_matrix_solve_ls_tensorflow, weighted_plane_fitting, weighted_plane_fitting_tensorflow
def acos_safe(x):
return torch.acos(torch.clamp(x, min=-1.0+1e-6, max=1.0-1e-6))
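# Added illustrative note (not part of the original file): torch.acos has an
# unbounded derivative at +/-1, so the clamp keeps both the value and the
# gradient finite at the boundary.
if __name__ == '__main__':
    _x = torch.tensor(1.0, requires_grad=True)
    acos_safe(_x).backward()
    print('acos_safe grad at 1.0:', _x.grad)  # finite (0.), whereas torch.acos would give -inf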
def compute_parameters(P, W, X, div_eps=1e-10):
batch_size, n_points, _ = P.size()
_, _, n_max_instances = W.size()
W_reshaped = W.transpose(1,2).contiguous().view(batch_size * n_max_instances, n_points) # BKxN
# A - BKxNx3
A = X.unsqueeze(1).expand(batch_size, n_max_instances, n_points, 3).contiguous().view(batch_size * n_max_instances, n_points, 3)
# b - BKxNx1
b = torch.sum(P * X, dim=2).unsqueeze(1).expand(batch_size, n_max_instances, n_points).contiguous().view(batch_size * n_max_instances, n_points, 1)
apex = guarded_matrix_solve_ls(A, b, W_reshaped).view(batch_size, n_max_instances, 3) # BxKx3
X_tiled = A
# TODO: use P-apex instead of X for plane fitting
plane_n, plane_c = weighted_plane_fitting(X_tiled, W_reshaped)
axis = plane_n.view(batch_size, n_max_instances, 3) # BxKx3
P_minus_apex = P.unsqueeze(2) - apex.unsqueeze(1) # BxNxKx3
P_minus_apex_normalized = torch.nn.functional.normalize(P_minus_apex, p=2, dim=3, eps=1e-12)
P_minus_apex_normalized_dot_axis = torch.sum(axis.unsqueeze(1) * P_minus_apex_normalized, dim=3) # BxNxK
# flip direction of axis if wrong
sgn_axis = torch.sign(torch.sum(W * P_minus_apex_normalized_dot_axis, dim=1)) # BxK
sgn_axis = sgn_axis + (sgn_axis==0.0).float() # prevent sgn == 0
axis = axis * sgn_axis.unsqueeze(2) # BxKx3
tmp = W * acos_safe(torch.abs(P_minus_apex_normalized_dot_axis)) # BxNxK
W_sum = torch.sum(W, dim=1) # BxK
half_angle = torch.sum(tmp, dim=1) / (W_sum + div_eps) # BxK
half_angle = torch.clamp(half_angle, min=1e-3, max=np.pi/2-1e-3) # angle cannot be too weird
return apex, axis, half_angle
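# Added summary of the estimator above (not part of the original file): the
# apex solves the weighted least-squares system X . apex ~= X . P (every
# tangent plane of the cone passes through the apex); the axis is the normal
# of the plane fitted to the point normals (cone normals make a constant
# angle with the axis); the half-angle is the weighted mean angle between the
# axis and the normalized apex-to-point directions.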
def acos_safe_tensorflow(x):
return tf.math.acos(tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6))
def compute_parameters_tensorflow(P, W, X):
batch_size = tf.shape(P)[0]
n_points = tf.shape(P)[1]
n_max_instances = W.get_shape()[2]
W_reshaped = tf.reshape(tf.transpose(W, [0, 2, 1]), [batch_size * n_max_instances, n_points]) # BKxN
# A - BKxNx3
A = tf.reshape(tf.tile(tf.expand_dims(X, axis=1), [1, n_max_instances, 1, 1]), [batch_size * n_max_instances, n_points, 3]) # BKxNx3
# b - BKxNx1
b = tf.expand_dims(tf.reshape(tf.tile(tf.expand_dims(tf.reduce_sum(P * X, axis=2), axis=1), [1, n_max_instances, 1]), [batch_size * n_max_instances, n_points]), axis=2)
apex = tf.reshape(guarded_matrix_solve_ls_tensorflow(A, b, W_reshaped), [batch_size, n_max_instances, 3]) # BxKx3
X_tiled = A
# TODO: use P-apex instead of X for plane fitting
plane_n, plane_c = weighted_plane_fitting_tensorflow(X_tiled, W_reshaped)
axis = tf.reshape(plane_n, [batch_size, n_max_instances, 3]) # BxKx3
P_minus_apex_normalized = tf.nn.l2_normalize(tf.expand_dims(P, axis=2) - tf.expand_dims(apex, 1), axis=3) # BxNxKx3
P_minus_apex_normalized_dot_axis = tf.reduce_sum(tf.expand_dims(axis, axis=1) * P_minus_apex_normalized, axis=3) # BxNxK
# flip direction of axis if wrong
sgn_axis = tf.sign(tf.reduce_sum(W * P_minus_apex_normalized_dot_axis, axis=1)) # BxK
sgn_axis += tf.to_float(tf.equal(sgn_axis, 0.0)) # prevent sgn == 0
axis *= tf.expand_dims(sgn_axis, axis=2) # BxKx3
tmp = W * acos_safe_tensorflow(tf.abs(P_minus_apex_normalized_dot_axis)) # BxNxK
W_sum = tf.reduce_sum(W, axis=1) # BxK
half_angle = tf.reduce_sum(tmp, axis=1) / W_sum # BxK
    half_angle = tf.clip_by_value(half_angle, 1e-3, np.pi / 2 - 1e-3) # angle cannot be too weird
return apex, axis, half_angle
if __name__ == '__main__':
batch_size = 100
num_points = 1024
n_max_instances = 12
device = torch.device('cuda:0')
np.random.seed(0)
P = np.random.randn(batch_size, num_points, 3)
W = np.random.rand(batch_size, num_points, n_max_instances)
X = np.random.randn(batch_size, num_points, 3)
X = X / np.linalg.norm(X, axis=2, keepdims=True)
P_torch = torch.from_numpy(P).float().to(device)
W_torch = torch.from_numpy(W).float().to(device)
X_torch = torch.from_numpy(X).float().to(device)
apex_torch, axis_torch, half_angle_torch = compute_parameters(P_torch, W_torch, X_torch)
apex_torch = apex_torch.detach().cpu().numpy()
axis_torch = axis_torch.detach().cpu().numpy()
half_angle_torch = half_angle_torch.detach().cpu().numpy()
print('apex_torch', apex_torch)
print('axis_torch', axis_torch)
print('half_angle_torch', half_angle_torch)
# Debugging with Tensorflow
P_tensorflow = tf.constant(P, dtype=tf.float32)
W_tensorflow = tf.constant(W, dtype=tf.float32)
X_tensorflow = tf.constant(X, dtype=tf.float32)
apex_tensorflow, axis_tensorflow, half_angle_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
sess = tf.Session()
apex_tensorflow, axis_tensorflow, half_angle_tensorflow = sess.run([apex_tensorflow, axis_tensorflow, half_angle_tensorflow])
print(np.abs(apex_tensorflow - apex_torch).max())
print(np.minimum(np.abs(axis_tensorflow - axis_torch), np.abs(axis_tensorflow + axis_torch)).max())
print(np.abs(half_angle_tensorflow - half_angle_torch).max())
def compute_residue_single(apex, axis, half_angle, p):
    # apex: ...x3, axis: ...x3, half_angle: ..., p: ...x3
v = p - apex
v_normalized = torch.nn.functional.normalize(v, p=2, dim=-1, eps=1e-12)
alpha = acos_safe(torch.sum(v_normalized * axis, dim=-1))
return (torch.sin(torch.clamp(torch.abs(alpha - half_angle), min=None, max=np.pi / 2)))**2 * torch.sum(v * v, dim=-1)
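# Added geometric reading of the residue above (not part of the original
# file): for v = p - apex, alpha is the angle between v and the axis, so
# |alpha - half_angle| is the angular deviation from the cone surface;
# sin(.) * ||v|| approximates the Euclidean distance from p to the cone, and
# the residue is that distance squared.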
def compute_residue_single_tensorflow(apex, axis, half_angle, p):
# apex: ...x3, axis: ...x3, half_angle: ..., p: ...x3
v = p - apex
v_normalized = tf.nn.l2_normalize(v, axis=-1)
alpha = acos_safe_tensorflow(tf.reduce_sum(v_normalized * axis, axis=-1))
return tf.square(tf.sin(tf.minimum(tf.abs(alpha - half_angle), np.pi / 2))) * tf.reduce_sum(v * v, axis=-1)
if __name__ == '__main__':
batch_size = 100
num_points = 1024
device = torch.device('cuda:0')
np.random.seed(0)
apex = np.random.randn(batch_size, num_points, 3)
axis = np.random.randn(batch_size, num_points, 3)
half_angle = np.random.randn(batch_size, num_points)
p = np.random.randn(batch_size, num_points, 3)
apex_torch = torch.from_numpy(apex).float().to(device)
axis_torch = torch.from_numpy(axis).float().to(device)
half_angle_torch = torch.from_numpy(half_angle).float().to(device)
p_torch = torch.from_numpy(p).float().to(device)
loss_torch = compute_residue_single(apex_torch, axis_torch, half_angle_torch, p_torch)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
apex_tensorflow = tf.constant(apex, dtype=tf.float32)
axis_tensorflow = tf.constant(axis, dtype=tf.float32)
half_angle_tensorflow = tf.constant(half_angle, dtype=tf.float32)
p_tensorflow = tf.constant(p, dtype=tf.float32)
loss_tensorflow = compute_residue_single_tensorflow(apex_tensorflow, axis_tensorflow, half_angle_tensorflow, p_tensorflow)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
def compute_parameter_loss(predicted_axis, gt_axis, matching_indices, angle_diff):
# predicted_axis: BxK1x3
# gt_axis: BXK2x3
# matching indices: BxK2
batch_size, nb_primitives, _ = gt_axis.size()
predicted_axis = torch.gather(predicted_axis, 1, matching_indices.unsqueeze(2).expand(batch_size, nb_primitives, 3))
dot_abs = torch.abs(torch.sum(predicted_axis * gt_axis, axis=2))
if angle_diff:
return acos_safe(dot_abs) # BxK
else:
return 1.0 - dot_abs # BxK
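# Added note (not part of the original file): with angle_diff=False
# (training) the loss is 1 - |cos(theta)|, a smooth, sign-invariant surrogate
# for the axis angle; with angle_diff=True (evaluation) it returns the actual
# unsigned angle in radians.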
def batched_gather(data, indices, axis):
# data - Bx...xKx..., axis is where dimension K is
# indices - BxK
# output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
assert axis >= 1
    ndims = data.get_shape().ndims # rank must be statically known
    if axis > 1:
        # transpose data to BxKx...
perm = np.arange(ndims)
perm[axis] = 1
perm[1] = axis
data = tf.transpose(data, perm=perm)
batch_size = tf.shape(data)[0]
batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
gathered_data = tf.gather_nd(data, indices=indices)
if axis > 1:
gathered_data = tf.transpose(gathered_data, perm=perm)
return gathered_data
def compute_parameter_loss_tensorflow(predicted_axis, gt_axis, matching_indices, angle_diff):
axis = batched_gather(predicted_axis, matching_indices, axis=1)
dot_abs = tf.abs(tf.reduce_sum(axis * gt_axis, axis=2))
if angle_diff:
return acos_safe_tensorflow(dot_abs) # BxK
else:
return 1.0 - dot_abs # BxK
if __name__ == '__main__':
batch_size = 100
num_primitives1 = 15
num_primitives2 = 5
device = torch.device('cuda:0')
np.random.seed(0)
predicted_axis = np.random.randn(batch_size, num_primitives1, 3)
gt_axis = np.random.randn(batch_size, num_primitives2, 3)
matching_indices = np.random.randint(0, 15, (batch_size, num_primitives2))
angle_diff = True
predicted_axis_torch = torch.from_numpy(predicted_axis).float().to(device)
gt_axis_torch = torch.from_numpy(gt_axis).float().to(device)
matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
loss_torch = compute_parameter_loss(predicted_axis_torch, gt_axis_torch, matching_indices_torch, angle_diff)
loss_torch = loss_torch.detach().cpu().numpy()
print('loss_torch', loss_torch)
# Debugging with Tensorflow
predicted_axis_tensorflow = tf.constant(predicted_axis, dtype=tf.float32)
gt_axis_tensorflow = tf.constant(gt_axis, dtype=tf.float32)
matching_indices_tensorflow = tf.constant(matching_indices, dtype=tf.int32)
loss_tensorflow = compute_parameter_loss_tensorflow(predicted_axis_tensorflow, gt_axis_tensorflow, matching_indices_tensorflow, angle_diff)
sess = tf.Session()
loss_tensorflow = sess.run(loss_tensorflow)
print(np.abs(loss_torch - loss_tensorflow).max())
def create_primitive_from_dict(d):
assert d['type'] == 'cone'
apex = np.array([d['apex_x'], d['apex_y'], d['apex_z']], dtype=float)
axis = np.array([d['axis_x'], d['axis_y'], d['axis_z']], dtype=float)
half_angle = float(d['semi_angle'])
return Cone(apex=apex, axis=axis, half_angle=half_angle)
def extract_parameter_data_as_dict(primitives, n_max_instances):
axis_gt = np.zeros(dtype=float, shape=[n_max_instances, 3])
apex_gt = np.zeros(dtype=float, shape=[n_max_instances, 3])
half_angle_gt = np.zeros(dtype=float, shape=[n_max_instances])
for i, primitive in enumerate(primitives):
if isinstance(primitive, Cone):
axis_gt[i] = primitive.axis
apex_gt[i] = primitive.apex
half_angle_gt[i] = primitive.half_angle
return {
'cone_axis_gt': axis_gt,
}
def extract_predicted_parameters_as_json(cone_apex, cone_axis, cone_half_angle, k):
cone = Cone(cone_apex, cone_axis, cone_half_angle, z_min=0.0, z_max=5.0)
return {
'type': 'cone',
'apex_x': float(cone.apex[0]),
'apex_y': float(cone.apex[1]),
'apex_z': float(cone.apex[2]),
'axis_x': float(cone.axis[0]),
'axis_y': float(cone.axis[1]),
'axis_z': float(cone.axis[2]),
'angle': float(cone.half_angle * 2),
'z_min': float(cone.z_min),
'z_max': float(cone.z_max),
'label': k,
}
| 12,047 | 49.835443 | 172 |
py
|
CPFN
|
CPFN-master/Utils/dataset_utils.py
|
# Importation of packages
import os
import re
import h5py
import pickle
import numpy as np
from SPFN import cone_fitter, cylinder_fitter, fitter_factory, plane_fitter, sphere_fitter
def create_unit_data_from_hdf5_patch_selection(h5file_lowres, h5file_highres, normalisation, scale, n_points=None):
with h5py.File(h5file_lowres, 'r') as f:
points = f['noisy_points'][()].astype(np.float32)
if n_points is not None:
points = points[:n_points]
else:
n_points, _ = points.shape
labels = f['gt_labels'][()].astype(np.int64)[:n_points]
with h5py.File(h5file_highres, 'r') as f:
highres_labels = f['gt_labels'][()].astype(np.int64)
highres_npoints = highres_labels.shape[0]
unique_labels, unique_counts = np.unique(highres_labels, return_counts=True)
unique_labels = unique_labels[unique_counts>highres_npoints*scale]
output_labels = 1 - np.isin(labels, unique_labels).astype(np.int64)
highres_output_labels = 1 - np.isin(highres_labels, unique_labels).astype(np.int64)
if normalisation:
points = (points - np.mean(points, axis=0))
points = points / np.linalg.norm(points, axis=1).max()
shuffled_indices = np.random.choice(n_points, n_points, replace=False)
points = points[shuffled_indices]
output_labels = output_labels[shuffled_indices]
shuffled_indices = np.argsort(shuffled_indices)
return points, output_labels, shuffled_indices
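# Added note (not part of the original file): the returned shuffled_indices
# is the inverse permutation (np.argsort of the shuffle), so indexing the
# shuffled arrays with it restores the original point order, which lets
# per-point predictions be mapped back to the unshuffled labelling.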
def create_unit_data_from_hdf5_spfn(f, n_max_instances, noisy, n_points=None, use_glob_features=False, use_loc_features=False, fixed_order=False, shuffle=True):
# Loading Point Features
P = f['noisy_points'][()] if noisy else f['gt_points'][()] # Nx3
normal_gt = f['gt_normals'][()]
I_gt = f['gt_labels'][()]
# Loading the SPFN global and local features
if use_glob_features:
glob_features = f['glob_features'][()]
if use_loc_features:
loc_features = f['loc_features'][()]
# Reducing the number of points if needed
if n_points is not None:
P = P[:n_points]
normal_gt = normal_gt[:n_points]
I_gt = I_gt[:n_points]
n_total_points = P.shape[0]
# Checking if soup_ids are consecutive
found_soup_ids = []
soup_id_to_key = {}
soup_prog = re.compile('(.*)_soup_([0-9]+)$')
for key in list(f.keys()):
m = soup_prog.match(key)
if m is not None:
soup_id = int(m.group(2))
found_soup_ids.append(soup_id)
soup_id_to_key[soup_id] = key
found_soup_ids.sort()
n_instances = len(found_soup_ids)
if n_instances == 0:
return None
for i in range(n_instances):
if i not in found_soup_ids:
print('{} is not found in soup ids!'.format(i))
return None
# Adding Primitive Information
P_gt = []
instances = []
for i in range(n_instances):
g = f[soup_id_to_key[i]]
P_gt_cur = g['gt_points'][()]
P_gt.append(P_gt_cur)
if type(g.attrs['meta']) == np.void:
meta = pickle.loads(g.attrs['meta'])
else:
meta = eval(g.attrs['meta'])
primitive = fitter_factory.create_primitive_from_dict(meta)
if primitive is None:
return None
instances.append(primitive)
if n_instances > n_max_instances:
print('n_instances {} > n_max_instances {}'.format(n_instances, n_max_instances))
return None
if np.amax(I_gt) >= n_instances:
print('max label {} > n_instances {}'.format(np.amax(I_gt), n_instances))
return None
T_gt = [fitter_factory.primitive_name_to_id(primitive.get_primitive_name()) for primitive in instances]
T_gt.extend([0 for _ in range(n_max_instances - n_instances)])
n_gt_points_per_instance = P_gt[0].shape[0]
P_gt.extend([np.zeros(dtype=float, shape=[n_gt_points_per_instance, 3]) for _ in range(n_max_instances - n_instances)])
# Converting everything to numpy array
P_gt = np.array(P_gt)
T_gt = np.array(T_gt)
if shuffle and (not fixed_order):
# shuffle per point information around
perm = np.random.permutation(n_total_points)
P = P[perm]
normal_gt = normal_gt[perm]
I_gt = I_gt[perm]
result = {
'P': P,
'normal_gt': normal_gt,
'P_gt': P_gt,
'I_gt': I_gt,
'T_gt': T_gt,
}
if use_glob_features: result['glob_features'] = glob_features
if use_loc_features: result['loc_features'] = loc_features
# Adding in primitive parameters
for class_ in fitter_factory.primitive_name_to_id_dict.keys():
if class_ == 'plane':
result.update(plane_fitter.extract_parameter_data_as_dict(instances, n_max_instances))
elif class_ == 'sphere':
result.update(sphere_fitter.extract_parameter_data_as_dict(instances, n_max_instances))
elif class_ == 'cylinder':
result.update(cylinder_fitter.extract_parameter_data_as_dict(instances, n_max_instances))
elif class_ == 'cone':
result.update(cone_fitter.extract_parameter_data_as_dict(instances, n_max_instances))
else:
raise NotImplementedError
return result
| 5,228 | 41.512195 | 160 |
py
|
CPFN
|
CPFN-master/Utils/training_utils.py
|
# Importation of packages
import sys
import torch
import numpy as np
from SPFN import losses_implementation
# BN Decay
def get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True):
BN_INIT_DECAY = 0.5
BN_DECAY_RATE = 0.5
BN_DECAY_CLIP = 0.99
p = global_step * batch_size / bn_decay_step
if staircase:
p = int(np.floor(p))
bn_momentum = max(BN_INIT_DECAY * (BN_DECAY_RATE ** p), 1-BN_DECAY_CLIP)
return bn_momentum
def update_momentum(module, bn_momentum):
for name, module_ in module.named_modules():
if 'bn' in name:
module_.momentum = bn_momentum
# LR Decay
def get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True):
p = global_step * batch_size / decay_step
if staircase:
p = int(np.floor(p))
learning_rate = init_learning_rate * (decay_rate ** p)
return learning_rate
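# Added illustrative check of both staircase schedules (not part of the
# original file; the hyper-parameter values below are placeholders):
if __name__ == '__main__':
    for _step in [0, 1000, 10000]:
        _bn = get_batch_norm_decay(_step, batch_size=32, bn_decay_step=20000)
        _lr = get_learning_rate(0.001, _step, batch_size=32, decay_step=20000, decay_rate=0.7)
        print('step %d: bn_momentum=%.4f, lr=%.2e' % (_step, _bn, _lr))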
# Train For One Epoch
def patch_selection_train_val_epoch(dataloader, patchselec_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train'):
assert(network_mode in ['train', 'val'])
# Loading conf information related to current file
batch_size = conf.get_batch_size()
bn_decay_step = conf.get_bn_decay_step()
decay_step = conf.get_decay_step()
decay_rate = conf.get_decay_rate()
init_learning_rate = conf.get_init_learning_rate()
# Iteration over the dataset
old_bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
old_learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
total_loss = 0
    if network_mode == 'train':
        patchselec_module.train()
    elif network_mode == 'val':
        patchselec_module.eval()
for batch_id, data in enumerate(dataloader, 0):
optimizer.zero_grad()
# Updating the BN decay
bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
if old_bn_momentum != bn_momentum:
update_momentum(patchselec_module, bn_momentum)
old_bn_momentum = bn_momentum
# Updating the LR decay
learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
if old_learning_rate != learning_rate:
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
old_learning_rate = learning_rate
# Proper training
points = data[0].type(torch.FloatTensor).to(device)
batch_size_current, num_points, _ = points.size()
output_labels = data[1].type(torch.LongTensor).to(device)
predicted_labels = patchselec_module(points)[0]
predicted_labels = predicted_labels.contiguous().view(batch_size_current * num_points, 2)
output_labels = output_labels.view(batch_size_current * num_points)
loss = torch.nn.functional.cross_entropy(predicted_labels, output_labels)
total_loss += batch_size_current * loss.item()
# Printing Values
if batch_id%100==0: print('[%s][Epoch %d - Iteration %d] Loss: %f' % (network_mode, epoch, batch_id, loss.item()))
if network_mode == 'train':
# Backward Pass
loss.backward()
optimizer.step()
global_step += 1
# Updating the visualiser
visualiser.log_loss(loss.item(), '%s_loss' % network_mode)
visualiser.update()
return global_step, total_loss
def spfn_train_val_epoch(dataloader, spfn_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train'):
assert(network_mode in ['train', 'val'])
# Loading conf information related to current file
batch_size = conf.get_batch_size()
bn_decay_step = conf.get_bn_decay_step()
decay_step = conf.get_decay_step()
decay_rate = conf.get_decay_rate()
init_learning_rate = conf.get_init_learning_rate()
# Losses
miou_loss_multiplier = conf.get_miou_loss_multiplier()
normal_loss_multiplier = conf.get_normal_loss_multiplier()
type_loss_multiplier = conf.get_type_loss_multiplier()
parameter_loss_multiplier = conf.get_parameter_loss_multiplier()
residue_loss_multiplier = conf.get_residue_loss_multiplier()
total_loss_multiplier = conf.get_total_loss_multiplier()
# Iteration over the dataset
old_bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
old_learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
total_loss_ = 0
if network_mode == 'train':
spfn_module.train()
elif network_mode == 'val':
spfn_module.eval()
for batch_id, data in enumerate(dataloader, 0):
if batch_id%100==0: print('[%s][Epoch %d - Iteration %d]' % (network_mode, epoch, batch_id))
optimizer.zero_grad()
# Updating the BN decay
bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
if old_bn_momentum != bn_momentum:
update_momentum(spfn_module, bn_momentum)
old_bn_momentum = bn_momentum
# Updating the LR decay
learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
if old_learning_rate != learning_rate:
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
old_learning_rate = learning_rate
# Loading the inputs
P = data[0].type(torch.FloatTensor).to(device)
batch_size_current, num_points, _ = P.size()
X_gt = data[1].type(torch.FloatTensor).to(device)
points_per_instance = data[2].type(torch.FloatTensor).to(device)
_, nb_primitives, nb_points_primitives, _ = points_per_instance.size()
I_gt = data[3].type(torch.LongTensor).to(device)
T_gt = data[4].type(torch.LongTensor).to(device)
plane_n_gt = data[5].type(torch.FloatTensor).to(device)
cylinder_axis_gt = data[6].type(torch.FloatTensor).to(device)
cone_axis_gt = data[7].type(torch.FloatTensor).to(device)
gt_parameters = {'plane_normal': plane_n_gt, 'cylinder_axis': cylinder_axis_gt, 'cone_axis': cone_axis_gt}
if args.network == 'GlobalSPFN':
glob_features = None
loc_features = None
elif args.network == 'LocalSPFN':
glob_features = data[8].type(torch.FloatTensor).to(device)
loc_features = data[9].type(torch.FloatTensor).to(device)
# Forward Pass
X, T, W, _, _ = spfn_module(P, glob_features=glob_features, loc_features=loc_features)
X = torch.nn.functional.normalize(X, p=2, dim=2, eps=1e-12)
W = torch.softmax(W, dim=2)
total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss, _, _, _ = losses_implementation.compute_all_losses(
P, W, I_gt, X, X_gt, T, T_gt, gt_parameters, points_per_instance, normal_loss_multiplier,
type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier,
total_loss_multiplier, False, mode_seg='mIoU', classes=conf.get_list_of_primitives())
total_loss_ += batch_size_current * total_loss.item()
if network_mode == 'train':
# Backward Pass
total_loss.backward()
            # Safety check: skip the optimizer step if any gradient is inf/NaN
flag = False
for param in spfn_module.parameters():
if param.requires_grad and ((torch.any(torch.isinf(param.grad))) or torch.any(torch.isnan(param.grad))):
flag = True
break
if not flag:
optimizer.step()
global_step += 1
# Printing Values
if batch_id%100==0:
print('Loss Value: ', total_loss.item())
print('Normal Loss', total_normal_loss.item())
print('Type Loss', total_type_loss.item())
print('mIoU Loss', total_miou_loss.item())
print('Residue Loss', total_residue_loss.item())
print('Parameter Loss', total_parameter_loss.item())
# Updating the visualiser
visualiser.log_loss(total_loss.item(), '%s_loss'%network_mode)
visualiser.log_loss(total_normal_loss.item(), '%s_normal_loss'%network_mode)
visualiser.log_loss(total_type_loss.item(), '%s_type_loss'%network_mode)
visualiser.log_loss(total_miou_loss.item(), '%s_miou_loss'%network_mode)
visualiser.log_loss(total_residue_loss.item(), '%s_residue_loss'%network_mode)
visualiser.log_loss(total_parameter_loss.item(), '%s_parameter_loss'%network_mode)
visualiser.update()
return global_step, total_loss_
| 8,974 | 49.994318 | 166 |
py
|
CPFN
|
CPFN-master/Utils/training_visualisation.py
|
import torch
import numpy as np
from torch import nn
from visdom import Visdom
ORANGE = np.array([[255, 105, 0]])
BLUE = np.array([[40, 40, 255]])
RED = np.array([[255, 40, 40]])
class Visualiser(object):
def __init__(self, plotting_interval, port=8097):
self.vis = Visdom(port=port)
self.line_plotter = VisdomLinePlotter(self.vis)
self.plotting_interval = plotting_interval
self.plotting_step = 0
self.loss_history_dict = {}
self.image_dict = {}
self.window_elements = []
def log_image(self, image, name):
image = torch.clamp(image, 0, 1)
image = image.cpu().detach().numpy()
self.image_dict[name] = image
if not name in self.window_elements:
self.window_elements.append(name)
def log_loss(self, loss, name):
current_history = self.loss_history_dict.get(name, [np.nan] * self.plotting_interval)
updated_history = current_history[1:] + [loss]
self.loss_history_dict[name] = updated_history
if not name in self.window_elements:
self.window_elements.append(name)
def update(self):
if self.plotting_step % self.plotting_interval == 0:
loss_avg_dict = {k: torch.tensor(self.loss_history_dict[k]).mean().item() for k in self.loss_history_dict}
for name in loss_avg_dict:
loss_avg = loss_avg_dict[name]
self.line_plotter.plot(name, name, name, self.plotting_step, loss_avg, color=ORANGE)
for name in self.image_dict:
self.vis.image(self.image_dict[name], opts=dict(title=name), win=self.window_elements.index(name))
self.plotting_step += 1
class VisdomLinePlotter(object):
def __init__(self, vis, env_name='main'):
self.vis = vis
self.env = env_name
self.plots = {}
def plot(self, var_name, split_name, title_name, x, y, color):
if var_name not in self.plots:
self.plots[var_name] = self.vis.line(X=np.array([x, x]), Y=np.array([y, y]), env=self.env, opts=dict(
legend=[split_name],
title=title_name,
linecolor=color,
xlabel='Training steps',
ylabel=var_name
))
else:
self.vis.line(X=np.array([x]), Y=np.array([y]), env=self.env,
opts=dict(
legend=[split_name],
title=title_name,
linecolor=color,
xlabel='Training steps',
ylabel=var_name
),
win=self.plots[var_name], name=split_name, update = 'append')
| 2,658 | 38.102941 | 118 |
py
|
CPFN
|
CPFN-master/Utils/config_loader.py
|
# Importation of packages
import yaml
class Config(object):
def __init__(self, filename):
self.conf = yaml.safe_load(open(filename, 'r'))
def fetch(self, name, default_value=None):
result = self.conf.get(name, default_value)
assert result is not None
return result
def get_CUDA_visible_GPUs(self):
return self.fetch('CUDA_visible_GPUs')
def get_batch_size(self):
return self.fetch('batch_size')
def get_train_data_file(self):
return self.fetch('train_data_file')
def get_train_data_first_n(self):
return self.fetch('train_first_n')
def is_train_data_noisy(self):
return self.fetch('train_data_noisy')
def get_nb_train_workers(self):
return self.fetch('train_workers')
def get_val_data_file(self):
return self.fetch('val_data_file')
def get_val_data_first_n(self):
return self.fetch('val_first_n')
def is_val_data_noisy(self):
return self.fetch('val_data_noisy')
def get_nb_val_workers(self):
return self.fetch('val_workers')
def get_test_data_file(self):
return self.fetch('test_data_file')
def get_test_data_first_n(self):
return self.fetch('test_first_n')
def is_test_data_noisy(self):
return self.fetch('test_data_noisy')
def get_n_epochs(self):
return self.fetch('n_epochs')
def get_bn_decay_step(self):
return self.fetch('bn_decay_step', -1)
def get_decay_step(self):
return self.fetch('decay_step')
def get_decay_rate(self):
return self.fetch('decay_rate')
def get_init_learning_rate(self):
return self.fetch('init_learning_rate')
def get_val_interval(self):
return self.fetch('val_interval', 5)
def get_snapshot_interval(self):
return self.fetch('snapshot_interval', 100)
def get_visualisation_interval(self):
return self.fetch('visualisation_interval', 50)
def get_weights_folder(self):
return self.fetch('weights_folder')
class SPFNConfig(Config):
def __init__(self, filename):
Config.__init__(self, filename)
def get_miou_loss_multiplier(self):
return self.fetch('miou_loss_multiplier')
def get_normal_loss_multiplier(self):
return self.fetch('normal_loss_multiplier')
def get_type_loss_multiplier(self):
return self.fetch('type_loss_multiplier')
def get_parameter_loss_multiplier(self):
return self.fetch('parameter_loss_multiplier')
def get_residue_loss_multiplier(self):
return self.fetch('residue_loss_multiplier')
def get_total_loss_multiplier(self):
return self.fetch('total_loss_multiplier')
def get_list_of_primitives(self):
return self.fetch('list_of_primitives')
def get_n_max_global_instances(self):
return self.fetch('n_max_global_instances')
class Global_SPFNConfig(SPFNConfig):
def __init__(self, filename):
SPFNConfig.__init__(self, filename)
class Local_SPFNConfig(SPFNConfig):
def __init__(self, filename):
SPFNConfig.__init__(self, filename)
def get_n_max_local_instances(self):
return self.fetch('n_max_local_instances')
class Patch_SelecConfig(Config):
def __init__(self, filename):
Config.__init__(self, filename)
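# Hypothetical minimal YAML for SPFNConfig (added sketch, not part of the
# original file; keys mirror the fetch() calls above, values are placeholders):
#   batch_size: 8
#   init_learning_rate: 0.001
#   decay_step: 20000
#   decay_rate: 0.7
#   bn_decay_step: 20000
#   n_epochs: 100
#   miou_loss_multiplier: 1.0
#   normal_loss_multiplier: 1.0
#   type_loss_multiplier: 1.0
#   parameter_loss_multiplier: 1.0
#   residue_loss_multiplier: 1.0
#   total_loss_multiplier: 1.0
#   list_of_primitives: ['plane', 'sphere', 'cylinder', 'cone']
#   n_max_global_instances: 24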
| 3,341 | 26.85 | 55 |
py
|
CPFN
|
CPFN-master/Utils/merging_utils.py
|
# Importation of packages
import torch
import numba
import numpy as np
def similarity_soft(spfn_labels, predicted_labels, point_indices):
num_points_per_object, max_label_per_object = spfn_labels.size()
nb_patches, num_points_per_patch, max_label_per_patch = predicted_labels.size()
point2primitive_prediction = torch.zeros([num_points_per_object, nb_patches*max_label_per_patch+max_label_per_object]).to(predicted_labels.device)
    for b in range(nb_patches):
        predicted_labels_b = predicted_labels[b]
        point2primitive_prediction[point_indices[b],b*max_label_per_patch:(b+1)*max_label_per_patch] += predicted_labels_b
    # global SPFN primitives occupy the trailing columns
    point2primitive_prediction[:,nb_patches*max_label_per_patch:] = spfn_labels
intersection_primitives = torch.mm(point2primitive_prediction.transpose(0,1),point2primitive_prediction)
return intersection_primitives
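# Added note (not part of the original file): point2primitive_prediction
# stacks, per point, the soft memberships of every patch primitive
# (nb_patches * max_label_per_patch columns) followed by the global SPFN
# primitives, so the Gram matrix returned above measures the soft overlap
# (number of shared points) between every pair of primitives.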
@numba.jit(numba.int64[:](numba.int64[:,:], numba.int64[:], numba.float64[:]), nopython=True)
def heuristic_merging(pairs_id, patch_id, penalty_value):
pairs_id1 = pairs_id[:,0]
pairs_id2 = pairs_id[:,1]
segment_id = np.arange(len(patch_id), dtype=numba.int64)
patch_1hot = np.eye(patch_id.max()+1)[patch_id]
while len(pairs_id1) > 0:
pair_id1 = pairs_id1[np.argmax(penalty_value)]
pair_id2 = pairs_id2[np.argmax(penalty_value)]
segment_id[segment_id==segment_id[pair_id2]] = segment_id[pair_id1]
selection_row = segment_id==segment_id[pair_id1]
patch_1hot[selection_row] = np.sum(patch_1hot[selection_row], axis=0)
intersection = np.sum(patch_1hot[pairs_id1] * patch_1hot[pairs_id2], axis=1)
pairs_id1 = pairs_id1[intersection==0]
pairs_id2 = pairs_id2[intersection==0]
penalty_value = penalty_value[intersection==0]
return segment_id
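# Added note on the greedy merge above (not part of the original file): the
# pair with the largest penalty (similarity) is merged first, the
# patch-membership histogram patch_1hot of the merged segment is updated, and
# remaining candidate pairs whose segments now share a patch are dropped, so
# two primitives originating from the same patch are never merged.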
def run_heuristic_solver(similarity_matrix, nb_patches, max_label_per_object, max_label_per_patch, threshold=0):
    # Building the pairwise penalty terms of the optimisation problem (solved here heuristically rather than with Gurobi)
indices = np.where(similarity_matrix>threshold)
penalty_array = np.stack((indices[0], indices[1], similarity_matrix[indices[0], indices[1]]), axis=1)
penalty_array = penalty_array[penalty_array[:,0]<penalty_array[:,1]]
# Heuristic
patch_id = np.concatenate((np.repeat(np.arange(nb_patches), repeats=max_label_per_patch, axis=0), nb_patches*np.ones([max_label_per_object], dtype=int)), axis=0)
glob_output_labels_heuristic = heuristic_merging(penalty_array[:,:2].astype(int), patch_id, penalty_array[:,2])
flag = np.diag(similarity_matrix)
replacement_values = np.concatenate((np.tile(np.arange(-max_label_per_patch, 0), nb_patches), np.arange(-max_label_per_object, 0)), axis=0)
glob_output_labels_heuristic[flag<threshold] = replacement_values[flag<threshold]
_, glob_output_labels_heuristic = np.unique(glob_output_labels_heuristic, return_inverse=True)
return glob_output_labels_heuristic
def get_point_final(point2primitive_prediction, output_labels_heuristic):
output_labels_heuristic = torch.eye(output_labels_heuristic.max()+1).to(output_labels_heuristic.device)[output_labels_heuristic.long()]
output_labels_heuristic = output_labels_heuristic / (torch.sum(output_labels_heuristic, dim=0, keepdim=True) + 1e-10)
final_output_labels_heuristic = torch.mm(point2primitive_prediction, output_labels_heuristic)
return final_output_labels_heuristic
| 3,371 | 62.622642 | 165 |
py
|
CPFN
|
CPFN-master/Utils/sampling_utils.py
|
# Importation of packages
import numpy as np
def sample(gt_points_lr, gt_points_hr, pool_indices, num_points_patch=8192, max_number_patches=32):
list_patch_indices = []
while (len(list_patch_indices) < max_number_patches) and (len(pool_indices) != 0):
# Selecting a random pool index for label l
i = pool_indices[np.random.choice(len(pool_indices))]
# Getting the patch indices for that query points
distances = np.linalg.norm(np.expand_dims(gt_points_lr[i], axis=0) - gt_points_hr, axis=1)
patch_indices = np.argsort(distances)[:num_points_patch]
list_patch_indices.append(patch_indices)
patch_distances = np.sort(distances)[:num_points_patch]
# Deleting the neighbours in the pool of indices
distances = np.linalg.norm(np.expand_dims(gt_points_lr[i], axis=0) - gt_points_lr[pool_indices], axis=1)
pool_indices_selected = np.where(distances <= np.max(patch_distances))[0]
pool_indices = np.delete(pool_indices, pool_indices_selected)
patch_indices = np.stack(list_patch_indices, axis=0)
return patch_indices
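# Hypothetical usage sketch (added, not part of the original file):
#   pool = np.arange(len(gt_points_lr))
#   patch_indices = sample(gt_points_lr, gt_points_hr, pool,
#                          num_points_patch=8192, max_number_patches=32)
# patch_indices has shape [n_patches, num_points_patch] and indexes rows of
# gt_points_hr; seed points already covered by a patch are removed from the
# pool, so patches spread across the shape.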
| 1,113 | 57.631579 | 112 |
py
|
CPFN
|
CPFN-master/Dataset/dataloaders.py
|
# Importation of packages
import os
import re
import h5py
import torch
import pickle
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch.utils.data as data
# Importing Utils files
from Utils import dataset_utils
class Dataset_PatchSelection(data.Dataset):
def __init__(self, csv_path, lowres_folder, highres_folder, scale, n_points=None, normalisation=True):
self.lowres_folder = lowres_folder
self.highres_folder = highres_folder
self.scale = scale
self.n_points = n_points
self.normalisation = normalisation
csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
self.hdf5_file_list = np.sort([file_ for file_ in csv_raw])
self.hdf5_file_list_lowres = [os.path.join(self.lowres_folder, file_.split('.')[0] + '.h5') for file_ in self.hdf5_file_list]
self.hdf5_file_list_highres = [os.path.join(self.highres_folder, file_.split('.')[0] + '.h5') for file_ in self.hdf5_file_list]
self.n_data = len(self.hdf5_file_list)
self.preload_dataset()
def preload_dataset(self):
self.list_points = []
self.list_output_labels = []
self.list_shuffled_indices = []
print('Preloading Dataset:')
for i in tqdm(range(self.n_data)):
points, output_labels, shuffled_indices = dataset_utils.create_unit_data_from_hdf5_patch_selection(self.hdf5_file_list_lowres[i], self.hdf5_file_list_highres[i], normalisation=self.normalisation, scale=self.scale, n_points=self.n_points)
self.list_points.append(points)
self.list_output_labels.append(output_labels)
self.list_shuffled_indices.append(shuffled_indices)
def __getitem__(self, index):
# find shape that contains the point with given global index
points = self.list_points[index]
points = torch.from_numpy(points).float()
output_labels = self.list_output_labels[index]
output_labels = torch.from_numpy(output_labels).long()
shuffled_indices = self.list_shuffled_indices[index]
shuffled_indices = torch.from_numpy(shuffled_indices).long()
return points, output_labels, shuffled_indices
def __len__(self):
return self.n_data
class Dataset_GlobalSPFN(data.Dataset):
def __init__(self, n_max_global_instances, csv_path, lowres_folder, highres_folder, path_patches, noisy, n_points=8192, test=False, first_n=-1, fixed_order=False):
self.n_max_global_instances = n_max_global_instances
self.lowres_folder = lowres_folder
self.highres_folder = highres_folder
if not test:
self.dir_files = self.lowres_folder
self.path_patches = None
else:
self.dir_files = self.highres_folder
self.path_patches = path_patches
self.noisy = noisy
self.n_points = n_points
self.test = test
self.first_n = first_n
self.fixed_order = fixed_order
csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
self.hdf5_file_list = np.sort(csv_raw)
if not fixed_order:
random.shuffle(self.hdf5_file_list)
if first_n != -1:
self.hdf5_file_list = self.hdf5_file_list[:first_n]
self.n_data = len(self.hdf5_file_list)
if not self.test:
self.preload_dataset()
def preload_dataset(self):
        print('Preloading Dataset:')
for index in tqdm(range(self.__len__())):
data_elt = self.fetch_data_at_index(index)
if not hasattr(self, 'data_matrix'):
self.data_matrix = {}
for key in data_elt.keys():
trailing_ones = np.full([len(data_elt[key].shape)], 1, dtype=int)
self.data_matrix[key] = np.tile(np.expand_dims(np.zeros_like(data_elt[key]), axis=0), [self.n_data, *trailing_ones])
for key in data_elt.keys():
self.data_matrix[key][index, ...] = data_elt[key]
def fetch_data_at_index(self, i):
file_ = self.hdf5_file_list[i]
with h5py.File(os.path.join(self.dir_files, file_), 'r') as f:
data = dataset_utils.create_unit_data_from_hdf5_spfn(f, self.n_max_global_instances, self.noisy, n_points=self.n_points, use_glob_features=False, use_loc_features=False, fixed_order=self.fixed_order, shuffle=not self.fixed_order)
assert data is not None # assume data are all clean
if self.test:
if os.path.isfile(os.path.join(self.path_patches, file_.replace('.h5','_indices.npy'))):
data['patch_centers'] = np.load(os.path.join(self.path_patches, file_.replace('.h5','_indices.npy')))[:,0]
else:
data['patch_centers'] = np.array([])
return data
def __getitem__(self, index):
# find shape that contains the point with given global index
if not self.test:
data = {}
for key in self.data_matrix.keys():
data[key] = self.data_matrix[key][index,...]
else:
data = self.fetch_data_at_index(index)
P = torch.from_numpy(data['P'].astype(np.float32))
normal_gt = torch.from_numpy(data['normal_gt'].astype(np.float32))
P_gt = torch.from_numpy(data['P_gt'].astype(np.float32))
I_gt = torch.from_numpy(data['I_gt'].astype(np.int64))
T_gt = torch.from_numpy(data['T_gt'].astype(np.int64))
plane_n_gt = torch.from_numpy(data['plane_n_gt'].astype(np.float32))
cylinder_axis_gt = torch.from_numpy(data['cylinder_axis_gt'].astype(np.float32))
cone_axis_gt = torch.from_numpy(data['cone_axis_gt'].astype(np.float32))
if self.test:
patch_centers = torch.from_numpy(data['patch_centers'].astype(np.int64))
return P, normal_gt, P_gt, I_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt, patch_centers
else:
return P, normal_gt, P_gt, I_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt
def __len__(self):
return self.n_data
class Dataset_TrainLocalSPFN(data.Dataset):
def __init__(self, n_max_local_instances, csv_path, patch_folder, noisy, first_n=-1, fixed_order=False, lean=False):
self.n_max_local_instances = n_max_local_instances
self.noisy = noisy
self.first_n = first_n
self.fixed_order = fixed_order
self.lean = lean
self.patch_folder = patch_folder
csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
self.hdf5_file_list = np.sort(csv_raw)
self.n_data = 0
self.hdf5_file_list = np.sort([elt for elt in self.hdf5_file_list if self.check_dataset(elt)])
if not fixed_order:
random.shuffle(self.hdf5_file_list)
if self.lean:
nb_patch_file = np.zeros([len(self.hdf5_file_list)])
for i, file_ in enumerate(self.hdf5_file_list):
patch_files = [os.path.join(self.patch_folder, file_.split('.')[0], file_) for file_ in os.listdir(os.path.join(self.patch_folder, file_.split('.')[0])) if file_.split('.')[1] == 'h5']
nb_patch_file[i] = len(patch_files)
self.nb_patch_file = nb_patch_file
if first_n != -1:
self.hdf5_file_list = self.hdf5_file_list[:first_n]
if not self.lean:
self.preload_data()
def check_dataset(self, file_):
cond = os.path.isdir(os.path.join(self.patch_folder, file_.split('.')[0]))
if not cond:
return False
patch_files = [os.path.join(self.patch_folder, file_.split('.')[0], file_) for file_ in os.listdir(os.path.join(self.patch_folder, file_.split('.')[0])) if file_.split('.')[1] == 'h5']
self.n_data += len(patch_files)
return True
def preload_data(self):
cpt = 0
print('Preloading Dataset:')
for i, file_ in tqdm(enumerate(self.hdf5_file_list)):
if i%100==0: print('%d / %d'%(i, len(self.hdf5_file_list)))
patch_files = [os.path.join(self.patch_folder, file_.split('.')[0], file_) for file_ in os.listdir(os.path.join(self.patch_folder, file_.split('.')[0])) if file_.split('.')[1] == 'h5']
patch_files = np.sort(patch_files)
for j in range(len(patch_files)):
patch_file = os.path.join(self.patch_folder, file_.split('.')[0], file_.replace('.h5','_patch%d.h5'%j))
data_elt = self.fetch_data_at_index(patch_file)
if not hasattr(self, 'data_matrix'):
self.data_matrix = {}
for key in data_elt.keys():
trailing_ones = np.full([len(data_elt[key].shape)], 1, dtype=int)
self.data_matrix[key] = np.tile(np.expand_dims(np.zeros_like(data_elt[key]), axis=0), [self.n_data, *trailing_ones])
for key in data_elt.keys():
self.data_matrix[key][cpt, ...] = data_elt[key]
cpt += 1
def fetch_data_at_index(self, patch_file):
with h5py.File(patch_file, 'r') as f:
data = dataset_utils.create_unit_data_from_hdf5_spfn(f, self.n_max_local_instances, noisy=self.noisy, n_points=None, use_glob_features=True, use_loc_features=True, fixed_order=self.fixed_order, shuffle=not self.fixed_order)
assert data is not None # assume data are all clean
return data
def __getitem__(self, index):
# find shape that contains the point with given global index
if not self.lean:
data = {}
for key in self.data_matrix.keys():
data[key] = self.data_matrix[key][index, ...]
else:
cumsum = np.cumsum(self.nb_patch_file)
index_ = np.where(index<cumsum)[0][0]
file_ = self.hdf5_file_list[index_]
if index_ == 0:
j = index
else:
j = int(index - cumsum[index_-1])
patch_file = os.path.join(self.patch_folder, file_.split('.')[0], file_.replace('.h5', '_patch%d.h5' % j))
data = self.fetch_data_at_index(patch_file)
P = torch.from_numpy(data['P'].astype(np.float32))
normal_gt = torch.from_numpy(data['normal_gt'].astype(np.float32))
P_gt = torch.from_numpy(data['P_gt'].astype(np.float32))
I_gt = torch.from_numpy(data['I_gt'].astype(np.int64))
T_gt = torch.from_numpy(data['T_gt'].astype(np.int64))
plane_n_gt = torch.from_numpy(data['plane_n_gt'].astype(np.float32))
cylinder_axis_gt = torch.from_numpy(data['cylinder_axis_gt'].astype(np.float32))
cone_axis_gt = torch.from_numpy(data['cone_axis_gt'].astype(np.float32))
glob_features = torch.from_numpy(data['glob_features'].astype(np.float32))
loc_features = torch.from_numpy(data['loc_features'].astype(np.float32))
output_tuple = (P, normal_gt, P_gt, I_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt, glob_features, loc_features)
return output_tuple
def __len__(self):
return self.n_data
class Dataset_TestLocalSPFN(data.Dataset):
def __init__(self, n_max_global_instances, n_max_local_instances, csv_path, dir_spfn, dir_lowres, dir_highres, dir_indices, noisy, first_n=-1, fixed_order=False):
self.n_max_global_instances = n_max_global_instances
self.n_max_local_instances = n_max_local_instances
self.dir_spfn = dir_spfn
self.dir_lowres = dir_lowres
self.dir_highres = dir_highres
self.dir_indices = dir_indices
self.noisy = noisy
self.first_n = first_n
self.fixed_order = fixed_order
csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
self.hdf5_file_list = np.sort(csv_raw)
self.n_data = len(self.hdf5_file_list)
self.hdf5_file_list_improvement = [elt for elt in self.hdf5_file_list if self.check_dataset(elt)]
def check_dataset(self, file_):
cond = os.path.isfile(os.path.join(self.dir_indices, file_.split('.')[0] + '_indices.npy'))
if not cond:
return False
return True
def fetch_data_at_index(self, patch_file):
with h5py.File(patch_file, 'r') as f:
data = dataset_utils.create_unit_data_from_hdf5_spfn(f, self.n_max_global_instances, self.noisy, n_points=None, fixed_order=True, shuffle=False)
assert data is not None # assume data are all clean
return data
def __getitem__(self, index):
# find shape that contains the point with given global index
folder = self.hdf5_file_list[index]
# Loading the highres file
data_elt = self.fetch_data_at_index(os.path.join(self.dir_highres, folder))
P_global = data_elt['P']
normal_gt_global = data_elt['normal_gt']
P_gt_global = data_elt['P_gt']
I_gt_global = data_elt['I_gt']
T_gt_global = data_elt['T_gt']
plane_n_gt_global = data_elt['plane_n_gt']
cylinder_axis_gt_global = data_elt['cylinder_axis_gt']
cone_axis_gt_global = data_elt['cone_axis_gt']
if (folder in self.hdf5_file_list_improvement):
# Loading the patch indices
patch_indices = np.load(os.path.join(self.dir_indices, folder.replace('.h5', '_indices.npy')))
nb_patches, _ = patch_indices.shape
P_unormalised = P_global[patch_indices]
mean = np.mean(P_unormalised, axis=1, keepdims=True)
P = P_unormalised - mean
norm = np.linalg.norm(P, axis=2, keepdims=True).max(axis=1, keepdims=True)
P = P / norm
_, num_local_points, _ = P.shape
normal_gt = normal_gt_global[patch_indices]
I_gt = I_gt_global[patch_indices]
P_gt = np.zeros((nb_patches,) + P_gt_global[:self.n_max_local_instances].shape)
T_gt = np.zeros((nb_patches,) + T_gt_global[:self.n_max_local_instances].shape)
plane_n_gt = np.zeros((nb_patches,) + plane_n_gt_global[:self.n_max_local_instances].shape)
cylinder_axis_gt = np.zeros((nb_patches,) + cylinder_axis_gt_global[:self.n_max_local_instances].shape)
cone_axis_gt = np.zeros((nb_patches,) + cone_axis_gt_global[:self.n_max_local_instances].shape)
for i in range(nb_patches):
flag = -1 in I_gt[i]
unique_values, inverse_values = np.unique(I_gt[i], return_inverse=True)
if flag: inverse_values = inverse_values - 1
I_gt[i] = inverse_values
P_gt[i,np.arange(len(unique_values))] = P_gt_global[unique_values]
T_gt[i, np.arange(len(unique_values))] = T_gt_global[unique_values]
plane_n_gt[i, np.arange(len(unique_values))] = plane_n_gt_global[unique_values]
cylinder_axis_gt[i, np.arange(len(unique_values))] = cylinder_axis_gt_global[unique_values]
cone_axis_gt[i, np.arange(len(unique_values))] = cone_axis_gt_global[unique_values]
# Loading the features
glob_features = np.load(os.path.join(self.dir_spfn, folder.replace('.h5',''), 'global_feat.npy'))
loc_features = np.load(os.path.join(self.dir_spfn, folder.replace('.h5',''), 'local_feat_full.npy'))
list_glob_features = []
list_loc_features = []
for patch_id in range(nb_patches):
list_glob_features.append(glob_features)
list_loc_features.append(loc_features[:,patch_id])
glob_features = np.stack(list_glob_features, axis=0)
loc_features = np.stack(list_loc_features, axis=0)
else:
nb_patches = 0
P = np.zeros([0, 8192, 3]).astype(np.float32)
normal_gt = np.zeros([0, 8192, 3]).astype(np.float32)
I_gt = np.zeros([0, 8192]).astype(np.int64)
glob_features = np.zeros([0, 1024]).astype(np.float32)
loc_features = np.zeros([0, 128]).astype(np.float32)
patch_indices = np.zeros([0, 8192]).astype(np.int64)
P_unormalised = np.zeros([0, 8192, 3]).astype(np.float32)
P_gt = np.zeros([0, 21, 512, 3]).astype(np.float32)
T_gt = np.zeros([0, 21]).astype(np.int64)
plane_n_gt = np.zeros([0, 21, 3]).astype(np.float32)
cylinder_axis_gt = np.zeros([0, 21, 3]).astype(np.float32)
cone_axis_gt = np.zeros([0, 21, 3]).astype(np.float32)
# Loading the SPFN output
spfn_labels = np.load(os.path.join(self.dir_spfn, folder.replace('.h5', ''), 'object_seg.npy'))
spfn_normals = np.load(os.path.join(self.dir_spfn, folder.replace('.h5', ''), 'object_normals.npy'))
spfn_type = np.load(os.path.join(self.dir_spfn, folder.replace('.h5', ''), 'object_type.npy'))
# Shuffling the output
for i in range(nb_patches):
perm = np.random.permutation(num_local_points)
P[i] = P[i, perm]
P_unormalised[i] = P_unormalised[i, perm]
normal_gt[i] = normal_gt[i, perm]
I_gt[i] = I_gt[i, perm]
patch_indices[i] = patch_indices[i, perm]
# Exporting all the data
P = torch.from_numpy(P.astype(np.float32))
normal_gt = torch.from_numpy(normal_gt.astype(np.float32))
P_gt = torch.from_numpy(P_gt.astype(np.float32))
I_gt = torch.from_numpy(I_gt.astype(np.int64))
T_gt = torch.from_numpy(T_gt.astype(np.int64))
plane_n_gt = torch.from_numpy(plane_n_gt.astype(np.float32))
cylinder_axis_gt = torch.from_numpy(cylinder_axis_gt.astype(np.float32))
cone_axis_gt = torch.from_numpy(cone_axis_gt.astype(np.float32))
patch_indices = torch.from_numpy(patch_indices.astype(np.float32))
spfn_labels = torch.from_numpy(spfn_labels.astype(np.int64))
spfn_normals = torch.from_numpy(spfn_normals.astype(np.float32))
spfn_type = torch.from_numpy(spfn_type.astype(np.float32))
glob_features = torch.from_numpy(glob_features.astype(np.float32))
loc_features = torch.from_numpy(loc_features.astype(np.float32))
I_gt_global = torch.from_numpy(I_gt_global.astype(np.int64))
return P, normal_gt, P_gt_global, I_gt, T_gt_global, patch_indices, spfn_labels, spfn_normals, spfn_type, glob_features, loc_features, P_global, normal_gt_global, I_gt_global, plane_n_gt_global, cylinder_axis_gt_global, cone_axis_gt_global, P_unormalised, P_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt
def __len__(self):
return self.n_data
class RandomSampler(data.sampler.Sampler):
def __init__(self, data_source, seed=None, identical_epochs=False):
self.data_source = data_source
self.seed = seed
if self.seed is None:
self.seed = np.random.randint(0, 2 ** 32 - 1, 1)[0]
self.identical_epochs = identical_epochs
self.rng = np.random.RandomState(self.seed)
self.total_samples_count = len(self.data_source)
def __iter__(self):
if self.identical_epochs:
self.rng.seed(self.seed)
return iter(self.rng.choice(self.total_samples_count, size=self.total_samples_count, replace=False))
def __len__(self):
return self.total_samples_count
class Sampler(data.sampler.Sampler):
def __init__(self, data_source):
self.data_source = data_source
self.total_samples_count = len(self.data_source)
def __iter__(self):
return iter(np.arange(0, self.total_samples_count))
def __len__(self):
return self.total_samples_count
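# Usage sketch (illustrative, not part of the original file): both samplers
# plug into a standard DataLoader. With identical_epochs=True, RandomSampler
# re-seeds its RNG in __iter__, so every epoch replays the same shuffled order:
#   sampler = RandomSampler(dataset, seed=42, identical_epochs=True)
#   loader = data.DataLoader(dataset, batch_size=8, sampler=sampler)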
| 19,560 | 54.729345 | 318 |
py
|
CPFN
|
CPFN-master/PointNet2/pn2_network.py
|
# Package imports
import os
import sys
import torch
import numpy as np
from SPFN.losses_implementation import compute_all_losses
from PointNet2.pointnet2_ops.modules.pointset_abstraction import PointsetAbstraction
from PointNet2.pointnet2_ops.modules.pointset_feature_propagation import PointsetFeaturePropagation
class PointNet2(torch.nn.Module):
def __init__(self, dim_input=3, dim_pos=3, output_sizes=[16], use_glob_features=False, use_loc_features=False, features_extractor=False):
super(PointNet2, self).__init__()
self.dim_pos = dim_pos
self.use_glob_features = use_glob_features
self.use_loc_features = use_loc_features
self.features_extractor = features_extractor
# Encoding stage
self.sa1 = PointsetAbstraction(num_points=512, dim_pos=dim_pos, dim_feats=dim_input-dim_pos, radius_list=[0.2], num_samples_list=[64], mlp_list=[[64,64,128]], group_all=False)
self.sa2 = PointsetAbstraction(num_points=128, dim_pos=dim_pos, dim_feats=128, radius_list=[0.4], num_samples_list=[64], mlp_list=[[128,128,256]], group_all=False)
self.sa3 = PointsetAbstraction(num_points=None, dim_pos=dim_pos, dim_feats=256, radius_list=None, num_samples_list=None, mlp_list=[256, 512, 1024], group_all=True)
# Decoding stage
offset = 0
if self.use_glob_features:
offset += 1024
if self.use_loc_features:
offset += 128
self.sfp1 = PointsetFeaturePropagation(dim_feats=1024+offset+256, mlp=[256,256])
self.sfp2 = PointsetFeaturePropagation(dim_feats=256+128, mlp=[256,128])
self.sfp3 = PointsetFeaturePropagation(dim_feats=128+dim_input-dim_pos, mlp=[128,128,128])
# FC stage
self.fc1 = torch.nn.Conv1d(128, 128, 1)
if not self.features_extractor:
self.bn1 = torch.nn.BatchNorm1d(128)
self.fc2 = torch.nn.ModuleList()
for output_size in output_sizes:
self.fc2.append(torch.nn.Conv1d(128, output_size, 1))
def forward(self, x, glob_features=None, loc_features=None, fast=True):
x = x.transpose(2,1)
batch_size = x.shape[0]
input_pos = x[:,:self.dim_pos,:]
if x.shape[1] > self.dim_pos:
input_feats = x[:,self.dim_pos:,:]
else:
input_feats = None
# Encoding stage
l1_pos, l1_feats = self.sa1(input_pos, input_feats, fast=fast)
l2_pos, l2_feats = self.sa2(l1_pos, l1_feats, fast=fast)
l3_pos, l3_feats = self.sa3(l2_pos, l2_feats, fast=fast)
# Adding additional features
if self.use_glob_features:
l3_feats = torch.cat((l3_feats, glob_features.unsqueeze(2)), dim=1)
if self.use_loc_features:
l3_feats = torch.cat((l3_feats, loc_features.unsqueeze(2)), dim=1)
# Decoding stage
l4_feats = self.sfp1(l2_pos, l3_pos, l2_feats, l3_feats, fast=fast)
l5_feats = self.sfp2(l1_pos, l2_pos, l1_feats, l4_feats, fast=fast)
l6_feats = self.sfp3(input_pos, l1_pos, input_feats, l5_feats, fast=fast)
# FC stage
output_feat = self.fc1(l6_feats)
if not self.features_extractor:
output_feat = torch.nn.functional.relu(self.bn1(output_feat))
            # pass the module's training flag so dropout is disabled in eval mode
            output_feat = torch.nn.functional.dropout(output_feat, p=0.5, training=self.training)
results = []
for fc2_layer in self.fc2:
result = fc2_layer(output_feat)
result = result.transpose(1,2)
results.append(result)
results.append(l3_feats)
results.append(output_feat)
return results
else:
return l3_feats, output_feat
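# ----------------------------------------------------------------------------
# Minimal smoke test (illustrative sketch, not part of the original file).
# It takes the pure-PyTorch fallback at runtime via fast=False; the package
# (including the compiled cuda_ops extension) must still be importable.
if __name__ == '__main__':
    net = PointNet2(dim_input=3, dim_pos=3, output_sizes=[16])
    points = torch.rand(2, 1024, 3) # [batch, num_points, xyz]
    with torch.no_grad():
        outputs = net(points, fast=False)
    # outputs: per-point head [2, 1024, 16], then l3_feats and output_feat
    print([tuple(o.shape) for o in outputs])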
| 3,692 | 49.589041 | 183 |
py
|
CPFN
|
CPFN-master/PointNet2/pointnet2_ops/setup.py
|
import os
import glob
import setuptools
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
sources = glob.glob("cuda_ops/src/*.cpp") + glob.glob("cuda_ops/src/*.cu")
headers = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cuda_ops/include')
setuptools.setup(
name="pointnet2_ops",
version="1.0",
description="PointNet++ modules",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
ext_modules=[
CUDAExtension(
name='cuda_ops',
sources=sources,
extra_compile_args={
"cxx": ["-O2", "-I{}".format(headers)],
"nvcc": ["-O2", "-I{}".format(headers)],
},
)
],
cmdclass={"build_ext": BuildExtension},
)
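# Typical build step (illustrative; standard setuptools usage):
#   python setup.py build_ext --inplace
# compiles the globbed .cpp/.cu sources into the `cuda_ops` extension that
# the modules under PointNet2/pointnet2_ops/modules import.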
| 915 | 28.548387 | 86 |
py
|
CPFN
|
CPFN-master/PointNet2/pointnet2_ops/modules/pointset_feature_propagation.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .geometry_utils import three_nn, three_weighted_sum
class PointsetFeaturePropagation(nn.Module):
"""
Propagate features from an abstracted point set back to the original point set,
analogous to upsampling followed by 1x1 convolutions on an image grid.
"""
def __init__(self, dim_feats, mlp):
super(PointsetFeaturePropagation, self).__init__()
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
in_channel = dim_feats
for out_channel in mlp:
self.mlp_convs.append(nn.Conv1d(in_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm1d(out_channel))
in_channel = out_channel
def forward(self, pos1, pos2, feats1, feats2, fast=True):
"""
Run PointSetFeaturePropagation.
Args:
pos1: input point set position data, [B, C, N]
pos2: abstracted point set position data, [B, C, S]
feats1: input point set feature data, [B, D, N]
feats2: abstracted point set feature data, [B, D, S]
Returns:
new_feats: upsampled point set feature data, [B, D', N]
"""
B, _, N = pos1.shape
if pos2 is None:
interpolated_feats = feats2.repeat(1, 1, N)
else:
S = pos2.shape[2]
# get 3 nearest neighbors for interpolation
nn_dists, nn_indices = three_nn(point_pos=pos2, query_pos=pos1, fast=fast)
# get interpolation weights
nn_dists_recip = 1.0 / (nn_dists + 1e-8)
norm = torch.sum(nn_dists_recip, dim=2, keepdim=True)
nn_weights = nn_dists_recip / norm
# interpolate features of 3 nearest neighbors
interpolated_feats = three_weighted_sum(point_feats=feats2, indices=nn_indices, weights=nn_weights, fast=fast)
if feats1 is not None:
new_feats = torch.cat([feats1, interpolated_feats], dim=1)
else:
new_feats = interpolated_feats
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_feats = F.relu(bn(conv(new_feats)))
return new_feats
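# ----------------------------------------------------------------------------
# Minimal shape check (illustrative sketch, not part of the original file).
# Run as a module once the package (and its cuda_ops extension) is importable;
# the check itself takes the pure-PyTorch path via fast=False.
if __name__ == '__main__':
    fp = PointsetFeaturePropagation(dim_feats=16 + 8, mlp=[32, 32])
    pos1 = torch.rand(2, 3, 128)    # original point positions
    pos2 = torch.rand(2, 3, 32)     # abstracted point positions
    feats1 = torch.rand(2, 16, 128) # features at the original points
    feats2 = torch.rand(2, 8, 32)   # features at the abstracted points
    out = fp(pos1, pos2, feats1, feats2, fast=False)
    print(out.shape) # torch.Size([2, 32, 128])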
| 2,234 | 41.980769 | 122 |
py
|
CPFN
|
CPFN-master/PointNet2/pointnet2_ops/modules/pointset_abstraction.py
|
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from .geometry_utils import farthest_point_sample, select_point_subset, ball_query
class PointsetAbstraction(nn.Module):
"""
Abstract a point set (possibly with features) into a smaller point set,
analogous to a strided convolution on an image grid.
"""
def __init__(self, num_points, dim_pos, dim_feats, radius_list, num_samples_list, mlp_list, group_all=False):
super(PointsetAbstraction, self).__init__()
self.num_points = num_points
self.group_all = group_all
self.radius_list = radius_list if isinstance(radius_list, Sequence) else [radius_list]
self.num_samples_list = num_samples_list if isinstance(num_samples_list, Sequence) else [num_samples_list]
self.mlp_list = mlp_list if isinstance(mlp_list[0], Sequence) else [mlp_list]
if len(self.radius_list) != len(self.num_samples_list) or len(self.radius_list) != len(self.mlp_list):
raise ValueError('Radius, number of samples and mlps lists must have the same number of entries.')
self.conv_blocks = nn.ModuleList()
self.bn_blocks = nn.ModuleList()
for i in range(len(self.mlp_list)):
convs = nn.ModuleList()
bns = nn.ModuleList()
in_channel = dim_pos + dim_feats
for out_channel in self.mlp_list[i]:
convs.append(nn.Conv2d(in_channel, out_channel, 1))
bns.append(nn.BatchNorm2d(out_channel))
in_channel = out_channel
self.conv_blocks.append(convs)
self.bn_blocks.append(bns)
def forward(self, pos, feats, fast=True):
"""
Args:
pos: input point set position data, [B, C, N]
feats: input point set feature data, [B, D, N]
Returns:
new_pos: abstracted point set position data, [B, C, S]
new_feats: abstracted point set feature data, [B, D', S]
"""
B, C, N = pos.shape
S = self.num_points
if self.group_all:
subsampling_indices = None
new_pos = None
else:
subsampling_indices = farthest_point_sample(pos, S, fast=fast)
new_pos = select_point_subset(pos, subsampling_indices)
new_feats_list = []
for i, r in enumerate(self.radius_list):
if self.group_all:
grouped_pos = pos.view(B, C, 1, N)
if feats is not None:
grouped_feats = torch.cat([grouped_pos, feats.view(B, -1, 1, N)], dim=1)
else:
grouped_feats = grouped_pos
else:
K = self.num_samples_list[i]
group_idx = ball_query(r, K, pos, new_pos, fast=fast)
grouped_pos = select_point_subset(pos, group_idx)
grouped_pos -= new_pos.view(B, C, S, 1)
if feats is not None:
grouped_feats = select_point_subset(feats, group_idx)
grouped_feats = torch.cat([grouped_feats, grouped_pos], dim=1)
else:
grouped_feats = grouped_pos
# grouped_feats = grouped_feats.permute(0, 3, 2, 1) # [B, D, K, S]
for j in range(len(self.conv_blocks[i])):
conv = self.conv_blocks[i][j]
bn = self.bn_blocks[i][j]
grouped_feats = F.relu(bn(conv(grouped_feats.contiguous()))) # grouped_feats: [B, D, S, K]
new_feats = torch.max(grouped_feats, dim=3)[0] # new_feats: [B, D', S]
new_feats_list.append(new_feats)
new_feats = torch.cat(new_feats_list, dim=1)
return new_pos, new_feats
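# ----------------------------------------------------------------------------
# Minimal shape check (illustrative sketch, not part of the original file);
# uses the pure-PyTorch path (fast=False).
if __name__ == '__main__':
    sa = PointsetAbstraction(num_points=64, dim_pos=3, dim_feats=8,
                             radius_list=[0.4], num_samples_list=[16],
                             mlp_list=[[16, 32]], group_all=False)
    pos = torch.rand(2, 3, 256)   # [B, C, N] positions
    feats = torch.rand(2, 8, 256) # [B, D, N] features
    new_pos, new_feats = sa(pos, feats, fast=False)
    print(new_pos.shape, new_feats.shape) # [2, 3, 64], [2, 32, 64]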
| 3,755 | 47.779221 | 114 |
py
|
CPFN
|
CPFN-master/PointNet2/pointnet2_ops/modules/geometry_utils.py
|
import torch
from .. import cuda_ops
def pairwise_squared_distance(src, dst):
"""
Calculate squared euclidean distance between each pair of points from src to dst.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Args:
src: source points, [B, C, N]
dst: target points, [B, C, M]
Output:
dist: per-point square distance, [B, N, M]
"""
B, _, N = src.shape
_, _, M = dst.shape
dist = -2 * torch.matmul(src.permute(0, 2, 1), dst)
dist += torch.sum(src ** 2, dim=1).view(B, N, 1)
dist += torch.sum(dst ** 2, dim=1).view(B, 1, M)
return dist
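# Quick sanity check (illustrative): the expansion above matches torch.cdist.
#   src, dst = torch.rand(1, 3, 5), torch.rand(1, 3, 7)
#   ref = torch.cdist(src.transpose(1, 2), dst.transpose(1, 2)) ** 2
#   assert torch.allclose(pairwise_squared_distance(src, dst), ref, atol=1e-5)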
def select_point_subset(points, idx):
"""
Select a different subset of points in each batch (same number, but different indices in each batch).
If the indices have more than one dimension per batch, the returned point tensor is shaped like the indices
(see args/returns for details).
Args:
points: input points data, [B, C, N]
idx: sample index data, [B]+[*] (* may be any number of dimensions)
Returns:
new_points:, indexed points data, [B, C]+[*]
"""
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=idx.dtype, device=idx.device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, :, idx].permute(0, -1, *range(1, points.dim()+idx.dim()-3))
return new_points
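# Shape example (illustrative): with points [B, C, N] and idx [B, S, K], the
# result is [B, C, S, K] -- the indexed points, shaped like the indices:
#   pts = torch.rand(2, 3, 100)
#   idx = torch.randint(0, 100, (2, 16, 8))
#   assert select_point_subset(pts, idx).shape == (2, 3, 16, 8)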
class _FastFarthestPointSample(torch.autograd.Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative farthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
[B, N, 3] tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
[B, num_point] tensor containing the set
Based on: https://github.com/erikwijmans/Pointnet2_PyTorch
"""
return cuda_ops.farthest_point_sampling(xyz, npoint)
@staticmethod
def backward(xyz, a=None):
return None, None
_fast_farthest_point_sample = _FastFarthestPointSample.apply
def farthest_point_sample(point_pos, num_point, fast=True):
"""
Args:
point_pos: pointcloud data, [B, C, N]
num_point: number of samples
fast: use faster version with custom CUDA kernel (only works with C==3)
Returns:
farthest_indices: sampled pointcloud index, [B, num_point]
"""
if fast:
if point_pos.shape[1] != 3:
raise ValueError('Points must have exactly three position dimensions when using the fast method.')
return _fast_farthest_point_sample(point_pos.permute(0, 2, 1).contiguous(), num_point).to(dtype=torch.long)
else:
device = point_pos.device
B, C, N = point_pos.shape
farthest_indices = torch.zeros(B, num_point, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest_index = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(num_point):
farthest_indices[:, i] = farthest_index
far_pos = point_pos[batch_indices, :, farthest_index].view(B, C, 1)
dist = torch.sum((point_pos - far_pos) ** 2, dim=1)
mask = dist < distance
distance[mask] = dist[mask]
farthest_index = torch.max(distance, -1)[1]
return farthest_indices
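# Example (illustrative): pick 4 well-spread points from a cloud of 100 using
# the pure-PyTorch path.
#   pos = torch.rand(1, 3, 100)
#   idx = farthest_point_sample(pos, 4, fast=False) # LongTensor [1, 4]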
class _FastBallQuery(torch.autograd.Function):
@staticmethod
def forward(ctx, radius, num_samples, point_pos, query_pos):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
num_samples : int
maximum number of features in the balls
point_pos : torch.Tensor
[B, N, 3] xyz coordinates of the features
query_pos : torch.Tensor
[B, S, 3] centers of the ball query
Returns
-------
torch.Tensor
            [B, S, num_samples] tensor with the indices of the features that form the query balls
"""
return cuda_ops.ball_query(query_pos, point_pos, radius, num_samples)
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
_fast_ball_query = _FastBallQuery.apply
def ball_query(radius, num_samples, point_pos, query_pos, fast=True):
"""
    Return up to num_samples indices of points within a fixed radius of each query point; slots beyond the number of points found are padded with the first found index.
Args:
radius: local region radius
num_samples: max sample number in local region
point_pos: all points, [B, C, N]
query_pos: query points, [B, C, S]
fast: use faster version with custom CUDA kernel (only works with C==3)
Returns:
group_indices: grouped point indices, [B, S, num_samples]
"""
if fast:
if point_pos.shape[1] != 3:
raise ValueError('Points must have exactly three position dimensions when using the fast method.')
return _fast_ball_query(
radius, num_samples, point_pos.permute(0, 2, 1).contiguous(), query_pos.permute(0, 2, 1).contiguous()).to(dtype=torch.long)
else:
device = point_pos.device
B, _, N = point_pos.shape
_, _, S = query_pos.shape
group_indices = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = pairwise_squared_distance(query_pos, point_pos)
group_indices[sqrdists > radius ** 2] = N
group_indices = group_indices.sort(dim=-1)[0][:, :, :num_samples]
group_first = group_indices[:, :, 0].view(B, S, 1).repeat([1, 1, num_samples])
mask = group_indices == N
group_indices[mask] = group_first[mask]
return group_indices
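# Example (illustrative): for each query point, gather up to 16 neighbor
# indices within radius 0.2; balls with fewer than 16 points are padded with
# the first found index (the group_first fallback above).
#   group_idx = ball_query(0.2, 16, point_pos, query_pos, fast=False) # [B, S, 16]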
class _FastThreeNN(torch.autograd.Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            [B, S, 3] tensor of unknown (query) features
        known : torch.Tensor
            [B, N, 3] tensor of known features
Returns
-------
dist : torch.Tensor
[B, S, 3] l2 distance to the three nearest neighbors
idx : torch.Tensor
[B, S, 3] index of 3 nearest neighbors
"""
dist2, idx = cuda_ops.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
_fast_three_nn = _FastThreeNN.apply
def three_nn(point_pos, query_pos, fast=True):
"""
Return the three nearest neighbors for each of the query points.
Args:
point_pos: all points, [B, C, N]
query_pos: query points, [B, C, S]
fast: use faster version with custom CUDA kernel (only works with C==3)
Returns:
        dists: euclidean (l2) distances, [B, S, 3]
indices: indices of the nearest neighbors, [B, S, 3]
"""
if fast:
if point_pos.shape[1] != 3:
raise ValueError('Points must have exactly three position dimensions when using the fast method.')
dists, indices = _fast_three_nn(
query_pos.permute(0, 2, 1).contiguous(),
point_pos.permute(0, 2, 1).contiguous())
indices = indices.to(dtype=torch.long)
return dists, indices
else:
dists = pairwise_squared_distance(query_pos, point_pos)
dists, indices = dists.sort(dim=-1)
        dists, indices = dists[:, :, :3], indices[:, :, :3]
        # take the square root so the slow path matches the l2 distances
        # returned by the fast CUDA path above
        return torch.sqrt(dists), indices
class _FastThreeWeightedSum(torch.autograd.Function):
@staticmethod
def forward(ctx, features, idx, weight):
# type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor
r"""
        Performs weighted linear interpolation over 3 features
Parameters
----------
features : torch.Tensor
[B, C, N] Features descriptors to be interpolated from
idx : torch.Tensor
[B, S, 3] three nearest neighbors of the target features in features
weight : torch.Tensor
[B, S, 3] weights
Returns
-------
torch.Tensor
[B, C, S] tensor of the interpolated features
"""
_, _, N = features.size()
# S = idx.size(1)
ctx.three_weighted_sum_for_backward = (idx, weight, N)
return cuda_ops.three_weighted_sum(features, idx.int(), weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, C, S) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, C, N) tensor with gradients of features
None
None
"""
idx, weight, N = ctx.three_weighted_sum_for_backward
grad_features = cuda_ops.three_weighted_sum_grad(
grad_out.contiguous(), idx.int(), weight, N
)
return grad_features, None, None
_fast_three_weighted_sum = _FastThreeWeightedSum.apply
def three_weighted_sum(point_feats, indices, weights, fast=True):
"""
    Interpolate features of the three nearest neighbors for each of the query points.
Args:
point_feats: all points, [B, C, N]
indices: indices of the points to be summed, [B, S, 3]
weights: weights of the points to be summed, [B, S, 3]
fast: use faster version with custom CUDA kernel
Returns:
weighted sum of each triple [B, C, S]
"""
if fast:
return _fast_three_weighted_sum(point_feats, indices, weights)
else:
return torch.sum(
select_point_subset(point_feats, indices) *
weights.view(indices.shape[0], 1, indices.shape[1], indices.shape[2]), dim=-1)
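# ----------------------------------------------------------------------------
# Minimal consistency check (illustrative sketch, not part of the original
# file); run as a module with the cuda_ops extension importable. With the
# slow paths, a query that coincides with a data point gets a near-zero
# distance, so its inverse-distance weight dominates and interpolation
# reproduces that point's features.
if __name__ == '__main__':
    point_pos = torch.rand(1, 3, 32)
    point_feats = torch.rand(1, 8, 32)
    query_pos = point_pos[:, :, :5] # queries sitting exactly on data points
    dists, indices = three_nn(point_pos, query_pos, fast=False)
    weights = 1.0 / (dists + 1e-8)
    weights = weights / weights.sum(dim=2, keepdim=True)
    interp = three_weighted_sum(point_feats, indices, weights, fast=False)
    assert torch.allclose(interp, point_feats[:, :, :5], atol=1e-4)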
| 10,626 | 36.419014 | 135 |
py
|
pdf2image
|
pdf2image-master/tests.py
|
import os
import sys
import errno
import pathlib
import tempfile
import unittest
import time
import shutil
import subprocess
from inspect import signature
from subprocess import Popen, PIPE
from tempfile import TemporaryDirectory
from multiprocessing.dummy import Pool
from memory_profiler import profile as profile_memory
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from pdf2image import (
convert_from_bytes,
convert_from_path,
pdfinfo_from_bytes,
pdfinfo_from_path,
)
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError,
PDFPopplerTimeoutError,
)
from functools import wraps
PROFILE_MEMORY = os.environ.get("PROFILE_MEMORY", False)
try:
subprocess.call(
["pdfinfo", "-h"], stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w")
)
POPPLER_INSTALLED = True
except OSError as e:
if e.errno == errno.ENOENT:
POPPLER_INSTALLED = False
def profile(f):
if PROFILE_MEMORY:
@wraps(f)
@profile_memory
def wrapped(*args, **kwargs):
r = f(*args, **kwargs)
return r
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
r = f(*args, **kwargs)
return r
return wrapped
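# Setting the PROFILE_MEMORY environment variable (e.g. PROFILE_MEMORY=1
# python tests.py) wraps every @profile-decorated test with memory_profiler's
# line-by-line report; otherwise the decorator is a transparent pass-through.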
def get_poppler_path():
return pathlib.Path(
Popen(["which", "pdftoppm"], stdout=PIPE).communicate()[0].strip().decode()
).parent
class PDFConversionMethods(unittest.TestCase):
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read())
self.assertTrue(len(images_from_bytes) == 1)
print("test_conversion_from_bytes: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf")
self.assertTrue(len(images_from_path) == 1)
print("test_conversion_from_path: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path("./tests/test.pdf", output_folder=path)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read())
self.assertTrue(len(images_from_bytes) == 14)
print(
"test_conversion_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf")
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_path_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path
)
self.assertTrue(len(images_from_bytes) == 14)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path
)
self.assertTrue(len(images_from_path) == 14)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_bytes_241(self): # pragma: no cover
start_time = time.time()
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read())
self.assertTrue(len(images_from_bytes) == 241)
print(
"test_conversion_from_bytes_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_path_241(self): # pragma: no cover
start_time = time.time()
images_from_path = convert_from_path("./tests/test_241.pdf")
self.assertTrue(len(images_from_path) == 241)
print(
"test_conversion_from_path_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_bytes_using_dir_241(self): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path
)
self.assertTrue(len(images_from_bytes) == 241)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_path_using_dir_241(self): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_241.pdf", output_folder=path
)
self.assertTrue(len(images_from_path) == 241)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_empty_if_not_pdf(self):
start_time = time.time()
with self.assertRaises(Exception):
convert_from_path("./tests/test.jpg")
print("test_empty_if_not_pdf: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_empty_if_file_not_found(self):
start_time = time.time()
with self.assertRaises(Exception):
convert_from_path("./tests/totally_a_real_file_in_folder.xyz")
print("test_empty_if_file_not_found: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_empty_if_corrupted_pdf(self):
start_time = time.time()
with self.assertRaises(Exception):
convert_from_path("./tests/test_corrupted.pdf")
print("test_empty_if_corrupted_pdf: {} sec".format(time.time() - start_time))
## Test first page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_first_page_12(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), first_page=12)
self.assertTrue(len(images_from_bytes) == 3)
print(
"test_conversion_from_bytes_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_12(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", first_page=12)
self.assertTrue(len(images_from_path) == 3)
print(
"test_conversion_from_path_14_first_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_first_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, first_page=12
)
self.assertTrue(len(images_from_bytes) == 3)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_first_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_first_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, first_page=12
)
self.assertTrue(len(images_from_path) == 3)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_first_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test last page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_last_page_12(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), last_page=12)
self.assertTrue(len(images_from_bytes) == 12)
print(
"test_conversion_from_bytes_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_last_page_12(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", last_page=12)
self.assertTrue(len(images_from_path) == 12)
print(
"test_conversion_from_path_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, last_page=12
)
self.assertTrue(len(images_from_bytes) == 12)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, last_page=12
)
self.assertTrue(len(images_from_path) == 12)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test first and last page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_first_page_2_last_page_12(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), first_page=2, last_page=12
)
self.assertTrue(len(images_from_bytes) == 11)
print(
"test_conversion_from_bytes_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_2_last_page_12(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test_14.pdf", first_page=2, last_page=12
)
self.assertTrue(len(images_from_path) == 11)
print(
"test_conversion_from_path_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_first_page_2_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, first_page=2, last_page=12
)
self.assertTrue(len(images_from_bytes) == 11)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_first_page_2_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, first_page=2, last_page=12
)
self.assertTrue(len(images_from_path) == 11)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test output as jpeg
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_jpeg_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="jpg")
self.assertTrue(images_from_bytes[0].format == "JPEG")
print(
"test_conversion_to_jpeg_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_jpeg_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, fmt="jpeg"
)
self.assertTrue(images_from_path[0].format == "JPEG")
[im.close() for im in images_from_path]
print(
"test_conversion_to_jpeg_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test output as png
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_png_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="png")
self.assertTrue(images_from_bytes[0].format == "PNG")
print(
"test_conversion_to_png_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_png_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, fmt="png"
)
self.assertTrue(images_from_path[0].format == "PNG")
[im.close() for im in images_from_path]
print(
"test_conversion_to_png_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test output with not-empty output_folder
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_non_empty_output_folder(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder="./tests/"
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
[os.remove(im.filename) for im in images_from_path]
print(
"test_non_empty_output_folder: {} sec".format(
                time.time() - start_time
)
)
## Test format that starts with a dot
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_format_that_starts_with_a_dot(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_format_that_starts_with_a_dot: {} sec".format(
time.time() - start_time
)
)
## Test locked PDF
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_userpw_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_user_only.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_not_locked_pdf(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_ownerpw_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_owner_only.pdf", "rb") as pdf_file:
# No need to pass a ownerpw because the absence of userpw means we can read it anyway
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_ownerpw_only: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_ownerpw_and_userpw(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_both.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_ownerpw_and_userpw: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_ownerpw_and_userpw_forgotten(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_both_user_forgotten.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", ownerpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_ownerpw_and_userpw_forgotten: {} sec".format(
time.time() - start_time
)
)
## Tests cropbox
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_cropbox(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), use_cropbox=True)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_using_cropbox: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_cropbox(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", use_cropbox=True)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_using_cropbox: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_and_cropbox(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, use_cropbox=True
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_and_cropbox: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_and_cropbox(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, use_cropbox=True
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_and_cropbox: {} sec".format(
time.time() - start_time
)
)
## Tests multithreading
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_with_4_threads(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=4)
self.assertTrue(len(images_from_bytes) == 14)
print(
"test_conversion_from_bytes_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_with_4_threads(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", thread_count=4)
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_path_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_with_15_threads(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=15)
self.assertTrue(len(images_from_bytes) == 14)
print(
"test_conversion_from_bytes_14_with_15_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_with_0_threads(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", thread_count=0)
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_path_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_with_4_threads(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_bytes) == 14)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_with_4_threads(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_path) == 14)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_241_with_4_threads(self): # pragma: no cover
start_time = time.time()
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=4)
self.assertTrue(len(images_from_bytes) == 241)
print(
"test_conversion_from_bytes_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_241_with_4_threads(self): # pragma: no cover
start_time = time.time()
images_from_path = convert_from_path("./tests/test_241.pdf", thread_count=4)
self.assertTrue(len(images_from_path) == 241)
print(
"test_conversion_from_path_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_241_with_4_threads(
self,
): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_bytes) == 241)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_241_with_4_threads(
self,
): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_241.pdf", output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_path) == 241)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
# Testing custom exceptions
@unittest.skipIf(POPPLER_INSTALLED, "Poppler is installed, skipping.")
def test_pdfinfo_not_installed_throws(self):
start_time = time.time()
try:
images_from_path = convert_from_path("./tests/test_14.pdf")
raise Exception("This should not happen")
except PDFInfoNotInstalledError as ex:
pass
print(
"test_pdfinfo_not_installed_throws: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_missingfonterror_throws(self):
start_time = time.time()
try:
images_from_path = convert_from_path("./tests/test_strict.pdf", strict=True)
raise Exception("This should not happen")
except PDFSyntaxError as ex:
pass
print("test_syntaxerror_throws: {} sec".format(time.time() - start_time))
# Test transparent
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_transparent(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), transparent=True, fmt="png"
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_using_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_transparent(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf", transparent=True, fmt="png"
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_using_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_and_transparent(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, transparent=True, fmt="png"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_and_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_and_transparent(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, transparent=True, fmt="png"
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_and_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_transparent_without_png(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", transparent=True)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_transparent_without_png: {} sec".format(
time.time() - start_time
)
)
## Test output as TIFF
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_tiff_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="tiff")
self.assertTrue(images_from_bytes[0].format == "TIFF")
print(
"test_conversion_to_tiff_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_tiff_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, fmt="tiff"
)
self.assertTrue(images_from_path[0].format == "TIFF")
[im.close() for im in images_from_path]
print(
"test_conversion_to_tiff_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test hanging file handles
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(not os.name == "posix", "This test only works on posix systems")
def test_close_tempfile_after_conversion(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
fd_count_before = len(
subprocess.check_output(
["ls", "-l", "/proc/" + str(os.getpid()) + "/fd"]
)
.decode("utf8")
.split("\n")
)
pdf_data = pdf_file.read()
images_from_bytes = []
for i in range(50):
images_from_bytes.extend(convert_from_bytes(pdf_data))
# Closing the images
[im.close() for im in images_from_bytes]
pid = os.getpid()
fd_count_after = len(
subprocess.check_output(
["ls", "-l", "/proc/" + str(os.getpid()) + "/fd"]
)
.decode("utf8")
.split("\n")
)
# Add an error margin
self.assertTrue(abs(fd_count_before - fd_count_after) <= 3)
print(
"test_close_tempfile_after_conversion: {} sec".format(
time.time() - start_time
)
)
## Test poppler_path
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(not os.name == "posix", "This test only works on posix systems")
def test_use_poppler_path(self):
os.mkdir("./bin")
shutil.copy("/usr/bin/pdftoppm", "./bin")
shutil.copy("/usr/bin/pdfinfo", "./bin")
start_time = time.time()
try:
images_from_path = convert_from_path(
"./tests/test.pdf", poppler_path="./bin"
)
finally:
shutil.rmtree("./bin")
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_poppler_path: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(not os.name == "posix", "This test only works on posix systems")
def test_use_poppler_path_with_trailing_slash(self):
os.mkdir("./bin")
shutil.copy("/usr/bin/pdftoppm", "./bin")
shutil.copy("/usr/bin/pdfinfo", "./bin")
start_time = time.time()
try:
images_from_path = convert_from_path(
"./tests/test.pdf", poppler_path="./bin/"
)
finally:
shutil.rmtree("./bin")
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_poppler_path_with_trailing_slash: {} sec".format(
time.time() - start_time
)
)
## Test first page greater or equal to last_page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_1_last_page_1(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test_14.pdf", first_page=1, last_page=1
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_14_first_page_1_last_page_1: {} sec".format(
                time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_12_last_page_1(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test_14.pdf", first_page=12, last_page=1
)
self.assertTrue(len(images_from_path) == 0)
print(
"test_conversion_from_path_14_first_page_12_last_page_1: {} sec".format(
                time.time() - start_time
)
)
## Test singlefile
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_single_file(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
output_folder=path,
output_file="test",
single_file=True,
)
self.assertTrue(len(images_from_bytes) == 1)
self.assertTrue(
images_from_bytes[0].filename == os.path.join(path, "test.ppm")
)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_single_file: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_single_file(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf",
output_folder=path,
output_file="test",
single_file=True,
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(
images_from_path[0].filename == os.path.join(path, "test.ppm")
)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_single_file: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_single_file(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf",
output_folder=path,
output_file="test",
single_file=True,
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(
images_from_path[0].filename == os.path.join(path, "test.ppm")
)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_single_file: {} sec".format(
                time.time() - start_time
)
)
## Test file with same name in directory
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_with_containing_file_with_same_name(self):
start_time = time.time()
with TemporaryDirectory() as path:
shutil.copyfile("./tests/test.pdf", os.path.join(path, "test.pdf"))
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, output_file="test"
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(
images_from_path[0].filename == os.path.join(path, "test0001-1.ppm")
)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_single_file: {} sec".format(
time.time() - start_time
)
)
## Test grayscale option
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_grayscale_from_bytes(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), grayscale=True)
self.assertTrue(images_from_bytes[0].mode == "L")
print(
"test_conversion_to_grayscale_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_grayscale_from_path(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", grayscale=True)
self.assertTrue(images_from_path[0].mode == "L")
[im.close() for im in images_from_path]
print(
"test_conversion_to_grayscale_from_path_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_grayscale_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, grayscale=True
)
self.assertTrue(images_from_path[0].mode == "L")
[im.close() for im in images_from_path]
print(
"test_conversion_to_grayscale_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test pathlib support
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_pathlib_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
pathlib.Path("./tests/test.pdf"),
output_folder=pathlib.Path(path),
poppler_path=get_poppler_path(),
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_pathlib_path_using_dir: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_pathlib_path_14(self):
start_time = time.time()
images_from_path = convert_from_path(pathlib.Path("./tests/test_14.pdf"))
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_pathlib_path_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_pathlib_path_using_dir_14(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
pathlib.Path("./tests/test_14.pdf"),
output_folder=pathlib.Path(path),
poppler_path=get_poppler_path(),
)
self.assertTrue(len(images_from_path) == 14)
[im.close() for im in images_from_path]
print(
"test_conversion_from_pathlib_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test jpegopt parameter
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf", fmt="jpeg", jpegopt={"quality": 100}
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_quality(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), fmt="jpg", jpegopt={"quality": 100}
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_quality: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality_and_progressive(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf",
fmt="jpeg",
jpegopt={"quality": 100, "progressive": True},
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality_and_progressive: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality_and_not_progressive(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf",
fmt="jpeg",
jpegopt={"quality": 100, "progressive": False},
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality_and_progressive: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_quality_and_progressive(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
fmt="jpg",
jpegopt={"quality": 100, "progressive": True},
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_quality_and_progressive: {} sec".format(
time.time() - start_time
)
)
@profile
    @unittest.skipIf(POPPLER_INSTALLED, "Poppler is installed!")
    def test_conversion_from_bytes_with_quality_and_poppler_not_installed(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
try:
images_from_bytes = convert_from_bytes(
pdf_file.read(), fmt="jpg", jpegopt={"quality": 100}
)
except PDFInfoNotInstalledError:
pass
print(
"test_conversion_from_bytes_with_quality_and_poppler_not_installed: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality_and_progressive_and_optimize(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf",
fmt="jpeg",
jpegopt={"quality": 100, "progressive": True, "optimize": True},
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality_and_progressive_and_optimize: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_quality_and_progressive_and_optimize(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
fmt="jpg",
jpegopt={"quality": 100, "progressive": True, "optimize": True},
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_quality_and_progressive_and_optimize: {} sec".format(
time.time() - start_time
)
)
## Test size parameter
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_int_size(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=400)
self.assertTrue(images_from_path[0].size[1] == 400)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_int_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_1d_tuple_size(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(400,))
self.assertTrue(images_from_path[0].size[1] == 400)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_1d_tuple_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_2d_tuple_size(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(400, 400))
self.assertTrue(images_from_path[0].size == (400, 400))
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_2d_tuple_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_invalid_size(self):
start_time = time.time()
try:
images_from_path = convert_from_path("./tests/test.pdf", size="bad value")
raise Exception("This should not happen")
except ValueError:
pass
print(
"test_conversion_from_path_with_invalid_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_2d_tuple_size_with_None_width(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(None, 400))
self.assertTrue(images_from_path[0].size[0] == 310)
self.assertTrue(images_from_path[0].size[1] == 400)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_2d_tuple_size_with_None_width: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_2d_tuple_size_with_None_height(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(400, None))
self.assertTrue(images_from_path[0].size[0] == 400)
self.assertTrue(images_from_path[0].size[1] == 518)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_2d_tuple_size_with_None_height: {} sec".format(
time.time() - start_time
)
)
## Test hide annotations parameter
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_hide_annotations(self):
        start_time = time.time()
        images_from_path = convert_from_path(
            "./tests/test_annotations.pdf", hide_annotations=True
        )
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_hide_annotations: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_hide_annotations(self):
start_time = time.time()
with open("./tests/test_annotations.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
hide_annotations=True,
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_hide_annotations: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_hide_annotations_with_invalid_arg_combination(
self,
):
start_time = time.time()
try:
images_from_path = convert_from_path(
"./tests/test_annotations.pdf",
hide_annotations=True,
use_pdftocairo=True,
)
raise Exception("This should not happen")
except NotImplementedError:
pass
print(
"test_conversion_from_path_with_hide_annotations_with_invalid_arg_combination: {} sec".format(
time.time() - start_time
)
)
## Test pdfinfo
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_path(self):
start_time = time.time()
info = pdfinfo_from_path("./tests/test.pdf")
self.assertTrue(info.get("Pages", 0) == 1)
print("test_pdfinfo_from_path: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as fh:
info = pdfinfo_from_bytes(fh.read())
self.assertTrue(info.get("Pages", 0) == 1)
print("test_pdfinfo_from_bytes: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_path_241(self):
start_time = time.time()
info = pdfinfo_from_path("./tests/test_241.pdf")
self.assertTrue(info.get("Pages", 0) == 241)
print("test_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_bytes_241(self):
start_time = time.time()
with open("./tests/test_241.pdf", "rb") as fh:
info = pdfinfo_from_bytes(fh.read())
self.assertTrue(info.get("Pages", 0) == 241)
print("test_pdfinfo_from_bytes_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_path_invalid(self):
start_time = time.time()
try:
info = pdfinfo_from_path("./tests/test.jpg")
raise Exception("This should not happen")
except PDFPageCountError:
pass
print("test_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_bytes_invalid(self):
start_time = time.time()
try:
with open("./tests/test.jpg", "rb") as fh:
info = pdfinfo_from_bytes(fh.read())
raise Exception("This should not happen")
except PDFPageCountError:
pass
print("test_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
# Test conversion with paths_only
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_paths_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, paths_only=True
)
self.assertTrue(len(images_from_path) == 1)
            self.assertIsInstance(images_from_path[0], str)
print(
"test_conversion_from_path_using_dir: {} sec".format(
time.time() - start_time
)
)
# Test for issue #125
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed")
def test_multithread_conversion(self):
start_time = time.time()
files = [
"./tests/test.pdf",
] * 50
with Pool(10) as p:
res = p.map(convert_from_path, files)
self.assertTrue(len(res) == 50)
print("test_multithread_conversion: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_use_pdftocairo(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", use_pdftocairo=True)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_use_pdftocairo: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_use_pdftocairo(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as fh:
images_from_bytes = convert_from_bytes(fh.read(), use_pdftocairo=True)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_use_pdftocairo: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_rawdates(self):
start_time = time.time()
info = pdfinfo_from_path("./tests/test.pdf", rawdates=True)
self.assertTrue("D:" in info["CreationDate"])
print("test_pdfinfo_rawdates: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_locked_pdf_with_userpw_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_user_only.pdf", "rb") as pdf_file:
info = pdfinfo_from_bytes(pdf_file.read(), userpw="pdf2image")
self.assertTrue("CreationDate" in info)
print(
"test_pdfinfo_locked_pdf_with_userpw_only: {} sec".format(
time.time() - start_time
)
)
@profile
def test_convert_from_functions_same_number_of_parameters(self):
start_time = time.time()
self.assertEqual(
len(signature(convert_from_path).parameters),
len(signature(convert_from_bytes).parameters),
)
print(
"test_convert_from_functions_same_number_of_parameters: {} sec".format(
time.time() - start_time
)
)
@profile
def test_pdfinfo_functions_same_number_of_parameters(self):
start_time = time.time()
self.assertEqual(
len(signature(pdfinfo_from_path).parameters),
len(signature(pdfinfo_from_bytes).parameters),
)
print(
"test_pdfinfo_functions_same_number_of_parameters: {} sec".format(
time.time() - start_time
)
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_timeout_pdfinfo_from_path_241(self):
start_time = time.time()
with self.assertRaises(PDFPopplerTimeoutError):
info = pdfinfo_from_path("./tests/test_241.pdf", timeout=0.00001)
print(
"test_timeout_pdfinfo_from_path_241: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_timeout_convert_from_path_241(self):
start_time = time.time()
with self.assertRaises(PDFPopplerTimeoutError):
imgs = convert_from_path("./tests/test_241.pdf", timeout=1)
print(
"test_timeout_convert_from_path_241: {} sec".format(
time.time() - start_time
)
)
if __name__ == "__main__":
unittest.main()
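# --- Illustrative note (editor's addition). Assuming this module sits at the
# repository root (fixtures are referenced as "./tests/*.pdf"), the suite can be
# run from there with something like `python -m unittest -v tests`. The @profile
# decorator and the POPPLER_INSTALLED flag are defined earlier in this module.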
| 67,434 | 37.446408 | 106 |
py
|
pdf2image
|
pdf2image-master/setup.py
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="pdf2image",
version="1.16.3",
description="A wrapper around the pdftoppm and pdftocairo command line tools to convert PDF to a PIL Image list.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Belval/pdf2image",
author="Edouard Belval",
author_email="[email protected]",
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords="pdf image png jpeg jpg convert",
packages=find_packages(exclude=["contrib", "docs", "tests"]),
install_requires=["pillow"],
package_data={"pdf2image": ["py.typed"]},
)
| 1,479 | 33.418605 | 118 |
py
|
pdf2image
|
pdf2image-master/docs/conf.py
|
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "pdf2image"
copyright = "2022, Edouard Belval"
author = "Edouard Belval"
# The short X.Y version
version = "1.16.1"
# The full version, including alpha/beta/rc tags
release = "latest"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"recommonmark",
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pdf2image"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pdf2image.tex",
"pdf2image Documentation",
"Edouard Belval",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pdf2image", "pdf2image Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pdf2image",
"pdf2image Documentation",
author,
"pdf2image",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
from recommonmark.parser import CommonMarkParser
source_parsers = {
".md": CommonMarkParser,
}
source_suffix = [".rst", ".md"]
| 5,595 | 27.697436 | 79 |
py
|
pdf2image
|
pdf2image-master/pdf2image/generators.py
|
"""
pdf2image filename generators
"""
import uuid
import threading
class ThreadSafeGenerator(object):
"""Wrapper around generator that protects concurrent access"""
def __init__(self, gen):
self.gen = gen
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.gen)
def threadsafe(f):
"""Decorator to make generator threadsafe. Fix #125"""
def g(*a, **kw):
return ThreadSafeGenerator(f(*a, **kw))
return g
@threadsafe
def uuid_generator():
"""Returns a UUID4"""
while True:
yield str(uuid.uuid4())
@threadsafe
def counter_generator(prefix="", suffix="", padding_goal=4):
"""Returns a joined prefix, iteration number, and suffix"""
i = 0
while True:
i += 1
yield str(prefix) + str(i).zfill(padding_goal) + str(suffix)
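# --- Illustrative usage (editor's addition; a minimal sketch, not part of the
# original module). Both generators are wrapped by @threadsafe, so next() can be
# called from multiple threads without interleaving. The values shown in the
# comments are the expected shape, not captured output.
if __name__ == "__main__":
    names = counter_generator(prefix="page_", suffix=".ppm")
    print(next(names))  # "page_0001.ppm"
    print(next(names))  # "page_0002.ppm"
    print(next(uuid_generator()))  # e.g. "550e8400-e29b-41d4-a716-446655440000"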
| 917 | 18.531915 | 68 |
py
|
pdf2image
|
pdf2image-master/pdf2image/exceptions.py
|
"""
Define exceptions specific to pdf2image
"""
class PopplerNotInstalledError(Exception):
"""Raised when poppler is not installed"""
pass
class PDFInfoNotInstalledError(PopplerNotInstalledError):
"""Raised when pdfinfo is not installed"""
pass
class PDFPageCountError(Exception):
"""Raised when the pdfinfo was unable to retrieve the page count"""
pass
class PDFSyntaxError(Exception):
"""Raised when a syntax error was thrown during rendering"""
pass
class PDFPopplerTimeoutError(Exception):
"""Raised when the timeout is exceeded while converting a PDF"""
pass
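# --- Illustrative usage (editor's addition; a minimal sketch, not part of the
# original module). PDFInfoNotInstalledError subclasses PopplerNotInstalledError,
# so catching the parent class also covers the pdfinfo-specific failure.
def _example_catch_hierarchy():  # hypothetical helper, for illustration only
    try:
        raise PDFInfoNotInstalledError("pdfinfo missing")
    except PopplerNotInstalledError as exc:
        return str(exc)  # -> "pdfinfo missing"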
| 623 | 17.352941 | 71 |
py
|
pdf2image
|
pdf2image-master/pdf2image/pdf2image.py
|
"""
pdf2image is a light wrapper for the poppler-utils tools that can convert your
PDFs into Pillow images.
"""
import os
import platform
import tempfile
import types
import shutil
import subprocess
from subprocess import Popen, PIPE, TimeoutExpired
from typing import Any, Union, Tuple, List, Dict, Callable
from pathlib import PurePath
from PIL import Image
from pdf2image.generators import uuid_generator, counter_generator, ThreadSafeGenerator
from pdf2image.parsers import (
parse_buffer_to_pgm,
parse_buffer_to_ppm,
parse_buffer_to_jpeg,
parse_buffer_to_png,
)
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError,
PDFPopplerTimeoutError,
)
TRANSPARENT_FILE_TYPES = ["png", "tiff"]
PDFINFO_CONVERT_TO_INT = ["Pages"]
def convert_from_path(
pdf_path: Union[str, PurePath],
dpi: int = 200,
output_folder: Union[str, PurePath] = None,
first_page: int = None,
last_page: int = None,
fmt: str = "ppm",
jpegopt: Dict = None,
thread_count: int = 1,
userpw: str = None,
ownerpw: str = None,
use_cropbox: bool = False,
strict: bool = False,
transparent: bool = False,
single_file: bool = False,
output_file: Any = uuid_generator(),
poppler_path: Union[str, PurePath] = None,
grayscale: bool = False,
size: Union[Tuple, int] = None,
paths_only: bool = False,
use_pdftocairo: bool = False,
timeout: int = None,
hide_annotations: bool = False,
) -> List[Image.Image]:
"""Function wrapping pdftoppm and pdftocairo
:param pdf_path: Path to the PDF that you want to convert
:type pdf_path: Union[str, PurePath]
:param dpi: Image quality in DPI (default 200), defaults to 200
:type dpi: int, optional
:param output_folder: Write the resulting images to a folder (instead of directly in memory), defaults to None
:type output_folder: Union[str, PurePath], optional
:param first_page: First page to process, defaults to None
:type first_page: int, optional
:param last_page: Last page to process before stopping, defaults to None
:type last_page: int, optional
:param fmt: Output image format, defaults to "ppm"
:type fmt: str, optional
:param jpegopt: jpeg options `quality`, `progressive`, and `optimize` (only for jpeg format), defaults to None
:type jpegopt: Dict, optional
:param thread_count: How many threads we are allowed to spawn for processing, defaults to 1
:type thread_count: int, optional
:param userpw: PDF's password, defaults to None
:type userpw: str, optional
:param ownerpw: PDF's owner password, defaults to None
:type ownerpw: str, optional
:param use_cropbox: Use cropbox instead of mediabox, defaults to False
:type use_cropbox: bool, optional
:param strict: When a Syntax Error is thrown, it will be raised as an Exception, defaults to False
:type strict: bool, optional
:param transparent: Output with a transparent background instead of a white one, defaults to False
:type transparent: bool, optional
:param single_file: Uses the -singlefile option from pdftoppm/pdftocairo, defaults to False
:type single_file: bool, optional
:param output_file: What is the output filename or generator, defaults to uuid_generator()
:type output_file: Any, optional
:param poppler_path: Path to look for poppler binaries, defaults to None
:type poppler_path: Union[str, PurePath], optional
:param grayscale: Output grayscale image(s), defaults to False
:type grayscale: bool, optional
:param size: Size of the resulting image(s), uses the Pillow (width, height) standard, defaults to None
:type size: Union[Tuple, int], optional
:param paths_only: Don't load image(s), return paths instead (requires output_folder), defaults to False
:type paths_only: bool, optional
:param use_pdftocairo: Use pdftocairo instead of pdftoppm, may help performance, defaults to False
:type use_pdftocairo: bool, optional
:param timeout: Raise PDFPopplerTimeoutError after the given time, defaults to None
:type timeout: int, optional
:param hide_annotations: Hide PDF annotations in the output, defaults to False
:type hide_annotations: bool, optional
:raises NotImplementedError: Raised when conflicting parameters are given (hide_annotations for pdftocairo)
:raises PDFPopplerTimeoutError: Raised after the timeout for the image processing is exceeded
:raises PDFSyntaxError: Raised if there is a syntax error in the PDF and strict=True
:return: A list of Pillow images, one for each page between first_page and last_page
:rtype: List[Image.Image]
"""
if use_pdftocairo and fmt == "ppm":
fmt = "png"
# We make sure that if passed arguments are Path objects, they're converted to strings
if isinstance(pdf_path, PurePath):
pdf_path = pdf_path.as_posix()
if isinstance(output_folder, PurePath):
output_folder = output_folder.as_posix()
if isinstance(poppler_path, PurePath):
poppler_path = poppler_path.as_posix()
page_count = pdfinfo_from_path(
pdf_path, userpw, ownerpw, poppler_path=poppler_path
)["Pages"]
    # We start by getting the output format, the buffer processing function and whether we need pdftocairo
parsed_fmt, final_extension, parse_buffer_func, use_pdfcairo_format = _parse_format(
fmt, grayscale
)
    # We use pdftocairo if the format requires it OR if we need a transparent output
use_pdfcairo = (
use_pdftocairo
or use_pdfcairo_format
or (transparent and parsed_fmt in TRANSPARENT_FILE_TYPES)
)
poppler_version_major, poppler_version_minor = _get_poppler_version(
"pdftocairo" if use_pdfcairo else "pdftoppm", poppler_path=poppler_path
)
if poppler_version_major == 0 and poppler_version_minor <= 57:
jpegopt = None
if poppler_version_major == 0 and poppler_version_minor <= 83:
hide_annotations = False
# If output_file isn't a generator, it will be turned into one
if not isinstance(output_file, types.GeneratorType) and not isinstance(
output_file, ThreadSafeGenerator
):
if single_file:
output_file = iter([output_file])
else:
output_file = counter_generator(output_file)
if thread_count < 1:
thread_count = 1
if first_page is None or first_page < 1:
first_page = 1
if last_page is None or last_page > page_count:
last_page = page_count
if first_page > last_page:
return []
try:
auto_temp_dir = False
if output_folder is None and use_pdfcairo:
output_folder = tempfile.mkdtemp()
auto_temp_dir = True
# Recalculate page count based on first and last page
page_count = last_page - first_page + 1
if thread_count > page_count:
thread_count = page_count
reminder = page_count % thread_count
current_page = first_page
processes = []
for _ in range(thread_count):
thread_output_file = next(output_file)
# Get the number of pages the thread will be processing
thread_page_count = page_count // thread_count + int(reminder > 0)
# Build the command accordingly
args = _build_command(
["-r", str(dpi), pdf_path],
output_folder,
current_page,
current_page + thread_page_count - 1,
parsed_fmt,
jpegopt,
thread_output_file,
userpw,
ownerpw,
use_cropbox,
transparent,
single_file,
grayscale,
size,
hide_annotations,
)
if use_pdfcairo:
if hide_annotations:
raise NotImplementedError(
"Hide annotations flag not implemented in pdftocairo."
)
args = [_get_command_path("pdftocairo", poppler_path)] + args
else:
args = [_get_command_path("pdftoppm", poppler_path)] + args
# Update page values
current_page = current_page + thread_page_count
reminder -= int(reminder > 0)
# Add poppler path to LD_LIBRARY_PATH
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = (
poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
)
# Spawn the process and save its uuid
startupinfo = None
if platform.system() == "Windows":
# this startupinfo structure prevents a console window from popping up on Windows
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
processes.append(
(
thread_output_file,
Popen(
args, env=env, stdout=PIPE, stderr=PIPE, startupinfo=startupinfo
),
)
)
images = []
for uid, proc in processes:
try:
data, err = proc.communicate(timeout=timeout)
except TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
raise PDFPopplerTimeoutError("Run poppler timeout.")
if b"Syntax Error" in err and strict:
raise PDFSyntaxError(err.decode("utf8", "ignore"))
if output_folder is not None:
images += _load_from_output_folder(
output_folder,
uid,
final_extension,
paths_only,
in_memory=auto_temp_dir,
)
else:
images += parse_buffer_func(data)
finally:
if auto_temp_dir:
shutil.rmtree(output_folder)
return images
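# --- Illustrative usage (editor's addition; a minimal sketch, not part of the
# original module). Assumes a one-page "./tests/test.pdf" such as the one used
# by the test suite. Wrapped in a function so nothing runs at import time.
def _example_convert_from_path():  # hypothetical helper, for illustration only
    images = convert_from_path(
        "./tests/test.pdf",  # assumed sample file
        dpi=150,
        fmt="jpeg",
        jpegopt={"quality": 90, "progressive": True},
    )
    try:
        return [im.size for im in images]  # one (width, height) per page
    finally:
        for im in images:
            im.close()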
def convert_from_bytes(
pdf_file: bytes,
dpi: int = 200,
output_folder: Union[str, PurePath] = None,
first_page: int = None,
last_page: int = None,
fmt: str = "ppm",
jpegopt: Dict = None,
thread_count: int = 1,
userpw: str = None,
ownerpw: str = None,
use_cropbox: bool = False,
strict: bool = False,
transparent: bool = False,
single_file: bool = False,
output_file: Union[str, PurePath] = uuid_generator(),
poppler_path: Union[str, PurePath] = None,
grayscale: bool = False,
size: Union[Tuple, int] = None,
paths_only: bool = False,
use_pdftocairo: bool = False,
timeout: int = None,
hide_annotations: bool = False,
) -> List[Image.Image]:
"""Function wrapping pdftoppm and pdftocairo.
    :param pdf_file: Bytes of the PDF that you want to convert
    :type pdf_file: bytes
:param dpi: Image quality in DPI (default 200), defaults to 200
:type dpi: int, optional
:param output_folder: Write the resulting images to a folder (instead of directly in memory), defaults to None
:type output_folder: Union[str, PurePath], optional
:param first_page: First page to process, defaults to None
:type first_page: int, optional
:param last_page: Last page to process before stopping, defaults to None
:type last_page: int, optional
:param fmt: Output image format, defaults to "ppm"
:type fmt: str, optional
:param jpegopt: jpeg options `quality`, `progressive`, and `optimize` (only for jpeg format), defaults to None
:type jpegopt: Dict, optional
:param thread_count: How many threads we are allowed to spawn for processing, defaults to 1
:type thread_count: int, optional
:param userpw: PDF's password, defaults to None
:type userpw: str, optional
:param ownerpw: PDF's owner password, defaults to None
:type ownerpw: str, optional
:param use_cropbox: Use cropbox instead of mediabox, defaults to False
:type use_cropbox: bool, optional
:param strict: When a Syntax Error is thrown, it will be raised as an Exception, defaults to False
:type strict: bool, optional
:param transparent: Output with a transparent background instead of a white one, defaults to False
:type transparent: bool, optional
:param single_file: Uses the -singlefile option from pdftoppm/pdftocairo, defaults to False
:type single_file: bool, optional
:param output_file: What is the output filename or generator, defaults to uuid_generator()
:type output_file: Any, optional
:param poppler_path: Path to look for poppler binaries, defaults to None
:type poppler_path: Union[str, PurePath], optional
:param grayscale: Output grayscale image(s), defaults to False
:type grayscale: bool, optional
:param size: Size of the resulting image(s), uses the Pillow (width, height) standard, defaults to None
:type size: Union[Tuple, int], optional
:param paths_only: Don't load image(s), return paths instead (requires output_folder), defaults to False
:type paths_only: bool, optional
:param use_pdftocairo: Use pdftocairo instead of pdftoppm, may help performance, defaults to False
:type use_pdftocairo: bool, optional
:param timeout: Raise PDFPopplerTimeoutError after the given time, defaults to None
:type timeout: int, optional
:param hide_annotations: Hide PDF annotations in the output, defaults to False
:type hide_annotations: bool, optional
:raises NotImplementedError: Raised when conflicting parameters are given (hide_annotations for pdftocairo)
:raises PDFPopplerTimeoutError: Raised after the timeout for the image processing is exceeded
:raises PDFSyntaxError: Raised if there is a syntax error in the PDF and strict=True
:return: A list of Pillow images, one for each page between first_page and last_page
:rtype: List[Image.Image]
"""
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, "wb") as f:
f.write(pdf_file)
f.flush()
return convert_from_path(
f.name,
dpi=dpi,
output_folder=output_folder,
first_page=first_page,
last_page=last_page,
fmt=fmt,
jpegopt=jpegopt,
thread_count=thread_count,
userpw=userpw,
ownerpw=ownerpw,
use_cropbox=use_cropbox,
strict=strict,
transparent=transparent,
single_file=single_file,
output_file=output_file,
poppler_path=poppler_path,
grayscale=grayscale,
size=size,
paths_only=paths_only,
use_pdftocairo=use_pdftocairo,
timeout=timeout,
hide_annotations=hide_annotations,
)
finally:
os.close(fh)
os.remove(temp_filename)
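# --- Illustrative usage (editor's addition; a minimal sketch, not part of the
# original module). The bytes variant takes the same keyword arguments as
# convert_from_path; it simply spools the bytes to a temporary file first.
def _example_convert_from_bytes():  # hypothetical helper, for illustration only
    with open("./tests/test.pdf", "rb") as fh:  # assumed sample file
        return convert_from_bytes(fh.read(), fmt="png", grayscale=True)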
def _build_command(
args: List,
output_folder: str,
first_page: int,
last_page: int,
fmt: str,
jpegopt: Dict,
output_file: str,
userpw: str,
ownerpw: str,
use_cropbox: bool,
transparent: bool,
single_file: bool,
grayscale: bool,
size: Union[int, Tuple[int, int]],
hide_annotations: bool,
) -> List[str]:
if use_cropbox:
args.append("-cropbox")
if hide_annotations:
args.append("-hide-annotations")
if transparent and fmt in TRANSPARENT_FILE_TYPES:
args.append("-transp")
if first_page is not None:
args.extend(["-f", str(first_page)])
if last_page is not None:
args.extend(["-l", str(last_page)])
if fmt not in ["pgm", "ppm"]:
args.append("-" + fmt)
if fmt in ["jpeg", "jpg"] and jpegopt:
args.extend(["-jpegopt", _parse_jpegopt(jpegopt)])
if single_file:
args.append("-singlefile")
if output_folder is not None:
args.append(os.path.join(output_folder, output_file))
if userpw is not None:
args.extend(["-upw", userpw])
if ownerpw is not None:
args.extend(["-opw", ownerpw])
if grayscale:
args.append("-gray")
if size is None:
pass
elif isinstance(size, tuple) and len(size) == 2:
if size[0] is not None:
args.extend(["-scale-to-x", str(int(size[0]))])
else:
args.extend(["-scale-to-x", str(-1)])
if size[1] is not None:
args.extend(["-scale-to-y", str(int(size[1]))])
else:
args.extend(["-scale-to-y", str(-1)])
elif isinstance(size, tuple) and len(size) == 1:
args.extend(["-scale-to", str(int(size[0]))])
elif isinstance(size, int) or isinstance(size, float):
args.extend(["-scale-to", str(int(size))])
else:
raise ValueError(f"Size {size} is not a tuple or an integer")
return args
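# --- Illustrative note (editor's addition). For example, a call such as
#   _build_command(["-r", "200", "test.pdf"], "/tmp/out", 1, 1, "jpeg",
#                  {"quality": 100}, "page", None, None, False, False, False,
#                  False, None, False)
# returns:
#   ["-r", "200", "test.pdf", "-f", "1", "-l", "1", "-jpeg",
#    "-jpegopt", "quality=100", "/tmp/out/page"]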
def _parse_format(fmt: str, grayscale: bool = False) -> Tuple[str, str, Callable, bool]:
fmt = fmt.lower()
if fmt[0] == ".":
fmt = fmt[1:]
if fmt in ("jpeg", "jpg"):
return "jpeg", "jpg", parse_buffer_to_jpeg, False
if fmt == "png":
return "png", "png", parse_buffer_to_png, False
if fmt in ("tif", "tiff"):
return "tiff", "tif", None, True
if fmt == "ppm" and grayscale:
return "pgm", "pgm", parse_buffer_to_pgm, False
# Unable to parse the format so we'll use the default
return "ppm", "ppm", parse_buffer_to_ppm, False
def _parse_jpegopt(jpegopt: Dict) -> str:
parts = []
for k, v in jpegopt.items():
if v is True:
v = "y"
if v is False:
v = "n"
parts.append("{}={}".format(k, v))
return ",".join(parts)
def _get_command_path(command: str, poppler_path: str = None) -> str:
if platform.system() == "Windows":
command = command + ".exe"
if poppler_path is not None:
command = os.path.join(poppler_path, command)
return command
def _get_poppler_version(
command: str, poppler_path: str = None, timeout: int = None
) -> Tuple[int, int]:
command = [_get_command_path(command, poppler_path), "-v"]
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
proc = Popen(command, env=env, stdout=PIPE, stderr=PIPE)
try:
data, err = proc.communicate(timeout=timeout)
except TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
raise PDFPopplerTimeoutError("Run poppler poppler timeout.")
try:
# TODO: Make this more robust
version = err.decode("utf8", "ignore").split("\n")[0].split(" ")[-1].split(".")
return int(version[0]), int(version[1])
    except Exception:
# Lowest version that includes pdftocairo (2011)
return 0, 17
def pdfinfo_from_path(
pdf_path: str,
userpw: str = None,
ownerpw: str = None,
poppler_path: str = None,
rawdates: bool = False,
timeout: int = None,
) -> Dict:
"""Function wrapping poppler's pdfinfo utility and returns the result as a dictionary.
:param pdf_path: Path to the PDF that you want to convert
:type pdf_path: str
:param userpw: PDF's password, defaults to None
:type userpw: str, optional
:param ownerpw: PDF's owner password, defaults to None
:type ownerpw: str, optional
:param poppler_path: Path to look for poppler binaries, defaults to None
:type poppler_path: Union[str, PurePath], optional
:param rawdates: Return the undecoded data strings, defaults to False
:type rawdates: bool, optional
:param timeout: Raise PDFPopplerTimeoutError after the given time, defaults to None
:type timeout: int, optional
:raises PDFPopplerTimeoutError: Raised after the timeout for the image processing is exceeded
:raises PDFInfoNotInstalledError: Raised if pdfinfo is not installed
:raises PDFPageCountError: Raised if the output could not be parsed
:return: Dictionary containing various information on the PDF
:rtype: Dict
"""
try:
command = [_get_command_path("pdfinfo", poppler_path), pdf_path]
if userpw is not None:
command.extend(["-upw", userpw])
if ownerpw is not None:
command.extend(["-opw", ownerpw])
if rawdates:
command.extend(["-rawdates"])
# Add poppler path to LD_LIBRARY_PATH
env = os.environ.copy()
if poppler_path is not None:
env["LD_LIBRARY_PATH"] = poppler_path + ":" + env.get("LD_LIBRARY_PATH", "")
proc = Popen(command, env=env, stdout=PIPE, stderr=PIPE)
try:
out, err = proc.communicate(timeout=timeout)
except TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
raise PDFPopplerTimeoutError("Run poppler poppler timeout.")
d = {}
for field in out.decode("utf8", "ignore").split("\n"):
sf = field.split(":")
key, value = sf[0], ":".join(sf[1:])
if key != "":
d[key] = (
int(value.strip())
if key in PDFINFO_CONVERT_TO_INT
else value.strip()
)
if "Pages" not in d:
raise ValueError
return d
except OSError:
raise PDFInfoNotInstalledError(
"Unable to get page count. Is poppler installed and in PATH?"
)
except ValueError:
raise PDFPageCountError(
f"Unable to get page count.\n{err.decode('utf8', 'ignore')}"
)
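# --- Illustrative usage (editor's addition; a minimal sketch, not part of the
# original module). Assumes the "./tests/test.pdf" sample file; "Pages" is
# converted to int (see PDFINFO_CONVERT_TO_INT), other fields stay strings.
def _example_pdfinfo():  # hypothetical helper, for illustration only
    info = pdfinfo_from_path("./tests/test.pdf")
    return info["Pages"], info.get("CreationDate")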
def pdfinfo_from_bytes(
pdf_bytes: bytes,
userpw: str = None,
ownerpw: str = None,
poppler_path: str = None,
rawdates: bool = False,
timeout: int = None,
) -> Dict:
"""Function wrapping poppler's pdfinfo utility and returns the result as a dictionary.
:param pdf_bytes: Bytes of the PDF that you want to convert
:type pdf_bytes: bytes
:param userpw: PDF's password, defaults to None
:type userpw: str, optional
:param ownerpw: PDF's owner password, defaults to None
:type ownerpw: str, optional
:param poppler_path: Path to look for poppler binaries, defaults to None
:type poppler_path: Union[str, PurePath], optional
:param rawdates: Return the undecoded data strings, defaults to False
:type rawdates: bool, optional
:param timeout: Raise PDFPopplerTimeoutError after the given time, defaults to None
:type timeout: int, optional
:return: Dictionary containing various information on the PDF
:rtype: Dict
"""
fh, temp_filename = tempfile.mkstemp()
try:
with open(temp_filename, "wb") as f:
f.write(pdf_bytes)
f.flush()
return pdfinfo_from_path(
temp_filename,
userpw=userpw,
ownerpw=ownerpw,
poppler_path=poppler_path,
rawdates=rawdates,
timeout=timeout,
)
finally:
os.close(fh)
os.remove(temp_filename)
def _load_from_output_folder(
output_folder: str,
output_file: str,
ext: str,
paths_only: bool,
in_memory: bool = False,
) -> List[Image.Image]:
images = []
for f in sorted(os.listdir(output_folder)):
if f.startswith(output_file) and f.split(".")[-1] == ext:
if paths_only:
images.append(os.path.join(output_folder, f))
else:
images.append(Image.open(os.path.join(output_folder, f)))
if in_memory:
images[-1].load()
return images
| 23,863 | 34.993967 | 114 |
py
|
pdf2image
|
pdf2image-master/pdf2image/parsers.py
|
"""
pdf2image custom buffer parsers
"""
from io import BytesIO
from typing import List
from PIL import Image
def parse_buffer_to_ppm(data: bytes) -> List[Image.Image]:
"""Parse PPM file bytes to Pillow Image
:param data: pdftoppm/pdftocairo output bytes
:type data: bytes
:return: List of PPM images parsed from the output
:rtype: List[Image.Image]
"""
images = []
index = 0
while index < len(data):
code, size, rgb = tuple(data[index : index + 40].split(b"\n")[0:3])
size_x, size_y = tuple(size.split(b" "))
file_size = len(code) + len(size) + len(rgb) + 3 + int(size_x) * int(size_y) * 3
images.append(Image.open(BytesIO(data[index : index + file_size])))
index += file_size
return images
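# --- Illustrative note (editor's addition). The file_size arithmetic above
# follows the binary PPM (P6) layout: three newline-terminated header lines
# ("P6", "<width> <height>", "<maxval>"; the +3 counts the newlines) followed
# by width * height * 3 bytes of RGB data. For a 2x2 image with maxval 255:
#   len(b"P6") + len(b"2 2") + len(b"255") + 3 + 2 * 2 * 3 == 23 bytes.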
def parse_buffer_to_pgm(data: bytes) -> List[Image.Image]:
"""Parse PGM file bytes to Pillow Image
:param data: pdftoppm/pdftocairo output bytes
:type data: bytes
:return: List of PGM images parsed from the output
:rtype: List[Image.Image]
"""
images = []
index = 0
while index < len(data):
code, size, maxval = tuple(data[index : index + 40].split(b"\n")[0:3])
size_x, size_y = tuple(size.split(b" "))
file_size = len(code) + len(size) + len(maxval) + 3 + int(size_x) * int(size_y)
images.append(Image.open(BytesIO(data[index : index + file_size])))
index += file_size
return images
def parse_buffer_to_jpeg(data: bytes) -> List[Image.Image]:
"""Parse JPEG file bytes to Pillow Image
:param data: pdftoppm/pdftocairo output bytes
:type data: bytes
:return: List of JPEG images parsed from the output
:rtype: List[Image.Image]
"""
return [
Image.open(BytesIO(image_data + b"\xff\xd9"))
for image_data in data.split(b"\xff\xd9")[
:-1
] # Last element is obviously empty
]
def parse_buffer_to_png(data: bytes) -> List[Image.Image]:
"""Parse PNG file bytes to Pillow Image
:param data: pdftoppm/pdftocairo output bytes
:type data: bytes
:return: List of PNG images parsed from the output
:rtype: List[Image.Image]
"""
images = []
c1 = 0
c2 = 0
data_len = len(data)
while c1 < data_len:
# IEND can appear in a PNG without being the actual end
if data[c2 : c2 + 4] == b"IEND" and (
c2 + 8 == data_len or data[c2 + 9 : c2 + 12] == b"PNG"
):
images.append(Image.open(BytesIO(data[c1 : c2 + 8])))
c1 = c2 + 8
c2 = c1
c2 += 1
return images
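# --- Illustrative note (editor's addition). The c2 + 8 offset spans the 4-byte
# "IEND" chunk type plus its 4-byte CRC. The data[c2 + 9 : c2 + 12] == b"PNG"
# check peeks at the signature of the following image (b"\x89PNG..." starts at
# c2 + 8), so an "IEND" byte sequence embedded inside chunk data is not treated
# as an image boundary.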
| 2,609 | 25.363636 | 88 |
py
|
pdf2image
|
pdf2image-master/pdf2image/__init__.py
|
"""
__init__ of the pdf2image module
"""
from .pdf2image import (
convert_from_bytes,
convert_from_path,
pdfinfo_from_bytes,
pdfinfo_from_path,
)
| 167 | 14.272727 | 36 |
py
|
learning-to-quantize
|
learning-to-quantize-master/args.py
|
import argparse
import yaml
import os
import torch
import utils
def add_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch NUQSGD')
# options overwritting yaml options
parser.add_argument('--path_opt', default='default.yaml',
type=str, help='path to a yaml options file')
parser.add_argument('--data', default=argparse.SUPPRESS,
type=str, help='path to data')
parser.add_argument('--logger_name', default='runs/runX')
parser.add_argument('--dataset', default='mnist', help='mnist|cifar10')
# options that can be changed from default
parser.add_argument('--batch_size', type=int, default=argparse.SUPPRESS,
metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size',
type=int, default=argparse.SUPPRESS, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=argparse.SUPPRESS,
metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=argparse.SUPPRESS,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=argparse.SUPPRESS,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no_cuda', action='store_true',
default=argparse.SUPPRESS,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=argparse.SUPPRESS,
metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=argparse.SUPPRESS,
metavar='N',
help='how many batches to wait before logging training'
' status')
parser.add_argument('--tblog_interval',
type=int, default=argparse.SUPPRESS)
parser.add_argument('--optim', default=argparse.SUPPRESS, help='sgd|dmom')
parser.add_argument('--arch', '-a', metavar='ARCH',
default=argparse.SUPPRESS,
help='model architecture: (default: resnet32)')
parser.add_argument('-j', '--workers', default=argparse.SUPPRESS,
type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--weight_decay', '--wd', default=argparse.SUPPRESS,
type=float,
metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--train_accuracy', action='store_true',
default=argparse.SUPPRESS)
parser.add_argument('--log_profiler', action='store_true')
parser.add_argument('--lr_decay_epoch',
default=argparse.SUPPRESS)
parser.add_argument('--log_keys', default='')
parser.add_argument('--exp_lr',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--nodropout',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--data_aug',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--noresume', action='store_true',
help='do not resume from checkpoint')
parser.add_argument('--pretrained',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--num_class',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--lr_decay_rate',
default=argparse.SUPPRESS, type=float)
parser.add_argument('--nesterov',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--run_dir', default='runs/runX')
parser.add_argument('--ckpt_name', default='checkpoint.pth.tar')
parser.add_argument('--g_estim', default=argparse.SUPPRESS, type=str)
parser.add_argument('--epoch_iters',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--gvar_log_iter',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--gvar_estim_iter',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--gvar_start',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--g_optim',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--g_optim_start',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--g_osnap_iter',
default='100,1000,10000', type=str)
parser.add_argument('--g_bsnap_iter',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--g_epoch',
default=argparse.SUPPRESS, action='store_true')
parser.add_argument('--niters',
default=argparse.SUPPRESS, type=int)
parser.add_argument('--no_batch_norm',
default=argparse.SUPPRESS, type=bool)
# NUQ
parser.add_argument('--nuq_method', default='q', help='q|nuq|qinf')
parser.add_argument('--nuq_bits', default=4, type=int)
parser.add_argument('--nuq_bucket_size', default=1024, type=int)
parser.add_argument('--nuq_ngpu', default=1, type=int)
parser.add_argument('--nuq_mul', default=0.5, type=float)
parser.add_argument('--nuq_amq_lr',
default=0.7, type=float)
parser.add_argument('--nuq_amq_epochs',
default=50, type=int)
parser.add_argument('--untrain_steps', default=0, type=int)
parser.add_argument('--untrain_lr', default=0.001, type=float)
parser.add_argument('--untrain_std', default=0.001, type=float)
parser.add_argument('--nuq_sym', default=False, action='store_true')
parser.add_argument('--nuq_inv', default=False, action='store_true')
parser.add_argument('--nuq_parallel', default='no', help='no|gpu1|ngpu')
parser.add_argument('--dist_num', default=20, type=int)
parser.add_argument('--chkpt_iter', default=20, type=int)
parser.add_argument('--nuq_number_of_samples',
default=argparse.SUPPRESS,
type=int,
help='NUQ Number of Samples')
parser.add_argument('--nuq_ig_sm_bkts',
action='store_true',
help='NUQ Ignore Small Buckets')
parser.add_argument('--nuq_truncated_interval',
default=argparse.SUPPRESS,
type=float,
help='NUQ Truncated Interval')
parser.add_argument('--nuq_cd_epochs', default=argparse.SUPPRESS,
help='NUQ Adaptive CD Epochs', type=int)
parser.add_argument('--nuq_layer', action='store_true',
help='NUQ Enable Network Wide Quantization')
args = parser.parse_args()
return args
def opt_to_nuq_kwargs(opt):
return {
'ngpu': opt.nuq_ngpu, 'bits': opt.nuq_bits,
'bucket_size': opt.nuq_bucket_size, 'method': opt.nuq_method,
'multiplier': opt.nuq_mul, 'cd_epochs': opt.nuq_cd_epochs,
'number_of_samples': opt.nuq_number_of_samples,
'path': opt.logger_name, 'symmetric': opt.nuq_sym,
'interval': opt.nuq_truncated_interval, 'amq_epochs': opt.nuq_amq_epochs,
'learning_rate': opt.nuq_learning_rate, 'amq_lr': opt.nuq_amq_lr,
'ig_sm_bkts': opt.nuq_ig_sm_bkts, 'inv': opt.nuq_inv
}
def yaml_opt(yaml_path):
opt = {}
with open(yaml_path, 'r') as handle:
opt = yaml.load(handle, Loader=yaml.FullLoader)
return opt
def get_opt():
args = add_args()
opt = yaml_opt('options/default.yaml')
opt_s = yaml_opt(os.path.join('options/{}/{}'.format(args.dataset,
args.path_opt)))
opt.update(opt_s)
opt.update(vars(args).items())
opt = utils.DictWrapper(opt)
opt.cuda = not opt.no_cuda and torch.cuda.is_available()
if opt.g_batch_size == -1:
opt.g_batch_size = opt.batch_size
return opt
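# Precedence sketch (editor's note, assuming the yaml files exist): values in
# options/default.yaml are overridden by the dataset-specific options file,
# which in turn is overridden by any flag actually passed on the command line
# (argparse.SUPPRESS keeps unset flags out of vars(args)).
#   opt = get_opt()       # e.g. `--lr 0.5` wins over both yaml files
#   opt.lr                # -> 0.5
#   opt.some_unset_key    # -> None, via DictWrapper.__getattr__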
| 8,495 | 47 | 81 |
py
|
learning-to-quantize
|
learning-to-quantize-master/grid_run.py
|
from __future__ import print_function
import argparse
import grid
import grid.cluster
import grid.nuq
class RunSingle(object):
def __init__(self, log_dir, module_name, exclude, prefix, parallel=False):
self.log_dir = log_dir
self.num = 0
self.module_name = module_name
self.exclude = exclude
self.parallel = parallel
self.prefix = prefix
def __call__(self, args):
logger_name = 'runs/%s/%s_%03d_' % (self.log_dir, self.prefix, self.num)
cmd = ['python -m {}'.format(self.module_name)]
self.num += 1
for k, v in args:
if v is not None:
cmd += ['--{} {}'.format(k, v)]
if k not in self.exclude:
logger_name += '{}_{},'.format(k, v)
dir_name = logger_name.strip(',')
cmd += ['--logger_name "$dir_name"']
cmd += ['> "$dir_name/log" 2>&1']
cmd = ['dir_name="%s"; mkdir -p "$dir_name" && ' % dir_name] + cmd
if self.parallel:
cmd += ['&']
return ' '.join(cmd)
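# Illustrative output (editor's sketch; hypothetical values): with
# log_dir='mylogs', prefix='0', num=0, module_name='main.gvar', exclude=[]
# and args=[('lr', '0.1')], __call__ returns roughly
#   dir_name="runs/mylogs/0_000_lr_0.1"; mkdir -p "$dir_name" &&
#   python -m main.gvar --lr 0.1 --logger_name "$dir_name" > "$dir_name/log" 2>&1
# (joined into a single shell line; '&' is appended when parallel is set).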
def deep_product(args, index=0, cur_args=[]):
if index >= len(args):
yield cur_args
elif isinstance(args, list):
        # list: disjoint union -- yield the configs of each alternative in turn
for a in args:
for b in deep_product(a):
yield b
elif isinstance(args, tuple):
        # tuple: product across the tuple's sub-specs -- combine one expanded
        # config from each element
for a in deep_product(args[index]):
next_args = cur_args + a
for b in deep_product(args, index+1, next_args):
yield b
elif isinstance(args, dict):
        # dict: cartesian product over every key's list of values
keys = list(args.keys())
values = list(args.values())
if not isinstance(values[index], list):
values[index] = [values[index]]
for v in values[index]:
if not isinstance(v, tuple):
next_args = cur_args + [(keys[index], v)]
for a in deep_product(args, index+1, next_args):
yield a
else:
for dv in deep_product(v[1]):
next_args = cur_args + [(keys[index], v[0])]
next_args += dv
for a in deep_product(args, index+1, next_args):
yield a
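# Example (editor's sketch): dicts expand to a cartesian product of their
# values, lists to a disjoint union of alternatives:
#   list(deep_product({'lr': [0.1, 0.01], 'momentum': 0.9}))
#   # -> [[('lr', 0.1), ('momentum', 0.9)],
#   #     [('lr', 0.01), ('momentum', 0.9)]]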
def run_multi(run_single, args):
cmds = []
for arg in deep_product(args):
cmds += [run_single(arg)]
return cmds
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--grid', default='gvar', type=str)
parser.add_argument('--run_name', default='', type=str)
parser.add_argument('--cluster', default='bolt', type=str)
parser.add_argument('--cluster_args', default='8,4,gpu', type=str)
parser.add_argument('--run0_id', default=0, type=int)
parser.add_argument('--prefix', default='0', type=str)
args = parser.parse_args()
prefix = args.prefix
run0_id = args.run0_id
val = grid.__dict__[args.grid].__dict__[args.run_name]([])
jobs, parallel = grid.cluster.__dict__[args.cluster](args.cluster_args, args.prefix)
args, log_dir, module_name, exclude = val
run_single = RunSingle(log_dir, module_name, exclude, prefix, parallel)
run_single.num = run0_id
cmds = run_multi(run_single, args)
print(len(cmds))
for j, job_index in enumerate(jobs):
file_name = 'jobs/{prefix}_{job}.sh'.format(prefix=prefix, job=str(int(job_index)))
with open(file_name, 'w') as f:
for i in range(j, len(cmds), len(jobs)):
print(cmds[i], file=f)
if parallel:
print('wait', file=f)
| 3,583 | 34.137255 | 91 |
py
|
learning-to-quantize
|
learning-to-quantize-master/utils.py
|
import shutil
import torch
import numpy as np
class DictWrapper(object):
def __init__(self, d):
self.d = d
def __getattr__(self, key):
if key in self.d:
return self.d[key]
else:
return None
class SaveCheckpoint(object):
def __init__(self):
# remember best prec@1 and save checkpoint
self.best_prec1 = 0
def __call__(self, model, prec1, opt, optimizer,
filename='checkpoint.pth.tar', gvar=None):
is_best = prec1 > self.best_prec1
self.best_prec1 = max(prec1, self.best_prec1)
state = {
'epoch': optimizer.epoch,
'niters': optimizer.niters,
'opt': opt.d,
'model': model.state_dict(),
'best_prec1': self.best_prec1,
}
if gvar is not None:
state.update({'gvar': gvar.state_dict()})
torch.save(state, opt.logger_name+'/'+filename)
if is_best:
shutil.copyfile(opt.logger_name+'/'+filename,
opt.logger_name+'/model_best.pth.tar')
def base_lr(optimizer, opt):
lr = opt.lr
return lr
def adjust_lr(optimizer, opt):
if opt.niters > 0:
niters = optimizer.niters
else:
niters = optimizer.niters//opt.epoch_iters
if isinstance(opt.lr_decay_epoch, str):
adjust_learning_rate_multi(optimizer, niters, opt)
else:
adjust_learning_rate(optimizer, niters, opt)
def adjust_learning_rate(optimizer, epoch, opt):
""" Sets the learning rate to the initial LR decayed by 10 """
if opt.exp_lr:
""" test
A=np.arange(200);
np.round(np.power(.1, np.power(2., A/80.)-1), 6)[[0,80,120,160]]
test """
last_epoch = 2. ** (float(epoch) / int(opt.lr_decay_epoch)) - 1
else:
last_epoch = epoch // int(opt.lr_decay_epoch)
lr = base_lr(optimizer, opt) * (0.1 ** last_epoch)
print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
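# Worked example (editor's note; hypothetical values): with base lr 0.1 and
# lr_decay_epoch=80, the step schedule gives 0.1 for epochs 0-79 and 0.01 for
# epochs 80-159. With exp_lr, epoch 80 gives exponent 2**(80/80) - 1 = 1,
# i.e. lr 0.01 as well, but the decay then accelerates: epoch 160 gives
# exponent 2**2 - 1 = 3, i.e. lr 1e-4.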
def adjust_learning_rate_multi(optimizer, epoch, opt):
"""Sets the learning rate to the initial LR decayed by 10"""
lr_decay_epoch = np.array(list(map(int, opt.lr_decay_epoch.split(','))))
if len(lr_decay_epoch) == 1:
return adjust_learning_rate(optimizer, epoch, opt)
el = (epoch // lr_decay_epoch)
ei = np.where(el > 0)[0]
if len(ei) == 0:
ei = [0]
print(el)
print(ei)
# lr = opt.lr * (opt.lr_decay_rate ** (ei[-1] + el[ei[-1]]))
lr = base_lr(optimizer, opt) * (
opt.lr_decay_rate ** (ei[-1]+(el[ei[-1]] > 0)))
print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
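# Example (editor's sketch):
#   output = torch.tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
#   target = torch.tensor([1, 1])
#   accuracy(output, target, topk=(1, 2))  # -> [tensor([50.]), tensor([100.])]
# Row 0 is correct at top-1; row 1 only appears within the top-2 predictions.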
| 3,161 | 28.551402 | 76 |
py
|
learning-to-quantize
|
learning-to-quantize-master/data.py
|
import torch
from torchvision import datasets, transforms
import torch.utils.data as data
import numpy as np
import os
def get_loaders(opt):
if opt.dataset == 'mnist':
return get_mnist_loaders(opt)
elif opt.dataset == 'cifar10':
return get_cifar10_loaders(opt)
elif opt.dataset == 'cifar100':
return get_cifar100_loaders(opt)
elif opt.dataset == 'svhn':
return get_svhn_loaders(opt)
elif opt.dataset.startswith('imagenet'):
return get_imagenet_loaders(opt)
elif opt.dataset == 'logreg':
return get_logreg_loaders(opt)
elif 'class' in opt.dataset:
return get_logreg_loaders(opt)
def dataset_to_loaders(train_dataset, test_dataset, opt):
kwargs = {'num_workers': opt.workers,
'pin_memory': True} if opt.cuda else {}
idxdataset = IndexedDataset(train_dataset, opt, train=True)
train_sampler = None
train_loader = torch.utils.data.DataLoader(
idxdataset,
batch_size=opt.batch_size,
sampler=train_sampler,
shuffle=(train_sampler is None),
drop_last=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
IndexedDataset(test_dataset, opt),
batch_size=opt.test_batch_size, shuffle=False,
**kwargs)
train_test_loader = torch.utils.data.DataLoader(
IndexedDataset(train_dataset, opt, train=True),
batch_size=opt.test_batch_size, shuffle=False,
**kwargs)
return train_loader, test_loader, train_test_loader
def get_minvar_loader(train_loader, opt):
kwargs = {'num_workers': opt.workers,
'pin_memory': True} if opt.cuda else {}
idxdataset = train_loader.dataset
train_loader = torch.utils.data.DataLoader(
idxdataset,
batch_size=opt.g_batch_size,
shuffle=True,
drop_last=False, **kwargs)
return train_loader
class IndexedDataset(data.Dataset):
def __init__(self, dataset, opt, train=False):
np.random.seed(2222)
self.ds = dataset
self.opt = opt
def __getitem__(self, index):
subindex = index
img, target = self.ds[subindex]
return img, target, index
def __len__(self):
return len(self.ds)
def get_mnist_loaders(opt, **kwargs):
transform = transforms.ToTensor()
if not opt.no_transform:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(
opt.data, train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(opt.data, train=False, transform=transform)
return dataset_to_loaders(train_dataset, test_dataset, opt, **kwargs)
def get_cifar10_100_transform(opt):
normalize = transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
std=(0.2023, 0.1994, 0.2010))
if opt.data_aug:
transform = [
transforms.RandomAffine(10, (.1, .1), (0.7, 1.2), 10),
transforms.ColorJitter(.2, .2, .2),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor(),
normalize,
]
else:
transform = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
return normalize, transform
def get_cifar10_loaders(opt):
normalize, transform = get_cifar10_100_transform(opt)
train_dataset = datasets.CIFAR10(root=opt.data, train=True,
transform=transforms.Compose(transform),
download=True)
test_dataset = datasets.CIFAR10(
root=opt.data, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
return dataset_to_loaders(train_dataset, test_dataset, opt)
def get_cifar100_loaders(opt):
normalize, transform = get_cifar10_100_transform(opt)
train_dataset = datasets.CIFAR100(root=opt.data, train=True,
transform=transforms.Compose(transform),
download=True)
test_dataset = datasets.CIFAR100(
root=opt.data, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
return dataset_to_loaders(train_dataset, test_dataset, opt)
def get_svhn_loaders(opt, **kwargs):
normalize = transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))
if opt.data_aug:
transform = [
transforms.RandomAffine(10, (.1, .1), (0.7, 1.), 10),
transforms.ColorJitter(.2, .2, .2),
transforms.RandomCrop(32),
transforms.ToTensor(),
normalize,
]
else:
transform = [
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))
]
train_dataset = torch.utils.data.ConcatDataset(
(datasets.SVHN(
opt.data, split='train', download=True,
transform=transforms.Compose(transform)),
datasets.SVHN(
opt.data, split='extra', download=True,
transform=transforms.Compose(transform))))
test_dataset = datasets.SVHN(opt.data, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))
]))
return dataset_to_loaders(train_dataset, test_dataset, opt)
def get_imagenet_loaders(opt):
# Data loading code
traindir = os.path.join(opt.data, 'train')
valdir = os.path.join(opt.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
test_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
return dataset_to_loaders(train_dataset, test_dataset, opt)
class InfiniteLoader(object):
def __init__(self, data_loader):
self.data_loader = data_loader
def __iter__(self):
self.data_iter = iter([])
return self
def __next__(self):
try:
data = next(self.data_iter)
except StopIteration:
if isinstance(self.data_loader, list):
II = self.data_loader
self.data_iter = (II[i] for i in torch.randperm(len(II)))
else:
self.data_iter = iter(self.data_loader)
data = next(self.data_iter)
return data
def next(self):
# for python2
return self.__next__()
def __len__(self):
return len(self.data_loader)
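# Usage sketch (editor's note): wraps a finite DataLoader so iteration never
# raises StopIteration; a fresh pass starts transparently when one ends.
#   it = iter(InfiniteLoader(train_loader))
#   batch = next(it)   # keeps yielding batches across epoch boundaries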
def random_orthogonal_matrix(gain, shape):
if len(shape) < 2:
raise RuntimeError("Only shapes of length 2 or more are "
"supported.")
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# pick the one with the correct shape
q = u if u.shape == flat_shape else v
q = q.reshape(shape)
    return np.asarray(gain * q, dtype=float)  # np.float alias removed in NumPy 1.24
class LinearDataset(data.Dataset):
def __init__(self, C, D, num, dim, num_class, train=True):
X = np.zeros((C.shape[0], num))
Y = np.zeros((num,))
for i in range(num_class):
n = num // num_class
e = np.random.normal(0.0, 1.0, (dim, n))
X[:, i * n:(i + 1) * n] = np.dot(D[:, :, i], e) + C[:, i:i + 1]
Y[i * n:(i + 1) * n] = i
self.X = X
self.Y = Y
self.classes = range(num_class)
def __getitem__(self, index):
X = torch.Tensor(self.X[:, index]).float()
Y = int(self.Y[index])
return X, Y
def __len__(self):
return self.X.shape[1]
def get_logreg_loaders(opt, **kwargs):
# np.random.seed(1234)
np.random.seed(2222)
# print("Create W")
C = opt.c_const * random_orthogonal_matrix(1.0, (opt.dim, opt.num_class))
D = opt.d_const * random_orthogonal_matrix(
1.0, (opt.dim, opt.dim, opt.num_class))
# print("Create train")
train_dataset = LinearDataset(C, D, opt.num_train_data, opt.dim,
opt.num_class, train=True)
# print("Create test")
test_dataset = LinearDataset(C, D,
opt.num_test_data, opt.dim, opt.num_class,
train=False)
torch.save((train_dataset.X, train_dataset.Y,
test_dataset.X, test_dataset.Y,
C), opt.logger_name + '/data.pth.tar')
return dataset_to_loaders(train_dataset, test_dataset, opt)
| 9,431 | 31.979021 | 78 |
py
|
learning-to-quantize
|
learning-to-quantize-master/log_utils.py
|
from collections import OrderedDict, defaultdict
import numpy as np
from tensorboardX import SummaryWriter
import time
import torch
import os
class TBXWrapper(object):
def configure(self, logger_name, flush_secs=5, opt=None):
self.writer = SummaryWriter(logger_name, flush_secs=flush_secs)
self.logger_name = logger_name
self.logobj = defaultdict(lambda: list())
self.opt = opt
def log_value(self, name, val, step):
self.writer.add_scalar(name, val, step)
self.logobj[name] += [(time.time(), step, float(val))]
def log_histogram(self, name, val, step):
self.writer.add_histogram(name, val, step)
def add_scalar(self, name, val, step):
self.log_value(name, val, step)
def save_log(self, filename='log.pth.tar'):
try:
os.makedirs(self.opt.logger_name)
except os.error:
pass
torch.save(dict(self.logobj), self.opt.logger_name+'/'+filename)
def close(self):
self.writer.close()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=0):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / (.0001 + self.count)
def __str__(self):
if self.count == 0:
return '%d' % self.val
return '%.4f (%.4f)' % (self.val, self.avg)
def tb_log(self, tb_logger, name, step=None):
tb_logger.log_value(name, self.val, step=step)
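# Example (editor's sketch):
#   m = AverageMeter()
#   m.update(1.0, n=1); m.update(3.0, n=1)
#   m.avg  # ~2.0 (the .0001 in the denominator only guards count == 0)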
class TimeMeter(object):
"""Store last K times"""
def __init__(self, k=1000):
self.k = k
self.reset()
def reset(self):
self.vals = [0]*self.k
self.i = 0
self.mu = 0
def update(self, val):
self.vals[self.i] = val
self.i = (self.i + 1) % self.k
self.mu = (1-1./self.k)*self.mu+(1./self.k)*val
def __str__(self):
# return '%.4f +- %.2f' % (np.mean(self.vals), np.std(self.vals))
return '%.4f +- %.2f' % (self.mu, np.std(self.vals))
def tb_log(self, tb_logger, name, step=None):
tb_logger.log_value(name, self.vals[0], step=step)
class StatisticMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.mu = AverageMeter()
self.std = AverageMeter()
self.min = AverageMeter()
self.max = AverageMeter()
self.med = AverageMeter()
def update(self, val, n=0):
val = np.ma.masked_invalid(val)
val = val.compressed()
n = min(n, len(val))
if n == 0:
return
self.mu.update(np.mean(val), n=n)
self.std.update(np.std(val), n=n)
self.min.update(np.min(val), n=n)
self.max.update(np.max(val), n=n)
self.med.update(np.median(val), n=n)
def __str__(self):
# return 'mu:{}|med:{}|std:{}|min:{}|max:{}'.format(
# self.mu, self.med, self.std, self.min, self.max)
return 'mu:{}|med:{}'.format(self.mu, self.med)
def tb_log(self, tb_logger, name, step=None):
self.mu.tb_log(tb_logger, name+'_mu', step=step)
self.med.tb_log(tb_logger, name+'_med', step=step)
self.std.tb_log(tb_logger, name+'_std', step=step)
self.min.tb_log(tb_logger, name+'_min', step=step)
self.max.tb_log(tb_logger, name+'_max', step=step)
class LogCollector(object):
"""A collection of logging objects that can change from train to val"""
def __init__(self, opt):
self.meters = OrderedDict()
self.log_keys = opt.log_keys.split(',')
def reset(self):
self.meters = OrderedDict()
def update(self, k, v, n=0, log_scale=False, bins=100):
if k not in self.meters:
if type(v).__module__ == np.__name__:
self.meters[k] = StatisticMeter()
else:
self.meters[k] = AverageMeter()
self.meters[k].update(v, n)
def __str__(self):
s = ''
for i, (k, v) in enumerate(self.meters.items()):
if k in self.log_keys or 'all' in self.log_keys:
if i > 0:
s += ' '
s += k+': '+str(v)
return s
def tb_log(self, tb_logger, prefix='', step=None):
for k, v in self.meters.items():
v.tb_log(tb_logger, prefix+k, step=step)
class Profiler(object):
def __init__(self, k=10):
self.k = k
self.meters = OrderedDict()
self.start()
def tic(self):
self.t = time.time()
def toc(self, name):
end = time.time()
if name not in self.times:
self.times[name] = []
self.times[name] += [end-self.t]
self.tic()
def start(self):
self.times = OrderedDict()
self.tic()
def end(self):
for k, v in self.times.items():
if k not in self.meters:
self.meters[k] = TimeMeter(self.k)
self.meters[k].update(sum(v))
self.start()
def __str__(self):
s = ''
for i, (k, v) in enumerate(self.meters.items()):
if i > 0:
s += ' '
s += k+': ' + str(v)
return s
| 5,411 | 27.041451 | 75 |
py
|
learning-to-quantize
|
learning-to-quantize-master/log_plotter.py
|
from scipy import interpolate
import numpy as np
import os
import re
import torch
import pylab as plt
import matplotlib.ticker as mtick
from tensorboard.backend.event_processing import event_accumulator
def get_run_names(logdir, patterns):
run_names = []
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names += [root]
# print(run_names)
run_names.sort()
return run_names
def get_run_names_events(logdir, patterns):
run_names = {}
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names[root] = []
for file in files:
                    if re.match(r'.*events\.out.*', file):
run_names[root].append(file)
run_names[root] = sorted(run_names[root])
# print(run_names)
return run_names
def get_data_pth(logdir, run_names, tag_names, batch_size=None):
data = []
for run_name in run_names:
d = {}
logdata = torch.load(run_name + '/log.pth.tar')
for tag_name in tag_names:
if tag_name not in logdata:
continue
js = logdata[tag_name]
d[tag_name] = np.array([[x[j] for x in js]
for j in range(1, 3)])
data += [d]
return data
def get_data_pth_events(logdir, run_names, tag_names, batch_size=None):
data = []
for run_name, events in run_names.items():
d = {}
for event in events:
ea = event_accumulator.EventAccumulator(run_name+'/'+event,
            size_guidance={  # limit how much of each event type is loaded
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 0,
event_accumulator.HISTOGRAMS: 1,
})
ea.Reload()
for tag_name in tag_names:
if tag_name not in ea.Tags()['scalars']:
continue
scalar = ea.Scalars(tag_name)
if tag_name not in d:
d[tag_name] = np.array(
[[dp.step for dp in scalar], [dp.value for dp in scalar]])
else:
new_array = np.array([dp.step for dp in scalar])
indexes = new_array > d[tag_name][0][-1]
res1 = np.concatenate(
(d[tag_name][0], np.array([dp.step for dp in scalar])[indexes]))
res2 = np.concatenate(
(d[tag_name][1], np.array([dp.value for dp in scalar])[indexes]))
d[tag_name] = (res1, res2)
data += [d]
return data
def plot_smooth(x, y, npts=100, order=3, *args, **kwargs):
x_smooth = np.linspace(x.min(), x.max(), npts)
tck = interpolate.splrep(x, y, s=0)
y_smooth = interpolate.splev(x_smooth, tck, der=0)
plt.plot(x_smooth, y_smooth, *args, **kwargs)
def plot_smooth_o1(x, y, *args, **kwargs):
plot_smooth(x, y, 100, 1, *args, **kwargs)
def get_legend(lg_tags, run_name, lg_replace=[]):
lg = ""
for lgt in lg_tags:
res = ".*?($|,)" if ',' not in lgt and '$' not in lgt else ''
mg = re.search(lgt + res, run_name)
if mg:
lg += mg.group(0)
lg = lg.replace('_,', ',')
lg = lg.strip(',')
for a, b in lg_replace:
lg = lg.replace(a, b)
return lg
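# Example (editor's sketch): pick the matching key/value fragments out of a
# run directory name to build a plot legend:
#   get_legend(['lr', 'nuq_bits'], 'runs/g/0_000_lr_0.1,nuq_bits_4')
#   # -> 'lr_0.1,nuq_bits_4'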
def plot_tag(data, plot_f, run_names, tag_name, lg_tags, ylim=None, color0=0,
ncolor=None, lg_replace=[], no_title=False):
xlabel = {}
ylabel = {'Tacc': 'Training Accuracy (%)', 'Terror': 'Training Error (%)',
'train/accuracy': 'Training Accuracy (%)',
'Vacc': 'Test Accuracy (%)', 'Verror': 'Test Error (%)',
'valid/accuracy': 'Test Accuracy (%)',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss', 'Vloss': 'Loss', 'lr': 'Learning rate',
'grad_bias': 'Gradient Diff norm',
'est_var': 'Mean variance',
'est_snr': 'Mean SNR',
'nb_error': 'NB Error',
'est_nvar': 'Mean Normalized Variance'}
titles = {'Tacc': 'Training Accuracy', 'Terror': 'Training Error',
'train/accuracy': 'Training Accuracy',
'Vacc': 'Test Accuracy', 'Verror': 'Test Error',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss on full training set', 'lr': 'Learning rate',
'Vloss': 'Loss on validation set',
'grad_bias': 'Optimization Step Bias',
'nb_error': 'Norm-based Variance Error',
'est_var': 'Optimization Step Variance (w/o learning rate)',
'est_snr': 'Optimization Step SNR',
'est_nvar': 'Optimization Step Normalized Variance (w/o lr)',
}
yscale_log = ['Tloss', 'Vloss', 'est_var'] # , 'est_var'
yscale_base = []
# yscale_sci = ['est_bias', 'est_var']
plot_fs = {'Tacc': plot_f, 'Vacc': plot_f,
'Terror': plot_f, 'Verror': plot_f,
'Tloss': plot_f, 'Vloss': plot_f,
}
for k in list(ylabel.keys()):
if k not in xlabel:
xlabel[k] = 'Training Iteration'
        if k not in plot_fs:
            plot_fs[k] = plot_f
if not isinstance(data, list):
data = [data]
run_names = [run_names]
color = ['blue', 'orangered', 'limegreen', 'darkkhaki', 'cyan', 'grey']
color = color[:ncolor]
style = ['-', '--', ':', '-.']
# plt.rcParams.update({'font.size': 12})
plt.grid(linewidth=1)
legends = []
for i in range(len(data)):
if tag_name not in data[i]:
continue
legends += [get_legend(lg_tags, run_names[i], lg_replace)]
plot_fs[tag_name](
data[i][tag_name][0], data[i][tag_name][1],
linestyle=style[(color0 + i) // len(color)],
color=color[(color0 + i) % len(color)], linewidth=2)
if not no_title:
plt.title(titles[tag_name])
if tag_name in yscale_log:
ax = plt.gca()
if tag_name in yscale_base:
ax.set_yscale('log', basey=np.e)
ax.yaxis.set_major_formatter(mtick.FuncFormatter(ticks))
else:
ax.set_yscale('log')
else:
ax = plt.gca()
ax.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
if ylim is not None:
plt.ylim(ylim)
# plt.xlim([0, 25000])
plt.legend(legends, bbox_to_anchor=(1.1, 1.05))
plt.xlabel(xlabel[tag_name])
plt.ylabel(ylabel[tag_name])
def ticks(y, pos):
return r'$e^{{{:.0f}}}$'.format(np.log(y))
def plot_runs_and_tags(get_data_f, plot_f, logdir, patterns, tag_names,
fig_name, lg_tags, ylim, batch_size=None, sep_h=True,
ncolor=None, save_single=False, lg_replace=[],
no_title=False):
run_names = get_run_names_events(logdir, patterns)
data = get_data_f(logdir, run_names, tag_names, batch_size)
if len(data) == 0:
return data, run_names
num = len(tag_names)
height = (num + 1) // 2
width = 2 if num > 1 else 1
if not save_single:
fig = plt.figure(figsize=(7 * width, 4 * height))
fig.subplots(height, width)
else:
plt.figure(figsize=(7, 4))
plt.tight_layout(pad=1., w_pad=3., h_pad=3.0)
fi = 1
if save_single:
fig_dir = fig_name[:fig_name.rfind('.')]
try:
os.makedirs(fig_dir)
except os.error:
pass
for i in range(len(tag_names)):
yl = ylim[i]
if not isinstance(yl, list) and yl is not None:
yl = ylim
if not save_single:
plt.subplot(height, width, fi)
plot_tag(data, plot_f, list(run_names), tag_names[i], lg_tags, yl,
ncolor=ncolor, lg_replace=lg_replace, no_title=no_title)
if save_single:
plt.savefig('%s/%s.pdf' % (fig_dir, tag_names[i]),
dpi=100, bbox_inches='tight')
plt.figure(figsize=(7, 4))
fi += 1
plt.savefig(fig_name, dpi=100, bbox_inches='tight')
return data, run_names
| 8,772 | 36.016878 | 104 |
py
|
learning-to-quantize
|
learning-to-quantize-master/__init__.py
| 0 | 0 | 0 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/cifar10_wresnet2.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
# return self.fc(out)
# return F.log_softmax(out, dim=-1)
return x
| 3,794 | 42.62069 | 116 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/logreg.py
|
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, dim, num_class):
super(Linear, self).__init__()
self.linear = nn.Linear(dim, num_class)
def forward(self, x):
x = self.linear(x)
return F.log_softmax(x, dim=-1)
class TwoLinear(nn.Module):
def __init__(self, dim, num_class):
super(TwoLinear, self).__init__()
self.linear1 = nn.Linear(dim, dim)
self.linear2 = nn.Linear(dim, num_class)
def forward(self, x):
x = F.relu(self.linear1(x))
x = self.linear2(x)
return F.log_softmax(x, dim=-1)
| 637 | 24.52 | 48 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/linreg.py
|
import torch.nn as nn
# import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, dim, num_class):
super(Linear, self).__init__()
self.linear = nn.Linear(dim, num_class)
def forward(self, x):
x = self.linear(x)
return x
class TwoLinear(nn.Module):
def __init__(self, dim, num_class):
super(TwoLinear, self).__init__()
self.linear1 = nn.Linear(dim, dim)
self.linear2 = nn.Linear(dim, num_class)
def forward(self, x):
# x = F.relu(self.linear1(x))
x = self.linear1(x)
x = self.linear2(x)
return x
| 623 | 23 | 48 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/loss.py
|
import torch.nn.functional as F
def nll_loss(model, data, reduction='mean', weights=1):
data, target = data[0].cuda(), data[1].cuda()
model.zero_grad()
output = model(data)
loss = F.nll_loss(output, target, reduction=reduction)*weights
return loss
| 270 | 26.1 | 66 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/cifar10_wresnet.py
|
# https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
# return self.fc(out)
return F.log_softmax(out, dim=-1)
| 3,850 | 43.264368 | 116 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/cifar10.py
|
# https://github.com/akamaster/pytorch_resnet_cifar10
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
numbers of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
__all__ = ['ResNet', 'resnet8', 'resnet20', 'resnet32',
'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
if hasattr(init, 'kaiming_normal_'):
init.kaiming_normal_(m.weight)
else:
init.kaiming_normal(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_class=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_class)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return F.log_softmax(out, dim=-1)
def resnet8(num_class=10):
return ResNet(BasicBlock, [1, 1, 1], num_class=num_class)
def resnet20(num_class=10):
return ResNet(BasicBlock, [3, 3, 3], num_class=num_class)
def resnet32(num_class=10):
return ResNet(BasicBlock, [5, 5, 5], num_class=num_class)
def resnet44(num_class=10):
return ResNet(BasicBlock, [7, 7, 7], num_class=num_class)
def resnet56(num_class=10):
return ResNet(BasicBlock, [9, 9, 9], num_class=num_class)
def resnet110(num_class=10):
return ResNet(BasicBlock, [18, 18, 18], num_class=num_class)
def resnet1202(num_class=10):
return ResNet(BasicBlock, [200, 200, 200], num_class=num_class)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(
lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters()))))
class Convnet(nn.Module):
def __init__(self, dropout=True, num_class=10):
"""
        2 conv + 2 fc + dropout, from the Adam paper;
        similar to the MNIST convnet.
        100 epochs, lr update at 50.
"""
super(Convnet, self).__init__()
self.dropout = dropout
# self.input_drop = nn.Dropout2d(p=0.2)
self.conv1 = nn.Conv2d(3, 64, kernel_size=5)
self.conv2 = nn.Conv2d(64, 128, kernel_size=5)
# self.conv2 = nn.Conv2d(64, 64, kernel_size=5)
# self.conv3 = nn.Conv2d(64, 128, kernel_size=5)
self.fc1 = nn.Linear(128*5*5, 1000)
self.fc2 = nn.Linear(1000, num_class)
def forward(self, x):
if self.dropout:
x = F.dropout2d(x, training=self.training, p=0.2)
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
# x = F.relu(F.max_pool2d(self.conv3(x), 3))
x = x.view(-1, 128*5*5)
if self.dropout:
x = F.dropout(x, training=self.training)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
class MLP(nn.Module):
def __init__(self, dropout=True, num_class=10):
"""
mnist MLP
"""
super(MLP, self).__init__()
self.dropout = dropout
self.fc1 = nn.Linear(3*32*32, 1024)
self.fc2 = nn.Linear(1024, 1024)
# self.fc3 = nn.Linear(1024, 1024)
self.fc4 = nn.Linear(1024, num_class)
def forward(self, x):
x = x.view(-1, 3*32*32)
if self.dropout:
x = F.dropout(x, training=self.training, p=0.2)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
# if self.dropout:
# x = F.dropout(x, training=self.training)
# x = F.relu(self.fc3(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc4(x)
return F.log_softmax(x, dim=-1)
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print()
| 7,687 | 31.033333 | 78 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/__init__.py
|
import torch
import torch.nn
import models.mnist
import models.cifar10
import models.logreg
import models.imagenet
import models.cifar10_wresnet
import models.loss
def init_model(opt):
if opt.dataset == 'mnist':
if opt.arch == 'cnn':
model = models.mnist.Convnet(not opt.nodropout)
elif opt.arch == 'bigcnn':
model = models.mnist.BigConvnet(not opt.nodropout)
elif opt.arch == 'mlp':
model = models.mnist.MLP(not opt.nodropout)
elif opt.arch == 'smlp':
model = models.mnist.SmallMLP(not opt.nodropout)
elif opt.arch == 'ssmlp':
model = models.mnist.SuperSmallMLP(not opt.nodropout)
elif (opt.dataset == 'cifar10' or opt.dataset == 'svhn'
or opt.dataset == 'cifar100'):
if opt.arch == 'cnn':
model = models.cifar10.Convnet(num_class=opt.num_class)
elif opt.arch == 'mlp':
model = models.cifar10.MLP(num_class=opt.num_class)
elif opt.arch.startswith('wrn'):
depth, widen_factor = map(int, opt.arch[3:].split('-'))
model = models.cifar10_wresnet.WideResNet(
depth, opt.num_class, widen_factor, 0.3)
else:
model = models.cifar10.__dict__[opt.arch](
num_class=opt.num_class)
model = torch.nn.DataParallel(model)
elif opt.dataset == 'imagenet':
model = models.imagenet.Model(opt.arch, opt.pretrained)
elif opt.dataset.startswith('imagenet'):
model = models.imagenet.Model(opt.arch, opt.pretrained, opt.num_class)
elif opt.dataset == 'logreg':
model = models.logreg.Linear(opt.dim, opt.num_class)
elif opt.dataset == '10class':
model = models.logreg.Linear(opt.dim, opt.num_class)
elif opt.dataset == '5class':
model = models.logreg.Linear(opt.dim, opt.num_class)
model.criterion = models.loss.nll_loss
if opt.cuda:
model.cuda()
return model
| 1,969 | 36.169811 | 78 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/imagenet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models
class Model(nn.Module):
def __init__(self, arch, pretrained=False, nclass=None):
super(Model, self).__init__()
model = torchvision.models.__dict__[arch](pretrained)
if arch.startswith('alexnet') or arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
else:
model = torch.nn.DataParallel(model)
if nclass is not None and nclass != model.module.fc.out_features:
if arch.startswith('resnet'):
model.module.fc = nn.Linear(model.module.fc.in_features,
nclass)
else:
raise Exception('Not implemented.')
self.model = model
def forward(self, x):
out = self.model(x)
return F.log_softmax(out, dim=-1)
| 910 | 34.038462 | 73 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/clone_model.py
|
import torch
import torch.nn as nn
import copy
from torch.nn.parallel.parallel_apply import parallel_apply
class CloneModel(nn.Module):
def __init__(self, module, batch_size):
super(CloneModel, self).__init__()
self.replicas = [module]
self.batch_size = batch_size
for i in range(batch_size):
            self.replicas += [copy.deepcopy(module)]  # += on a bare Module raises TypeError
def forward(self, *inputs, **kwargs):
inputs, kwargs = self.scatter(inputs, kwargs)
for i in range(1, self.batch_size):
self.replicas[i].load_state_dict(self.replicas[0].state_dict())
outputs = parallel_apply(self.replicas, inputs, kwargs)
return self.gather(outputs)
def scatter(self, inputs, kwargs):
x = inputs[0]
xs = torch.split(x, 1)
kwargs = None
return [xs], kwargs
def gather(self, outputs):
pass
| 887 | 28.6 | 75 |
py
|
learning-to-quantize
|
learning-to-quantize-master/models/mnist.py
|
import torch.nn as nn
import torch.nn.functional as F
class MNISTNet(nn.Module):
def __init__(self, dropout=True):
"""30 epochs no lr update
"""
super(MNISTNet, self).__init__()
self.dropout = dropout
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = self.conv2(x)
if self.dropout:
x = self.conv2_drop(x)
x = F.relu(F.max_pool2d(x, 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
class Convnet(nn.Module):
def __init__(self, dropout=True):
"""
        2 conv + 2 fc + dropout, aiming for roughly 0.5% error;
        close to what the maxout paper uses.
        30 epochs, no lr update.
"""
super(Convnet, self).__init__()
self.dropout = dropout
self.conv1 = nn.Conv2d(1, 64, kernel_size=5)
self.conv2 = nn.Conv2d(64, 128, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(128*4*4, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = self.conv2(x)
if self.dropout:
x = self.conv2_drop(x)
x = F.relu(F.max_pool2d(x, 2))
x = x.view(-1, 128*4*4)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
class BigConvnet(nn.Module):
def __init__(self, dropout=True):
"""
Bigger than Convnet, 1000 hidden dims
"""
super(BigConvnet, self).__init__()
self.dropout = dropout
self.conv1 = nn.Conv2d(1, 1000, kernel_size=5)
self.conv2 = nn.Conv2d(1000, 1000, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(1000*4*4, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = self.conv2(x)
if self.dropout:
x = self.conv2_drop(x)
x = F.relu(F.max_pool2d(x, 2))
x = x.view(-1, 1000*4*4)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
class MLP(nn.Module):
def __init__(self, dropout=True):
"""
Dropout paper, table 2, row 4, 1.25% error.
http://www.cs.toronto.edu/~nitish/dropout/mnist.pbtxt
50 epochs, lr update 30
"""
super(MLP, self).__init__()
self.dropout = dropout
self.fc1 = nn.Linear(28*28, 1024)
self.fc2 = nn.Linear(1024, 1024)
# self.fc3 = nn.Linear(1024, 1024)
self.fc4 = nn.Linear(1024, 10)
def forward(self, x):
x = x.view(-1, 28*28)
if self.dropout:
x = F.dropout(x, training=self.training, p=0.2)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
# if self.dropout:
# x = F.dropout(x, training=self.training)
# x = F.relu(self.fc3(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc4(x)
return F.log_softmax(x, dim=-1)
class SmallMLP(nn.Module):
def __init__(self, dropout=True):
"""
Like MLP but smaller hidden dims
"""
super(SmallMLP, self).__init__()
self.dropout = dropout
self.fc1 = nn.Linear(28*28, 50)
self.fc2 = nn.Linear(50, 50)
# self.fc3 = nn.Linear(1024, 1024)
self.fc4 = nn.Linear(50, 10)
def forward(self, x):
x = x.view(-1, 28*28)
if self.dropout:
x = F.dropout(x, training=self.training, p=0.2)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
# if self.dropout:
# x = F.dropout(x, training=self.training)
# x = F.relu(self.fc3(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc4(x)
return F.log_softmax(x, dim=-1)
class SuperSmallMLP(nn.Module):
def __init__(self, dropout=True):
"""
Like MLP but smaller hidden dims
"""
super(SuperSmallMLP, self).__init__()
self.dropout = dropout
self.fc1 = nn.Linear(28*28, 20)
self.fc2 = nn.Linear(20, 20)
# self.fc3 = nn.Linear(1024, 1024)
self.fc4 = nn.Linear(20, 10)
def forward(self, x):
x = x.view(-1, 28*28)
if self.dropout:
x = F.dropout(x, training=self.training, p=0.2)
x = F.relu(self.fc1(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
# if self.dropout:
# x = F.dropout(x, training=self.training)
# x = F.relu(self.fc3(x))
if self.dropout:
x = F.dropout(x, training=self.training)
x = self.fc4(x)
return F.log_softmax(x, dim=-1)
| 5,494 | 30.58046 | 61 |
py
|
learning-to-quantize
|
learning-to-quantize-master/estim/optim.py
|
import logging
import torch
import utils
from data import get_minvar_loader
from log_utils import LogCollector
from estim.gvar import MinVarianceGradient
class OptimizerFactory(object):
def __init__(self, model, train_loader, tb_logger, opt):
self.model = model
self.opt = opt
self.niters = 0
self.optimizer = None
self.epoch = 0
self.logger = LogCollector(opt)
self.param_groups = None
self.gest_used = False
minvar_loader = get_minvar_loader(train_loader, opt)
self.gvar = MinVarianceGradient(
model, minvar_loader, opt, tb_logger)
self.reset()
def reset(self):
model = self.model
opt = self.opt
if opt.optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(),
lr=opt.lr, momentum=opt.momentum,
weight_decay=opt.weight_decay,
nesterov=opt.nesterov)
elif opt.optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
lr=opt.lr,
weight_decay=opt.weight_decay)
self.optimizer = optimizer
if self.param_groups is not None:
self.optimizer.param_groups = self.param_groups
else:
self.param_groups = self.optimizer.param_groups
def step(self, profiler):
gvar = self.gvar
opt = self.opt
model = self.model
self.optimizer.zero_grad()
        # take statistics snapshots at the iterations scheduled by g_osnap_iter
inits = list(map(int, opt.g_osnap_iter.split(',')[0:2]))
every = int(opt.g_osnap_iter.split(',')[-1])
if (((self.niters - opt.gvar_start) % every == 0 or self.niters in inits)
and self.niters >= opt.gvar_start):
print(self.niters)
if opt.g_estim == 'nuq' and opt.nuq_method != 'none':
stats = gvar.gest.snap_online_mean(model)
if opt.nuq_parallel == 'ngpu':
for qdq in gvar.gest.qdq:
qdq.set_mean_variance(stats)
else:
gvar.gest.qdq.set_mean_variance(stats)
                if opt.nuq_method in ('amq', 'alq', 'alq_nb', 'amq_nb'):
if opt.nuq_parallel == 'ngpu':
for qdq in gvar.gest.qdq:
qdq.update_levels()
else:
gvar.gest.qdq.update_levels()
pg_used = gvar.gest_used
loss = gvar.grad(self.niters)
if gvar.gest_used != pg_used:
logging.info('Optimizer reset.')
self.gest_used = gvar.gest_used
utils.adjust_lr(self, opt)
self.reset()
self.optimizer.step()
profiler.toc('optim')
profiler.end()
return loss
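# Schedule sketch (editor's note, assuming gvar_start=0): with the default
# g_osnap_iter='100,1000,10000', quantization statistics are refreshed at
# iterations 100 and 1000 (the first two entries) and then every 10000
# iterations (the last entry) for the rest of training.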
| 2,964 | 33.476744 | 126 |
py
|
learning-to-quantize
|
learning-to-quantize-master/estim/sgd.py
|
import torch
import torch.nn
import torch.multiprocessing
from .gestim import GradientEstimator
class SGDEstimator(GradientEstimator):
def __init__(self, *args, **kwargs):
super(SGDEstimator, self).__init__(*args, **kwargs)
self.init_data_iter()
def grad(self, model, in_place=False):
data = next(self.data_iter)
loss = model.criterion(model, data)
if in_place:
loss.backward()
return loss
g = torch.autograd.grad(loss, model.parameters())
return g
| 543 | 22.652174 | 59 |
py
|
learning-to-quantize
|
learning-to-quantize-master/estim/gvar.py
|
import torch
import torch.nn
import torch.multiprocessing
import numpy as np
from estim.sgd import SGDEstimator
from estim.nuq import NUQEstimator
#from estim.nuq import NUQEstimatorSingleGPUParallel
from estim.nuq import NUQEstimatorMultiGPUParallel
class MinVarianceGradient(object):
def __init__(self, model, data_loader, opt, tb_logger):
self.model = model
sgd = SGDEstimator(data_loader, opt, tb_logger)
if opt.g_estim == 'sgd':
gest = SGDEstimator(data_loader, opt, tb_logger)
elif opt.g_estim == 'nuq':
if opt.nuq_parallel == 'no':
gest = NUQEstimator(data_loader, opt, tb_logger)
# elif opt.nuq_parallel == 'gpu1':
# gest = NUQEstimatorSingleGPUParallel(
# data_loader, opt, tb_logger)
else:
gest = NUQEstimatorMultiGPUParallel(
data_loader, opt, tb_logger)
self.sgd = sgd
self.gest = gest
self.opt = opt
self.tb_logger = tb_logger
self.gest_used = False
self.Esgd = 0
self.last_log_iter = 0
self.opt = opt
def is_log_iter(self, niters):
opt = self.opt
if (niters-self.last_log_iter >= opt.gvar_log_iter
and niters >= opt.gvar_start):
self.last_log_iter = niters
return True
return False
def create_histogram(self, norms, buckets):
keys = norms.keys()
bucket_norms = {}
def find_bucket(x):
for i in range(len(buckets) - 1):
if x >= buckets[i] and x < buckets[i + 1]:
return i
return len(buckets) - 1
for key in keys:
bucket = find_bucket(key)
if bucket not in bucket_norms.keys():
bucket_norms[bucket] = []
bucket_norms[bucket].append(norms[key])
variance = []
for i in range(len(buckets)):
if i not in bucket_norms.keys():
bucket_norms[i] = []
variance.append(0)
else:
variance.append(torch.var(torch.stack(bucket_norms[i])))
return variance
def log_var(self, model, niters):
tb_logger = self.tb_logger
gviter = self.opt.gvar_estim_iter
Ege, var_e, snr_e, nv_e = self.gest.get_Ege_var(model, gviter)
Esgd, var_s, snr_s, nv_s = self.sgd.get_Ege_var(model, gviter)
if self.opt.g_estim == 'sgd':
parameters = torch.cat([layer.view(-1)
for layer in self.sgd.grad(model)])
tb_logger.log_histogram('sgd_dist', parameters, step=niters)
norms = self.sgd.get_norm_distribution(
model, gviter, self.opt.nuq_bucket_size)
tb_logger.log_histogram(
'norm_dist', list(norms.keys()), step=niters)
variance = self.create_histogram(norms, [0, 0.01, 0.05, 0.1, 0.2])
for index, var in enumerate(variance):
tb_logger.log_value('var/' + str(index), var, step=niters)
        (variances, means, total_mean, total_variance,
         total_variance_normalized, total_mean_normalized,
         total_mean_unconcatenated, total_variance_unconcatenated) = \
            self.sgd.get_gradient_distribution(
                model, gviter, self.opt.nuq_bucket_size)
bias = torch.mean(torch.cat(
[(ee-gg).abs().flatten() for ee, gg in zip(Ege, Esgd)]))
if self.opt.g_estim == 'nuq':
if self.opt.nuq_method != 'none':
tb_logger.log_value('bits', float(
self.gest.qdq.bits), step=niters)
tb_logger.log_value('levels', float(
len(self.gest.qdq.levels)), step=niters)
for index, level in enumerate(self.gest.qdq.levels):
tb_logger.log_value(
'levels/' + str(index), float(level), step=niters)
tb_logger.log_value('includes_zero', float(
1 if 0 in self.gest.qdq.levels else 0), step=niters)
number_of_positive_levels = 0
number_of_negative_levels = 0
for level in self.gest.qdq.levels:
if level > 0:
number_of_positive_levels += 1
elif level < 0:
number_of_negative_levels += 1
tb_logger.log_value('positive_levels', float(
number_of_positive_levels), step=niters)
tb_logger.log_value('negative_levels', float(
number_of_negative_levels), step=niters)
if self.gest.qdq.error is not None:
tb_logger.log_value(
'nb_error', self.gest.qdq.error, step=niters)
if self.gest.qdq.grad_dist_nl is not None:
tb_logger.log_value(
'stats/mean', self.gest.qdq.grad_dist_nl.mean, step=niters)
tb_logger.log_value(
'stats/sigma', self.gest.qdq.grad_dist_nl.sigma, step=niters)
if self.opt.nuq_method == 'amq' or self.opt.nuq_method == 'amq_nb':
tb_logger.log_value('multiplier', float(
self.gest.qdq.multiplier), step=niters)
print('est_var is', var_e)
tb_logger.log_value('grad_bias', float(bias), step=niters)
tb_logger.log_value('est_var', float(var_e), step=niters)
tb_logger.log_value('sgd_var', float(var_s), step=niters)
tb_logger.log_value('est_snr', float(snr_e), step=niters)
tb_logger.log_value('sgd_snr', float(snr_s), step=niters)
tb_logger.log_value('est_nvar', float(nv_e), step=niters)
tb_logger.log_value('sgd_nvar', float(nv_s), step=niters)
tb_logger.log_value('tot_var_norm', float(
total_variance_normalized), step=niters)
tb_logger.log_value('tot_var', float(total_variance), step=niters)
tb_logger.log_value('tot_mean_norm', float(
total_mean_normalized), step=niters)
tb_logger.log_value('tot_mean', float(total_mean), step=niters)
tb_logger.log_value('tot_var_norm_layer', float(
total_variance_unconcatenated), step=niters)
        tb_logger.log_value('tot_mean_norm_layer', float(
            total_mean_unconcatenated), step=niters)
sgd_x, est_x = ('', '[X]') if self.gest_used else ('[X]', '')
return ('G Bias: %.8f\t'
'%sSGD Var: %.8f\t %sEst Var: %.8f\t'
'SGD N-Var: %.8f\t Est N-Var: %.8f\t'
                % (bias, sgd_x, var_s, est_x, var_e, nv_s, nv_e))
def grad(self, niters):
model = self.model
model.train()
use_sgd = self.use_sgd(niters)
if use_sgd:
self.gest_used = False
return self.sgd.grad(model, in_place=True)
self.gest_used = True
return self.gest.grad(model, in_place=True)
def use_sgd(self, niters):
return not self.opt.g_optim or niters < self.opt.g_optim_start
def state_dict(self):
return self.gest.state_dict()
def load_state_dict(self, state):
self.gest.load_state_dict(state)
| 7,202 | 41.875 | 198 |
py
|
learning-to-quantize
|
learning-to-quantize-master/estim/nuq.py
|
import torch
import torch.nn
import torch.multiprocessing
import numpy as np
import copy
import math
from args import opt_to_nuq_kwargs
from .gestim import GradientEstimator
from nuq.quantize import QuantizeMultiBucket
class NUQEstimator(GradientEstimator):
def __init__(self, *args, **kwargs):
super(NUQEstimator, self).__init__(*args, **kwargs)
self.init_data_iter()
self.qdq = QuantizeMultiBucket(**opt_to_nuq_kwargs(self.opt))
self.ngpu = self.opt.nuq_ngpu
self.acc_grad = None
def state_dict(self):
return {
'qdq': self.qdq.state_dict()
}
def load_state_dict(self, state):
print(state)
self.qdq.load_state_dict(state['qdq'])
def get_norm_distribution(self, model, gviter, bucket_size=1024):
norms = {}
for i in range(gviter):
minibatch_gradient = self.grad_estim(model)
flattened_parameters = self._flatten(
minibatch_gradient)
num_bucket = int(np.ceil(len(flattened_parameters) / bucket_size))
for bucket_i in range(num_bucket):
start = bucket_i * bucket_size
end = min((bucket_i + 1) * bucket_size,
len(flattened_parameters))
x_bucket = flattened_parameters[start:end].clone()
if bucket_i not in norms.keys():
norms[bucket_i] = []
norms[bucket_i].append(x_bucket)
return norms
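    # Bucketing sketch (editor's note; hypothetical sizes): with 2050
    # flattened gradient entries and bucket_size=1024, buckets cover
    # [0:1024), [1024:2048) and [2048:2050); norms[i] then holds gviter
    # sampled slices of bucket i, one per minibatch gradient estimate.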
def grad(self, model_new, in_place=False):
model = model_new
ig_sm_bkts = self.opt.nuq_ig_sm_bkts
if self.acc_grad is None:
self.acc_grad = []
with torch.no_grad():
for p in model.parameters():
self.acc_grad += [torch.zeros_like(p)]
else:
for a in self.acc_grad:
a.zero_()
for i in range(self.ngpu):
model.zero_grad()
data = next(self.data_iter)
loss = model.criterion(model, data)
grad = torch.autograd.grad(loss, model.parameters())
layers = len(list(model.parameters()))
per_layer = not self.opt.nuq_layer
with torch.no_grad():
if not per_layer:
flatt_grad = self._flatten(grad)
flatt_grad_q = self.qdq.quantize(flatt_grad, ig_sm_bkts)
grad_like_q = self.unflatten(flatt_grad_q, grad)
for g, a in zip(grad_like_q, self.acc_grad):
a += g / self.ngpu
else:
for g, a in zip(grad, self.acc_grad):
a += self.qdq.quantize(g, ig_sm_bkts) / self.ngpu
if in_place:
for p, a in zip(model.parameters(), self.acc_grad):
if p.grad is None:
p.grad = a.clone()
else:
p.grad.copy_(a)
return loss
return self.acc_grad
class NUQEstimatorMultiGPUParallel(GradientEstimator):
def __init__(self, *args, **kwargs):
super(NUQEstimatorMultiGPUParallel, self).__init__(*args, **kwargs)
self.init_data_iter()
nuq_kwargs = opt_to_nuq_kwargs(self.opt)
self.ngpu = self.opt.nuq_ngpu
self.acc_grad = None
self.models = None
self.qdq = []
for i in range(self.ngpu):
with torch.cuda.device(i):
self.qdq += [QuantizeMultiBucket(**nuq_kwargs)]
def grad(self, model_new, in_place=False):
if self.models is None:
self.models = [model_new]
for i in range(1, self.ngpu):
with torch.cuda.device(i):
self.models += [copy.deepcopy(model_new)]
self.models[-1] = self.models[-1].cuda()
else:
# sync weights
for i in range(1, self.ngpu):
for p0, pi in zip(self.models[0].parameters(),
self.models[i].parameters()):
with torch.no_grad():
pi.copy_(p0)
models = self.models
# forward-backward prop
loss = []
for i in range(self.ngpu):
models[i].zero_grad() # criterion does it
data = next(self.data_iter)
with torch.cuda.device(i):
loss += [models[i].criterion(models[i], data)]
loss[i].backward()
loss = loss[-1]
layers = len(list(models[0].parameters()))
# quantize all grads
for i in range(self.ngpu):
with torch.no_grad():
with torch.cuda.device(i):
torch.cuda.synchronize()
if self.opt.nuq_layer == 1:
                        # quantize the gradient, not the parameter values
                        flattened_array = self._flatten(
                            [p.grad for p in models[i].parameters()])
gradient_quantized = self.qdq[i].quantize(
flattened_array, layers) / self.ngpu
unflattened_array = self.unflatten(
gradient_quantized, models[i].parameters())
for p, q in zip(models[i].parameters(),
unflattened_array):
p.grad.copy_(q)
else:
for p in models[i].parameters():
p.grad.copy_(self.qdq[i].quantize(
p.grad, layers) / self.ngpu)
# aggregate grads into gpu0
for i in range(1, self.ngpu):
for p0, pi in zip(models[0].parameters(), models[i].parameters()):
p0.grad.add_(pi.grad.to('cuda:0'))
if in_place:
return loss
acc_grad = []
with torch.no_grad():
for p in models[0].parameters():
acc_grad += [p.grad.clone()]
return acc_grad
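# A standalone sketch (illustrative) of the flatten -> quantize -> unflatten
# round trip used by the grad() methods above, with an identity stand-in for
# QuantizeMultiBucket so it runs without the CUDA extension:
if __name__ == '__main__':
    grads = [torch.randn(3, 4), torch.randn(5)]
    flat = torch.cat([g.flatten() for g in grads])
    flat_q = flat.clone()                    # stand-in for self.qdq.quantize
    out, begin = [], 0
    for g in grads:                          # unflatten back to layer shapes
        n = g.numel()
        out.append(flat_q[begin:begin + n].view_as(g))
        begin += n
    assert all(torch.equal(a, b) for a, b in zip(grads, out))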
| 5,978 | 35.018072 | 78 |
py
|
learning-to-quantize
|
learning-to-quantize-master/estim/dist.py
|
from scipy.stats import truncnorm
from scipy import integrate
import numpy as np
import bisect
import matplotlib.pyplot as plt
class Distribution:
def __init__(self, begin=-1, end=+1, nbins=1000, bin_type='linear'):
self.begin = begin
self.end = end
self.bin_edges = bin_edges = self._get_bin_edges(nbins, bin_type)
self.bin_centers = (bin_edges[1:]+bin_edges[:-1])/2
self.bin_width = (bin_edges[1:]-bin_edges[:-1])
def _get_bin_edges(self, nbins, bin_type):
if bin_type == 'linear':
bin_edges = np.linspace(self.begin, self.end, nbins)
elif bin_type == 'log':
bin_edges = np.logspace(self.begin, self.end, nbins)/10
bin_edges = np.concatenate((-np.flip(bin_edges), [0], bin_edges))
# TODO: assumes symmetric around 0
return bin_edges
def est_var_adjacent_levels(self, left_level, right_level):
# From Eq 6 in the paper
# int_a^b sigma^2(r) f(r) dr
# = sum_{ind(e_l)}^{ind(e_r)} f(r)
# int_{max(a,e_l)}^{min(e_r,b)} sigma^2(r) dr
# TODO: test this function
bin_edges = self.bin_edges
# TODO: test these bisects
# left_edge = bisect.bisect_right(bin_edges, left_level)-1
# right_edge = bisect.bisect_left(bin_edges, right_level)
var = 0
c = left_level
d = right_level
# for index in range(left_edge, right_edge):
# a = max(bin_edges[index], left_level)
# b = min(bin_edges[index+1], right_level)
# # int_a^b (x - c) (d - x) dx = 1/6 (a - b) (2 a^2 + 2 a b
# # - 3 a (c + d) + 2 b^2 - 3 b (c + d) + 6 c d)
# # where c is left_level and d is right_level
# center = (a+b)/2
# var += self.pdf(center) * 1/6 * (a-b) * (
# 2*a**2+2*a*b-3*a*(c+d)+2*b**2-3*b*(c+d)+6*c*d)
def f(x):
return (x - c) * (d - x) * self.pdf(x)
# print('adj_lev', left_level, right_level)
intg = integrate.quad(f, c, d)[0]
# x = np.linspace(c, d, 10000)
# y = [f(val_x) for val_x in x]
# plt.plot(x, y)
# plt.show()
# assert np.abs(intg - var) < 1e-5
return intg
def estimate_variance_adj_inv(self, left_level, right_level):
# calculate Eq 8 of the paper
# ppf(cdf(d) - int_c^d (r - c) * pdf(r) dr / (d - c))
# integration is equal to
# = sum_{ind(e_l)}^{ind(e_r)} f(r)
# int_{max(a,e_l)}^{min(e_r,b)} (r-c) dr
# where c is left_level and d is right_level
bin_edges = self.bin_edges
# left_edge = bisect.bisect_right(bin_edges, left_level)-1
# right_edge = bisect.bisect_left(bin_edges, right_level)
intg = 0
c = left_level
d = right_level
# for index in range(left_edge, right_edge):
# a = max(bin_edges[index], left_level)
# b = min(bin_edges[index+1], right_level)
# # int_a^b (r - c) dr = -1/2 (a - b) (a + b - 2 c)
# # where c is left_level and d is right_level
# center = (a+b)/2
# intg += self.pdf(center) * -1/2 * (a - b) * (a + b - 2 * c)
def f(x):
return (x - c) * self.pdf(x)
# print('adj_lev_inv', left_level, right_level)
# x = np.linspace(c, d, 10000)
# y = [f(val_x) for val_x in x]
# plt.plot(x, y)
# plt.show()
intg_by_intg = integrate.quad(f, c, d)[0]
# err = np.abs(intg - intg_by_intg)
#assert err < 1e-4, \
# 'Integration does not have enough accuracy left level %s, right level %s, error is %s' \
# % (left_level, right_level, err)
inv_arg = self.cdf(right_level) - intg_by_intg / (d-c)
return self.ppf(inv_arg)
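    # Worked reading of Eq. 8 (illustrative note): the update places the new
    # level at
    #     ppf( cdf(d) - E[(r - c) * 1{c <= r <= d}] / (d - c) ),
    # i.e. at the point whose CDF equals the right level's CDF minus the
    # normalized first moment of the interval. For a pdf symmetric about the
    # midpoint of [c, d], this reduces to the midpoint itself.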
def estimate_variance(self, levels):
# TODO: test this function
var = 0
for index, left_level in enumerate(levels[:-1]):
right_level = levels[index+1]
var += self.est_var_adjacent_levels(
left_level, right_level)
return var
def estimate_variance_int(self, levels, dist=None):
# variance estimate calculation by integration
# optional dist parameter to provide your own distribution function
var = 0.0
dist = self if dist is None else dist
for index, _ in enumerate(levels[:-1]):
def f(x):
pdf = dist.pdf(x)
index_l = bisect.bisect_left(levels, x) - 1
variance = (x - levels[index_l]) * (levels[index_l + 1] - x)
return variance * pdf
var += integrate.quad(lambda x: f(x),
levels[index], levels[index + 1])[0]
return var
def pdf(self, x):
raise NotImplementedError('PDF has not been implemented.')
def cdf(self, x):
raise NotImplementedError('CDF has not been implemented.')
class TruncNorm(Distribution):
def __init__(self, mean, sigma, begin=-1, end=+1, nbins=100,
bin_type='linear'):
super().__init__(begin, end, nbins, bin_type)
self.mean = mean
self.sigma = sigma
self.begin = begin
self.end = end
self.nbins = nbins
self.a = (begin - self.mean) / self.sigma
self.b = (end - self.mean) / self.sigma
def cdf(self, x):
a = self.a
b = self.b
mu = self.mean
sigma = self.sigma
return truncnorm.cdf(x, a, b, loc=mu, scale=sigma)
def pdf(self, x):
a = self.a
b = self.b
mu = self.mean
sigma = self.sigma
return truncnorm.pdf(x, a, b, loc=mu, scale=sigma)
def ppf(self, x):
a = self.a
b = self.b
mu = self.mean
sigma = self.sigma
return truncnorm.ppf(x, a, b, loc=mu, scale=sigma)
class CondNormalTrunc(Distribution):
def __init__(self, means, sigmas, norms, begin=-1, end=1,
nbins=1000, bin_type='linear'):
super().__init__(begin, end, nbins, bin_type)
self.means = np.asarray(means)
self.sigmas = np.asarray(sigmas)
self.norms = np.asarray(norms)
self.end = end
self.begin = begin
self.total_norm = np.sum(self.norms)
self.a = (begin - self.means) / self.sigmas
self.b = (end - self.means) / self.sigmas
self.coeff = self.norms / self.total_norm
def cdf(self, x):
cdfs = truncnorm.cdf(
x, self.a, self.b, loc=self.means, scale=self.sigmas)
return np.sum(np.dot(cdfs, self.coeff))
def pdf(self, x):
pdfs = truncnorm.pdf(
x, self.a, self.b, loc=self.means, scale=self.sigmas)
return np.sum(np.dot(pdfs, self.coeff))
class CondNormalTruncHist(Distribution):
def __init__(self, means, sigmas, norms, begin=-1, end=+1, nbins=100,
bin_type='linear'):
super().__init__(begin, end, nbins, bin_type)
self.means = np.asarray(means)
self.sigmas = np.asarray(sigmas)
self.norms = np.asarray(norms)
self.begin = begin
self.end = end
self.nbins = nbins
self.total_norm = np.sum(self.norms)
self.a = (begin - self.means) / self.sigmas
self.b = (end - self.means) / self.sigmas
self.coeff = self.norms / self.total_norm
self.pdf_bin_sum = self._quantized_sum_pdf()
self.cdf_bin_sum = np.cumsum(self.pdf_bin_sum).clip(0, 1)
# self.ppf_bin_width = (self.cdf_bin_sum[1:]-self.cdf_bin_sum[:-1])
self.pdf_at_centers = self.pdf_bin_sum / self.bin_width
def _quantized_sum_pdf(self):
from scipy import stats
mu = self.means
sigma = self.sigmas
norms = self.norms
a_vals = self.a
b_vals = self.b
bin_edges = self.bin_edges
pdf_bin_sum = 0
for m, s, n, a_val, b_val in zip(mu, sigma, norms, a_vals, b_vals):
cdfa = stats.truncnorm.cdf(bin_edges[:-1], loc=m, scale=s,
a=a_val, b=b_val)
cdfb = stats.truncnorm.cdf(bin_edges[1:], loc=m, scale=s,
a=a_val, b=b_val)
pdfb = cdfb-cdfa
pdfb /= pdfb.sum()
pdf_bin_sum = n / self.total_norm * pdfb + pdf_bin_sum
pdf_bin_sum /= pdf_bin_sum.sum()
return pdf_bin_sum
def cdf(self, x):
index = bisect.bisect_right(self.bin_edges, x)-1
if index == len(self.bin_edges)-1:
# case: x=self.end
return 1.0
cdf_at_x = self.cdf_bin_sum[index-1] if index > 0 else 0
weight = (x-self.bin_edges[index])/self.bin_width[index]
cdf_at_x += weight*self.pdf_bin_sum[index]
return cdf_at_x
def pdf(self, x):
index = bisect.bisect_right(self.bin_edges, x)-1
if index == len(self.pdf_at_centers):
return 0.0
return self.pdf_at_centers[index]
def ppf(self, cdf_at_x):
# TODO: I need to test
index = bisect.bisect_right(self.cdf_bin_sum, cdf_at_x)-1
if index == len(self.cdf_bin_sum)-1:
# case: cdf_at_x = 1
return 1.0
# TODO: should we set to self.begin or self.a?
# special case: left edge
x = self.bin_edges[index] if index >= 0 else self.begin
ppf_bin_width = self.cdf_bin_sum[index+1]-self.cdf_bin_sum[index]
weight = (cdf_at_x-self.cdf_bin_sum[index])/ppf_bin_width
x += weight*self.bin_width[index]
return x
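# Sanity check (illustrative) for the adjacent-level variance of Eq. 6: the
# pairwise sum in estimate_variance() should agree with the direct
# integration in estimate_variance_int() on the same levels.
if __name__ == '__main__':
    dist = TruncNorm(0.0, 0.3, begin=-1, end=1, nbins=1000)
    levels = list(np.linspace(-1, 1, num=8))
    v_pair = dist.estimate_variance(levels)
    v_int = dist.estimate_variance_int(levels)
    print('pairwise: %.6g  direct: %.6g' % (v_pair, v_int))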
| 9,644 | 34.855019 | 101 |
py
|
learning-to-quantize
|
learning-to-quantize-master/estim/gestim.py
|
import torch
import torch.nn
import torch.multiprocessing
import numpy as np
import math
import random
import copy
import logging
from data import InfiniteLoader
class GradientEstimator(object):
def __init__(self, data_loader, opt, tb_logger=None, *args, **kwargs):
self.opt = opt
self.model = None
self.data_loader = data_loader
self.tb_logger = tb_logger
self.niters = 0
self.random_indices = None
def update_niters(self, niters):
self.niters = niters
def init_data_iter(self):
self.data_iter = iter(InfiniteLoader(self.data_loader))
self.estim_iter = iter(InfiniteLoader(self.data_loader))
def snap_batch(self, model):
pass
def update_sampler(self):
pass
def _calc_stats_buckets(self, buckets):
stats = {
'sigma': [],
'mean': []
}
i = 0
for bucket in buckets:
current_bk = torch.stack(buckets[bucket])
stats['mean'].append(torch.mean(current_bk).cpu().item())
stats['sigma'].append(torch.sqrt(torch.mean(
torch.var(current_bk, dim=0, unbiased=False))).cpu().item())
i += 1
return stats
def _get_raw_grad(self, model):
dt = self.data_iter
self.data_iter = self.estim_iter
model.zero_grad()
data = next(self.data_iter)
loss = model.criterion(model, data)
grad = torch.autograd.grad(loss, model.parameters())
self.data_iter = dt
return grad
def _get_grad_samples(self, model, num_of_samples):
grads = []
for i in range(num_of_samples):
grad = self._get_raw_grad(model)
copy_array = []
for layer in grad:
copy_array.append(layer.clone())
grads.append(copy_array)
return grads
def _get_stats_lb(self, grads):
# get stats layer based
bs = self.opt.nuq_bucket_size
nuq_layer = self.opt.nuq_layer
sep_bias_grad = self.opt.sep_bias_grad
# total number of weights
nw = sum([w.numel() for w in grads[0]])
# total sum of gradients
tsum = torch.zeros(nw).cuda()
buckets = None
total_norm = None
for i, grad in enumerate(grads):
fl_norm_lb = self._flatt_and_normalize_lb(grad, bs, nocat=True)
if buckets is None:
buckets = [[] for j in range(len(fl_norm_lb))]
total_norm = [0.0 for j in range(len(fl_norm_lb))]
            fl_norm = self._flatten_lb(grad)
            tsum += self._flatten([torch.cat(layer) for layer in fl_norm_lb])
for j in range(len(fl_norm_lb)):
buckets[j].append(fl_norm_lb[j])
total_norm[j] += fl_norm[j].norm()
stats = self._calc_stats_buckets(buckets)
stats['norm'] = torch.tensor(total_norm)
return stats
def _get_stats_lb_sep(self, grads):
# get stats layer based
bs = self.opt.nuq_bucket_size
nuq_layer = self.opt.nuq_layer
sep_bias_grad = self.opt.sep_bias_grad
buckets_bias = {}
total_norm_bias = {}
buckets_weights = {}
total_norm_weights = {}
samples = len(grads)
fl_norm_bias, fl_norm_weights = self._flatten_sep(grads[0])
fl_norm_lb_bias, fl_norm_lb_weights = \
self._flatt_and_normalize_lb_sep(grads[0], bs, nocat=True)
j = 0
for layer in fl_norm_lb_bias:
for bias in layer:
buckets_bias[j] = []
total_norm_bias[j] = 0.0
j += 1
j = 0
for layer in fl_norm_lb_weights:
for weights in layer:
buckets_weights[j] = []
total_norm_weights[j] = 0.0
j += 1
for i, grad in enumerate(grads):
fl_norm_lb_bias, fl_norm_lb_weights = \
self._flatt_and_normalize_lb_sep(grad, bs, nocat=True)
fl_norm_bias, fl_norm_weights = self._flatten_lb_sep(grad, bs)
j = 0
for layer in fl_norm_lb_bias:
for bias in layer:
buckets_bias[j].append(bias)
j += 1
j = 0
for layer in fl_norm_lb_weights:
for weight in layer:
buckets_weights[j].append(weight)
j += 1
j = 0
for layer in fl_norm_bias:
for bias in layer:
total_norm_bias[j] += bias.norm() / samples
j += 1
j = 0
for layer in fl_norm_weights:
for weight in layer:
total_norm_weights[j] += weight.norm() / samples
j += 1
stats_bias = self._calc_stats_buckets(buckets_bias)
stats_bias['norm'] = torch.tensor(list(total_norm_bias.values()))
stats_bias['norm'] = stats_bias['norm'].cpu().tolist()
stats_weights = self._calc_stats_buckets(buckets_weights)
stats_weights['norm'] = torch.tensor(list(total_norm_weights.values()))
stats_weights['norm'] = stats_weights['norm'].cpu().tolist()
stats = {
'bias': stats_bias,
'weights': stats_weights
}
return stats
def _bucketize(self, grad, bs, stats_nb):
ig_sm_bkts = self.opt.nuq_ig_sm_bkts
variance = 0
num_params = 0
tot_sum = 0
num_buckets = int(np.ceil(len(grad) / bs))
for bucket in range(num_buckets):
start = bucket * bs
end = min((bucket + 1) * bs, len(grad))
current_bk = grad[start:end]
norm = current_bk.norm()
current_bk = current_bk / norm
b_len = len(current_bk)
# TODO: REMOVE THIS LINE
if b_len != bs and ig_sm_bkts:
continue
num_params += b_len
var = torch.var(current_bk)
# update norm-less variance
variance += var * (b_len - 1)
tot_sum += torch.sum(current_bk)
stats_nb['norms'].append(norm)
stats_nb['sigmas'].append(torch.sqrt(var))
stats_nb['means'].append(torch.mean(current_bk))
return tot_sum, variance, num_params
def _get_stats_sep(self, grads):
# get stats for weights and bias separately
pass
    def _get_stats_nl_lb(self, grads):
        # get stats normless (bucket-normalized, layer-based flatten)
        bs = self.opt.nuq_bucket_size
        samples = len(grads)
        tsum = 0.0
        tot_var = 0.0
        # count gradient entries, not layers
        num_params = sum(g.numel() for g in grads[0])
        for grad in grads:
            params = self._flatt_and_normalize_lb(grad, bs)
            tsum += self._flatten(params)
        mean = tsum / samples
        for grad in grads:
            params = self._flatt_and_normalize_lb(grad, bs)
            tot_var += torch.sum((mean - self._flatten(params)) ** 2)
        tot_mean = torch.sum(mean) / num_params
        tot_var /= (num_params * samples)
        return {
            'mean': tot_mean,
            'var': tot_var
        }
def _get_stats_nl_lb_sep(self, grads):
# get normless stats, bias and weights separated
bs = self.opt.nuq_bucket_size
nuq_layer = self.opt.nuq_layer
sep_bias_grad = self.opt.sep_bias_grad
samples = len(grads)
tsum_bias = 0.0
tot_var_bias = 0.0
tot_var_weights = 0.0
tsum_weights = 0.0
bias, weights = self._flatt_and_normalize_lb_sep(grads[0], bs)
num_bias = len(torch.cat(bias))
num_weights = len(torch.cat(weights))
for grad in grads:
bias, weights = self._flatt_and_normalize_lb_sep(grad, bs)
tsum_bias += torch.cat(bias)
tsum_weights += torch.cat(weights)
mean_bias = tsum_bias / samples
mean_weights = tsum_weights / samples
for grad in grads:
bias, weights = self._flatt_and_normalize_lb_sep(grad, bs)
tot_var_bias += torch.sum((mean_bias - torch.cat(bias)) ** 2)
tot_var_weights += torch.sum((mean_weights -
torch.cat(weights)) ** 2)
tot_mean_bias = torch.sum(mean_bias) / num_bias
tot_mean_weights = torch.sum(mean_weights) / num_weights
tot_var_weights /= (num_weights * samples)
tot_var_bias /= (num_bias * samples)
stats = {
'bias': {
'sigma': torch.sqrt(tot_var_bias).cpu().item(),
'mean': tot_mean_bias.cpu().item()
},
'weights': {
'sigma': torch.sqrt(tot_var_weights).cpu().item(),
'mean': tot_mean_weights.cpu().item()
}
}
return stats
def _get_stats(self, grads):
# get stats
pass
def snap_online(self, model):
num_of_samples = self.opt.nuq_number_of_samples
grads = self._get_grad_samples(model, num_of_samples)
lb = not self.opt.nuq_layer
sep = True if self.opt.sep_bias_grad == 1 else False
# TODO implement variations of lb and sep
stats = {
'nb': self._get_stats_lb_sep(grads),
'nl': self._get_stats_nl_lb_sep(grads)
}
return stats
def snap_online_mean(self, model):
stats_nb = {
'means': [],
'sigmas': [],
'norms': []
}
total_variance = 0.0
tot_sum = 0.0
num_of_samples = self.opt.nuq_number_of_samples
total_params = 0
bs = self.opt.nuq_bucket_size
lb = not self.opt.nuq_layer
        ig_sm_bkts = self.opt.nuq_ig_sm_bkts
params = list(model.parameters())
for i in range(num_of_samples):
grad = self._get_raw_grad(model)
if lb:
flattened = self._flatten_lb(grad)
                for layer in flattened:
b_sum, b_var, b_params = self._bucketize(
layer, bs, stats_nb)
tot_sum += b_sum
total_variance += b_var
total_params += b_params
else:
flattened = self._flatten(grad)
b_sum, b_var, b_params = self._bucketize(
flattened, bs, stats_nb)
tot_sum += b_sum
total_variance += b_var
total_params += b_params
nw = sum([w.numel() for w in model.parameters()])
stats_nb['means'] = torch.stack(stats_nb['means']).cpu().tolist()
stats_nb['sigmas'] = torch.stack(stats_nb['sigmas']).cpu().tolist()
stats_nb['norms'] = torch.stack(stats_nb['norms']).cpu().tolist()
if len(stats_nb['means']) > self.opt.dist_num:
indexes = np.argsort(-np.asarray(stats_nb['norms']))[
:self.opt.dist_num]
stats_nb['means'] = np.array(stats_nb['means'])[indexes].tolist()
stats_nb['sigmas'] = np.array(stats_nb['sigmas'])[
indexes].tolist()
stats_nb['norms'] = np.array(stats_nb['norms'])[indexes].tolist()
stats = {
'nb': stats_nb,
'nl': {
'mean': (tot_sum / total_params).cpu().item(),
'sigma':
torch.sqrt(total_variance / total_params).cpu().item(),
}
}
return stats
def grad(self, model_new, in_place=False, data=None):
raise NotImplementedError('grad not implemented')
def _normalize(self, layer, bucket_size, nocat=False):
normalized = []
num_bucket = int(np.ceil(len(layer) / bucket_size))
for bucket_i in range(num_bucket):
start = bucket_i * bucket_size
end = min((bucket_i + 1) * bucket_size, len(layer))
x_bucket = layer[start:end].clone()
norm = x_bucket.norm()
normalized.append(x_bucket / (norm + 1e-7))
if not nocat:
return torch.cat(normalized)
else:
return normalized
def grad_estim(self, model):
# ensuring continuity of data seen in training
# TODO: make sure sub-classes never use any other data_iter, e.g. raw
dt = self.data_iter
self.data_iter = self.estim_iter
ret = self.grad(model)
self.data_iter = dt
return ret
def get_Ege_var(self, model, gviter):
# estimate grad mean and variance
Ege = [torch.zeros_like(g) for g in model.parameters()]
for i in range(gviter):
ge = self.grad_estim(model)
for e, g in zip(Ege, ge):
e += g
for e in Ege:
e /= gviter
nw = sum([w.numel() for w in model.parameters()])
var_e = 0
Es = [torch.zeros_like(g) for g in model.parameters()]
En = [torch.zeros_like(g) for g in model.parameters()]
for i in range(gviter):
ge = self.grad_estim(model)
v = sum([(gg-ee).pow(2).sum() for ee, gg in zip(Ege, ge)])
for s, e, g, n in zip(Es, Ege, ge, En):
s += g.pow(2)
n += (e-g).pow(2)
var_e += v/nw
var_e /= gviter
# Division by gviter cancels out in ss/nn
snr_e = sum(
[((ss+1e-10).log()-(nn+1e-10).log()).sum()
for ss, nn in zip(Es, En)])/nw
nv_e = sum([(nn/(ss+1e-7)).sum() for ss, nn in zip(Es, En)])/nw
return Ege, var_e, snr_e, nv_e
def _flatten_lb_sep(self, gradient, bs=None):
# flatten layer based and handle weights and bias separately
flatt_params = [], []
for layer in gradient:
if len(layer.size()) == 1:
if bs is None:
flatt_params[0].append(
torch.flatten(layer))
else:
buckets = []
flatt = torch.flatten(layer)
num_bucket = int(np.ceil(len(flatt) / bs))
for bucket_i in range(num_bucket):
start = bucket_i * bs
end = min((bucket_i + 1) * bs, len(flatt))
x_bucket = flatt[start:end].clone()
buckets.append(x_bucket)
flatt_params[0].append(
buckets)
else:
if bs is None:
flatt_params[1].append(
torch.flatten(layer))
else:
buckets = []
flatt = torch.flatten(layer)
num_bucket = int(np.ceil(len(flatt) / bs))
for bucket_i in range(num_bucket):
start = bucket_i * bs
end = min((bucket_i + 1) * bs, len(flatt))
x_bucket = flatt[start:end].clone()
buckets.append(x_bucket)
flatt_params[1].append(
buckets)
return flatt_params
def _flatten_lb(self, gradient):
# flatten layer based
flatt_params = []
for layer_parameters in gradient:
flatt_params.append(torch.flatten(layer_parameters))
return flatt_params
def _flatten_sep(self, gradient, bs=None):
# flatten weights and bias separately
flatt_params = [], []
for layer_parameters in gradient:
if len(layer_parameters.size()) == 1:
flatt_params[0].append(
torch.flatten(layer_parameters))
else:
flatt_params[1].append(torch.flatten(layer_parameters))
return torch.cat(flatt_params[0]), torch.cat(flatt_params[1])
def _flatten(self, gradient):
flatt_params = []
for layer_parameters in gradient:
flatt_params.append(torch.flatten(layer_parameters))
return torch.cat(flatt_params)
def unflatten(self, gradient, parameters, tensor=False):
shaped_gradient = []
begin = 0
for layer in parameters:
size = layer.view(-1).shape[0]
shaped_gradient.append(
gradient[begin:begin+size].view(layer.shape))
begin += size
if tensor:
return torch.stack(shaped_gradient)
else:
return shaped_gradient
def _flatt_and_normalize_lb_sep(self, gradient, bucket_size=1024,
nocat=False):
# flatten and normalize weight and bias separately
bs = bucket_size
# totally flat and layer-based layers
flatt_params_lb = self._flatten_lb_sep(gradient)
normalized_buckets_lb = [], []
for bias in flatt_params_lb[0]:
normalized_buckets_lb[0].append(
self._normalize(bias, bucket_size, nocat))
for weight in flatt_params_lb[1]:
normalized_buckets_lb[1].append(
self._normalize(weight, bucket_size, nocat))
return normalized_buckets_lb
def _flatt_and_normalize_lb(self, gradient, bucket_size=1024, nocat=False):
flatt_params_lb = self._flatten_lb(gradient)
normalized_buckets_lb = []
for layer in flatt_params_lb:
normalized_buckets_lb.append(
self._normalize(layer, bucket_size, nocat))
return normalized_buckets_lb
def _flatt_and_normalize(self, gradient, bucket_size=1024, nocat=False):
flatt_params = self._flatten(gradient)
return self._normalize(flatt_params, bucket_size, nocat)
def _flatt_and_normalize_sep(self, gradient,
bucket_size=1024, nocat=False):
flatt_params = self._flatten_sep(gradient)
return [self._normalize(flatt_params[0], bucket_size, nocat),
self._normalize(flatt_params[1], bucket_size, nocat)]
def get_gradient_distribution(self, model, gviter, bucket_size):
"""
gviter: Number of minibatches to apply on the model
model: Model to be evaluated
"""
bucket_size = self.opt.nuq_bucket_size
mean_estimates_normalized = self._flatt_and_normalize(
model.parameters(), bucket_size)
mean_estimates_unconcatenated = self._flatt_and_normalize_lb(
model.parameters(), bucket_size)
# estimate grad mean and variance
mean_estimates = [torch.zeros_like(g) for g in model.parameters()]
mean_estimates_unconcatenated = [torch.zeros_like(
g) for g in mean_estimates_unconcatenated]
mean_estimates_normalized = torch.zeros_like(mean_estimates_normalized)
for i in range(gviter):
minibatch_gradient = self.grad_estim(model)
minibatch_gradient_normalized = self._flatt_and_normalize(
minibatch_gradient, bucket_size)
minibatch_gradient_unconcatenated = self._flatt_and_normalize_lb(
minibatch_gradient, bucket_size)
for e, g in zip(mean_estimates, minibatch_gradient):
e += g
for e, g in zip(mean_estimates_unconcatenated, minibatch_gradient_unconcatenated):
e += g
mean_estimates_normalized += minibatch_gradient_normalized
# Calculate the mean
for e in mean_estimates:
e /= gviter
for e in mean_estimates_unconcatenated:
e /= gviter
mean_estimates_normalized /= gviter
# Number of Weights
number_of_weights = sum([layer.numel()
for layer in model.parameters()])
variance_estimates = [torch.zeros_like(g) for g in model.parameters()]
variance_estimates_unconcatenated = [
torch.zeros_like(g) for g in mean_estimates_unconcatenated]
variance_estimates_normalized = torch.zeros_like(
mean_estimates_normalized)
for i in range(gviter):
minibatch_gradient = self.grad_estim(model)
minibatch_gradient_normalized = self._flatt_and_normalize(
minibatch_gradient, bucket_size)
minibatch_gradient_unconcatenated = self._flatt_and_normalize_lb(
minibatch_gradient, bucket_size)
v = [(gg - ee).pow(2)
for ee, gg in zip(mean_estimates, minibatch_gradient)]
v_normalized = (mean_estimates_normalized -
minibatch_gradient_normalized).pow(2)
v_normalized_unconcatenated = [(gg - ee).pow(2) for ee, gg in zip(
mean_estimates_unconcatenated, minibatch_gradient_unconcatenated)]
for e, g in zip(variance_estimates, v):
e += g
for e, g in zip(variance_estimates_unconcatenated, v_normalized_unconcatenated):
e += g
variance_estimates_normalized += v_normalized
variance_estimates_normalized = variance_estimates_normalized / gviter
for e in variance_estimates_unconcatenated:
e /= gviter
variances = []
means = []
# random_indices = self.get_random_index(model, 4)
# for index in random_indices:
# variance_estimate_layer = variance_estimates[index[0]]
# mean_estimate_layer = mean_estimates[index[0]]
# for weight in index[1:]:
# variance_estimate_layer = variance_estimate_layer[weight]
# variance_estimate_layer.squeeze_()
# mean_estimate_layer = mean_estimate_layer[weight]
# mean_estimate_layer.squeeze_()
# variance = variance_estimate_layer / (gviter)
# variances.append(variance)
# means.append(mean_estimate_layer)
total_mean = torch.tensor(0, dtype=float)
for mean_estimate in mean_estimates:
total_mean += torch.sum(mean_estimate)
total_variance = torch.tensor(0, dtype=float)
for variance_estimate in variance_estimates:
total_variance += torch.sum(variance_estimate)
total_variance = total_variance / number_of_weights
total_mean = total_mean / number_of_weights
total_variance_normalized = torch.tensor(0, dtype=float)
total_variance_normalized = torch.sum(
variance_estimates_normalized) / number_of_weights
total_mean_normalized = torch.tensor(0, dtype=float)
total_mean_normalized = torch.sum(
mean_estimates_normalized) / number_of_weights
total_mean_unconcatenated = sum([torch.sum(
mean) / mean.numel() for mean in mean_estimates_unconcatenated]) / len(mean_estimates)
total_variance_unconcatenated = sum([torch.sum(variance) / variance.numel(
) for variance in variance_estimates_unconcatenated]) / len(mean_estimates)
return variances, means, total_mean, total_variance, total_variance_normalized, total_mean_normalized, total_mean_unconcatenated, total_variance_unconcatenated
def get_norm_distribution(self, model, gviter, bucket_size=1024):
norms = {}
for i in range(gviter):
minibatch_gradient = self.grad_estim(model)
flattened_parameters = self._flatten(
minibatch_gradient)
num_bucket = int(np.ceil(len(flattened_parameters) / bucket_size))
for bucket_i in range(num_bucket):
start = bucket_i * bucket_size
end = min((bucket_i + 1) * bucket_size,
len(flattened_parameters))
if (end == len(flattened_parameters)):
continue
x_bucket = flattened_parameters[start:end].clone()
norm = x_bucket.norm()
                key = norm.item()
                if key in norms:
                    print('An error occurred: duplicate bucket norm')
                norms[key] = x_bucket
return norms
def state_dict(self):
return {}
def load_state_dict(self, state):
pass
def snap_model(self, model):
logging.info('Snap Model')
if self.model is None:
self.model = copy.deepcopy(model)
return
# update sum
for m, s in zip(model.parameters(), self.model.parameters()):
s.data.copy_(m.data)
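# A minimal standalone check (illustrative) of the bucket normalization done
# by _normalize() above: split a flat gradient into fixed-size buckets and
# rescale each bucket to (approximately) unit norm.
if __name__ == '__main__':
    g = [torch.ones(6), torch.ones(2, 3)]      # two fake "layers"
    flat = torch.cat([torch.flatten(p) for p in g])
    bs = 4
    buckets = [flat[i:i + bs] / (flat[i:i + bs].norm() + 1e-7)
               for i in range(0, len(flat), bs)]
    print([round(b.norm().item(), 4) for b in buckets])   # each close to 1.0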
| 24,608 | 34.105563 | 167 |
py
|
learning-to-quantize
|
learning-to-quantize-master/nuq/quantize.py
|
import numpy as np
import torch
from cuquant import QDQ
import math
from estim.dist import TruncNorm, CondNormalTrunc, CondNormalTruncHist
import time
from scipy.stats import truncnorm, norm
import scipy.integrate as integrate
EPS = 1e-7
def get_quantile_levels(bits, grad_dist):
"""quantile levels """
num_levels = 2 << bits - 1
cdf_points = np.linspace(0, 1, num=num_levels)
levels = [grad_dist.ppf(level) for level in cdf_points]
levels[0] = grad_dist.begin
levels[-1] = grad_dist.end
return levels
def get_ternary_levels():
return np.array([-1, 0, 1])
def get_uniform_levels(bits):
"""uniform (QSGD)"""
num_levels = 2 << bits - 1
levels_uni = np.linspace(-1, 1, num=num_levels)
return levels_uni
def get_exp_levels(bits, multiplier=0.5):
""" exponential (NUQSGD)
multiplier: is used to modify levels_exp based on the number of bits
"""
num_levels = 2 << bits - 1
levels = sum([[-multiplier**j for j in range(num_levels >> 1)],
[multiplier**j for j in reversed(range(num_levels >> 1))]],
[])
return np.asarray(levels)
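# Worked example (illustrative): with bits=2 there are 2 << (2 - 1) = 4
# levels. get_uniform_levels(2) gives [-1, -1/3, 1/3, 1] (QSGD), while
# get_exp_levels(2, 0.5) gives [-1, -0.5, 0.5, 1] (NUQSGD), clustering
# levels near zero where small normalized gradient values concentrate.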
def finite_diff_gradient_descent(f, begin, end, x0=None, niters=10, lr=1):
eps = (end-begin)/1000
if x0 is None:
x0 = (begin + end) / 2
x = x0
for i in range(niters):
df = (f(x+eps)-f(x-eps))/(2*eps)
x -= lr*df
return x
def bisection(begin, end, f):
x = (begin + end) / 2
if (np.abs(f(x) - 0) < 1e-7):
return x
both_negative = f(begin) < 0 and f(end) < 0
both_positive = f(begin) > 0 and f(end) > 0
if both_negative or both_positive:
print('Bisection failed')
x_neg_end_pos = f(x) < 0 and f(end) > 0
x_pos_end_neg = f(x) > 0 and f(end) < 0
if x_neg_end_pos or x_pos_end_neg:
return bisection(x, end, f)
return bisection(begin, x, f)
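# Example (illustrative): bisection(0.0, 2.0, lambda x: x * x - 1.0)
# returns 1.0, since f changes sign on [0, 2] and the first midpoint is
# already a root.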
def amq_norm_based(initial_point, grad_dist, bits, lr=0.1, epochs=50):
mul = initial_point
s = 2 ** (bits - 1) - 1
all_mul = []
iter = 0
for epoch in range(epochs):
sum = 0.0
for norm, mean, sigma, coeff in zip(
grad_dist.norms,
grad_dist.means,
grad_dist.sigmas,
grad_dist.coeff):
dist_comp = TruncNorm(
mean, sigma, grad_dist.begin, grad_dist.end, grad_dist.nbins)
# from eq G.3 in Appendix
def arg1_1(j):
return mean * (j * mul ** (j - 1) + (j + 1) * mul ** j) \
- (2 * j + 1) * mul ** (2 * j)
arg1 = np.sum(np.asarray(
[arg1_1(j)*(dist_comp.cdf(mul**j) - dist_comp.cdf(mul**(j+1)))
for j in range(0, s)]))
def arg2_1(j):
return j * mul ** (j - 1) + (j + 1) * mul ** j
arg2 = np.sum(np.asarray(
[arg2_1(j) * (dist_comp.pdf(mul ** (j + 1))
- dist_comp.pdf(mul ** (j)))
for j in range(0, s)]))
sum += coeff * (arg1 + sigma ** 2 * arg2)
gradient = 2 * s * (mul ** (2 * s - 1)) * \
(grad_dist.cdf(mul ** s) - grad_dist.cdf(0)) + sum
mul = mul - lr * gradient
iter += 1
all_mul.append(mul)
return mul, all_mul
def amq_norm_less(initial_point, grad_dist, bits, lr=0.1, epochs=200):
mul = initial_point
s = 2 ** (bits - 1) - 1
mean = grad_dist.mean
sigma = grad_dist.sigma
all_mul = []
iter = 0
for epoch in range(epochs):
sum = 0.0
def arg1_1(j):
return mean * (j * mul ** (j - 1) + (j + 1) * mul ** j) \
- (2 * j + 1) * mul ** (2 * j)
arg1 = np.sum(np.asarray([arg1_1(j) * (
grad_dist.cdf(mul ** j) -
grad_dist.cdf(mul ** (j+1))) for j in range(0, s)]))
def arg2_1(j):
return j * mul ** (j - 1) + (j + 1) * mul ** j
arg2 = np.sum(np.asarray([
arg2_1(j) * (grad_dist.pdf(mul ** (j + 1)) -
grad_dist.pdf(mul ** (j))) for j in range(0, s)]))
gradient = 2 * s * (mul ** (2 * s - 1)) * \
(grad_dist.cdf(mul ** s) - grad_dist.cdf(0)) \
+ arg1 + sigma ** 2 * arg2
mul = mul - lr * gradient
iter += 1
all_mul.append(mul)
return mul, all_mul
def alq(initial_levels, grad_dist, epochs, inv=False, sym=True):
losses = []
# Assuming last level is 1, setting first dummy level to 0
if sym:
positive_levels = initial_levels[len(initial_levels) // 2:]
new_levels = [0] + list(positive_levels).copy()
else:
new_levels = list(initial_levels).copy()
all_levels = [new_levels.copy()]
for epoch in range(epochs):
def objective(x, left_level, right_level):
# from equation below corollary 1
left_var = grad_dist.est_var_adjacent_levels(left_level, x)
right_var = grad_dist.est_var_adjacent_levels(x, right_level)
return left_var+right_var
for index in range(1, len(new_levels)-1):
left_level = new_levels[index - 1]
right_level = new_levels[index + 1]
if inv:
new_levels[index] = grad_dist.estimate_variance_adj_inv(
left_level, right_level)
else:
new_levels[index] = finite_diff_gradient_descent(
lambda x: objective(x, left_level, right_level),
left_level, right_level, x0=new_levels[index])
assert new_levels[index] < right_level and \
new_levels[index] > left_level, \
"New level is not in the interval"
if sym:
negative_levels = [-level for level in new_levels]
negative_levels.reverse()
losses.append(grad_dist.estimate_variance(
negative_levels[:-1] + new_levels[1:]))
all_levels.append(new_levels.copy())
else:
losses.append(grad_dist.estimate_variance(new_levels))
all_levels.append(new_levels.copy())
if sym:
# dropping dummy level at 0
new_levels = new_levels[1:]
negative_levels = [-level for level in new_levels]
negative_levels.reverse()
new_levels = negative_levels + new_levels
return new_levels, all_levels, losses
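# Typical usage (illustrative sketch, under assumed settings): refine
# exponential initial levels against a truncated-normal gradient model,
# keeping the levels symmetric around zero:
#
#     dist = TruncNorm(0.0, 0.1, -1, 1, nbins=100000)
#     levels, all_levels, losses = alq(get_exp_levels(4, 0.5), dist,
#                                      epochs=30, inv=False, sym=True)
#
# losses[-1] is the estimated quantization variance of the final levels.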
def get_exp_levels(bits, multiplier):
""" exponential (NUQSGD)
multiplier: is used to modify levels_exp based on the number of bits
"""
num_levels = 2 << bits - 1
# if bits == 2:
# multiplier = 0.1
# elif bits == 4:
# multiplier = 0.5
# elif bits == 6:
# multiplier = 0.9
# elif bits == 8:
# multiplier = 0.95
levels = sum([[-multiplier**j for j in range(num_levels >> 1)],
[multiplier**j for j in reversed(range(num_levels >> 1))]],
[])
return levels
class QuantizeMultiBucket(object):
def __init__(self, method, bits, bucket_size, multiplier, **kwargs):
"""
QSGD: qdqL2 + levels_uni
NUQSGD: qdqL2 + levels_exp
QSGD-inf: qdqLinf + levels_uni
"""
self.method = method
self.multiplier = multiplier
        if kwargs['interval'] is not None:
            self.interval = kwargs['interval']
if method == 'q':
self.levels = get_uniform_levels(bits)
self.norm_type = 'fro'
elif method == 'nuq':
self.levels = get_exp_levels(bits, multiplier)
self.norm_type = 'fro'
elif method == 'qinf':
self.levels = get_uniform_levels(bits)
self.norm_type = float('inf')
        elif method == 'nuq2':
            # get_quantile_levels expects a distribution; build the
            # truncated normal (mean 0, sigma 0.1) implied by the arguments
            self.levels = get_quantile_levels(
                bits, TruncNorm(0, 0.1, -self.interval, self.interval))
            self.norm_type = 'fro'
        elif method == 'nuq2inf':
            self.levels = get_quantile_levels(
                bits, TruncNorm(0, 0.1, -self.interval, self.interval))
            self.norm_type = float('inf')
elif method == 'amq':
self.levels = get_exp_levels(bits, multiplier)
self.norm_type = 'fro'
elif method == 'amq_nb':
self.levels = get_exp_levels(bits, multiplier)
self.norm_type = 'fro'
elif method == 'alq':
self.levels = get_exp_levels(bits, multiplier)
self.norm_type = 'fro'
elif method == 'alq_nb':
self.levels = get_exp_levels(bits, multiplier)
self.norm_type = 'fro'
elif method == 'trn':
self.levels = get_ternary_levels()
self.norm_type = float('inf')
elif method == 'none':
return
self.number_of_iterations = 0
self.gradient_samples = []
self.gradient_samples_overtime = []
self.previous_best = None
self.bucket_size = bucket_size
self.bits = bits
self.epochs = kwargs['cd_epochs']
self.path = kwargs['path']
self.amq_lr = kwargs['amq_lr']
self.amq_epochs = kwargs['amq_epochs']
self.symmetric = kwargs['symmetric']
self.inv = kwargs['inv']
self.levels = torch.as_tensor(self.levels, dtype=torch.float32).cuda()
self.qdq = QDQ(self.levels)
self.mean_weights = 0
self.variance_weights = 0.1
self.error = None
def set_mean_variance(self, stats):
self.mean = mean = stats['nl']['mean']
self.variance = variance = stats['nl']['sigma'] ** 2
self.norms = norms = stats['nb']
self.number_of_iterations += 1
interval = self.interval
sigma = torch.sqrt(torch.tensor(self.variance)).cpu().item()
self.grad_dist_nb = CondNormalTruncHist(
norms['means'], norms['sigmas'], norms['norms'], -interval,
interval, nbins=100000, bin_type='linear')
self.grad_dist_nl = TruncNorm(
mean, sigma, -interval, interval, nbins=100000, bin_type='linear')
self.error = self.grad_dist_nb.estimate_variance(self.levels.cpu())
if self.method == 'amq':
np.savetxt(self.path + '/norms_mean' +
str(self.number_of_iterations), np.asarray(self.norms['means']))
np.savetxt(self.path + '/norms_sigma' +
str(self.number_of_iterations), np.asarray(self.norms['sigmas']))
np.savetxt(self.path + '/norms_norm' +
str(self.number_of_iterations), np.asarray(self.norms['norms']))
def update_levels(self):
interval = self.interval
mean = self.mean
bits = self.bits
variance = self.variance
grad_dist_nl = self.grad_dist_nl
grad_dist_nb = self.grad_dist_nb
sigma = torch.sqrt(torch.tensor(self.variance)).cpu().item()
half_point = int(len(self.levels) / 2)
quantile_levels = get_quantile_levels(bits, grad_dist_nb)
uniform_levels = get_uniform_levels(
self.bits)
exp_levels = get_exp_levels(
self.bits, 0.5)
bits = self.bits
if self.method == 'alq':
inv = self.inv
sym = self.symmetric
epochs = self.epochs
initial_levels = self.levels
levels_qua, _, losses_qua = alq(
quantile_levels, grad_dist_nl, epochs, inv, sym)
levels_uniform, _, losses_uni = alq(
uniform_levels, grad_dist_nl, epochs, inv, sym)
levels_exp, _, losses_exp = alq(
exp_levels, grad_dist_nl, epochs, inv, sym)
candidate_levels = np.asarray(
[levels_qua, levels_uniform, levels_exp])
candidate_losses = np.asarray(
[losses_qua[-1], losses_uni[-1], losses_exp[-1]])
self.levels = candidate_levels[np.argsort(candidate_losses)][0]
elif self.method == 'alq_nb':
epochs = self.epochs
inv = self.inv
sym = self.symmetric
quantile_levels = get_quantile_levels(bits, grad_dist_nb)
levels_qua, _, losses_qua = alq(
quantile_levels, grad_dist_nb, epochs, inv, sym)
levels_uniform, _, losses_uni = alq(
uniform_levels, grad_dist_nb, epochs, inv, sym)
levels_exp, _, losses_exp = alq(
exp_levels, grad_dist_nb, epochs, inv, sym)
candidate_levels = np.asarray(
[levels_qua, levels_uniform, levels_exp])
candidate_losses = np.asarray(
[losses_qua[-1], losses_uni[-1], losses_exp[-1]])
self.levels = candidate_levels[np.argsort(candidate_losses)][0]
elif self.method == 'amq':
initial_points = []
if self.previous_best is None:
initial_points = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 0.9]
else:
initial_points = [0.1, 0.2, 0.3, 0.4,
self.previous_best, 0.5, 0.8, 0.9]
optimal_points = []
for point in initial_points:
optimal_p, _ = amq_norm_less(point, grad_dist_nl, bits, self.amq_lr, self.amq_epochs)
optimal_points.append(optimal_p)
optimal_points_costs = [
grad_dist_nl.estimate_variance(get_exp_levels(bits, p)[
half_point:]) for p in optimal_points]
index = np.argmin(optimal_points_costs)
self.multiplier = optimal_points[index]
self.previous_best = self.multiplier
self.levels = get_exp_levels(bits, self.multiplier)
elif self.method == 'amq_nb':
initial_points = []
if self.previous_best is None:
initial_points = [0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 0.9]
else:
initial_points = [0.1, 0.2, 0.3, 0.4,
self.previous_best, 0.5, 0.8, 0.9]
optimal_points = []
for point in initial_points:
optimal_p, _ = amq_norm_based(point, grad_dist_nb, bits, self.amq_lr, self.amq_epochs)
optimal_points.append(optimal_p)
optimal_points_costs = [
grad_dist_nb.estimate_variance(get_exp_levels(bits, p)[
half_point:]) for p in optimal_points]
index = np.argmin(optimal_points_costs)
self.multiplier = optimal_points[index]
self.previous_best = self.multiplier
self.levels = get_exp_levels(self.bits, self.multiplier)
self.levels = torch.as_tensor(self.levels, dtype=torch.float32).cuda()
self.qdq = QDQ(self.levels)
def quantize(self, x, ig_sm_bkts):
if self.method == 'none':
return x
assert isinstance(x, torch.cuda.FloatTensor)
bucket_size = self.bucket_size
num_tail = math.ceil(x.numel()/bucket_size)*bucket_size-x.numel()
xv = torch.cat((x.view(-1),
torch.zeros(num_tail, dtype=x.dtype, device=x.device)))
xv = xv.view(-1, bucket_size)
norm = xv.norm(p=self.norm_type, dim=1, keepdim=True).expand(
xv.shape[0], xv.shape[1]).contiguous().view(-1).contiguous()
        if ig_sm_bkts and num_tail > 0:
            # only skip the last bucket when it is actually partial (padded)
if xv.shape[0] > 1:
q = torch.zeros_like(xv)
r = torch.randint_like(xv, 1000001).long()
self.qdq.qdqGPU(xv[:-1], norm[:-1], q[:-1], r[:-1])
return torch.cat([q[:-1].view(-1), xv[-1][:-num_tail].view(-1)]).view(x.shape)
else:
return xv[-1][:-num_tail].view(x.shape)
else:
q = torch.zeros_like(x)
r = torch.randint_like(x, 1000001).long()
self.qdq.qdqGPU(x, norm, q, r)
return q
def state_dict(self):
if self.method == 'none':
return {}
return {
'levels': self.levels,
'means': self.grad_dist_nb.means,
'sigmas': self.grad_dist_nb.sigmas,
'norms': self.grad_dist_nb.norms,
'sigma': self.grad_dist_nl.sigma,
'mean': self.grad_dist_nl.mean,
'error': self.error
}
def load_state_dict(self, state):
if self.method == 'none':
return
self.levels = state['levels']
self.grad_dist_nb = CondNormalTruncHist(
state['means'], state['sigmas'], state['norms'], -1,
1, nbins=100000, bin_type='linear')
self.grad_dist_nl = TruncNorm(
state['mean'], state['sigma'], -1,
1, nbins=100000, bin_type='linear')
self.qdq = QDQ(self.levels)
self.error = state['error']
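# A minimal sketch (illustrative, using only torch and math from the imports
# above) of the bucketing done in quantize(): pad the flattened tensor to a
# multiple of the bucket size, then compute one scaling norm per bucket.
if __name__ == '__main__':
    x = torch.arange(10, dtype=torch.float32)
    bucket_size = 4
    num_tail = math.ceil(x.numel() / bucket_size) * bucket_size - x.numel()
    xv = torch.cat((x, torch.zeros(num_tail, dtype=x.dtype)))
    xv = xv.view(-1, bucket_size)
    print(xv.norm(dim=1))   # one norm per bucket of 4 values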
| 17,549 | 35.036961 | 102 |
py
|
learning-to-quantize
|
learning-to-quantize-master/nuq/cuda/test.py
|
import torch
import cuquant as qdq
import numpy as np
def test_qdq_gpu():
if not torch.cuda.is_available():
return
x = torch.randn(1000).cuda().uniform_(-1, 1)
q = qdq.qdq_gpu(x)
dq = np.unique(q.cpu().numpy())
print('x', x)
print('q', q)
print('unique q', dq)
print('# unique q', len(dq))
if __name__ == '__main__':
test_qdq_gpu()
| 380 | 18.05 | 48 |
py
|
learning-to-quantize
|
learning-to-quantize-master/nuq/cuda/qdq.py
|
import torch
import math
from cuquant import QDQ
def get_uniform_levels(bits):
num_levels = 2 << bits - 1
levels_uni = torch.linspace(-1, 1, steps=num_levels)
return levels_uni
def qdq_gpu(a):
assert isinstance(a, torch.cuda.FloatTensor)
bucket_size = 16
asize = a.size()
num_tail = math.ceil(a.numel()/bucket_size)*bucket_size-a.numel()
av = torch.cat((a.view(-1), torch.zeros_like(a)[:num_tail]))
c = torch.zeros_like(a)
av = av.view(-1, bucket_size)
norm = av.norm(dim=1, keepdim=True).expand(
av.shape[0], av.shape[1]).contiguous().view(-1).contiguous()
print('norm', norm)
r = torch.randint_like(a, 1000001).long()
levels = get_uniform_levels(4).cuda()
print('levels', levels)
print('#levels', len(levels))
qdq = QDQ(levels)
qdq.qdqGPU(a, norm, c, r)
return c.view(asize)
| 867 | 26.125 | 69 |
py
|
learning-to-quantize
|
learning-to-quantize-master/nuq/cuda/setup.py
|
import os
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
os.system('make -j%d' % os.cpu_count())
# Python interface
setup(
name='CuQuantize',
version='0.1.0',
install_requires=['torch'],
packages=['cuquant'],
package_dir={'cuquant': './'},
ext_modules=[
CUDAExtension(
name='cuquant_back',
include_dirs=['./'],
sources=[
'pybind/bind.cpp',
],
libraries=['cuquant'],
library_dirs=['objs'],
# extra_compile_args=['-g']
)
],
cmdclass={'build_ext': BuildExtension},
description='Quantize-Dequantize cuda kernel',
zip_safe=False,
)
| 735 | 23.533333 | 67 |
py
|
learning-to-quantize
|
learning-to-quantize-master/nuq/cuda/__init__.py
|
import torch
from cuquant_back import QDQ
from .qdq import qdq_gpu
| 69 | 10.666667 | 28 |
py
|
learning-to-quantize
|
learning-to-quantize-master/grid/cluster.py
|
from __future__ import print_function
def ssh(sargs):
"""
rm jobs/*.sh jobs/log/* -f && python grid_run.py --grid G --run_name X
pattern=""; for i in 1 2; do ./kill.sh $i $pattern; done
./start.sh
"""
jobs_0 = ['machine0_gpu0', 'machine0_gpu1',
'machine1_gpu0', 'machine1_gpu1',
]
# validate start.sh
njobs = [2]*4 # Number of parallel jobs on each machine
jobs = []
for s, n in zip(jobs_0, njobs):
jobs += ['%s_job%d' % (s, i) for i in range(n)]
parallel = False # each script runs in sequence
return jobs, parallel
def slurm(sargs, prefix):
"""
rm jobs/*.sh jobs/log/* -f && python grid_run.py --grid G --run_name X \
--cluster_args <njobs>,<ntasks>,<partitions>
pattern=""; for i in 1 2; do ./kill.sh $i $pattern; done
sbatch jobs/slurm.sbatch
"""
njobs, ntasks, partition = sargs.split(',', 2)
njobs = int(njobs)
ntasks = int(ntasks)
# njobs = 5 # Number of array jobs
# ntasks = 4 # Number of running jobs
    # partition = 'gpu'
jobs = [str(i) for i in range(njobs)]
sbatch_f = """#!/bin/bash
#SBATCH --job-name=array
#SBATCH --output=jobs/log/array_%A_%a.log
#SBATCH --array=0-{njobs}
#SBATCH --time=300:00:00
#SBATCH --gres=gpu:1 # Number of GPUs (per node)
#SBATCH -c 3
#SBATCH --mem=18G
#SBATCH --mail-type=ALL,ARRAY_TASKS
#SBATCH [email protected]
#SBATCH -p {partition}
#SBATCH --ntasks=1
date; hostname; pwd
python -c "import torch; print(torch.__version__)"
(while true; do nvidia-smi; top -b -n 1 | head -20; sleep 10; done) &
# the environment variable SLURM_ARRAY_TASK_ID contains
# the index corresponding to the current job step
source $HOME/Code/nuqsgd/nuqsgd.sh
bash jobs/{prefix}_$SLURM_ARRAY_TASK_ID.sh
""".format(njobs=njobs-1, ntasks=ntasks, partition=partition, prefix=prefix)
with open('jobs/' + prefix + '_slurm.sbatch', 'w') as f:
print(sbatch_f, file=f)
parallel = True # each script runs in parallel
return jobs, parallel
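# Example (illustrative): slurm('20,4,gpu', prefix='exp') writes
# jobs/exp_slurm.sbatch with a 20-element array (--array=0-19) on the 'gpu'
# partition and returns (['0', ..., '19'], True), so each grid script runs
# in parallel.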
| 2,057 | 31.666667 | 76 |
py
|
learning-to-quantize
|
learning-to-quantize-master/grid/nuq.py
|
from collections import OrderedDict
def mnist(args):
dataset = 'mnist'
module_name = 'main.gvar'
log_dir = 'runs_%s_nuq' % dataset
exclude = ['dataset', 'epochs', 'lr_decay_epoch', 'g_epoch']
shared_args = [('dataset', dataset),
('lr', .1), # [.1, .05, .01]),
('weight_decay', 0),
('momentum', 0), # [0, 0.9]),
('epochs', [
(30, OrderedDict([('lr_decay_epoch', 30)])),
]),
('arch', ['cnn']), # 'cnn', 'mlp'
]
gvar_args = [
# ('gvar_estim_iter', 10),
('gvar_log_iter', 1000), # 100
('gvar_start', 0),
('g_optim', ''),
('g_optim_start', 0),
# ('g_epoch', ''),
]
args_sgd = [('g_estim', ['sgd'])]
args += [OrderedDict(shared_args+gvar_args+args_sgd)]
args_nuq = [
('g_estim', ['nuq']),
('nuq_bits', 4),
('nuq_bucket_size', [1024, 4096, 8192]), # 8192),
('nuq_ngpu', [2, 4]),
('nuq_method', ['q', 'qinf',
('nuq', OrderedDict([('nuq_mul', 0.5)]))
])
]
args += [OrderedDict(shared_args+gvar_args+args_nuq)]
return args, log_dir, module_name, exclude
def cifar10_full_resnet110(args):
dataset = 'cifar10'
module_name = 'main.gvar'
log_dir = 'runs_%s_full' % dataset
exclude = ['dataset', 'epochs', 'lr_decay_epoch', 'g_epoch',
'pretrained', 'niters', 'epoch_iters',
'gvar_log_iter', 'gvar_start', 'g_bsnap_iter',
'g_optim_start', 'nuq_truncated_interval', 'train_accuracy',
'nuq_number_of_samples', 'chkpt_iter', 'g_osnap_iter']
shared_args = [('dataset', dataset),
('optim', ['sgd']), # 'sgd', 'adam'
# ('arch', 'resnet32'),
('arch', ['resnet110']),
('batch_size', 128),
('lr', [0.1]),
('chkpt_iter', 2000),
('momentum', 0.9),
('weight_decay', 1e-4),
('niters', 80000),
('lr_decay_epoch', '40000,60000'),
('train_accuracy', ''),
]
gvar_args = [
# ('gvar_estim_iter', 10),
('gvar_log_iter', 100), # 100
('gvar_start', 0),
('g_osnap_iter', '100,2000,10000'),
('g_bsnap_iter', 10000),
('g_optim', ''),
('g_optim_start', 0),
# ('g_epoch', ''),
]
args_sgd = [('g_estim', ['sgd'])]
args += [OrderedDict(shared_args+gvar_args+args_sgd)]
args_nuq_sgd = [
('g_estim', ['nuq']),
('nuq_bits', [3, 4]),
('nuq_bucket_size', [8192, 8192*2]),
('nuq_ngpu', 4), # 2
('dist_num', [350]),
('nuq_layer', ''),
('nuq_ig_sm_bkts', ''),
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
('amq', OrderedDict([('nuq_amq_lr', 0.7), ('nuq_amq_epochs', 40)])),
('amq_nb', OrderedDict([('nuq_amq_lr', 0.7), ('nuq_amq_epochs', 40)])),
('alq', OrderedDict([('nuq_cd_epochs', 30)])),
'qinf',
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', ''), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30)])),
('nuq', OrderedDict([('nuq_mul', 0.5)])),
])
]
args += [OrderedDict(shared_args+gvar_args+args_nuq_sgd)]
args_super_sgd = [
('g_estim', ['nuq']),
('nuq_ngpu', 4), # 2
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
'none'
])
]
args += [OrderedDict(shared_args+gvar_args+args_super_sgd)]
return args, log_dir, module_name, exclude
def cifar10_full_resnet32(args):
dataset = 'cifar10'
module_name = 'main.gvar'
log_dir = 'runs_%s_full' % dataset
exclude = ['dataset', 'epochs', 'lr_decay_epoch', 'g_epoch',
'pretrained', 'niters', 'epoch_iters',
'gvar_log_iter', 'gvar_start', 'g_bsnap_iter',
'g_optim_start', 'nuq_truncated_interval', 'train_accuracy',
'nuq_number_of_samples', 'chkpt_iter', 'g_osnap_iter']
shared_args = [('dataset', dataset),
('optim', ['sgd']), # 'sgd', 'adam'
# ('arch', 'resnet32'),
('arch', ['resnet32']),
('batch_size', 128),
('lr', [0.1]),
('chkpt_iter', 2000),
('momentum', 0.9),
('weight_decay', 1e-4),
('niters', 80000),
('lr_decay_epoch', '40000,60000'),
('train_accuracy', ''),
]
gvar_args = [
# ('gvar_estim_iter', 10),
('gvar_log_iter', 100), # 100
('gvar_start', 0),
('g_osnap_iter', '100,2000,10000'),
('g_bsnap_iter', 10000),
('g_optim', ''),
('g_optim_start', 0),
# ('g_epoch', ''),
]
args_sgd = [('g_estim', ['sgd'])]
args += [OrderedDict(shared_args+gvar_args+args_sgd)]
args_nuq = [
('g_estim', ['nuq']),
('nuq_bits', [3, 4]),
('nuq_bucket_size', [8192, 8192*2]),
('nuq_ngpu', 4), # 2
('dist_num', [50]),
('nuq_layer', ''),
('nuq_ig_sm_bkts', ''),
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
('amq', OrderedDict([('nuq_amq_lr', 0.7), ('nuq_amq_epochs', 40)])),
('amq_nb', OrderedDict([('nuq_amq_lr', 0.7), ('nuq_amq_epochs', 40)])),
('alq', OrderedDict([('nuq_cd_epochs', 30)])),
'qinf',
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', ''), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30)])),
('nuq', OrderedDict([('nuq_mul', 0.5)])),
])
]
args += [OrderedDict(shared_args+gvar_args+args_nuq)]
args_super_sgd = [
('g_estim', ['nuq']),
('nuq_ngpu', 4), # 2
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
'none'
])
]
args += [OrderedDict(shared_args+gvar_args+args_super_sgd)]
return args, log_dir, module_name, exclude
def imagenet_half(args):
dataset = 'imagenet'
module_name = 'main.gvar'
log_dir = 'runs_%s_full' % dataset
exclude = ['dataset', 'epochs', 'lr_decay_epoch', 'g_epoch',
'pretrained', 'niters', 'epoch_iters',
'gvar_log_iter', 'gvar_start', 'g_bsnap_iter',
'g_optim_start', 'nuq_truncated_interval',
'nuq_number_of_samples', 'chkpt_iter', 'g_osnap_iter']
shared_args = [('dataset', dataset),
# ('optim', 'sgd'), # 'sgd', 'adam'
# ('arch', 'resnet18'),
('arch', ['mobilenet_v2', 'resnet18']),
# ('arch', ['inception_v3']),
('batch_size', 64), # 256),
('niters', 60*10000),
('chkpt_iter', 2000),
('lr', 0.1),
('lr_decay_epoch', '150000,225000'),
('momentum', 0.9),
('weight_decay', 1e-4),
# ('train_accuracy', ''),
]
gvar_args = [
# ('gvar_estim_iter', 10),
# ('gvar_log_iter', 1000), # 100
# ('gvar_start', 0),
# ('g_bsnap_iter', 100*10000),
# ('g_optim', ''),
# ('g_optim_start', 0),
# ('g_epoch', ''),
('gvar_log_iter', 100), # 100
('gvar_start', 0),
('g_osnap_iter', '100,2000,10000'),
('g_bsnap_iter', 10000),
# ('g_optim', ''),
# ('g_optim_start', 0),
# ('g_epoch', ''),
]
args_sgd = [('g_estim', ['sgd'])]
args += [OrderedDict(shared_args+gvar_args+args_sgd)]
args_nuq = [
('g_estim', ['nuq']),
('nuq_bits', [3, 4]),
('nuq_bucket_size', [8192, 8192*2]),
('nuq_ngpu', 4), # 2
('dist_num', [250]),
('nuq_layer', ''),
('nuq_ig_sm_bkts', ''),
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
('amq', OrderedDict([('nuq_amq_lr', 0.7)])),
('amq_nb', OrderedDict([('nuq_amq_lr', 0.7)])),
('alq', OrderedDict([('nuq_cd_epochs', 30)])),
'qinf',
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', ''), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30)])),
('nuq', OrderedDict([('nuq_mul', 0.5)])),
])
]
args += [OrderedDict(shared_args+gvar_args+args_nuq)]
args_super_sgd = [
('g_estim', ['nuq']),
('nuq_ngpu', 4), # 2
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
'none'
])
]
args += [OrderedDict(shared_args+gvar_args+args_super_sgd)]
return args, log_dir, module_name, exclude
def imagenet_full(args):
dataset = 'imagenet'
module_name = 'main.gvar'
log_dir = 'runs_%s_full' % dataset
exclude = ['dataset', 'epochs', 'lr_decay_epoch', 'g_epoch',
'pretrained', 'niters', 'epoch_iters',
'gvar_log_iter', 'gvar_start', 'g_bsnap_iter',
'g_optim_start', 'nuq_truncated_interval',
'nuq_number_of_samples', 'chkpt_iter', 'g_osnap_iter']
shared_args = [('dataset', dataset),
# ('optim', 'sgd'), # 'sgd', 'adam'
# ('arch', 'resnet18'),
('arch', ['resnet18']),
# ('arch', ['inception_v3']),
('batch_size', 64), # 256),
('niters', 60*10000),
('chkpt_iter', 2000),
('lr', 0.1),
('lr_decay_epoch', '300000,450000'),
('momentum', 0.9),
('weight_decay', 1e-4),
# ('train_accuracy', ''),
]
gvar_args = [
# ('gvar_estim_iter', 10),
# ('gvar_log_iter', 1000), # 100
# ('gvar_start', 0),
# ('g_bsnap_iter', 100*10000),
# ('g_optim', ''),
# ('g_optim_start', 0),
# ('g_epoch', ''),
('gvar_log_iter', 100), # 100
('gvar_start', 0),
('g_osnap_iter', '100,2000,10000'),
('g_bsnap_iter', 10000),
('g_optim', ''),
('g_optim_start', 0),
# ('g_epoch', ''),
]
args_sgd = [('g_estim', ['sgd'])]
args += [OrderedDict(shared_args+gvar_args+args_sgd)]
args_nuq = [
('g_estim', ['nuq']),
('nuq_bits', [3, 4]),
('nuq_bucket_size', [8192, 8192*2]),
('nuq_ngpu', 4), # 2
('dist_num', [350]),
('nuq_layer', ''),
('nuq_ig_sm_bkts', ''),
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
('amq', OrderedDict([('nuq_amq_lr', 0.7)])),
('amq_nb', OrderedDict([('nuq_amq_lr', 0.7)])),
('alq', OrderedDict([('nuq_cd_epochs', 30)])),
'qinf',
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', ''), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_inv', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30), ('nuq_sym', '')])),
('alq_nb', OrderedDict([('nuq_cd_epochs', 30)])),
('nuq', OrderedDict([('nuq_mul', 0.5)])),
])
]
args += [OrderedDict(shared_args+gvar_args+args_nuq)]
args_super_sgd = [
('g_estim', ['nuq']),
('nuq_ngpu', 4), # 2
('nuq_truncated_interval', 1),
('nuq_number_of_samples', 10),
('nuq_method', [
'none'
])
]
args += [OrderedDict(shared_args+gvar_args+args_super_sgd)]
return args, log_dir, module_name, exclude
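# Illustration (assumed semantics of the grid runner, which is not shown
# here): each OrderedDict is expanded into the cartesian product of its
# list-valued options, so e.g. ('nuq_bits', [3, 4]) combined with
# ('nuq_bucket_size', [8192, 8192*2]) yields four runs; tuple values such as
# ('alq', OrderedDict([('nuq_cd_epochs', 30)])) attach extra options that
# apply only when that choice is selected.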
| 12,833 | 36.747059 | 95 |
py
|
learning-to-quantize
|
learning-to-quantize-master/grid/__init__.py
| 0 | 0 | 0 |
py
|
|
learning-to-quantize
|
learning-to-quantize-master/main/gvar.py
|
from __future__ import print_function
import numpy as np
import logging
import os
import sys
import torch
import torch.nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.nn.functional as F
import torch.multiprocessing
import utils
import models
from data import get_loaders
from args import get_opt
from log_utils import TBXWrapper
from log_utils import Profiler
from estim.optim import OptimizerFactory
from tensorboardX import SummaryWriter
tb_logger = TBXWrapper()
def test(tb_logger, model, test_loader,
opt, niters, set_name='Test', prefix='V'):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target, idx in test_loader:
if opt.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = F.nll_loss(output, target, reduction='none')
test_loss += loss.sum().item()
# get the index of the max log-probability
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
wrong = len(test_loader.dataset) - correct
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
error = 100. * wrong / len(test_loader.dataset)
logging.info(
'\n{0} set: Average loss: {1:.4f}'
', Accuracy: {2}/{3} ({4:.2f}%)'
', Error: {5}/{3} ({6:.2f}%)\n'.format(
set_name, test_loss, correct, len(test_loader.dataset),
accuracy, wrong, error))
tb_logger.log_value('%sloss' % prefix, test_loss, step=niters)
tb_logger.log_value('%scorrect' % prefix, correct, step=niters)
tb_logger.log_value('%swrong' % prefix, wrong, step=niters)
tb_logger.log_value('%sacc' % prefix, accuracy, step=niters)
tb_logger.log_value('%serror' % prefix, error, step=niters)
return accuracy
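# Note (an observation, not repo documentation): test() above scores with
# F.nll_loss, so models are assumed to emit log-probabilities (e.g. a final
# log_softmax); with raw logits, F.cross_entropy would be the equivalent.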
def train(tb_logger, epoch, train_loader, model, optimizer, opt, test_loader,
save_checkpoint, train_test_loader):
batch_time = Profiler()
model.train()
profiler = Profiler()
init_iters = optimizer.niters % opt.epoch_iters
optimizer.logger.reset()
for batch_idx in range(init_iters, opt.epoch_iters):
profiler.start()
# sgd step
loss = optimizer.step(profiler)
batch_time.toc('Time')
batch_time.end()
optimizer.niters += 1
niters = optimizer.niters
# if True:
if batch_idx % opt.log_interval == 0:
gvar_log = ''
prof_log = ''
if optimizer.gvar.is_log_iter(niters):
gvar_log = '\t' + optimizer.gvar.log_var(model, niters)
if opt.log_profiler:
prof_log = '\t' + str(profiler)
logging.info(
'Epoch: [{0}][{1}/{2}]({niters})\t'
'Loss: {loss:.6f}\t'
'{batch_time}\t'
'{opt_log}{gvar_log}{prof_log}'.format(
epoch, batch_idx, len(train_loader),
loss=loss.item(),
batch_time=str(batch_time),
opt_log=str(optimizer.logger),
gvar_log=gvar_log,
prof_log=prof_log,
niters=niters))
if batch_idx % opt.tblog_interval == 0:
tb_logger.log_value('epoch', epoch, step=niters)
lr = optimizer.param_groups[0]['lr']
tb_logger.log_value('lr', lr, step=niters)
tb_logger.log_value('niters', niters, step=niters)
tb_logger.log_value('batch_idx', batch_idx, step=niters)
tb_logger.log_value('loss', loss, step=niters)
optimizer.logger.tb_log(tb_logger, step=niters)
if optimizer.niters % opt.epoch_iters == 0:
if opt.train_accuracy:
test(tb_logger,
model, train_test_loader, opt, optimizer.niters,
'Train', 'T')
if optimizer.niters % opt.chkpt_iter == 0 or optimizer.niters % opt.epoch_iters == 0:
prec1 = test(tb_logger,
model, test_loader, opt, optimizer.niters)
save_checkpoint(model, float(prec1), opt, optimizer,
gvar=optimizer.gvar)
tb_logger.save_log()
def untrain(model, gvar, opt):
steps = opt.untrain_steps
lr = opt.untrain_lr
std = opt.untrain_std
for batch_idx in range(steps):
loss = gvar.grad(-1)
with torch.no_grad():
for p in model.parameters():
p += p.grad*lr # ascent
p += torch.zeros_like(p.grad).normal_(0, std) # noise
if batch_idx % opt.log_interval == 0:
logging.info(
'Untrain: [{0}/{1}]\t'
'Loss: {loss:.6f}'.format(
batch_idx, steps, loss=loss.item()))
def main():
opt = get_opt()
tb_logger.configure(opt.logger_name, flush_secs=5, opt=opt)
logfname = os.path.join(opt.logger_name, 'log.txt')
logging.basicConfig(
filename=logfname,
format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(str(opt.d))
torch.manual_seed(opt.seed)
if opt.cuda:
# TODO: remove deterministic
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed(opt.seed)
np.random.seed(opt.seed)
# helps with wide-resnet by reducing memory and time 2x
cudnn.benchmark = True
train_loader, test_loader, train_test_loader = get_loaders(opt)
if opt.epoch_iters == 0:
opt.epoch_iters = int(
np.ceil(1. * len(train_loader.dataset) / opt.batch_size))
opt.maxiter = opt.epoch_iters * opt.epochs
if opt.g_epoch:
opt.gvar_start *= opt.epoch_iters
opt.g_optim_start = (opt.g_optim_start * opt.epoch_iters) + 1
model = models.init_model(opt)
optimizer = OptimizerFactory(model, train_loader, tb_logger, opt)
epoch = 0
save_checkpoint = utils.SaveCheckpoint()
# optionally resume from a checkpoint
if not opt.noresume:
model_path = os.path.join(opt.logger_name, opt.ckpt_name)
if os.path.isfile(model_path):
print("=> loading checkpoint '{}'".format(model_path))
checkpoint = torch.load(model_path)
best_prec1 = checkpoint['best_prec1']
optimizer.gvar.load_state_dict(checkpoint['gvar'])
optimizer.niters = checkpoint['niters']
epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
save_checkpoint.best_prec1 = best_prec1
print("=> loaded checkpoint '{}' (epoch {}, best_prec {})"
.format(model_path, epoch, best_prec1))
else:
print("=> no checkpoint found at '{}'".format(model_path))
if opt.niters > 0:
max_iters = opt.niters
else:
max_iters = opt.epochs * opt.epoch_iters
if opt.untrain_steps > 0:
untrain(model, optimizer.gvar, opt)
while optimizer.niters < max_iters:
optimizer.epoch = epoch
utils.adjust_lr(optimizer, opt)
ecode = train(
tb_logger,
epoch, train_loader, model, optimizer, opt, test_loader,
save_checkpoint, train_test_loader)
if ecode == -1:  # note: train() currently returns None, so this early-stop path never fires
break
epoch += 1
tb_logger.save_log()
if __name__ == '__main__':
main()
| 7,579 | 34.754717 | 94 |
py
|
PyBDSF
|
PyBDSF-master/setup.py
|
from skbuild import setup # This line replaces 'from setuptools import setup'
setup(
name='bdsf',
version='1.11.0a1',
author='David Rafferty',
author_email='[email protected]',
url='https://github.com/lofar-astron/PyBDSF',
description='Blob Detection and Source Finder',
long_description=open('README.rst', 'rt').read(),
long_description_content_type='text/x-rst',
platforms='Linux, Mac OS X',
packages=['bdsf', 'bdsf.nat'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Scientific/Engineering :: Astronomy'
],
extras_require={
'ishell': ['ipython<8.11', 'matplotlib']
},
install_requires=['backports.shutil_get_terminal_size',
'astropy', 'numpy', 'scipy'],
entry_points = {
'console_scripts': [
'pybdsf = bdsf.pybdsf:main [ishell]',
'pybdsm = bdsf.pybdsf:main [ishell]'
]
},
zip_safe=False,
)
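# A typical build flow for a scikit-build project (a sketch, not part of this
# repo's docs; skbuild drives CMake and the compilers under the hood):
#   pip install .            # compile the C++/Fortran extensions and install
#   pip install ".[ishell]"  # also pull the optional interactive-shell extras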
| 1,361 | 33.923077 | 78 |
py
|
PyBDSF
|
PyBDSF-master/test/test_watershed.py
|
import matplotlib.cm as cm
import scipy.ndimage as nd
from bdsf.const import fwsig
from bdsf.gausfit import Op_gausfit as gg
import bdsf.functions as func
from _cbdsm import MGFunction
from _cbdsm import lmder_fit, dn2g_fit, dnsg_fit
import numpy as N
from copy import deepcopy as cp
for isl in img.islands:
#isl = img.islands[153]
if isl.ngaus > 1:
thr = isl.mean + img.opts.thresh_pix*isl.rms
im = isl.image; mask = isl.mask_active; av = img.clipped_mean; im1 = cp(im)
ind = N.array(N.where(~mask)).transpose()
ind = [tuple(coord) for coord in ind if im[tuple(coord)] > thr]
n, m = isl.shape; iniposn = []; inipeak = []
for c in ind:
goodlist = [im[i,j] for i in range(c[0]-1,c[0]+2) for j in range(c[1]-1,c[1]+2) \
if i>=0 and i<n and j>=0 and j<m and (i,j) != c]
peak = N.sum(im[c] > goodlist) == len(goodlist)
if peak:
iniposn.append(c); inipeak.append(im[c])
nmulsrc = len(iniposn)
if nmulsrc > 1:
markers = N.zeros(im.shape, int)
markers[0,0] = 1
for ipk in range(nmulsrc):
pk = inipeak[ipk]; x, y = iniposn[ipk]
markers[int(round(x)), int(round(y))] = ipk+2
im2 = N.zeros(im.shape, int)
im1 = im1 - im1.min()
im1 = im1/im1.max()*255
im1 = 255-im1
nd.watershed_ift(N.array(im1, N.uint8), markers, output = im2)
fcn = MGFunction(im, isl.mask_active, 1)
fit = lmder_fit
gg1 = gg()
for ipk in range(nmulsrc):
ind = ipk+2
mom = func.momanalmask_gaus(im, im2, ind, 1.0, True)
indd = N.where(im2==ind)
mom[3] = 3.0; mom[4]=3.0
g = [float(N.max(im[indd])), int(round(mom[1])), int(round(mom[2])), mom[3]/fwsig, mom[4]/fwsig, mom[5]]
gg1.add_gaussian(fcn, g, dof = isl.size_active)
print(g)
fit(fcn, final=0, verbose=True)
print(fcn.parameters)
import pylab as pl
pl.figure()
pl.subplot(2,2,1);pl.imshow(N.transpose(im), interpolation='nearest', origin='lower'); pl.title(str(isl.island_id))
pl.subplot(2,2,2);pl.imshow(N.transpose(im1), interpolation='nearest', origin='lower'); pl.title(str(isl.island_id))
pl.subplot(2,2,3);pl.imshow(N.transpose(im2), interpolation='nearest', origin='lower'); pl.title(str(isl.island_id))
for g in fcn.parameters:
A, x1, x2, s1, s2, th = g
s1, s2 = map(abs, [s1, s2])
if s1 < s2: # s1 etc are sigma
ss1=s2; ss2=s1; th1 = divmod(th+90.0, 180)[1]
else:
ss1=s1; ss2=s2; th1 = divmod(th, 180)[1]
c = [A, x1, x2, ss1, ss2, th1]
x, y = N.indices(isl.shape)
x2, y2 = func.drawellipse(c)
#x2 = x2 + isl.origin[0]; y2 = y2 + isl.origin[1]
pl.subplot(2,2,4); pl.plot(x2, y2, '-r')
pl.imshow(N.transpose(im), origin='lower', interpolation='nearest')
import matplotlib.cm as cm
import scipy.ndimage as nd
import numpy as N
from bdsf.const import fwsig
from bdsf.gausfit import Op_gausfit as gg
import bdsf.functions as func
from _cbdsm import MGFunction
from _cbdsm import lmder_fit, dn2g_fit, dnsg_fit
image = N.zeros((100,100))
markers = N.zeros(image.shape, int)
op1 = N.zeros(image.shape, int)
op2 = N.zeros(image.shape, int)
x, y = N.indices(image.shape)
peaks = [2.0, 8.0, 8.0, 2.0]
posns = [(30, 20), (50, 20), (30, 70), (50, 70)]
bmaj = [2.0, 12.0, 2.0, 12.0]
brat = [2.0, 2.0, 2.0, 2.0]
markers[10,10] = 1
for ig in range(len(peaks)):
g = peaks[ig]*N.exp(-0.5*((x-posns[ig][0])*(x-posns[ig][0])+(y-posns[ig][1])*(y-posns[ig][1])) \
/(bmaj[ig]*bmaj[ig]/brat[ig]))
image = image + g
markers[int(round(posns[ig][0])), int(round(posns[ig][1]))] = ig+2
image1 = image - image.min()
image1 = image1/image1.max()*255
image1 = 255-image1
nd.watershed_ift(N.array(image1, N.uint8), markers, output = op1)
pl.figure();pl.imshow(N.transpose(image), interpolation='nearest', origin='lower'); pl.title('orig'); pl.colorbar()
pl.figure();pl.imshow(N.transpose(image1), interpolation='nearest', origin='lower'); pl.title('input1'); pl.colorbar()
pl.figure();pl.imshow(N.transpose(op1), interpolation='nearest', origin='lower'); pl.title('output1'); pl.colorbar()
pl.figure();pl.imshow(N.transpose(markers), interpolation='nearest', origin='lower'); pl.title('markers'); pl.colorbar()
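# Note (an observation about the code above): nd.watershed_ift floods from the
# markers through *low* pixel values first, which is why the image is inverted
# (255 - image) so that bright peaks become basins; the marker placed near a
# corner pixel acts as the background label.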
| 4,358 | 39.738318 | 122 |
py
|
PyBDSF
|
PyBDSF-master/test/test.py
|
import sys
import numpy as N
sys.path.append('')
def plotim():
""" Plots the image and overlays the island borders with the island number. Also draws the detected gaussians
at their fwhm radius, with each source being a colour (and line style). """
bdsm.analysis.plotresults(img)
def getisl(c):
""" Plots the image and overlays the island borders with the island number. Also draws the detected gaussians
at their fwhm radius, with each source being a colour (and line style). """
islid = bdsm.analysis.getisland(img, c)
return img.islands[islid]
def plot_morph_isl(img, isls=None, thr=None):
bdsm.analysis.plot_morph_isl(img, isls, thr)
def call_pybdsm(version, parameters):
if version not in ['stable', 'david', 'test']: raise RuntimeError("Unknown version: " + version)
if version == 'stable': import bdsm_stable as bdsm
if version == 'david': import bdsm_david as bdsm
if version == 'test': import bdsm_test as bdsm
img = bdsm.execute(bdsm.fits_chain, parameters)
return img, bdsm
#img, bdsm = call_pybdsm('test', {'fits_name': "subim.fits", 'beam' : (0.0015, 0.0015, 0.0), 'thresh':"hard", 'atrous_do' : False})
#img, bdsm = call_pybdsm('test', {'fits_name': "concatenated-003-002.restored.fits", 'thresh':"hard", 'atrous_do' : False, 'stop_at' : 'isl'})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "HydraA_74MHz_image.fits", 'thresh':"hard", 'atrous_do' : True, 'atrous_bdsm_do' : False, 'atrous_jmax' : 6, 'solnname' : 'del-norms_nobeam_deeper', 'ini_gausfit' : 'nobeam', 'opdir_overwrite' : 'append', 'mean_map' : False, 'rms_map' : False, 'thresh_pix' : 60, 'thresh_isl' : 45})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "HydraA_74MHz_image.pybdsm.atrous.w6_norms_deep.fits", 'thresh':"hard", 'atrous_do' : False, 'solnname' : 'nobeam', 'opdir_overwrite' : 'append', 'mean_map' : False, 'rms_map' : False, 'mean_map' : False, 'ini_gausfit' : 'nobeam', 'flag_smallsrc' : False, 'flag_minsnr' : 0.2, 'flag_maxsnr' : 3.0, 'flag_maxsize_isl' : 5.0, 'flag_maxsize_bm' : 45.0, 'flag_minsize_bm' : 0.2})
#img, bdsm = call_pybdsm('test', {'fits_name': "A2255_85CM_BEAM_cut.fits", 'beam' : (0.0167, 0.0167, 0.0), 'thresh':"hard", 'atrous_do' : True, 'atrous_bdsm_do' : True, 'solnname' : 'del', 'ini_gausfit' : 'fbdsm', 'opdir_overwrite' : 'append'})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "matteo_mfs.im.fits", 'beam' : (0.002, 0.002, 0.0), 'thresh':"hard"})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "WN65341H.fits", 'beam': (.0165515, .01500, 0.0), 'thresh':"hard", 'atrous_do' : False})
#img, bdsm = call_pybdsm('test', {'fits_name': "WN35078H.fits", 'beam': (.0261, .01500, 0.0), 'thresh':"hard", 'atrous_do' : True, 'shapelet_do' : False, 'ini_gausfit' : 'default' })
#img, bdsm = call_pybdsm('test', {'fits_name': "3C274-P.FITS", 'beam': (.00654, .00654, -45.0), 'thresh':"hard", 'atrous_do' : True, 'atrous_jmax' : 5, 'bbs_patches' : 'single', 'solnname' : 'new', 'ini_gausfit' : 'default', 'opdir_overwrite' : 'append', 'atrous_bdsm_do' : True, 'rms_map' : False, 'mean_map' : False, 'thresh_pix' : 100, 'thresh_isl' : 60})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "Cas_A-P.models.FITS", 'thresh':"hard", 'atrous_do' : False, 'rms_map' : False, 'mean_map' : False })
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "VIRA-4.MOD.FITS", 'thresh':"hard", 'atrous_do' : True })
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "VIRA-4.MOD.pybdsm.atrous.w6.fits", 'thresh':"hard", 'rms_box' : (63, 21)})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "Cyg_A-P_mod.FITS", 'thresh':"hard", 'atrous_do' : False , 'rms_map' : False })
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "Cyg_A-4.model.FITS", 'thresh':"hard", 'atrous_do' : False, 'rms_map' : False , 'thresh_pix' : 6})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "Cyg_A-P-cut.fits", 'thresh':"hard", 'atrous_do' : True , 'rms_map' : False, 'mean_map' : 'const', 'thresh_pix' : 1000, 'thresh_isl' : 800, 'ini_gausfit' : 'default', 'solnname' : 'del', 'atrous_bdsm_do' : False})
img, bdsm = call_pybdsm('test' ,{'fits_name': "Cyg_A-P-cut.pybdsm.atrous.w12.fits", 'thresh':"hard", 'atrous_do' : False , 'rms_map' : False, 'mean_map' : 'const', 'ini_gausfit' : 'fbdsm', 'solnname' : 'del', 'opdir_overwrite' : 'append', 'stop_at' : 'isl'})
#img, bdsm = call_pybdsm('test' ,{'fits_name': "Cyg_A-P-cut.pybdsm.atrous.w12.fits", 'thresh':"hard", 'atrous_do' : False , 'rms_map' : False, 'mean_map' : 'const', 'thresh_pix' : 30, 'thresh_isl' : 20, 'ini_gausfit' : 'fbdsm'})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "SB128_138-002-002.fits", 'thresh':"hard", 'solnname' : 'try' })
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "concatenated-000-004.restored.fits", 'rms_box' : (130, 40), 'thresh':"hard", 'atrous_do' : False, 'shapelet_do' : False })
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "mi_spam.fits", 'beam': (.0222, .0222, 0.0), 'thresh':"hard", 'atrous_do' : False })
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "marijke.fits", 'beam': (.004, .004, 0.0), 'thresh':"hard", 'atrous_do' : True, 'thresh_isl' : 20, 'thresh_pix' : 100})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "SST1cont.image.restored.fits", 'beam': (.008333, .008333, 0.0), 'thresh':"hard", 'atrous_do' : False})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "bootbig.FITS", 'beam': (.00154, .00154, 0.0), 'thresh':"hard", \
# 'atrous_do' : True, 'atrous_bdsm_do' : False})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "WN35060H", 'beam': (.0165515, .01500, 0.0), 'thresh':"hard"})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "lock_cube1.fits", 'beam': (.0054, .0044, 0.0), \
# 'collapse_mode' : 'average', 'collapse_wt' : 'unity', 'beam_sp_derive' : \
# True, 'atrous_do' : True, 'debug_figs' : True})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "newcube1.fits", 'beam': (.00389, .003056, 0.0), \
# 'collapse_mode' : 'average', 'collapse_wt' : 'rms', 'thresh' : 'hard', 'atrous_do' : True})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "sim1.1.FITS", 'beam': (.00143, .00143, 0.0),\
# 'collapse_mode' : 'average', 'collapse_wt' : 'rms', 'thresh' : 'hard', 'thresh_pix' : '30'})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "A2255_I.fits", 'beam': (.018, .014, 5.0), 'collapse_mode'
# : 'average', 'collapse_wt' : 'rms', 'thresh' : 'hard', 'thresh_isl' : 20.0, 'thresh_pix' : 50.0,
# 'polarisation_do': True, 'atrous_do' : True})
#img = bdsm.execute(bdsm.fits_chain,{'fits_name': "try.fits", 'beam': (.056, .028, 160.0), 'thresh':"hard", 'thresh_pix':20.})
| 6,695 | 68.030928 | 425 |
py
|
PyBDSF
|
PyBDSF-master/test/tbdsf_process_image.py
|
import bdsf
import sys
# Process the image
img = bdsf.process_image('tbdsf_process_image.in', ncores=2)
# List of operations that must have been done on `img`.
operations = [
'readimage', 'collapse', 'preprocess', 'rmsimage', 'threshold',
'islands', 'gausfit', 'gaul2srl', 'make_residimage', 'wavelet_atrous',
'shapelets', 'spectralindex', 'polarisation', 'psf_vary', 'cleanup'
]
# Return exit status 0 if everything went fine, otherwise return 1.
if img and all(oper in img.completed_Ops for oper in operations):
sys.exit(0)
else:
sys.exit(1)
| 568 | 27.45 | 74 |
py
|
PyBDSF
|
PyBDSF-master/test/colourcorrection.py
|
"""
This is for pybdsm for calculating spectral index. We assume a linear spectral index
in log(freq) and then each channel has a flux which is bit wrong because of the colour
correction problem within that band.
Now we average n such channels. There will be another error made, partly because of the
colour correction now for channels (including discretisation) and the colour correction
of the earlier 2nd order colour correction.
This is to see how much they differ. Refer notebook for forumlae.
"""
import numpy as N
import pylab as pl
import math
nchan = N.array([9, 17])
alpha_arr = N.arange(-1.3, -0.3, 0.1)
deltanu = N.array([0.05e6, 0.1e6, 0.2e6])
freq = N.arange(40.0e6, 200.0e6, 10.0e6)
pl1 = pl.figure()
pl2 = pl.figure()
pl3 = pl.figure()
k = 0
for inchan, n in enumerate(nchan):
for ibw, bw in enumerate(deltanu):
k += 1
for ia, alpha in enumerate(alpha_arr):
f_diff1 = N.zeros(len(freq))
f_diff2 = N.zeros(len(freq))
for ifreq, f in enumerate(freq):
f_arr = N.arange(f-(n-1)/2*bw, f+(n+1)/2*bw, bw)
f_naive = N.mean(f_arr)
f1 = N.power(f_arr, alpha)
f2 = N.power(f_arr, alpha-2.0)
f1 = 1.0/n*N.sum(f1)
f2 = 1.0/n*N.sum(f2)*bw*bw*alpha*(alpha-1.0)/24.0
f_eff1 = N.power(f1, 1.0/alpha)
f_eff2 = N.power(f1+f2, 1.0/alpha)
f_diff1[ifreq] = f_naive - f_eff2
f_diff2[ifreq] = f_eff1 - f_eff2
fig = pl.figure(pl1.number)
adjustprops = dict(wspace=0.5, hspace=0.5)
fig.subplots_adjust(**adjustprops)
ax = pl.subplot(2,3,k)
pl.plot(freq/1e6, f_diff1/1e3)
pl.title('n='+str(n)+'; bw='+str(bw/1e6)+' MHz')
pl.xlabel('Freq(MHz)')
pl.ylabel('Diff in freq (kHz)')
pl.setp(ax.get_xticklabels(), rotation='vertical', fontsize=12)
fig = pl.figure(pl2.number)
adjustprops = dict(wspace=0.5, hspace=0.5)
fig.subplots_adjust(**adjustprops)
ax = pl.subplot(2,3,k)
pl.plot(freq/1e6, f_diff2)
pl.title('n='+str(n)+'; bw='+str(bw/1e6)+' MHz')
pl.xlabel('Freq(MHz)')
pl.ylabel('Diff due to 2nd order (Hz)')
pl.setp(ax.get_xticklabels(), rotation='vertical', fontsize=12)
fig = pl.figure(pl3.number)
adjustprops = dict(wspace=0.9, hspace=0.5)
fig.subplots_adjust(**adjustprops)
ax = pl.subplot(2,3,k)
f2 = f_naive+5e6
y = f_diff1*alpha/f_naive/math.log(f_naive/(f2))
pl.plot(freq/1e6, y)
pl.title('n='+str(n)+'; bw='+str(bw/1e6)+' MHz')
pl.xlabel('Freq(MHz)')
pl.ylabel('Error in sp.in. for f2=f1+5MHz')
pl.setp(ax.get_xticklabels(), rotation='vertical', fontsize=12)
pl.figure(pl1.number)
pl.savefig('colourcorr_full.png')
pl.figure(pl2.number)
pl.savefig('colourcorr_order1-2.png')
pl.figure(pl3.number)
pl.savefig('colourcorr_delta_spin.png')
| 3,214 | 35.123596 | 88 |
py
|
PyBDSF
|
PyBDSF-master/test/do_stuff.py
|
"""make watershed images for each island in isls """
def do_ws(isls, crms):
import bdsm.functions as func
import os, subprocess
import pylab as pl
import numpy as N
thr = crms
for isl in isls:
image = isl.image*~isl.mask_active
op1, markers1 = func.watershed(image, thr=thr*3.)
pl.figure()
pl.suptitle('Island '+str(isl.island_id))
pl.subplot(2,2,1); pl.imshow(N.transpose(image), origin='lower', interpolation='nearest', vmin=-7*thr, vmax=15*thr); pl.title('Image')
pl.subplot(2,2,2); pl.imshow(N.transpose(op1*~isl.mask_active), origin='lower', interpolation='nearest'); pl.title('watershed1')
pl.subplot(2,2,3); pl.imshow(N.transpose(markers1*~isl.mask_active), origin='lower', interpolation='nearest'); pl.title('markers1')
def open_isl(isls, crms):
import pylab as pl
import scipy.ndimage as nd
import numpy as N
thr = crms
ft1 = N.array(((1,0,1), (0,1,0), (1,0,1)), int)
ft2 = N.array(((0,1,0), (1,1,1), (0,1,0)), int)
ft3 = N.ones((3,3), int)
ft5 = N.ones((5,5), int)
for isl in isls:
ma = ~isl.mask_active
open1 = nd.binary_opening(ma, ft1)
open2 = nd.binary_opening(ma, ft2)
open3 = nd.binary_opening(ma, ft3)
open5 = nd.binary_opening(ma, ft5)
pl.figure()
pl.suptitle('Island '+str(isl.island_id))
pl.subplot(2,2,1); pl.imshow(N.transpose(isl.image), origin='lower', interpolation='nearest'); pl.title('Image')
pl.subplot(2,2,2); pl.imshow(N.transpose(ma), origin='lower', interpolation='nearest'); pl.title('mask')
pl.subplot(2,2,3); pl.imshow(N.transpose(open3), origin='lower', interpolation='nearest'); pl.title('open 3x3')
pl.subplot(2,2,4); pl.imshow(N.transpose(open5), origin='lower', interpolation='nearest'); pl.title('open 5x5')
#pl.subplot(2,2,3); pl.imshow(N.transpose(open1), origin='lower', interpolation='nearest'); pl.title('open diag')
#pl.subplot(2,2,4); pl.imshow(N.transpose(open2), origin='lower', interpolation='nearest'); pl.title('open str')
pl.savefig('cyga_p_w12_bigisl_'+str(isl.island_id)+'_open.png')
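# Example usage (a sketch; assumes `img` is a PyBDSM image object that has
# been processed so that img.islands and img.clipped_rms exist):
#   do_ws(img.islands, img.clipped_rms)
#   open_isl(img.islands, img.clipped_rms)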
| 2,153 | 38.888889 | 140 |
py
|
PyBDSF
|
PyBDSF-master/test/Ateammodels.py
|
import pylab as pl
import bdsf, pyfits
import numpy as N
import os, subprocess
from bdsf.FITS import Op_loadFITS
from bdsf.collapse import Op_collapse
from bdsf.preprocess import Op_preprocess
from bdsf.rmsimage import Op_rmsimage
from bdsf.threshold import Op_threshold
from bdsf.islands import Op_islands
import bdsf.functions as func
from bdsf.analysis import plotresults
""" Try blindly running bdsf to see if boxsize is ok, so fitting doesnt hang. Then try various segmenting algorithms which dont
depend on rms ? """
directory = "A-team/"
ls = subprocess.Popen(["ls", directory], stdout=subprocess.PIPE).communicate()[0].decode()
ls = ls.split('\n')
files = []; rmsbox = []
chain = [Op_loadFITS(), Op_collapse(), Op_preprocess(), Op_rmsimage(), Op_threshold(), Op_islands()]
#ls = ['subim.fits']
bms = [(0.0015, 0.0015, 0.0)]  # note: the loop below indexes one beam per FITS file found
directory=''
for ifile, filename in enumerate(ls):
op = subprocess.Popen(["file", directory+filename], stdout=subprocess.PIPE).communicate()[0].decode()
if "FITS image data" in op:
print('Processing', filename)
img = bdsf.execute(chain, {'fits_name': filename, 'thresh': "hard", 'solnname': 'new', 'beam': bms[ifile], 'indir': directory})
files.append(filename)
rmsbox.append(img.opts.rms_box)
thr = img.clipped_rms
op1, markers1 = func.watershed(img.image, thr=thr*3.)
pl.figure()
pl.suptitle(img.filename)
pl.subplot(2,2,1); pl.imshow(N.transpose(img.image), origin='lower', interpolation='nearest', vmin=-7*thr, vmax=15*thr); pl.title('Image')
pl.subplot(2,2,2); pl.imshow(N.transpose(op1), origin='lower', interpolation='nearest'); pl.title('watershed1')
pl.subplot(2,2,3); pl.imshow(N.transpose(markers1), origin='lower', interpolation='nearest'); pl.title('markers1')
pl.subplot(2,2,4); plotresults(img, newfig=False, cbar=False)
pl.savefig(directory+filename+'_watershed.png')
else:
print(directory + filename + ' is not a FITS file!')
| 1,932 | 33.517857 | 142 |
py
|